522 files changed, 21182 insertions, 3916 deletions
diff --git a/Documentation/DocBook/tracepoint.tmpl b/Documentation/DocBook/tracepoint.tmpl
index 8bca1d5cec09..e8473eae2a20 100644
--- a/Documentation/DocBook/tracepoint.tmpl
+++ b/Documentation/DocBook/tracepoint.tmpl
@@ -16,6 +16,15 @@
 </address>
 </affiliation>
 </author>
+<author>
+<firstname>William</firstname>
+<surname>Cohen</surname>
+<affiliation>
+<address>
+<email>wcohen@redhat.com</email>
+</address>
+</affiliation>
+</author>
 </authorgroup>
 
 <legalnotice>
@@ -91,4 +100,8 @@
 !Iinclude/trace/events/signal.h
 </chapter>
 
+<chapter id="block">
+<title>Block IO</title>
+!Iinclude/trace/events/block.h
+</chapter>
 </book>
diff --git a/Documentation/RCU/NMI-RCU.txt b/Documentation/RCU/NMI-RCU.txt
index a6d32e65d222..a8536cb88091 100644
--- a/Documentation/RCU/NMI-RCU.txt
+++ b/Documentation/RCU/NMI-RCU.txt
@@ -34,7 +34,7 @@ NMI handler.
 cpu = smp_processor_id();
 ++nmi_count(cpu);
 
-if (!rcu_dereference(nmi_callback)(regs, cpu))
+if (!rcu_dereference_sched(nmi_callback)(regs, cpu))
 default_do_nmi(regs);
 
 nmi_exit();
@@ -47,12 +47,13 @@ function pointer. If this handler returns zero, do_nmi() invokes the
 default_do_nmi() function to handle a machine-specific NMI. Finally,
 preemption is restored.
 
-Strictly speaking, rcu_dereference() is not needed, since this code runs
-only on i386, which does not need rcu_dereference() anyway. However,
-it is a good documentation aid, particularly for anyone attempting to
-do something similar on Alpha.
+In theory, rcu_dereference_sched() is not needed, since this code runs
+only on i386, which in theory does not need rcu_dereference_sched()
+anyway. However, in practice it is a good documentation aid, particularly
+for anyone attempting to do something similar on Alpha or on systems
+with aggressive optimizing compilers.
 
-Quick Quiz: Why might the rcu_dereference() be necessary on Alpha,
+Quick Quiz: Why might the rcu_dereference_sched() be necessary on Alpha,
 given that the code referenced by the pointer is read-only?
 
 
@@ -99,17 +100,21 @@ invoke irq_enter() and irq_exit() on NMI entry and exit, respectively.
 
 Answer to Quick Quiz
 
-Why might the rcu_dereference() be necessary on Alpha, given
+Why might the rcu_dereference_sched() be necessary on Alpha, given
 that the code referenced by the pointer is read-only?
 
 Answer: The caller to set_nmi_callback() might well have
-initialized some data that is to be used by the
-new NMI handler. In this case, the rcu_dereference()
-would be needed, because otherwise a CPU that received
-an NMI just after the new handler was set might see
-the pointer to the new NMI handler, but the old
-pre-initialized version of the handler's data.
+initialized some data that is to be used by the new NMI
+handler. In this case, the rcu_dereference_sched() would
+be needed, because otherwise a CPU that received an NMI
+just after the new handler was set might see the pointer
+to the new NMI handler, but the old pre-initialized
+version of the handler's data.
 
-More important, the rcu_dereference() makes it clear
-to someone reading the code that the pointer is being
-protected by RCU.
+This same sad story can happen on other CPUs when using
+a compiler with aggressive pointer-value speculation
+optimizations.
+
+More important, the rcu_dereference_sched() makes it
+clear to someone reading the code that the pointer is
+being protected by RCU-sched.
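To make the answer above concrete, here is a minimal sketch of the publisher/reader pairing this document describes, modelled on the do_nmi() fragment in the file; the dummy callback and the body of set_nmi_callback() are illustrative assumptions, not code from this patch:

static int dummy_nmi_callback(struct pt_regs *regs, int cpu)
{
    return 0;   /* not handled; do_nmi() falls back to default_do_nmi() */
}

static int (*nmi_callback)(struct pt_regs *, int) = dummy_nmi_callback;

/* Updater: initialize the handler's data first, then publish the pointer. */
void set_nmi_callback(int (*callback)(struct pt_regs *, int))
{
    rcu_assign_pointer(nmi_callback, callback);
}

/* Reader: NMI context counts as an RCU-sched read-side critical section. */
void do_nmi(struct pt_regs *regs, int cpu)
{
    nmi_enter();
    if (!rcu_dereference_sched(nmi_callback)(regs, cpu))
        default_do_nmi(regs);
    nmi_exit();
}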
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index cbc180f90194..790d1a812376 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -260,7 +260,8 @@ over a rather long period of time, but improvements are always welcome!
 The reason that it is permissible to use RCU list-traversal
 primitives when the update-side lock is held is that doing so
 can be quite helpful in reducing code bloat when common code is
-shared between readers and updaters.
+shared between readers and updaters. Additional primitives
+are provided for this case, as discussed in lockdep.txt.
 
 10. Conversely, if you are in an RCU read-side critical section,
 and you don't hold the appropriate update-side lock, you -must-
@@ -344,8 +345,8 @@ over a rather long period of time, but improvements are always welcome!
 requiring SRCU's read-side deadlock immunity or low read-side
 realtime latency.
 
-Note that, rcu_assign_pointer() and rcu_dereference() relate to
-SRCU just as they do to other forms of RCU.
+Note that, rcu_assign_pointer() relates to SRCU just as they do
+to other forms of RCU.
 
 15. The whole point of call_rcu(), synchronize_rcu(), and friends
 is to wait until all pre-existing readers have finished before
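For context on the note above, a minimal sketch of how rcu_assign_pointer() on the update side pairs with the SRCU read-side primitives; the srcu_struct, the payload type, and the lack of update-side locking are assumptions of this sketch, not code from the patch:

struct foo {
    int a;
};

static struct foo *global_foo;
static struct srcu_struct foo_srcu;    /* init_srcu_struct() at boot time */

/* Reader: SRCU read-side critical section. */
int read_foo_a(void)
{
    int idx, a;

    idx = srcu_read_lock(&foo_srcu);
    a = srcu_dereference(global_foo, &foo_srcu)->a;
    srcu_read_unlock(&foo_srcu, idx);
    return a;
}

/* Updater: publish the new version, wait for pre-existing readers, free. */
void update_foo(struct foo *new_foo)
{
    struct foo *old = global_foo;

    rcu_assign_pointer(global_foo, new_foo);
    synchronize_srcu(&foo_srcu);
    kfree(old);
}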
diff --git a/Documentation/RCU/lockdep.txt b/Documentation/RCU/lockdep.txt
index fe24b58627bd..d7a49b2f6994 100644
--- a/Documentation/RCU/lockdep.txt
+++ b/Documentation/RCU/lockdep.txt
@@ -32,9 +32,20 @@ checking of rcu_dereference() primitives:
 srcu_dereference(p, sp):
 Check for SRCU read-side critical section.
 rcu_dereference_check(p, c):
-Use explicit check expression "c".
+Use explicit check expression "c". This is useful in
+code that is invoked by both readers and updaters.
 rcu_dereference_raw(p)
 Don't check. (Use sparingly, if at all.)
+rcu_dereference_protected(p, c):
+Use explicit check expression "c", and omit all barriers
+and compiler constraints. This is useful when the data
+structure cannot change, for example, in code that is
+invoked only by updaters.
+rcu_access_pointer(p):
+Return the value of the pointer and omit all barriers,
+but retain the compiler constraints that prevent duplicating
+or coalescsing. This is useful when when testing the
+value of the pointer itself, for example, against NULL.
 
 The rcu_dereference_check() check expression can be any boolean
 expression, but would normally include one of the rcu_read_lock_held()
@@ -59,7 +70,20 @@ In case (1), the pointer is picked up in an RCU-safe manner for vanilla
 RCU read-side critical sections, in case (2) the ->file_lock prevents
 any change from taking place, and finally, in case (3) the current task
 is the only task accessing the file_struct, again preventing any change
-from taking place.
+from taking place. If the above statement was invoked only from updater
+code, it could instead be written as follows:
+
+file = rcu_dereference_protected(fdt->fd[fd],
+lockdep_is_held(&files->file_lock) ||
+atomic_read(&files->count) == 1);
+
+This would verify cases #2 and #3 above, and furthermore lockdep would
+complain if this was used in an RCU read-side critical section unless one
+of these two cases held. Because rcu_dereference_protected() omits all
+barriers and compiler constraints, it generates better code than do the
+other flavors of rcu_dereference(). On the other hand, it is illegal
+to use rcu_dereference_protected() if either the RCU-protected pointer
+or the RCU-protected data that it points to can change concurrently.
 
 There are currently only "universal" versions of the rcu_assign_pointer()
 and RCU list-/tree-traversal primitives, which do not (yet) check for
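A compact sketch contrasting the three lockdep-checked accessors added above, using a hypothetical pointer that is protected by RCU on the read side and by a spinlock on the update side (the names gbl_foo and gbl_lock are placeholders):

struct foo {
    int a;
};

static struct foo *gbl_foo;             /* hypothetical RCU-protected pointer */
static DEFINE_SPINLOCK(gbl_lock);       /* hypothetical update-side lock */

/* Reader or updater: explicit check expression. */
int get_a(void)
{
    struct foo *p;
    int a;

    rcu_read_lock();
    p = rcu_dereference_check(gbl_foo,
                              rcu_read_lock_held() ||
                              lockdep_is_held(&gbl_lock));
    a = p->a;
    rcu_read_unlock();
    return a;
}

/* Updater only: no barriers emitted, but lockdep still checks the condition. */
void set_a(int a)
{
    spin_lock(&gbl_lock);
    rcu_dereference_protected(gbl_foo, lockdep_is_held(&gbl_lock))->a = a;
    spin_unlock(&gbl_lock);
}

/* Testing only the value of the pointer itself. */
bool foo_present(void)
{
    return rcu_access_pointer(gbl_foo) != NULL;
}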
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 1dc00ee97163..cfaac34c4557 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -840,6 +840,12 @@ SRCU: Initialization/cleanup
 init_srcu_struct
 cleanup_srcu_struct
 
+All: lockdep-checked RCU-protected pointer access
+
+rcu_dereference_check
+rcu_dereference_protected
+rcu_access_pointer
+
 See the comment headers in the source code (or the docbook generated
 from them) for more information.
 
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 6fab97ea7e6b..508b5b2b0289 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -1162,8 +1162,8 @@ where a driver received a request ala this before:
 
 As mentioned, there is no virtual mapping of a bio. For DMA, this is
 not a problem as the driver probably never will need a virtual mapping.
-Instead it needs a bus mapping (pci_map_page for a single segment or
-use blk_rq_map_sg for scatter gather) to be able to ship it to the driver. For
+Instead it needs a bus mapping (dma_map_page for a single segment or
+use dma_map_sg for scatter gather) to be able to ship it to the driver. For
 PIO drivers (or drivers that need to revert to PIO transfer once in a
 while (IDE for example)), where the CPU is doing the actual data
 transfer a virtual mapping is needed. If the driver supports highmem I/O,
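As a rough sketch of the bus-mapping path mentioned above; the queue, request, device, and scatterlist are assumed to be provided by the driver, and the controller programming is left as a comment:

/* Build a scatterlist for the request, then create the bus mapping. */
static int map_request_for_dma(struct request_queue *q, struct request *rq,
                               struct device *dev, struct scatterlist *sglist)
{
    int nents, count;

    nents = blk_rq_map_sg(q, rq, sglist);    /* fill sglist from the request's bios */
    count = dma_map_sg(dev, sglist, nents,
                       rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
    if (count == 0)
        return -ENOMEM;

    /* ... program the controller with the 'count' mapped segments ... */

    return count;
}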
diff --git a/Documentation/fb/imacfb.txt b/Documentation/fb/efifb.txt
index 316ec9bb7deb..a59916c29b33 100644
--- a/Documentation/fb/imacfb.txt
+++ b/Documentation/fb/efifb.txt
@@ -1,9 +1,9 @@
 
-What is imacfb?
+What is efifb?
 ===============
 
 This is a generic EFI platform driver for Intel based Apple computers.
-Imacfb is only for EFI booted Intel Macs.
+efifb is only for EFI booted Intel Macs.
 
 Supported Hardware
 ==================
@@ -16,16 +16,16 @@ MacMini
 How to use it?
 ==============
 
-Imacfb does not have any kind of autodetection of your machine.
+efifb does not have any kind of autodetection of your machine.
 You have to add the following kernel parameters in your elilo.conf:
 Macbook :
-video=imacfb:macbook
+video=efifb:macbook
 MacMini :
-video=imacfb:mini
+video=efifb:mini
 Macbook Pro 15", iMac 17" :
-video=imacfb:i17
+video=efifb:i17
 Macbook Pro 17", iMac 20" :
-video=imacfb:i20
+video=efifb:i20
 
 --
 Edgar Hucek <gimli@dark-green.com>
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index 8490480ce432..c0fc1c75fd88 100644
--- a/Documentation/input/multi-touch-protocol.txt
+++ b/Documentation/input/multi-touch-protocol.txt
@@ -68,6 +68,22 @@ like:
 SYN_MT_REPORT
 SYN_REPORT
 
+Here is the sequence after lifting one of the fingers:
+
+ABS_MT_POSITION_X
+ABS_MT_POSITION_Y
+SYN_MT_REPORT
+SYN_REPORT
+
+And here is the sequence after lifting the remaining finger:
+
+SYN_MT_REPORT
+SYN_REPORT
+
+If the driver reports one of BTN_TOUCH or ABS_PRESSURE in addition to the
+ABS_MT events, the last SYN_MT_REPORT event may be omitted. Otherwise, the
+last SYN_REPORT will be dropped by the input core, resulting in no
+zero-finger event reaching userland.
 
 Event Semantics
 ---------------
@@ -217,11 +233,6 @@ where examples can be found.
 difference between the contact position and the approaching tool position
 could be used to derive tilt.
 [2] The list can of course be extended.
-[3] The multi-touch X driver is currently in the prototyping stage. At the
-time of writing (April 2009), the MT protocol is not yet merged, and the
-prototype implements finger matching, basic mouse support and two-finger
-scrolling. The project aims at improving the quality of current multi-touch
-functionality available in the Synaptics X driver, and in addition
-implement more advanced gestures.
+[3] Multitouch X driver project: http://bitmath.org/code/multitouch/.
 [4] See the section on event computation.
 [5] See the section on finger tracking.
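As a driver-side illustration of the sequences above, a minimal sketch of how two anonymous contacts and a subsequent empty frame might be reported; the input_dev and the coordinates are placeholders. input_mt_sync() emits SYN_MT_REPORT and input_sync() emits SYN_REPORT:

static void report_two_contacts(struct input_dev *dev,
                                int x1, int y1, int x2, int y2)
{
    input_report_abs(dev, ABS_MT_POSITION_X, x1);
    input_report_abs(dev, ABS_MT_POSITION_Y, y1);
    input_mt_sync(dev);        /* SYN_MT_REPORT: end of first contact */

    input_report_abs(dev, ABS_MT_POSITION_X, x2);
    input_report_abs(dev, ABS_MT_POSITION_Y, y2);
    input_mt_sync(dev);        /* SYN_MT_REPORT: end of second contact */

    input_sync(dev);           /* SYN_REPORT: end of frame */
}

/* All fingers lifted: an empty frame, as described above. */
static void report_no_contacts(struct input_dev *dev)
{
    input_mt_sync(dev);
    input_sync(dev);
}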
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e4cbca58536c..e2202e93b148 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -320,11 +320,6 @@ and is between 256 and 4096 characters. It is defined in the file
 amd_iommu= [HW,X86-84]
 Pass parameters to the AMD IOMMU driver in the system.
 Possible values are:
-isolate - enable device isolation (each device, as far
-as possible, will get its own protection
-domain) [default]
-share - put every device behind one IOMMU into the
-same protection domain
 fullflush - enable flushing of IO/TLB entries when
 they are unmapped. Otherwise they are
 flushed before they will be reused, which
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
new file mode 100644
index 000000000000..7ee770b5ef5f
--- /dev/null
+++ b/Documentation/networking/stmmac.txt
@@ -0,0 +1,143 @@
+STMicroelectronics 10/100/1000 Synopsys Ethernet driver
+
+Copyright (C) 2007-2010 STMicroelectronics Ltd
+Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+
+This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers
+(Synopsys IP blocks); it has been fully tested on STLinux platforms.
+
+Currently this network device driver is for all STM embedded MAC/GMAC
+(7xxx SoCs).
+
+DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100
+Universal version 4.0 have been used for developing the first code
+implementation.
+
+Please, for more information also visit: www.stlinux.com
+
+1) Kernel Configuration
+The kernel configuration option is STMMAC_ETH:
+Device Drivers ---> Network device support ---> Ethernet (1000 Mbit) --->
+STMicroelectronics 10/100/1000 Ethernet driver (STMMAC_ETH)
+
+2) Driver parameters list:
+debug: message level (0: no output, 16: all);
+phyaddr: to manually provide the physical address to the PHY device;
+dma_rxsize: DMA rx ring size;
+dma_txsize: DMA tx ring size;
+buf_sz: DMA buffer size;
+tc: control the HW FIFO threshold;
+tx_coe: Enable/Disable Tx Checksum Offload engine;
+watchdog: transmit timeout (in milliseconds);
+flow_ctrl: Flow control ability [on/off];
+pause: Flow Control Pause Time;
+tmrate: timer period (only if timer optimisation is configured).
+
+3) Command line options
+Driver parameters can be also passed in command line by using:
+stmmaceth=dma_rxsize:128,dma_txsize:512
+
+4) Driver information and notes
+
+4.1) Transmit process
+The xmit method is invoked when the kernel needs to transmit a packet; it sets
+the descriptors in the ring and informs the DMA engine that there is a packet
+ready to be transmitted.
+Once the controller has finished transmitting the packet, an interrupt is
+triggered; So the driver will be able to release the socket buffers.
+By default, the driver sets the NETIF_F_SG bit in the features field of the
+net_device structure enabling the scatter/gather feature.
+
+4.2) Receive process
+When one or more packets are received, an interrupt happens. The interrupts
+are not queued so the driver has to scan all the descriptors in the ring during
+the receive process.
+This is based on NAPI so the interrupt handler signals only if there is work to be
+done, and it exits.
+Then the poll method will be scheduled at some future point.
+The incoming packets are stored, by the DMA, in a list of pre-allocated socket
+buffers in order to avoid the memcpy (Zero-copy).
+
+4.3) Timer-Driver Interrupt
+Instead of having the device that asynchronously notifies the frame receptions, the
+driver configures a timer to generate an interrupt at regular intervals.
+Based on the granularity of the timer, the frames that are received by the device
+will experience different levels of latency. Some NICs have dedicated timer
+device to perform this task. STMMAC can use either the RTC device or the TMU
+channel 2 on STLinux platforms.
+The timers frequency can be passed to the driver as parameter; when change it,
+take care of both hardware capability and network stability/performance impact.
+Several performance tests on STM platforms showed this optimisation allows to spare
+the CPU while having the maximum throughput.
+
+4.4) WOL
+Wake up on Lan feature through Magic Frame is only supported for the GMAC
+core.
+
+4.5) DMA descriptors
+Driver handles both normal and enhanced descriptors. The latter has been only
+tested on DWC Ether MAC 10/100/1000 Universal version 3.41a.
+
+4.6) Ethtool support
+Ethtool is supported. Driver statistics and internal errors can be taken using:
+ethtool -S ethX command. It is possible to dump registers etc.
+
+4.7) Jumbo and Segmentation Offloading
+Jumbo frames are supported and tested for the GMAC.
+The GSO has been also added but it's performed in software.
+LRO is not supported.
+
+4.8) Physical
+The driver is compatible with PAL to work with PHY and GPHY devices.
+
+4.9) Platform information
+Several information came from the platform; please refer to the
+driver's Header file in include/linux directory.
+
+struct plat_stmmacenet_data {
+int bus_id;
+int pbl;
+int has_gmac;
+void (*fix_mac_speed)(void *priv, unsigned int speed);
+void (*bus_setup)(unsigned long ioaddr);
+#ifdef CONFIG_STM_DRIVERS
+struct stm_pad_config *pad_config;
+#endif
+void *bsp_priv;
+};
+
+Where:
+- pbl (Programmable Burst Length) is maximum number of
+beats to be transferred in one DMA transaction.
+GMAC also enables the 4xPBL by default.
+- fix_mac_speed and bus_setup are used to configure internal target
+registers (on STM platforms);
+- has_gmac: GMAC core is on board (get it at run-time in the next step);
+- bus_id: bus identifier.
+
+struct plat_stmmacphy_data {
+int bus_id;
+int phy_addr;
+unsigned int phy_mask;
+int interface;
+int (*phy_reset)(void *priv);
+void *priv;
+};
+
+Where:
+- bus_id: bus identifier;
+- phy_addr: physical address used for the attached phy device;
+set it to -1 to get it at run-time;
+- interface: physical MII interface mode;
+- phy_reset: hook to reset HW function.
+
+TODO:
+- Continue to make the driver more generic and suitable for other Synopsys
+Ethernet controllers used on other architectures (i.e. ARM).
+- 10G controllers are not supported.
+- MAC uses Normal descriptors and GMAC uses enhanced ones.
+This is a limit that should be reviewed. MAC could want to
+use the enhanced structure.
+- Checksumming: Rx/Tx csum is done in HW in case of GMAC only.
+- Review the timer optimisation code to use an embedded device that seems to be
+available in new chip generations.
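To illustrate the platform information described above, a sketch of how a board file might fill in the two structures; the values and the fix_mac_speed callback are made-up placeholders, not taken from a real platform:

static void my_board_fix_mac_speed(void *priv, unsigned int speed)
{
    /* adjust board-specific glue logic for the negotiated link speed */
}

static struct plat_stmmacenet_data my_stmmac_plat_data = {
    .bus_id        = 0,
    .pbl           = 32,        /* programmable burst length */
    .has_gmac      = 1,         /* GMAC core present on this board */
    .fix_mac_speed = my_board_fix_mac_speed,
};

static struct plat_stmmacphy_data my_stmmac_phy_data = {
    .bus_id    = 0,
    .phy_addr  = -1,            /* probe the PHY address at run-time */
    .phy_mask  = 0,
    .interface = PHY_INTERFACE_MODE_MII,
};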
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 0e58b4539176..e8c8f4f06c67 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -41,11 +41,12 @@ SOF_TIMESTAMPING_SOFTWARE: return system time stamp generated in
 SOF_TIMESTAMPING_TX/RX determine how time stamps are generated.
 SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the
 following control message:
+
 struct scm_timestamping {
 struct timespec systime;
 struct timespec hwtimetrans;
 struct timespec hwtimeraw;
 };
 
 recvmsg() can be used to get this control message for regular incoming
 packets. For send time stamps the outgoing packet is looped back to
@@ -87,12 +88,13 @@ by the network device and will be empty without that support.
 SIOCSHWTSTAMP:
 
 Hardware time stamping must also be initialized for each device driver
-that is expected to do hardware time stamping. The parameter is:
+that is expected to do hardware time stamping. The parameter is defined in
+/include/linux/net_tstamp.h as:
 
 struct hwtstamp_config {
 int flags; /* no flags defined right now, must be zero */
 int tx_type; /* HWTSTAMP_TX_* */
 int rx_filter; /* HWTSTAMP_FILTER_* */
 };
 
 Desired behavior is passed into the kernel and to a specific device by
@@ -139,42 +141,56 @@ enum {
 /* time stamp any incoming packet */
 HWTSTAMP_FILTER_ALL,
 
 /* return value: time stamp all packets requested plus some others */
 HWTSTAMP_FILTER_SOME,
 
 /* PTP v1, UDP, any kind of event packet */
 HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
 
-...
+/* for the complete list of values, please check
+* the include file /include/linux/net_tstamp.h
+*/
 };
 
 
 DEVICE IMPLEMENTATION
 
 A driver which supports hardware time stamping must support the
-SIOCSHWTSTAMP ioctl. Time stamps for received packets must be stored
-in the skb with skb_hwtstamp_set().
+SIOCSHWTSTAMP ioctl and update the supplied struct hwtstamp_config with
+the actual values as described in the section on SIOCSHWTSTAMP.
+
+Time stamps for received packets must be stored in the skb. To get a pointer
+to the shared time stamp structure of the skb call skb_hwtstamps(). Then
+set the time stamps in the structure:
+
+struct skb_shared_hwtstamps {
+/* hardware time stamp transformed into duration
+* since arbitrary point in time
+*/
+ktime_t hwtstamp;
+ktime_t syststamp; /* hwtstamp transformed to system time base */
+};
 
 Time stamps for outgoing packets are to be generated as follows:
-- In hard_start_xmit(), check if skb_hwtstamp_check_tx_hardware()
-returns non-zero. If yes, then the driver is expected
-to do hardware time stamping.
+- In hard_start_xmit(), check if skb_tx(skb)->hardware is set no-zero.
+If yes, then the driver is expected to do hardware time stamping.
 - If this is possible for the skb and requested, then declare
-that the driver is doing the time stamping by calling
-skb_hwtstamp_tx_in_progress(). A driver not supporting
-hardware time stamping doesn't do that. A driver must never
-touch sk_buff::tstamp! It is used to store how time stamping
-for an outgoing packets is to be done.
+that the driver is doing the time stamping by setting the field
+skb_tx(skb)->in_progress non-zero. You might want to keep a pointer
+to the associated skb for the next step and not free the skb. A driver
+not supporting hardware time stamping doesn't do that. A driver must
+never touch sk_buff::tstamp! It is used to store software generated
+time stamps by the network subsystem.
 - As soon as the driver has sent the packet and/or obtained a
 hardware time stamp for it, it passes the time stamp back by
 calling skb_hwtstamp_tx() with the original skb, the raw
-hardware time stamp and a handle to the device (necessary
-to convert the hardware time stamp to system time). If obtaining
-the hardware time stamp somehow fails, then the driver should
-not fall back to software time stamping. The rationale is that
-this would occur at a later time in the processing pipeline
-than other software time stamping and therefore could lead
-to unexpected deltas between time stamps.
-- If the driver did not call skb_hwtstamp_tx_in_progress(), then
+hardware time stamp. skb_hwtstamp_tx() clones the original skb and
+adds the timestamps, therefore the original skb has to be freed now.
+If obtaining the hardware time stamp somehow fails, then the driver
+should not fall back to software time stamping. The rationale is that
+this would occur at a later time in the processing pipeline than other
+software time stamping and therefore could lead to unexpected deltas
+between time stamps.
+- If the driver did not call set skb_tx(skb)->in_progress, then
 dev_hard_start_xmit() checks whether software time stamping
 is wanted as fallback and potentially generates the time stamp.
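A condensed sketch of the transmit-path steps listed above, as a hypothetical driver might arrange them; my_keep_tx_skb() and my_counter_to_ktime() are invented helpers for illustration, and the field and call names follow the text above:

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    if (skb_tx(skb)->hardware) {
        /* hardware time stamping requested and supported by this device */
        skb_tx(skb)->in_progress = 1;
        my_keep_tx_skb(dev, skb);   /* hypothetical: remember the skb, do not free it yet */
    }

    /* ... hand the packet to the hardware queue ... */
    return NETDEV_TX_OK;
}

/* Later, e.g. from the TX-completion interrupt handler: */
static void my_tx_complete(struct net_device *dev, struct sk_buff *skb, u64 raw_stamp)
{
    struct skb_shared_hwtstamps hwtstamps = {
        .hwtstamp = my_counter_to_ktime(raw_stamp),   /* hypothetical conversion helper */
    };

    skb_hwtstamp_tx(skb, &hwtstamps);   /* clones the skb and attaches the stamp */
    dev_kfree_skb_any(skb);             /* the original skb is freed by the driver */
}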
diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt
index f4dd3bf99d12..98d14cb8a85d 100644
--- a/Documentation/sound/alsa/HD-Audio.txt
+++ b/Documentation/sound/alsa/HD-Audio.txt
@@ -119,10 +119,18 @@ the codec slots 0 and 1 no matter what the hardware reports.
 
 Interrupt Handling
 ~~~~~~~~~~~~~~~~~~
-In rare but some cases, the interrupt isn't properly handled as
-default. You would notice this by the DMA transfer error reported by
-ALSA PCM core, for example. Using MSI might help in such a case.
-Pass `enable_msi=1` option for enabling MSI.
+HD-audio driver uses MSI as default (if available) since 2.6.33
+kernel as MSI works better on some machines, and in general, it's
+better for performance. However, Nvidia controllers showed bad
+regressions with MSI (especially in a combination with AMD chipset),
+thus we disabled MSI for them.
+
+There seem also still other devices that don't work with MSI. If you
+see a regression wrt the sound quality (stuttering, etc) or a lock-up
+in the recent kernel, try to pass `enable_msi=0` option to disable
+MSI. If it works, you can add the known bad device to the blacklist
+defined in hda_intel.c. In such a case, please report and give the
+patch back to the upstream developer.
 
 
 HD-AUDIO CODEC
diff --git a/Documentation/watchdog/src/watchdog-simple.c b/Documentation/watchdog/src/watchdog-simple.c
index 4cf72f3fa8e9..ba45803a2216 100644
--- a/Documentation/watchdog/src/watchdog-simple.c
+++ b/Documentation/watchdog/src/watchdog-simple.c
@@ -17,9 +17,6 @@ int main(void)
 ret = -1;
 break;
 }
-ret = fsync(fd);
-if (ret)
-break;
 sleep(10);
 }
 close(fd);
diff --git a/Documentation/watchdog/src/watchdog-test.c b/Documentation/watchdog/src/watchdog-test.c
index a750532ffcf8..63fdc34ceb98 100644
--- a/Documentation/watchdog/src/watchdog-test.c
+++ b/Documentation/watchdog/src/watchdog-test.c
@@ -31,6 +31,8 @@ static void keep_alive(void)
 */
 int main(int argc, char *argv[])
 {
+int flags;
+
 fd = open("/dev/watchdog", O_WRONLY);
 
 if (fd == -1) {
@@ -41,12 +43,14 @@ int main(int argc, char *argv[])
 
 if (argc > 1) {
 if (!strncasecmp(argv[1], "-d", 2)) {
-ioctl(fd, WDIOC_SETOPTIONS, WDIOS_DISABLECARD);
+flags = WDIOS_DISABLECARD;
+ioctl(fd, WDIOC_SETOPTIONS, &flags);
 fprintf(stderr, "Watchdog card disabled.\n");
 fflush(stderr);
 exit(0);
 } else if (!strncasecmp(argv[1], "-e", 2)) {
-ioctl(fd, WDIOC_SETOPTIONS, WDIOS_ENABLECARD);
+flags = WDIOS_ENABLECARD;
+ioctl(fd, WDIOC_SETOPTIONS, &flags);
 fprintf(stderr, "Watchdog card enabled.\n");
 fflush(stderr);
 exit(0);
diff --git a/Documentation/watchdog/watchdog-api.txt b/Documentation/watchdog/watchdog-api.txt
index 4cc4ba9d7150..eb7132ed8bbc 100644
--- a/Documentation/watchdog/watchdog-api.txt
+++ b/Documentation/watchdog/watchdog-api.txt
@@ -222,11 +222,10 @@ returned value is the temperature in degrees fahrenheit.
 ioctl(fd, WDIOC_GETTEMP, &temperature);
 
 Finally the SETOPTIONS ioctl can be used to control some aspects of
-the cards operation; right now the pcwd driver is the only one
-supporting this ioctl.
+the cards operation.
 
 int options = 0;
-ioctl(fd, WDIOC_SETOPTIONS, options);
+ioctl(fd, WDIOC_SETOPTIONS, &options);
 
 The following options are available:
 
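As a stand-alone illustration of the updated call above (the options word is now passed by pointer), in the same spirit as the -d branch of watchdog-test.c earlier in this diff:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
    int options = WDIOS_DISABLECARD;
    int fd = open("/dev/watchdog", O_WRONLY);

    if (fd == -1) {
        perror("watchdog");
        return 1;
    }
    if (ioctl(fd, WDIOC_SETOPTIONS, &options) == 0)
        fprintf(stderr, "Watchdog card disabled.\n");
    close(fd);
    return 0;
}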
diff --git a/MAINTAINERS b/MAINTAINERS
index 3d29fa389888..a0e3c3a47a51 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -485,8 +485,8 @@ S: Maintained
 F: drivers/input/mouse/bcm5974.c
 
 APPLE SMC DRIVER
-M: Nicolas Boichat <nicolas@boichat.ch>
-L: mactel-linux-devel@lists.sourceforge.net
+M: Henrik Rydberg <rydberg@euromail.se>
+L: lm-sensors@lm-sensors.org
 S: Maintained
 F: drivers/hwmon/applesmc.c
 
@@ -971,6 +971,16 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W: http://www.mcuos.com
 S: Maintained
 
+ARM/U300 MACHINE SUPPORT
+M: Linus Walleij <linus.walleij@stericsson.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Supported
+F: arch/arm/mach-u300/
+F: drivers/i2c/busses/i2c-stu300.c
+F: drivers/rtc/rtc-coh901331.c
+F: drivers/watchdog/coh901327_wdt.c
+F: drivers/dma/coh901318*
+
 ARM/U8500 ARM ARCHITECTURE
 M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2474,12 +2484,6 @@ L: linuxppc-dev@ozlabs.org
 S: Odd Fixes
 F: drivers/char/hvc_*
 
-VIRTIO CONSOLE DRIVER
-M: Amit Shah <amit.shah@redhat.com>
-L: virtualization@lists.linux-foundation.org
-S: Maintained
-F: drivers/char/virtio_console.c
-
 iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER
 M: Peter Jones <pjones@redhat.com>
 M: Konrad Rzeszutek Wilk <konrad@kernel.org>
@@ -5971,6 +5975,13 @@ S: Maintained
 F: Documentation/filesystems/vfat.txt
 F: fs/fat/
 
+VIRTIO CONSOLE DRIVER
+M: Amit Shah <amit.shah@redhat.com>
+L: virtualization@lists.linux-foundation.org
+S: Maintained
+F: drivers/char/virtio_console.c
+F: include/linux/virtio_console.h
+
 VIRTIO HOST (VHOST)
 M: "Michael S. Tsirkin" <mst@redhat.com>
 L: kvm@vger.kernel.org
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 34
-EXTRAVERSION = -rc3
-NAME = Man-Eating Seals of Antiquity
+EXTRAVERSION = -rc5
+NAME = Sheep on Meth
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 0f23009170a1..6ab6b337a913 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -172,7 +172,7 @@ not_angel:
 adr r0, LC0
 ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp})
 THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip} )
-THUMB( ldr sp, [r0, #28] )
+THUMB( ldr sp, [r0, #32] )
 subs r0, r0, r1 @ calculate the delta offset
 
 @ if delta is zero, we are
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7f36d00600b4..feb988a7ec37 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -11,7 +11,11 @@
 
 #define kmap_prot PAGE_KERNEL
 
-#define flush_cache_kmaps() flush_cache_all()
+#define flush_cache_kmaps() \
+do { \
+if (cache_is_vivt()) \
+flush_cache_all(); \
+} while (0)
 
 extern pte_t *pkmap_page_table;
 
@@ -21,11 +25,20 @@ extern void *kmap_high(struct page *page);
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
+extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
+extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
+
+/*
+* The following functions are already defined by <linux/highmem.h>
+* when CONFIG_HIGHMEM is not set.
+*/
+#ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
 extern void *kmap_atomic(struct page *page, enum km_type type);
 extern void kunmap_atomic(void *kvaddr, enum km_type type);
 extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 extern struct page *kmap_atomic_to_page(const void *ptr);
+#endif
 
 #endif
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c019949a5189..c4b2ea3fbe42 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type {
 KM_IRQ1,
 KM_SOFTIRQ0,
 KM_SOFTIRQ1,
+KM_L1_CACHE,
 KM_L2_CACHE,
 KM_TYPE_NR
 };
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index bf65e9f4525d..47f023aa8495 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -59,23 +59,22 @@ struct iwmmxt_sigframe {
 #endif /* CONFIG_IWMMXT */
 
 #ifdef CONFIG_VFP
-#if __LINUX_ARM_ARCH__ < 6
-/* For ARM pre-v6, we use fstmiax and fldmiax. This adds one extra
-* word after the registers, and a word of padding at the end for
-* alignment. */
 #define VFP_MAGIC 0x56465001
-#define VFP_STORAGE_SIZE 152
-#else
-#define VFP_MAGIC 0x56465002
-#define VFP_STORAGE_SIZE 144
-#endif
 
 struct vfp_sigframe
 {
 unsigned long magic;
 unsigned long size;
-union vfp_state storage;
-};
+struct user_vfp ufp;
+struct user_vfp_exc ufp_exc;
+} __attribute__((__aligned__(8)));
+
+/*
+* 8 byte for magic and size, 264 byte for ufp, 12 bytes for ufp_exc,
+* 4 bytes padding.
+*/
+#define VFP_STORAGE_SIZE sizeof(struct vfp_sigframe)
+
 #endif /* CONFIG_VFP */
 
 /*
@@ -91,7 +90,7 @@ struct aux_sigframe {
 #ifdef CONFIG_IWMMXT
 struct iwmmxt_sigframe iwmmxt;
 #endif
-#if 0 && defined CONFIG_VFP /* Not yet saved. */
+#ifdef CONFIG_VFP
 struct vfp_sigframe vfp;
 #endif
 /* Something that isn't a valid magic number for any coprocessor. */
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index df95e050f9dd..05ac4b06876a 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -83,11 +83,21 @@ struct user{
 
 /*
 * User specific VFP registers. If only VFPv2 is present, registers 16 to 31
-* are ignored by the ptrace system call.
+* are ignored by the ptrace system call and the signal handler.
 */
 struct user_vfp {
 unsigned long long fpregs[32];
 unsigned long fpscr;
 };
 
+/*
+* VFP exception registers exposed to user space during signal delivery.
+* Fields not relavant to the current VFP architecture are ignored.
+*/
+struct user_vfp_exc {
+unsigned long fpexc;
+unsigned long fpinst;
+unsigned long fpinst2;
+};
+
 #endif /* _ARM_USER_H */
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index e7714f367eb8..907d5a620bca 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -18,6 +18,7 @@
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 #include <asm/unistd.h>
+#include <asm/vfp.h>
 
 #include "ptrace.h"
 #include "signal.h"
@@ -175,6 +176,90 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
 
 #endif
 
+#ifdef CONFIG_VFP
+
+static int preserve_vfp_context(struct vfp_sigframe __user *frame)
+{
+struct thread_info *thread = current_thread_info();
+struct vfp_hard_struct *h = &thread->vfpstate.hard;
+const unsigned long magic = VFP_MAGIC;
+const unsigned long size = VFP_STORAGE_SIZE;
+int err = 0;
+
+vfp_sync_hwstate(thread);
+__put_user_error(magic, &frame->magic, err);
+__put_user_error(size, &frame->size, err);
+
+/*
+* Copy the floating point registers. There can be unused
+* registers see asm/hwcap.h for details.
+*/
+err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
+sizeof(h->fpregs));
+/*
+* Copy the status and control register.
+*/
+__put_user_error(h->fpscr, &frame->ufp.fpscr, err);
+
+/*
+* Copy the exception registers.
+*/
+__put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
+__put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+__put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+return err ? -EFAULT : 0;
+}
+
+static int restore_vfp_context(struct vfp_sigframe __user *frame)
+{
+struct thread_info *thread = current_thread_info();
+struct vfp_hard_struct *h = &thread->vfpstate.hard;
+unsigned long magic;
+unsigned long size;
+unsigned long fpexc;
+int err = 0;
+
+__get_user_error(magic, &frame->magic, err);
+__get_user_error(size, &frame->size, err);
+
+if (err)
+return -EFAULT;
+if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+return -EINVAL;
+
+/*
+* Copy the floating point registers. There can be unused
+* registers see asm/hwcap.h for details.
+*/
+err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
+sizeof(h->fpregs));
+/*
+* Copy the status and control register.
+*/
+__get_user_error(h->fpscr, &frame->ufp.fpscr, err);
+
+/*
+* Sanitise and restore the exception registers.
+*/
+__get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
+/* Ensure the VFP is enabled. */
+fpexc |= FPEXC_EN;
+/* Ensure FPINST2 is invalid and the exception flag is cleared. */
+fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+h->fpexc = fpexc;
+
+__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+if (!err)
+vfp_flush_hwstate(thread);
+
+return err ? -EFAULT : 0;
+}
+
+#endif
+
 /*
 * Do a signal return; undo the signal stack. These are aligned to 64-bit.
 */
@@ -233,8 +318,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
 err |= restore_iwmmxt_context(&aux->iwmmxt);
 #endif
 #ifdef CONFIG_VFP
-// if (err == 0)
-// err |= vfp_restore_state(&sf->aux.vfp);
+if (err == 0)
+err |= restore_vfp_context(&aux->vfp);
 #endif
 
 return err;
@@ -348,8 +433,8 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
 err |= preserve_iwmmxt_context(&aux->iwmmxt);
 #endif
 #ifdef CONFIG_VFP
-// if (err == 0)
-// err |= vfp_save_state(&sf->aux.vfp);
+if (err == 0)
+err |= preserve_vfp_context(&aux->vfp);
 #endif
 __put_user_error(0, &aux->end_magic, err);
 
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 027dd570dcc3..d4004557532a 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -16,8 +16,8 @@ obj-$(CONFIG_ARCH_AT91SAM9261) += at91sam9261.o at91sam926x_time.o at91sam9261_d
 obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT572D940HF) += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o
diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S index 987fab3d846a..9c5b48e68a71 100644 --- a/arch/arm/mach-at91/pm_slowclock.S +++ b/arch/arm/mach-at91/pm_slowclock.S | |||
| @@ -175,8 +175,6 @@ ENTRY(at91_slow_clock) | |||
| 175 | orr r3, r3, #(1 << 29) /* bit 29 always set */ | 175 | orr r3, r3, #(1 << 29) /* bit 29 always set */ |
| 176 | str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)] | 176 | str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)] |
| 177 | 177 | ||
| 178 | wait_pllalock | ||
| 179 | |||
| 180 | /* Save PLLB setting and disable it */ | 178 | /* Save PLLB setting and disable it */ |
| 181 | ldr r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] | 179 | ldr r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] |
| 182 | str r3, .saved_pllbr | 180 | str r3, .saved_pllbr |
| @@ -184,8 +182,6 @@ ENTRY(at91_slow_clock) | |||
| 184 | mov r3, #AT91_PMC_PLLCOUNT | 182 | mov r3, #AT91_PMC_PLLCOUNT |
| 185 | str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] | 183 | str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] |
| 186 | 184 | ||
| 187 | wait_pllblock | ||
| 188 | |||
| 189 | /* Turn off the main oscillator */ | 185 | /* Turn off the main oscillator */ |
| 190 | ldr r3, [r1, #(AT91_CKGR_MOR - AT91_PMC)] | 186 | ldr r3, [r1, #(AT91_CKGR_MOR - AT91_PMC)] |
| 191 | bic r3, r3, #AT91_PMC_MOSCEN | 187 | bic r3, r3, #AT91_PMC_MOSCEN |
| @@ -205,13 +201,25 @@ ENTRY(at91_slow_clock) | |||
| 205 | ldr r3, .saved_pllbr | 201 | ldr r3, .saved_pllbr |
| 206 | str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] | 202 | str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] |
| 207 | 203 | ||
| 204 | tst r3, #(AT91_PMC_MUL & 0xff0000) | ||
| 205 | bne 1f | ||
| 206 | tst r3, #(AT91_PMC_MUL & ~0xff0000) | ||
| 207 | beq 2f | ||
| 208 | 1: | ||
| 208 | wait_pllblock | 209 | wait_pllblock |
| 210 | 2: | ||
| 209 | 211 | ||
| 210 | /* Restore PLLA setting */ | 212 | /* Restore PLLA setting */ |
| 211 | ldr r3, .saved_pllar | 213 | ldr r3, .saved_pllar |
| 212 | str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)] | 214 | str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)] |
| 213 | 215 | ||
| 216 | tst r3, #(AT91_PMC_MUL & 0xff0000) | ||
| 217 | bne 3f | ||
| 218 | tst r3, #(AT91_PMC_MUL & ~0xff0000) | ||
| 219 | beq 4f | ||
| 220 | 3: | ||
| 214 | wait_pllalock | 221 | wait_pllalock |
| 222 | 4: | ||
| 215 | 223 | ||
| 216 | #ifdef SLOWDOWN_MASTER_CLOCK | 224 | #ifdef SLOWDOWN_MASTER_CLOCK |
| 217 | /* | 225 | /* |
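Aside, not part of the patch: the pairs of TST instructions added above exist only because an ARM data-processing immediate cannot encode the whole AT91_PMC_MUL field (0x7ff << 16) in one instruction; taken together they test whether the saved multiplier is non-zero, so the lock wait is skipped when the corresponding PLL was not in use. A rough C sketch of the restore path under that assumption (wait_for_pll_lock() is a hypothetical stand-in for the wait_pllalock/wait_pllblock macros):
	static void restore_pll(void __iomem *pmc, unsigned int pllr, u32 saved)
	{
		__raw_writel(saved, pmc + pllr);	/* put back the saved PLLAR/PLLBR value */
		if (saved & AT91_PMC_MUL)		/* MUL != 0: the PLL was actually running */
			wait_for_pll_lock();		/* hypothetical helper */
	}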
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c index 2ccf670ce1ac..29c0a911df26 100644 --- a/arch/arm/mach-bcmring/dma.c +++ b/arch/arm/mach-bcmring/dma.c | |||
| @@ -2221,11 +2221,15 @@ EXPORT_SYMBOL(dma_map_create_descriptor_ring); | |||
| 2221 | int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */ | 2221 | int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */ |
| 2222 | int dirtied /* non-zero if any of the pages were modified */ | 2222 | int dirtied /* non-zero if any of the pages were modified */ |
| 2223 | ) { | 2223 | ) { |
| 2224 | |||
| 2225 | int rc = 0; | ||
| 2224 | int regionIdx; | 2226 | int regionIdx; |
| 2225 | int segmentIdx; | 2227 | int segmentIdx; |
| 2226 | DMA_Region_t *region; | 2228 | DMA_Region_t *region; |
| 2227 | DMA_Segment_t *segment; | 2229 | DMA_Segment_t *segment; |
| 2228 | 2230 | ||
| 2231 | down(&memMap->lock); | ||
| 2232 | |||
| 2229 | for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) { | 2233 | for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) { |
| 2230 | region = &memMap->region[regionIdx]; | 2234 | region = &memMap->region[regionIdx]; |
| 2231 | 2235 | ||
| @@ -2239,7 +2243,8 @@ int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */ | |||
| 2239 | printk(KERN_ERR | 2243 | printk(KERN_ERR |
| 2240 | "%s: vmalloc'd pages are not yet supported\n", | 2244 | "%s: vmalloc'd pages are not yet supported\n", |
| 2241 | __func__); | 2245 | __func__); |
| 2242 | return -EINVAL; | 2246 | rc = -EINVAL; |
| 2247 | goto out; | ||
| 2243 | } | 2248 | } |
| 2244 | 2249 | ||
| 2245 | case DMA_MEM_TYPE_KMALLOC: | 2250 | case DMA_MEM_TYPE_KMALLOC: |
| @@ -2276,7 +2281,8 @@ int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */ | |||
| 2276 | printk(KERN_ERR | 2281 | printk(KERN_ERR |
| 2277 | "%s: Unsupported memory type: %d\n", | 2282 | "%s: Unsupported memory type: %d\n", |
| 2278 | __func__, region->memType); | 2283 | __func__, region->memType); |
| 2279 | return -EINVAL; | 2284 | rc = -EINVAL; |
| 2285 | goto out; | ||
| 2280 | } | 2286 | } |
| 2281 | } | 2287 | } |
| 2282 | 2288 | ||
| @@ -2314,9 +2320,10 @@ int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */ | |||
| 2314 | memMap->numRegionsUsed = 0; | 2320 | memMap->numRegionsUsed = 0; |
| 2315 | memMap->inUse = 0; | 2321 | memMap->inUse = 0; |
| 2316 | 2322 | ||
| 2323 | out: | ||
| 2317 | up(&memMap->lock); | 2324 | up(&memMap->lock); |
| 2318 | 2325 | ||
| 2319 | return 0; | 2326 | return rc; |
| 2320 | } | 2327 | } |
| 2321 | 2328 | ||
| 2322 | EXPORT_SYMBOL(dma_unmap); | 2329 | EXPORT_SYMBOL(dma_unmap); |
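Aside, not part of the patch: the dma_unmap() hunks above make two related fixes, taking memMap->lock before the region walk and converting the early error returns into "goto out" so the semaphore is always released. A minimal sketch of that single-exit pattern, with purely illustrative names:
	struct some_map { struct semaphore lock; int in_use; };	/* illustrative only */

	static int guarded_walk(struct some_map *map)
	{
		int rc = 0;

		down(&map->lock);
		if (!map->in_use) {
			rc = -EINVAL;
			goto out;		/* never return with the semaphore held */
		}
		/* ... walk and release the regions ... */
	out:
		up(&map->lock);
		return rc;
	}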
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index 27772e18e45b..0d6ee583f65c 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c | |||
| @@ -758,7 +758,6 @@ static u8 dm365_default_priorities[DAVINCI_N_AINTC_IRQ] = { | |||
| 758 | [IRQ_MMCINT] = 7, | 758 | [IRQ_MMCINT] = 7, |
| 759 | [IRQ_DM365_MMCINT1] = 7, | 759 | [IRQ_DM365_MMCINT1] = 7, |
| 760 | [IRQ_DM365_PWMINT3] = 7, | 760 | [IRQ_DM365_PWMINT3] = 7, |
| 761 | [IRQ_DDRINT] = 4, | ||
| 762 | [IRQ_AEMIFINT] = 2, | 761 | [IRQ_AEMIFINT] = 2, |
| 763 | [IRQ_DM365_SDIOINT1] = 2, | 762 | [IRQ_DM365_SDIOINT1] = 2, |
| 764 | [IRQ_TINT0_TINT12] = 7, | 763 | [IRQ_TINT0_TINT12] = 7, |
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c index 02d939853b88..53137387aee1 100644 --- a/arch/arm/mach-davinci/dma.c +++ b/arch/arm/mach-davinci/dma.c | |||
| @@ -1267,7 +1267,8 @@ int edma_start(unsigned channel) | |||
| 1267 | /* EDMA channel with event association */ | 1267 | /* EDMA channel with event association */ |
| 1268 | pr_debug("EDMA: ER%d %08x\n", j, | 1268 | pr_debug("EDMA: ER%d %08x\n", j, |
| 1269 | edma_shadow0_read_array(ctlr, SH_ER, j)); | 1269 | edma_shadow0_read_array(ctlr, SH_ER, j)); |
| 1270 | /* Clear any pending error */ | 1270 | /* Clear any pending event or error */ |
| 1271 | edma_write_array(ctlr, EDMA_ECR, j, mask); | ||
| 1271 | edma_write_array(ctlr, EDMA_EMCR, j, mask); | 1272 | edma_write_array(ctlr, EDMA_EMCR, j, mask); |
| 1272 | /* Clear any SER */ | 1273 | /* Clear any SER */ |
| 1273 | edma_shadow0_write_array(ctlr, SH_SECR, j, mask); | 1274 | edma_shadow0_write_array(ctlr, SH_SECR, j, mask); |
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h index cc9be7fee627..03acfd39042b 100644 --- a/arch/arm/mach-davinci/include/mach/da8xx.h +++ b/arch/arm/mach-davinci/include/mach/da8xx.h | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | * | 3 | * |
| 4 | * Author: Mark A. Greer <mgreer@mvista.com> | 4 | * Author: Mark A. Greer <mgreer@mvista.com> |
| 5 | * | 5 | * |
| 6 | * 2007, 2009 (c) MontaVista Software, Inc. This file is licensed under | 6 | * 2007, 2009-2010 (c) MontaVista Software, Inc. This file is licensed under |
| 7 | * the terms of the GNU General Public License version 2. This program | 7 | * the terms of the GNU General Public License version 2. This program |
| 8 | * is licensed "as is" without any warranty of any kind, whether express | 8 | * is licensed "as is" without any warranty of any kind, whether express |
| 9 | * or implied. | 9 | * or implied. |
| @@ -13,7 +13,9 @@ | |||
| 13 | 13 | ||
| 14 | #include <video/da8xx-fb.h> | 14 | #include <video/da8xx-fb.h> |
| 15 | 15 | ||
| 16 | #include <linux/platform_device.h> | ||
| 16 | #include <linux/davinci_emac.h> | 17 | #include <linux/davinci_emac.h> |
| 18 | |||
| 17 | #include <mach/serial.h> | 19 | #include <mach/serial.h> |
| 18 | #include <mach/edma.h> | 20 | #include <mach/edma.h> |
| 19 | #include <mach/i2c.h> | 21 | #include <mach/i2c.h> |
| @@ -144,6 +146,10 @@ extern const short da850_mmcsd0_pins[]; | |||
| 144 | extern const short da850_nand_pins[]; | 146 | extern const short da850_nand_pins[]; |
| 145 | extern const short da850_nor_pins[]; | 147 | extern const short da850_nor_pins[]; |
| 146 | 148 | ||
| 149 | #ifdef CONFIG_DAVINCI_MUX | ||
| 147 | int da8xx_pinmux_setup(const short pins[]); | 150 | int da8xx_pinmux_setup(const short pins[]); |
| 151 | #else | ||
| 152 | static inline int da8xx_pinmux_setup(const short pins[]) { return 0; } | ||
| 153 | #endif | ||
| 148 | 154 | ||
| 149 | #endif /* __ASM_ARCH_DAVINCI_DA8XX_H */ | 155 | #endif /* __ASM_ARCH_DAVINCI_DA8XX_H */ |
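Aside, not part of the patch: the #ifdef CONFIG_DAVINCI_MUX block added above follows the usual header idiom of pairing a prototype with a static inline no-op, so board code can call da8xx_pinmux_setup() unconditionally and still build and link when pinmux support is configured out. An illustrative caller, reusing the da850_nand_pins table declared in this header:
	static void __init some_board_config_pins(void)		/* illustrative board hook */
	{
		int ret = da8xx_pinmux_setup(da850_nand_pins);	/* returns 0 when CONFIG_DAVINCI_MUX is off */

		if (ret)
			pr_warning("NAND pinmux setup failed: %d\n", ret);
	}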
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c index 42d985beece5..9e0b106b4f5f 100644 --- a/arch/arm/mach-davinci/time.c +++ b/arch/arm/mach-davinci/time.c | |||
| @@ -253,8 +253,6 @@ static void __init timer_init(void) | |||
| 253 | irq = USING_COMPARE(t) ? dtip[i].cmp_irq : irq; | 253 | irq = USING_COMPARE(t) ? dtip[i].cmp_irq : irq; |
| 254 | setup_irq(irq, &t->irqaction); | 254 | setup_irq(irq, &t->irqaction); |
| 255 | } | 255 | } |
| 256 | |||
| 257 | timer32_config(&timers[i]); | ||
| 258 | } | 256 | } |
| 259 | } | 257 | } |
| 260 | 258 | ||
| @@ -331,6 +329,7 @@ static void __init davinci_timer_init(void) | |||
| 331 | unsigned int clocksource_id; | 329 | unsigned int clocksource_id; |
| 332 | static char err[] __initdata = KERN_ERR | 330 | static char err[] __initdata = KERN_ERR |
| 333 | "%s: can't register clocksource!\n"; | 331 | "%s: can't register clocksource!\n"; |
| 332 | int i; | ||
| 334 | 333 | ||
| 335 | clockevent_id = soc_info->timer_info->clockevent_id; | 334 | clockevent_id = soc_info->timer_info->clockevent_id; |
| 336 | clocksource_id = soc_info->timer_info->clocksource_id; | 335 | clocksource_id = soc_info->timer_info->clocksource_id; |
| @@ -389,6 +388,9 @@ static void __init davinci_timer_init(void) | |||
| 389 | 388 | ||
| 390 | clockevent_davinci.cpumask = cpumask_of(0); | 389 | clockevent_davinci.cpumask = cpumask_of(0); |
| 391 | clockevents_register_device(&clockevent_davinci); | 390 | clockevents_register_device(&clockevent_davinci); |
| 391 | |||
| 392 | for (i = 0; i < ARRAY_SIZE(timers); i++) | ||
| 393 | timer32_config(&timers[i]); | ||
| 392 | } | 394 | } |
| 393 | 395 | ||
| 394 | struct sys_timer davinci_timer = { | 396 | struct sys_timer davinci_timer = { |
diff --git a/arch/arm/mach-ep93xx/gpio.c b/arch/arm/mach-ep93xx/gpio.c index cc377ae8c428..cf547ad7ebd4 100644 --- a/arch/arm/mach-ep93xx/gpio.c +++ b/arch/arm/mach-ep93xx/gpio.c | |||
| @@ -25,7 +25,7 @@ | |||
| 25 | #include <mach/hardware.h> | 25 | #include <mach/hardware.h> |
| 26 | 26 | ||
| 27 | /************************************************************************* | 27 | /************************************************************************* |
| 28 | * GPIO handling for EP93xx | 28 | * Interrupt handling for EP93xx on-chip GPIOs |
| 29 | *************************************************************************/ | 29 | *************************************************************************/ |
| 30 | static unsigned char gpio_int_unmasked[3]; | 30 | static unsigned char gpio_int_unmasked[3]; |
| 31 | static unsigned char gpio_int_enabled[3]; | 31 | static unsigned char gpio_int_enabled[3]; |
| @@ -40,7 +40,7 @@ static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 }; | |||
| 40 | static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 }; | 40 | static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 }; |
| 41 | static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 }; | 41 | static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 }; |
| 42 | 42 | ||
| 43 | void ep93xx_gpio_update_int_params(unsigned port) | 43 | static void ep93xx_gpio_update_int_params(unsigned port) |
| 44 | { | 44 | { |
| 45 | BUG_ON(port > 2); | 45 | BUG_ON(port > 2); |
| 46 | 46 | ||
| @@ -56,7 +56,7 @@ void ep93xx_gpio_update_int_params(unsigned port) | |||
| 56 | EP93XX_GPIO_REG(int_en_register_offset[port])); | 56 | EP93XX_GPIO_REG(int_en_register_offset[port])); |
| 57 | } | 57 | } |
| 58 | 58 | ||
| 59 | void ep93xx_gpio_int_mask(unsigned line) | 59 | static inline void ep93xx_gpio_int_mask(unsigned line) |
| 60 | { | 60 | { |
| 61 | gpio_int_unmasked[line >> 3] &= ~(1 << (line & 7)); | 61 | gpio_int_unmasked[line >> 3] &= ~(1 << (line & 7)); |
| 62 | } | 62 | } |
diff --git a/arch/arm/mach-mx3/Kconfig b/arch/arm/mach-mx3/Kconfig index 3872af1cf2c3..170f68e46dd5 100644 --- a/arch/arm/mach-mx3/Kconfig +++ b/arch/arm/mach-mx3/Kconfig | |||
| @@ -62,6 +62,15 @@ config MACH_MX31_3DS | |||
| 62 | Include support for MX31PDK (3DS) platform. This includes specific | 62 | Include support for MX31PDK (3DS) platform. This includes specific |
| 63 | configurations for the board and its peripherals. | 63 | configurations for the board and its peripherals. |
| 64 | 64 | ||
| 65 | config MACH_MX31_3DS_MXC_NAND_USE_BBT | ||
| 66 | bool "Make the MXC NAND driver use the in flash Bad Block Table" | ||
| 67 | depends on MACH_MX31_3DS | ||
| 68 | depends on MTD_NAND_MXC | ||
| 69 | help | ||
| 70 | Enable this if you want the MXC NAND driver to use the in-flash | ||
| 71 | Bad Block Table to determine which blocks are bad, instead of | ||
| 72 | scanning the entire flash for bad block markers. | ||
| 73 | |||
| 65 | config MACH_MX31MOBOARD | 74 | config MACH_MX31MOBOARD |
| 66 | bool "Support mx31moboard platforms (EPFL Mobots group)" | 75 | bool "Support mx31moboard platforms (EPFL Mobots group)" |
| 67 | select ARCH_MX31 | 76 | select ARCH_MX31 |
| @@ -95,6 +104,7 @@ config MACH_PCM043 | |||
| 95 | config MACH_ARMADILLO5X0 | 104 | config MACH_ARMADILLO5X0 |
| 96 | bool "Support Atmark Armadillo-500 Development Base Board" | 105 | bool "Support Atmark Armadillo-500 Development Base Board" |
| 97 | select ARCH_MX31 | 106 | select ARCH_MX31 |
| 107 | select MXC_ULPI if USB_ULPI | ||
| 98 | help | 108 | help |
| 99 | Include support for Atmark Armadillo-500 platform. This includes | 109 | Include support for Atmark Armadillo-500 platform. This includes |
| 100 | specific configurations for the board and its peripherals. | 110 | specific configurations for the board and its peripherals. |
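Aside, not part of the patch: a bool option such as MACH_MX31_3DS_MXC_NAND_USE_BBT reaches C code as a CONFIG_-prefixed macro, which is how the mx31_3ds board file further below gates flash_bbt. Sketch of that consumer side (field values taken from the board file):
	static struct mxc_nand_platform_data nand_pdata = {
		.width	= 1,
		.hw_ecc	= 1,
	#ifdef CONFIG_MACH_MX31_3DS_MXC_NAND_USE_BBT	/* note the CONFIG_ prefix */
		.flash_bbt = 1,				/* trust the on-flash bad block table */
	#endif
	};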
diff --git a/arch/arm/mach-mx3/clock-imx31.c b/arch/arm/mach-mx3/clock-imx31.c index 80dba9966b5e..9a9eb6de6127 100644 --- a/arch/arm/mach-mx3/clock-imx31.c +++ b/arch/arm/mach-mx3/clock-imx31.c | |||
| @@ -468,6 +468,7 @@ static struct clk ahb_clk = { | |||
| 468 | } | 468 | } |
| 469 | 469 | ||
| 470 | DEFINE_CLOCK(perclk_clk, 0, NULL, 0, NULL, NULL, &ipg_clk); | 470 | DEFINE_CLOCK(perclk_clk, 0, NULL, 0, NULL, NULL, &ipg_clk); |
| 471 | DEFINE_CLOCK(ckil_clk, 0, NULL, 0, clk_ckil_get_rate, NULL, NULL); | ||
| 471 | 472 | ||
| 472 | DEFINE_CLOCK(sdhc1_clk, 0, MXC_CCM_CGR0, 0, NULL, NULL, &perclk_clk); | 473 | DEFINE_CLOCK(sdhc1_clk, 0, MXC_CCM_CGR0, 0, NULL, NULL, &perclk_clk); |
| 473 | DEFINE_CLOCK(sdhc2_clk, 1, MXC_CCM_CGR0, 2, NULL, NULL, &perclk_clk); | 474 | DEFINE_CLOCK(sdhc2_clk, 1, MXC_CCM_CGR0, 2, NULL, NULL, &perclk_clk); |
| @@ -490,7 +491,7 @@ DEFINE_CLOCK(mpeg4_clk, 0, MXC_CCM_CGR1, 0, NULL, NULL, &ahb_clk); | |||
| 490 | DEFINE_CLOCK(mstick1_clk, 0, MXC_CCM_CGR1, 2, mstick1_get_rate, NULL, &usb_pll_clk); | 491 | DEFINE_CLOCK(mstick1_clk, 0, MXC_CCM_CGR1, 2, mstick1_get_rate, NULL, &usb_pll_clk); |
| 491 | DEFINE_CLOCK(mstick2_clk, 1, MXC_CCM_CGR1, 4, mstick2_get_rate, NULL, &usb_pll_clk); | 492 | DEFINE_CLOCK(mstick2_clk, 1, MXC_CCM_CGR1, 4, mstick2_get_rate, NULL, &usb_pll_clk); |
| 492 | DEFINE_CLOCK1(csi_clk, 0, MXC_CCM_CGR1, 6, csi, NULL, &serial_pll_clk); | 493 | DEFINE_CLOCK1(csi_clk, 0, MXC_CCM_CGR1, 6, csi, NULL, &serial_pll_clk); |
| 493 | DEFINE_CLOCK(rtc_clk, 0, MXC_CCM_CGR1, 8, NULL, NULL, &ipg_clk); | 494 | DEFINE_CLOCK(rtc_clk, 0, MXC_CCM_CGR1, 8, NULL, NULL, &ckil_clk); |
| 494 | DEFINE_CLOCK(wdog_clk, 0, MXC_CCM_CGR1, 10, NULL, NULL, &ipg_clk); | 495 | DEFINE_CLOCK(wdog_clk, 0, MXC_CCM_CGR1, 10, NULL, NULL, &ipg_clk); |
| 495 | DEFINE_CLOCK(pwm_clk, 0, MXC_CCM_CGR1, 12, NULL, NULL, &perclk_clk); | 496 | DEFINE_CLOCK(pwm_clk, 0, MXC_CCM_CGR1, 12, NULL, NULL, &perclk_clk); |
| 496 | DEFINE_CLOCK(usb_clk2, 0, MXC_CCM_CGR1, 18, usb_get_rate, NULL, &ahb_clk); | 497 | DEFINE_CLOCK(usb_clk2, 0, MXC_CCM_CGR1, 18, usb_get_rate, NULL, &ahb_clk); |
| @@ -514,7 +515,6 @@ DEFINE_CLOCK(usb_clk1, 0, NULL, 0, usb_get_rate, NULL, &usb_pll_clk) | |||
| 514 | DEFINE_CLOCK(nfc_clk, 0, NULL, 0, nfc_get_rate, NULL, &ahb_clk); | 515 | DEFINE_CLOCK(nfc_clk, 0, NULL, 0, nfc_get_rate, NULL, &ahb_clk); |
| 515 | DEFINE_CLOCK(scc_clk, 0, NULL, 0, NULL, NULL, &ipg_clk); | 516 | DEFINE_CLOCK(scc_clk, 0, NULL, 0, NULL, NULL, &ipg_clk); |
| 516 | DEFINE_CLOCK(ipg_clk, 0, NULL, 0, ipg_get_rate, NULL, &ahb_clk); | 517 | DEFINE_CLOCK(ipg_clk, 0, NULL, 0, ipg_get_rate, NULL, &ahb_clk); |
| 517 | DEFINE_CLOCK(ckil_clk, 0, NULL, 0, clk_ckil_get_rate, NULL, NULL); | ||
| 518 | 518 | ||
| 519 | #define _REGISTER_CLOCK(d, n, c) \ | 519 | #define _REGISTER_CLOCK(d, n, c) \ |
| 520 | { \ | 520 | { \ |
| @@ -572,7 +572,6 @@ static struct clk_lookup lookups[] = { | |||
| 572 | _REGISTER_CLOCK(NULL, "iim", iim_clk) | 572 | _REGISTER_CLOCK(NULL, "iim", iim_clk) |
| 573 | _REGISTER_CLOCK(NULL, "mpeg4", mpeg4_clk) | 573 | _REGISTER_CLOCK(NULL, "mpeg4", mpeg4_clk) |
| 574 | _REGISTER_CLOCK(NULL, "mbx", mbx_clk) | 574 | _REGISTER_CLOCK(NULL, "mbx", mbx_clk) |
| 575 | _REGISTER_CLOCK("mxc_rtc", NULL, ckil_clk) | ||
| 576 | }; | 575 | }; |
| 577 | 576 | ||
| 578 | int __init mx31_clocks_init(unsigned long fref) | 577 | int __init mx31_clocks_init(unsigned long fref) |
diff --git a/arch/arm/mach-mx3/devices.c b/arch/arm/mach-mx3/devices.c index 6adb586515ea..f8911154a9fa 100644 --- a/arch/arm/mach-mx3/devices.c +++ b/arch/arm/mach-mx3/devices.c | |||
| @@ -575,11 +575,26 @@ struct platform_device imx_ssi_device1 = { | |||
| 575 | .resource = imx_ssi_resources1, | 575 | .resource = imx_ssi_resources1, |
| 576 | }; | 576 | }; |
| 577 | 577 | ||
| 578 | static int mx3_devices_init(void) | 578 | static struct resource imx_wdt_resources[] = { |
| 579 | { | ||
| 580 | .flags = IORESOURCE_MEM, | ||
| 581 | }, | ||
| 582 | }; | ||
| 583 | |||
| 584 | struct platform_device imx_wdt_device0 = { | ||
| 585 | .name = "imx-wdt", | ||
| 586 | .id = 0, | ||
| 587 | .num_resources = ARRAY_SIZE(imx_wdt_resources), | ||
| 588 | .resource = imx_wdt_resources, | ||
| 589 | }; | ||
| 590 | |||
| 591 | static int __init mx3_devices_init(void) | ||
| 579 | { | 592 | { |
| 580 | if (cpu_is_mx31()) { | 593 | if (cpu_is_mx31()) { |
| 581 | mxc_nand_resources[0].start = MX31_NFC_BASE_ADDR; | 594 | mxc_nand_resources[0].start = MX31_NFC_BASE_ADDR; |
| 582 | mxc_nand_resources[0].end = MX31_NFC_BASE_ADDR + 0xfff; | 595 | mxc_nand_resources[0].end = MX31_NFC_BASE_ADDR + 0xfff; |
| 596 | imx_wdt_resources[0].start = MX31_WDOG_BASE_ADDR; | ||
| 597 | imx_wdt_resources[0].end = MX31_WDOG_BASE_ADDR + 0x3fff; | ||
| 583 | mxc_register_device(&mxc_rnga_device, NULL); | 598 | mxc_register_device(&mxc_rnga_device, NULL); |
| 584 | } | 599 | } |
| 585 | if (cpu_is_mx35()) { | 600 | if (cpu_is_mx35()) { |
| @@ -597,6 +612,8 @@ static int mx3_devices_init(void) | |||
| 597 | imx_ssi_resources0[1].end = MX35_INT_SSI1; | 612 | imx_ssi_resources0[1].end = MX35_INT_SSI1; |
| 598 | imx_ssi_resources1[1].start = MX35_INT_SSI2; | 613 | imx_ssi_resources1[1].start = MX35_INT_SSI2; |
| 599 | imx_ssi_resources1[1].end = MX35_INT_SSI2; | 614 | imx_ssi_resources1[1].end = MX35_INT_SSI2; |
| 615 | imx_wdt_resources[0].start = MX35_WDOG_BASE_ADDR; | ||
| 616 | imx_wdt_resources[0].end = MX35_WDOG_BASE_ADDR + 0x3fff; | ||
| 600 | } | 617 | } |
| 601 | 618 | ||
| 602 | return 0; | 619 | return 0; |
diff --git a/arch/arm/mach-mx3/devices.h b/arch/arm/mach-mx3/devices.h index 42cf175eac6b..4f77eb501274 100644 --- a/arch/arm/mach-mx3/devices.h +++ b/arch/arm/mach-mx3/devices.h | |||
| @@ -25,4 +25,5 @@ extern struct platform_device mxc_spi_device1; | |||
| 25 | extern struct platform_device mxc_spi_device2; | 25 | extern struct platform_device mxc_spi_device2; |
| 26 | extern struct platform_device imx_ssi_device0; | 26 | extern struct platform_device imx_ssi_device0; |
| 27 | extern struct platform_device imx_ssi_device1; | 27 | extern struct platform_device imx_ssi_device1; |
| 28 | 28 | ||
| 29 | extern struct platform_device imx_wdt_device0; | ||
diff --git a/arch/arm/mach-mx3/mach-armadillo5x0.c b/arch/arm/mach-mx3/mach-armadillo5x0.c index 3d72b0b89705..5f72ec91af2d 100644 --- a/arch/arm/mach-mx3/mach-armadillo5x0.c +++ b/arch/arm/mach-mx3/mach-armadillo5x0.c | |||
| @@ -36,6 +36,9 @@ | |||
| 36 | #include <linux/input.h> | 36 | #include <linux/input.h> |
| 37 | #include <linux/gpio_keys.h> | 37 | #include <linux/gpio_keys.h> |
| 38 | #include <linux/i2c.h> | 38 | #include <linux/i2c.h> |
| 39 | #include <linux/usb/otg.h> | ||
| 40 | #include <linux/usb/ulpi.h> | ||
| 41 | #include <linux/delay.h> | ||
| 39 | 42 | ||
| 40 | #include <mach/hardware.h> | 43 | #include <mach/hardware.h> |
| 41 | #include <asm/mach-types.h> | 44 | #include <asm/mach-types.h> |
| @@ -52,6 +55,8 @@ | |||
| 52 | #include <mach/ipu.h> | 55 | #include <mach/ipu.h> |
| 53 | #include <mach/mx3fb.h> | 56 | #include <mach/mx3fb.h> |
| 54 | #include <mach/mxc_nand.h> | 57 | #include <mach/mxc_nand.h> |
| 58 | #include <mach/mxc_ehci.h> | ||
| 59 | #include <mach/ulpi.h> | ||
| 55 | 60 | ||
| 56 | #include "devices.h" | 61 | #include "devices.h" |
| 57 | #include "crm_regs.h" | 62 | #include "crm_regs.h" |
| @@ -103,8 +108,158 @@ static int armadillo5x0_pins[] = { | |||
| 103 | /* I2C2 */ | 108 | /* I2C2 */ |
| 104 | MX31_PIN_CSPI2_MOSI__SCL, | 109 | MX31_PIN_CSPI2_MOSI__SCL, |
| 105 | MX31_PIN_CSPI2_MISO__SDA, | 110 | MX31_PIN_CSPI2_MISO__SDA, |
| 111 | /* OTG */ | ||
| 112 | MX31_PIN_USBOTG_DATA0__USBOTG_DATA0, | ||
| 113 | MX31_PIN_USBOTG_DATA1__USBOTG_DATA1, | ||
| 114 | MX31_PIN_USBOTG_DATA2__USBOTG_DATA2, | ||
| 115 | MX31_PIN_USBOTG_DATA3__USBOTG_DATA3, | ||
| 116 | MX31_PIN_USBOTG_DATA4__USBOTG_DATA4, | ||
| 117 | MX31_PIN_USBOTG_DATA5__USBOTG_DATA5, | ||
| 118 | MX31_PIN_USBOTG_DATA6__USBOTG_DATA6, | ||
| 119 | MX31_PIN_USBOTG_DATA7__USBOTG_DATA7, | ||
| 120 | MX31_PIN_USBOTG_CLK__USBOTG_CLK, | ||
| 121 | MX31_PIN_USBOTG_DIR__USBOTG_DIR, | ||
| 122 | MX31_PIN_USBOTG_NXT__USBOTG_NXT, | ||
| 123 | MX31_PIN_USBOTG_STP__USBOTG_STP, | ||
| 124 | /* USB host 2 */ | ||
| 125 | IOMUX_MODE(MX31_PIN_USBH2_CLK, IOMUX_CONFIG_FUNC), | ||
| 126 | IOMUX_MODE(MX31_PIN_USBH2_DIR, IOMUX_CONFIG_FUNC), | ||
| 127 | IOMUX_MODE(MX31_PIN_USBH2_NXT, IOMUX_CONFIG_FUNC), | ||
| 128 | IOMUX_MODE(MX31_PIN_USBH2_STP, IOMUX_CONFIG_FUNC), | ||
| 129 | IOMUX_MODE(MX31_PIN_USBH2_DATA0, IOMUX_CONFIG_FUNC), | ||
| 130 | IOMUX_MODE(MX31_PIN_USBH2_DATA1, IOMUX_CONFIG_FUNC), | ||
| 131 | IOMUX_MODE(MX31_PIN_STXD3, IOMUX_CONFIG_FUNC), | ||
| 132 | IOMUX_MODE(MX31_PIN_SRXD3, IOMUX_CONFIG_FUNC), | ||
| 133 | IOMUX_MODE(MX31_PIN_SCK3, IOMUX_CONFIG_FUNC), | ||
| 134 | IOMUX_MODE(MX31_PIN_SFS3, IOMUX_CONFIG_FUNC), | ||
| 135 | IOMUX_MODE(MX31_PIN_STXD6, IOMUX_CONFIG_FUNC), | ||
| 136 | IOMUX_MODE(MX31_PIN_SRXD6, IOMUX_CONFIG_FUNC), | ||
| 106 | }; | 137 | }; |
| 107 | 138 | ||
| 139 | /* USB */ | ||
| 140 | #if defined(CONFIG_USB_ULPI) | ||
| 141 | |||
| 142 | #define OTG_RESET IOMUX_TO_GPIO(MX31_PIN_STXD4) | ||
| 143 | #define USBH2_RESET IOMUX_TO_GPIO(MX31_PIN_SCK6) | ||
| 144 | #define USBH2_CS IOMUX_TO_GPIO(MX31_PIN_GPIO1_3) | ||
| 145 | |||
| 146 | #define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \ | ||
| 147 | PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU) | ||
| 148 | |||
| 149 | static int usbotg_init(struct platform_device *pdev) | ||
| 150 | { | ||
| 151 | int err; | ||
| 152 | |||
| 153 | mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA0, USB_PAD_CFG); | ||
| 154 | mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA1, USB_PAD_CFG); | ||
| 155 | mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA2, USB_PAD_CFG); | ||
| 156 | mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA3, USB_PAD_CFG); | ||
| 157 | mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA4, USB_PAD_CFG); | ||
| 158 | mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA5, USB_PAD_CFG); | ||
| 159 | mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA6, USB_PAD_CFG); | ||
| 160 | mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA7, USB_PAD_CFG); | ||
| 161 | mxc_iomux_set_pad(MX31_PIN_USBOTG_CLK, USB_PAD_CFG); | ||
| 162 | mxc_iomux_set_pad(MX31_PIN_USBOTG_DIR, USB_PAD_CFG); | ||
| 163 | mxc_iomux_set_pad(MX31_PIN_USBOTG_NXT, USB_PAD_CFG); | ||
| 164 | mxc_iomux_set_pad(MX31_PIN_USBOTG_STP, USB_PAD_CFG); | ||
| 165 | |||
| 166 | /* Chip already enabled by hardware */ | ||
| 167 | /* OTG phy reset */ | ||
| 168 | err = gpio_request(OTG_RESET, "USB-OTG-RESET"); | ||
| 169 | if (err) { | ||
| 170 | pr_err("Failed to request the usb otg reset gpio\n"); | ||
| 171 | return err; | ||
| 172 | } | ||
| 173 | |||
| 174 | err = gpio_direction_output(OTG_RESET, 1/*HIGH*/); | ||
| 175 | if (err) { | ||
| 176 | pr_err("Failed to reset the usb otg phy\n"); | ||
| 177 | goto otg_free_reset; | ||
| 178 | } | ||
| 179 | |||
| 180 | gpio_set_value(OTG_RESET, 0/*LOW*/); | ||
| 181 | mdelay(5); | ||
| 182 | gpio_set_value(OTG_RESET, 1/*HIGH*/); | ||
| 183 | |||
| 184 | return 0; | ||
| 185 | |||
| 186 | otg_free_reset: | ||
| 187 | gpio_free(OTG_RESET); | ||
| 188 | return err; | ||
| 189 | } | ||
| 190 | |||
| 191 | static int usbh2_init(struct platform_device *pdev) | ||
| 192 | { | ||
| 193 | int err; | ||
| 194 | |||
| 195 | mxc_iomux_set_pad(MX31_PIN_USBH2_CLK, USB_PAD_CFG); | ||
| 196 | mxc_iomux_set_pad(MX31_PIN_USBH2_DIR, USB_PAD_CFG); | ||
| 197 | mxc_iomux_set_pad(MX31_PIN_USBH2_NXT, USB_PAD_CFG); | ||
| 198 | mxc_iomux_set_pad(MX31_PIN_USBH2_STP, USB_PAD_CFG); | ||
| 199 | mxc_iomux_set_pad(MX31_PIN_USBH2_DATA0, USB_PAD_CFG); | ||
| 200 | mxc_iomux_set_pad(MX31_PIN_USBH2_DATA1, USB_PAD_CFG); | ||
| 201 | mxc_iomux_set_pad(MX31_PIN_SRXD6, USB_PAD_CFG); | ||
| 202 | mxc_iomux_set_pad(MX31_PIN_STXD6, USB_PAD_CFG); | ||
| 203 | mxc_iomux_set_pad(MX31_PIN_SFS3, USB_PAD_CFG); | ||
| 204 | mxc_iomux_set_pad(MX31_PIN_SCK3, USB_PAD_CFG); | ||
| 205 | mxc_iomux_set_pad(MX31_PIN_SRXD3, USB_PAD_CFG); | ||
| 206 | mxc_iomux_set_pad(MX31_PIN_STXD3, USB_PAD_CFG); | ||
| 207 | |||
| 208 | mxc_iomux_set_gpr(MUX_PGP_UH2, true); | ||
| 209 | |||
| 210 | |||
| 211 | /* Enable the chip */ | ||
| 212 | err = gpio_request(USBH2_CS, "USB-H2-CS"); | ||
| 213 | if (err) { | ||
| 214 | pr_err("Failed to request the usb host 2 CS gpio\n"); | ||
| 215 | return err; | ||
| 216 | } | ||
| 217 | |||
| 218 | err = gpio_direction_output(USBH2_CS, 0/*Enabled*/); | ||
| 219 | if (err) { | ||
| 220 | pr_err("Failed to drive the usb host 2 CS gpio\n"); | ||
| 221 | goto h2_free_cs; | ||
| 222 | } | ||
| 223 | |||
| 224 | /* H2 phy reset */ | ||
| 225 | err = gpio_request(USBH2_RESET, "USB-H2-RESET"); | ||
| 226 | if (err) { | ||
| 227 | pr_err("Failed to request the usb host 2 reset gpio\n"); | ||
| 228 | goto h2_free_cs; | ||
| 229 | } | ||
| 230 | |||
| 231 | err = gpio_direction_output(USBH2_RESET, 1/*HIGH*/); | ||
| 232 | if (err) { | ||
| 233 | pr_err("Failed to reset the usb host 2 phy\n"); | ||
| 234 | goto h2_free_reset; | ||
| 235 | } | ||
| 236 | |||
| 237 | gpio_set_value(USBH2_RESET, 0/*LOW*/); | ||
| 238 | mdelay(5); | ||
| 239 | gpio_set_value(USBH2_RESET, 1/*HIGH*/); | ||
| 240 | |||
| 241 | return 0; | ||
| 242 | |||
| 243 | h2_free_reset: | ||
| 244 | gpio_free(USBH2_RESET); | ||
| 245 | h2_free_cs: | ||
| 246 | gpio_free(USBH2_CS); | ||
| 247 | return err; | ||
| 248 | } | ||
| 249 | |||
| 250 | static struct mxc_usbh_platform_data usbotg_pdata = { | ||
| 251 | .init = usbotg_init, | ||
| 252 | .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT, | ||
| 253 | .flags = MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_DIFF_UNI, | ||
| 254 | }; | ||
| 255 | |||
| 256 | static struct mxc_usbh_platform_data usbh2_pdata = { | ||
| 257 | .init = usbh2_init, | ||
| 258 | .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT, | ||
| 259 | .flags = MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_DIFF_UNI, | ||
| 260 | }; | ||
| 261 | #endif /* CONFIG_USB_ULPI */ | ||
| 262 | |||
| 108 | /* RTC over I2C*/ | 263 | /* RTC over I2C*/ |
| 109 | #define ARMADILLO5X0_RTC_GPIO IOMUX_TO_GPIO(MX31_PIN_SRXD4) | 264 | #define ARMADILLO5X0_RTC_GPIO IOMUX_TO_GPIO(MX31_PIN_SRXD4) |
| 110 | 265 | ||
| @@ -393,6 +548,17 @@ static void __init armadillo5x0_init(void) | |||
| 393 | if (armadillo5x0_i2c_rtc.irq == 0) | 548 | if (armadillo5x0_i2c_rtc.irq == 0) |
| 394 | pr_warning("armadillo5x0_init: failed to get RTC IRQ\n"); | 549 | pr_warning("armadillo5x0_init: failed to get RTC IRQ\n"); |
| 395 | i2c_register_board_info(1, &armadillo5x0_i2c_rtc, 1); | 550 | i2c_register_board_info(1, &armadillo5x0_i2c_rtc, 1); |
| 551 | |||
| 552 | /* USB */ | ||
| 553 | #if defined(CONFIG_USB_ULPI) | ||
| 554 | usbotg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, | ||
| 555 | USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT); | ||
| 556 | usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, | ||
| 557 | USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT); | ||
| 558 | |||
| 559 | mxc_register_device(&mxc_otg_host, &usbotg_pdata); | ||
| 560 | mxc_register_device(&mxc_usbh2, &usbh2_pdata); | ||
| 561 | #endif | ||
| 396 | } | 562 | } |
| 397 | 563 | ||
| 398 | static void __init armadillo5x0_timer_init(void) | 564 | static void __init armadillo5x0_timer_init(void) |
diff --git a/arch/arm/mach-mx3/mach-mx31_3ds.c b/arch/arm/mach-mx3/mach-mx31_3ds.c index b88c18ad7698..f54af1e29ca4 100644 --- a/arch/arm/mach-mx3/mach-mx31_3ds.c +++ b/arch/arm/mach-mx3/mach-mx31_3ds.c | |||
| @@ -23,6 +23,9 @@ | |||
| 23 | #include <linux/gpio.h> | 23 | #include <linux/gpio.h> |
| 24 | #include <linux/smsc911x.h> | 24 | #include <linux/smsc911x.h> |
| 25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
| 26 | #include <linux/mfd/mc13783.h> | ||
| 27 | #include <linux/spi/spi.h> | ||
| 28 | #include <linux/regulator/machine.h> | ||
| 26 | 29 | ||
| 27 | #include <mach/hardware.h> | 30 | #include <mach/hardware.h> |
| 28 | #include <asm/mach-types.h> | 31 | #include <asm/mach-types.h> |
| @@ -31,26 +34,96 @@ | |||
| 31 | #include <asm/memory.h> | 34 | #include <asm/memory.h> |
| 32 | #include <asm/mach/map.h> | 35 | #include <asm/mach/map.h> |
| 33 | #include <mach/common.h> | 36 | #include <mach/common.h> |
| 34 | #include <mach/board-mx31pdk.h> | 37 | #include <mach/board-mx31_3ds.h> |
| 35 | #include <mach/imx-uart.h> | 38 | #include <mach/imx-uart.h> |
| 36 | #include <mach/iomux-mx3.h> | 39 | #include <mach/iomux-mx3.h> |
| 40 | #include <mach/mxc_nand.h> | ||
| 41 | #include <mach/spi.h> | ||
| 37 | #include "devices.h" | 42 | #include "devices.h" |
| 38 | 43 | ||
| 39 | /*! | 44 | /*! |
| 40 | * @file mx31pdk.c | 45 | * @file mx31_3ds.c |
| 41 | * | 46 | * |
| 42 | * @brief This file contains the board-specific initialization routines. | 47 | * @brief This file contains the board-specific initialization routines. |
| 43 | * | 48 | * |
| 44 | * @ingroup System | 49 | * @ingroup System |
| 45 | */ | 50 | */ |
| 46 | 51 | ||
| 47 | static int mx31pdk_pins[] = { | 52 | static int mx31_3ds_pins[] = { |
| 48 | /* UART1 */ | 53 | /* UART1 */ |
| 49 | MX31_PIN_CTS1__CTS1, | 54 | MX31_PIN_CTS1__CTS1, |
| 50 | MX31_PIN_RTS1__RTS1, | 55 | MX31_PIN_RTS1__RTS1, |
| 51 | MX31_PIN_TXD1__TXD1, | 56 | MX31_PIN_TXD1__TXD1, |
| 52 | MX31_PIN_RXD1__RXD1, | 57 | MX31_PIN_RXD1__RXD1, |
| 53 | IOMUX_MODE(MX31_PIN_GPIO1_1, IOMUX_CONFIG_GPIO), | 58 | IOMUX_MODE(MX31_PIN_GPIO1_1, IOMUX_CONFIG_GPIO), |
| 59 | /* SPI 1 */ | ||
| 60 | MX31_PIN_CSPI2_SCLK__SCLK, | ||
| 61 | MX31_PIN_CSPI2_MOSI__MOSI, | ||
| 62 | MX31_PIN_CSPI2_MISO__MISO, | ||
| 63 | MX31_PIN_CSPI2_SPI_RDY__SPI_RDY, | ||
| 64 | MX31_PIN_CSPI2_SS0__SS0, | ||
| 65 | MX31_PIN_CSPI2_SS2__SS2, /* CS for MC13783 */ | ||
| 66 | /* MC13783 IRQ */ | ||
| 67 | IOMUX_MODE(MX31_PIN_GPIO1_3, IOMUX_CONFIG_GPIO), | ||
| 68 | }; | ||
| 69 | |||
| 70 | /* Regulators */ | ||
| 71 | static struct regulator_init_data pwgtx_init = { | ||
| 72 | .constraints = { | ||
| 73 | .boot_on = 1, | ||
| 74 | .always_on = 1, | ||
| 75 | }, | ||
| 76 | }; | ||
| 77 | |||
| 78 | static struct mc13783_regulator_init_data mx31_3ds_regulators[] = { | ||
| 79 | { | ||
| 80 | .id = MC13783_REGU_PWGT1SPI, /* Power Gate for ARM core. */ | ||
| 81 | .init_data = &pwgtx_init, | ||
| 82 | }, { | ||
| 83 | .id = MC13783_REGU_PWGT2SPI, /* Power Gate for L2 Cache. */ | ||
| 84 | .init_data = &pwgtx_init, | ||
| 85 | }, | ||
| 86 | }; | ||
| 87 | |||
| 88 | /* MC13783 */ | ||
| 89 | static struct mc13783_platform_data mc13783_pdata __initdata = { | ||
| 90 | .regulators = mx31_3ds_regulators, | ||
| 91 | .num_regulators = ARRAY_SIZE(mx31_3ds_regulators), | ||
| 92 | .flags = MC13783_USE_REGULATOR, | ||
| 93 | }; | ||
| 94 | |||
| 95 | /* SPI */ | ||
| 96 | static int spi1_internal_chipselect[] = { | ||
| 97 | MXC_SPI_CS(0), | ||
| 98 | MXC_SPI_CS(2), | ||
| 99 | }; | ||
| 100 | |||
| 101 | static struct spi_imx_master spi1_pdata = { | ||
| 102 | .chipselect = spi1_internal_chipselect, | ||
| 103 | .num_chipselect = ARRAY_SIZE(spi1_internal_chipselect), | ||
| 104 | }; | ||
| 105 | |||
| 106 | static struct spi_board_info mx31_3ds_spi_devs[] __initdata = { | ||
| 107 | { | ||
| 108 | .modalias = "mc13783", | ||
| 109 | .max_speed_hz = 1000000, | ||
| 110 | .bus_num = 1, | ||
| 111 | .chip_select = 1, /* SS2 */ | ||
| 112 | .platform_data = &mc13783_pdata, | ||
| 113 | .irq = IOMUX_TO_IRQ(MX31_PIN_GPIO1_3), | ||
| 114 | .mode = SPI_CS_HIGH, | ||
| 115 | }, | ||
| 116 | }; | ||
| 117 | |||
| 118 | /* | ||
| 119 | * NAND Flash | ||
| 120 | */ | ||
| 121 | static struct mxc_nand_platform_data imx31_3ds_nand_flash_pdata = { | ||
| 122 | .width = 1, | ||
| 123 | .hw_ecc = 1, | ||
| 124 | #ifdef CONFIG_MACH_MX31_3DS_MXC_NAND_USE_BBT | ||
| 125 | .flash_bbt = 1, | ||
| 126 | #endif | ||
| 54 | }; | 127 | }; |
| 55 | 128 | ||
| 56 | static struct imxuart_platform_data uart_pdata = { | 129 | static struct imxuart_platform_data uart_pdata = { |
| @@ -95,7 +168,7 @@ static struct platform_device smsc911x_device = { | |||
| 95 | * LEDs, switches, interrupts for Ethernet. | 168 | * LEDs, switches, interrupts for Ethernet. |
| 96 | */ | 169 | */ |
| 97 | 170 | ||
| 98 | static void mx31pdk_expio_irq_handler(uint32_t irq, struct irq_desc *desc) | 171 | static void mx31_3ds_expio_irq_handler(uint32_t irq, struct irq_desc *desc) |
| 99 | { | 172 | { |
| 100 | uint32_t imr_val; | 173 | uint32_t imr_val; |
| 101 | uint32_t int_valid; | 174 | uint32_t int_valid; |
| @@ -163,7 +236,7 @@ static struct irq_chip expio_irq_chip = { | |||
| 163 | .unmask = expio_unmask_irq, | 236 | .unmask = expio_unmask_irq, |
| 164 | }; | 237 | }; |
| 165 | 238 | ||
| 166 | static int __init mx31pdk_init_expio(void) | 239 | static int __init mx31_3ds_init_expio(void) |
| 167 | { | 240 | { |
| 168 | int i; | 241 | int i; |
| 169 | int ret; | 242 | int ret; |
| @@ -176,7 +249,7 @@ static int __init mx31pdk_init_expio(void) | |||
| 176 | return -ENODEV; | 249 | return -ENODEV; |
| 177 | } | 250 | } |
| 178 | 251 | ||
| 179 | pr_info("i.MX31PDK Debug board detected, rev = 0x%04X\n", | 252 | pr_info("i.MX31 3DS Debug board detected, rev = 0x%04X\n", |
| 180 | __raw_readw(CPLD_CODE_VER_REG)); | 253 | __raw_readw(CPLD_CODE_VER_REG)); |
| 181 | 254 | ||
| 182 | /* | 255 | /* |
| @@ -201,7 +274,7 @@ static int __init mx31pdk_init_expio(void) | |||
| 201 | set_irq_flags(i, IRQF_VALID); | 274 | set_irq_flags(i, IRQF_VALID); |
| 202 | } | 275 | } |
| 203 | set_irq_type(EXPIO_PARENT_INT, IRQ_TYPE_LEVEL_LOW); | 276 | set_irq_type(EXPIO_PARENT_INT, IRQ_TYPE_LEVEL_LOW); |
| 204 | set_irq_chained_handler(EXPIO_PARENT_INT, mx31pdk_expio_irq_handler); | 277 | set_irq_chained_handler(EXPIO_PARENT_INT, mx31_3ds_expio_irq_handler); |
| 205 | 278 | ||
| 206 | return 0; | 279 | return 0; |
| 207 | } | 280 | } |
| @@ -209,7 +282,7 @@ static int __init mx31pdk_init_expio(void) | |||
| 209 | /* | 282 | /* |
| 210 | * This structure defines the MX31 memory map. | 283 | * This structure defines the MX31 memory map. |
| 211 | */ | 284 | */ |
| 212 | static struct map_desc mx31pdk_io_desc[] __initdata = { | 285 | static struct map_desc mx31_3ds_io_desc[] __initdata = { |
| 213 | { | 286 | { |
| 214 | .virtual = MX31_CS5_BASE_ADDR_VIRT, | 287 | .virtual = MX31_CS5_BASE_ADDR_VIRT, |
| 215 | .pfn = __phys_to_pfn(MX31_CS5_BASE_ADDR), | 288 | .pfn = __phys_to_pfn(MX31_CS5_BASE_ADDR), |
| @@ -221,10 +294,10 @@ static struct map_desc mx31pdk_io_desc[] __initdata = { | |||
| 221 | /* | 294 | /* |
| 222 | * Set up static virtual mappings. | 295 | * Set up static virtual mappings. |
| 223 | */ | 296 | */ |
| 224 | static void __init mx31pdk_map_io(void) | 297 | static void __init mx31_3ds_map_io(void) |
| 225 | { | 298 | { |
| 226 | mx31_map_io(); | 299 | mx31_map_io(); |
| 227 | iotable_init(mx31pdk_io_desc, ARRAY_SIZE(mx31pdk_io_desc)); | 300 | iotable_init(mx31_3ds_io_desc, ARRAY_SIZE(mx31_3ds_io_desc)); |
| 228 | } | 301 | } |
| 229 | 302 | ||
| 230 | /*! | 303 | /*! |
| @@ -232,35 +305,40 @@ static void __init mx31pdk_map_io(void) | |||
| 232 | */ | 305 | */ |
| 233 | static void __init mxc_board_init(void) | 306 | static void __init mxc_board_init(void) |
| 234 | { | 307 | { |
| 235 | mxc_iomux_setup_multiple_pins(mx31pdk_pins, ARRAY_SIZE(mx31pdk_pins), | 308 | mxc_iomux_setup_multiple_pins(mx31_3ds_pins, ARRAY_SIZE(mx31_3ds_pins), |
| 236 | "mx31pdk"); | 309 | "mx31_3ds"); |
| 237 | 310 | ||
| 238 | mxc_register_device(&mxc_uart_device0, &uart_pdata); | 311 | mxc_register_device(&mxc_uart_device0, &uart_pdata); |
| 312 | mxc_register_device(&mxc_nand_device, &imx31_3ds_nand_flash_pdata); | ||
| 313 | |||
| 314 | mxc_register_device(&mxc_spi_device1, &spi1_pdata); | ||
| 315 | spi_register_board_info(mx31_3ds_spi_devs, | ||
| 316 | ARRAY_SIZE(mx31_3ds_spi_devs)); | ||
| 239 | 317 | ||
| 240 | if (!mx31pdk_init_expio()) | 318 | if (!mx31_3ds_init_expio()) |
| 241 | platform_device_register(&smsc911x_device); | 319 | platform_device_register(&smsc911x_device); |
| 242 | } | 320 | } |
| 243 | 321 | ||
| 244 | static void __init mx31pdk_timer_init(void) | 322 | static void __init mx31_3ds_timer_init(void) |
| 245 | { | 323 | { |
| 246 | mx31_clocks_init(26000000); | 324 | mx31_clocks_init(26000000); |
| 247 | } | 325 | } |
| 248 | 326 | ||
| 249 | static struct sys_timer mx31pdk_timer = { | 327 | static struct sys_timer mx31_3ds_timer = { |
| 250 | .init = mx31pdk_timer_init, | 328 | .init = mx31_3ds_timer_init, |
| 251 | }; | 329 | }; |
| 252 | 330 | ||
| 253 | /* | 331 | /* |
| 254 | * The following uses standard kernel macros defined in arch.h in order to | 332 | * The following uses standard kernel macros defined in arch.h in order to |
| 255 | * initialize __mach_desc_MX31PDK data structure. | 333 | * initialize __mach_desc_MX31_3DS data structure. |
| 256 | */ | 334 | */ |
| 257 | MACHINE_START(MX31_3DS, "Freescale MX31PDK (3DS)") | 335 | MACHINE_START(MX31_3DS, "Freescale MX31PDK (3DS)") |
| 258 | /* Maintainer: Freescale Semiconductor, Inc. */ | 336 | /* Maintainer: Freescale Semiconductor, Inc. */ |
| 259 | .phys_io = MX31_AIPS1_BASE_ADDR, | 337 | .phys_io = MX31_AIPS1_BASE_ADDR, |
| 260 | .io_pg_offst = (MX31_AIPS1_BASE_ADDR_VIRT >> 18) & 0xfffc, | 338 | .io_pg_offst = (MX31_AIPS1_BASE_ADDR_VIRT >> 18) & 0xfffc, |
| 261 | .boot_params = MX3x_PHYS_OFFSET + 0x100, | 339 | .boot_params = MX3x_PHYS_OFFSET + 0x100, |
| 262 | .map_io = mx31pdk_map_io, | 340 | .map_io = mx31_3ds_map_io, |
| 263 | .init_irq = mx31_init_irq, | 341 | .init_irq = mx31_init_irq, |
| 264 | .init_machine = mxc_board_init, | 342 | .init_machine = mxc_board_init, |
| 265 | .timer = &mx31pdk_timer, | 343 | .timer = &mx31_3ds_timer, |
| 266 | MACHINE_END | 344 | MACHINE_END |
diff --git a/arch/arm/mach-mx3/mach-pcm037.c b/arch/arm/mach-mx3/mach-pcm037.c index 034ec8190065..2df1ec55a97e 100644 --- a/arch/arm/mach-mx3/mach-pcm037.c +++ b/arch/arm/mach-mx3/mach-pcm037.c | |||
| @@ -35,7 +35,6 @@ | |||
| 35 | #include <linux/can/platform/sja1000.h> | 35 | #include <linux/can/platform/sja1000.h> |
| 36 | #include <linux/usb/otg.h> | 36 | #include <linux/usb/otg.h> |
| 37 | #include <linux/usb/ulpi.h> | 37 | #include <linux/usb/ulpi.h> |
| 38 | #include <linux/fsl_devices.h> | ||
| 39 | #include <linux/gfp.h> | 38 | #include <linux/gfp.h> |
| 40 | 39 | ||
| 41 | #include <media/soc_camera.h> | 40 | #include <media/soc_camera.h> |
diff --git a/arch/arm/mach-mx3/mx31lite-db.c b/arch/arm/mach-mx3/mx31lite-db.c index ccd874225c3b..093c595ca581 100644 --- a/arch/arm/mach-mx3/mx31lite-db.c +++ b/arch/arm/mach-mx3/mx31lite-db.c | |||
| @@ -28,7 +28,6 @@ | |||
| 28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
| 29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
| 30 | #include <linux/gpio.h> | 30 | #include <linux/gpio.h> |
| 31 | #include <linux/platform_device.h> | ||
| 32 | #include <linux/leds.h> | 31 | #include <linux/leds.h> |
| 33 | #include <linux/platform_device.h> | 32 | #include <linux/platform_device.h> |
| 34 | 33 | ||
| @@ -206,5 +205,6 @@ void __init mx31lite_db_init(void) | |||
| 206 | mxc_register_device(&mxcsdhc_device0, &mmc_pdata); | 205 | mxc_register_device(&mxcsdhc_device0, &mmc_pdata); |
| 207 | mxc_register_device(&mxc_spi_device0, &spi0_pdata); | 206 | mxc_register_device(&mxc_spi_device0, &spi0_pdata); |
| 208 | platform_device_register(&litekit_led_device); | 207 | platform_device_register(&litekit_led_device); |
| 208 | mxc_register_device(&imx_wdt_device0, NULL); | ||
| 209 | } | 209 | } |
| 210 | 210 | ||
diff --git a/arch/arm/mach-mx5/clock-mx51.c b/arch/arm/mach-mx5/clock-mx51.c index be90c03101cd..8f85f73b83a8 100644 --- a/arch/arm/mach-mx5/clock-mx51.c +++ b/arch/arm/mach-mx5/clock-mx51.c | |||
| @@ -757,7 +757,7 @@ DEFINE_CLOCK(uart3_ipg_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG7_OFFSET, | |||
| 757 | 757 | ||
| 758 | /* GPT */ | 758 | /* GPT */ |
| 759 | DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET, | 759 | DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET, |
| 760 | NULL, NULL, &ipg_perclk, NULL); | 760 | NULL, NULL, &ipg_clk, NULL); |
| 761 | DEFINE_CLOCK(gpt_ipg_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG10_OFFSET, | 761 | DEFINE_CLOCK(gpt_ipg_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG10_OFFSET, |
| 762 | NULL, NULL, &ipg_clk, NULL); | 762 | NULL, NULL, &ipg_clk, NULL); |
| 763 | 763 | ||
diff --git a/arch/arm/mach-mx5/cpu.c b/arch/arm/mach-mx5/cpu.c index 41c769f08c4d..2d37785e3857 100644 --- a/arch/arm/mach-mx5/cpu.c +++ b/arch/arm/mach-mx5/cpu.c | |||
| @@ -14,9 +14,62 @@ | |||
| 14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
| 15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
| 16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
| 17 | #include <linux/module.h> | ||
| 17 | #include <mach/hardware.h> | 18 | #include <mach/hardware.h> |
| 18 | #include <asm/io.h> | 19 | #include <asm/io.h> |
| 19 | 20 | ||
| 21 | static int cpu_silicon_rev = -1; | ||
| 22 | |||
| 23 | #define SI_REV 0x48 | ||
| 24 | |||
| 25 | static void query_silicon_parameter(void) | ||
| 26 | { | ||
| 27 | void __iomem *rom = ioremap(MX51_IROM_BASE_ADDR, MX51_IROM_SIZE); | ||
| 28 | u32 rev; | ||
| 29 | |||
| 30 | if (!rom) { | ||
| 31 | cpu_silicon_rev = -EINVAL; | ||
| 32 | return; | ||
| 33 | } | ||
| 34 | |||
| 35 | rev = readl(rom + SI_REV); | ||
| 36 | switch (rev) { | ||
| 37 | case 0x1: | ||
| 38 | cpu_silicon_rev = MX51_CHIP_REV_1_0; | ||
| 39 | break; | ||
| 40 | case 0x2: | ||
| 41 | cpu_silicon_rev = MX51_CHIP_REV_1_1; | ||
| 42 | break; | ||
| 43 | case 0x10: | ||
| 44 | cpu_silicon_rev = MX51_CHIP_REV_2_0; | ||
| 45 | break; | ||
| 46 | case 0x20: | ||
| 47 | cpu_silicon_rev = MX51_CHIP_REV_3_0; | ||
| 48 | break; | ||
| 49 | default: | ||
| 50 | cpu_silicon_rev = 0; | ||
| 51 | } | ||
| 52 | |||
| 53 | iounmap(rom); | ||
| 54 | } | ||
| 55 | |||
| 56 | /* | ||
| 57 | * Returns: | ||
| 58 | * the silicon revision of the cpu | ||
| 59 | * -EINVAL - not a mx51 | ||
| 60 | */ | ||
| 61 | int mx51_revision(void) | ||
| 62 | { | ||
| 63 | if (!cpu_is_mx51()) | ||
| 64 | return -EINVAL; | ||
| 65 | |||
| 66 | if (cpu_silicon_rev == -1) | ||
| 67 | query_silicon_parameter(); | ||
| 68 | |||
| 69 | return cpu_silicon_rev; | ||
| 70 | } | ||
| 71 | EXPORT_SYMBOL(mx51_revision); | ||
| 72 | |||
| 20 | static int __init post_cpu_init(void) | 73 | static int __init post_cpu_init(void) |
| 21 | { | 74 | { |
| 22 | unsigned int reg; | 75 | unsigned int reg; |
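Aside, not part of the patch: mx51_revision() lazily queries the boot ROM once and caches the result in cpu_silicon_rev, so later calls cost only a comparison; the first in-tree user is mx51_init_irq() in mm.c below, which picks the TZIC base by revision. Since the symbol is exported, a driver could use it the same way, e.g. (purely hypothetical):
	if (mx51_revision() == MX51_CHIP_REV_1_0)	/* TO1 silicon */
		apply_to1_workaround();			/* hypothetical erratum helper */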
diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c index c21e18be7af8..b7677ef80cc4 100644 --- a/arch/arm/mach-mx5/mm.c +++ b/arch/arm/mach-mx5/mm.c | |||
| @@ -35,11 +35,6 @@ static struct map_desc mxc_io_desc[] __initdata = { | |||
| 35 | .length = MX51_DEBUG_SIZE, | 35 | .length = MX51_DEBUG_SIZE, |
| 36 | .type = MT_DEVICE | 36 | .type = MT_DEVICE |
| 37 | }, { | 37 | }, { |
| 38 | .virtual = MX51_TZIC_BASE_ADDR_VIRT, | ||
| 39 | .pfn = __phys_to_pfn(MX51_TZIC_BASE_ADDR), | ||
| 40 | .length = MX51_TZIC_SIZE, | ||
| 41 | .type = MT_DEVICE | ||
| 42 | }, { | ||
| 43 | .virtual = MX51_AIPS1_BASE_ADDR_VIRT, | 38 | .virtual = MX51_AIPS1_BASE_ADDR_VIRT, |
| 44 | .pfn = __phys_to_pfn(MX51_AIPS1_BASE_ADDR), | 39 | .pfn = __phys_to_pfn(MX51_AIPS1_BASE_ADDR), |
| 45 | .length = MX51_AIPS1_SIZE, | 40 | .length = MX51_AIPS1_SIZE, |
| @@ -54,11 +49,6 @@ static struct map_desc mxc_io_desc[] __initdata = { | |||
| 54 | .pfn = __phys_to_pfn(MX51_AIPS2_BASE_ADDR), | 49 | .pfn = __phys_to_pfn(MX51_AIPS2_BASE_ADDR), |
| 55 | .length = MX51_AIPS2_SIZE, | 50 | .length = MX51_AIPS2_SIZE, |
| 56 | .type = MT_DEVICE | 51 | .type = MT_DEVICE |
| 57 | }, { | ||
| 58 | .virtual = MX51_NFC_AXI_BASE_ADDR_VIRT, | ||
| 59 | .pfn = __phys_to_pfn(MX51_NFC_AXI_BASE_ADDR), | ||
| 60 | .length = MX51_NFC_AXI_SIZE, | ||
| 61 | .type = MT_DEVICE | ||
| 62 | }, | 52 | }, |
| 63 | }; | 53 | }; |
| 64 | 54 | ||
| @@ -69,14 +59,6 @@ static struct map_desc mxc_io_desc[] __initdata = { | |||
| 69 | */ | 59 | */ |
| 70 | void __init mx51_map_io(void) | 60 | void __init mx51_map_io(void) |
| 71 | { | 61 | { |
| 72 | u32 tzic_addr; | ||
| 73 | |||
| 74 | if (mx51_revision() < MX51_CHIP_REV_2_0) | ||
| 75 | tzic_addr = 0x8FFFC000; | ||
| 76 | else | ||
| 77 | tzic_addr = 0xE0003000; | ||
| 78 | mxc_io_desc[2].pfn = __phys_to_pfn(tzic_addr); | ||
| 79 | |||
| 80 | mxc_set_cpu_type(MXC_CPU_MX51); | 62 | mxc_set_cpu_type(MXC_CPU_MX51); |
| 81 | mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR)); | 63 | mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR)); |
| 82 | mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG_BASE_ADDR)); | 64 | mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG_BASE_ADDR)); |
| @@ -85,5 +67,17 @@ void __init mx51_map_io(void) | |||
| 85 | 67 | ||
| 86 | void __init mx51_init_irq(void) | 68 | void __init mx51_init_irq(void) |
| 87 | { | 69 | { |
| 88 | tzic_init_irq(MX51_IO_ADDRESS(MX51_TZIC_BASE_ADDR)); | 70 | unsigned long tzic_addr; |
| 71 | void __iomem *tzic_virt; | ||
| 72 | |||
| 73 | if (mx51_revision() < MX51_CHIP_REV_2_0) | ||
| 74 | tzic_addr = MX51_TZIC_BASE_ADDR_TO1; | ||
| 75 | else | ||
| 76 | tzic_addr = MX51_TZIC_BASE_ADDR; | ||
| 77 | |||
| 78 | tzic_virt = ioremap(tzic_addr, SZ_16K); | ||
| 79 | if (!tzic_virt) | ||
| 80 | panic("unable to map TZIC interrupt controller\n"); | ||
| 81 | |||
| 82 | tzic_init_irq(tzic_virt); | ||
| 89 | } | 83 | } |
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 8bca4dea6dfa..f55fa1044f72 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c | |||
| @@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, | |||
| 41 | kfrom = kmap_atomic(from, KM_USER0); | 41 | kfrom = kmap_atomic(from, KM_USER0); |
| 42 | kto = kmap_atomic(to, KM_USER1); | 42 | kto = kmap_atomic(to, KM_USER1); |
| 43 | copy_page(kto, kfrom); | 43 | copy_page(kto, kfrom); |
| 44 | #ifdef CONFIG_HIGHMEM | 44 | __cpuc_flush_dcache_area(kto, PAGE_SIZE); |
| 45 | /* | ||
| 46 | * kmap_atomic() doesn't set the page virtual address, and | ||
| 47 | * kunmap_atomic() takes care of cache flushing already. | ||
| 48 | */ | ||
| 49 | if (page_address(to) != NULL) | ||
| 50 | #endif | ||
| 51 | __cpuc_flush_dcache_area(kto, PAGE_SIZE); | ||
| 52 | kunmap_atomic(kto, KM_USER1); | 45 | kunmap_atomic(kto, KM_USER1); |
| 53 | kunmap_atomic(kfrom, KM_USER0); | 46 | kunmap_atomic(kfrom, KM_USER0); |
| 54 | } | 47 | } |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 1351edc0b26f..13fa536d82e6 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
| @@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, | |||
| 464 | vaddr += offset; | 464 | vaddr += offset; |
| 465 | op(vaddr, len, dir); | 465 | op(vaddr, len, dir); |
| 466 | kunmap_high(page); | 466 | kunmap_high(page); |
| 467 | } else if (cache_is_vipt()) { | ||
| 468 | pte_t saved_pte; | ||
| 469 | vaddr = kmap_high_l1_vipt(page, &saved_pte); | ||
| 470 | op(vaddr + offset, len, dir); | ||
| 471 | kunmap_high_l1_vipt(page, saved_pte); | ||
| 467 | } | 472 | } |
| 468 | } else { | 473 | } else { |
| 469 | vaddr = page_address(page) + offset; | 474 | vaddr = page_address(page) + offset; |
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index e34f095e2090..c6844cb9b508 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | 13 | ||
| 14 | #include <asm/cacheflush.h> | 14 | #include <asm/cacheflush.h> |
| 15 | #include <asm/cachetype.h> | 15 | #include <asm/cachetype.h> |
| 16 | #include <asm/highmem.h> | ||
| 16 | #include <asm/smp_plat.h> | 17 | #include <asm/smp_plat.h> |
| 17 | #include <asm/system.h> | 18 | #include <asm/system.h> |
| 18 | #include <asm/tlbflush.h> | 19 | #include <asm/tlbflush.h> |
| @@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | |||
| 152 | 153 | ||
| 153 | void __flush_dcache_page(struct address_space *mapping, struct page *page) | 154 | void __flush_dcache_page(struct address_space *mapping, struct page *page) |
| 154 | { | 155 | { |
| 155 | void *addr = page_address(page); | ||
| 156 | |||
| 157 | /* | 156 | /* |
| 158 | * Writeback any data associated with the kernel mapping of this | 157 | * Writeback any data associated with the kernel mapping of this |
| 159 | * page. This ensures that data in the physical page is mutually | 158 | * page. This ensures that data in the physical page is mutually |
| 160 | * coherent with the kernel's mapping. | 159 | * coherent with the kernel's mapping. |
| 161 | */ | 160 | */ |
| 162 | #ifdef CONFIG_HIGHMEM | 161 | if (!PageHighMem(page)) { |
| 163 | /* | 162 | __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); |
| 164 | * kmap_atomic() doesn't set the page virtual address, and | 163 | } else { |
| 165 | * kunmap_atomic() takes care of cache flushing already. | 164 | void *addr = kmap_high_get(page); |
| 166 | */ | 165 | if (addr) { |
| 167 | if (addr) | 166 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); |
| 168 | #endif | 167 | kunmap_high(page); |
| 169 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | 168 | } else if (cache_is_vipt()) { |
| 169 | pte_t saved_pte; | ||
| 170 | addr = kmap_high_l1_vipt(page, &saved_pte); | ||
| 171 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | ||
| 172 | kunmap_high_l1_vipt(page, saved_pte); | ||
| 173 | } | ||
| 174 | } | ||
| 170 | 175 | ||
| 171 | /* | 176 | /* |
| 172 | * If this is a page cache page, and we have an aliasing VIPT cache, | 177 | * If this is a page cache page, and we have an aliasing VIPT cache, |
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 2be1ec7c1b41..77b030f5ec09 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c | |||
| @@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type) | |||
| 79 | unsigned int idx = type + KM_TYPE_NR * smp_processor_id(); | 79 | unsigned int idx = type + KM_TYPE_NR * smp_processor_id(); |
| 80 | 80 | ||
| 81 | if (kvaddr >= (void *)FIXADDR_START) { | 81 | if (kvaddr >= (void *)FIXADDR_START) { |
| 82 | __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); | 82 | if (cache_is_vivt()) |
| 83 | __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); | ||
| 83 | #ifdef CONFIG_DEBUG_HIGHMEM | 84 | #ifdef CONFIG_DEBUG_HIGHMEM |
| 84 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); | 85 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); |
| 85 | set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); | 86 | set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); |
| @@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr) | |||
| 124 | pte = TOP_PTE(vaddr); | 125 | pte = TOP_PTE(vaddr); |
| 125 | return pte_page(*pte); | 126 | return pte_page(*pte); |
| 126 | } | 127 | } |
| 128 | |||
| 129 | #ifdef CONFIG_CPU_CACHE_VIPT | ||
| 130 | |||
| 131 | #include <linux/percpu.h> | ||
| 132 | |||
| 133 | /* | ||
| 134 | * The VIVT cache of a highmem page is always flushed before the page | ||
| 135 | * is unmapped. Hence unmapped highmem pages need no cache maintenance | ||
| 136 | * in that case. | ||
| 137 | * | ||
| 138 | * However unmapped pages may still be cached with a VIPT cache, and | ||
| 139 | * it is not possible to perform cache maintenance on them using physical | ||
| 140 | * addresses unfortunately. So we have no choice but to set up a temporary | ||
| 141 | * virtual mapping for that purpose. | ||
| 142 | * | ||
| 143 | * Yet this VIPT cache maintenance may be triggered from DMA support | ||
| 144 | * functions which are possibly called from interrupt context. As we don't | ||
| 145 | * want to keep interrupts disabled all the time when such maintenance is | ||
| 146 | * taking place, we therefore allow for some reentrancy by preserving and | ||
| 147 | * restoring the previous fixmap entry before the interrupted context is | ||
| 148 | * resumed. If the reentrancy depth is 0 then there is no need to restore | ||
| 149 | * the previous fixmap, and leaving the current one in place allows it to | ||
| 150 | * be reused the next time without a TLB flush (common with DMA). | ||
| 151 | */ | ||
| 152 | |||
| 153 | static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth); | ||
| 154 | |||
| 155 | void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte) | ||
| 156 | { | ||
| 157 | unsigned int idx, cpu = smp_processor_id(); | ||
| 158 | int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu); | ||
| 159 | unsigned long vaddr, flags; | ||
| 160 | pte_t pte, *ptep; | ||
| 161 | |||
| 162 | idx = KM_L1_CACHE + KM_TYPE_NR * cpu; | ||
| 163 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
| 164 | ptep = TOP_PTE(vaddr); | ||
| 165 | pte = mk_pte(page, kmap_prot); | ||
| 166 | |||
| 167 | if (!in_interrupt()) | ||
| 168 | preempt_disable(); | ||
| 169 | |||
| 170 | raw_local_irq_save(flags); | ||
| 171 | (*depth)++; | ||
| 172 | if (pte_val(*ptep) == pte_val(pte)) { | ||
| 173 | *saved_pte = pte; | ||
| 174 | } else { | ||
| 175 | *saved_pte = *ptep; | ||
| 176 | set_pte_ext(ptep, pte, 0); | ||
| 177 | local_flush_tlb_kernel_page(vaddr); | ||
| 178 | } | ||
| 179 | raw_local_irq_restore(flags); | ||
| 180 | |||
| 181 | return (void *)vaddr; | ||
| 182 | } | ||
| 183 | |||
| 184 | void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte) | ||
| 185 | { | ||
| 186 | unsigned int idx, cpu = smp_processor_id(); | ||
| 187 | int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu); | ||
| 188 | unsigned long vaddr, flags; | ||
| 189 | pte_t pte, *ptep; | ||
| 190 | |||
| 191 | idx = KM_L1_CACHE + KM_TYPE_NR * cpu; | ||
| 192 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
| 193 | ptep = TOP_PTE(vaddr); | ||
| 194 | pte = mk_pte(page, kmap_prot); | ||
| 195 | |||
| 196 | BUG_ON(pte_val(*ptep) != pte_val(pte)); | ||
| 197 | BUG_ON(*depth <= 0); | ||
| 198 | |||
| 199 | raw_local_irq_save(flags); | ||
| 200 | (*depth)--; | ||
| 201 | if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) { | ||
| 202 | set_pte_ext(ptep, saved_pte, 0); | ||
| 203 | local_flush_tlb_kernel_page(vaddr); | ||
| 204 | } | ||
| 205 | raw_local_irq_restore(flags); | ||
| 206 | |||
| 207 | if (!in_interrupt()) | ||
| 208 | preempt_enable(); | ||
| 209 | } | ||
| 210 | |||
| 211 | #endif /* CONFIG_CPU_CACHE_VIPT */ | ||
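Aside, not part of the patch: the contract for the two new helpers is that the caller passes back the pte returned by kmap_high_l1_vipt(), and the pair may nest and run in interrupt context, which is what the per-CPU depth counter is for. The callers added in dma-mapping.c and flush.c above all follow this shape:
	pte_t saved_pte;
	void *vaddr = kmap_high_l1_vipt(page, &saved_pte);

	__cpuc_flush_dcache_area(vaddr, PAGE_SIZE);	/* or any other cache operation */
	kunmap_high_l1_vipt(page, saved_pte);		/* restores a nested fixmap entry if needed */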
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 9d4da6ac28eb..241c24a1c18f 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
| @@ -420,6 +420,10 @@ static void __init build_mem_type_table(void) | |||
| 420 | user_pgprot |= L_PTE_SHARED; | 420 | user_pgprot |= L_PTE_SHARED; |
| 421 | kern_pgprot |= L_PTE_SHARED; | 421 | kern_pgprot |= L_PTE_SHARED; |
| 422 | vecs_pgprot |= L_PTE_SHARED; | 422 | vecs_pgprot |= L_PTE_SHARED; |
| 423 | mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S; | ||
| 424 | mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; | ||
| 425 | mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; | ||
| 426 | mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; | ||
| 423 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; | 427 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; |
| 424 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; | 428 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; |
| 425 | #endif | 429 | #endif |
| @@ -1050,10 +1054,12 @@ void setup_mm_for_reboot(char mode) | |||
| 1050 | pgd_t *pgd; | 1054 | pgd_t *pgd; |
| 1051 | int i; | 1055 | int i; |
| 1052 | 1056 | ||
| 1053 | if (current->mm && current->mm->pgd) | 1057 | /* |
| 1054 | pgd = current->mm->pgd; | 1058 | * We need to access the user-mode page tables here. For kernel threads |
| 1055 | else | 1059 | * we don't have any user-mode mappings so we use the context that we |
| 1056 | pgd = init_mm.pgd; | 1060 | * "borrowed". |
| 1061 | */ | ||
| 1062 | pgd = current->active_mm->pgd; | ||
| 1057 | 1063 | ||
| 1058 | base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; | 1064 | base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; |
| 1059 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) | 1065 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) |
diff --git a/arch/arm/plat-mxc/include/mach/board-mx31pdk.h b/arch/arm/plat-mxc/include/mach/board-mx31_3ds.h index 2bbd6ed17f50..da92933a233b 100644 --- a/arch/arm/plat-mxc/include/mach/board-mx31pdk.h +++ b/arch/arm/plat-mxc/include/mach/board-mx31_3ds.h | |||
| @@ -8,8 +8,8 @@ | |||
| 8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #ifndef __ASM_ARCH_MXC_BOARD_MX31PDK_H__ | 11 | #ifndef __ASM_ARCH_MXC_BOARD_MX31_3DS_H__ |
| 12 | #define __ASM_ARCH_MXC_BOARD_MX31PDK_H__ | 12 | #define __ASM_ARCH_MXC_BOARD_MX31_3DS_H__ |
| 13 | 13 | ||
| 14 | /* Definitions for components on the Debug board */ | 14 | /* Definitions for components on the Debug board */ |
| 15 | 15 | ||
| @@ -56,4 +56,4 @@ | |||
| 56 | 56 | ||
| 57 | #define MXC_MAX_EXP_IO_LINES 16 | 57 | #define MXC_MAX_EXP_IO_LINES 16 |
| 58 | 58 | ||
| 59 | #endif /* __ASM_ARCH_MXC_BOARD_MX31PDK_H__ */ | 59 | #endif /* __ASM_ARCH_MXC_BOARD_MX31_3DS_H__ */ |
diff --git a/arch/arm/plat-mxc/include/mach/mx51.h b/arch/arm/plat-mxc/include/mach/mx51.h index 771532b6b4a6..5aad344d5651 100644 --- a/arch/arm/plat-mxc/include/mach/mx51.h +++ b/arch/arm/plat-mxc/include/mach/mx51.h | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | * FB100000 70000000 1M SPBA 0 | 14 | * FB100000 70000000 1M SPBA 0 |
| 15 | * FB000000 73F00000 1M AIPS 1 | 15 | * FB000000 73F00000 1M AIPS 1 |
| 16 | * FB200000 83F00000 1M AIPS 2 | 16 | * FB200000 83F00000 1M AIPS 2 |
| 17 | * FA100000 8FFFC000 16K TZIC (interrupt controller) | 17 | * 8FFFC000 16K TZIC (interrupt controller) |
| 18 | * 90000000 256M CSD0 SDRAM/DDR | 18 | * 90000000 256M CSD0 SDRAM/DDR |
| 19 | * A0000000 256M CSD1 SDRAM/DDR | 19 | * A0000000 256M CSD1 SDRAM/DDR |
| 20 | * B0000000 128M CS0 Flash | 20 | * B0000000 128M CS0 Flash |
| @@ -23,11 +23,17 @@ | |||
| 23 | * C8000000 64M CS3 Flash | 23 | * C8000000 64M CS3 Flash |
| 24 | * CC000000 32M CS4 SRAM | 24 | * CC000000 32M CS4 SRAM |
| 25 | * CE000000 32M CS5 SRAM | 25 | * CE000000 32M CS5 SRAM |
| 26 | * F9000000 CFFF0000 64K NFC (NAND Flash AXI) | 26 | * CFFF0000 64K NFC (NAND Flash AXI) |
| 27 | * | 27 | * |
| 28 | */ | 28 | */ |
| 29 | 29 | ||
| 30 | /* | 30 | /* |
| 31 | * IROM | ||
| 32 | */ | ||
| 33 | #define MX51_IROM_BASE_ADDR 0x0 | ||
| 34 | #define MX51_IROM_SIZE SZ_64K | ||
| 35 | |||
| 36 | /* | ||
| 31 | * IRAM | 37 | * IRAM |
| 32 | */ | 38 | */ |
| 33 | #define MX51_IRAM_BASE_ADDR 0x1FFE0000 /* internal ram */ | 39 | #define MX51_IRAM_BASE_ADDR 0x1FFE0000 /* internal ram */ |
| @@ -40,7 +46,6 @@ | |||
| 40 | * NFC | 46 | * NFC |
| 41 | */ | 47 | */ |
| 42 | #define MX51_NFC_AXI_BASE_ADDR 0xCFFF0000 /* NAND flash AXI */ | 48 | #define MX51_NFC_AXI_BASE_ADDR 0xCFFF0000 /* NAND flash AXI */ |
| 43 | #define MX51_NFC_AXI_BASE_ADDR_VIRT 0xF9000000 | ||
| 44 | #define MX51_NFC_AXI_SIZE SZ_64K | 49 | #define MX51_NFC_AXI_SIZE SZ_64K |
| 45 | 50 | ||
| 46 | /* | 51 | /* |
| @@ -49,9 +54,8 @@ | |||
| 49 | #define MX51_GPU_BASE_ADDR 0x20000000 | 54 | #define MX51_GPU_BASE_ADDR 0x20000000 |
| 50 | #define MX51_GPU2D_BASE_ADDR 0xD0000000 | 55 | #define MX51_GPU2D_BASE_ADDR 0xD0000000 |
| 51 | 56 | ||
| 52 | #define MX51_TZIC_BASE_ADDR 0x8FFFC000 | 57 | #define MX51_TZIC_BASE_ADDR_TO1 0x8FFFC000 |
| 53 | #define MX51_TZIC_BASE_ADDR_VIRT 0xFA100000 | 58 | #define MX51_TZIC_BASE_ADDR 0xE0000000 |
| 54 | #define MX51_TZIC_SIZE SZ_16K | ||
| 55 | 59 | ||
| 56 | #define MX51_DEBUG_BASE_ADDR 0x60000000 | 60 | #define MX51_DEBUG_BASE_ADDR 0x60000000 |
| 57 | #define MX51_DEBUG_BASE_ADDR_VIRT 0xFA200000 | 61 | #define MX51_DEBUG_BASE_ADDR_VIRT 0xFA200000 |
| @@ -232,12 +236,10 @@ | |||
| 232 | #define MX51_IO_ADDRESS(x) \ | 236 | #define MX51_IO_ADDRESS(x) \ |
| 233 | (void __iomem *) \ | 237 | (void __iomem *) \ |
| 234 | (MX51_IS_MODULE(x, IRAM) ? MX51_IRAM_IO_ADDRESS(x) : \ | 238 | (MX51_IS_MODULE(x, IRAM) ? MX51_IRAM_IO_ADDRESS(x) : \ |
| 235 | MX51_IS_MODULE(x, TZIC) ? MX51_TZIC_IO_ADDRESS(x) : \ | ||
| 236 | MX51_IS_MODULE(x, DEBUG) ? MX51_DEBUG_IO_ADDRESS(x) : \ | 239 | MX51_IS_MODULE(x, DEBUG) ? MX51_DEBUG_IO_ADDRESS(x) : \ |
| 237 | MX51_IS_MODULE(x, SPBA0) ? MX51_SPBA0_IO_ADDRESS(x) : \ | 240 | MX51_IS_MODULE(x, SPBA0) ? MX51_SPBA0_IO_ADDRESS(x) : \ |
| 238 | MX51_IS_MODULE(x, AIPS1) ? MX51_AIPS1_IO_ADDRESS(x) : \ | 241 | MX51_IS_MODULE(x, AIPS1) ? MX51_AIPS1_IO_ADDRESS(x) : \ |
| 239 | MX51_IS_MODULE(x, AIPS2) ? MX51_AIPS2_IO_ADDRESS(x) : \ | 242 | MX51_IS_MODULE(x, AIPS2) ? MX51_AIPS2_IO_ADDRESS(x) : \ |
| 240 | MX51_IS_MODULE(x, NFC_AXI) ? MX51_NFC_AXI_IO_ADDRESS(x) : \ | ||
| 241 | 0xDEADBEEF) | 243 | 0xDEADBEEF) |
| 242 | 244 | ||
| 243 | /* | 245 | /* |
| @@ -246,9 +248,6 @@ | |||
| 246 | #define MX51_IRAM_IO_ADDRESS(x) \ | 248 | #define MX51_IRAM_IO_ADDRESS(x) \ |
| 247 | (((x) - MX51_IRAM_BASE_ADDR) + MX51_IRAM_BASE_ADDR_VIRT) | 249 | (((x) - MX51_IRAM_BASE_ADDR) + MX51_IRAM_BASE_ADDR_VIRT) |
| 248 | 250 | ||
| 249 | #define MX51_TZIC_IO_ADDRESS(x) \ | ||
| 250 | (((x) - MX51_TZIC_BASE_ADDR) + MX51_TZIC_BASE_ADDR_VIRT) | ||
| 251 | |||
| 252 | #define MX51_DEBUG_IO_ADDRESS(x) \ | 251 | #define MX51_DEBUG_IO_ADDRESS(x) \ |
| 253 | (((x) - MX51_DEBUG_BASE_ADDR) + MX51_DEBUG_BASE_ADDR_VIRT) | 252 | (((x) - MX51_DEBUG_BASE_ADDR) + MX51_DEBUG_BASE_ADDR_VIRT) |
| 254 | 253 | ||
| @@ -261,9 +260,6 @@ | |||
| 261 | #define MX51_AIPS2_IO_ADDRESS(x) \ | 260 | #define MX51_AIPS2_IO_ADDRESS(x) \ |
| 262 | (((x) - MX51_AIPS2_BASE_ADDR) + MX51_AIPS2_BASE_ADDR_VIRT) | 261 | (((x) - MX51_AIPS2_BASE_ADDR) + MX51_AIPS2_BASE_ADDR_VIRT) |
| 263 | 262 | ||
| 264 | #define MX51_NFC_AXI_IO_ADDRESS(x) \ | ||
| 265 | (((x) - MX51_NFC_AXI_BASE_ADDR) + MX51_NFC_AXI_BASE_ADDR_VIRT) | ||
| 266 | |||
| 267 | #define MX51_IS_MEM_DEVICE_NONSHARED(x) 0 | 263 | #define MX51_IS_MEM_DEVICE_NONSHARED(x) 0 |
| 268 | 264 | ||
| 269 | /* | 265 | /* |
| @@ -443,12 +439,7 @@ | |||
| 443 | 439 | ||
| 444 | #if !defined(__ASSEMBLY__) && !defined(__MXC_BOOT_UNCOMPRESS) | 440 | #if !defined(__ASSEMBLY__) && !defined(__MXC_BOOT_UNCOMPRESS) |
| 445 | 441 | ||
| 446 | extern unsigned int system_rev; | 442 | extern int mx51_revision(void); |
| 447 | |||
| 448 | static inline unsigned int mx51_revision(void) | ||
| 449 | { | ||
| 450 | return system_rev; | ||
| 451 | } | ||
| 452 | #endif | 443 | #endif |
| 453 | 444 | ||
| 454 | #endif /* __ASM_ARCH_MXC_MX51_H__ */ | 445 | #endif /* __ASM_ARCH_MXC_MX51_H__ */ |
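Note: the MX51_*_IO_ADDRESS() macros touched above translate a physical peripheral address into its fixed virtual mapping with plain offset arithmetic: subtract the region's physical base, add its virtual base. A short hedged sketch of that translation scheme follows; the base addresses are taken from the AIPS 1 line of the memory-map comment in the hunk, and the register offset is invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* One statically mapped peripheral region, physical -> virtual. */
#define REGION_PHYS_BASE 0x73F00000u   /* AIPS 1 physical base (from the map) */
#define REGION_VIRT_BASE 0xFB000000u   /* AIPS 1 virtual base (from the map)  */
#define REGION_SIZE      0x00100000u   /* 1 MiB window */

/* Translate a physical address inside the region to its mapped virtual
 * address, mirroring the (((x) - PHYS) + VIRT) pattern in the header. */
static uint32_t region_io_address(uint32_t phys)
{
	return (phys - REGION_PHYS_BASE) + REGION_VIRT_BASE;
}

int main(void)
{
	uint32_t reg_phys = REGION_PHYS_BASE + 0xbc000;  /* hypothetical block */

	if (reg_phys - REGION_PHYS_BASE < REGION_SIZE)
		printf("phys %#x -> virt %#x\n", reg_phys,
		       region_io_address(reg_phys));
	return 0;
}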
diff --git a/arch/arm/plat-mxc/include/mach/uncompress.h b/arch/arm/plat-mxc/include/mach/uncompress.h index 52e476a150ca..b6d3d0fddc48 100644 --- a/arch/arm/plat-mxc/include/mach/uncompress.h +++ b/arch/arm/plat-mxc/include/mach/uncompress.h | |||
| @@ -66,6 +66,7 @@ static inline void flush(void) | |||
| 66 | #define MX2X_UART1_BASE_ADDR 0x1000a000 | 66 | #define MX2X_UART1_BASE_ADDR 0x1000a000 |
| 67 | #define MX3X_UART1_BASE_ADDR 0x43F90000 | 67 | #define MX3X_UART1_BASE_ADDR 0x43F90000 |
| 68 | #define MX3X_UART2_BASE_ADDR 0x43F94000 | 68 | #define MX3X_UART2_BASE_ADDR 0x43F94000 |
| 69 | #define MX51_UART1_BASE_ADDR 0x73fbc000 | ||
| 69 | 70 | ||
| 70 | static __inline__ void __arch_decomp_setup(unsigned long arch_id) | 71 | static __inline__ void __arch_decomp_setup(unsigned long arch_id) |
| 71 | { | 72 | { |
| @@ -101,6 +102,9 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id) | |||
| 101 | case MACH_TYPE_MAGX_ZN5: | 102 | case MACH_TYPE_MAGX_ZN5: |
| 102 | uart_base = MX3X_UART2_BASE_ADDR; | 103 | uart_base = MX3X_UART2_BASE_ADDR; |
| 103 | break; | 104 | break; |
| 105 | case MACH_TYPE_MX51_BABBAGE: | ||
| 106 | uart_base = MX51_UART1_BASE_ADDR; | ||
| 107 | break; | ||
| 104 | default: | 108 | default: |
| 105 | break; | 109 | break; |
| 106 | } | 110 | } |
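Note: the uncompress.h hunk above follows the existing pattern for the decompressor's early UART: define the SoC's physical UART base and select it by machine type. A short sketch of that select-by-machine-id idiom is below; the machine ids are invented and only the base address comes from the hunk.

#include <stdio.h>

/* Physical UART base added by the hunk above. */
#define MX51_UART1_BASE_ADDR 0x73fbc000

/* Invented machine ids for the sketch. */
enum mach_type { MACH_OTHER, MACH_MX51_BABBAGE };

/* Pick the early-console UART base for a board; 0 means "no early UART". */
static unsigned long decomp_uart_base(enum mach_type mach)
{
	switch (mach) {
	case MACH_MX51_BABBAGE:
		return MX51_UART1_BASE_ADDR;
	default:
		return 0;
	}
}

int main(void)
{
	printf("babbage uart base: %#lx\n", decomp_uart_base(MACH_MX51_BABBAGE));
	printf("unknown board:     %#lx\n", decomp_uart_base(MACH_OTHER));
	return 0;
}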
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index a420cb949328..315a540c7ce5 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c | |||
| @@ -428,26 +428,6 @@ static void vfp_pm_init(void) | |||
| 428 | static inline void vfp_pm_init(void) { } | 428 | static inline void vfp_pm_init(void) { } |
| 429 | #endif /* CONFIG_PM */ | 429 | #endif /* CONFIG_PM */ |
| 430 | 430 | ||
| 431 | /* | ||
| 432 | * Synchronise the hardware VFP state of a thread other than current with the | ||
| 433 | * saved one. This function is used by the ptrace mechanism. | ||
| 434 | */ | ||
| 435 | #ifdef CONFIG_SMP | ||
| 436 | void vfp_sync_hwstate(struct thread_info *thread) | ||
| 437 | { | ||
| 438 | } | ||
| 439 | |||
| 440 | void vfp_flush_hwstate(struct thread_info *thread) | ||
| 441 | { | ||
| 442 | /* | ||
| 443 | * On SMP systems, the VFP state is automatically saved at every | ||
| 444 | * context switch. We mark the thread VFP state as belonging to a | ||
| 445 | * non-existent CPU so that the saved one will be reloaded when | ||
| 446 | * needed. | ||
| 447 | */ | ||
| 448 | thread->vfpstate.hard.cpu = NR_CPUS; | ||
| 449 | } | ||
| 450 | #else | ||
| 451 | void vfp_sync_hwstate(struct thread_info *thread) | 431 | void vfp_sync_hwstate(struct thread_info *thread) |
| 452 | { | 432 | { |
| 453 | unsigned int cpu = get_cpu(); | 433 | unsigned int cpu = get_cpu(); |
| @@ -490,9 +470,18 @@ void vfp_flush_hwstate(struct thread_info *thread) | |||
| 490 | last_VFP_context[cpu] = NULL; | 470 | last_VFP_context[cpu] = NULL; |
| 491 | } | 471 | } |
| 492 | 472 | ||
| 473 | #ifdef CONFIG_SMP | ||
| 474 | /* | ||
| 475 | * For SMP we still have to take care of the case where the thread | ||
| 476 | * migrates to another CPU and then back to the original CPU on which | ||
| 477 | * the last VFP user is still the same thread. Mark the thread VFP | ||
| 478 | * state as belonging to a non-existent CPU so that the saved one will | ||
| 479 | * be reloaded in the above case. | ||
| 480 | */ | ||
| 481 | thread->vfpstate.hard.cpu = NR_CPUS; | ||
| 482 | #endif | ||
| 493 | put_cpu(); | 483 | put_cpu(); |
| 494 | } | 484 | } |
| 495 | #endif | ||
| 496 | 485 | ||
| 497 | #include <linux/smp.h> | 486 | #include <linux/smp.h> |
| 498 | 487 | ||
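Note: the SMP hunk in vfpmodule.c above invalidates the saved VFP state's owner by setting thread->vfpstate.hard.cpu to NR_CPUS, so a later comparison against any real CPU number fails and the state is reloaded from the memory copy. The small sketch below shows this "impossible owner" invalidation idiom; the structure and names are illustrative, not the actual vfpstate layout.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4   /* assumed CPU count for the sketch */

/* Illustrative per-thread, lazily-saved register state. */
struct hw_state {
	unsigned int cpu;   /* CPU whose registers currently hold this state */
	int regs;           /* stand-in for the saved register contents */
};

/* The hardware copy is reusable only if this thread was the last user on
 * the CPU it is about to run on. */
static bool state_live_on(const struct hw_state *st, unsigned int this_cpu)
{
	return st->cpu == this_cpu;
}

/* Flush: mark the state as owned by a non-existent CPU so that every
 * subsequent check forces a reload from the saved copy. */
static void state_flush(struct hw_state *st)
{
	st->cpu = NR_CPUS;
}

int main(void)
{
	struct hw_state st = { .cpu = 1, .regs = 42 };

	printf("live on cpu1 before flush: %d\n", state_live_on(&st, 1));
	state_flush(&st);
	printf("live on cpu1 after flush:  %d\n", state_live_on(&st, 1));
	return 0;
}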
diff --git a/arch/frv/include/asm/segment.h b/arch/frv/include/asm/segment.h index e3616a6f941d..a2320a4a0042 100644 --- a/arch/frv/include/asm/segment.h +++ b/arch/frv/include/asm/segment.h | |||
| @@ -21,12 +21,12 @@ typedef struct { | |||
| 21 | 21 | ||
| 22 | #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) | 22 | #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) |
| 23 | 23 | ||
| 24 | #define KERNEL_DS MAKE_MM_SEG(0xdfffffffUL) | ||
| 25 | |||
| 26 | #ifdef CONFIG_MMU | 24 | #ifdef CONFIG_MMU |
| 27 | #define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) | 25 | #define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) |
| 26 | #define KERNEL_DS MAKE_MM_SEG(0xdfffffffUL) | ||
| 28 | #else | 27 | #else |
| 29 | #define USER_DS KERNEL_DS | 28 | #define USER_DS MAKE_MM_SEG(memory_end) |
| 29 | #define KERNEL_DS MAKE_MM_SEG(0xe0000000UL) | ||
| 30 | #endif | 30 | #endif |
| 31 | 31 | ||
| 32 | #define get_ds() (KERNEL_DS) | 32 | #define get_ds() (KERNEL_DS) |
diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h index 53650c958f41..0b67ec5b4414 100644 --- a/arch/frv/include/asm/uaccess.h +++ b/arch/frv/include/asm/uaccess.h | |||
| @@ -27,8 +27,6 @@ | |||
| 27 | #define VERIFY_READ 0 | 27 | #define VERIFY_READ 0 |
| 28 | #define VERIFY_WRITE 1 | 28 | #define VERIFY_WRITE 1 |
| 29 | 29 | ||
| 30 | #define __addr_ok(addr) ((unsigned long)(addr) < get_addr_limit()) | ||
| 31 | |||
| 32 | /* | 30 | /* |
| 33 | * check that a range of addresses falls within the current address limit | 31 | * check that a range of addresses falls within the current address limit |
| 34 | */ | 32 | */ |
diff --git a/arch/m68k/include/asm/atomic_mm.h b/arch/m68k/include/asm/atomic_mm.h index 88b7af20a996..d9d2ed647435 100644 --- a/arch/m68k/include/asm/atomic_mm.h +++ b/arch/m68k/include/asm/atomic_mm.h | |||
| @@ -148,14 +148,18 @@ static inline int atomic_xchg(atomic_t *v, int new) | |||
| 148 | static inline int atomic_sub_and_test(int i, atomic_t *v) | 148 | static inline int atomic_sub_and_test(int i, atomic_t *v) |
| 149 | { | 149 | { |
| 150 | char c; | 150 | char c; |
| 151 | __asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v): "g" (i)); | 151 | __asm__ __volatile__("subl %2,%1; seq %0" |
| 152 | : "=d" (c), "+m" (*v) | ||
| 153 | : "id" (i)); | ||
| 152 | return c != 0; | 154 | return c != 0; |
| 153 | } | 155 | } |
| 154 | 156 | ||
| 155 | static inline int atomic_add_negative(int i, atomic_t *v) | 157 | static inline int atomic_add_negative(int i, atomic_t *v) |
| 156 | { | 158 | { |
| 157 | char c; | 159 | char c; |
| 158 | __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i)); | 160 | __asm__ __volatile__("addl %2,%1; smi %0" |
| 161 | : "=d" (c), "+m" (*v) | ||
| 162 | : "id" (i)); | ||
| 159 | return c != 0; | 163 | return c != 0; |
| 160 | } | 164 | } |
| 161 | 165 | ||
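Note: the atomic_mm.h hunk above only tightens the inline-asm input constraint from "g" to "id" (immediate or data register), so the compiler can no longer pick an operand form (for example a memory or address-register source) that subl/addl with a memory destination cannot encode; the semantics of atomic_sub_and_test() and atomic_add_negative() are unchanged. For reference, here is a portable userspace sketch of those semantics using GCC's __atomic builtins, which assumes GCC or Clang and is not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* Subtract i and report whether the result became zero. */
static bool my_atomic_sub_and_test(int i, atomic_t *v)
{
	return __atomic_sub_fetch(&v->counter, i, __ATOMIC_SEQ_CST) == 0;
}

/* Add i and report whether the result became negative. */
static bool my_atomic_add_negative(int i, atomic_t *v)
{
	return __atomic_add_fetch(&v->counter, i, __ATOMIC_SEQ_CST) < 0;
}

int main(void)
{
	atomic_t v = { .counter = 3 };

	printf("sub_and_test(3):  %d\n", my_atomic_sub_and_test(3, &v));  /* 1 */
	printf("add_negative(-1): %d\n", my_atomic_add_negative(-1, &v)); /* 1 */
	return 0;
}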
diff --git a/arch/m68k/include/asm/sigcontext.h b/arch/m68k/include/asm/sigcontext.h index 1320eaa4cc2a..a29dd74a17cb 100644 --- a/arch/m68k/include/asm/sigcontext.h +++ b/arch/m68k/include/asm/sigcontext.h | |||
| @@ -17,13 +17,11 @@ struct sigcontext { | |||
| 17 | #ifndef __uClinux__ | 17 | #ifndef __uClinux__ |
| 18 | # ifdef __mcoldfire__ | 18 | # ifdef __mcoldfire__ |
| 19 | unsigned long sc_fpregs[2][2]; /* room for two fp registers */ | 19 | unsigned long sc_fpregs[2][2]; /* room for two fp registers */ |
| 20 | unsigned long sc_fpcntl[3]; | ||
| 21 | unsigned char sc_fpstate[16+6*8]; | ||
| 22 | # else | 20 | # else |
| 23 | unsigned long sc_fpregs[2*3]; /* room for two fp registers */ | 21 | unsigned long sc_fpregs[2*3]; /* room for two fp registers */ |
| 22 | # endif | ||
| 24 | unsigned long sc_fpcntl[3]; | 23 | unsigned long sc_fpcntl[3]; |
| 25 | unsigned char sc_fpstate[216]; | 24 | unsigned char sc_fpstate[216]; |
| 26 | # endif | ||
| 27 | #endif | 25 | #endif |
| 28 | }; | 26 | }; |
| 29 | 27 | ||
diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h index 8dbb6e7a03a2..ad3fd61b2fe7 100644 --- a/arch/microblaze/include/asm/futex.h +++ b/arch/microblaze/include/asm/futex.h | |||
| @@ -55,7 +55,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
| 55 | __futex_atomic_op("or %1,%0,%4;", ret, oldval, uaddr, oparg); | 55 | __futex_atomic_op("or %1,%0,%4;", ret, oldval, uaddr, oparg); |
| 56 | break; | 56 | break; |
| 57 | case FUTEX_OP_ANDN: | 57 | case FUTEX_OP_ANDN: |
| 58 | __futex_atomic_op("and %1,%0,%4;", ret, oldval, uaddr, oparg); | 58 | __futex_atomic_op("andn %1,%0,%4;", ret, oldval, uaddr, oparg); |
| 59 | break; | 59 | break; |
| 60 | case FUTEX_OP_XOR: | 60 | case FUTEX_OP_XOR: |
| 61 | __futex_atomic_op("xor %1,%0,%4;", ret, oldval, uaddr, oparg); | 61 | __futex_atomic_op("xor %1,%0,%4;", ret, oldval, uaddr, oparg); |
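Note: the one-line futex fix above matters because FUTEX_OP_ANDN means "store oldval AND NOT oparg", so emitting a plain "and" silently computed the wrong value; MicroBlaze's andn instruction performs the bit-clear directly. A tiny sketch of the intended operation on an ordinary integer (not the atomic user-space access the kernel performs) follows.

#include <stdio.h>

/* FUTEX_OP_ANDN semantics: new = old & ~oparg (clear the bits in oparg). */
static int futex_op_andn(int oldval, int oparg)
{
	return oldval & ~oparg;
}

int main(void)
{
	int oldval = 0xff, oparg = 0x0f;

	/* With the buggy "and" the stored value would have been 0x0f
	 * instead of the intended 0xf0. */
	printf("andn(0x%x, 0x%x) = 0x%x\n", oldval, oparg,
	       futex_op_andn(oldval, oparg));
	return 0;
}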
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h index 32d621a56aee..e45a6eea92e0 100644 --- a/arch/microblaze/include/asm/io.h +++ b/arch/microblaze/include/asm/io.h | |||
| @@ -108,6 +108,11 @@ static inline void writel(unsigned int v, volatile void __iomem *addr) | |||
| 108 | #define iowrite16(v, addr) __raw_writew((u16)(v), (u16 *)(addr)) | 108 | #define iowrite16(v, addr) __raw_writew((u16)(v), (u16 *)(addr)) |
| 109 | #define iowrite32(v, addr) __raw_writel((u32)(v), (u32 *)(addr)) | 109 | #define iowrite32(v, addr) __raw_writel((u32)(v), (u32 *)(addr)) |
| 110 | 110 | ||
| 111 | #define ioread16be(addr) __raw_readw((u16 *)(addr)) | ||
| 112 | #define ioread32be(addr) __raw_readl((u32 *)(addr)) | ||
| 113 | #define iowrite16be(v, addr) __raw_writew((u16)(v), (u16 *)(addr)) | ||
| 114 | #define iowrite32be(v, addr) __raw_writel((u32)(v), (u32 *)(addr)) | ||
| 115 | |||
| 111 | /* These are the definitions for the x86 IO instructions | 116 | /* These are the definitions for the x86 IO instructions |
| 112 | * inb/inw/inl/outb/outw/outl, the "string" versions | 117 | * inb/inw/inl/outb/outw/outl, the "string" versions |
| 113 | * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions | 118 | * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions |
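Note: the new ioread*be()/iowrite*be() definitions above simply alias the raw accessors because on (big-endian) MicroBlaze a raw load already returns the bus byte order, so no swap is needed for registers declared big-endian. The hedged userspace sketch below shows the generic idea: extracting a big-endian 32-bit value from a register image, which on a big-endian host is exactly what the raw load yields.

#include <stdint.h>
#include <stdio.h>

/* Read a 32-bit big-endian register value from a memory image.  On a
 * big-endian CPU a plain 32-bit load returns the same result, which is why
 * the header can define ioread32be() as __raw_readl(). */
static uint32_t read32_be(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* A fake device register holding 0x12345678 in big-endian byte order. */
	uint8_t reg[4] = { 0x12, 0x34, 0x56, 0x78 };

	printf("register value: %#x\n", read32_be(reg));
	return 0;
}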
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c index 388b31ca65a1..515feb404555 100644 --- a/arch/microblaze/kernel/ftrace.c +++ b/arch/microblaze/kernel/ftrace.c | |||
| @@ -151,13 +151,10 @@ int ftrace_make_nop(struct module *mod, | |||
| 151 | return ret; | 151 | return ret; |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | static int ret_addr; /* initialized as 0 by default */ | ||
| 155 | |||
| 156 | /* I believe ftrace_make_nop is called first, before this function */ | 154 | /* I believe ftrace_make_nop is called first, before this function */ |
| 157 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | 155 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) |
| 158 | { | 156 | { |
| 159 | int ret; | 157 | int ret; |
| 160 | ret_addr = addr; /* saving where the barrier jump is */ | ||
| 161 | pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n", | 158 | pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n", |
| 162 | __func__, (unsigned int)addr, (unsigned int)rec->ip, imm); | 159 | __func__, (unsigned int)addr, (unsigned int)rec->ip, imm); |
| 163 | ret = ftrace_modify_code(rec->ip, imm); | 160 | ret = ftrace_modify_code(rec->ip, imm); |
| @@ -194,12 +191,9 @@ int ftrace_update_ftrace_func(ftrace_func_t func) | |||
| 194 | ret = ftrace_modify_code(ip, upper); | 191 | ret = ftrace_modify_code(ip, upper); |
| 195 | ret += ftrace_modify_code(ip + 4, lower); | 192 | ret += ftrace_modify_code(ip + 4, lower); |
| 196 | 193 | ||
| 197 | /* We just need to remove the rtsd r15, 8 by NOP */ | 194 | /* We just need to replace the rtsd r15, 8 with NOP */ |
| 198 | BUG_ON(!ret_addr); | 195 | ret += ftrace_modify_code((unsigned long)&ftrace_caller, |
| 199 | if (ret_addr) | 196 | MICROBLAZE_NOP); |
| 200 | ret += ftrace_modify_code(ret_addr, MICROBLAZE_NOP); | ||
| 201 | else | ||
| 202 | ret = 1; /* fault */ | ||
| 203 | 197 | ||
| 204 | /* All changes are done - lets do caches consistent */ | 198 | /* All changes are done - lets do caches consistent */ |
| 205 | flush_icache(); | 199 | flush_icache(); |
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c index 6d6349a145f9..a4a7770c6140 100644 --- a/arch/microblaze/kernel/ptrace.c +++ b/arch/microblaze/kernel/ptrace.c | |||
| @@ -75,7 +75,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
| 75 | { | 75 | { |
| 76 | int rval; | 76 | int rval; |
| 77 | unsigned long val = 0; | 77 | unsigned long val = 0; |
| 78 | unsigned long copied; | ||
| 79 | 78 | ||
| 80 | switch (request) { | 79 | switch (request) { |
| 81 | /* Read/write the word at location ADDR in the registers. */ | 80 | /* Read/write the word at location ADDR in the registers. */ |
diff --git a/arch/mips/alchemy/devboards/db1200/setup.c b/arch/mips/alchemy/devboards/db1200/setup.c index 379536e3abd1..be7e92ea01f3 100644 --- a/arch/mips/alchemy/devboards/db1200/setup.c +++ b/arch/mips/alchemy/devboards/db1200/setup.c | |||
| @@ -60,43 +60,6 @@ void __init board_setup(void) | |||
| 60 | wmb(); | 60 | wmb(); |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | /* use the hexleds to count the number of times the cpu has entered | ||
| 64 | * wait, the dots to indicate whether the CPU is currently idle or | ||
| 65 | * active (dots off = sleeping, dots on = working) for cases where | ||
| 66 | * the number doesn't change for a long(er) period of time. | ||
| 67 | */ | ||
| 68 | static void db1200_wait(void) | ||
| 69 | { | ||
| 70 | __asm__(" .set push \n" | ||
| 71 | " .set mips3 \n" | ||
| 72 | " .set noreorder \n" | ||
| 73 | " cache 0x14, 0(%0) \n" | ||
| 74 | " cache 0x14, 32(%0) \n" | ||
| 75 | " cache 0x14, 64(%0) \n" | ||
| 76 | /* dots off: we're about to call wait */ | ||
| 77 | " lui $26, 0xb980 \n" | ||
| 78 | " ori $27, $0, 3 \n" | ||
| 79 | " sb $27, 0x18($26) \n" | ||
| 80 | " sync \n" | ||
| 81 | " nop \n" | ||
| 82 | " wait \n" | ||
| 83 | " nop \n" | ||
| 84 | " nop \n" | ||
| 85 | " nop \n" | ||
| 86 | " nop \n" | ||
| 87 | " nop \n" | ||
| 88 | /* dots on: there's work to do, increment cntr */ | ||
| 89 | " lui $26, 0xb980 \n" | ||
| 90 | " sb $0, 0x18($26) \n" | ||
| 91 | " lui $26, 0xb9c0 \n" | ||
| 92 | " lb $27, 0($26) \n" | ||
| 93 | " addiu $27, $27, 1 \n" | ||
| 94 | " sb $27, 0($26) \n" | ||
| 95 | " sync \n" | ||
| 96 | " .set pop \n" | ||
| 97 | : : "r" (db1200_wait)); | ||
| 98 | } | ||
| 99 | |||
| 100 | static int __init db1200_arch_init(void) | 63 | static int __init db1200_arch_init(void) |
| 101 | { | 64 | { |
| 102 | /* GPIO7 is low-level triggered CPLD cascade */ | 65 | /* GPIO7 is low-level triggered CPLD cascade */ |
| @@ -110,9 +73,6 @@ static int __init db1200_arch_init(void) | |||
| 110 | irq_to_desc(DB1200_SD0_INSERT_INT)->status |= IRQ_NOAUTOEN; | 73 | irq_to_desc(DB1200_SD0_INSERT_INT)->status |= IRQ_NOAUTOEN; |
| 111 | irq_to_desc(DB1200_SD0_EJECT_INT)->status |= IRQ_NOAUTOEN; | 74 | irq_to_desc(DB1200_SD0_EJECT_INT)->status |= IRQ_NOAUTOEN; |
| 112 | 75 | ||
| 113 | if (cpu_wait) | ||
| 114 | cpu_wait = db1200_wait; | ||
| 115 | |||
| 116 | return 0; | 76 | return 0; |
| 117 | } | 77 | } |
| 118 | arch_initcall(db1200_arch_init); | 78 | arch_initcall(db1200_arch_init); |
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c index 246df7aca2e7..2fafc78e5ce1 100644 --- a/arch/mips/ar7/platform.c +++ b/arch/mips/ar7/platform.c | |||
| @@ -168,7 +168,7 @@ static struct plat_vlynq_data vlynq_high_data = { | |||
| 168 | .on = vlynq_on, | 168 | .on = vlynq_on, |
| 169 | .off = vlynq_off, | 169 | .off = vlynq_off, |
| 170 | }, | 170 | }, |
| 171 | .reset_bit = 26, | 171 | .reset_bit = 16, |
| 172 | .gpio_bit = 19, | 172 | .gpio_bit = 19, |
| 173 | }; | 173 | }; |
| 174 | 174 | ||
| @@ -600,6 +600,7 @@ static int __init ar7_register_devices(void) | |||
| 600 | } | 600 | } |
| 601 | 601 | ||
| 602 | if (ar7_has_high_cpmac()) { | 602 | if (ar7_has_high_cpmac()) { |
| 603 | res = fixed_phy_add(PHY_POLL, cpmac_high.id, &fixed_phy_status); | ||
| 603 | if (!res) { | 604 | if (!res) { |
| 604 | cpmac_get_mac(1, cpmac_high_data.dev_addr); | 605 | cpmac_get_mac(1, cpmac_high_data.dev_addr); |
| 605 | 606 | ||
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index ea17941168ca..8dba8cfb752f 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <asm/addrspace.h> | 18 | #include <asm/addrspace.h> |
| 19 | #include <bcm63xx_board.h> | 19 | #include <bcm63xx_board.h> |
| 20 | #include <bcm63xx_cpu.h> | 20 | #include <bcm63xx_cpu.h> |
| 21 | #include <bcm63xx_dev_uart.h> | ||
| 21 | #include <bcm63xx_regs.h> | 22 | #include <bcm63xx_regs.h> |
| 22 | #include <bcm63xx_io.h> | 23 | #include <bcm63xx_io.h> |
| 23 | #include <bcm63xx_dev_pci.h> | 24 | #include <bcm63xx_dev_pci.h> |
| @@ -40,6 +41,7 @@ static struct board_info __initdata board_96338gw = { | |||
| 40 | .name = "96338GW", | 41 | .name = "96338GW", |
| 41 | .expected_cpu_id = 0x6338, | 42 | .expected_cpu_id = 0x6338, |
| 42 | 43 | ||
| 44 | .has_uart0 = 1, | ||
| 43 | .has_enet0 = 1, | 45 | .has_enet0 = 1, |
| 44 | .enet0 = { | 46 | .enet0 = { |
| 45 | .force_speed_100 = 1, | 47 | .force_speed_100 = 1, |
| @@ -82,6 +84,7 @@ static struct board_info __initdata board_96338w = { | |||
| 82 | .name = "96338W", | 84 | .name = "96338W", |
| 83 | .expected_cpu_id = 0x6338, | 85 | .expected_cpu_id = 0x6338, |
| 84 | 86 | ||
| 87 | .has_uart0 = 1, | ||
| 85 | .has_enet0 = 1, | 88 | .has_enet0 = 1, |
| 86 | .enet0 = { | 89 | .enet0 = { |
| 87 | .force_speed_100 = 1, | 90 | .force_speed_100 = 1, |
| @@ -126,6 +129,8 @@ static struct board_info __initdata board_96338w = { | |||
| 126 | static struct board_info __initdata board_96345gw2 = { | 129 | static struct board_info __initdata board_96345gw2 = { |
| 127 | .name = "96345GW2", | 130 | .name = "96345GW2", |
| 128 | .expected_cpu_id = 0x6345, | 131 | .expected_cpu_id = 0x6345, |
| 132 | |||
| 133 | .has_uart0 = 1, | ||
| 129 | }; | 134 | }; |
| 130 | #endif | 135 | #endif |
| 131 | 136 | ||
| @@ -137,6 +142,7 @@ static struct board_info __initdata board_96348r = { | |||
| 137 | .name = "96348R", | 142 | .name = "96348R", |
| 138 | .expected_cpu_id = 0x6348, | 143 | .expected_cpu_id = 0x6348, |
| 139 | 144 | ||
| 145 | .has_uart0 = 1, | ||
| 140 | .has_enet0 = 1, | 146 | .has_enet0 = 1, |
| 141 | .has_pci = 1, | 147 | .has_pci = 1, |
| 142 | 148 | ||
| @@ -180,6 +186,7 @@ static struct board_info __initdata board_96348gw_10 = { | |||
| 180 | .name = "96348GW-10", | 186 | .name = "96348GW-10", |
| 181 | .expected_cpu_id = 0x6348, | 187 | .expected_cpu_id = 0x6348, |
| 182 | 188 | ||
| 189 | .has_uart0 = 1, | ||
| 183 | .has_enet0 = 1, | 190 | .has_enet0 = 1, |
| 184 | .has_enet1 = 1, | 191 | .has_enet1 = 1, |
| 185 | .has_pci = 1, | 192 | .has_pci = 1, |
| @@ -239,6 +246,7 @@ static struct board_info __initdata board_96348gw_11 = { | |||
| 239 | .name = "96348GW-11", | 246 | .name = "96348GW-11", |
| 240 | .expected_cpu_id = 0x6348, | 247 | .expected_cpu_id = 0x6348, |
| 241 | 248 | ||
| 249 | .has_uart0 = 1, | ||
| 242 | .has_enet0 = 1, | 250 | .has_enet0 = 1, |
| 243 | .has_enet1 = 1, | 251 | .has_enet1 = 1, |
| 244 | .has_pci = 1, | 252 | .has_pci = 1, |
| @@ -292,6 +300,7 @@ static struct board_info __initdata board_96348gw = { | |||
| 292 | .name = "96348GW", | 300 | .name = "96348GW", |
| 293 | .expected_cpu_id = 0x6348, | 301 | .expected_cpu_id = 0x6348, |
| 294 | 302 | ||
| 303 | .has_uart0 = 1, | ||
| 295 | .has_enet0 = 1, | 304 | .has_enet0 = 1, |
| 296 | .has_enet1 = 1, | 305 | .has_enet1 = 1, |
| 297 | .has_pci = 1, | 306 | .has_pci = 1, |
| @@ -349,9 +358,10 @@ static struct board_info __initdata board_FAST2404 = { | |||
| 349 | .name = "F@ST2404", | 358 | .name = "F@ST2404", |
| 350 | .expected_cpu_id = 0x6348, | 359 | .expected_cpu_id = 0x6348, |
| 351 | 360 | ||
| 352 | .has_enet0 = 1, | 361 | .has_uart0 = 1, |
| 353 | .has_enet1 = 1, | 362 | .has_enet0 = 1, |
| 354 | .has_pci = 1, | 363 | .has_enet1 = 1, |
| 364 | .has_pci = 1, | ||
| 355 | 365 | ||
| 356 | .enet0 = { | 366 | .enet0 = { |
| 357 | .has_phy = 1, | 367 | .has_phy = 1, |
| @@ -368,10 +378,30 @@ static struct board_info __initdata board_FAST2404 = { | |||
| 368 | .has_ehci0 = 1, | 378 | .has_ehci0 = 1, |
| 369 | }; | 379 | }; |
| 370 | 380 | ||
| 381 | static struct board_info __initdata board_rta1025w_16 = { | ||
| 382 | .name = "RTA1025W_16", | ||
| 383 | .expected_cpu_id = 0x6348, | ||
| 384 | |||
| 385 | .has_enet0 = 1, | ||
| 386 | .has_enet1 = 1, | ||
| 387 | .has_pci = 1, | ||
| 388 | |||
| 389 | .enet0 = { | ||
| 390 | .has_phy = 1, | ||
| 391 | .use_internal_phy = 1, | ||
| 392 | }, | ||
| 393 | .enet1 = { | ||
| 394 | .force_speed_100 = 1, | ||
| 395 | .force_duplex_full = 1, | ||
| 396 | }, | ||
| 397 | }; | ||
| 398 | |||
| 399 | |||
| 371 | static struct board_info __initdata board_DV201AMR = { | 400 | static struct board_info __initdata board_DV201AMR = { |
| 372 | .name = "DV201AMR", | 401 | .name = "DV201AMR", |
| 373 | .expected_cpu_id = 0x6348, | 402 | .expected_cpu_id = 0x6348, |
| 374 | 403 | ||
| 404 | .has_uart0 = 1, | ||
| 375 | .has_pci = 1, | 405 | .has_pci = 1, |
| 376 | .has_ohci0 = 1, | 406 | .has_ohci0 = 1, |
| 377 | 407 | ||
| @@ -391,6 +421,7 @@ static struct board_info __initdata board_96348gw_a = { | |||
| 391 | .name = "96348GW-A", | 421 | .name = "96348GW-A", |
| 392 | .expected_cpu_id = 0x6348, | 422 | .expected_cpu_id = 0x6348, |
| 393 | 423 | ||
| 424 | .has_uart0 = 1, | ||
| 394 | .has_enet0 = 1, | 425 | .has_enet0 = 1, |
| 395 | .has_enet1 = 1, | 426 | .has_enet1 = 1, |
| 396 | .has_pci = 1, | 427 | .has_pci = 1, |
| @@ -416,6 +447,7 @@ static struct board_info __initdata board_96358vw = { | |||
| 416 | .name = "96358VW", | 447 | .name = "96358VW", |
| 417 | .expected_cpu_id = 0x6358, | 448 | .expected_cpu_id = 0x6358, |
| 418 | 449 | ||
| 450 | .has_uart0 = 1, | ||
| 419 | .has_enet0 = 1, | 451 | .has_enet0 = 1, |
| 420 | .has_enet1 = 1, | 452 | .has_enet1 = 1, |
| 421 | .has_pci = 1, | 453 | .has_pci = 1, |
| @@ -467,6 +499,7 @@ static struct board_info __initdata board_96358vw2 = { | |||
| 467 | .name = "96358VW2", | 499 | .name = "96358VW2", |
| 468 | .expected_cpu_id = 0x6358, | 500 | .expected_cpu_id = 0x6358, |
| 469 | 501 | ||
| 502 | .has_uart0 = 1, | ||
| 470 | .has_enet0 = 1, | 503 | .has_enet0 = 1, |
| 471 | .has_enet1 = 1, | 504 | .has_enet1 = 1, |
| 472 | .has_pci = 1, | 505 | .has_pci = 1, |
| @@ -514,6 +547,7 @@ static struct board_info __initdata board_AGPFS0 = { | |||
| 514 | .name = "AGPF-S0", | 547 | .name = "AGPF-S0", |
| 515 | .expected_cpu_id = 0x6358, | 548 | .expected_cpu_id = 0x6358, |
| 516 | 549 | ||
| 550 | .has_uart0 = 1, | ||
| 517 | .has_enet0 = 1, | 551 | .has_enet0 = 1, |
| 518 | .has_enet1 = 1, | 552 | .has_enet1 = 1, |
| 519 | .has_pci = 1, | 553 | .has_pci = 1, |
| @@ -531,6 +565,27 @@ static struct board_info __initdata board_AGPFS0 = { | |||
| 531 | .has_ohci0 = 1, | 565 | .has_ohci0 = 1, |
| 532 | .has_ehci0 = 1, | 566 | .has_ehci0 = 1, |
| 533 | }; | 567 | }; |
| 568 | |||
| 569 | static struct board_info __initdata board_DWVS0 = { | ||
| 570 | .name = "DWV-S0", | ||
| 571 | .expected_cpu_id = 0x6358, | ||
| 572 | |||
| 573 | .has_enet0 = 1, | ||
| 574 | .has_enet1 = 1, | ||
| 575 | .has_pci = 1, | ||
| 576 | |||
| 577 | .enet0 = { | ||
| 578 | .has_phy = 1, | ||
| 579 | .use_internal_phy = 1, | ||
| 580 | }, | ||
| 581 | |||
| 582 | .enet1 = { | ||
| 583 | .force_speed_100 = 1, | ||
| 584 | .force_duplex_full = 1, | ||
| 585 | }, | ||
| 586 | |||
| 587 | .has_ohci0 = 1, | ||
| 588 | }; | ||
| 534 | #endif | 589 | #endif |
| 535 | 590 | ||
| 536 | /* | 591 | /* |
| @@ -552,16 +607,88 @@ static const struct board_info __initdata *bcm963xx_boards[] = { | |||
| 552 | &board_FAST2404, | 607 | &board_FAST2404, |
| 553 | &board_DV201AMR, | 608 | &board_DV201AMR, |
| 554 | &board_96348gw_a, | 609 | &board_96348gw_a, |
| 610 | &board_rta1025w_16, | ||
| 555 | #endif | 611 | #endif |
| 556 | 612 | ||
| 557 | #ifdef CONFIG_BCM63XX_CPU_6358 | 613 | #ifdef CONFIG_BCM63XX_CPU_6358 |
| 558 | &board_96358vw, | 614 | &board_96358vw, |
| 559 | &board_96358vw2, | 615 | &board_96358vw2, |
| 560 | &board_AGPFS0, | 616 | &board_AGPFS0, |
| 617 | &board_DWVS0, | ||
| 561 | #endif | 618 | #endif |
| 562 | }; | 619 | }; |
| 563 | 620 | ||
| 564 | /* | 621 | /* |
| 622 | * Register a sane SPROMv2 to make the on-board | ||
| 623 | * bcm4318 WLAN work | ||
| 624 | */ | ||
| 625 | #ifdef CONFIG_SSB_PCIHOST | ||
| 626 | static struct ssb_sprom bcm63xx_sprom = { | ||
| 627 | .revision = 0x02, | ||
| 628 | .board_rev = 0x17, | ||
| 629 | .country_code = 0x0, | ||
| 630 | .ant_available_bg = 0x3, | ||
| 631 | .pa0b0 = 0x15ae, | ||
| 632 | .pa0b1 = 0xfa85, | ||
| 633 | .pa0b2 = 0xfe8d, | ||
| 634 | .pa1b0 = 0xffff, | ||
| 635 | .pa1b1 = 0xffff, | ||
| 636 | .pa1b2 = 0xffff, | ||
| 637 | .gpio0 = 0xff, | ||
| 638 | .gpio1 = 0xff, | ||
| 639 | .gpio2 = 0xff, | ||
| 640 | .gpio3 = 0xff, | ||
| 641 | .maxpwr_bg = 0x004c, | ||
| 642 | .itssi_bg = 0x00, | ||
| 643 | .boardflags_lo = 0x2848, | ||
| 644 | .boardflags_hi = 0x0000, | ||
| 645 | }; | ||
| 646 | #endif | ||
| 647 | |||
| 648 | /* | ||
| 649 | * return board name for /proc/cpuinfo | ||
| 650 | */ | ||
| 651 | const char *board_get_name(void) | ||
| 652 | { | ||
| 653 | return board.name; | ||
| 654 | } | ||
| 655 | |||
| 656 | /* | ||
| 657 | * register & return a new board mac address | ||
| 658 | */ | ||
| 659 | static int board_get_mac_address(u8 *mac) | ||
| 660 | { | ||
| 661 | u8 *p; | ||
| 662 | int count; | ||
| 663 | |||
| 664 | if (mac_addr_used >= nvram.mac_addr_count) { | ||
| 665 | printk(KERN_ERR PFX "not enough mac address\n"); | ||
| 666 | return -ENODEV; | ||
| 667 | } | ||
| 668 | |||
| 669 | memcpy(mac, nvram.mac_addr_base, ETH_ALEN); | ||
| 670 | p = mac + ETH_ALEN - 1; | ||
| 671 | count = mac_addr_used; | ||
| 672 | |||
| 673 | while (count--) { | ||
| 674 | do { | ||
| 675 | (*p)++; | ||
| 676 | if (*p != 0) | ||
| 677 | break; | ||
| 678 | p--; | ||
| 679 | } while (p != mac); | ||
| 680 | } | ||
| 681 | |||
| 682 | if (p == mac) { | ||
| 683 | printk(KERN_ERR PFX "unable to fetch mac address\n"); | ||
| 684 | return -ENODEV; | ||
| 685 | } | ||
| 686 | |||
| 687 | mac_addr_used++; | ||
| 688 | return 0; | ||
| 689 | } | ||
| 690 | |||
| 691 | /* | ||
| 565 | * early init callback, read nvram data from flash and checksum it | 692 | * early init callback, read nvram data from flash and checksum it |
| 566 | */ | 693 | */ |
| 567 | void __init board_prom_init(void) | 694 | void __init board_prom_init(void) |
| @@ -659,6 +786,17 @@ void __init board_prom_init(void) | |||
| 659 | } | 786 | } |
| 660 | 787 | ||
| 661 | bcm_gpio_writel(val, GPIO_MODE_REG); | 788 | bcm_gpio_writel(val, GPIO_MODE_REG); |
| 789 | |||
| 790 | /* Generate MAC address for WLAN and | ||
| 791 | * register our SPROM */ | ||
| 792 | #ifdef CONFIG_SSB_PCIHOST | ||
| 793 | if (!board_get_mac_address(bcm63xx_sprom.il0mac)) { | ||
| 794 | memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN); | ||
| 795 | memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN); | ||
| 796 | if (ssb_arch_set_fallback_sprom(&bcm63xx_sprom) < 0) | ||
| 797 | printk(KERN_ERR "failed to register fallback SPROM\n"); | ||
| 798 | } | ||
| 799 | #endif | ||
| 662 | } | 800 | } |
| 663 | 801 | ||
| 664 | /* | 802 | /* |
| @@ -676,49 +814,6 @@ void __init board_setup(void) | |||
| 676 | panic("unexpected CPU for bcm963xx board"); | 814 | panic("unexpected CPU for bcm963xx board"); |
| 677 | } | 815 | } |
| 678 | 816 | ||
| 679 | /* | ||
| 680 | * return board name for /proc/cpuinfo | ||
| 681 | */ | ||
| 682 | const char *board_get_name(void) | ||
| 683 | { | ||
| 684 | return board.name; | ||
| 685 | } | ||
| 686 | |||
| 687 | /* | ||
| 688 | * register & return a new board mac address | ||
| 689 | */ | ||
| 690 | static int board_get_mac_address(u8 *mac) | ||
| 691 | { | ||
| 692 | u8 *p; | ||
| 693 | int count; | ||
| 694 | |||
| 695 | if (mac_addr_used >= nvram.mac_addr_count) { | ||
| 696 | printk(KERN_ERR PFX "not enough mac address\n"); | ||
| 697 | return -ENODEV; | ||
| 698 | } | ||
| 699 | |||
| 700 | memcpy(mac, nvram.mac_addr_base, ETH_ALEN); | ||
| 701 | p = mac + ETH_ALEN - 1; | ||
| 702 | count = mac_addr_used; | ||
| 703 | |||
| 704 | while (count--) { | ||
| 705 | do { | ||
| 706 | (*p)++; | ||
| 707 | if (*p != 0) | ||
| 708 | break; | ||
| 709 | p--; | ||
| 710 | } while (p != mac); | ||
| 711 | } | ||
| 712 | |||
| 713 | if (p == mac) { | ||
| 714 | printk(KERN_ERR PFX "unable to fetch mac address\n"); | ||
| 715 | return -ENODEV; | ||
| 716 | } | ||
| 717 | |||
| 718 | mac_addr_used++; | ||
| 719 | return 0; | ||
| 720 | } | ||
| 721 | |||
| 722 | static struct mtd_partition mtd_partitions[] = { | 817 | static struct mtd_partition mtd_partitions[] = { |
| 723 | { | 818 | { |
| 724 | .name = "cfe", | 819 | .name = "cfe", |
| @@ -750,33 +845,6 @@ static struct platform_device mtd_dev = { | |||
| 750 | }, | 845 | }, |
| 751 | }; | 846 | }; |
| 752 | 847 | ||
| 753 | /* | ||
| 754 | * Register a sane SPROMv2 to make the on-board | ||
| 755 | * bcm4318 WLAN work | ||
| 756 | */ | ||
| 757 | #ifdef CONFIG_SSB_PCIHOST | ||
| 758 | static struct ssb_sprom bcm63xx_sprom = { | ||
| 759 | .revision = 0x02, | ||
| 760 | .board_rev = 0x17, | ||
| 761 | .country_code = 0x0, | ||
| 762 | .ant_available_bg = 0x3, | ||
| 763 | .pa0b0 = 0x15ae, | ||
| 764 | .pa0b1 = 0xfa85, | ||
| 765 | .pa0b2 = 0xfe8d, | ||
| 766 | .pa1b0 = 0xffff, | ||
| 767 | .pa1b1 = 0xffff, | ||
| 768 | .pa1b2 = 0xffff, | ||
| 769 | .gpio0 = 0xff, | ||
| 770 | .gpio1 = 0xff, | ||
| 771 | .gpio2 = 0xff, | ||
| 772 | .gpio3 = 0xff, | ||
| 773 | .maxpwr_bg = 0x004c, | ||
| 774 | .itssi_bg = 0x00, | ||
| 775 | .boardflags_lo = 0x2848, | ||
| 776 | .boardflags_hi = 0x0000, | ||
| 777 | }; | ||
| 778 | #endif | ||
| 779 | |||
| 780 | static struct gpio_led_platform_data bcm63xx_led_data; | 848 | static struct gpio_led_platform_data bcm63xx_led_data; |
| 781 | 849 | ||
| 782 | static struct platform_device bcm63xx_gpio_leds = { | 850 | static struct platform_device bcm63xx_gpio_leds = { |
| @@ -792,6 +860,12 @@ int __init board_register_devices(void) | |||
| 792 | { | 860 | { |
| 793 | u32 val; | 861 | u32 val; |
| 794 | 862 | ||
| 863 | if (board.has_uart0) | ||
| 864 | bcm63xx_uart_register(0); | ||
| 865 | |||
| 866 | if (board.has_uart1) | ||
| 867 | bcm63xx_uart_register(1); | ||
| 868 | |||
| 795 | if (board.has_pccard) | 869 | if (board.has_pccard) |
| 796 | bcm63xx_pcmcia_register(); | 870 | bcm63xx_pcmcia_register(); |
| 797 | 871 | ||
| @@ -806,17 +880,6 @@ int __init board_register_devices(void) | |||
| 806 | if (board.has_dsp) | 880 | if (board.has_dsp) |
| 807 | bcm63xx_dsp_register(&board.dsp); | 881 | bcm63xx_dsp_register(&board.dsp); |
| 808 | 882 | ||
| 809 | /* Generate MAC address for WLAN and | ||
| 810 | * register our SPROM */ | ||
| 811 | #ifdef CONFIG_SSB_PCIHOST | ||
| 812 | if (!board_get_mac_address(bcm63xx_sprom.il0mac)) { | ||
| 813 | memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN); | ||
| 814 | memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN); | ||
| 815 | if (ssb_arch_set_fallback_sprom(&bcm63xx_sprom) < 0) | ||
| 816 | printk(KERN_ERR "failed to register fallback SPROM\n"); | ||
| 817 | } | ||
| 818 | #endif | ||
| 819 | |||
| 820 | /* read base address of boot chip select (0) */ | 883 | /* read base address of boot chip select (0) */ |
| 821 | if (BCMCPU_IS_6345()) | 884 | if (BCMCPU_IS_6345()) |
| 822 | val = 0x1fc00000; | 885 | val = 0x1fc00000; |
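Note: board_get_mac_address(), moved above board_prom_init() in the hunk above so the fallback SPROM can be filled during early init, hands out consecutive MAC addresses by adding the number of addresses already used to the nvram base address, walking bytes from the end with carry. Below is a standalone sketch of that increment-with-carry idea (plain C, no nvram access; the base address is made up, and the walk restarts from the last byte for each increment).

#include <stdio.h>

#define ETH_ALEN 6

/* Add "count" to a 6-byte MAC address, propagating the carry from the last
 * byte towards the first.  Returns 0 on success, -1 if the carry would run
 * off the front of the address. */
static int mac_add(unsigned char mac[ETH_ALEN], int count)
{
	while (count--) {
		unsigned char *p = mac + ETH_ALEN - 1;

		do {
			(*p)++;
			if (*p != 0)
				break;          /* no carry into the next byte */
			p--;
		} while (p != mac);

		if (p == mac)
			return -1;              /* address space exhausted */
	}
	return 0;
}

int main(void)
{
	/* Hypothetical nvram base address. */
	unsigned char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0xfe };

	if (mac_add(mac, 3) == 0)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}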
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c index 70378bb5e3f9..cbb7caf86d77 100644 --- a/arch/mips/bcm63xx/cpu.c +++ b/arch/mips/bcm63xx/cpu.c | |||
| @@ -36,6 +36,7 @@ static const unsigned long bcm96338_regs_base[] = { | |||
| 36 | [RSET_TIMER] = BCM_6338_TIMER_BASE, | 36 | [RSET_TIMER] = BCM_6338_TIMER_BASE, |
| 37 | [RSET_WDT] = BCM_6338_WDT_BASE, | 37 | [RSET_WDT] = BCM_6338_WDT_BASE, |
| 38 | [RSET_UART0] = BCM_6338_UART0_BASE, | 38 | [RSET_UART0] = BCM_6338_UART0_BASE, |
| 39 | [RSET_UART1] = BCM_6338_UART1_BASE, | ||
| 39 | [RSET_GPIO] = BCM_6338_GPIO_BASE, | 40 | [RSET_GPIO] = BCM_6338_GPIO_BASE, |
| 40 | [RSET_SPI] = BCM_6338_SPI_BASE, | 41 | [RSET_SPI] = BCM_6338_SPI_BASE, |
| 41 | [RSET_OHCI0] = BCM_6338_OHCI0_BASE, | 42 | [RSET_OHCI0] = BCM_6338_OHCI0_BASE, |
| @@ -72,6 +73,7 @@ static const unsigned long bcm96345_regs_base[] = { | |||
| 72 | [RSET_TIMER] = BCM_6345_TIMER_BASE, | 73 | [RSET_TIMER] = BCM_6345_TIMER_BASE, |
| 73 | [RSET_WDT] = BCM_6345_WDT_BASE, | 74 | [RSET_WDT] = BCM_6345_WDT_BASE, |
| 74 | [RSET_UART0] = BCM_6345_UART0_BASE, | 75 | [RSET_UART0] = BCM_6345_UART0_BASE, |
| 76 | [RSET_UART1] = BCM_6345_UART1_BASE, | ||
| 75 | [RSET_GPIO] = BCM_6345_GPIO_BASE, | 77 | [RSET_GPIO] = BCM_6345_GPIO_BASE, |
| 76 | [RSET_SPI] = BCM_6345_SPI_BASE, | 78 | [RSET_SPI] = BCM_6345_SPI_BASE, |
| 77 | [RSET_UDC0] = BCM_6345_UDC0_BASE, | 79 | [RSET_UDC0] = BCM_6345_UDC0_BASE, |
| @@ -109,6 +111,7 @@ static const unsigned long bcm96348_regs_base[] = { | |||
| 109 | [RSET_TIMER] = BCM_6348_TIMER_BASE, | 111 | [RSET_TIMER] = BCM_6348_TIMER_BASE, |
| 110 | [RSET_WDT] = BCM_6348_WDT_BASE, | 112 | [RSET_WDT] = BCM_6348_WDT_BASE, |
| 111 | [RSET_UART0] = BCM_6348_UART0_BASE, | 113 | [RSET_UART0] = BCM_6348_UART0_BASE, |
| 114 | [RSET_UART1] = BCM_6348_UART1_BASE, | ||
| 112 | [RSET_GPIO] = BCM_6348_GPIO_BASE, | 115 | [RSET_GPIO] = BCM_6348_GPIO_BASE, |
| 113 | [RSET_SPI] = BCM_6348_SPI_BASE, | 116 | [RSET_SPI] = BCM_6348_SPI_BASE, |
| 114 | [RSET_OHCI0] = BCM_6348_OHCI0_BASE, | 117 | [RSET_OHCI0] = BCM_6348_OHCI0_BASE, |
| @@ -150,6 +153,7 @@ static const unsigned long bcm96358_regs_base[] = { | |||
| 150 | [RSET_TIMER] = BCM_6358_TIMER_BASE, | 153 | [RSET_TIMER] = BCM_6358_TIMER_BASE, |
| 151 | [RSET_WDT] = BCM_6358_WDT_BASE, | 154 | [RSET_WDT] = BCM_6358_WDT_BASE, |
| 152 | [RSET_UART0] = BCM_6358_UART0_BASE, | 155 | [RSET_UART0] = BCM_6358_UART0_BASE, |
| 156 | [RSET_UART1] = BCM_6358_UART1_BASE, | ||
| 153 | [RSET_GPIO] = BCM_6358_GPIO_BASE, | 157 | [RSET_GPIO] = BCM_6358_GPIO_BASE, |
| 154 | [RSET_SPI] = BCM_6358_SPI_BASE, | 158 | [RSET_SPI] = BCM_6358_SPI_BASE, |
| 155 | [RSET_OHCI0] = BCM_6358_OHCI0_BASE, | 159 | [RSET_OHCI0] = BCM_6358_OHCI0_BASE, |
| @@ -170,6 +174,7 @@ static const unsigned long bcm96358_regs_base[] = { | |||
| 170 | static const int bcm96358_irqs[] = { | 174 | static const int bcm96358_irqs[] = { |
| 171 | [IRQ_TIMER] = BCM_6358_TIMER_IRQ, | 175 | [IRQ_TIMER] = BCM_6358_TIMER_IRQ, |
| 172 | [IRQ_UART0] = BCM_6358_UART0_IRQ, | 176 | [IRQ_UART0] = BCM_6358_UART0_IRQ, |
| 177 | [IRQ_UART1] = BCM_6358_UART1_IRQ, | ||
| 173 | [IRQ_DSL] = BCM_6358_DSL_IRQ, | 178 | [IRQ_DSL] = BCM_6358_DSL_IRQ, |
| 174 | [IRQ_ENET0] = BCM_6358_ENET0_IRQ, | 179 | [IRQ_ENET0] = BCM_6358_ENET0_IRQ, |
| 175 | [IRQ_ENET1] = BCM_6358_ENET1_IRQ, | 180 | [IRQ_ENET1] = BCM_6358_ENET1_IRQ, |
diff --git a/arch/mips/bcm63xx/dev-uart.c b/arch/mips/bcm63xx/dev-uart.c index b0519461ad9b..c2963da0253e 100644 --- a/arch/mips/bcm63xx/dev-uart.c +++ b/arch/mips/bcm63xx/dev-uart.c | |||
| @@ -11,31 +11,65 @@ | |||
| 11 | #include <linux/platform_device.h> | 11 | #include <linux/platform_device.h> |
| 12 | #include <bcm63xx_cpu.h> | 12 | #include <bcm63xx_cpu.h> |
| 13 | 13 | ||
| 14 | static struct resource uart_resources[] = { | 14 | static struct resource uart0_resources[] = { |
| 15 | { | 15 | { |
| 16 | .start = -1, /* filled at runtime */ | 16 | /* start & end filled at runtime */ |
| 17 | .end = -1, /* filled at runtime */ | ||
| 18 | .flags = IORESOURCE_MEM, | 17 | .flags = IORESOURCE_MEM, |
| 19 | }, | 18 | }, |
| 20 | { | 19 | { |
| 21 | .start = -1, /* filled at runtime */ | 20 | /* start filled at runtime */ |
| 22 | .flags = IORESOURCE_IRQ, | 21 | .flags = IORESOURCE_IRQ, |
| 23 | }, | 22 | }, |
| 24 | }; | 23 | }; |
| 25 | 24 | ||
| 26 | static struct platform_device bcm63xx_uart_device = { | 25 | static struct resource uart1_resources[] = { |
| 27 | .name = "bcm63xx_uart", | 26 | { |
| 28 | .id = 0, | 27 | /* start & end filled at runtime */ |
| 29 | .num_resources = ARRAY_SIZE(uart_resources), | 28 | .flags = IORESOURCE_MEM, |
| 30 | .resource = uart_resources, | 29 | }, |
| 30 | { | ||
| 31 | /* start filled at runtime */ | ||
| 32 | .flags = IORESOURCE_IRQ, | ||
| 33 | }, | ||
| 34 | }; | ||
| 35 | |||
| 36 | static struct platform_device bcm63xx_uart_devices[] = { | ||
| 37 | { | ||
| 38 | .name = "bcm63xx_uart", | ||
| 39 | .id = 0, | ||
| 40 | .num_resources = ARRAY_SIZE(uart0_resources), | ||
| 41 | .resource = uart0_resources, | ||
| 42 | }, | ||
| 43 | |||
| 44 | { | ||
| 45 | .name = "bcm63xx_uart", | ||
| 46 | .id = 1, | ||
| 47 | .num_resources = ARRAY_SIZE(uart1_resources), | ||
| 48 | .resource = uart1_resources, | ||
| 49 | } | ||
| 31 | }; | 50 | }; |
| 32 | 51 | ||
| 33 | int __init bcm63xx_uart_register(void) | 52 | int __init bcm63xx_uart_register(unsigned int id) |
| 34 | { | 53 | { |
| 35 | uart_resources[0].start = bcm63xx_regset_address(RSET_UART0); | 54 | if (id >= ARRAY_SIZE(bcm63xx_uart_devices)) |
| 36 | uart_resources[0].end = uart_resources[0].start; | 55 | return -ENODEV; |
| 37 | uart_resources[0].end += RSET_UART_SIZE - 1; | 56 | |
| 38 | uart_resources[1].start = bcm63xx_get_irq_number(IRQ_UART0); | 57 | if (id == 1 && !BCMCPU_IS_6358()) |
| 39 | return platform_device_register(&bcm63xx_uart_device); | 58 | return -ENODEV; |
| 59 | |||
| 60 | if (id == 0) { | ||
| 61 | uart0_resources[0].start = bcm63xx_regset_address(RSET_UART0); | ||
| 62 | uart0_resources[0].end = uart0_resources[0].start + | ||
| 63 | RSET_UART_SIZE - 1; | ||
| 64 | uart0_resources[1].start = bcm63xx_get_irq_number(IRQ_UART0); | ||
| 65 | } | ||
| 66 | |||
| 67 | if (id == 1) { | ||
| 68 | uart1_resources[0].start = bcm63xx_regset_address(RSET_UART1); | ||
| 69 | uart1_resources[0].end = uart1_resources[0].start + | ||
| 70 | RSET_UART_SIZE - 1; | ||
| 71 | uart1_resources[1].start = bcm63xx_get_irq_number(IRQ_UART1); | ||
| 72 | } | ||
| 73 | |||
| 74 | return platform_device_register(&bcm63xx_uart_devices[id]); | ||
| 40 | } | 75 | } |
| 41 | arch_initcall(bcm63xx_uart_register); | ||
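Note: the rewritten bcm63xx_uart_register() above switches from one hard-wired device to an array indexed by id, filling each platform device's MMIO and IRQ resources at runtime from the per-SoC tables behind bcm63xx_regset_address()/bcm63xx_get_irq_number(), and rejecting UART1 on chips that lack it. A small hedged sketch of this fill-at-registration pattern follows, with invented base addresses and a printf standing in for platform_device_register().

#include <stdio.h>

#define NUM_UARTS     2
#define UART_REG_SIZE 0x18   /* assumed register window size */

struct resource_sketch {
	unsigned long start, end;  /* MMIO window */
	int irq;
};

static struct resource_sketch uart_res[NUM_UARTS];

/* Per-SoC tables; the values here are invented for the sketch. */
static const unsigned long uart_base[NUM_UARTS] = { 0xfffe0300, 0xfffe0320 };
static const int uart_irq[NUM_UARTS] = { 10, 11 };

static int soc_has_uart1;   /* pretend this SoC has only UART0 */

static int uart_register(unsigned int id)
{
	if (id >= NUM_UARTS)
		return -1;
	if (id == 1 && !soc_has_uart1)
		return -1;

	uart_res[id].start = uart_base[id];
	uart_res[id].end = uart_base[id] + UART_REG_SIZE - 1;
	uart_res[id].irq = uart_irq[id];

	printf("uart%u: mmio %#lx-%#lx irq %d\n", id,
	       uart_res[id].start, uart_res[id].end, uart_res[id].irq);
	return 0;   /* platform_device_register() in the real code */
}

int main(void)
{
	uart_register(0);
	if (uart_register(1) < 0)
		printf("uart1: not present on this SoC\n");
	return 0;
}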
diff --git a/arch/mips/bcm63xx/gpio.c b/arch/mips/bcm63xx/gpio.c index 87ca39046334..315bc7f79ce1 100644 --- a/arch/mips/bcm63xx/gpio.c +++ b/arch/mips/bcm63xx/gpio.c | |||
| @@ -125,10 +125,10 @@ static struct gpio_chip bcm63xx_gpio_chip = { | |||
| 125 | 125 | ||
| 126 | int __init bcm63xx_gpio_init(void) | 126 | int __init bcm63xx_gpio_init(void) |
| 127 | { | 127 | { |
| 128 | gpio_out_low = bcm_gpio_readl(GPIO_DATA_LO_REG); | ||
| 129 | gpio_out_high = bcm_gpio_readl(GPIO_DATA_HI_REG); | ||
| 128 | bcm63xx_gpio_chip.ngpio = bcm63xx_gpio_count(); | 130 | bcm63xx_gpio_chip.ngpio = bcm63xx_gpio_count(); |
| 129 | pr_info("registering %d GPIOs\n", bcm63xx_gpio_chip.ngpio); | 131 | pr_info("registering %d GPIOs\n", bcm63xx_gpio_chip.ngpio); |
| 130 | 132 | ||
| 131 | return gpiochip_add(&bcm63xx_gpio_chip); | 133 | return gpiochip_add(&bcm63xx_gpio_chip); |
| 132 | } | 134 | } |
| 133 | |||
| 134 | arch_initcall(bcm63xx_gpio_init); | ||
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index b321d3b16877..9a06fa9f9f0c 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c | |||
| @@ -45,9 +45,6 @@ extern struct plat_smp_ops octeon_smp_ops; | |||
| 45 | extern void pci_console_init(const char *arg); | 45 | extern void pci_console_init(const char *arg); |
| 46 | #endif | 46 | #endif |
| 47 | 47 | ||
| 48 | #ifdef CONFIG_CAVIUM_RESERVE32 | ||
| 49 | extern uint64_t octeon_reserve32_memory; | ||
| 50 | #endif | ||
| 51 | static unsigned long long MAX_MEMORY = 512ull << 20; | 48 | static unsigned long long MAX_MEMORY = 512ull << 20; |
| 52 | 49 | ||
| 53 | struct octeon_boot_descriptor *octeon_boot_desc_ptr; | 50 | struct octeon_boot_descriptor *octeon_boot_desc_ptr; |
| @@ -186,54 +183,6 @@ void octeon_check_cpu_bist(void) | |||
| 186 | write_octeon_c0_dcacheerr(0); | 183 | write_octeon_c0_dcacheerr(0); |
| 187 | } | 184 | } |
| 188 | 185 | ||
| 189 | #ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB | ||
| 190 | /** | ||
| 191 | * Called on every core to setup the wired tlb entry needed | ||
| 192 | * if CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB is set. | ||
| 193 | * | ||
| 194 | */ | ||
| 195 | static void octeon_hal_setup_per_cpu_reserved32(void *unused) | ||
| 196 | { | ||
| 197 | /* | ||
| 198 | * The config has selected to wire the reserve32 memory for all | ||
| 199 | * userspace applications. We need to put a wired TLB entry in for each | ||
| 200 | * 512MB of reserve32 memory. We only handle double 256MB pages here, | ||
| 201 | * so reserve32 must be multiple of 512MB. | ||
| 202 | */ | ||
| 203 | uint32_t size = CONFIG_CAVIUM_RESERVE32; | ||
| 204 | uint32_t entrylo0 = | ||
| 205 | 0x7 | ((octeon_reserve32_memory & ((1ul << 40) - 1)) >> 6); | ||
| 206 | uint32_t entrylo1 = entrylo0 + (256 << 14); | ||
| 207 | uint32_t entryhi = (0x80000000UL - (CONFIG_CAVIUM_RESERVE32 << 20)); | ||
| 208 | while (size >= 512) { | ||
| 209 | #if 0 | ||
| 210 | pr_info("CPU%d: Adding double wired TLB entry for 0x%lx\n", | ||
| 211 | smp_processor_id(), entryhi); | ||
| 212 | #endif | ||
| 213 | add_wired_entry(entrylo0, entrylo1, entryhi, PM_256M); | ||
| 214 | entrylo0 += 512 << 14; | ||
| 215 | entrylo1 += 512 << 14; | ||
| 216 | entryhi += 512 << 20; | ||
| 217 | size -= 512; | ||
| 218 | } | ||
| 219 | } | ||
| 220 | #endif /* CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB */ | ||
| 221 | |||
| 222 | /** | ||
| 223 | * Called to release the named block which was used to made sure | ||
| 224 | * that nobody used the memory for something else during | ||
| 225 | * init. Now we'll free it so userspace apps can use this | ||
| 226 | * memory region with bootmem_alloc. | ||
| 227 | * | ||
| 228 | * This function is called only once from prom_free_prom_memory(). | ||
| 229 | */ | ||
| 230 | void octeon_hal_setup_reserved32(void) | ||
| 231 | { | ||
| 232 | #ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB | ||
| 233 | on_each_cpu(octeon_hal_setup_per_cpu_reserved32, NULL, 0, 1); | ||
| 234 | #endif | ||
| 235 | } | ||
| 236 | |||
| 237 | /** | 186 | /** |
| 238 | * Reboot Octeon | 187 | * Reboot Octeon |
| 239 | * | 188 | * |
| @@ -294,18 +243,6 @@ static void octeon_halt(void) | |||
| 294 | octeon_kill_core(NULL); | 243 | octeon_kill_core(NULL); |
| 295 | } | 244 | } |
| 296 | 245 | ||
| 297 | #if 0 | ||
| 298 | /** | ||
| 299 | * Platform time init specifics. | ||
| 300 | * Returns | ||
| 301 | */ | ||
| 302 | void __init plat_time_init(void) | ||
| 303 | { | ||
| 304 | /* Nothing special here, but we are required to have one */ | ||
| 305 | } | ||
| 306 | |||
| 307 | #endif | ||
| 308 | |||
| 309 | /** | 246 | /** |
| 310 | * Handle all the error condition interrupts that might occur. | 247 | * Handle all the error condition interrupts that might occur. |
| 311 | * | 248 | * |
| @@ -502,25 +439,13 @@ void __init prom_init(void) | |||
| 502 | * memory when it is getting memory from the | 439 | * memory when it is getting memory from the |
| 503 | * bootloader. Later, after the memory allocations are | 440 | * bootloader. Later, after the memory allocations are |
| 504 | * complete, the reserve32 will be freed. | 441 | * complete, the reserve32 will be freed. |
| 505 | */ | 442 | * |
| 506 | #ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB | ||
| 507 | if (CONFIG_CAVIUM_RESERVE32 & 0x1ff) | ||
| 508 | pr_err("CAVIUM_RESERVE32 isn't a multiple of 512MB. " | ||
| 509 | "This is required if CAVIUM_RESERVE32_USE_WIRED_TLB " | ||
| 510 | "is set\n"); | ||
| 511 | else | ||
| 512 | addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20, | ||
| 513 | 0, 0, 512 << 20, | ||
| 514 | "CAVIUM_RESERVE32", 0); | ||
| 515 | #else | ||
| 516 | /* | ||
| 517 | * Allocate memory for RESERVED32 aligned on 2MB boundary. This | 443 | * Allocate memory for RESERVED32 aligned on 2MB boundary. This |
| 518 | * is in case we later use hugetlb entries with it. | 444 | * is in case we later use hugetlb entries with it. |
| 519 | */ | 445 | */ |
| 520 | addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20, | 446 | addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20, |
| 521 | 0, 0, 2 << 20, | 447 | 0, 0, 2 << 20, |
| 522 | "CAVIUM_RESERVE32", 0); | 448 | "CAVIUM_RESERVE32", 0); |
| 523 | #endif | ||
| 524 | if (addr < 0) | 449 | if (addr < 0) |
| 525 | pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n"); | 450 | pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n"); |
| 526 | else | 451 | else |
| @@ -817,9 +742,4 @@ void prom_free_prom_memory(void) | |||
| 817 | panic("Unable to request_irq(OCTEON_IRQ_RML)\n"); | 742 | panic("Unable to request_irq(OCTEON_IRQ_RML)\n"); |
| 818 | } | 743 | } |
| 819 | #endif | 744 | #endif |
| 820 | |||
| 821 | /* This call is here so that it is performed after any TLB | ||
| 822 | initializations. It needs to be after these in case the | ||
| 823 | CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB option is set */ | ||
| 824 | octeon_hal_setup_reserved32(); | ||
| 825 | } | 745 | } |
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 51e980290ce1..6d99b9d8887d 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c | |||
| @@ -279,14 +279,6 @@ static void octeon_cpu_die(unsigned int cpu) | |||
| 279 | uint32_t avail_coremask; | 279 | uint32_t avail_coremask; |
| 280 | struct cvmx_bootmem_named_block_desc *block_desc; | 280 | struct cvmx_bootmem_named_block_desc *block_desc; |
| 281 | 281 | ||
| 282 | #ifdef CONFIG_CAVIUM_OCTEON_WATCHDOG | ||
| 283 | /* Disable the watchdog */ | ||
| 284 | cvmx_ciu_wdogx_t ciu_wdog; | ||
| 285 | ciu_wdog.u64 = cvmx_read_csr(CVMX_CIU_WDOGX(cpu)); | ||
| 286 | ciu_wdog.s.mode = 0; | ||
| 287 | cvmx_write_csr(CVMX_CIU_WDOGX(cpu), ciu_wdog.u64); | ||
| 288 | #endif | ||
| 289 | |||
| 290 | while (per_cpu(cpu_state, cpu) != CPU_DEAD) | 282 | while (per_cpu(cpu_state, cpu) != CPU_DEAD) |
| 291 | cpu_relax(); | 283 | cpu_relax(); |
| 292 | 284 | ||
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig index c2f06e38c854..0583bb29150f 100644 --- a/arch/mips/configs/bigsur_defconfig +++ b/arch/mips/configs/bigsur_defconfig | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | # | 1 | # |
| 2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
| 3 | # Linux kernel version: 2.6.26-rc8 | 3 | # Linux kernel version: 2.6.34-rc3 |
| 4 | # Wed Jul 2 17:02:55 2008 | 4 | # Sat Apr 3 16:32:11 2010 |
| 5 | # | 5 | # |
| 6 | CONFIG_MIPS=y | 6 | CONFIG_MIPS=y |
| 7 | 7 | ||
| @@ -9,20 +9,25 @@ CONFIG_MIPS=y | |||
| 9 | # Machine selection | 9 | # Machine selection |
| 10 | # | 10 | # |
| 11 | # CONFIG_MACH_ALCHEMY is not set | 11 | # CONFIG_MACH_ALCHEMY is not set |
| 12 | # CONFIG_AR7 is not set | ||
| 12 | # CONFIG_BCM47XX is not set | 13 | # CONFIG_BCM47XX is not set |
| 14 | # CONFIG_BCM63XX is not set | ||
| 13 | # CONFIG_MIPS_COBALT is not set | 15 | # CONFIG_MIPS_COBALT is not set |
| 14 | # CONFIG_MACH_DECSTATION is not set | 16 | # CONFIG_MACH_DECSTATION is not set |
| 15 | # CONFIG_MACH_JAZZ is not set | 17 | # CONFIG_MACH_JAZZ is not set |
| 16 | # CONFIG_LASAT is not set | 18 | # CONFIG_LASAT is not set |
| 17 | # CONFIG_LEMOTE_FULONG is not set | 19 | # CONFIG_MACH_LOONGSON is not set |
| 18 | # CONFIG_MIPS_MALTA is not set | 20 | # CONFIG_MIPS_MALTA is not set |
| 19 | # CONFIG_MIPS_SIM is not set | 21 | # CONFIG_MIPS_SIM is not set |
| 20 | # CONFIG_MARKEINS is not set | 22 | # CONFIG_NEC_MARKEINS is not set |
| 21 | # CONFIG_MACH_VR41XX is not set | 23 | # CONFIG_MACH_VR41XX is not set |
| 24 | # CONFIG_NXP_STB220 is not set | ||
| 25 | # CONFIG_NXP_STB225 is not set | ||
| 22 | # CONFIG_PNX8550_JBS is not set | 26 | # CONFIG_PNX8550_JBS is not set |
| 23 | # CONFIG_PNX8550_STB810 is not set | 27 | # CONFIG_PNX8550_STB810 is not set |
| 24 | # CONFIG_PMC_MSP is not set | 28 | # CONFIG_PMC_MSP is not set |
| 25 | # CONFIG_PMC_YOSEMITE is not set | 29 | # CONFIG_PMC_YOSEMITE is not set |
| 30 | # CONFIG_POWERTV is not set | ||
| 26 | # CONFIG_SGI_IP22 is not set | 31 | # CONFIG_SGI_IP22 is not set |
| 27 | # CONFIG_SGI_IP27 is not set | 32 | # CONFIG_SGI_IP27 is not set |
| 28 | # CONFIG_SGI_IP28 is not set | 33 | # CONFIG_SGI_IP28 is not set |
| @@ -36,10 +41,13 @@ CONFIG_MIPS=y | |||
| 36 | # CONFIG_SIBYTE_SENTOSA is not set | 41 | # CONFIG_SIBYTE_SENTOSA is not set |
| 37 | CONFIG_SIBYTE_BIGSUR=y | 42 | CONFIG_SIBYTE_BIGSUR=y |
| 38 | # CONFIG_SNI_RM is not set | 43 | # CONFIG_SNI_RM is not set |
| 39 | # CONFIG_TOSHIBA_JMR3927 is not set | 44 | # CONFIG_MACH_TX39XX is not set |
| 40 | # CONFIG_TOSHIBA_RBTX4927 is not set | 45 | # CONFIG_MACH_TX49XX is not set |
| 41 | # CONFIG_TOSHIBA_RBTX4938 is not set | 46 | # CONFIG_MIKROTIK_RB532 is not set |
| 42 | # CONFIG_WR_PPMC is not set | 47 | # CONFIG_WR_PPMC is not set |
| 48 | # CONFIG_CAVIUM_OCTEON_SIMULATOR is not set | ||
| 49 | # CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set | ||
| 50 | # CONFIG_ALCHEMY_GPIO_INDIRECT is not set | ||
| 43 | CONFIG_SIBYTE_BCM1x80=y | 51 | CONFIG_SIBYTE_BCM1x80=y |
| 44 | CONFIG_SIBYTE_SB1xxx_SOC=y | 52 | CONFIG_SIBYTE_SB1xxx_SOC=y |
| 45 | # CONFIG_CPU_SB1_PASS_1 is not set | 53 | # CONFIG_CPU_SB1_PASS_1 is not set |
| @@ -48,14 +56,13 @@ CONFIG_SIBYTE_SB1xxx_SOC=y | |||
| 48 | # CONFIG_CPU_SB1_PASS_4 is not set | 56 | # CONFIG_CPU_SB1_PASS_4 is not set |
| 49 | # CONFIG_CPU_SB1_PASS_2_112x is not set | 57 | # CONFIG_CPU_SB1_PASS_2_112x is not set |
| 50 | # CONFIG_CPU_SB1_PASS_3 is not set | 58 | # CONFIG_CPU_SB1_PASS_3 is not set |
| 51 | # CONFIG_SIMULATION is not set | ||
| 52 | # CONFIG_SB1_CEX_ALWAYS_FATAL is not set | 59 | # CONFIG_SB1_CEX_ALWAYS_FATAL is not set |
| 53 | # CONFIG_SB1_CERR_STALL is not set | 60 | # CONFIG_SB1_CERR_STALL is not set |
| 54 | CONFIG_SIBYTE_CFE=y | ||
| 55 | # CONFIG_SIBYTE_CFE_CONSOLE is not set | 61 | # CONFIG_SIBYTE_CFE_CONSOLE is not set |
| 56 | # CONFIG_SIBYTE_BUS_WATCHER is not set | 62 | # CONFIG_SIBYTE_BUS_WATCHER is not set |
| 57 | # CONFIG_SIBYTE_TBPROF is not set | 63 | # CONFIG_SIBYTE_TBPROF is not set |
| 58 | CONFIG_SIBYTE_HAS_ZBUS_PROFILING=y | 64 | CONFIG_SIBYTE_HAS_ZBUS_PROFILING=y |
| 65 | CONFIG_LOONGSON_UART_BASE=y | ||
| 59 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | 66 | CONFIG_RWSEM_GENERIC_SPINLOCK=y |
| 60 | # CONFIG_ARCH_HAS_ILOG2_U32 is not set | 67 | # CONFIG_ARCH_HAS_ILOG2_U32 is not set |
| 61 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set | 68 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set |
| @@ -66,15 +73,13 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y | |||
| 66 | CONFIG_GENERIC_CLOCKEVENTS=y | 73 | CONFIG_GENERIC_CLOCKEVENTS=y |
| 67 | CONFIG_GENERIC_TIME=y | 74 | CONFIG_GENERIC_TIME=y |
| 68 | CONFIG_GENERIC_CMOS_UPDATE=y | 75 | CONFIG_GENERIC_CMOS_UPDATE=y |
| 69 | CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y | 76 | CONFIG_SCHED_OMIT_FRAME_POINTER=y |
| 70 | # CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set | 77 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y |
| 71 | CONFIG_CEVT_BCM1480=y | 78 | CONFIG_CEVT_BCM1480=y |
| 72 | CONFIG_CSRC_BCM1480=y | 79 | CONFIG_CSRC_BCM1480=y |
| 73 | CONFIG_CFE=y | 80 | CONFIG_CFE=y |
| 74 | CONFIG_DMA_COHERENT=y | 81 | CONFIG_DMA_COHERENT=y |
| 75 | CONFIG_EARLY_PRINTK=y | ||
| 76 | CONFIG_SYS_HAS_EARLY_PRINTK=y | 82 | CONFIG_SYS_HAS_EARLY_PRINTK=y |
| 77 | # CONFIG_HOTPLUG_CPU is not set | ||
| 78 | # CONFIG_NO_IOPORT is not set | 83 | # CONFIG_NO_IOPORT is not set |
| 79 | CONFIG_CPU_BIG_ENDIAN=y | 84 | CONFIG_CPU_BIG_ENDIAN=y |
| 80 | # CONFIG_CPU_LITTLE_ENDIAN is not set | 85 | # CONFIG_CPU_LITTLE_ENDIAN is not set |
| @@ -88,7 +93,8 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5 | |||
| 88 | # | 93 | # |
| 89 | # CPU selection | 94 | # CPU selection |
| 90 | # | 95 | # |
| 91 | # CONFIG_CPU_LOONGSON2 is not set | 96 | # CONFIG_CPU_LOONGSON2E is not set |
| 97 | # CONFIG_CPU_LOONGSON2F is not set | ||
| 92 | # CONFIG_CPU_MIPS32_R1 is not set | 98 | # CONFIG_CPU_MIPS32_R1 is not set |
| 93 | # CONFIG_CPU_MIPS32_R2 is not set | 99 | # CONFIG_CPU_MIPS32_R2 is not set |
| 94 | # CONFIG_CPU_MIPS64_R1 is not set | 100 | # CONFIG_CPU_MIPS64_R1 is not set |
| @@ -101,6 +107,7 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5 | |||
| 101 | # CONFIG_CPU_TX49XX is not set | 107 | # CONFIG_CPU_TX49XX is not set |
| 102 | # CONFIG_CPU_R5000 is not set | 108 | # CONFIG_CPU_R5000 is not set |
| 103 | # CONFIG_CPU_R5432 is not set | 109 | # CONFIG_CPU_R5432 is not set |
| 110 | # CONFIG_CPU_R5500 is not set | ||
| 104 | # CONFIG_CPU_R6000 is not set | 111 | # CONFIG_CPU_R6000 is not set |
| 105 | # CONFIG_CPU_NEVADA is not set | 112 | # CONFIG_CPU_NEVADA is not set |
| 106 | # CONFIG_CPU_R8000 is not set | 113 | # CONFIG_CPU_R8000 is not set |
| @@ -108,6 +115,7 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5 | |||
| 108 | # CONFIG_CPU_RM7000 is not set | 115 | # CONFIG_CPU_RM7000 is not set |
| 109 | # CONFIG_CPU_RM9000 is not set | 116 | # CONFIG_CPU_RM9000 is not set |
| 110 | CONFIG_CPU_SB1=y | 117 | CONFIG_CPU_SB1=y |
| 118 | # CONFIG_CPU_CAVIUM_OCTEON is not set | ||
| 111 | CONFIG_SYS_HAS_CPU_SB1=y | 119 | CONFIG_SYS_HAS_CPU_SB1=y |
| 112 | CONFIG_WEAK_ORDERING=y | 120 | CONFIG_WEAK_ORDERING=y |
| 113 | CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y | 121 | CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y |
| @@ -123,11 +131,13 @@ CONFIG_64BIT=y | |||
| 123 | CONFIG_PAGE_SIZE_4KB=y | 131 | CONFIG_PAGE_SIZE_4KB=y |
| 124 | # CONFIG_PAGE_SIZE_8KB is not set | 132 | # CONFIG_PAGE_SIZE_8KB is not set |
| 125 | # CONFIG_PAGE_SIZE_16KB is not set | 133 | # CONFIG_PAGE_SIZE_16KB is not set |
| 134 | # CONFIG_PAGE_SIZE_32KB is not set | ||
| 126 | # CONFIG_PAGE_SIZE_64KB is not set | 135 | # CONFIG_PAGE_SIZE_64KB is not set |
| 127 | # CONFIG_SIBYTE_DMA_PAGEOPS is not set | 136 | # CONFIG_SIBYTE_DMA_PAGEOPS is not set |
| 128 | CONFIG_MIPS_MT_DISABLED=y | 137 | CONFIG_MIPS_MT_DISABLED=y |
| 129 | # CONFIG_MIPS_MT_SMP is not set | 138 | # CONFIG_MIPS_MT_SMP is not set |
| 130 | # CONFIG_MIPS_MT_SMTC is not set | 139 | # CONFIG_MIPS_MT_SMTC is not set |
| 140 | # CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set | ||
| 131 | CONFIG_CPU_HAS_SYNC=y | 141 | CONFIG_CPU_HAS_SYNC=y |
| 132 | CONFIG_GENERIC_HARDIRQS=y | 142 | CONFIG_GENERIC_HARDIRQS=y |
| 133 | CONFIG_GENERIC_IRQ_PROBE=y | 143 | CONFIG_GENERIC_IRQ_PROBE=y |
| @@ -142,18 +152,17 @@ CONFIG_FLATMEM_MANUAL=y | |||
| 142 | # CONFIG_SPARSEMEM_MANUAL is not set | 152 | # CONFIG_SPARSEMEM_MANUAL is not set |
| 143 | CONFIG_FLATMEM=y | 153 | CONFIG_FLATMEM=y |
| 144 | CONFIG_FLAT_NODE_MEM_MAP=y | 154 | CONFIG_FLAT_NODE_MEM_MAP=y |
| 145 | # CONFIG_SPARSEMEM_STATIC is not set | ||
| 146 | # CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set | ||
| 147 | CONFIG_PAGEFLAGS_EXTENDED=y | 155 | CONFIG_PAGEFLAGS_EXTENDED=y |
| 148 | CONFIG_SPLIT_PTLOCK_CPUS=4 | 156 | CONFIG_SPLIT_PTLOCK_CPUS=4 |
| 149 | CONFIG_RESOURCES_64BIT=y | 157 | CONFIG_PHYS_ADDR_T_64BIT=y |
| 150 | CONFIG_ZONE_DMA_FLAG=0 | 158 | CONFIG_ZONE_DMA_FLAG=0 |
| 151 | CONFIG_VIRT_TO_BUS=y | 159 | CONFIG_VIRT_TO_BUS=y |
| 160 | # CONFIG_KSM is not set | ||
| 161 | CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 | ||
| 152 | CONFIG_SMP=y | 162 | CONFIG_SMP=y |
| 153 | CONFIG_SYS_SUPPORTS_SMP=y | 163 | CONFIG_SYS_SUPPORTS_SMP=y |
| 154 | CONFIG_NR_CPUS_DEFAULT_4=y | 164 | CONFIG_NR_CPUS_DEFAULT_4=y |
| 155 | CONFIG_NR_CPUS=4 | 165 | CONFIG_NR_CPUS=4 |
| 156 | # CONFIG_MIPS_CMP is not set | ||
| 157 | CONFIG_TICK_ONESHOT=y | 166 | CONFIG_TICK_ONESHOT=y |
| 158 | CONFIG_NO_HZ=y | 167 | CONFIG_NO_HZ=y |
| 159 | CONFIG_HIGH_RES_TIMERS=y | 168 | CONFIG_HIGH_RES_TIMERS=y |
| @@ -175,6 +184,7 @@ CONFIG_SECCOMP=y | |||
| 175 | CONFIG_LOCKDEP_SUPPORT=y | 184 | CONFIG_LOCKDEP_SUPPORT=y |
| 176 | CONFIG_STACKTRACE_SUPPORT=y | 185 | CONFIG_STACKTRACE_SUPPORT=y |
| 177 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | 186 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" |
| 187 | CONFIG_CONSTRUCTORS=y | ||
| 178 | 188 | ||
| 179 | # | 189 | # |
| 180 | # General setup | 190 | # General setup |
| @@ -188,6 +198,7 @@ CONFIG_SWAP=y | |||
| 188 | CONFIG_SYSVIPC=y | 198 | CONFIG_SYSVIPC=y |
| 189 | CONFIG_SYSVIPC_SYSCTL=y | 199 | CONFIG_SYSVIPC_SYSCTL=y |
| 190 | CONFIG_POSIX_MQUEUE=y | 200 | CONFIG_POSIX_MQUEUE=y |
| 201 | CONFIG_POSIX_MQUEUE_SYSCTL=y | ||
| 191 | CONFIG_BSD_PROCESS_ACCT=y | 202 | CONFIG_BSD_PROCESS_ACCT=y |
| 192 | CONFIG_BSD_PROCESS_ACCT_V3=y | 203 | CONFIG_BSD_PROCESS_ACCT_V3=y |
| 193 | CONFIG_TASKSTATS=y | 204 | CONFIG_TASKSTATS=y |
| @@ -195,23 +206,39 @@ CONFIG_TASK_DELAY_ACCT=y | |||
| 195 | CONFIG_TASK_XACCT=y | 206 | CONFIG_TASK_XACCT=y |
| 196 | CONFIG_TASK_IO_ACCOUNTING=y | 207 | CONFIG_TASK_IO_ACCOUNTING=y |
| 197 | CONFIG_AUDIT=y | 208 | CONFIG_AUDIT=y |
| 209 | |||
| 210 | # | ||
| 211 | # RCU Subsystem | ||
| 212 | # | ||
| 213 | CONFIG_TREE_RCU=y | ||
| 214 | # CONFIG_TREE_PREEMPT_RCU is not set | ||
| 215 | # CONFIG_TINY_RCU is not set | ||
| 216 | # CONFIG_RCU_TRACE is not set | ||
| 217 | CONFIG_RCU_FANOUT=64 | ||
| 218 | # CONFIG_RCU_FANOUT_EXACT is not set | ||
| 219 | # CONFIG_RCU_FAST_NO_HZ is not set | ||
| 220 | # CONFIG_TREE_RCU_TRACE is not set | ||
| 198 | CONFIG_IKCONFIG=y | 221 | CONFIG_IKCONFIG=y |
| 199 | CONFIG_IKCONFIG_PROC=y | 222 | CONFIG_IKCONFIG_PROC=y |
| 200 | CONFIG_LOG_BUF_SHIFT=16 | 223 | CONFIG_LOG_BUF_SHIFT=16 |
| 201 | # CONFIG_CGROUPS is not set | 224 | # CONFIG_CGROUPS is not set |
| 202 | CONFIG_GROUP_SCHED=y | 225 | # CONFIG_SYSFS_DEPRECATED_V2 is not set |
| 203 | CONFIG_FAIR_GROUP_SCHED=y | ||
| 204 | # CONFIG_RT_GROUP_SCHED is not set | ||
| 205 | CONFIG_USER_SCHED=y | ||
| 206 | # CONFIG_CGROUP_SCHED is not set | ||
| 207 | CONFIG_SYSFS_DEPRECATED=y | ||
| 208 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
| 209 | CONFIG_RELAY=y | 226 | CONFIG_RELAY=y |
| 210 | # CONFIG_NAMESPACES is not set | 227 | CONFIG_NAMESPACES=y |
| 228 | CONFIG_UTS_NS=y | ||
| 229 | CONFIG_IPC_NS=y | ||
| 230 | CONFIG_USER_NS=y | ||
| 231 | CONFIG_PID_NS=y | ||
| 232 | CONFIG_NET_NS=y | ||
| 211 | CONFIG_BLK_DEV_INITRD=y | 233 | CONFIG_BLK_DEV_INITRD=y |
| 212 | CONFIG_INITRAMFS_SOURCE="" | 234 | CONFIG_INITRAMFS_SOURCE="" |
| 235 | CONFIG_RD_GZIP=y | ||
| 236 | # CONFIG_RD_BZIP2 is not set | ||
| 237 | # CONFIG_RD_LZMA is not set | ||
| 238 | # CONFIG_RD_LZO is not set | ||
| 213 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 239 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
| 214 | CONFIG_SYSCTL=y | 240 | CONFIG_SYSCTL=y |
| 241 | CONFIG_ANON_INODES=y | ||
| 215 | CONFIG_EMBEDDED=y | 242 | CONFIG_EMBEDDED=y |
| 216 | # CONFIG_SYSCTL_SYSCALL is not set | 243 | # CONFIG_SYSCTL_SYSCALL is not set |
| 217 | CONFIG_KALLSYMS=y | 244 | CONFIG_KALLSYMS=y |
| @@ -222,29 +249,36 @@ CONFIG_PRINTK=y | |||
| 222 | CONFIG_BUG=y | 249 | CONFIG_BUG=y |
| 223 | CONFIG_ELF_CORE=y | 250 | CONFIG_ELF_CORE=y |
| 224 | # CONFIG_PCSPKR_PLATFORM is not set | 251 | # CONFIG_PCSPKR_PLATFORM is not set |
| 225 | CONFIG_COMPAT_BRK=y | ||
| 226 | CONFIG_BASE_FULL=y | 252 | CONFIG_BASE_FULL=y |
| 227 | CONFIG_FUTEX=y | 253 | CONFIG_FUTEX=y |
| 228 | CONFIG_ANON_INODES=y | ||
| 229 | CONFIG_EPOLL=y | 254 | CONFIG_EPOLL=y |
| 230 | CONFIG_SIGNALFD=y | 255 | CONFIG_SIGNALFD=y |
| 231 | CONFIG_TIMERFD=y | 256 | CONFIG_TIMERFD=y |
| 232 | CONFIG_EVENTFD=y | 257 | CONFIG_EVENTFD=y |
| 233 | CONFIG_SHMEM=y | 258 | CONFIG_SHMEM=y |
| 259 | CONFIG_AIO=y | ||
| 260 | |||
| 261 | # | ||
| 262 | # Kernel Performance Events And Counters | ||
| 263 | # | ||
| 234 | CONFIG_VM_EVENT_COUNTERS=y | 264 | CONFIG_VM_EVENT_COUNTERS=y |
| 265 | CONFIG_PCI_QUIRKS=y | ||
| 266 | CONFIG_COMPAT_BRK=y | ||
| 235 | CONFIG_SLAB=y | 267 | CONFIG_SLAB=y |
| 236 | # CONFIG_SLUB is not set | 268 | # CONFIG_SLUB is not set |
| 237 | # CONFIG_SLOB is not set | 269 | # CONFIG_SLOB is not set |
| 238 | # CONFIG_PROFILING is not set | 270 | # CONFIG_PROFILING is not set |
| 239 | # CONFIG_MARKERS is not set | ||
| 240 | CONFIG_HAVE_OPROFILE=y | 271 | CONFIG_HAVE_OPROFILE=y |
| 241 | # CONFIG_HAVE_KPROBES is not set | 272 | CONFIG_HAVE_SYSCALL_WRAPPERS=y |
| 242 | # CONFIG_HAVE_KRETPROBES is not set | 273 | CONFIG_USE_GENERIC_SMP_HELPERS=y |
| 243 | # CONFIG_HAVE_DMA_ATTRS is not set | 274 | |
| 244 | CONFIG_PROC_PAGE_MONITOR=y | 275 | # |
| 276 | # GCOV-based kernel profiling | ||
| 277 | # | ||
| 278 | # CONFIG_SLOW_WORK is not set | ||
| 279 | CONFIG_HAVE_GENERIC_DMA_COHERENT=y | ||
| 245 | CONFIG_SLABINFO=y | 280 | CONFIG_SLABINFO=y |
| 246 | CONFIG_RT_MUTEXES=y | 281 | CONFIG_RT_MUTEXES=y |
| 247 | # CONFIG_TINY_SHMEM is not set | ||
| 248 | CONFIG_BASE_SMALL=0 | 282 | CONFIG_BASE_SMALL=0 |
| 249 | CONFIG_MODULES=y | 283 | CONFIG_MODULES=y |
| 250 | # CONFIG_MODULE_FORCE_LOAD is not set | 284 | # CONFIG_MODULE_FORCE_LOAD is not set |
| @@ -252,26 +286,52 @@ CONFIG_MODULE_UNLOAD=y | |||
| 252 | # CONFIG_MODULE_FORCE_UNLOAD is not set | 286 | # CONFIG_MODULE_FORCE_UNLOAD is not set |
| 253 | CONFIG_MODVERSIONS=y | 287 | CONFIG_MODVERSIONS=y |
| 254 | CONFIG_MODULE_SRCVERSION_ALL=y | 288 | CONFIG_MODULE_SRCVERSION_ALL=y |
| 255 | CONFIG_KMOD=y | ||
| 256 | CONFIG_STOP_MACHINE=y | 289 | CONFIG_STOP_MACHINE=y |
| 257 | CONFIG_BLOCK=y | 290 | CONFIG_BLOCK=y |
| 258 | # CONFIG_BLK_DEV_IO_TRACE is not set | ||
| 259 | # CONFIG_BLK_DEV_BSG is not set | 291 | # CONFIG_BLK_DEV_BSG is not set |
| 292 | # CONFIG_BLK_DEV_INTEGRITY is not set | ||
| 260 | CONFIG_BLOCK_COMPAT=y | 293 | CONFIG_BLOCK_COMPAT=y |
| 261 | 294 | ||
| 262 | # | 295 | # |
| 263 | # IO Schedulers | 296 | # IO Schedulers |
| 264 | # | 297 | # |
| 265 | CONFIG_IOSCHED_NOOP=y | 298 | CONFIG_IOSCHED_NOOP=y |
| 266 | CONFIG_IOSCHED_AS=y | ||
| 267 | CONFIG_IOSCHED_DEADLINE=y | 299 | CONFIG_IOSCHED_DEADLINE=y |
| 268 | CONFIG_IOSCHED_CFQ=y | 300 | CONFIG_IOSCHED_CFQ=y |
| 269 | CONFIG_DEFAULT_AS=y | ||
| 270 | # CONFIG_DEFAULT_DEADLINE is not set | 301 | # CONFIG_DEFAULT_DEADLINE is not set |
| 271 | # CONFIG_DEFAULT_CFQ is not set | 302 | CONFIG_DEFAULT_CFQ=y |
| 272 | # CONFIG_DEFAULT_NOOP is not set | 303 | # CONFIG_DEFAULT_NOOP is not set |
| 273 | CONFIG_DEFAULT_IOSCHED="anticipatory" | 304 | CONFIG_DEFAULT_IOSCHED="cfq" |
| 274 | CONFIG_CLASSIC_RCU=y | 305 | # CONFIG_INLINE_SPIN_TRYLOCK is not set |
| 306 | # CONFIG_INLINE_SPIN_TRYLOCK_BH is not set | ||
| 307 | # CONFIG_INLINE_SPIN_LOCK is not set | ||
| 308 | # CONFIG_INLINE_SPIN_LOCK_BH is not set | ||
| 309 | # CONFIG_INLINE_SPIN_LOCK_IRQ is not set | ||
| 310 | # CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set | ||
| 311 | CONFIG_INLINE_SPIN_UNLOCK=y | ||
| 312 | # CONFIG_INLINE_SPIN_UNLOCK_BH is not set | ||
| 313 | CONFIG_INLINE_SPIN_UNLOCK_IRQ=y | ||
| 314 | # CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set | ||
| 315 | # CONFIG_INLINE_READ_TRYLOCK is not set | ||
| 316 | # CONFIG_INLINE_READ_LOCK is not set | ||
| 317 | # CONFIG_INLINE_READ_LOCK_BH is not set | ||
| 318 | # CONFIG_INLINE_READ_LOCK_IRQ is not set | ||
| 319 | # CONFIG_INLINE_READ_LOCK_IRQSAVE is not set | ||
| 320 | CONFIG_INLINE_READ_UNLOCK=y | ||
| 321 | # CONFIG_INLINE_READ_UNLOCK_BH is not set | ||
| 322 | CONFIG_INLINE_READ_UNLOCK_IRQ=y | ||
| 323 | # CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set | ||
| 324 | # CONFIG_INLINE_WRITE_TRYLOCK is not set | ||
| 325 | # CONFIG_INLINE_WRITE_LOCK is not set | ||
| 326 | # CONFIG_INLINE_WRITE_LOCK_BH is not set | ||
| 327 | # CONFIG_INLINE_WRITE_LOCK_IRQ is not set | ||
| 328 | # CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set | ||
| 329 | CONFIG_INLINE_WRITE_UNLOCK=y | ||
| 330 | # CONFIG_INLINE_WRITE_UNLOCK_BH is not set | ||
| 331 | CONFIG_INLINE_WRITE_UNLOCK_IRQ=y | ||
| 332 | # CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set | ||
| 333 | CONFIG_MUTEX_SPIN_ON_OWNER=y | ||
| 334 | # CONFIG_FREEZER is not set | ||
| 275 | 335 | ||
| 276 | # | 336 | # |
| 277 | # Bus options (PCI, PCMCIA, EISA, ISA, TC) | 337 | # Bus options (PCI, PCMCIA, EISA, ISA, TC) |
| @@ -280,8 +340,9 @@ CONFIG_HW_HAS_PCI=y | |||
| 280 | CONFIG_PCI=y | 340 | CONFIG_PCI=y |
| 281 | CONFIG_PCI_DOMAINS=y | 341 | CONFIG_PCI_DOMAINS=y |
| 282 | # CONFIG_ARCH_SUPPORTS_MSI is not set | 342 | # CONFIG_ARCH_SUPPORTS_MSI is not set |
| 283 | CONFIG_PCI_LEGACY=y | ||
| 284 | CONFIG_PCI_DEBUG=y | 343 | CONFIG_PCI_DEBUG=y |
| 344 | # CONFIG_PCI_STUB is not set | ||
| 345 | # CONFIG_PCI_IOV is not set | ||
| 285 | CONFIG_MMU=y | 346 | CONFIG_MMU=y |
| 286 | CONFIG_ZONE_DMA32=y | 347 | CONFIG_ZONE_DMA32=y |
| 287 | # CONFIG_PCCARD is not set | 348 | # CONFIG_PCCARD is not set |
| @@ -291,6 +352,8 @@ CONFIG_ZONE_DMA32=y | |||
| 291 | # Executable file formats | 352 | # Executable file formats |
| 292 | # | 353 | # |
| 293 | CONFIG_BINFMT_ELF=y | 354 | CONFIG_BINFMT_ELF=y |
| 355 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | ||
| 356 | # CONFIG_HAVE_AOUT is not set | ||
| 294 | # CONFIG_BINFMT_MISC is not set | 357 | # CONFIG_BINFMT_MISC is not set |
| 295 | CONFIG_MIPS32_COMPAT=y | 358 | CONFIG_MIPS32_COMPAT=y |
| 296 | CONFIG_COMPAT=y | 359 | CONFIG_COMPAT=y |
| @@ -304,23 +367,20 @@ CONFIG_BINFMT_ELF32=y | |||
| 304 | # | 367 | # |
| 305 | CONFIG_PM=y | 368 | CONFIG_PM=y |
| 306 | # CONFIG_PM_DEBUG is not set | 369 | # CONFIG_PM_DEBUG is not set |
| 307 | 370 | # CONFIG_PM_RUNTIME is not set | |
| 308 | # | ||
| 309 | # Networking | ||
| 310 | # | ||
| 311 | CONFIG_NET=y | 371 | CONFIG_NET=y |
| 312 | 372 | ||
| 313 | # | 373 | # |
| 314 | # Networking options | 374 | # Networking options |
| 315 | # | 375 | # |
| 316 | CONFIG_PACKET=y | 376 | CONFIG_PACKET=y |
| 317 | CONFIG_PACKET_MMAP=y | ||
| 318 | CONFIG_UNIX=y | 377 | CONFIG_UNIX=y |
| 319 | CONFIG_XFRM=y | 378 | CONFIG_XFRM=y |
| 320 | CONFIG_XFRM_USER=m | 379 | CONFIG_XFRM_USER=m |
| 321 | # CONFIG_XFRM_SUB_POLICY is not set | 380 | # CONFIG_XFRM_SUB_POLICY is not set |
| 322 | CONFIG_XFRM_MIGRATE=y | 381 | CONFIG_XFRM_MIGRATE=y |
| 323 | # CONFIG_XFRM_STATISTICS is not set | 382 | # CONFIG_XFRM_STATISTICS is not set |
| 383 | CONFIG_XFRM_IPCOMP=m | ||
| 324 | CONFIG_NET_KEY=y | 384 | CONFIG_NET_KEY=y |
| 325 | CONFIG_NET_KEY_MIGRATE=y | 385 | CONFIG_NET_KEY_MIGRATE=y |
| 326 | CONFIG_INET=y | 386 | CONFIG_INET=y |
| @@ -353,36 +413,6 @@ CONFIG_INET_TCP_DIAG=y | |||
| 353 | CONFIG_TCP_CONG_CUBIC=y | 413 | CONFIG_TCP_CONG_CUBIC=y |
| 354 | CONFIG_DEFAULT_TCP_CONG="cubic" | 414 | CONFIG_DEFAULT_TCP_CONG="cubic" |
| 355 | CONFIG_TCP_MD5SIG=y | 415 | CONFIG_TCP_MD5SIG=y |
| 356 | CONFIG_IP_VS=m | ||
| 357 | # CONFIG_IP_VS_DEBUG is not set | ||
| 358 | CONFIG_IP_VS_TAB_BITS=12 | ||
| 359 | |||
| 360 | # | ||
| 361 | # IPVS transport protocol load balancing support | ||
| 362 | # | ||
| 363 | CONFIG_IP_VS_PROTO_TCP=y | ||
| 364 | CONFIG_IP_VS_PROTO_UDP=y | ||
| 365 | CONFIG_IP_VS_PROTO_ESP=y | ||
| 366 | CONFIG_IP_VS_PROTO_AH=y | ||
| 367 | |||
| 368 | # | ||
| 369 | # IPVS scheduler | ||
| 370 | # | ||
| 371 | CONFIG_IP_VS_RR=m | ||
| 372 | CONFIG_IP_VS_WRR=m | ||
| 373 | CONFIG_IP_VS_LC=m | ||
| 374 | CONFIG_IP_VS_WLC=m | ||
| 375 | CONFIG_IP_VS_LBLC=m | ||
| 376 | CONFIG_IP_VS_LBLCR=m | ||
| 377 | CONFIG_IP_VS_DH=m | ||
| 378 | CONFIG_IP_VS_SH=m | ||
| 379 | CONFIG_IP_VS_SED=m | ||
| 380 | CONFIG_IP_VS_NQ=m | ||
| 381 | |||
| 382 | # | ||
| 383 | # IPVS application helper | ||
| 384 | # | ||
| 385 | CONFIG_IP_VS_FTP=m | ||
| 386 | CONFIG_IPV6=m | 416 | CONFIG_IPV6=m |
| 387 | CONFIG_IPV6_PRIVACY=y | 417 | CONFIG_IPV6_PRIVACY=y |
| 388 | CONFIG_IPV6_ROUTER_PREF=y | 418 | CONFIG_IPV6_ROUTER_PREF=y |
| @@ -399,11 +429,13 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=m | |||
| 399 | CONFIG_INET6_XFRM_MODE_BEET=m | 429 | CONFIG_INET6_XFRM_MODE_BEET=m |
| 400 | CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m | 430 | CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m |
| 401 | CONFIG_IPV6_SIT=m | 431 | CONFIG_IPV6_SIT=m |
| 432 | CONFIG_IPV6_SIT_6RD=y | ||
| 402 | CONFIG_IPV6_NDISC_NODETYPE=y | 433 | CONFIG_IPV6_NDISC_NODETYPE=y |
| 403 | CONFIG_IPV6_TUNNEL=m | 434 | CONFIG_IPV6_TUNNEL=m |
| 404 | CONFIG_IPV6_MULTIPLE_TABLES=y | 435 | CONFIG_IPV6_MULTIPLE_TABLES=y |
| 405 | CONFIG_IPV6_SUBTREES=y | 436 | CONFIG_IPV6_SUBTREES=y |
| 406 | # CONFIG_IPV6_MROUTE is not set | 437 | # CONFIG_IPV6_MROUTE is not set |
| 438 | CONFIG_NETLABEL=y | ||
| 407 | CONFIG_NETWORK_SECMARK=y | 439 | CONFIG_NETWORK_SECMARK=y |
| 408 | CONFIG_NETFILTER=y | 440 | CONFIG_NETFILTER=y |
| 409 | # CONFIG_NETFILTER_DEBUG is not set | 441 | # CONFIG_NETFILTER_DEBUG is not set |
| @@ -421,19 +453,53 @@ CONFIG_NF_CONNTRACK_IRC=m | |||
| 421 | CONFIG_NF_CONNTRACK_SIP=m | 453 | CONFIG_NF_CONNTRACK_SIP=m |
| 422 | CONFIG_NF_CT_NETLINK=m | 454 | CONFIG_NF_CT_NETLINK=m |
| 423 | CONFIG_NETFILTER_XTABLES=m | 455 | CONFIG_NETFILTER_XTABLES=m |
| 456 | CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m | ||
| 424 | CONFIG_NETFILTER_XT_TARGET_MARK=m | 457 | CONFIG_NETFILTER_XT_TARGET_MARK=m |
| 425 | CONFIG_NETFILTER_XT_TARGET_NFLOG=m | 458 | CONFIG_NETFILTER_XT_TARGET_NFLOG=m |
| 426 | CONFIG_NETFILTER_XT_TARGET_SECMARK=m | 459 | CONFIG_NETFILTER_XT_TARGET_SECMARK=m |
| 427 | CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m | ||
| 428 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | 460 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m |
| 429 | CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m | 461 | CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m |
| 430 | CONFIG_NETFILTER_XT_MATCH_MARK=m | 462 | CONFIG_NETFILTER_XT_MATCH_MARK=m |
| 431 | CONFIG_NETFILTER_XT_MATCH_POLICY=m | 463 | CONFIG_NETFILTER_XT_MATCH_POLICY=m |
| 432 | CONFIG_NETFILTER_XT_MATCH_STATE=m | 464 | CONFIG_NETFILTER_XT_MATCH_STATE=m |
| 465 | CONFIG_IP_VS=m | ||
| 466 | CONFIG_IP_VS_IPV6=y | ||
| 467 | # CONFIG_IP_VS_DEBUG is not set | ||
| 468 | CONFIG_IP_VS_TAB_BITS=12 | ||
| 469 | |||
| 470 | # | ||
| 471 | # IPVS transport protocol load balancing support | ||
| 472 | # | ||
| 473 | CONFIG_IP_VS_PROTO_TCP=y | ||
| 474 | CONFIG_IP_VS_PROTO_UDP=y | ||
| 475 | CONFIG_IP_VS_PROTO_AH_ESP=y | ||
| 476 | CONFIG_IP_VS_PROTO_ESP=y | ||
| 477 | CONFIG_IP_VS_PROTO_AH=y | ||
| 478 | CONFIG_IP_VS_PROTO_SCTP=y | ||
| 479 | |||
| 480 | # | ||
| 481 | # IPVS scheduler | ||
| 482 | # | ||
| 483 | CONFIG_IP_VS_RR=m | ||
| 484 | CONFIG_IP_VS_WRR=m | ||
| 485 | CONFIG_IP_VS_LC=m | ||
| 486 | CONFIG_IP_VS_WLC=m | ||
| 487 | CONFIG_IP_VS_LBLC=m | ||
| 488 | CONFIG_IP_VS_LBLCR=m | ||
| 489 | CONFIG_IP_VS_DH=m | ||
| 490 | CONFIG_IP_VS_SH=m | ||
| 491 | CONFIG_IP_VS_SED=m | ||
| 492 | CONFIG_IP_VS_NQ=m | ||
| 493 | |||
| 494 | # | ||
| 495 | # IPVS application helper | ||
| 496 | # | ||
| 497 | CONFIG_IP_VS_FTP=m | ||
| 433 | 498 | ||
| 434 | # | 499 | # |
| 435 | # IP: Netfilter Configuration | 500 | # IP: Netfilter Configuration |
| 436 | # | 501 | # |
| 502 | CONFIG_NF_DEFRAG_IPV4=m | ||
| 437 | CONFIG_NF_CONNTRACK_IPV4=m | 503 | CONFIG_NF_CONNTRACK_IPV4=m |
| 438 | CONFIG_NF_CONNTRACK_PROC_COMPAT=y | 504 | CONFIG_NF_CONNTRACK_PROC_COMPAT=y |
| 439 | CONFIG_IP_NF_IPTABLES=m | 505 | CONFIG_IP_NF_IPTABLES=m |
| @@ -459,22 +525,44 @@ CONFIG_IP_NF_MANGLE=m | |||
| 459 | CONFIG_NF_CONNTRACK_IPV6=m | 525 | CONFIG_NF_CONNTRACK_IPV6=m |
| 460 | CONFIG_IP6_NF_IPTABLES=m | 526 | CONFIG_IP6_NF_IPTABLES=m |
| 461 | CONFIG_IP6_NF_MATCH_IPV6HEADER=m | 527 | CONFIG_IP6_NF_MATCH_IPV6HEADER=m |
| 462 | CONFIG_IP6_NF_FILTER=m | ||
| 463 | CONFIG_IP6_NF_TARGET_LOG=m | 528 | CONFIG_IP6_NF_TARGET_LOG=m |
| 529 | CONFIG_IP6_NF_FILTER=m | ||
| 464 | CONFIG_IP6_NF_TARGET_REJECT=m | 530 | CONFIG_IP6_NF_TARGET_REJECT=m |
| 465 | CONFIG_IP6_NF_MANGLE=m | 531 | CONFIG_IP6_NF_MANGLE=m |
| 466 | # CONFIG_IP_DCCP is not set | 532 | CONFIG_IP_DCCP=m |
| 533 | CONFIG_INET_DCCP_DIAG=m | ||
| 534 | |||
| 535 | # | ||
| 536 | # DCCP CCIDs Configuration (EXPERIMENTAL) | ||
| 537 | # | ||
| 538 | # CONFIG_IP_DCCP_CCID2_DEBUG is not set | ||
| 539 | CONFIG_IP_DCCP_CCID3=y | ||
| 540 | # CONFIG_IP_DCCP_CCID3_DEBUG is not set | ||
| 541 | CONFIG_IP_DCCP_CCID3_RTO=100 | ||
| 542 | CONFIG_IP_DCCP_TFRC_LIB=y | ||
| 543 | |||
| 544 | # | ||
| 545 | # DCCP Kernel Hacking | ||
| 546 | # | ||
| 547 | # CONFIG_IP_DCCP_DEBUG is not set | ||
| 467 | CONFIG_IP_SCTP=m | 548 | CONFIG_IP_SCTP=m |
| 468 | # CONFIG_SCTP_DBG_MSG is not set | 549 | # CONFIG_SCTP_DBG_MSG is not set |
| 469 | # CONFIG_SCTP_DBG_OBJCNT is not set | 550 | # CONFIG_SCTP_DBG_OBJCNT is not set |
| 470 | # CONFIG_SCTP_HMAC_NONE is not set | 551 | # CONFIG_SCTP_HMAC_NONE is not set |
| 471 | # CONFIG_SCTP_HMAC_SHA1 is not set | 552 | CONFIG_SCTP_HMAC_SHA1=y |
| 472 | CONFIG_SCTP_HMAC_MD5=y | 553 | # CONFIG_SCTP_HMAC_MD5 is not set |
| 554 | # CONFIG_RDS is not set | ||
| 473 | # CONFIG_TIPC is not set | 555 | # CONFIG_TIPC is not set |
| 474 | # CONFIG_ATM is not set | 556 | # CONFIG_ATM is not set |
| 475 | # CONFIG_BRIDGE is not set | 557 | CONFIG_STP=m |
| 476 | # CONFIG_VLAN_8021Q is not set | 558 | CONFIG_GARP=m |
| 559 | CONFIG_BRIDGE=m | ||
| 560 | CONFIG_BRIDGE_IGMP_SNOOPING=y | ||
| 561 | # CONFIG_NET_DSA is not set | ||
| 562 | CONFIG_VLAN_8021Q=m | ||
| 563 | CONFIG_VLAN_8021Q_GVRP=y | ||
| 477 | # CONFIG_DECNET is not set | 564 | # CONFIG_DECNET is not set |
| 565 | CONFIG_LLC=m | ||
| 478 | # CONFIG_LLC2 is not set | 566 | # CONFIG_LLC2 is not set |
| 479 | # CONFIG_IPX is not set | 567 | # CONFIG_IPX is not set |
| 480 | # CONFIG_ATALK is not set | 568 | # CONFIG_ATALK is not set |
| @@ -482,26 +570,47 @@ CONFIG_SCTP_HMAC_MD5=y | |||
| 482 | # CONFIG_LAPB is not set | 570 | # CONFIG_LAPB is not set |
| 483 | # CONFIG_ECONET is not set | 571 | # CONFIG_ECONET is not set |
| 484 | # CONFIG_WAN_ROUTER is not set | 572 | # CONFIG_WAN_ROUTER is not set |
| 573 | # CONFIG_PHONET is not set | ||
| 574 | # CONFIG_IEEE802154 is not set | ||
| 485 | # CONFIG_NET_SCHED is not set | 575 | # CONFIG_NET_SCHED is not set |
| 576 | # CONFIG_DCB is not set | ||
| 486 | 577 | ||
| 487 | # | 578 | # |
| 488 | # Network testing | 579 | # Network testing |
| 489 | # | 580 | # |
| 490 | # CONFIG_NET_PKTGEN is not set | 581 | # CONFIG_NET_PKTGEN is not set |
| 491 | # CONFIG_HAMRADIO is not set | 582 | CONFIG_HAMRADIO=y |
| 583 | |||
| 584 | # | ||
| 585 | # Packet Radio protocols | ||
| 586 | # | ||
| 587 | CONFIG_AX25=m | ||
| 588 | CONFIG_AX25_DAMA_SLAVE=y | ||
| 589 | CONFIG_NETROM=m | ||
| 590 | CONFIG_ROSE=m | ||
| 591 | |||
| 592 | # | ||
| 593 | # AX.25 network device drivers | ||
| 594 | # | ||
| 595 | CONFIG_MKISS=m | ||
| 596 | CONFIG_6PACK=m | ||
| 597 | CONFIG_BPQETHER=m | ||
| 598 | CONFIG_BAYCOM_SER_FDX=m | ||
| 599 | CONFIG_BAYCOM_SER_HDX=m | ||
| 600 | CONFIG_YAM=m | ||
| 492 | # CONFIG_CAN is not set | 601 | # CONFIG_CAN is not set |
| 493 | # CONFIG_IRDA is not set | 602 | # CONFIG_IRDA is not set |
| 494 | # CONFIG_BT is not set | 603 | # CONFIG_BT is not set |
| 495 | # CONFIG_AF_RXRPC is not set | 604 | # CONFIG_AF_RXRPC is not set |
| 496 | CONFIG_FIB_RULES=y | 605 | CONFIG_FIB_RULES=y |
| 606 | CONFIG_WIRELESS=y | ||
| 607 | # CONFIG_CFG80211 is not set | ||
| 608 | # CONFIG_LIB80211 is not set | ||
| 497 | 609 | ||
| 498 | # | 610 | # |
| 499 | # Wireless | 611 | # CFG80211 needs to be enabled for MAC80211 |
| 500 | # | 612 | # |
| 501 | # CONFIG_CFG80211 is not set | 613 | # CONFIG_WIMAX is not set |
| 502 | # CONFIG_WIRELESS_EXT is not set | ||
| 503 | # CONFIG_MAC80211 is not set | ||
| 504 | # CONFIG_IEEE80211 is not set | ||
| 505 | # CONFIG_RFKILL is not set | 614 | # CONFIG_RFKILL is not set |
| 506 | # CONFIG_NET_9P is not set | 615 | # CONFIG_NET_9P is not set |
| 507 | 616 | ||
| @@ -513,9 +622,12 @@ CONFIG_FIB_RULES=y | |||
| 513 | # Generic Driver Options | 622 | # Generic Driver Options |
| 514 | # | 623 | # |
| 515 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | 624 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" |
| 625 | # CONFIG_DEVTMPFS is not set | ||
| 516 | CONFIG_STANDALONE=y | 626 | CONFIG_STANDALONE=y |
| 517 | CONFIG_PREVENT_FIRMWARE_BUILD=y | 627 | CONFIG_PREVENT_FIRMWARE_BUILD=y |
| 518 | CONFIG_FW_LOADER=m | 628 | CONFIG_FW_LOADER=m |
| 629 | CONFIG_FIRMWARE_IN_KERNEL=y | ||
| 630 | CONFIG_EXTRA_FIRMWARE="" | ||
| 519 | # CONFIG_DEBUG_DRIVER is not set | 631 | # CONFIG_DEBUG_DRIVER is not set |
| 520 | # CONFIG_DEBUG_DEVRES is not set | 632 | # CONFIG_DEBUG_DEVRES is not set |
| 521 | # CONFIG_SYS_HYPERVISOR is not set | 633 | # CONFIG_SYS_HYPERVISOR is not set |
| @@ -530,33 +642,53 @@ CONFIG_BLK_DEV=y | |||
| 530 | # CONFIG_BLK_DEV_COW_COMMON is not set | 642 | # CONFIG_BLK_DEV_COW_COMMON is not set |
| 531 | CONFIG_BLK_DEV_LOOP=m | 643 | CONFIG_BLK_DEV_LOOP=m |
| 532 | CONFIG_BLK_DEV_CRYPTOLOOP=m | 644 | CONFIG_BLK_DEV_CRYPTOLOOP=m |
| 645 | |||
| 646 | # | ||
| 647 | # DRBD disabled because PROC_FS, INET or CONNECTOR not selected | ||
| 648 | # | ||
| 533 | CONFIG_BLK_DEV_NBD=m | 649 | CONFIG_BLK_DEV_NBD=m |
| 534 | # CONFIG_BLK_DEV_SX8 is not set | 650 | # CONFIG_BLK_DEV_SX8 is not set |
| 535 | # CONFIG_BLK_DEV_RAM is not set | 651 | # CONFIG_BLK_DEV_RAM is not set |
| 536 | # CONFIG_CDROM_PKTCDVD is not set | 652 | # CONFIG_CDROM_PKTCDVD is not set |
| 537 | # CONFIG_ATA_OVER_ETH is not set | 653 | # CONFIG_ATA_OVER_ETH is not set |
| 654 | # CONFIG_BLK_DEV_HD is not set | ||
| 538 | CONFIG_MISC_DEVICES=y | 655 | CONFIG_MISC_DEVICES=y |
| 656 | # CONFIG_AD525X_DPOT is not set | ||
| 539 | # CONFIG_PHANTOM is not set | 657 | # CONFIG_PHANTOM is not set |
| 540 | # CONFIG_EEPROM_93CX6 is not set | ||
| 541 | CONFIG_SGI_IOC4=m | 658 | CONFIG_SGI_IOC4=m |
| 542 | # CONFIG_TIFM_CORE is not set | 659 | # CONFIG_TIFM_CORE is not set |
| 660 | # CONFIG_ICS932S401 is not set | ||
| 543 | # CONFIG_ENCLOSURE_SERVICES is not set | 661 | # CONFIG_ENCLOSURE_SERVICES is not set |
| 662 | # CONFIG_HP_ILO is not set | ||
| 663 | # CONFIG_ISL29003 is not set | ||
| 664 | # CONFIG_SENSORS_TSL2550 is not set | ||
| 665 | # CONFIG_DS1682 is not set | ||
| 666 | # CONFIG_C2PORT is not set | ||
| 667 | |||
| 668 | # | ||
| 669 | # EEPROM support | ||
| 670 | # | ||
| 671 | # CONFIG_EEPROM_AT24 is not set | ||
| 672 | CONFIG_EEPROM_LEGACY=y | ||
| 673 | CONFIG_EEPROM_MAX6875=y | ||
| 674 | # CONFIG_EEPROM_93CX6 is not set | ||
| 675 | # CONFIG_CB710_CORE is not set | ||
| 544 | CONFIG_HAVE_IDE=y | 676 | CONFIG_HAVE_IDE=y |
| 545 | CONFIG_IDE=y | 677 | CONFIG_IDE=y |
| 546 | CONFIG_IDE_MAX_HWIFS=4 | ||
| 547 | CONFIG_BLK_DEV_IDE=y | ||
| 548 | 678 | ||
| 549 | # | 679 | # |
| 550 | # Please see Documentation/ide/ide.txt for help/info on IDE drives | 680 | # Please see Documentation/ide/ide.txt for help/info on IDE drives |
| 551 | # | 681 | # |
| 682 | CONFIG_IDE_XFER_MODE=y | ||
| 683 | CONFIG_IDE_TIMINGS=y | ||
| 684 | CONFIG_IDE_ATAPI=y | ||
| 552 | # CONFIG_BLK_DEV_IDE_SATA is not set | 685 | # CONFIG_BLK_DEV_IDE_SATA is not set |
| 553 | CONFIG_BLK_DEV_IDEDISK=y | 686 | CONFIG_IDE_GD=y |
| 554 | # CONFIG_IDEDISK_MULTI_MODE is not set | 687 | CONFIG_IDE_GD_ATA=y |
| 688 | # CONFIG_IDE_GD_ATAPI is not set | ||
| 555 | CONFIG_BLK_DEV_IDECD=y | 689 | CONFIG_BLK_DEV_IDECD=y |
| 556 | CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y | 690 | CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y |
| 557 | CONFIG_BLK_DEV_IDETAPE=y | 691 | CONFIG_BLK_DEV_IDETAPE=y |
| 558 | CONFIG_BLK_DEV_IDEFLOPPY=y | ||
| 559 | # CONFIG_BLK_DEV_IDESCSI is not set | ||
| 560 | # CONFIG_IDE_TASK_IOCTL is not set | 692 | # CONFIG_IDE_TASK_IOCTL is not set |
| 561 | CONFIG_IDE_PROC_FS=y | 693 | CONFIG_IDE_PROC_FS=y |
| 562 | 694 | ||
| @@ -581,14 +713,13 @@ CONFIG_BLK_DEV_IDEDMA_PCI=y | |||
| 581 | # CONFIG_BLK_DEV_AMD74XX is not set | 713 | # CONFIG_BLK_DEV_AMD74XX is not set |
| 582 | CONFIG_BLK_DEV_CMD64X=y | 714 | CONFIG_BLK_DEV_CMD64X=y |
| 583 | # CONFIG_BLK_DEV_TRIFLEX is not set | 715 | # CONFIG_BLK_DEV_TRIFLEX is not set |
| 584 | # CONFIG_BLK_DEV_CY82C693 is not set | ||
| 585 | # CONFIG_BLK_DEV_CS5520 is not set | 716 | # CONFIG_BLK_DEV_CS5520 is not set |
| 586 | # CONFIG_BLK_DEV_CS5530 is not set | 717 | # CONFIG_BLK_DEV_CS5530 is not set |
| 587 | # CONFIG_BLK_DEV_HPT34X is not set | ||
| 588 | # CONFIG_BLK_DEV_HPT366 is not set | 718 | # CONFIG_BLK_DEV_HPT366 is not set |
| 589 | # CONFIG_BLK_DEV_JMICRON is not set | 719 | # CONFIG_BLK_DEV_JMICRON is not set |
| 590 | # CONFIG_BLK_DEV_SC1200 is not set | 720 | # CONFIG_BLK_DEV_SC1200 is not set |
| 591 | # CONFIG_BLK_DEV_PIIX is not set | 721 | # CONFIG_BLK_DEV_PIIX is not set |
| 722 | # CONFIG_BLK_DEV_IT8172 is not set | ||
| 592 | CONFIG_BLK_DEV_IT8213=m | 723 | CONFIG_BLK_DEV_IT8213=m |
| 593 | # CONFIG_BLK_DEV_IT821X is not set | 724 | # CONFIG_BLK_DEV_IT821X is not set |
| 594 | # CONFIG_BLK_DEV_NS87415 is not set | 725 | # CONFIG_BLK_DEV_NS87415 is not set |
| @@ -600,14 +731,12 @@ CONFIG_BLK_DEV_IT8213=m | |||
| 600 | # CONFIG_BLK_DEV_TRM290 is not set | 731 | # CONFIG_BLK_DEV_TRM290 is not set |
| 601 | # CONFIG_BLK_DEV_VIA82CXXX is not set | 732 | # CONFIG_BLK_DEV_VIA82CXXX is not set |
| 602 | CONFIG_BLK_DEV_TC86C001=m | 733 | CONFIG_BLK_DEV_TC86C001=m |
| 603 | # CONFIG_BLK_DEV_IDE_SWARM is not set | ||
| 604 | CONFIG_BLK_DEV_IDEDMA=y | 734 | CONFIG_BLK_DEV_IDEDMA=y |
| 605 | # CONFIG_BLK_DEV_HD_ONLY is not set | ||
| 606 | # CONFIG_BLK_DEV_HD is not set | ||
| 607 | 735 | ||
| 608 | # | 736 | # |
| 609 | # SCSI device support | 737 | # SCSI device support |
| 610 | # | 738 | # |
| 739 | CONFIG_SCSI_MOD=y | ||
| 611 | # CONFIG_RAID_ATTRS is not set | 740 | # CONFIG_RAID_ATTRS is not set |
| 612 | CONFIG_SCSI=y | 741 | CONFIG_SCSI=y |
| 613 | CONFIG_SCSI_DMA=y | 742 | CONFIG_SCSI_DMA=y |
| @@ -625,10 +754,6 @@ CONFIG_BLK_DEV_SR=m | |||
| 625 | CONFIG_BLK_DEV_SR_VENDOR=y | 754 | CONFIG_BLK_DEV_SR_VENDOR=y |
| 626 | CONFIG_CHR_DEV_SG=m | 755 | CONFIG_CHR_DEV_SG=m |
| 627 | CONFIG_CHR_DEV_SCH=m | 756 | CONFIG_CHR_DEV_SCH=m |
| 628 | |||
| 629 | # | ||
| 630 | # Some SCSI devices (e.g. CD jukebox) support multiple LUNs | ||
| 631 | # | ||
| 632 | # CONFIG_SCSI_MULTI_LUN is not set | 757 | # CONFIG_SCSI_MULTI_LUN is not set |
| 633 | # CONFIG_SCSI_CONSTANTS is not set | 758 | # CONFIG_SCSI_CONSTANTS is not set |
| 634 | # CONFIG_SCSI_LOGGING is not set | 759 | # CONFIG_SCSI_LOGGING is not set |
| @@ -645,27 +770,36 @@ CONFIG_SCSI_WAIT_SCAN=m | |||
| 645 | # CONFIG_SCSI_SRP_ATTRS is not set | 770 | # CONFIG_SCSI_SRP_ATTRS is not set |
| 646 | CONFIG_SCSI_LOWLEVEL=y | 771 | CONFIG_SCSI_LOWLEVEL=y |
| 647 | # CONFIG_ISCSI_TCP is not set | 772 | # CONFIG_ISCSI_TCP is not set |
| 773 | # CONFIG_SCSI_CXGB3_ISCSI is not set | ||
| 774 | # CONFIG_SCSI_BNX2_ISCSI is not set | ||
| 775 | # CONFIG_BE2ISCSI is not set | ||
| 648 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set | 776 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set |
| 777 | # CONFIG_SCSI_HPSA is not set | ||
| 649 | # CONFIG_SCSI_3W_9XXX is not set | 778 | # CONFIG_SCSI_3W_9XXX is not set |
| 779 | # CONFIG_SCSI_3W_SAS is not set | ||
| 650 | # CONFIG_SCSI_ACARD is not set | 780 | # CONFIG_SCSI_ACARD is not set |
| 651 | # CONFIG_SCSI_AACRAID is not set | 781 | # CONFIG_SCSI_AACRAID is not set |
| 652 | # CONFIG_SCSI_AIC7XXX is not set | 782 | # CONFIG_SCSI_AIC7XXX is not set |
| 653 | # CONFIG_SCSI_AIC7XXX_OLD is not set | 783 | # CONFIG_SCSI_AIC7XXX_OLD is not set |
| 654 | # CONFIG_SCSI_AIC79XX is not set | 784 | # CONFIG_SCSI_AIC79XX is not set |
| 655 | # CONFIG_SCSI_AIC94XX is not set | 785 | # CONFIG_SCSI_AIC94XX is not set |
| 786 | # CONFIG_SCSI_MVSAS is not set | ||
| 656 | # CONFIG_SCSI_DPT_I2O is not set | 787 | # CONFIG_SCSI_DPT_I2O is not set |
| 657 | # CONFIG_SCSI_ADVANSYS is not set | 788 | # CONFIG_SCSI_ADVANSYS is not set |
| 658 | # CONFIG_SCSI_ARCMSR is not set | 789 | # CONFIG_SCSI_ARCMSR is not set |
| 659 | # CONFIG_MEGARAID_NEWGEN is not set | 790 | # CONFIG_MEGARAID_NEWGEN is not set |
| 660 | # CONFIG_MEGARAID_LEGACY is not set | 791 | # CONFIG_MEGARAID_LEGACY is not set |
| 661 | # CONFIG_MEGARAID_SAS is not set | 792 | # CONFIG_MEGARAID_SAS is not set |
| 793 | # CONFIG_SCSI_MPT2SAS is not set | ||
| 662 | # CONFIG_SCSI_HPTIOP is not set | 794 | # CONFIG_SCSI_HPTIOP is not set |
| 795 | # CONFIG_LIBFC is not set | ||
| 796 | # CONFIG_LIBFCOE is not set | ||
| 797 | # CONFIG_FCOE is not set | ||
| 663 | # CONFIG_SCSI_DMX3191D is not set | 798 | # CONFIG_SCSI_DMX3191D is not set |
| 664 | # CONFIG_SCSI_FUTURE_DOMAIN is not set | 799 | # CONFIG_SCSI_FUTURE_DOMAIN is not set |
| 665 | # CONFIG_SCSI_IPS is not set | 800 | # CONFIG_SCSI_IPS is not set |
| 666 | # CONFIG_SCSI_INITIO is not set | 801 | # CONFIG_SCSI_INITIO is not set |
| 667 | # CONFIG_SCSI_INIA100 is not set | 802 | # CONFIG_SCSI_INIA100 is not set |
| 668 | # CONFIG_SCSI_MVSAS is not set | ||
| 669 | # CONFIG_SCSI_STEX is not set | 803 | # CONFIG_SCSI_STEX is not set |
| 670 | # CONFIG_SCSI_SYM53C8XX_2 is not set | 804 | # CONFIG_SCSI_SYM53C8XX_2 is not set |
| 671 | # CONFIG_SCSI_IPR is not set | 805 | # CONFIG_SCSI_IPR is not set |
| @@ -676,9 +810,15 @@ CONFIG_SCSI_LOWLEVEL=y | |||
| 676 | # CONFIG_SCSI_DC395x is not set | 810 | # CONFIG_SCSI_DC395x is not set |
| 677 | # CONFIG_SCSI_DC390T is not set | 811 | # CONFIG_SCSI_DC390T is not set |
| 678 | # CONFIG_SCSI_DEBUG is not set | 812 | # CONFIG_SCSI_DEBUG is not set |
| 813 | # CONFIG_SCSI_PMCRAID is not set | ||
| 814 | # CONFIG_SCSI_PM8001 is not set | ||
| 679 | # CONFIG_SCSI_SRP is not set | 815 | # CONFIG_SCSI_SRP is not set |
| 816 | # CONFIG_SCSI_BFA_FC is not set | ||
| 817 | # CONFIG_SCSI_DH is not set | ||
| 818 | # CONFIG_SCSI_OSD_INITIATOR is not set | ||
| 680 | CONFIG_ATA=y | 819 | CONFIG_ATA=y |
| 681 | # CONFIG_ATA_NONSTANDARD is not set | 820 | # CONFIG_ATA_NONSTANDARD is not set |
| 821 | CONFIG_ATA_VERBOSE_ERROR=y | ||
| 682 | CONFIG_SATA_PMP=y | 822 | CONFIG_SATA_PMP=y |
| 683 | # CONFIG_SATA_AHCI is not set | 823 | # CONFIG_SATA_AHCI is not set |
| 684 | CONFIG_SATA_SIL24=y | 824 | CONFIG_SATA_SIL24=y |
| @@ -700,6 +840,7 @@ CONFIG_ATA_SFF=y | |||
| 700 | # CONFIG_PATA_ALI is not set | 840 | # CONFIG_PATA_ALI is not set |
| 701 | # CONFIG_PATA_AMD is not set | 841 | # CONFIG_PATA_AMD is not set |
| 702 | # CONFIG_PATA_ARTOP is not set | 842 | # CONFIG_PATA_ARTOP is not set |
| 843 | # CONFIG_PATA_ATP867X is not set | ||
| 703 | # CONFIG_PATA_ATIIXP is not set | 844 | # CONFIG_PATA_ATIIXP is not set |
| 704 | # CONFIG_PATA_CMD640_PCI is not set | 845 | # CONFIG_PATA_CMD640_PCI is not set |
| 705 | # CONFIG_PATA_CMD64X is not set | 846 | # CONFIG_PATA_CMD64X is not set |
| @@ -715,6 +856,7 @@ CONFIG_ATA_SFF=y | |||
| 715 | # CONFIG_PATA_IT821X is not set | 856 | # CONFIG_PATA_IT821X is not set |
| 716 | # CONFIG_PATA_IT8213 is not set | 857 | # CONFIG_PATA_IT8213 is not set |
| 717 | # CONFIG_PATA_JMICRON is not set | 858 | # CONFIG_PATA_JMICRON is not set |
| 859 | # CONFIG_PATA_LEGACY is not set | ||
| 718 | # CONFIG_PATA_TRIFLEX is not set | 860 | # CONFIG_PATA_TRIFLEX is not set |
| 719 | # CONFIG_PATA_MARVELL is not set | 861 | # CONFIG_PATA_MARVELL is not set |
| 720 | # CONFIG_PATA_MPIIX is not set | 862 | # CONFIG_PATA_MPIIX is not set |
| @@ -725,14 +867,16 @@ CONFIG_ATA_SFF=y | |||
| 725 | # CONFIG_PATA_NS87415 is not set | 867 | # CONFIG_PATA_NS87415 is not set |
| 726 | # CONFIG_PATA_OPTI is not set | 868 | # CONFIG_PATA_OPTI is not set |
| 727 | # CONFIG_PATA_OPTIDMA is not set | 869 | # CONFIG_PATA_OPTIDMA is not set |
| 870 | # CONFIG_PATA_PDC2027X is not set | ||
| 728 | # CONFIG_PATA_PDC_OLD is not set | 871 | # CONFIG_PATA_PDC_OLD is not set |
| 729 | # CONFIG_PATA_RADISYS is not set | 872 | # CONFIG_PATA_RADISYS is not set |
| 873 | # CONFIG_PATA_RDC is not set | ||
| 730 | # CONFIG_PATA_RZ1000 is not set | 874 | # CONFIG_PATA_RZ1000 is not set |
| 731 | # CONFIG_PATA_SC1200 is not set | 875 | # CONFIG_PATA_SC1200 is not set |
| 732 | # CONFIG_PATA_SERVERWORKS is not set | 876 | # CONFIG_PATA_SERVERWORKS is not set |
| 733 | # CONFIG_PATA_PDC2027X is not set | ||
| 734 | CONFIG_PATA_SIL680=y | 877 | CONFIG_PATA_SIL680=y |
| 735 | # CONFIG_PATA_SIS is not set | 878 | # CONFIG_PATA_SIS is not set |
| 879 | # CONFIG_PATA_TOSHIBA is not set | ||
| 736 | # CONFIG_PATA_VIA is not set | 880 | # CONFIG_PATA_VIA is not set |
| 737 | # CONFIG_PATA_WINBOND is not set | 881 | # CONFIG_PATA_WINBOND is not set |
| 738 | # CONFIG_PATA_PLATFORM is not set | 882 | # CONFIG_PATA_PLATFORM is not set |
| @@ -745,13 +889,16 @@ CONFIG_PATA_SIL680=y | |||
| 745 | # | 889 | # |
| 746 | 890 | ||
| 747 | # | 891 | # |
| 748 | # Enable only one of the two stacks, unless you know what you are doing | 892 | # You can enable one or both FireWire driver stacks. |
| 893 | # | ||
| 894 | |||
| 895 | # | ||
| 896 | # The newer stack is recommended. | ||
| 749 | # | 897 | # |
| 750 | # CONFIG_FIREWIRE is not set | 898 | # CONFIG_FIREWIRE is not set |
| 751 | # CONFIG_IEEE1394 is not set | 899 | # CONFIG_IEEE1394 is not set |
| 752 | # CONFIG_I2O is not set | 900 | # CONFIG_I2O is not set |
| 753 | CONFIG_NETDEVICES=y | 901 | CONFIG_NETDEVICES=y |
| 754 | # CONFIG_NETDEVICES_MULTIQUEUE is not set | ||
| 755 | # CONFIG_DUMMY is not set | 902 | # CONFIG_DUMMY is not set |
| 756 | # CONFIG_BONDING is not set | 903 | # CONFIG_BONDING is not set |
| 757 | # CONFIG_MACVLAN is not set | 904 | # CONFIG_MACVLAN is not set |
| @@ -774,6 +921,9 @@ CONFIG_PHYLIB=y | |||
| 774 | # CONFIG_BROADCOM_PHY is not set | 921 | # CONFIG_BROADCOM_PHY is not set |
| 775 | # CONFIG_ICPLUS_PHY is not set | 922 | # CONFIG_ICPLUS_PHY is not set |
| 776 | # CONFIG_REALTEK_PHY is not set | 923 | # CONFIG_REALTEK_PHY is not set |
| 924 | # CONFIG_NATIONAL_PHY is not set | ||
| 925 | # CONFIG_STE10XP is not set | ||
| 926 | # CONFIG_LSI_ET1011C_PHY is not set | ||
| 777 | # CONFIG_FIXED_PHY is not set | 927 | # CONFIG_FIXED_PHY is not set |
| 778 | # CONFIG_MDIO_BITBANG is not set | 928 | # CONFIG_MDIO_BITBANG is not set |
| 779 | CONFIG_NET_ETHERNET=y | 929 | CONFIG_NET_ETHERNET=y |
| @@ -783,23 +933,33 @@ CONFIG_MII=y | |||
| 783 | # CONFIG_SUNGEM is not set | 933 | # CONFIG_SUNGEM is not set |
| 784 | # CONFIG_CASSINI is not set | 934 | # CONFIG_CASSINI is not set |
| 785 | # CONFIG_NET_VENDOR_3COM is not set | 935 | # CONFIG_NET_VENDOR_3COM is not set |
| 936 | # CONFIG_SMC91X is not set | ||
| 786 | # CONFIG_DM9000 is not set | 937 | # CONFIG_DM9000 is not set |
| 938 | # CONFIG_ETHOC is not set | ||
| 939 | # CONFIG_SMSC911X is not set | ||
| 940 | # CONFIG_DNET is not set | ||
| 787 | # CONFIG_NET_TULIP is not set | 941 | # CONFIG_NET_TULIP is not set |
| 788 | # CONFIG_HP100 is not set | 942 | # CONFIG_HP100 is not set |
| 789 | # CONFIG_IBM_NEW_EMAC_ZMII is not set | 943 | # CONFIG_IBM_NEW_EMAC_ZMII is not set |
| 790 | # CONFIG_IBM_NEW_EMAC_RGMII is not set | 944 | # CONFIG_IBM_NEW_EMAC_RGMII is not set |
| 791 | # CONFIG_IBM_NEW_EMAC_TAH is not set | 945 | # CONFIG_IBM_NEW_EMAC_TAH is not set |
| 792 | # CONFIG_IBM_NEW_EMAC_EMAC4 is not set | 946 | # CONFIG_IBM_NEW_EMAC_EMAC4 is not set |
| 947 | # CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set | ||
| 948 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set | ||
| 949 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set | ||
| 793 | # CONFIG_NET_PCI is not set | 950 | # CONFIG_NET_PCI is not set |
| 794 | # CONFIG_B44 is not set | 951 | # CONFIG_B44 is not set |
| 952 | # CONFIG_KS8842 is not set | ||
| 953 | # CONFIG_KS8851_MLL is not set | ||
| 954 | # CONFIG_ATL2 is not set | ||
| 795 | CONFIG_NETDEV_1000=y | 955 | CONFIG_NETDEV_1000=y |
| 796 | # CONFIG_ACENIC is not set | 956 | # CONFIG_ACENIC is not set |
| 797 | # CONFIG_DL2K is not set | 957 | # CONFIG_DL2K is not set |
| 798 | # CONFIG_E1000 is not set | 958 | # CONFIG_E1000 is not set |
| 799 | # CONFIG_E1000E is not set | 959 | # CONFIG_E1000E is not set |
| 800 | # CONFIG_E1000E_ENABLED is not set | ||
| 801 | # CONFIG_IP1000 is not set | 960 | # CONFIG_IP1000 is not set |
| 802 | # CONFIG_IGB is not set | 961 | # CONFIG_IGB is not set |
| 962 | # CONFIG_IGBVF is not set | ||
| 803 | # CONFIG_NS83820 is not set | 963 | # CONFIG_NS83820 is not set |
| 804 | # CONFIG_HAMACHI is not set | 964 | # CONFIG_HAMACHI is not set |
| 805 | # CONFIG_YELLOWFIN is not set | 965 | # CONFIG_YELLOWFIN is not set |
| @@ -811,29 +971,42 @@ CONFIG_SB1250_MAC=y | |||
| 811 | # CONFIG_VIA_VELOCITY is not set | 971 | # CONFIG_VIA_VELOCITY is not set |
| 812 | # CONFIG_TIGON3 is not set | 972 | # CONFIG_TIGON3 is not set |
| 813 | # CONFIG_BNX2 is not set | 973 | # CONFIG_BNX2 is not set |
| 974 | # CONFIG_CNIC is not set | ||
| 814 | # CONFIG_QLA3XXX is not set | 975 | # CONFIG_QLA3XXX is not set |
| 815 | # CONFIG_ATL1 is not set | 976 | # CONFIG_ATL1 is not set |
| 977 | # CONFIG_ATL1E is not set | ||
| 978 | # CONFIG_ATL1C is not set | ||
| 979 | # CONFIG_JME is not set | ||
| 816 | CONFIG_NETDEV_10000=y | 980 | CONFIG_NETDEV_10000=y |
| 981 | CONFIG_MDIO=m | ||
| 817 | # CONFIG_CHELSIO_T1 is not set | 982 | # CONFIG_CHELSIO_T1 is not set |
| 983 | CONFIG_CHELSIO_T3_DEPENDS=y | ||
| 818 | CONFIG_CHELSIO_T3=m | 984 | CONFIG_CHELSIO_T3=m |
| 985 | # CONFIG_ENIC is not set | ||
| 819 | # CONFIG_IXGBE is not set | 986 | # CONFIG_IXGBE is not set |
| 820 | # CONFIG_IXGB is not set | 987 | # CONFIG_IXGB is not set |
| 821 | # CONFIG_S2IO is not set | 988 | # CONFIG_S2IO is not set |
| 989 | # CONFIG_VXGE is not set | ||
| 822 | # CONFIG_MYRI10GE is not set | 990 | # CONFIG_MYRI10GE is not set |
| 823 | CONFIG_NETXEN_NIC=m | 991 | CONFIG_NETXEN_NIC=m |
| 824 | # CONFIG_NIU is not set | 992 | # CONFIG_NIU is not set |
| 993 | # CONFIG_MLX4_EN is not set | ||
| 825 | # CONFIG_MLX4_CORE is not set | 994 | # CONFIG_MLX4_CORE is not set |
| 826 | # CONFIG_TEHUTI is not set | 995 | # CONFIG_TEHUTI is not set |
| 827 | # CONFIG_BNX2X is not set | 996 | # CONFIG_BNX2X is not set |
| 997 | # CONFIG_QLCNIC is not set | ||
| 998 | # CONFIG_QLGE is not set | ||
| 828 | # CONFIG_SFC is not set | 999 | # CONFIG_SFC is not set |
| 1000 | # CONFIG_BE2NET is not set | ||
| 829 | # CONFIG_TR is not set | 1001 | # CONFIG_TR is not set |
| 1002 | CONFIG_WLAN=y | ||
| 1003 | # CONFIG_ATMEL is not set | ||
| 1004 | # CONFIG_PRISM54 is not set | ||
| 1005 | # CONFIG_HOSTAP is not set | ||
| 830 | 1006 | ||
| 831 | # | 1007 | # |
| 832 | # Wireless LAN | 1008 | # Enable WiMAX (Networking options) to see the WiMAX drivers |
| 833 | # | 1009 | # |
| 834 | # CONFIG_WLAN_PRE80211 is not set | ||
| 835 | # CONFIG_WLAN_80211 is not set | ||
| 836 | # CONFIG_IWLWIFI_LEDS is not set | ||
| 837 | # CONFIG_WAN is not set | 1010 | # CONFIG_WAN is not set |
| 838 | # CONFIG_FDDI is not set | 1011 | # CONFIG_FDDI is not set |
| 839 | # CONFIG_HIPPI is not set | 1012 | # CONFIG_HIPPI is not set |
| @@ -856,6 +1029,7 @@ CONFIG_SLIP_MODE_SLIP6=y | |||
| 856 | # CONFIG_NETCONSOLE is not set | 1029 | # CONFIG_NETCONSOLE is not set |
| 857 | # CONFIG_NETPOLL is not set | 1030 | # CONFIG_NETPOLL is not set |
| 858 | # CONFIG_NET_POLL_CONTROLLER is not set | 1031 | # CONFIG_NET_POLL_CONTROLLER is not set |
| 1032 | # CONFIG_VMXNET3 is not set | ||
| 859 | # CONFIG_ISDN is not set | 1033 | # CONFIG_ISDN is not set |
| 860 | # CONFIG_PHONE is not set | 1034 | # CONFIG_PHONE is not set |
| 861 | 1035 | ||
| @@ -873,6 +1047,7 @@ CONFIG_SERIO_SERPORT=y | |||
| 873 | # CONFIG_SERIO_PCIPS2 is not set | 1047 | # CONFIG_SERIO_PCIPS2 is not set |
| 874 | # CONFIG_SERIO_LIBPS2 is not set | 1048 | # CONFIG_SERIO_LIBPS2 is not set |
| 875 | CONFIG_SERIO_RAW=m | 1049 | CONFIG_SERIO_RAW=m |
| 1050 | # CONFIG_SERIO_ALTERA_PS2 is not set | ||
| 876 | # CONFIG_GAMEPORT is not set | 1051 | # CONFIG_GAMEPORT is not set |
| 877 | 1052 | ||
| 878 | # | 1053 | # |
| @@ -893,8 +1068,6 @@ CONFIG_SERIAL_NONSTANDARD=y | |||
| 893 | # CONFIG_N_HDLC is not set | 1068 | # CONFIG_N_HDLC is not set |
| 894 | # CONFIG_RISCOM8 is not set | 1069 | # CONFIG_RISCOM8 is not set |
| 895 | # CONFIG_SPECIALIX is not set | 1070 | # CONFIG_SPECIALIX is not set |
| 896 | # CONFIG_SX is not set | ||
| 897 | # CONFIG_RIO is not set | ||
| 898 | # CONFIG_STALDRV is not set | 1071 | # CONFIG_STALDRV is not set |
| 899 | # CONFIG_NOZOMI is not set | 1072 | # CONFIG_NOZOMI is not set |
| 900 | 1073 | ||
| @@ -911,7 +1084,9 @@ CONFIG_SERIAL_SB1250_DUART_CONSOLE=y | |||
| 911 | CONFIG_SERIAL_CORE=y | 1084 | CONFIG_SERIAL_CORE=y |
| 912 | CONFIG_SERIAL_CORE_CONSOLE=y | 1085 | CONFIG_SERIAL_CORE_CONSOLE=y |
| 913 | # CONFIG_SERIAL_JSM is not set | 1086 | # CONFIG_SERIAL_JSM is not set |
| 1087 | # CONFIG_SERIAL_TIMBERDALE is not set | ||
| 914 | CONFIG_UNIX98_PTYS=y | 1088 | CONFIG_UNIX98_PTYS=y |
| 1089 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set | ||
| 915 | CONFIG_LEGACY_PTYS=y | 1090 | CONFIG_LEGACY_PTYS=y |
| 916 | CONFIG_LEGACY_PTY_COUNT=256 | 1091 | CONFIG_LEGACY_PTY_COUNT=256 |
| 917 | # CONFIG_IPMI_HANDLER is not set | 1092 | # CONFIG_IPMI_HANDLER is not set |
| @@ -923,89 +1098,99 @@ CONFIG_LEGACY_PTY_COUNT=256 | |||
| 923 | CONFIG_DEVPORT=y | 1098 | CONFIG_DEVPORT=y |
| 924 | CONFIG_I2C=y | 1099 | CONFIG_I2C=y |
| 925 | CONFIG_I2C_BOARDINFO=y | 1100 | CONFIG_I2C_BOARDINFO=y |
| 1101 | CONFIG_I2C_COMPAT=y | ||
| 926 | CONFIG_I2C_CHARDEV=y | 1102 | CONFIG_I2C_CHARDEV=y |
| 1103 | CONFIG_I2C_HELPER_AUTO=y | ||
| 927 | 1104 | ||
| 928 | # | 1105 | # |
| 929 | # I2C Hardware Bus support | 1106 | # I2C Hardware Bus support |
| 930 | # | 1107 | # |
| 1108 | |||
| 1109 | # | ||
| 1110 | # PC SMBus host controller drivers | ||
| 1111 | # | ||
| 931 | # CONFIG_I2C_ALI1535 is not set | 1112 | # CONFIG_I2C_ALI1535 is not set |
| 932 | # CONFIG_I2C_ALI1563 is not set | 1113 | # CONFIG_I2C_ALI1563 is not set |
| 933 | # CONFIG_I2C_ALI15X3 is not set | 1114 | # CONFIG_I2C_ALI15X3 is not set |
| 934 | # CONFIG_I2C_AMD756 is not set | 1115 | # CONFIG_I2C_AMD756 is not set |
| 935 | # CONFIG_I2C_AMD8111 is not set | 1116 | # CONFIG_I2C_AMD8111 is not set |
| 936 | # CONFIG_I2C_I801 is not set | 1117 | # CONFIG_I2C_I801 is not set |
| 937 | # CONFIG_I2C_I810 is not set | 1118 | # CONFIG_I2C_ISCH is not set |
| 938 | # CONFIG_I2C_PIIX4 is not set | 1119 | # CONFIG_I2C_PIIX4 is not set |
| 939 | # CONFIG_I2C_NFORCE2 is not set | 1120 | # CONFIG_I2C_NFORCE2 is not set |
| 940 | # CONFIG_I2C_OCORES is not set | ||
| 941 | # CONFIG_I2C_PARPORT_LIGHT is not set | ||
| 942 | # CONFIG_I2C_PROSAVAGE is not set | ||
| 943 | # CONFIG_I2C_SAVAGE4 is not set | ||
| 944 | CONFIG_I2C_SIBYTE=y | ||
| 945 | # CONFIG_I2C_SIMTEC is not set | ||
| 946 | # CONFIG_I2C_SIS5595 is not set | 1121 | # CONFIG_I2C_SIS5595 is not set |
| 947 | # CONFIG_I2C_SIS630 is not set | 1122 | # CONFIG_I2C_SIS630 is not set |
| 948 | # CONFIG_I2C_SIS96X is not set | 1123 | # CONFIG_I2C_SIS96X is not set |
| 949 | # CONFIG_I2C_TAOS_EVM is not set | ||
| 950 | # CONFIG_I2C_STUB is not set | ||
| 951 | # CONFIG_I2C_VIA is not set | 1124 | # CONFIG_I2C_VIA is not set |
| 952 | # CONFIG_I2C_VIAPRO is not set | 1125 | # CONFIG_I2C_VIAPRO is not set |
| 953 | # CONFIG_I2C_VOODOO3 is not set | ||
| 954 | # CONFIG_I2C_PCA_PLATFORM is not set | ||
| 955 | 1126 | ||
| 956 | # | 1127 | # |
| 957 | # Miscellaneous I2C Chip support | 1128 | # I2C system bus drivers (mostly embedded / system-on-chip) |
| 958 | # | 1129 | # |
| 959 | # CONFIG_DS1682 is not set | 1130 | # CONFIG_I2C_OCORES is not set |
| 960 | CONFIG_EEPROM_LEGACY=y | 1131 | # CONFIG_I2C_SIMTEC is not set |
| 961 | CONFIG_SENSORS_PCF8574=y | 1132 | # CONFIG_I2C_XILINX is not set |
| 962 | # CONFIG_PCF8575 is not set | 1133 | |
| 963 | CONFIG_SENSORS_PCF8591=y | 1134 | # |
| 964 | CONFIG_EEPROM_MAX6875=y | 1135 | # External I2C/SMBus adapter drivers |
| 965 | # CONFIG_SENSORS_TSL2550 is not set | 1136 | # |
| 1137 | # CONFIG_I2C_PARPORT_LIGHT is not set | ||
| 1138 | # CONFIG_I2C_TAOS_EVM is not set | ||
| 1139 | |||
| 1140 | # | ||
| 1141 | # Other I2C/SMBus bus drivers | ||
| 1142 | # | ||
| 1143 | # CONFIG_I2C_PCA_PLATFORM is not set | ||
| 1144 | CONFIG_I2C_SIBYTE=y | ||
| 1145 | # CONFIG_I2C_STUB is not set | ||
| 966 | CONFIG_I2C_DEBUG_CORE=y | 1146 | CONFIG_I2C_DEBUG_CORE=y |
| 967 | CONFIG_I2C_DEBUG_ALGO=y | 1147 | CONFIG_I2C_DEBUG_ALGO=y |
| 968 | CONFIG_I2C_DEBUG_BUS=y | 1148 | CONFIG_I2C_DEBUG_BUS=y |
| 969 | CONFIG_I2C_DEBUG_CHIP=y | ||
| 970 | # CONFIG_SPI is not set | 1149 | # CONFIG_SPI is not set |
| 1150 | |||
| 1151 | # | ||
| 1152 | # PPS support | ||
| 1153 | # | ||
| 1154 | # CONFIG_PPS is not set | ||
| 971 | # CONFIG_W1 is not set | 1155 | # CONFIG_W1 is not set |
| 972 | # CONFIG_POWER_SUPPLY is not set | 1156 | # CONFIG_POWER_SUPPLY is not set |
| 973 | # CONFIG_HWMON is not set | 1157 | # CONFIG_HWMON is not set |
| 974 | # CONFIG_THERMAL is not set | 1158 | # CONFIG_THERMAL is not set |
| 975 | # CONFIG_THERMAL_HWMON is not set | ||
| 976 | # CONFIG_WATCHDOG is not set | 1159 | # CONFIG_WATCHDOG is not set |
| 1160 | CONFIG_SSB_POSSIBLE=y | ||
| 977 | 1161 | ||
| 978 | # | 1162 | # |
| 979 | # Sonics Silicon Backplane | 1163 | # Sonics Silicon Backplane |
| 980 | # | 1164 | # |
| 981 | CONFIG_SSB_POSSIBLE=y | ||
| 982 | # CONFIG_SSB is not set | 1165 | # CONFIG_SSB is not set |
| 983 | 1166 | ||
| 984 | # | 1167 | # |
| 985 | # Multifunction device drivers | 1168 | # Multifunction device drivers |
| 986 | # | 1169 | # |
| 1170 | # CONFIG_MFD_CORE is not set | ||
| 1171 | # CONFIG_MFD_88PM860X is not set | ||
| 987 | # CONFIG_MFD_SM501 is not set | 1172 | # CONFIG_MFD_SM501 is not set |
| 988 | # CONFIG_HTC_PASIC3 is not set | 1173 | # CONFIG_HTC_PASIC3 is not set |
| 989 | 1174 | # CONFIG_TWL4030_CORE is not set | |
| 990 | # | 1175 | # CONFIG_MFD_TMIO is not set |
| 991 | # Multimedia devices | 1176 | # CONFIG_PMIC_DA903X is not set |
| 992 | # | 1177 | # CONFIG_PMIC_ADP5520 is not set |
| 993 | 1178 | # CONFIG_MFD_MAX8925 is not set | |
| 994 | # | 1179 | # CONFIG_MFD_WM8400 is not set |
| 995 | # Multimedia core support | 1180 | # CONFIG_MFD_WM831X is not set |
| 996 | # | 1181 | # CONFIG_MFD_WM8350_I2C is not set |
| 997 | # CONFIG_VIDEO_DEV is not set | 1182 | # CONFIG_MFD_WM8994 is not set |
| 998 | # CONFIG_DVB_CORE is not set | 1183 | # CONFIG_MFD_PCF50633 is not set |
| 999 | # CONFIG_VIDEO_MEDIA is not set | 1184 | # CONFIG_AB3100_CORE is not set |
| 1000 | 1185 | # CONFIG_LPC_SCH is not set | |
| 1001 | # | 1186 | # CONFIG_REGULATOR is not set |
| 1002 | # Multimedia drivers | 1187 | # CONFIG_MEDIA_SUPPORT is not set |
| 1003 | # | ||
| 1004 | # CONFIG_DAB is not set | ||
| 1005 | 1188 | ||
| 1006 | # | 1189 | # |
| 1007 | # Graphics support | 1190 | # Graphics support |
| 1008 | # | 1191 | # |
| 1192 | CONFIG_VGA_ARB=y | ||
| 1193 | CONFIG_VGA_ARB_MAX_GPUS=16 | ||
| 1009 | # CONFIG_DRM is not set | 1194 | # CONFIG_DRM is not set |
| 1010 | # CONFIG_VGASTATE is not set | 1195 | # CONFIG_VGASTATE is not set |
| 1011 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set | 1196 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set |
| @@ -1016,10 +1201,6 @@ CONFIG_SSB_POSSIBLE=y | |||
| 1016 | # Display device support | 1201 | # Display device support |
| 1017 | # | 1202 | # |
| 1018 | # CONFIG_DISPLAY_SUPPORT is not set | 1203 | # CONFIG_DISPLAY_SUPPORT is not set |
| 1019 | |||
| 1020 | # | ||
| 1021 | # Sound | ||
| 1022 | # | ||
| 1023 | # CONFIG_SOUND is not set | 1204 | # CONFIG_SOUND is not set |
| 1024 | CONFIG_USB_SUPPORT=y | 1205 | CONFIG_USB_SUPPORT=y |
| 1025 | CONFIG_USB_ARCH_HAS_HCD=y | 1206 | CONFIG_USB_ARCH_HAS_HCD=y |
| @@ -1030,9 +1211,18 @@ CONFIG_USB_ARCH_HAS_EHCI=y | |||
| 1030 | # CONFIG_USB_OTG_BLACKLIST_HUB is not set | 1211 | # CONFIG_USB_OTG_BLACKLIST_HUB is not set |
| 1031 | 1212 | ||
| 1032 | # | 1213 | # |
| 1033 | # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' | 1214 | # Enable Host or Gadget support to see Inventra options |
| 1215 | # | ||
| 1216 | |||
| 1217 | # | ||
| 1218 | # NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may | ||
| 1034 | # | 1219 | # |
| 1035 | # CONFIG_USB_GADGET is not set | 1220 | # CONFIG_USB_GADGET is not set |
| 1221 | |||
| 1222 | # | ||
| 1223 | # OTG and related infrastructure | ||
| 1224 | # | ||
| 1225 | # CONFIG_UWB is not set | ||
| 1036 | # CONFIG_MMC is not set | 1226 | # CONFIG_MMC is not set |
| 1037 | # CONFIG_MEMSTICK is not set | 1227 | # CONFIG_MEMSTICK is not set |
| 1038 | # CONFIG_NEW_LEDS is not set | 1228 | # CONFIG_NEW_LEDS is not set |
| @@ -1040,41 +1230,66 @@ CONFIG_USB_ARCH_HAS_EHCI=y | |||
| 1040 | # CONFIG_INFINIBAND is not set | 1230 | # CONFIG_INFINIBAND is not set |
| 1041 | CONFIG_RTC_LIB=y | 1231 | CONFIG_RTC_LIB=y |
| 1042 | # CONFIG_RTC_CLASS is not set | 1232 | # CONFIG_RTC_CLASS is not set |
| 1233 | # CONFIG_DMADEVICES is not set | ||
| 1234 | # CONFIG_AUXDISPLAY is not set | ||
| 1043 | # CONFIG_UIO is not set | 1235 | # CONFIG_UIO is not set |
| 1044 | 1236 | ||
| 1045 | # | 1237 | # |
| 1238 | # TI VLYNQ | ||
| 1239 | # | ||
| 1240 | # CONFIG_STAGING is not set | ||
| 1241 | |||
| 1242 | # | ||
| 1046 | # File systems | 1243 | # File systems |
| 1047 | # | 1244 | # |
| 1048 | CONFIG_EXT2_FS=m | 1245 | CONFIG_EXT2_FS=m |
| 1049 | CONFIG_EXT2_FS_XATTR=y | 1246 | CONFIG_EXT2_FS_XATTR=y |
| 1050 | # CONFIG_EXT2_FS_POSIX_ACL is not set | 1247 | CONFIG_EXT2_FS_POSIX_ACL=y |
| 1051 | # CONFIG_EXT2_FS_SECURITY is not set | 1248 | CONFIG_EXT2_FS_SECURITY=y |
| 1052 | # CONFIG_EXT2_FS_XIP is not set | 1249 | CONFIG_EXT2_FS_XIP=y |
| 1053 | CONFIG_EXT3_FS=y | 1250 | CONFIG_EXT3_FS=m |
| 1251 | CONFIG_EXT3_DEFAULTS_TO_ORDERED=y | ||
| 1054 | CONFIG_EXT3_FS_XATTR=y | 1252 | CONFIG_EXT3_FS_XATTR=y |
| 1055 | # CONFIG_EXT3_FS_POSIX_ACL is not set | 1253 | CONFIG_EXT3_FS_POSIX_ACL=y |
| 1056 | # CONFIG_EXT3_FS_SECURITY is not set | 1254 | CONFIG_EXT3_FS_SECURITY=y |
| 1057 | # CONFIG_EXT4DEV_FS is not set | 1255 | CONFIG_EXT4_FS=y |
| 1058 | CONFIG_JBD=y | 1256 | CONFIG_EXT4_FS_XATTR=y |
| 1257 | CONFIG_EXT4_FS_POSIX_ACL=y | ||
| 1258 | CONFIG_EXT4_FS_SECURITY=y | ||
| 1259 | # CONFIG_EXT4_DEBUG is not set | ||
| 1260 | CONFIG_FS_XIP=y | ||
| 1261 | CONFIG_JBD=m | ||
| 1262 | CONFIG_JBD2=y | ||
| 1059 | CONFIG_FS_MBCACHE=y | 1263 | CONFIG_FS_MBCACHE=y |
| 1060 | # CONFIG_REISERFS_FS is not set | 1264 | # CONFIG_REISERFS_FS is not set |
| 1061 | # CONFIG_JFS_FS is not set | 1265 | # CONFIG_JFS_FS is not set |
| 1062 | # CONFIG_FS_POSIX_ACL is not set | 1266 | CONFIG_FS_POSIX_ACL=y |
| 1063 | # CONFIG_XFS_FS is not set | 1267 | # CONFIG_XFS_FS is not set |
| 1064 | # CONFIG_GFS2_FS is not set | 1268 | # CONFIG_GFS2_FS is not set |
| 1065 | # CONFIG_OCFS2_FS is not set | 1269 | # CONFIG_OCFS2_FS is not set |
| 1270 | # CONFIG_BTRFS_FS is not set | ||
| 1271 | # CONFIG_NILFS2_FS is not set | ||
| 1272 | CONFIG_FILE_LOCKING=y | ||
| 1273 | CONFIG_FSNOTIFY=y | ||
| 1066 | CONFIG_DNOTIFY=y | 1274 | CONFIG_DNOTIFY=y |
| 1067 | CONFIG_INOTIFY=y | 1275 | CONFIG_INOTIFY=y |
| 1068 | CONFIG_INOTIFY_USER=y | 1276 | CONFIG_INOTIFY_USER=y |
| 1069 | CONFIG_QUOTA=y | 1277 | CONFIG_QUOTA=y |
| 1070 | CONFIG_QUOTA_NETLINK_INTERFACE=y | 1278 | CONFIG_QUOTA_NETLINK_INTERFACE=y |
| 1071 | # CONFIG_PRINT_QUOTA_WARNING is not set | 1279 | # CONFIG_PRINT_QUOTA_WARNING is not set |
| 1280 | CONFIG_QUOTA_TREE=m | ||
| 1072 | # CONFIG_QFMT_V1 is not set | 1281 | # CONFIG_QFMT_V1 is not set |
| 1073 | CONFIG_QFMT_V2=m | 1282 | CONFIG_QFMT_V2=m |
| 1074 | CONFIG_QUOTACTL=y | 1283 | CONFIG_QUOTACTL=y |
| 1075 | CONFIG_AUTOFS_FS=m | 1284 | CONFIG_AUTOFS_FS=m |
| 1076 | CONFIG_AUTOFS4_FS=m | 1285 | CONFIG_AUTOFS4_FS=m |
| 1077 | CONFIG_FUSE_FS=m | 1286 | CONFIG_FUSE_FS=m |
| 1287 | # CONFIG_CUSE is not set | ||
| 1288 | |||
| 1289 | # | ||
| 1290 | # Caches | ||
| 1291 | # | ||
| 1292 | # CONFIG_FSCACHE is not set | ||
| 1078 | 1293 | ||
| 1079 | # | 1294 | # |
| 1080 | # CD-ROM/DVD Filesystems | 1295 | # CD-ROM/DVD Filesystems |
| @@ -1103,15 +1318,13 @@ CONFIG_NTFS_RW=y | |||
| 1103 | CONFIG_PROC_FS=y | 1318 | CONFIG_PROC_FS=y |
| 1104 | CONFIG_PROC_KCORE=y | 1319 | CONFIG_PROC_KCORE=y |
| 1105 | CONFIG_PROC_SYSCTL=y | 1320 | CONFIG_PROC_SYSCTL=y |
| 1321 | CONFIG_PROC_PAGE_MONITOR=y | ||
| 1106 | CONFIG_SYSFS=y | 1322 | CONFIG_SYSFS=y |
| 1107 | CONFIG_TMPFS=y | 1323 | CONFIG_TMPFS=y |
| 1108 | # CONFIG_TMPFS_POSIX_ACL is not set | 1324 | # CONFIG_TMPFS_POSIX_ACL is not set |
| 1109 | # CONFIG_HUGETLB_PAGE is not set | 1325 | # CONFIG_HUGETLB_PAGE is not set |
| 1110 | CONFIG_CONFIGFS_FS=m | 1326 | CONFIG_CONFIGFS_FS=m |
| 1111 | 1327 | CONFIG_MISC_FILESYSTEMS=y | |
| 1112 | # | ||
| 1113 | # Miscellaneous filesystems | ||
| 1114 | # | ||
| 1115 | # CONFIG_ADFS_FS is not set | 1328 | # CONFIG_ADFS_FS is not set |
| 1116 | # CONFIG_AFFS_FS is not set | 1329 | # CONFIG_AFFS_FS is not set |
| 1117 | # CONFIG_ECRYPT_FS is not set | 1330 | # CONFIG_ECRYPT_FS is not set |
| @@ -1120,9 +1333,12 @@ CONFIG_CONFIGFS_FS=m | |||
| 1120 | # CONFIG_BEFS_FS is not set | 1333 | # CONFIG_BEFS_FS is not set |
| 1121 | # CONFIG_BFS_FS is not set | 1334 | # CONFIG_BFS_FS is not set |
| 1122 | # CONFIG_EFS_FS is not set | 1335 | # CONFIG_EFS_FS is not set |
| 1336 | # CONFIG_LOGFS is not set | ||
| 1123 | # CONFIG_CRAMFS is not set | 1337 | # CONFIG_CRAMFS is not set |
| 1338 | # CONFIG_SQUASHFS is not set | ||
| 1124 | # CONFIG_VXFS_FS is not set | 1339 | # CONFIG_VXFS_FS is not set |
| 1125 | # CONFIG_MINIX_FS is not set | 1340 | # CONFIG_MINIX_FS is not set |
| 1341 | # CONFIG_OMFS_FS is not set | ||
| 1126 | # CONFIG_HPFS_FS is not set | 1342 | # CONFIG_HPFS_FS is not set |
| 1127 | # CONFIG_QNX4FS_FS is not set | 1343 | # CONFIG_QNX4FS_FS is not set |
| 1128 | # CONFIG_ROMFS_FS is not set | 1344 | # CONFIG_ROMFS_FS is not set |
| @@ -1133,16 +1349,17 @@ CONFIG_NFS_FS=y | |||
| 1133 | CONFIG_NFS_V3=y | 1349 | CONFIG_NFS_V3=y |
| 1134 | # CONFIG_NFS_V3_ACL is not set | 1350 | # CONFIG_NFS_V3_ACL is not set |
| 1135 | # CONFIG_NFS_V4 is not set | 1351 | # CONFIG_NFS_V4 is not set |
| 1136 | # CONFIG_NFSD is not set | ||
| 1137 | CONFIG_ROOT_NFS=y | 1352 | CONFIG_ROOT_NFS=y |
| 1353 | # CONFIG_NFSD is not set | ||
| 1138 | CONFIG_LOCKD=y | 1354 | CONFIG_LOCKD=y |
| 1139 | CONFIG_LOCKD_V4=y | 1355 | CONFIG_LOCKD_V4=y |
| 1140 | CONFIG_NFS_COMMON=y | 1356 | CONFIG_NFS_COMMON=y |
| 1141 | CONFIG_SUNRPC=y | 1357 | CONFIG_SUNRPC=y |
| 1142 | # CONFIG_SUNRPC_BIND34 is not set | 1358 | CONFIG_SUNRPC_GSS=m |
| 1143 | # CONFIG_RPCSEC_GSS_KRB5 is not set | 1359 | CONFIG_RPCSEC_GSS_KRB5=m |
| 1144 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | 1360 | CONFIG_RPCSEC_GSS_SPKM3=m |
| 1145 | # CONFIG_SMB_FS is not set | 1361 | # CONFIG_SMB_FS is not set |
| 1362 | # CONFIG_CEPH_FS is not set | ||
| 1146 | # CONFIG_CIFS is not set | 1363 | # CONFIG_CIFS is not set |
| 1147 | # CONFIG_NCP_FS is not set | 1364 | # CONFIG_NCP_FS is not set |
| 1148 | # CONFIG_CODA_FS is not set | 1365 | # CONFIG_CODA_FS is not set |
| @@ -1205,12 +1422,18 @@ CONFIG_ENABLE_WARN_DEPRECATED=y | |||
| 1205 | CONFIG_ENABLE_MUST_CHECK=y | 1422 | CONFIG_ENABLE_MUST_CHECK=y |
| 1206 | CONFIG_FRAME_WARN=2048 | 1423 | CONFIG_FRAME_WARN=2048 |
| 1207 | CONFIG_MAGIC_SYSRQ=y | 1424 | CONFIG_MAGIC_SYSRQ=y |
| 1425 | # CONFIG_STRIP_ASM_SYMS is not set | ||
| 1208 | # CONFIG_UNUSED_SYMBOLS is not set | 1426 | # CONFIG_UNUSED_SYMBOLS is not set |
| 1209 | # CONFIG_DEBUG_FS is not set | 1427 | # CONFIG_DEBUG_FS is not set |
| 1210 | # CONFIG_HEADERS_CHECK is not set | 1428 | # CONFIG_HEADERS_CHECK is not set |
| 1211 | CONFIG_DEBUG_KERNEL=y | 1429 | CONFIG_DEBUG_KERNEL=y |
| 1212 | # CONFIG_DEBUG_SHIRQ is not set | 1430 | # CONFIG_DEBUG_SHIRQ is not set |
| 1213 | CONFIG_DETECT_SOFTLOCKUP=y | 1431 | CONFIG_DETECT_SOFTLOCKUP=y |
| 1432 | # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set | ||
| 1433 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 | ||
| 1434 | CONFIG_DETECT_HUNG_TASK=y | ||
| 1435 | # CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set | ||
| 1436 | CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 | ||
| 1214 | CONFIG_SCHED_DEBUG=y | 1437 | CONFIG_SCHED_DEBUG=y |
| 1215 | # CONFIG_SCHEDSTATS is not set | 1438 | # CONFIG_SCHEDSTATS is not set |
| 1216 | # CONFIG_TIMER_STATS is not set | 1439 | # CONFIG_TIMER_STATS is not set |
| @@ -1219,23 +1442,53 @@ CONFIG_SCHED_DEBUG=y | |||
| 1219 | # CONFIG_DEBUG_RT_MUTEXES is not set | 1442 | # CONFIG_DEBUG_RT_MUTEXES is not set |
| 1220 | # CONFIG_RT_MUTEX_TESTER is not set | 1443 | # CONFIG_RT_MUTEX_TESTER is not set |
| 1221 | # CONFIG_DEBUG_SPINLOCK is not set | 1444 | # CONFIG_DEBUG_SPINLOCK is not set |
| 1222 | CONFIG_DEBUG_MUTEXES=y | 1445 | # CONFIG_DEBUG_MUTEXES is not set |
| 1223 | # CONFIG_DEBUG_LOCK_ALLOC is not set | 1446 | # CONFIG_DEBUG_LOCK_ALLOC is not set |
| 1224 | # CONFIG_PROVE_LOCKING is not set | 1447 | # CONFIG_PROVE_LOCKING is not set |
| 1225 | # CONFIG_LOCK_STAT is not set | 1448 | # CONFIG_LOCK_STAT is not set |
| 1226 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set | 1449 | CONFIG_DEBUG_SPINLOCK_SLEEP=y |
| 1227 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set | 1450 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set |
| 1228 | # CONFIG_DEBUG_KOBJECT is not set | 1451 | # CONFIG_DEBUG_KOBJECT is not set |
| 1229 | # CONFIG_DEBUG_INFO is not set | 1452 | # CONFIG_DEBUG_INFO is not set |
| 1230 | # CONFIG_DEBUG_VM is not set | 1453 | # CONFIG_DEBUG_VM is not set |
| 1231 | # CONFIG_DEBUG_WRITECOUNT is not set | 1454 | # CONFIG_DEBUG_WRITECOUNT is not set |
| 1232 | # CONFIG_DEBUG_LIST is not set | 1455 | CONFIG_DEBUG_MEMORY_INIT=y |
| 1456 | CONFIG_DEBUG_LIST=y | ||
| 1233 | # CONFIG_DEBUG_SG is not set | 1457 | # CONFIG_DEBUG_SG is not set |
| 1458 | # CONFIG_DEBUG_NOTIFIERS is not set | ||
| 1459 | # CONFIG_DEBUG_CREDENTIALS is not set | ||
| 1234 | # CONFIG_BOOT_PRINTK_DELAY is not set | 1460 | # CONFIG_BOOT_PRINTK_DELAY is not set |
| 1235 | # CONFIG_RCU_TORTURE_TEST is not set | 1461 | # CONFIG_RCU_TORTURE_TEST is not set |
| 1462 | CONFIG_RCU_CPU_STALL_DETECTOR=y | ||
| 1236 | # CONFIG_BACKTRACE_SELF_TEST is not set | 1463 | # CONFIG_BACKTRACE_SELF_TEST is not set |
| 1464 | # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set | ||
| 1465 | # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set | ||
| 1237 | # CONFIG_FAULT_INJECTION is not set | 1466 | # CONFIG_FAULT_INJECTION is not set |
| 1467 | # CONFIG_SYSCTL_SYSCALL_CHECK is not set | ||
| 1468 | # CONFIG_PAGE_POISONING is not set | ||
| 1469 | CONFIG_HAVE_FUNCTION_TRACER=y | ||
| 1470 | CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y | ||
| 1471 | CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y | ||
| 1472 | CONFIG_HAVE_DYNAMIC_FTRACE=y | ||
| 1473 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | ||
| 1474 | CONFIG_TRACING_SUPPORT=y | ||
| 1475 | CONFIG_FTRACE=y | ||
| 1476 | # CONFIG_FUNCTION_TRACER is not set | ||
| 1477 | # CONFIG_IRQSOFF_TRACER is not set | ||
| 1478 | # CONFIG_SCHED_TRACER is not set | ||
| 1479 | # CONFIG_ENABLE_DEFAULT_TRACERS is not set | ||
| 1480 | # CONFIG_BOOT_TRACER is not set | ||
| 1481 | CONFIG_BRANCH_PROFILE_NONE=y | ||
| 1482 | # CONFIG_PROFILE_ANNOTATED_BRANCHES is not set | ||
| 1483 | # CONFIG_PROFILE_ALL_BRANCHES is not set | ||
| 1484 | # CONFIG_STACK_TRACER is not set | ||
| 1485 | # CONFIG_KMEMTRACE is not set | ||
| 1486 | # CONFIG_WORKQUEUE_TRACER is not set | ||
| 1487 | # CONFIG_BLK_DEV_IO_TRACE is not set | ||
| 1238 | # CONFIG_SAMPLES is not set | 1488 | # CONFIG_SAMPLES is not set |
| 1489 | CONFIG_HAVE_ARCH_KGDB=y | ||
| 1490 | # CONFIG_KGDB is not set | ||
| 1491 | CONFIG_EARLY_PRINTK=y | ||
| 1239 | # CONFIG_CMDLINE_BOOL is not set | 1492 | # CONFIG_CMDLINE_BOOL is not set |
| 1240 | # CONFIG_DEBUG_STACK_USAGE is not set | 1493 | # CONFIG_DEBUG_STACK_USAGE is not set |
| 1241 | # CONFIG_SB1XXX_CORELIS is not set | 1494 | # CONFIG_SB1XXX_CORELIS is not set |
| @@ -1246,20 +1499,50 @@ CONFIG_DEBUG_MUTEXES=y | |||
| 1246 | # | 1499 | # |
| 1247 | CONFIG_KEYS=y | 1500 | CONFIG_KEYS=y |
| 1248 | CONFIG_KEYS_DEBUG_PROC_KEYS=y | 1501 | CONFIG_KEYS_DEBUG_PROC_KEYS=y |
| 1249 | # CONFIG_SECURITY is not set | 1502 | CONFIG_SECURITY=y |
| 1250 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set | 1503 | # CONFIG_SECURITYFS is not set |
| 1504 | CONFIG_SECURITY_NETWORK=y | ||
| 1505 | CONFIG_SECURITY_NETWORK_XFRM=y | ||
| 1506 | # CONFIG_SECURITY_PATH is not set | ||
| 1507 | CONFIG_LSM_MMAP_MIN_ADDR=65536 | ||
| 1508 | CONFIG_SECURITY_SELINUX=y | ||
| 1509 | CONFIG_SECURITY_SELINUX_BOOTPARAM=y | ||
| 1510 | CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 | ||
| 1511 | CONFIG_SECURITY_SELINUX_DISABLE=y | ||
| 1512 | CONFIG_SECURITY_SELINUX_DEVELOP=y | ||
| 1513 | CONFIG_SECURITY_SELINUX_AVC_STATS=y | ||
| 1514 | CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 | ||
| 1515 | # CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set | ||
| 1516 | # CONFIG_SECURITY_SMACK is not set | ||
| 1517 | # CONFIG_SECURITY_TOMOYO is not set | ||
| 1518 | # CONFIG_DEFAULT_SECURITY_SELINUX is not set | ||
| 1519 | # CONFIG_DEFAULT_SECURITY_SMACK is not set | ||
| 1520 | # CONFIG_DEFAULT_SECURITY_TOMOYO is not set | ||
| 1521 | CONFIG_DEFAULT_SECURITY_DAC=y | ||
| 1522 | CONFIG_DEFAULT_SECURITY="" | ||
| 1251 | CONFIG_CRYPTO=y | 1523 | CONFIG_CRYPTO=y |
| 1252 | 1524 | ||
| 1253 | # | 1525 | # |
| 1254 | # Crypto core or helper | 1526 | # Crypto core or helper |
| 1255 | # | 1527 | # |
| 1528 | # CONFIG_CRYPTO_FIPS is not set | ||
| 1256 | CONFIG_CRYPTO_ALGAPI=y | 1529 | CONFIG_CRYPTO_ALGAPI=y |
| 1530 | CONFIG_CRYPTO_ALGAPI2=y | ||
| 1257 | CONFIG_CRYPTO_AEAD=m | 1531 | CONFIG_CRYPTO_AEAD=m |
| 1532 | CONFIG_CRYPTO_AEAD2=y | ||
| 1258 | CONFIG_CRYPTO_BLKCIPHER=y | 1533 | CONFIG_CRYPTO_BLKCIPHER=y |
| 1534 | CONFIG_CRYPTO_BLKCIPHER2=y | ||
| 1259 | CONFIG_CRYPTO_HASH=y | 1535 | CONFIG_CRYPTO_HASH=y |
| 1536 | CONFIG_CRYPTO_HASH2=y | ||
| 1537 | CONFIG_CRYPTO_RNG=m | ||
| 1538 | CONFIG_CRYPTO_RNG2=y | ||
| 1539 | CONFIG_CRYPTO_PCOMP=y | ||
| 1260 | CONFIG_CRYPTO_MANAGER=y | 1540 | CONFIG_CRYPTO_MANAGER=y |
| 1541 | CONFIG_CRYPTO_MANAGER2=y | ||
| 1261 | CONFIG_CRYPTO_GF128MUL=m | 1542 | CONFIG_CRYPTO_GF128MUL=m |
| 1262 | CONFIG_CRYPTO_NULL=y | 1543 | CONFIG_CRYPTO_NULL=y |
| 1544 | # CONFIG_CRYPTO_PCRYPT is not set | ||
| 1545 | CONFIG_CRYPTO_WORKQUEUE=y | ||
| 1263 | # CONFIG_CRYPTO_CRYPTD is not set | 1546 | # CONFIG_CRYPTO_CRYPTD is not set |
| 1264 | CONFIG_CRYPTO_AUTHENC=m | 1547 | CONFIG_CRYPTO_AUTHENC=m |
| 1265 | # CONFIG_CRYPTO_TEST is not set | 1548 | # CONFIG_CRYPTO_TEST is not set |
| @@ -1276,7 +1559,7 @@ CONFIG_CRYPTO_SEQIV=m | |||
| 1276 | # | 1559 | # |
| 1277 | CONFIG_CRYPTO_CBC=m | 1560 | CONFIG_CRYPTO_CBC=m |
| 1278 | CONFIG_CRYPTO_CTR=m | 1561 | CONFIG_CRYPTO_CTR=m |
| 1279 | # CONFIG_CRYPTO_CTS is not set | 1562 | CONFIG_CRYPTO_CTS=m |
| 1280 | CONFIG_CRYPTO_ECB=m | 1563 | CONFIG_CRYPTO_ECB=m |
| 1281 | CONFIG_CRYPTO_LRW=m | 1564 | CONFIG_CRYPTO_LRW=m |
| 1282 | CONFIG_CRYPTO_PCBC=m | 1565 | CONFIG_CRYPTO_PCBC=m |
| @@ -1287,14 +1570,20 @@ CONFIG_CRYPTO_XTS=m | |||
| 1287 | # | 1570 | # |
| 1288 | CONFIG_CRYPTO_HMAC=y | 1571 | CONFIG_CRYPTO_HMAC=y |
| 1289 | CONFIG_CRYPTO_XCBC=m | 1572 | CONFIG_CRYPTO_XCBC=m |
| 1573 | CONFIG_CRYPTO_VMAC=m | ||
| 1290 | 1574 | ||
| 1291 | # | 1575 | # |
| 1292 | # Digest | 1576 | # Digest |
| 1293 | # | 1577 | # |
| 1294 | # CONFIG_CRYPTO_CRC32C is not set | 1578 | CONFIG_CRYPTO_CRC32C=m |
| 1579 | CONFIG_CRYPTO_GHASH=m | ||
| 1295 | CONFIG_CRYPTO_MD4=m | 1580 | CONFIG_CRYPTO_MD4=m |
| 1296 | CONFIG_CRYPTO_MD5=y | 1581 | CONFIG_CRYPTO_MD5=y |
| 1297 | CONFIG_CRYPTO_MICHAEL_MIC=m | 1582 | CONFIG_CRYPTO_MICHAEL_MIC=m |
| 1583 | CONFIG_CRYPTO_RMD128=m | ||
| 1584 | CONFIG_CRYPTO_RMD160=m | ||
| 1585 | CONFIG_CRYPTO_RMD256=m | ||
| 1586 | CONFIG_CRYPTO_RMD320=m | ||
| 1298 | CONFIG_CRYPTO_SHA1=m | 1587 | CONFIG_CRYPTO_SHA1=m |
| 1299 | CONFIG_CRYPTO_SHA256=m | 1588 | CONFIG_CRYPTO_SHA256=m |
| 1300 | CONFIG_CRYPTO_SHA512=m | 1589 | CONFIG_CRYPTO_SHA512=m |
| @@ -1325,25 +1614,36 @@ CONFIG_CRYPTO_TWOFISH_COMMON=m | |||
| 1325 | # Compression | 1614 | # Compression |
| 1326 | # | 1615 | # |
| 1327 | CONFIG_CRYPTO_DEFLATE=m | 1616 | CONFIG_CRYPTO_DEFLATE=m |
| 1328 | # CONFIG_CRYPTO_LZO is not set | 1617 | CONFIG_CRYPTO_ZLIB=m |
| 1618 | CONFIG_CRYPTO_LZO=m | ||
| 1619 | |||
| 1620 | # | ||
| 1621 | # Random Number Generation | ||
| 1622 | # | ||
| 1623 | CONFIG_CRYPTO_ANSI_CPRNG=m | ||
| 1329 | CONFIG_CRYPTO_HW=y | 1624 | CONFIG_CRYPTO_HW=y |
| 1330 | # CONFIG_CRYPTO_DEV_HIFN_795X is not set | 1625 | # CONFIG_CRYPTO_DEV_HIFN_795X is not set |
| 1626 | # CONFIG_BINARY_PRINTF is not set | ||
| 1331 | 1627 | ||
| 1332 | # | 1628 | # |
| 1333 | # Library routines | 1629 | # Library routines |
| 1334 | # | 1630 | # |
| 1335 | CONFIG_BITREVERSE=y | 1631 | CONFIG_BITREVERSE=y |
| 1336 | # CONFIG_GENERIC_FIND_FIRST_BIT is not set | 1632 | CONFIG_GENERIC_FIND_LAST_BIT=y |
| 1337 | CONFIG_CRC_CCITT=m | 1633 | CONFIG_CRC_CCITT=m |
| 1338 | # CONFIG_CRC16 is not set | 1634 | CONFIG_CRC16=y |
| 1635 | CONFIG_CRC_T10DIF=m | ||
| 1339 | CONFIG_CRC_ITU_T=m | 1636 | CONFIG_CRC_ITU_T=m |
| 1340 | CONFIG_CRC32=y | 1637 | CONFIG_CRC32=y |
| 1341 | # CONFIG_CRC7 is not set | 1638 | CONFIG_CRC7=m |
| 1342 | CONFIG_LIBCRC32C=m | 1639 | CONFIG_LIBCRC32C=m |
| 1343 | CONFIG_AUDIT_GENERIC=y | 1640 | CONFIG_AUDIT_GENERIC=y |
| 1344 | CONFIG_ZLIB_INFLATE=m | 1641 | CONFIG_ZLIB_INFLATE=y |
| 1345 | CONFIG_ZLIB_DEFLATE=m | 1642 | CONFIG_ZLIB_DEFLATE=m |
| 1346 | CONFIG_PLIST=y | 1643 | CONFIG_LZO_COMPRESS=m |
| 1644 | CONFIG_LZO_DECOMPRESS=m | ||
| 1645 | CONFIG_DECOMPRESS_GZIP=y | ||
| 1347 | CONFIG_HAS_IOMEM=y | 1646 | CONFIG_HAS_IOMEM=y |
| 1348 | CONFIG_HAS_IOPORT=y | 1647 | CONFIG_HAS_IOPORT=y |
| 1349 | CONFIG_HAS_DMA=y | 1648 | CONFIG_HAS_DMA=y |
| 1649 | CONFIG_NLATTR=y | ||
diff --git a/arch/mips/include/asm/abi.h b/arch/mips/include/asm/abi.h index 1dd74fbdc09b..9252d9b50e59 100644 --- a/arch/mips/include/asm/abi.h +++ b/arch/mips/include/asm/abi.h | |||
| @@ -13,12 +13,14 @@ | |||
| 13 | #include <asm/siginfo.h> | 13 | #include <asm/siginfo.h> |
| 14 | 14 | ||
| 15 | struct mips_abi { | 15 | struct mips_abi { |
| 16 | int (* const setup_frame)(struct k_sigaction * ka, | 16 | int (* const setup_frame)(void *sig_return, struct k_sigaction *ka, |
| 17 | struct pt_regs *regs, int signr, | 17 | struct pt_regs *regs, int signr, |
| 18 | sigset_t *set); | 18 | sigset_t *set); |
| 19 | int (* const setup_rt_frame)(struct k_sigaction * ka, | 19 | const unsigned long signal_return_offset; |
| 20 | int (* const setup_rt_frame)(void *sig_return, struct k_sigaction *ka, | ||
| 20 | struct pt_regs *regs, int signr, | 21 | struct pt_regs *regs, int signr, |
| 21 | sigset_t *set, siginfo_t *info); | 22 | sigset_t *set, siginfo_t *info); |
| 23 | const unsigned long rt_signal_return_offset; | ||
| 22 | const unsigned long restart; | 24 | const unsigned long restart; |
| 23 | }; | 25 | }; |
| 24 | 26 | ||
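The reworked struct mips_abi passes the signal-return address into each frame-setup hook and records, per ABI, where that trampoline lives inside the vdso page. A minimal sketch of how a caller is expected to combine the two new fields, mirroring the handle_signal() hunk later in this patch (the helper name is illustrative):

    /*
     * Sketch only: resolve the trampoline address for the current task,
     * assuming current->thread.abi and current->mm->context.vdso are set
     * up as done elsewhere in this patch.
     */
    static void *sig_return_address(const struct mips_abi *abi, int rt)
    {
            void *vdso = current->mm->context.vdso;

            return vdso + (rt ? abi->rt_signal_return_offset
                              : abi->signal_return_offset);
    }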
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index e53d7bed5cda..ea77a42c5f8c 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h | |||
| @@ -310,6 +310,7 @@ do { \ | |||
| 310 | 310 | ||
| 311 | #endif /* CONFIG_64BIT */ | 311 | #endif /* CONFIG_64BIT */ |
| 312 | 312 | ||
| 313 | struct pt_regs; | ||
| 313 | struct task_struct; | 314 | struct task_struct; |
| 314 | 315 | ||
| 315 | extern void elf_dump_regs(elf_greg_t *, struct pt_regs *regs); | 316 | extern void elf_dump_regs(elf_greg_t *, struct pt_regs *regs); |
| @@ -367,4 +368,8 @@ extern const char *__elf_platform; | |||
| 367 | #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) | 368 | #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) |
| 368 | #endif | 369 | #endif |
| 369 | 370 | ||
| 371 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 | ||
| 372 | struct linux_binprm; | ||
| 373 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, | ||
| 374 | int uses_interp); | ||
| 370 | #endif /* _ASM_ELF_H */ | 375 | #endif /* _ASM_ELF_H */ |
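Defining ARCH_HAS_SETUP_ADDITIONAL_PAGES makes the generic ELF loader call arch_setup_additional_pages() at exec time, which is where the vdso page gets mapped. The MIPS implementation lives in the new arch/mips/kernel/vdso.c, which this excerpt does not show; the sketch below is only the usual shape of such a function, built on the generic install_special_mapping() helper, with vdso_pages as an assumed, illustrative page array.

    #include <linux/binfmts.h>
    #include <linux/mm.h>

    static struct page *vdso_pages[1];      /* assumed: filled with the vdso page */

    int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
    {
            struct mm_struct *mm = current->mm;
            unsigned long addr;
            int ret;

            down_write(&mm->mmap_sem);
            addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
            if (IS_ERR_VALUE(addr)) {
                    ret = addr;
                    goto up_fail;
            }
            /* Map one read/exec page and remember it in the mm context. */
            ret = install_special_mapping(mm, addr, PAGE_SIZE,
                                          VM_READ | VM_EXEC |
                                          VM_MAYREAD | VM_MAYEXEC,
                                          vdso_pages);
            if (!ret)
                    mm->context.vdso = (void *)addr;
    up_fail:
            up_write(&mm->mmap_sem);
            return ret;
    }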
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h index aecada6f6117..3b4092705567 100644 --- a/arch/mips/include/asm/fpu_emulator.h +++ b/arch/mips/include/asm/fpu_emulator.h | |||
| @@ -41,7 +41,11 @@ struct mips_fpu_emulator_stats { | |||
| 41 | DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); | 41 | DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); |
| 42 | 42 | ||
| 43 | #define MIPS_FPU_EMU_INC_STATS(M) \ | 43 | #define MIPS_FPU_EMU_INC_STATS(M) \ |
| 44 | cpu_local_wrap(__local_inc(&__get_cpu_var(fpuemustats).M)) | 44 | do { \ |
| 45 | preempt_disable(); \ | ||
| 46 | __local_inc(&__get_cpu_var(fpuemustats).M); \ | ||
| 47 | preempt_enable(); \ | ||
| 48 | } while (0) | ||
| 45 | 49 | ||
| 46 | #else | 50 | #else |
| 47 | #define MIPS_FPU_EMU_INC_STATS(M) do { } while (0) | 51 | #define MIPS_FPU_EMU_INC_STATS(M) do { } while (0) |
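The rewritten macro open-codes a preempt_disable()/preempt_enable() pair around __local_inc() in place of the removed cpu_local_wrap() helper, so the per-CPU increment stays safe in preemptible context. Call sites are unchanged; for example (the counter name is illustrative, assuming such a field exists in struct mips_fpu_emulator_stats):

    /* Illustrative call site: bump one of the per-CPU emulator counters. */
    static inline void account_emulated_insn(void)
    {
            MIPS_FPU_EMU_INC_STATS(emulated);
    }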
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h index b12c4aca2cc9..96a2391ad85b 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h | |||
| @@ -85,6 +85,7 @@ enum bcm63xx_regs_set { | |||
| 85 | RSET_TIMER, | 85 | RSET_TIMER, |
| 86 | RSET_WDT, | 86 | RSET_WDT, |
| 87 | RSET_UART0, | 87 | RSET_UART0, |
| 88 | RSET_UART1, | ||
| 88 | RSET_GPIO, | 89 | RSET_GPIO, |
| 89 | RSET_SPI, | 90 | RSET_SPI, |
| 90 | RSET_UDC0, | 91 | RSET_UDC0, |
| @@ -123,6 +124,7 @@ enum bcm63xx_regs_set { | |||
| 123 | #define BCM_6338_TIMER_BASE (0xfffe0200) | 124 | #define BCM_6338_TIMER_BASE (0xfffe0200) |
| 124 | #define BCM_6338_WDT_BASE (0xfffe021c) | 125 | #define BCM_6338_WDT_BASE (0xfffe021c) |
| 125 | #define BCM_6338_UART0_BASE (0xfffe0300) | 126 | #define BCM_6338_UART0_BASE (0xfffe0300) |
| 127 | #define BCM_6338_UART1_BASE (0xdeadbeef) | ||
| 126 | #define BCM_6338_GPIO_BASE (0xfffe0400) | 128 | #define BCM_6338_GPIO_BASE (0xfffe0400) |
| 127 | #define BCM_6338_SPI_BASE (0xfffe0c00) | 129 | #define BCM_6338_SPI_BASE (0xfffe0c00) |
| 128 | #define BCM_6338_UDC0_BASE (0xdeadbeef) | 130 | #define BCM_6338_UDC0_BASE (0xdeadbeef) |
| @@ -153,6 +155,7 @@ enum bcm63xx_regs_set { | |||
| 153 | #define BCM_6345_TIMER_BASE (0xfffe0200) | 155 | #define BCM_6345_TIMER_BASE (0xfffe0200) |
| 154 | #define BCM_6345_WDT_BASE (0xfffe021c) | 156 | #define BCM_6345_WDT_BASE (0xfffe021c) |
| 155 | #define BCM_6345_UART0_BASE (0xfffe0300) | 157 | #define BCM_6345_UART0_BASE (0xfffe0300) |
| 158 | #define BCM_6345_UART1_BASE (0xdeadbeef) | ||
| 156 | #define BCM_6345_GPIO_BASE (0xfffe0400) | 159 | #define BCM_6345_GPIO_BASE (0xfffe0400) |
| 157 | #define BCM_6345_SPI_BASE (0xdeadbeef) | 160 | #define BCM_6345_SPI_BASE (0xdeadbeef) |
| 158 | #define BCM_6345_UDC0_BASE (0xdeadbeef) | 161 | #define BCM_6345_UDC0_BASE (0xdeadbeef) |
| @@ -182,6 +185,7 @@ enum bcm63xx_regs_set { | |||
| 182 | #define BCM_6348_TIMER_BASE (0xfffe0200) | 185 | #define BCM_6348_TIMER_BASE (0xfffe0200) |
| 183 | #define BCM_6348_WDT_BASE (0xfffe021c) | 186 | #define BCM_6348_WDT_BASE (0xfffe021c) |
| 184 | #define BCM_6348_UART0_BASE (0xfffe0300) | 187 | #define BCM_6348_UART0_BASE (0xfffe0300) |
| 188 | #define BCM_6348_UART1_BASE (0xdeadbeef) | ||
| 185 | #define BCM_6348_GPIO_BASE (0xfffe0400) | 189 | #define BCM_6348_GPIO_BASE (0xfffe0400) |
| 186 | #define BCM_6348_SPI_BASE (0xfffe0c00) | 190 | #define BCM_6348_SPI_BASE (0xfffe0c00) |
| 187 | #define BCM_6348_UDC0_BASE (0xfffe1000) | 191 | #define BCM_6348_UDC0_BASE (0xfffe1000) |
| @@ -208,6 +212,7 @@ enum bcm63xx_regs_set { | |||
| 208 | #define BCM_6358_TIMER_BASE (0xfffe0040) | 212 | #define BCM_6358_TIMER_BASE (0xfffe0040) |
| 209 | #define BCM_6358_WDT_BASE (0xfffe005c) | 213 | #define BCM_6358_WDT_BASE (0xfffe005c) |
| 210 | #define BCM_6358_UART0_BASE (0xfffe0100) | 214 | #define BCM_6358_UART0_BASE (0xfffe0100) |
| 215 | #define BCM_6358_UART1_BASE (0xfffe0120) | ||
| 211 | #define BCM_6358_GPIO_BASE (0xfffe0080) | 216 | #define BCM_6358_GPIO_BASE (0xfffe0080) |
| 212 | #define BCM_6358_SPI_BASE (0xdeadbeef) | 217 | #define BCM_6358_SPI_BASE (0xdeadbeef) |
| 213 | #define BCM_6358_UDC0_BASE (0xfffe0800) | 218 | #define BCM_6358_UDC0_BASE (0xfffe0800) |
| @@ -246,6 +251,8 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set) | |||
| 246 | return BCM_6338_WDT_BASE; | 251 | return BCM_6338_WDT_BASE; |
| 247 | case RSET_UART0: | 252 | case RSET_UART0: |
| 248 | return BCM_6338_UART0_BASE; | 253 | return BCM_6338_UART0_BASE; |
| 254 | case RSET_UART1: | ||
| 255 | return BCM_6338_UART1_BASE; | ||
| 249 | case RSET_GPIO: | 256 | case RSET_GPIO: |
| 250 | return BCM_6338_GPIO_BASE; | 257 | return BCM_6338_GPIO_BASE; |
| 251 | case RSET_SPI: | 258 | case RSET_SPI: |
| @@ -292,6 +299,8 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set) | |||
| 292 | return BCM_6345_WDT_BASE; | 299 | return BCM_6345_WDT_BASE; |
| 293 | case RSET_UART0: | 300 | case RSET_UART0: |
| 294 | return BCM_6345_UART0_BASE; | 301 | return BCM_6345_UART0_BASE; |
| 302 | case RSET_UART1: | ||
| 303 | return BCM_6345_UART1_BASE; | ||
| 295 | case RSET_GPIO: | 304 | case RSET_GPIO: |
| 296 | return BCM_6345_GPIO_BASE; | 305 | return BCM_6345_GPIO_BASE; |
| 297 | case RSET_SPI: | 306 | case RSET_SPI: |
| @@ -338,6 +347,8 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set) | |||
| 338 | return BCM_6348_WDT_BASE; | 347 | return BCM_6348_WDT_BASE; |
| 339 | case RSET_UART0: | 348 | case RSET_UART0: |
| 340 | return BCM_6348_UART0_BASE; | 349 | return BCM_6348_UART0_BASE; |
| 350 | case RSET_UART1: | ||
| 351 | return BCM_6348_UART1_BASE; | ||
| 341 | case RSET_GPIO: | 352 | case RSET_GPIO: |
| 342 | return BCM_6348_GPIO_BASE; | 353 | return BCM_6348_GPIO_BASE; |
| 343 | case RSET_SPI: | 354 | case RSET_SPI: |
| @@ -384,6 +395,8 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set) | |||
| 384 | return BCM_6358_WDT_BASE; | 395 | return BCM_6358_WDT_BASE; |
| 385 | case RSET_UART0: | 396 | case RSET_UART0: |
| 386 | return BCM_6358_UART0_BASE; | 397 | return BCM_6358_UART0_BASE; |
| 398 | case RSET_UART1: | ||
| 399 | return BCM_6358_UART1_BASE; | ||
| 387 | case RSET_GPIO: | 400 | case RSET_GPIO: |
| 388 | return BCM_6358_GPIO_BASE; | 401 | return BCM_6358_GPIO_BASE; |
| 389 | case RSET_SPI: | 402 | case RSET_SPI: |
| @@ -429,6 +442,7 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set) | |||
| 429 | enum bcm63xx_irq { | 442 | enum bcm63xx_irq { |
| 430 | IRQ_TIMER = 0, | 443 | IRQ_TIMER = 0, |
| 431 | IRQ_UART0, | 444 | IRQ_UART0, |
| 445 | IRQ_UART1, | ||
| 432 | IRQ_DSL, | 446 | IRQ_DSL, |
| 433 | IRQ_ENET0, | 447 | IRQ_ENET0, |
| 434 | IRQ_ENET1, | 448 | IRQ_ENET1, |
| @@ -510,6 +524,7 @@ enum bcm63xx_irq { | |||
| 510 | */ | 524 | */ |
| 511 | #define BCM_6358_TIMER_IRQ (IRQ_INTERNAL_BASE + 0) | 525 | #define BCM_6358_TIMER_IRQ (IRQ_INTERNAL_BASE + 0) |
| 512 | #define BCM_6358_UART0_IRQ (IRQ_INTERNAL_BASE + 2) | 526 | #define BCM_6358_UART0_IRQ (IRQ_INTERNAL_BASE + 2) |
| 527 | #define BCM_6358_UART1_IRQ (IRQ_INTERNAL_BASE + 3) | ||
| 513 | #define BCM_6358_OHCI0_IRQ (IRQ_INTERNAL_BASE + 5) | 528 | #define BCM_6358_OHCI0_IRQ (IRQ_INTERNAL_BASE + 5) |
| 514 | #define BCM_6358_ENET1_IRQ (IRQ_INTERNAL_BASE + 6) | 529 | #define BCM_6358_ENET1_IRQ (IRQ_INTERNAL_BASE + 6) |
| 515 | #define BCM_6358_ENET0_IRQ (IRQ_INTERNAL_BASE + 8) | 530 | #define BCM_6358_ENET0_IRQ (IRQ_INTERNAL_BASE + 8) |
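With RSET_UART1, per-SoC UART1 base addresses and BCM_6358_UART1_IRQ in place (the 0xdeadbeef bases mark chips that have no second UART), the extra port can be looked up at runtime like any other block. A small sketch, assuming bcm63xx_get_irq_number() is the IRQ lookup helper provided by this header set:

    /* Sketch: query the second UART's register base and interrupt line. */
    static void show_uart1_resources(void)
    {
            unsigned long base = bcm63xx_regset_address(RSET_UART1);
            int irq = bcm63xx_get_irq_number(IRQ_UART1);

            printk(KERN_INFO "bcm63xx: UART1 at 0x%08lx, irq %d\n", base, irq);
    }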
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h new file mode 100644 index 000000000000..23c705baf171 --- /dev/null +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h | |||
| @@ -0,0 +1,6 @@ | |||
| 1 | #ifndef BCM63XX_DEV_UART_H_ | ||
| 2 | #define BCM63XX_DEV_UART_H_ | ||
| 3 | |||
| 4 | int bcm63xx_uart_register(unsigned int id); | ||
| 5 | |||
| 6 | #endif /* BCM63XX_DEV_UART_H_ */ | ||
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h index 76a0b7216af5..43d4da0b1e9f 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h | |||
| @@ -10,6 +10,10 @@ static inline unsigned long bcm63xx_gpio_count(void) | |||
| 10 | switch (bcm63xx_get_cpu_id()) { | 10 | switch (bcm63xx_get_cpu_id()) { |
| 11 | case BCM6358_CPU_ID: | 11 | case BCM6358_CPU_ID: |
| 12 | return 40; | 12 | return 40; |
| 13 | case BCM6338_CPU_ID: | ||
| 14 | return 8; | ||
| 15 | case BCM6345_CPU_ID: | ||
| 16 | return 16; | ||
| 13 | case BCM6348_CPU_ID: | 17 | case BCM6348_CPU_ID: |
| 14 | default: | 18 | default: |
| 15 | return 37; | 19 | return 37; |
diff --git a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h index 6479090a4106..474daaa53497 100644 --- a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h +++ b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h | |||
| @@ -45,6 +45,8 @@ struct board_info { | |||
| 45 | unsigned int has_ohci0:1; | 45 | unsigned int has_ohci0:1; |
| 46 | unsigned int has_ehci0:1; | 46 | unsigned int has_ehci0:1; |
| 47 | unsigned int has_dsp:1; | 47 | unsigned int has_dsp:1; |
| 48 | unsigned int has_uart0:1; | ||
| 49 | unsigned int has_uart1:1; | ||
| 48 | 50 | ||
| 49 | /* ethernet config */ | 51 | /* ethernet config */ |
| 50 | struct bcm63xx_enet_platform_data enet0; | 52 | struct bcm63xx_enet_platform_data enet0; |
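The new has_uart0/has_uart1 flags pair up with bcm63xx_uart_register() declared in bcm63xx_dev_uart.h above, so board setup can register only the ports a given board wires up. A plausible probe-time sketch (the function below is illustrative, not code from this patch):

    /* Sketch: register the UARTs enabled in the board description. */
    static void __init register_board_uarts(const struct board_info *board)
    {
            if (board->has_uart0)
                    bcm63xx_uart_register(0);
            if (board->has_uart1)
                    bcm63xx_uart_register(1);
    }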
diff --git a/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h index 71742bac940d..f453c01d0672 100644 --- a/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h | |||
| @@ -24,7 +24,7 @@ | |||
| 24 | #define cpu_has_smartmips 0 | 24 | #define cpu_has_smartmips 0 |
| 25 | #define cpu_has_vtag_icache 0 | 25 | #define cpu_has_vtag_icache 0 |
| 26 | 26 | ||
| 27 | #if !defined(BCMCPU_RUNTIME_DETECT) && (defined(CONFIG_BCMCPU_IS_6348) || defined(CONFIG_CPU_IS_6338) || defined(CONFIG_CPU_IS_BCM6345)) | 27 | #if !defined(BCMCPU_RUNTIME_DETECT) && (defined(CONFIG_BCM63XX_CPU_6348) || defined(CONFIG_BCM63XX_CPU_6345) || defined(CONFIG_BCM63XX_CPU_6338)) |
| 28 | #define cpu_has_dc_aliases 0 | 28 | #define cpu_has_dc_aliases 0 |
| 29 | #endif | 29 | #endif |
| 30 | 30 | ||
diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h index 7950ef4f032c..743385d7b5f2 100644 --- a/arch/mips/include/asm/mach-sibyte/war.h +++ b/arch/mips/include/asm/mach-sibyte/war.h | |||
| @@ -16,7 +16,11 @@ | |||
| 16 | #if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \ | 16 | #if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \ |
| 17 | defined(CONFIG_SB1_PASS_2_WORKAROUNDS) | 17 | defined(CONFIG_SB1_PASS_2_WORKAROUNDS) |
| 18 | 18 | ||
| 19 | #define BCM1250_M3_WAR 1 | 19 | #ifndef __ASSEMBLY__ |
| 20 | extern int sb1250_m3_workaround_needed(void); | ||
| 21 | #endif | ||
| 22 | |||
| 23 | #define BCM1250_M3_WAR sb1250_m3_workaround_needed() | ||
| 20 | #define SIBYTE_1956_WAR 1 | 24 | #define SIBYTE_1956_WAR 1 |
| 21 | 25 | ||
| 22 | #else | 26 | #else |
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h index 4063edd79623..c436138945a8 100644 --- a/arch/mips/include/asm/mmu.h +++ b/arch/mips/include/asm/mmu.h | |||
| @@ -1,6 +1,9 @@ | |||
| 1 | #ifndef __ASM_MMU_H | 1 | #ifndef __ASM_MMU_H |
| 2 | #define __ASM_MMU_H | 2 | #define __ASM_MMU_H |
| 3 | 3 | ||
| 4 | typedef unsigned long mm_context_t[NR_CPUS]; | 4 | typedef struct { |
| 5 | unsigned long asid[NR_CPUS]; | ||
| 6 | void *vdso; | ||
| 7 | } mm_context_t; | ||
| 5 | 8 | ||
| 6 | #endif /* __ASM_MMU_H */ | 9 | #endif /* __ASM_MMU_H */ |
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 145bb81ccaa5..d9592733a7ba 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h | |||
| @@ -104,7 +104,7 @@ extern unsigned long smtc_asid_mask; | |||
| 104 | 104 | ||
| 105 | #endif | 105 | #endif |
| 106 | 106 | ||
| 107 | #define cpu_context(cpu, mm) ((mm)->context[cpu]) | 107 | #define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) |
| 108 | #define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) | 108 | #define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) |
| 109 | #define asid_cache(cpu) (cpu_data[cpu].asid_cache) | 109 | #define asid_cache(cpu) (cpu_data[cpu].asid_cache) |
| 110 | 110 | ||
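mm_context_t grows from a bare per-CPU ASID array into a struct that also carries the per-process vdso base, and cpu_context() is adjusted to read context.asid[cpu] so existing ASID users keep working unchanged. A quick sketch of both access paths after the change (kernel context assumed):

    /* Sketch: the two members of the reshaped MIPS mm context. */
    static unsigned long example_context_use(struct mm_struct *mm, int cpu)
    {
            mm->context.vdso = NULL;        /* filled in at exec time */
            return cpu_asid(cpu, mm);       /* context.asid[cpu] & ASID_MASK */
    }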
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index ac32572430f4..a16beafcea91 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h | |||
| @@ -188,8 +188,10 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
| 188 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ | 188 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ |
| 189 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | 189 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) |
| 190 | 190 | ||
| 191 | #define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE) | 191 | #define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE + \ |
| 192 | #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET) | 192 | PHYS_OFFSET) |
| 193 | #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET - \ | ||
| 194 | PHYS_OFFSET) | ||
| 193 | 195 | ||
| 194 | #include <asm-generic/memory_model.h> | 196 | #include <asm-generic/memory_model.h> |
| 195 | #include <asm-generic/getorder.h> | 197 | #include <asm-generic/getorder.h> |
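Folding PHYS_OFFSET into UNCAC_ADDR()/CAC_ADDR() keeps the cached/uncached translation correct on platforms whose memory does not start at physical address zero, and the two macros remain exact inverses. A small, purely illustrative check of that identity:

    /*
     * UNCAC_ADDR(v) = v - PAGE_OFFSET + UNCAC_BASE + PHYS_OFFSET
     * CAC_ADDR(u)   = u - UNCAC_BASE + PAGE_OFFSET - PHYS_OFFSET
     * so CAC_ADDR(UNCAC_ADDR(v)) == v for any cached kernel address v.
     */
    static int uncac_roundtrip_ok(unsigned long vaddr)
    {
            return CAC_ADDR(UNCAC_ADDR(vaddr)) == vaddr;
    }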
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 087a8884ef06..ab387910009a 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h | |||
| @@ -33,13 +33,19 @@ extern void (*cpu_wait)(void); | |||
| 33 | 33 | ||
| 34 | extern unsigned int vced_count, vcei_count; | 34 | extern unsigned int vced_count, vcei_count; |
| 35 | 35 | ||
| 36 | /* | ||
| 37 | * A special page (the vdso) is mapped into all processes at the very | ||
| 38 | * top of the virtual memory space. | ||
| 39 | */ | ||
| 40 | #define SPECIAL_PAGES_SIZE PAGE_SIZE | ||
| 41 | |||
| 36 | #ifdef CONFIG_32BIT | 42 | #ifdef CONFIG_32BIT |
| 37 | /* | 43 | /* |
| 38 | * User space process size: 2GB. This is hardcoded into a few places, | 44 | * User space process size: 2GB. This is hardcoded into a few places, |
| 39 | * so don't change it unless you know what you are doing. | 45 | * so don't change it unless you know what you are doing. |
| 40 | */ | 46 | */ |
| 41 | #define TASK_SIZE 0x7fff8000UL | 47 | #define TASK_SIZE 0x7fff8000UL |
| 42 | #define STACK_TOP TASK_SIZE | 48 | #define STACK_TOP ((TASK_SIZE & PAGE_MASK) - SPECIAL_PAGES_SIZE) |
| 43 | 49 | ||
| 44 | /* | 50 | /* |
| 45 | * This decides where the kernel will search for a free chunk of vm | 51 | * This decides where the kernel will search for a free chunk of vm |
| @@ -59,7 +65,8 @@ extern unsigned int vced_count, vcei_count; | |||
| 59 | #define TASK_SIZE32 0x7fff8000UL | 65 | #define TASK_SIZE32 0x7fff8000UL |
| 60 | #define TASK_SIZE 0x10000000000UL | 66 | #define TASK_SIZE 0x10000000000UL |
| 61 | #define STACK_TOP \ | 67 | #define STACK_TOP \ |
| 62 | (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE) | 68 | (((test_thread_flag(TIF_32BIT_ADDR) ? \ |
| 69 | TASK_SIZE32 : TASK_SIZE) & PAGE_MASK) - SPECIAL_PAGES_SIZE) | ||
| 63 | 70 | ||
| 64 | /* | 71 | /* |
| 65 | * This decides where the kernel will search for a free chunk of vm | 72 | * This decides where the kernel will search for a free chunk of vm |
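STACK_TOP now sits SPECIAL_PAGES_SIZE (one page) below the page-aligned task size, leaving the very top of user space free for the vdso described in the comment above. Worked through for the 32-bit case, assuming 4 KB pages:

    /*
     * TASK_SIZE = 0x7fff8000 (already page aligned with 4 KB pages)
     * STACK_TOP = 0x7fff8000 - 0x1000 = 0x7fff7000
     * so user stacks start one page lower than before, and the reserved
     * page above STACK_TOP is where the vdso is expected to be mapped.
     */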
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index 3b6da3330e32..c8419129e770 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h | |||
| @@ -121,6 +121,25 @@ | |||
| 121 | .endm | 121 | .endm |
| 122 | #else | 122 | #else |
| 123 | .macro get_saved_sp /* Uniprocessor variation */ | 123 | .macro get_saved_sp /* Uniprocessor variation */ |
| 124 | #ifdef CONFIG_CPU_LOONGSON2F | ||
| 125 | /* | ||
| 126 | * Clear BTB (branch target buffer), forbid RAS (return address | ||
| 127 | * stack) to work around the out-of-order issue in Loongson2F | ||
| 128 | * via its diagnostic register. | ||
| 129 | */ | ||
| 130 | move k0, ra | ||
| 131 | jal 1f | ||
| 132 | nop | ||
| 133 | 1: jal 1f | ||
| 134 | nop | ||
| 135 | 1: jal 1f | ||
| 136 | nop | ||
| 137 | 1: jal 1f | ||
| 138 | nop | ||
| 139 | 1: move ra, k0 | ||
| 140 | li k0, 3 | ||
| 141 | mtc0 k0, $22 | ||
| 142 | #endif /* CONFIG_CPU_LOONGSON2F */ | ||
| 124 | #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) | 143 | #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) |
| 125 | lui k1, %hi(kernelsp) | 144 | lui k1, %hi(kernelsp) |
| 126 | #else | 145 | #else |
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index b99bd07e199b..11a8b5252549 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h | |||
| @@ -84,6 +84,7 @@ Ip_u2s3u1(_lw); | |||
| 84 | Ip_u1u2u3(_mfc0); | 84 | Ip_u1u2u3(_mfc0); |
| 85 | Ip_u1u2u3(_mtc0); | 85 | Ip_u1u2u3(_mtc0); |
| 86 | Ip_u2u1u3(_ori); | 86 | Ip_u2u1u3(_ori); |
| 87 | Ip_u3u1u2(_or); | ||
| 87 | Ip_u2s3u1(_pref); | 88 | Ip_u2s3u1(_pref); |
| 88 | Ip_0(_rfe); | 89 | Ip_0(_rfe); |
| 89 | Ip_u2s3u1(_sc); | 90 | Ip_u2s3u1(_sc); |
| @@ -102,6 +103,7 @@ Ip_0(_tlbwr); | |||
| 102 | Ip_u3u1u2(_xor); | 103 | Ip_u3u1u2(_xor); |
| 103 | Ip_u2u1u3(_xori); | 104 | Ip_u2u1u3(_xori); |
| 104 | Ip_u2u1msbu3(_dins); | 105 | Ip_u2u1msbu3(_dins); |
| 106 | Ip_u1(_syscall); | ||
| 105 | 107 | ||
| 106 | /* Handle labels. */ | 108 | /* Handle labels. */ |
| 107 | struct uasm_label { | 109 | struct uasm_label { |
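The two new ops let run-time generated code include an OR and, more importantly here, a SYSCALL instruction; the latter is what a vdso signal trampoline needs. A sketch of emitting the classic two-instruction sigreturn sequence with uasm, reflecting how the new vdso code is expected to use it (the function name is illustrative):

    /* Sketch: build "li v0, nr; syscall" into a kernel buffer at runtime. */
    static void __init emit_sigreturn_trampoline(u32 *tramp, unsigned int nr)
    {
            u32 *p = tramp;

            uasm_i_addiu(&p, 2, 0, nr);     /* li v0, nr (addiu v0, zero, nr) */
            uasm_i_syscall(&p, 0);          /* trap into the kernel */
    }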
diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h new file mode 100644 index 000000000000..cca56aa40ff4 --- /dev/null +++ b/arch/mips/include/asm/vdso.h | |||
| @@ -0,0 +1,29 @@ | |||
| 1 | /* | ||
| 2 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 3 | * License. See the file "COPYING" in the main directory of this archive | ||
| 4 | * for more details. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2009 Cavium Networks | ||
| 7 | */ | ||
| 8 | |||
| 9 | #ifndef __ASM_VDSO_H | ||
| 10 | #define __ASM_VDSO_H | ||
| 11 | |||
| 12 | #include <linux/types.h> | ||
| 13 | |||
| 14 | |||
| 15 | #ifdef CONFIG_32BIT | ||
| 16 | struct mips_vdso { | ||
| 17 | u32 signal_trampoline[2]; | ||
| 18 | u32 rt_signal_trampoline[2]; | ||
| 19 | }; | ||
| 20 | #else /* !CONFIG_32BIT */ | ||
| 21 | struct mips_vdso { | ||
| 22 | u32 o32_signal_trampoline[2]; | ||
| 23 | u32 o32_rt_signal_trampoline[2]; | ||
| 24 | u32 rt_signal_trampoline[2]; | ||
| 25 | u32 n32_rt_signal_trampoline[2]; | ||
| 26 | }; | ||
| 27 | #endif /* CONFIG_32BIT */ | ||
| 28 | |||
| 29 | #endif /* __ASM_VDSO_H */ | ||
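Each trampoline slot is two u32s, sized for the "li v0, __NR_..._sigreturn; syscall" pair, and the whole structure must fit in the single page reserved below STACK_TOP. A compile-time guard for that assumption could look like this (illustrative, not code from this patch):

    static inline void mips_vdso_size_check(void)
    {
            BUILD_BUG_ON(sizeof(struct mips_vdso) > PAGE_SIZE);
    }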
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index ef20957ca14b..7a6ac501cbb5 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile | |||
| @@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds | |||
| 6 | 6 | ||
| 7 | obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ | 7 | obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ |
| 8 | ptrace.o reset.o setup.o signal.o syscall.o \ | 8 | ptrace.o reset.o setup.o signal.o syscall.o \ |
| 9 | time.o topology.o traps.o unaligned.o watch.o | 9 | time.o topology.o traps.o unaligned.o watch.o vdso.o |
| 10 | 10 | ||
| 11 | ifdef CONFIG_FUNCTION_TRACER | 11 | ifdef CONFIG_FUNCTION_TRACER |
| 12 | CFLAGS_REMOVE_ftrace.o = -pg | 12 | CFLAGS_REMOVE_ftrace.o = -pg |
diff --git a/arch/mips/kernel/cpufreq/loongson2_clock.c b/arch/mips/kernel/cpufreq/loongson2_clock.c index d7ca256e33ef..cefc6e259baf 100644 --- a/arch/mips/kernel/cpufreq/loongson2_clock.c +++ b/arch/mips/kernel/cpufreq/loongson2_clock.c | |||
| @@ -164,3 +164,7 @@ void loongson2_cpu_wait(void) | |||
| 164 | spin_unlock_irqrestore(&loongson2_wait_lock, flags); | 164 | spin_unlock_irqrestore(&loongson2_wait_lock, flags); |
| 165 | } | 165 | } |
| 166 | EXPORT_SYMBOL_GPL(loongson2_cpu_wait); | 166 | EXPORT_SYMBOL_GPL(loongson2_cpu_wait); |
| 167 | |||
| 168 | MODULE_AUTHOR("Yanhua <yanh@lemote.com>"); | ||
| 169 | MODULE_DESCRIPTION("cpufreq driver for Loongson 2F"); | ||
| 170 | MODULE_LICENSE("GPL"); | ||
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 463b71b90a00..99960940d4a4 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
| @@ -63,8 +63,13 @@ void __noreturn cpu_idle(void) | |||
| 63 | 63 | ||
| 64 | smtc_idle_loop_hook(); | 64 | smtc_idle_loop_hook(); |
| 65 | #endif | 65 | #endif |
| 66 | if (cpu_wait) | 66 | |
| 67 | if (cpu_wait) { | ||
| 68 | /* Don't trace irqs off for idle */ | ||
| 69 | stop_critical_timings(); | ||
| 67 | (*cpu_wait)(); | 70 | (*cpu_wait)(); |
| 71 | start_critical_timings(); | ||
| 72 | } | ||
| 68 | } | 73 | } |
| 69 | #ifdef CONFIG_HOTPLUG_CPU | 74 | #ifdef CONFIG_HOTPLUG_CPU |
| 70 | if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) && | 75 | if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) && |
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h index 6c8e8c4246f7..10263b405981 100644 --- a/arch/mips/kernel/signal-common.h +++ b/arch/mips/kernel/signal-common.h | |||
| @@ -26,11 +26,6 @@ | |||
| 26 | */ | 26 | */ |
| 27 | extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | 27 | extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, |
| 28 | size_t frame_size); | 28 | size_t frame_size); |
| 29 | /* | ||
| 30 | * install trampoline code to get back from the sig handler | ||
| 31 | */ | ||
| 32 | extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall); | ||
| 33 | |||
| 34 | /* Check and clear pending FPU exceptions in saved CSR */ | 29 | /* Check and clear pending FPU exceptions in saved CSR */ |
| 35 | extern int fpcsr_pending(unsigned int __user *fpcsr); | 30 | extern int fpcsr_pending(unsigned int __user *fpcsr); |
| 36 | 31 | ||
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index d0c68b5d717b..2099d5a4c4b7 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <asm/ucontext.h> | 32 | #include <asm/ucontext.h> |
| 33 | #include <asm/cpu-features.h> | 33 | #include <asm/cpu-features.h> |
| 34 | #include <asm/war.h> | 34 | #include <asm/war.h> |
| 35 | #include <asm/vdso.h> | ||
| 35 | 36 | ||
| 36 | #include "signal-common.h" | 37 | #include "signal-common.h" |
| 37 | 38 | ||
| @@ -44,47 +45,20 @@ extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc); | |||
| 44 | extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc); | 45 | extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc); |
| 45 | extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc); | 46 | extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc); |
| 46 | 47 | ||
| 47 | /* | ||
| 48 | * Horribly complicated - with the bloody RM9000 workarounds enabled | ||
| 49 | * the signal trampolines is moving to the end of the structure so we can | ||
| 50 | * increase the alignment without breaking software compatibility. | ||
| 51 | */ | ||
| 52 | #if ICACHE_REFILLS_WORKAROUND_WAR == 0 | ||
| 53 | |||
| 54 | struct sigframe { | 48 | struct sigframe { |
| 55 | u32 sf_ass[4]; /* argument save space for o32 */ | 49 | u32 sf_ass[4]; /* argument save space for o32 */ |
| 56 | u32 sf_code[2]; /* signal trampoline */ | 50 | u32 sf_pad[2]; /* Was: signal trampoline */ |
| 57 | struct sigcontext sf_sc; | 51 | struct sigcontext sf_sc; |
| 58 | sigset_t sf_mask; | 52 | sigset_t sf_mask; |
| 59 | }; | 53 | }; |
| 60 | 54 | ||
| 61 | struct rt_sigframe { | 55 | struct rt_sigframe { |
| 62 | u32 rs_ass[4]; /* argument save space for o32 */ | 56 | u32 rs_ass[4]; /* argument save space for o32 */ |
| 63 | u32 rs_code[2]; /* signal trampoline */ | 57 | u32 rs_pad[2]; /* Was: signal trampoline */ |
| 64 | struct siginfo rs_info; | 58 | struct siginfo rs_info; |
| 65 | struct ucontext rs_uc; | 59 | struct ucontext rs_uc; |
| 66 | }; | 60 | }; |
| 67 | 61 | ||
| 68 | #else | ||
| 69 | |||
| 70 | struct sigframe { | ||
| 71 | u32 sf_ass[4]; /* argument save space for o32 */ | ||
| 72 | u32 sf_pad[2]; | ||
| 73 | struct sigcontext sf_sc; /* hw context */ | ||
| 74 | sigset_t sf_mask; | ||
| 75 | u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */ | ||
| 76 | }; | ||
| 77 | |||
| 78 | struct rt_sigframe { | ||
| 79 | u32 rs_ass[4]; /* argument save space for o32 */ | ||
| 80 | u32 rs_pad[2]; | ||
| 81 | struct siginfo rs_info; | ||
| 82 | struct ucontext rs_uc; | ||
| 83 | u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */ | ||
| 84 | }; | ||
| 85 | |||
| 86 | #endif | ||
| 87 | |||
| 88 | /* | 62 | /* |
| 89 | * Helper routines | 63 | * Helper routines |
| 90 | */ | 64 | */ |
| @@ -266,32 +240,6 @@ void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | |||
| 266 | return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK)); | 240 | return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK)); |
| 267 | } | 241 | } |
| 268 | 242 | ||
| 269 | int install_sigtramp(unsigned int __user *tramp, unsigned int syscall) | ||
| 270 | { | ||
| 271 | int err; | ||
| 272 | |||
| 273 | /* | ||
| 274 | * Set up the return code ... | ||
| 275 | * | ||
| 276 | * li v0, __NR__foo_sigreturn | ||
| 277 | * syscall | ||
| 278 | */ | ||
| 279 | |||
| 280 | err = __put_user(0x24020000 + syscall, tramp + 0); | ||
| 281 | err |= __put_user(0x0000000c , tramp + 1); | ||
| 282 | if (ICACHE_REFILLS_WORKAROUND_WAR) { | ||
| 283 | err |= __put_user(0, tramp + 2); | ||
| 284 | err |= __put_user(0, tramp + 3); | ||
| 285 | err |= __put_user(0, tramp + 4); | ||
| 286 | err |= __put_user(0, tramp + 5); | ||
| 287 | err |= __put_user(0, tramp + 6); | ||
| 288 | err |= __put_user(0, tramp + 7); | ||
| 289 | } | ||
| 290 | flush_cache_sigtramp((unsigned long) tramp); | ||
| 291 | |||
| 292 | return err; | ||
| 293 | } | ||
| 294 | |||
| 295 | /* | 243 | /* |
| 296 | * Atomically swap in the new signal mask, and wait for a signal. | 244 | * Atomically swap in the new signal mask, and wait for a signal. |
| 297 | */ | 245 | */ |
| @@ -484,8 +432,8 @@ badframe: | |||
| 484 | } | 432 | } |
| 485 | 433 | ||
| 486 | #ifdef CONFIG_TRAD_SIGNALS | 434 | #ifdef CONFIG_TRAD_SIGNALS |
| 487 | static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs, | 435 | static int setup_frame(void *sig_return, struct k_sigaction *ka, |
| 488 | int signr, sigset_t *set) | 436 | struct pt_regs *regs, int signr, sigset_t *set) |
| 489 | { | 437 | { |
| 490 | struct sigframe __user *frame; | 438 | struct sigframe __user *frame; |
| 491 | int err = 0; | 439 | int err = 0; |
| @@ -494,8 +442,6 @@ static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs, | |||
| 494 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 442 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
| 495 | goto give_sigsegv; | 443 | goto give_sigsegv; |
| 496 | 444 | ||
| 497 | err |= install_sigtramp(frame->sf_code, __NR_sigreturn); | ||
| 498 | |||
| 499 | err |= setup_sigcontext(regs, &frame->sf_sc); | 445 | err |= setup_sigcontext(regs, &frame->sf_sc); |
| 500 | err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); | 446 | err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); |
| 501 | if (err) | 447 | if (err) |
| @@ -515,7 +461,7 @@ static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs, | |||
| 515 | regs->regs[ 5] = 0; | 461 | regs->regs[ 5] = 0; |
| 516 | regs->regs[ 6] = (unsigned long) &frame->sf_sc; | 462 | regs->regs[ 6] = (unsigned long) &frame->sf_sc; |
| 517 | regs->regs[29] = (unsigned long) frame; | 463 | regs->regs[29] = (unsigned long) frame; |
| 518 | regs->regs[31] = (unsigned long) frame->sf_code; | 464 | regs->regs[31] = (unsigned long) sig_return; |
| 519 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; | 465 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; |
| 520 | 466 | ||
| 521 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | 467 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", |
| @@ -529,8 +475,9 @@ give_sigsegv: | |||
| 529 | } | 475 | } |
| 530 | #endif | 476 | #endif |
| 531 | 477 | ||
| 532 | static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, | 478 | static int setup_rt_frame(void *sig_return, struct k_sigaction *ka, |
| 533 | int signr, sigset_t *set, siginfo_t *info) | 479 | struct pt_regs *regs, int signr, sigset_t *set, |
| 480 | siginfo_t *info) | ||
| 534 | { | 481 | { |
| 535 | struct rt_sigframe __user *frame; | 482 | struct rt_sigframe __user *frame; |
| 536 | int err = 0; | 483 | int err = 0; |
| @@ -539,8 +486,6 @@ static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, | |||
| 539 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 486 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
| 540 | goto give_sigsegv; | 487 | goto give_sigsegv; |
| 541 | 488 | ||
| 542 | err |= install_sigtramp(frame->rs_code, __NR_rt_sigreturn); | ||
| 543 | |||
| 544 | /* Create siginfo. */ | 489 | /* Create siginfo. */ |
| 545 | err |= copy_siginfo_to_user(&frame->rs_info, info); | 490 | err |= copy_siginfo_to_user(&frame->rs_info, info); |
| 546 | 491 | ||
| @@ -573,7 +518,7 @@ static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, | |||
| 573 | regs->regs[ 5] = (unsigned long) &frame->rs_info; | 518 | regs->regs[ 5] = (unsigned long) &frame->rs_info; |
| 574 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; | 519 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; |
| 575 | regs->regs[29] = (unsigned long) frame; | 520 | regs->regs[29] = (unsigned long) frame; |
| 576 | regs->regs[31] = (unsigned long) frame->rs_code; | 521 | regs->regs[31] = (unsigned long) sig_return; |
| 577 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; | 522 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; |
| 578 | 523 | ||
| 579 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | 524 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", |
| @@ -590,8 +535,11 @@ give_sigsegv: | |||
| 590 | struct mips_abi mips_abi = { | 535 | struct mips_abi mips_abi = { |
| 591 | #ifdef CONFIG_TRAD_SIGNALS | 536 | #ifdef CONFIG_TRAD_SIGNALS |
| 592 | .setup_frame = setup_frame, | 537 | .setup_frame = setup_frame, |
| 538 | .signal_return_offset = offsetof(struct mips_vdso, signal_trampoline), | ||
| 593 | #endif | 539 | #endif |
| 594 | .setup_rt_frame = setup_rt_frame, | 540 | .setup_rt_frame = setup_rt_frame, |
| 541 | .rt_signal_return_offset = | ||
| 542 | offsetof(struct mips_vdso, rt_signal_trampoline), | ||
| 595 | .restart = __NR_restart_syscall | 543 | .restart = __NR_restart_syscall |
| 596 | }; | 544 | }; |
| 597 | 545 | ||
| @@ -599,6 +547,8 @@ static int handle_signal(unsigned long sig, siginfo_t *info, | |||
| 599 | struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) | 547 | struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) |
| 600 | { | 548 | { |
| 601 | int ret; | 549 | int ret; |
| 550 | struct mips_abi *abi = current->thread.abi; | ||
| 551 | void *vdso = current->mm->context.vdso; | ||
| 602 | 552 | ||
| 603 | switch(regs->regs[0]) { | 553 | switch(regs->regs[0]) { |
| 604 | case ERESTART_RESTARTBLOCK: | 554 | case ERESTART_RESTARTBLOCK: |
| @@ -619,9 +569,11 @@ static int handle_signal(unsigned long sig, siginfo_t *info, | |||
| 619 | regs->regs[0] = 0; /* Don't deal with this again. */ | 569 | regs->regs[0] = 0; /* Don't deal with this again. */ |
| 620 | 570 | ||
| 621 | if (sig_uses_siginfo(ka)) | 571 | if (sig_uses_siginfo(ka)) |
| 622 | ret = current->thread.abi->setup_rt_frame(ka, regs, sig, oldset, info); | 572 | ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset, |
| 573 | ka, regs, sig, oldset, info); | ||
| 623 | else | 574 | else |
| 624 | ret = current->thread.abi->setup_frame(ka, regs, sig, oldset); | 575 | ret = abi->setup_frame(vdso + abi->signal_return_offset, |
| 576 | ka, regs, sig, oldset); | ||
| 625 | 577 | ||
| 626 | spin_lock_irq(&current->sighand->siglock); | 578 | spin_lock_irq(&current->sighand->siglock); |
| 627 | sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); | 579 | sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); |
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 03abaf048f09..a0ed0e052b2e 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <asm/system.h> | 32 | #include <asm/system.h> |
| 33 | #include <asm/fpu.h> | 33 | #include <asm/fpu.h> |
| 34 | #include <asm/war.h> | 34 | #include <asm/war.h> |
| 35 | #include <asm/vdso.h> | ||
| 35 | 36 | ||
| 36 | #include "signal-common.h" | 37 | #include "signal-common.h" |
| 37 | 38 | ||
| @@ -47,8 +48,6 @@ extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user | |||
| 47 | /* | 48 | /* |
| 48 | * Including <asm/unistd.h> would give us the 64-bit syscall numbers ... | 49 | * Including <asm/unistd.h> would give us the 64-bit syscall numbers ... |
| 49 | */ | 50 | */ |
| 50 | #define __NR_O32_sigreturn 4119 | ||
| 51 | #define __NR_O32_rt_sigreturn 4193 | ||
| 52 | #define __NR_O32_restart_syscall 4253 | 51 | #define __NR_O32_restart_syscall 4253 |
| 53 | 52 | ||
| 54 | /* 32-bit compatibility types */ | 53 | /* 32-bit compatibility types */ |
| @@ -77,47 +76,20 @@ struct ucontext32 { | |||
| 77 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ | 76 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ |
| 78 | }; | 77 | }; |
| 79 | 78 | ||
| 80 | /* | ||
| 81 | * Horribly complicated - with the bloody RM9000 workarounds enabled | ||
| 82 | * the signal trampolines is moving to the end of the structure so we can | ||
| 83 | * increase the alignment without breaking software compatibility. | ||
| 84 | */ | ||
| 85 | #if ICACHE_REFILLS_WORKAROUND_WAR == 0 | ||
| 86 | |||
| 87 | struct sigframe32 { | 79 | struct sigframe32 { |
| 88 | u32 sf_ass[4]; /* argument save space for o32 */ | 80 | u32 sf_ass[4]; /* argument save space for o32 */ |
| 89 | u32 sf_code[2]; /* signal trampoline */ | 81 | u32 sf_pad[2]; /* Was: signal trampoline */ |
| 90 | struct sigcontext32 sf_sc; | 82 | struct sigcontext32 sf_sc; |
| 91 | compat_sigset_t sf_mask; | 83 | compat_sigset_t sf_mask; |
| 92 | }; | 84 | }; |
| 93 | 85 | ||
| 94 | struct rt_sigframe32 { | 86 | struct rt_sigframe32 { |
| 95 | u32 rs_ass[4]; /* argument save space for o32 */ | 87 | u32 rs_ass[4]; /* argument save space for o32 */ |
| 96 | u32 rs_code[2]; /* signal trampoline */ | 88 | u32 rs_pad[2]; /* Was: signal trampoline */ |
| 97 | compat_siginfo_t rs_info; | 89 | compat_siginfo_t rs_info; |
| 98 | struct ucontext32 rs_uc; | 90 | struct ucontext32 rs_uc; |
| 99 | }; | 91 | }; |
| 100 | 92 | ||
| 101 | #else /* ICACHE_REFILLS_WORKAROUND_WAR */ | ||
| 102 | |||
| 103 | struct sigframe32 { | ||
| 104 | u32 sf_ass[4]; /* argument save space for o32 */ | ||
| 105 | u32 sf_pad[2]; | ||
| 106 | struct sigcontext32 sf_sc; /* hw context */ | ||
| 107 | compat_sigset_t sf_mask; | ||
| 108 | u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */ | ||
| 109 | }; | ||
| 110 | |||
| 111 | struct rt_sigframe32 { | ||
| 112 | u32 rs_ass[4]; /* argument save space for o32 */ | ||
| 113 | u32 rs_pad[2]; | ||
| 114 | compat_siginfo_t rs_info; | ||
| 115 | struct ucontext32 rs_uc; | ||
| 116 | u32 rs_code[8] __attribute__((aligned(32))); /* signal trampoline */ | ||
| 117 | }; | ||
| 118 | |||
| 119 | #endif /* !ICACHE_REFILLS_WORKAROUND_WAR */ | ||
| 120 | |||
| 121 | /* | 93 | /* |
| 122 | * sigcontext handlers | 94 | * sigcontext handlers |
| 123 | */ | 95 | */ |
| @@ -598,8 +570,8 @@ badframe: | |||
| 598 | force_sig(SIGSEGV, current); | 570 | force_sig(SIGSEGV, current); |
| 599 | } | 571 | } |
| 600 | 572 | ||
| 601 | static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs, | 573 | static int setup_frame_32(void *sig_return, struct k_sigaction *ka, |
| 602 | int signr, sigset_t *set) | 574 | struct pt_regs *regs, int signr, sigset_t *set) |
| 603 | { | 575 | { |
| 604 | struct sigframe32 __user *frame; | 576 | struct sigframe32 __user *frame; |
| 605 | int err = 0; | 577 | int err = 0; |
| @@ -608,8 +580,6 @@ static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs, | |||
| 608 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 580 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
| 609 | goto give_sigsegv; | 581 | goto give_sigsegv; |
| 610 | 582 | ||
| 611 | err |= install_sigtramp(frame->sf_code, __NR_O32_sigreturn); | ||
| 612 | |||
| 613 | err |= setup_sigcontext32(regs, &frame->sf_sc); | 583 | err |= setup_sigcontext32(regs, &frame->sf_sc); |
| 614 | err |= __copy_conv_sigset_to_user(&frame->sf_mask, set); | 584 | err |= __copy_conv_sigset_to_user(&frame->sf_mask, set); |
| 615 | 585 | ||
| @@ -630,7 +600,7 @@ static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs, | |||
| 630 | regs->regs[ 5] = 0; | 600 | regs->regs[ 5] = 0; |
| 631 | regs->regs[ 6] = (unsigned long) &frame->sf_sc; | 601 | regs->regs[ 6] = (unsigned long) &frame->sf_sc; |
| 632 | regs->regs[29] = (unsigned long) frame; | 602 | regs->regs[29] = (unsigned long) frame; |
| 633 | regs->regs[31] = (unsigned long) frame->sf_code; | 603 | regs->regs[31] = (unsigned long) sig_return; |
| 634 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; | 604 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; |
| 635 | 605 | ||
| 636 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | 606 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", |
| @@ -644,8 +614,9 @@ give_sigsegv: | |||
| 644 | return -EFAULT; | 614 | return -EFAULT; |
| 645 | } | 615 | } |
| 646 | 616 | ||
| 647 | static int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, | 617 | static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka, |
| 648 | int signr, sigset_t *set, siginfo_t *info) | 618 | struct pt_regs *regs, int signr, sigset_t *set, |
| 619 | siginfo_t *info) | ||
| 649 | { | 620 | { |
| 650 | struct rt_sigframe32 __user *frame; | 621 | struct rt_sigframe32 __user *frame; |
| 651 | int err = 0; | 622 | int err = 0; |
| @@ -655,8 +626,6 @@ static int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, | |||
| 655 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 626 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
| 656 | goto give_sigsegv; | 627 | goto give_sigsegv; |
| 657 | 628 | ||
| 658 | err |= install_sigtramp(frame->rs_code, __NR_O32_rt_sigreturn); | ||
| 659 | |||
| 660 | /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ | 629 | /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ |
| 661 | err |= copy_siginfo_to_user32(&frame->rs_info, info); | 630 | err |= copy_siginfo_to_user32(&frame->rs_info, info); |
| 662 | 631 | ||
| @@ -690,7 +659,7 @@ static int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, | |||
| 690 | regs->regs[ 5] = (unsigned long) &frame->rs_info; | 659 | regs->regs[ 5] = (unsigned long) &frame->rs_info; |
| 691 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; | 660 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; |
| 692 | regs->regs[29] = (unsigned long) frame; | 661 | regs->regs[29] = (unsigned long) frame; |
| 693 | regs->regs[31] = (unsigned long) frame->rs_code; | 662 | regs->regs[31] = (unsigned long) sig_return; |
| 694 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; | 663 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; |
| 695 | 664 | ||
| 696 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | 665 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", |
| @@ -709,7 +678,11 @@ give_sigsegv: | |||
| 709 | */ | 678 | */ |
| 710 | struct mips_abi mips_abi_32 = { | 679 | struct mips_abi mips_abi_32 = { |
| 711 | .setup_frame = setup_frame_32, | 680 | .setup_frame = setup_frame_32, |
| 681 | .signal_return_offset = | ||
| 682 | offsetof(struct mips_vdso, o32_signal_trampoline), | ||
| 712 | .setup_rt_frame = setup_rt_frame_32, | 683 | .setup_rt_frame = setup_rt_frame_32, |
| 684 | .rt_signal_return_offset = | ||
| 685 | offsetof(struct mips_vdso, o32_rt_signal_trampoline), | ||
| 713 | .restart = __NR_O32_restart_syscall | 686 | .restart = __NR_O32_restart_syscall |
| 714 | }; | 687 | }; |
| 715 | 688 | ||
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index bb277e82d421..2c5df818c65a 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c | |||
| @@ -39,13 +39,13 @@ | |||
| 39 | #include <asm/fpu.h> | 39 | #include <asm/fpu.h> |
| 40 | #include <asm/cpu-features.h> | 40 | #include <asm/cpu-features.h> |
| 41 | #include <asm/war.h> | 41 | #include <asm/war.h> |
| 42 | #include <asm/vdso.h> | ||
| 42 | 43 | ||
| 43 | #include "signal-common.h" | 44 | #include "signal-common.h" |
| 44 | 45 | ||
| 45 | /* | 46 | /* |
| 46 | * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... | 47 | * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... |
| 47 | */ | 48 | */ |
| 48 | #define __NR_N32_rt_sigreturn 6211 | ||
| 49 | #define __NR_N32_restart_syscall 6214 | 49 | #define __NR_N32_restart_syscall 6214 |
| 50 | 50 | ||
| 51 | extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *); | 51 | extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *); |
| @@ -67,27 +67,13 @@ struct ucontextn32 { | |||
| 67 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ | 67 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ |
| 68 | }; | 68 | }; |
| 69 | 69 | ||
| 70 | #if ICACHE_REFILLS_WORKAROUND_WAR == 0 | ||
| 71 | |||
| 72 | struct rt_sigframe_n32 { | ||
| 73 | u32 rs_ass[4]; /* argument save space for o32 */ | ||
| 74 | u32 rs_code[2]; /* signal trampoline */ | ||
| 75 | struct compat_siginfo rs_info; | ||
| 76 | struct ucontextn32 rs_uc; | ||
| 77 | }; | ||
| 78 | |||
| 79 | #else /* ICACHE_REFILLS_WORKAROUND_WAR */ | ||
| 80 | |||
| 81 | struct rt_sigframe_n32 { | 70 | struct rt_sigframe_n32 { |
| 82 | u32 rs_ass[4]; /* argument save space for o32 */ | 71 | u32 rs_ass[4]; /* argument save space for o32 */ |
| 83 | u32 rs_pad[2]; | 72 | u32 rs_pad[2]; /* Was: signal trampoline */ |
| 84 | struct compat_siginfo rs_info; | 73 | struct compat_siginfo rs_info; |
| 85 | struct ucontextn32 rs_uc; | 74 | struct ucontextn32 rs_uc; |
| 86 | u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */ | ||
| 87 | }; | 75 | }; |
| 88 | 76 | ||
| 89 | #endif /* !ICACHE_REFILLS_WORKAROUND_WAR */ | ||
| 90 | |||
| 91 | extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat); | 77 | extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat); |
| 92 | 78 | ||
| 93 | asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) | 79 | asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) |
| @@ -173,7 +159,7 @@ badframe: | |||
| 173 | force_sig(SIGSEGV, current); | 159 | force_sig(SIGSEGV, current); |
| 174 | } | 160 | } |
| 175 | 161 | ||
| 176 | static int setup_rt_frame_n32(struct k_sigaction * ka, | 162 | static int setup_rt_frame_n32(void *sig_return, struct k_sigaction *ka, |
| 177 | struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) | 163 | struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) |
| 178 | { | 164 | { |
| 179 | struct rt_sigframe_n32 __user *frame; | 165 | struct rt_sigframe_n32 __user *frame; |
| @@ -184,8 +170,6 @@ static int setup_rt_frame_n32(struct k_sigaction * ka, | |||
| 184 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 170 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
| 185 | goto give_sigsegv; | 171 | goto give_sigsegv; |
| 186 | 172 | ||
| 187 | install_sigtramp(frame->rs_code, __NR_N32_rt_sigreturn); | ||
| 188 | |||
| 189 | /* Create siginfo. */ | 173 | /* Create siginfo. */ |
| 190 | err |= copy_siginfo_to_user32(&frame->rs_info, info); | 174 | err |= copy_siginfo_to_user32(&frame->rs_info, info); |
| 191 | 175 | ||
| @@ -219,7 +203,7 @@ static int setup_rt_frame_n32(struct k_sigaction * ka, | |||
| 219 | regs->regs[ 5] = (unsigned long) &frame->rs_info; | 203 | regs->regs[ 5] = (unsigned long) &frame->rs_info; |
| 220 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; | 204 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; |
| 221 | regs->regs[29] = (unsigned long) frame; | 205 | regs->regs[29] = (unsigned long) frame; |
| 222 | regs->regs[31] = (unsigned long) frame->rs_code; | 206 | regs->regs[31] = (unsigned long) sig_return; |
| 223 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; | 207 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; |
| 224 | 208 | ||
| 225 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | 209 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", |
| @@ -235,5 +219,7 @@ give_sigsegv: | |||
| 235 | 219 | ||
| 236 | struct mips_abi mips_abi_n32 = { | 220 | struct mips_abi mips_abi_n32 = { |
| 237 | .setup_rt_frame = setup_rt_frame_n32, | 221 | .setup_rt_frame = setup_rt_frame_n32, |
| 222 | .rt_signal_return_offset = | ||
| 223 | offsetof(struct mips_vdso, n32_rt_signal_trampoline), | ||
| 238 | .restart = __NR_N32_restart_syscall | 224 | .restart = __NR_N32_restart_syscall |
| 239 | }; | 225 | }; |
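The new signal_return_offset/rt_signal_return_offset members are byte offsets into struct mips_vdso, not user addresses. The code that turns them into the trampoline pointer handed to the setup_*frame() helpers as sig_return is not part of these hunks, but it presumably adds the offset to the per-process vdso mapping recorded by arch_setup_additional_pages() in the new vdso.c below. A hedged sketch of that step (vdso_trampoline() is my own illustration, not the patch's code):

/*
 * Hedged sketch, not taken from the patch: resolve the per-ABI trampoline
 * address from the process' vdso mapping and the new offsetof()-based
 * fields in struct mips_abi.
 */
static void __user *vdso_trampoline(struct mm_struct *mm,
				    const struct mips_abi *abi, int rt)
{
	unsigned long base = (unsigned long)mm->context.vdso;
	unsigned long off = rt ? abi->rt_signal_return_offset
			       : abi->signal_return_offset;

	return (void __user *)(base + off);
}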
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 25e825aea327..a95dea5459c4 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
| @@ -182,7 +182,7 @@ static int vpemask[2][8] = { | |||
| 182 | {0, 0, 0, 0, 0, 0, 0, 1} | 182 | {0, 0, 0, 0, 0, 0, 0, 1} |
| 183 | }; | 183 | }; |
| 184 | int tcnoprog[NR_CPUS]; | 184 | int tcnoprog[NR_CPUS]; |
| 185 | static atomic_t idle_hook_initialized = {0}; | 185 | static atomic_t idle_hook_initialized = ATOMIC_INIT(0); |
| 186 | static int clock_hang_reported[NR_CPUS]; | 186 | static int clock_hang_reported[NR_CPUS]; |
| 187 | 187 | ||
| 188 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ | 188 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ |
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 9587abc67f35..dd81b0f87518 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c | |||
| @@ -79,7 +79,11 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
| 79 | int do_color_align; | 79 | int do_color_align; |
| 80 | unsigned long task_size; | 80 | unsigned long task_size; |
| 81 | 81 | ||
| 82 | task_size = STACK_TOP; | 82 | #ifdef CONFIG_32BIT |
| 83 | task_size = TASK_SIZE; | ||
| 84 | #else /* Must be CONFIG_64BIT*/ | ||
| 85 | task_size = test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE; | ||
| 86 | #endif | ||
| 83 | 87 | ||
| 84 | if (len > task_size) | 88 | if (len > task_size) |
| 85 | return -ENOMEM; | 89 | return -ENOMEM; |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 4e00f9bc23ee..1a4dd657ccb9 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
| @@ -1599,7 +1599,7 @@ void __init trap_init(void) | |||
| 1599 | ebase = (unsigned long) | 1599 | ebase = (unsigned long) |
| 1600 | __alloc_bootmem(size, 1 << fls(size), 0); | 1600 | __alloc_bootmem(size, 1 << fls(size), 0); |
| 1601 | } else { | 1601 | } else { |
| 1602 | ebase = CAC_BASE; | 1602 | ebase = CKSEG0; |
| 1603 | if (cpu_has_mips_r2) | 1603 | if (cpu_has_mips_r2) |
| 1604 | ebase += (read_c0_ebase() & 0x3ffff000); | 1604 | ebase += (read_c0_ebase() & 0x3ffff000); |
| 1605 | } | 1605 | } |
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c new file mode 100644 index 000000000000..b773c1112b14 --- /dev/null +++ b/arch/mips/kernel/vdso.c | |||
| @@ -0,0 +1,112 @@ | |||
| 1 | /* | ||
| 2 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 3 | * License. See the file "COPYING" in the main directory of this archive | ||
| 4 | * for more details. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2009, 2010 Cavium Networks, Inc. | ||
| 7 | */ | ||
| 8 | |||
| 9 | |||
| 10 | #include <linux/kernel.h> | ||
| 11 | #include <linux/err.h> | ||
| 12 | #include <linux/sched.h> | ||
| 13 | #include <linux/mm.h> | ||
| 14 | #include <linux/init.h> | ||
| 15 | #include <linux/binfmts.h> | ||
| 16 | #include <linux/elf.h> | ||
| 17 | #include <linux/vmalloc.h> | ||
| 18 | #include <linux/unistd.h> | ||
| 19 | |||
| 20 | #include <asm/vdso.h> | ||
| 21 | #include <asm/uasm.h> | ||
| 22 | |||
| 23 | /* | ||
| 24 | * Including <asm/unistd.h> would give us the 64-bit syscall numbers ... | ||
| 25 | */ | ||
| 26 | #define __NR_O32_sigreturn 4119 | ||
| 27 | #define __NR_O32_rt_sigreturn 4193 | ||
| 28 | #define __NR_N32_rt_sigreturn 6211 | ||
| 29 | |||
| 30 | static struct page *vdso_page; | ||
| 31 | |||
| 32 | static void __init install_trampoline(u32 *tramp, unsigned int sigreturn) | ||
| 33 | { | ||
| 34 | uasm_i_addiu(&tramp, 2, 0, sigreturn); /* li v0, sigreturn */ | ||
| 35 | uasm_i_syscall(&tramp, 0); | ||
| 36 | } | ||
| 37 | |||
| 38 | static int __init init_vdso(void) | ||
| 39 | { | ||
| 40 | struct mips_vdso *vdso; | ||
| 41 | |||
| 42 | vdso_page = alloc_page(GFP_KERNEL); | ||
| 43 | if (!vdso_page) | ||
| 44 | panic("Cannot allocate vdso"); | ||
| 45 | |||
| 46 | vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL); | ||
| 47 | if (!vdso) | ||
| 48 | panic("Cannot map vdso"); | ||
| 49 | clear_page(vdso); | ||
| 50 | |||
| 51 | install_trampoline(vdso->rt_signal_trampoline, __NR_rt_sigreturn); | ||
| 52 | #ifdef CONFIG_32BIT | ||
| 53 | install_trampoline(vdso->signal_trampoline, __NR_sigreturn); | ||
| 54 | #else | ||
| 55 | install_trampoline(vdso->n32_rt_signal_trampoline, | ||
| 56 | __NR_N32_rt_sigreturn); | ||
| 57 | install_trampoline(vdso->o32_signal_trampoline, __NR_O32_sigreturn); | ||
| 58 | install_trampoline(vdso->o32_rt_signal_trampoline, | ||
| 59 | __NR_O32_rt_sigreturn); | ||
| 60 | #endif | ||
| 61 | |||
| 62 | vunmap(vdso); | ||
| 63 | |||
| 64 | pr_notice("init_vdso successful\n"); | ||
| 65 | |||
| 66 | return 0; | ||
| 67 | } | ||
| 68 | device_initcall(init_vdso); | ||
| 69 | |||
| 70 | static unsigned long vdso_addr(unsigned long start) | ||
| 71 | { | ||
| 72 | return STACK_TOP; | ||
| 73 | } | ||
| 74 | |||
| 75 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | ||
| 76 | { | ||
| 77 | int ret; | ||
| 78 | unsigned long addr; | ||
| 79 | struct mm_struct *mm = current->mm; | ||
| 80 | |||
| 81 | down_write(&mm->mmap_sem); | ||
| 82 | |||
| 83 | addr = vdso_addr(mm->start_stack); | ||
| 84 | |||
| 85 | addr = get_unmapped_area(NULL, addr, PAGE_SIZE, 0, 0); | ||
| 86 | if (IS_ERR_VALUE(addr)) { | ||
| 87 | ret = addr; | ||
| 88 | goto up_fail; | ||
| 89 | } | ||
| 90 | |||
| 91 | ret = install_special_mapping(mm, addr, PAGE_SIZE, | ||
| 92 | VM_READ|VM_EXEC| | ||
| 93 | VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| | ||
| 94 | VM_ALWAYSDUMP, | ||
| 95 | &vdso_page); | ||
| 96 | |||
| 97 | if (ret) | ||
| 98 | goto up_fail; | ||
| 99 | |||
| 100 | mm->context.vdso = (void *)addr; | ||
| 101 | |||
| 102 | up_fail: | ||
| 103 | up_write(&mm->mmap_sem); | ||
| 104 | return ret; | ||
| 105 | } | ||
| 106 | |||
| 107 | const char *arch_vma_name(struct vm_area_struct *vma) | ||
| 108 | { | ||
| 109 | if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) | ||
| 110 | return "[vdso]"; | ||
| 111 | return NULL; | ||
| 112 | } | ||
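install_trampoline() emits a two-instruction "li v0, <nr>; syscall" sequence through uasm. Assuming the standard MIPS encodings for addiu (opcode 0x09, here with rs = zero and rt = v0) and syscall (SPECIAL, function 0x0c), the o32 sigreturn trampoline (__NR_O32_sigreturn = 4119) should come out as the two words printed below; this little host-side program is my own check, not kernel code:

#include <stdio.h>

int main(void)
{
	unsigned int nr = 4119;		/* __NR_O32_sigreturn from above */
	/* addiu v0, zero, nr : opcode 0x09, rs = 0 (zero), rt = 2 (v0) */
	unsigned int addiu = (0x09u << 26) | (0u << 21) | (2u << 16) | (nr & 0xffff);
	unsigned int syscall_insn = 0x0000000cu;	/* syscall, code field 0 */

	printf("li v0, %u -> %#010x\n", nr, addiu);	/* expect 0x24021017 */
	printf("syscall    -> %#010x\n", syscall_insn);
	return 0;
}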
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c index 6b3b1de9dcae..5995969e8c42 100644 --- a/arch/mips/lib/delay.c +++ b/arch/mips/lib/delay.c | |||
| @@ -41,7 +41,7 @@ EXPORT_SYMBOL(__delay); | |||
| 41 | 41 | ||
| 42 | void __udelay(unsigned long us) | 42 | void __udelay(unsigned long us) |
| 43 | { | 43 | { |
| 44 | unsigned int lpj = current_cpu_data.udelay_val; | 44 | unsigned int lpj = raw_current_cpu_data.udelay_val; |
| 45 | 45 | ||
| 46 | __delay((us * 0x000010c7ull * HZ * lpj) >> 32); | 46 | __delay((us * 0x000010c7ull * HZ * lpj) >> 32); |
| 47 | } | 47 | } |
| @@ -49,7 +49,7 @@ EXPORT_SYMBOL(__udelay); | |||
| 49 | 49 | ||
| 50 | void __ndelay(unsigned long ns) | 50 | void __ndelay(unsigned long ns) |
| 51 | { | 51 | { |
| 52 | unsigned int lpj = current_cpu_data.udelay_val; | 52 | unsigned int lpj = raw_current_cpu_data.udelay_val; |
| 53 | 53 | ||
| 54 | __delay((ns * 0x00000005ull * HZ * lpj) >> 32); | 54 | __delay((ns * 0x00000005ull * HZ * lpj) >> 32); |
| 55 | } | 55 | } |
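Unrelated to the raw_current_cpu_data change itself, the magic constants here are 32-bit reciprocal multipliers: 0x10c7 = 4295, roughly 2^32 / 10^6, and 0x5 roughly 2^32 / 10^9, both rounded up so delays err on the long side. The expression therefore approximates us * HZ * lpj / 10^6 loops, that is, the number of __delay() iterations covering us microseconds at lpj loops per jiffy. A quick host-side check of that arithmetic (my own sketch, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long long hz = 250, lpj = 1000000, us = 100;

	/* the expression used by __udelay() above */
	unsigned long long loops = (us * 0x10c7ULL * hz * lpj) >> 32;
	/* the exact value it approximates */
	unsigned long long exact = us * hz * lpj / 1000000ULL;

	printf("loops=%llu exact=%llu\n", loops, exact);	/* 25000 vs 25000 */
	return 0;
}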
diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h index 3f19d1c5d942..05909d58e2fe 100644 --- a/arch/mips/lib/libgcc.h +++ b/arch/mips/lib/libgcc.h | |||
| @@ -17,8 +17,7 @@ struct DWstruct { | |||
| 17 | #error I feel sick. | 17 | #error I feel sick. |
| 18 | #endif | 18 | #endif |
| 19 | 19 | ||
| 20 | typedef union | 20 | typedef union { |
| 21 | { | ||
| 22 | struct DWstruct s; | 21 | struct DWstruct s; |
| 23 | long long ll; | 22 | long long ll; |
| 24 | } DWunion; | 23 | } DWunion; |
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index be8627bc5b02..12af739048fa 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c | |||
| @@ -133,7 +133,7 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address, | |||
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | unsigned long _page_cachable_default; | 135 | unsigned long _page_cachable_default; |
| 136 | EXPORT_SYMBOL_GPL(_page_cachable_default); | 136 | EXPORT_SYMBOL(_page_cachable_default); |
| 137 | 137 | ||
| 138 | static inline void setup_protection_map(void) | 138 | static inline void setup_protection_map(void) |
| 139 | { | 139 | { |
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 0de0e4127d66..d1f68aadbc4c 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
| @@ -788,10 +788,15 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) | |||
| 788 | * create the plain linear handler | 788 | * create the plain linear handler |
| 789 | */ | 789 | */ |
| 790 | if (bcm1250_m3_war()) { | 790 | if (bcm1250_m3_war()) { |
| 791 | UASM_i_MFC0(&p, K0, C0_BADVADDR); | 791 | unsigned int segbits = 44; |
| 792 | UASM_i_MFC0(&p, K1, C0_ENTRYHI); | 792 | |
| 793 | uasm_i_dmfc0(&p, K0, C0_BADVADDR); | ||
| 794 | uasm_i_dmfc0(&p, K1, C0_ENTRYHI); | ||
| 793 | uasm_i_xor(&p, K0, K0, K1); | 795 | uasm_i_xor(&p, K0, K0, K1); |
| 794 | UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1); | 796 | uasm_i_dsrl32(&p, K1, K0, 62 - 32); |
| 797 | uasm_i_dsrl(&p, K0, K0, 12 + 1); | ||
| 798 | uasm_i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32); | ||
| 799 | uasm_i_or(&p, K0, K0, K1); | ||
| 795 | uasm_il_bnez(&p, &r, K0, label_leave); | 800 | uasm_il_bnez(&p, &r, K0, label_leave); |
| 796 | /* No need for uasm_i_nop */ | 801 | /* No need for uasm_i_nop */ |
| 797 | } | 802 | } |
| @@ -1312,10 +1317,15 @@ static void __cpuinit build_r4000_tlb_load_handler(void) | |||
| 1312 | memset(relocs, 0, sizeof(relocs)); | 1317 | memset(relocs, 0, sizeof(relocs)); |
| 1313 | 1318 | ||
| 1314 | if (bcm1250_m3_war()) { | 1319 | if (bcm1250_m3_war()) { |
| 1315 | UASM_i_MFC0(&p, K0, C0_BADVADDR); | 1320 | unsigned int segbits = 44; |
| 1316 | UASM_i_MFC0(&p, K1, C0_ENTRYHI); | 1321 | |
| 1322 | uasm_i_dmfc0(&p, K0, C0_BADVADDR); | ||
| 1323 | uasm_i_dmfc0(&p, K1, C0_ENTRYHI); | ||
| 1317 | uasm_i_xor(&p, K0, K0, K1); | 1324 | uasm_i_xor(&p, K0, K0, K1); |
| 1318 | UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1); | 1325 | uasm_i_dsrl32(&p, K1, K0, 62 - 32); |
| 1326 | uasm_i_dsrl(&p, K0, K0, 12 + 1); | ||
| 1327 | uasm_i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32); | ||
| 1328 | uasm_i_or(&p, K0, K0, K1); | ||
| 1319 | uasm_il_bnez(&p, &r, K0, label_leave); | 1329 | uasm_il_bnez(&p, &r, K0, label_leave); |
| 1320 | /* No need for uasm_i_nop */ | 1330 | /* No need for uasm_i_nop */ |
| 1321 | } | 1331 | } |
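As I read the new shift sequence, the BCM1250 M3 workaround check now compares BadVAddr and EntryHi only in the bits EntryHi actually implements: the R field in bits 63:62 and the VPN2 bits below segbits (44 here), still ignoring bits 12:0 as before. The previous full-width compare also looked at the unimplemented bits 61:44 and could mismatch spuriously. A hedged C rendering of what the generated code appears to compute, assuming 4 KB pages so PAGE_SHIFT + 1 == 13 (my own sketch, not kernel code):

/*
 * Hedged sketch of the generated M3-workaround test.  Returns nonzero when
 * BadVAddr and EntryHi disagree in the implemented address bits, i.e. when
 * the handler should branch to label_leave and bail out of the fast path.
 */
static int m3_war_mismatch(unsigned long badvaddr, unsigned long entryhi)
{
	unsigned long diff = badvaddr ^ entryhi;
	unsigned long region = diff >> 62;		/* bits 63:62 (R field)  */
	unsigned long vpn2 = (diff >> 13) << 33;	/* keeps bits 43:13 only */

	return (region | vpn2) != 0;
}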
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 1581e9852461..611d564fdcf1 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c | |||
| @@ -31,7 +31,8 @@ enum fields { | |||
| 31 | BIMM = 0x040, | 31 | BIMM = 0x040, |
| 32 | JIMM = 0x080, | 32 | JIMM = 0x080, |
| 33 | FUNC = 0x100, | 33 | FUNC = 0x100, |
| 34 | SET = 0x200 | 34 | SET = 0x200, |
| 35 | SCIMM = 0x400 | ||
| 35 | }; | 36 | }; |
| 36 | 37 | ||
| 37 | #define OP_MASK 0x3f | 38 | #define OP_MASK 0x3f |
| @@ -52,6 +53,8 @@ enum fields { | |||
| 52 | #define FUNC_SH 0 | 53 | #define FUNC_SH 0 |
| 53 | #define SET_MASK 0x7 | 54 | #define SET_MASK 0x7 |
| 54 | #define SET_SH 0 | 55 | #define SET_SH 0 |
| 56 | #define SCIMM_MASK 0xfffff | ||
| 57 | #define SCIMM_SH 6 | ||
| 55 | 58 | ||
| 56 | enum opcode { | 59 | enum opcode { |
| 57 | insn_invalid, | 60 | insn_invalid, |
| @@ -61,10 +64,10 @@ enum opcode { | |||
| 61 | insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, | 64 | insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, |
| 62 | insn_dsrl32, insn_drotr, insn_dsubu, insn_eret, insn_j, insn_jal, | 65 | insn_dsrl32, insn_drotr, insn_dsubu, insn_eret, insn_j, insn_jal, |
| 63 | insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, | 66 | insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, |
| 64 | insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd, | 67 | insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd, |
| 65 | insn_sd, insn_sll, insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw, | 68 | insn_sd, insn_sll, insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw, |
| 66 | insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori, | 69 | insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori, |
| 67 | insn_dins | 70 | insn_dins, insn_syscall |
| 68 | }; | 71 | }; |
| 69 | 72 | ||
| 70 | struct insn { | 73 | struct insn { |
| @@ -117,6 +120,7 @@ static struct insn insn_table[] __cpuinitdata = { | |||
| 117 | { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 120 | { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
| 118 | { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, | 121 | { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, |
| 119 | { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, | 122 | { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, |
| 123 | { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, | ||
| 120 | { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, | 124 | { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, |
| 121 | { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 125 | { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
| 122 | { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, | 126 | { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, |
| @@ -136,6 +140,7 @@ static struct insn insn_table[] __cpuinitdata = { | |||
| 136 | { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, | 140 | { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, |
| 137 | { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, | 141 | { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, |
| 138 | { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE }, | 142 | { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE }, |
| 143 | { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM}, | ||
| 139 | { insn_invalid, 0, 0 } | 144 | { insn_invalid, 0, 0 } |
| 140 | }; | 145 | }; |
| 141 | 146 | ||
| @@ -208,6 +213,14 @@ static inline __cpuinit u32 build_jimm(u32 arg) | |||
| 208 | return (arg >> 2) & JIMM_MASK; | 213 | return (arg >> 2) & JIMM_MASK; |
| 209 | } | 214 | } |
| 210 | 215 | ||
| 216 | static inline __cpuinit u32 build_scimm(u32 arg) | ||
| 217 | { | ||
| 218 | if (arg & ~SCIMM_MASK) | ||
| 219 | printk(KERN_WARNING "Micro-assembler field overflow\n"); | ||
| 220 | |||
| 221 | return (arg & SCIMM_MASK) << SCIMM_SH; | ||
| 222 | } | ||
| 223 | |||
| 211 | static inline __cpuinit u32 build_func(u32 arg) | 224 | static inline __cpuinit u32 build_func(u32 arg) |
| 212 | { | 225 | { |
| 213 | if (arg & ~FUNC_MASK) | 226 | if (arg & ~FUNC_MASK) |
| @@ -266,6 +279,8 @@ static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...) | |||
| 266 | op |= build_func(va_arg(ap, u32)); | 279 | op |= build_func(va_arg(ap, u32)); |
| 267 | if (ip->fields & SET) | 280 | if (ip->fields & SET) |
| 268 | op |= build_set(va_arg(ap, u32)); | 281 | op |= build_set(va_arg(ap, u32)); |
| 282 | if (ip->fields & SCIMM) | ||
| 283 | op |= build_scimm(va_arg(ap, u32)); | ||
| 269 | va_end(ap); | 284 | va_end(ap); |
| 270 | 285 | ||
| 271 | **buf = op; | 286 | **buf = op; |
| @@ -373,6 +388,7 @@ I_u2s3u1(_lw) | |||
| 373 | I_u1u2u3(_mfc0) | 388 | I_u1u2u3(_mfc0) |
| 374 | I_u1u2u3(_mtc0) | 389 | I_u1u2u3(_mtc0) |
| 375 | I_u2u1u3(_ori) | 390 | I_u2u1u3(_ori) |
| 391 | I_u3u1u2(_or) | ||
| 376 | I_u2s3u1(_pref) | 392 | I_u2s3u1(_pref) |
| 377 | I_0(_rfe) | 393 | I_0(_rfe) |
| 378 | I_u2s3u1(_sc) | 394 | I_u2s3u1(_sc) |
| @@ -391,6 +407,7 @@ I_0(_tlbwr) | |||
| 391 | I_u3u1u2(_xor) | 407 | I_u3u1u2(_xor) |
| 392 | I_u2u1u3(_xori) | 408 | I_u2u1u3(_xori) |
| 393 | I_u2u1msbu3(_dins); | 409 | I_u2u1msbu3(_dins); |
| 410 | I_u1(_syscall); | ||
| 394 | 411 | ||
| 395 | /* Handle labels. */ | 412 | /* Handle labels. */ |
| 396 | void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid) | 413 | void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid) |
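The new SCIMM field carries the 20-bit code operand that syscall (and similar trap-style instructions) store in bits 25:6, so build_insn() ORs (arg & 0xfffff) << 6 into the SPECIAL/0x0c template. For example, uasm_i_syscall(&p, 0) should emit plain 0x0000000c, and a hypothetical uasm_i_syscall(&p, 0x123) would come out as (0x123 << 6) | 0x0c = 0x000048cc. A tiny host-side check of that encoding (my own sketch, not kernel code):

#include <stdio.h>

/* Mirror of the SCIMM placement added above: 20-bit field at bit 6. */
static unsigned int encode_syscall(unsigned int code)
{
	return ((code & 0xfffff) << 6) | 0x0c;	/* SPECIAL, function 0x0c */
}

int main(void)
{
	printf("syscall 0     -> %#010x\n", encode_syscall(0));		/* 0x0000000c */
	printf("syscall 0x123 -> %#010x\n", encode_syscall(0x123));	/* 0x000048cc */
	return 0;
}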
diff --git a/arch/mips/pci/ops-loongson2.c b/arch/mips/pci/ops-loongson2.c index 2bb4057bf6c7..d657ee0bc131 100644 --- a/arch/mips/pci/ops-loongson2.c +++ b/arch/mips/pci/ops-loongson2.c | |||
| @@ -180,15 +180,21 @@ struct pci_ops loongson_pci_ops = { | |||
| 180 | }; | 180 | }; |
| 181 | 181 | ||
| 182 | #ifdef CONFIG_CS5536 | 182 | #ifdef CONFIG_CS5536 |
| 183 | DEFINE_RAW_SPINLOCK(msr_lock); | ||
| 184 | |||
| 183 | void _rdmsr(u32 msr, u32 *hi, u32 *lo) | 185 | void _rdmsr(u32 msr, u32 *hi, u32 *lo) |
| 184 | { | 186 | { |
| 185 | struct pci_bus bus = { | 187 | struct pci_bus bus = { |
| 186 | .number = PCI_BUS_CS5536 | 188 | .number = PCI_BUS_CS5536 |
| 187 | }; | 189 | }; |
| 188 | u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0); | 190 | u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0); |
| 191 | unsigned long flags; | ||
| 192 | |||
| 193 | raw_spin_lock_irqsave(&msr_lock, flags); | ||
| 189 | loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr); | 194 | loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr); |
| 190 | loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_LO, 4, lo); | 195 | loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_LO, 4, lo); |
| 191 | loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_HI, 4, hi); | 196 | loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_HI, 4, hi); |
| 197 | raw_spin_unlock_irqrestore(&msr_lock, flags); | ||
| 192 | } | 198 | } |
| 193 | EXPORT_SYMBOL(_rdmsr); | 199 | EXPORT_SYMBOL(_rdmsr); |
| 194 | 200 | ||
| @@ -198,9 +204,13 @@ void _wrmsr(u32 msr, u32 hi, u32 lo) | |||
| 198 | .number = PCI_BUS_CS5536 | 204 | .number = PCI_BUS_CS5536 |
| 199 | }; | 205 | }; |
| 200 | u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0); | 206 | u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0); |
| 207 | unsigned long flags; | ||
| 208 | |||
| 209 | raw_spin_lock_irqsave(&msr_lock, flags); | ||
| 201 | loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr); | 210 | loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr); |
| 202 | loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_LO, 4, lo); | 211 | loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_LO, 4, lo); |
| 203 | loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_HI, 4, hi); | 212 | loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_HI, 4, hi); |
| 213 | raw_spin_unlock_irqrestore(&msr_lock, flags); | ||
| 204 | } | 214 | } |
| 205 | EXPORT_SYMBOL(_wrmsr); | 215 | EXPORT_SYMBOL(_wrmsr); |
| 206 | #endif | 216 | #endif |
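The CS5536 MSRs are reached through an indexed protocol: the MSR number is first written to PCI_MSR_ADDR and the data halves are then accessed through PCI_MSR_DATA_LO/HI, so two concurrent callers could previously interleave and hit the wrong register. The new raw spinlock with interrupts disabled makes each _rdmsr()/_wrmsr() sequence atomic; it does not make a read-modify-write spanning both calls atomic, which a caller would still have to serialize itself. A hedged usage sketch (EXAMPLE_CS5536_MSR and EXAMPLE_ENABLE_BIT are made-up placeholders, not real definitions):

/*
 * Hedged sketch: set one bit in a CS5536 MSR.  Each call below is now
 * internally serialized by msr_lock, but the read-modify-write as a whole
 * still needs its own locking if several callers can race here.
 */
static void cs5536_example_enable(void)
{
	u32 hi, lo;

	_rdmsr(EXAMPLE_CS5536_MSR, &hi, &lo);
	_wrmsr(EXAMPLE_CS5536_MSR, hi, lo | EXAMPLE_ENABLE_BIT);
}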
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c index 0444da1e23c2..92da3155ce07 100644 --- a/arch/mips/sibyte/sb1250/setup.c +++ b/arch/mips/sibyte/sb1250/setup.c | |||
| @@ -87,6 +87,21 @@ static int __init setup_bcm1250(void) | |||
| 87 | return ret; | 87 | return ret; |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | int sb1250_m3_workaround_needed(void) | ||
| 91 | { | ||
| 92 | switch (soc_type) { | ||
| 93 | case K_SYS_SOC_TYPE_BCM1250: | ||
| 94 | case K_SYS_SOC_TYPE_BCM1250_ALT: | ||
| 95 | case K_SYS_SOC_TYPE_BCM1250_ALT2: | ||
| 96 | case K_SYS_SOC_TYPE_BCM1125: | ||
| 97 | case K_SYS_SOC_TYPE_BCM1125H: | ||
| 98 | return soc_pass < K_SYS_REVISION_BCM1250_C0; | ||
| 99 | |||
| 100 | default: | ||
| 101 | return 0; | ||
| 102 | } | ||
| 103 | } | ||
| 104 | |||
| 90 | static int __init setup_bcm112x(void) | 105 | static int __init setup_bcm112x(void) |
| 91 | { | 106 | { |
| 92 | int ret = 0; | 107 | int ret = 0; |
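sb1250_m3_workaround_needed() turns the M3 erratum test into a runtime check keyed on SOC type and pass (anything before BCM1250 C0 on the listed parts). The bcm1250_m3_war() predicate used by tlbex.c above presumably ends up wired to it through asm/war.h, which is not part of these hunks; one plausible wiring, purely as a sketch:

/* Hedged sketch of the asm/war.h side, which this hunk does not show. */
extern int sb1250_m3_workaround_needed(void);

#define BCM1250_M3_WAR	sb1250_m3_workaround_needed()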
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 7ae71cc56973..bcd6884985ad 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | # | 1 | # |
| 2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
| 3 | # Linux kernel version: 2.6.33-rc2 | 3 | # Linux kernel version: 2.6.34-rc3 |
| 4 | # Mon Jan 4 09:03:07 2010 | 4 | # Fri Apr 9 09:57:10 2010 |
| 5 | # | 5 | # |
| 6 | CONFIG_SCHED_MC=y | 6 | CONFIG_SCHED_MC=y |
| 7 | CONFIG_MMU=y | 7 | CONFIG_MMU=y |
| @@ -17,6 +17,7 @@ CONFIG_GENERIC_TIME=y | |||
| 17 | CONFIG_GENERIC_TIME_VSYSCALL=y | 17 | CONFIG_GENERIC_TIME_VSYSCALL=y |
| 18 | CONFIG_GENERIC_CLOCKEVENTS=y | 18 | CONFIG_GENERIC_CLOCKEVENTS=y |
| 19 | CONFIG_GENERIC_BUG=y | 19 | CONFIG_GENERIC_BUG=y |
| 20 | CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y | ||
| 20 | CONFIG_NO_IOMEM=y | 21 | CONFIG_NO_IOMEM=y |
| 21 | CONFIG_NO_DMA=y | 22 | CONFIG_NO_DMA=y |
| 22 | CONFIG_GENERIC_LOCKBREAK=y | 23 | CONFIG_GENERIC_LOCKBREAK=y |
| @@ -62,15 +63,11 @@ CONFIG_TREE_RCU=y | |||
| 62 | # CONFIG_RCU_TRACE is not set | 63 | # CONFIG_RCU_TRACE is not set |
| 63 | CONFIG_RCU_FANOUT=64 | 64 | CONFIG_RCU_FANOUT=64 |
| 64 | # CONFIG_RCU_FANOUT_EXACT is not set | 65 | # CONFIG_RCU_FANOUT_EXACT is not set |
| 66 | # CONFIG_RCU_FAST_NO_HZ is not set | ||
| 65 | # CONFIG_TREE_RCU_TRACE is not set | 67 | # CONFIG_TREE_RCU_TRACE is not set |
| 66 | CONFIG_IKCONFIG=y | 68 | CONFIG_IKCONFIG=y |
| 67 | CONFIG_IKCONFIG_PROC=y | 69 | CONFIG_IKCONFIG_PROC=y |
| 68 | CONFIG_LOG_BUF_SHIFT=17 | 70 | CONFIG_LOG_BUF_SHIFT=17 |
| 69 | CONFIG_GROUP_SCHED=y | ||
| 70 | CONFIG_FAIR_GROUP_SCHED=y | ||
| 71 | # CONFIG_RT_GROUP_SCHED is not set | ||
| 72 | CONFIG_USER_SCHED=y | ||
| 73 | # CONFIG_CGROUP_SCHED is not set | ||
| 74 | CONFIG_CGROUPS=y | 71 | CONFIG_CGROUPS=y |
| 75 | # CONFIG_CGROUP_DEBUG is not set | 72 | # CONFIG_CGROUP_DEBUG is not set |
| 76 | CONFIG_CGROUP_NS=y | 73 | CONFIG_CGROUP_NS=y |
| @@ -79,6 +76,7 @@ CONFIG_CGROUP_NS=y | |||
| 79 | # CONFIG_CPUSETS is not set | 76 | # CONFIG_CPUSETS is not set |
| 80 | # CONFIG_CGROUP_CPUACCT is not set | 77 | # CONFIG_CGROUP_CPUACCT is not set |
| 81 | # CONFIG_RESOURCE_COUNTERS is not set | 78 | # CONFIG_RESOURCE_COUNTERS is not set |
| 79 | # CONFIG_CGROUP_SCHED is not set | ||
| 82 | CONFIG_SYSFS_DEPRECATED=y | 80 | CONFIG_SYSFS_DEPRECATED=y |
| 83 | CONFIG_SYSFS_DEPRECATED_V2=y | 81 | CONFIG_SYSFS_DEPRECATED_V2=y |
| 84 | # CONFIG_RELAY is not set | 82 | # CONFIG_RELAY is not set |
| @@ -93,6 +91,7 @@ CONFIG_INITRAMFS_SOURCE="" | |||
| 93 | CONFIG_RD_GZIP=y | 91 | CONFIG_RD_GZIP=y |
| 94 | CONFIG_RD_BZIP2=y | 92 | CONFIG_RD_BZIP2=y |
| 95 | CONFIG_RD_LZMA=y | 93 | CONFIG_RD_LZMA=y |
| 94 | CONFIG_RD_LZO=y | ||
| 96 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 95 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
| 97 | CONFIG_SYSCTL=y | 96 | CONFIG_SYSCTL=y |
| 98 | CONFIG_ANON_INODES=y | 97 | CONFIG_ANON_INODES=y |
| @@ -126,6 +125,7 @@ CONFIG_SLAB=y | |||
| 126 | # CONFIG_SLUB is not set | 125 | # CONFIG_SLUB is not set |
| 127 | # CONFIG_SLOB is not set | 126 | # CONFIG_SLOB is not set |
| 128 | # CONFIG_PROFILING is not set | 127 | # CONFIG_PROFILING is not set |
| 128 | CONFIG_TRACEPOINTS=y | ||
| 129 | CONFIG_HAVE_OPROFILE=y | 129 | CONFIG_HAVE_OPROFILE=y |
| 130 | CONFIG_KPROBES=y | 130 | CONFIG_KPROBES=y |
| 131 | CONFIG_HAVE_SYSCALL_WRAPPERS=y | 131 | CONFIG_HAVE_SYSCALL_WRAPPERS=y |
| @@ -134,6 +134,7 @@ CONFIG_HAVE_KPROBES=y | |||
| 134 | CONFIG_HAVE_KRETPROBES=y | 134 | CONFIG_HAVE_KRETPROBES=y |
| 135 | CONFIG_HAVE_ARCH_TRACEHOOK=y | 135 | CONFIG_HAVE_ARCH_TRACEHOOK=y |
| 136 | CONFIG_USE_GENERIC_SMP_HELPERS=y | 136 | CONFIG_USE_GENERIC_SMP_HELPERS=y |
| 137 | CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y | ||
| 137 | CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES=y | 138 | CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES=y |
| 138 | 139 | ||
| 139 | # | 140 | # |
| @@ -246,6 +247,7 @@ CONFIG_64BIT=y | |||
| 246 | CONFIG_SMP=y | 247 | CONFIG_SMP=y |
| 247 | CONFIG_NR_CPUS=32 | 248 | CONFIG_NR_CPUS=32 |
| 248 | CONFIG_HOTPLUG_CPU=y | 249 | CONFIG_HOTPLUG_CPU=y |
| 250 | # CONFIG_SCHED_BOOK is not set | ||
| 249 | CONFIG_COMPAT=y | 251 | CONFIG_COMPAT=y |
| 250 | CONFIG_SYSVIPC_COMPAT=y | 252 | CONFIG_SYSVIPC_COMPAT=y |
| 251 | CONFIG_AUDIT_ARCH=y | 253 | CONFIG_AUDIT_ARCH=y |
| @@ -345,13 +347,13 @@ CONFIG_PM_SLEEP=y | |||
| 345 | CONFIG_HIBERNATION=y | 347 | CONFIG_HIBERNATION=y |
| 346 | CONFIG_PM_STD_PARTITION="" | 348 | CONFIG_PM_STD_PARTITION="" |
| 347 | # CONFIG_PM_RUNTIME is not set | 349 | # CONFIG_PM_RUNTIME is not set |
| 350 | CONFIG_PM_OPS=y | ||
| 348 | CONFIG_NET=y | 351 | CONFIG_NET=y |
| 349 | 352 | ||
| 350 | # | 353 | # |
| 351 | # Networking options | 354 | # Networking options |
| 352 | # | 355 | # |
| 353 | CONFIG_PACKET=y | 356 | CONFIG_PACKET=y |
| 354 | # CONFIG_PACKET_MMAP is not set | ||
| 355 | CONFIG_UNIX=y | 357 | CONFIG_UNIX=y |
| 356 | CONFIG_XFRM=y | 358 | CONFIG_XFRM=y |
| 357 | # CONFIG_XFRM_USER is not set | 359 | # CONFIG_XFRM_USER is not set |
| @@ -529,6 +531,7 @@ CONFIG_NET_SCH_FIFO=y | |||
| 529 | # | 531 | # |
| 530 | # CONFIG_NET_PKTGEN is not set | 532 | # CONFIG_NET_PKTGEN is not set |
| 531 | # CONFIG_NET_TCPPROBE is not set | 533 | # CONFIG_NET_TCPPROBE is not set |
| 534 | # CONFIG_NET_DROP_MONITOR is not set | ||
| 532 | CONFIG_CAN=m | 535 | CONFIG_CAN=m |
| 533 | CONFIG_CAN_RAW=m | 536 | CONFIG_CAN_RAW=m |
| 534 | CONFIG_CAN_BCM=m | 537 | CONFIG_CAN_BCM=m |
| @@ -605,6 +608,7 @@ CONFIG_MISC_DEVICES=y | |||
| 605 | # | 608 | # |
| 606 | # SCSI device support | 609 | # SCSI device support |
| 607 | # | 610 | # |
| 611 | CONFIG_SCSI_MOD=y | ||
| 608 | # CONFIG_RAID_ATTRS is not set | 612 | # CONFIG_RAID_ATTRS is not set |
| 609 | CONFIG_SCSI=y | 613 | CONFIG_SCSI=y |
| 610 | # CONFIG_SCSI_DMA is not set | 614 | # CONFIG_SCSI_DMA is not set |
| @@ -863,6 +867,7 @@ CONFIG_MISC_FILESYSTEMS=y | |||
| 863 | # CONFIG_BEFS_FS is not set | 867 | # CONFIG_BEFS_FS is not set |
| 864 | # CONFIG_BFS_FS is not set | 868 | # CONFIG_BFS_FS is not set |
| 865 | # CONFIG_EFS_FS is not set | 869 | # CONFIG_EFS_FS is not set |
| 870 | # CONFIG_LOGFS is not set | ||
| 866 | # CONFIG_CRAMFS is not set | 871 | # CONFIG_CRAMFS is not set |
| 867 | # CONFIG_SQUASHFS is not set | 872 | # CONFIG_SQUASHFS is not set |
| 868 | # CONFIG_VXFS_FS is not set | 873 | # CONFIG_VXFS_FS is not set |
| @@ -891,6 +896,7 @@ CONFIG_SUNRPC=y | |||
| 891 | # CONFIG_RPCSEC_GSS_KRB5 is not set | 896 | # CONFIG_RPCSEC_GSS_KRB5 is not set |
| 892 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | 897 | # CONFIG_RPCSEC_GSS_SPKM3 is not set |
| 893 | # CONFIG_SMB_FS is not set | 898 | # CONFIG_SMB_FS is not set |
| 899 | # CONFIG_CEPH_FS is not set | ||
| 894 | # CONFIG_CIFS is not set | 900 | # CONFIG_CIFS is not set |
| 895 | # CONFIG_NCP_FS is not set | 901 | # CONFIG_NCP_FS is not set |
| 896 | # CONFIG_CODA_FS is not set | 902 | # CONFIG_CODA_FS is not set |
| @@ -952,6 +958,7 @@ CONFIG_DEBUG_MUTEXES=y | |||
| 952 | # CONFIG_LOCK_STAT is not set | 958 | # CONFIG_LOCK_STAT is not set |
| 953 | CONFIG_DEBUG_SPINLOCK_SLEEP=y | 959 | CONFIG_DEBUG_SPINLOCK_SLEEP=y |
| 954 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set | 960 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set |
| 961 | CONFIG_STACKTRACE=y | ||
| 955 | # CONFIG_DEBUG_KOBJECT is not set | 962 | # CONFIG_DEBUG_KOBJECT is not set |
| 956 | CONFIG_DEBUG_BUGVERBOSE=y | 963 | CONFIG_DEBUG_BUGVERBOSE=y |
| 957 | # CONFIG_DEBUG_INFO is not set | 964 | # CONFIG_DEBUG_INFO is not set |
| @@ -973,12 +980,17 @@ CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y | |||
| 973 | # CONFIG_LATENCYTOP is not set | 980 | # CONFIG_LATENCYTOP is not set |
| 974 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 981 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
| 975 | # CONFIG_DEBUG_PAGEALLOC is not set | 982 | # CONFIG_DEBUG_PAGEALLOC is not set |
| 983 | CONFIG_NOP_TRACER=y | ||
| 976 | CONFIG_HAVE_FUNCTION_TRACER=y | 984 | CONFIG_HAVE_FUNCTION_TRACER=y |
| 977 | CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y | 985 | CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y |
| 978 | CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y | 986 | CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y |
| 979 | CONFIG_HAVE_DYNAMIC_FTRACE=y | 987 | CONFIG_HAVE_DYNAMIC_FTRACE=y |
| 980 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | 988 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y |
| 981 | CONFIG_HAVE_SYSCALL_TRACEPOINTS=y | 989 | CONFIG_HAVE_SYSCALL_TRACEPOINTS=y |
| 990 | CONFIG_RING_BUFFER=y | ||
| 991 | CONFIG_EVENT_TRACING=y | ||
| 992 | CONFIG_CONTEXT_SWITCH_TRACER=y | ||
| 993 | CONFIG_TRACING=y | ||
| 982 | CONFIG_TRACING_SUPPORT=y | 994 | CONFIG_TRACING_SUPPORT=y |
| 983 | CONFIG_FTRACE=y | 995 | CONFIG_FTRACE=y |
| 984 | # CONFIG_FUNCTION_TRACER is not set | 996 | # CONFIG_FUNCTION_TRACER is not set |
| @@ -995,10 +1007,15 @@ CONFIG_BRANCH_PROFILE_NONE=y | |||
| 995 | # CONFIG_KMEMTRACE is not set | 1007 | # CONFIG_KMEMTRACE is not set |
| 996 | # CONFIG_WORKQUEUE_TRACER is not set | 1008 | # CONFIG_WORKQUEUE_TRACER is not set |
| 997 | # CONFIG_BLK_DEV_IO_TRACE is not set | 1009 | # CONFIG_BLK_DEV_IO_TRACE is not set |
| 1010 | CONFIG_KPROBE_EVENT=y | ||
| 1011 | # CONFIG_RING_BUFFER_BENCHMARK is not set | ||
| 998 | # CONFIG_DYNAMIC_DEBUG is not set | 1012 | # CONFIG_DYNAMIC_DEBUG is not set |
| 999 | CONFIG_SAMPLES=y | 1013 | CONFIG_SAMPLES=y |
| 1014 | # CONFIG_SAMPLE_TRACEPOINTS is not set | ||
| 1015 | # CONFIG_SAMPLE_TRACE_EVENTS is not set | ||
| 1000 | # CONFIG_SAMPLE_KOBJECT is not set | 1016 | # CONFIG_SAMPLE_KOBJECT is not set |
| 1001 | # CONFIG_SAMPLE_KPROBES is not set | 1017 | # CONFIG_SAMPLE_KPROBES is not set |
| 1018 | # CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set | ||
| 1002 | 1019 | ||
| 1003 | # | 1020 | # |
| 1004 | # Security options | 1021 | # Security options |
| @@ -1032,6 +1049,7 @@ CONFIG_CRYPTO_MANAGER=y | |||
| 1032 | CONFIG_CRYPTO_MANAGER2=y | 1049 | CONFIG_CRYPTO_MANAGER2=y |
| 1033 | CONFIG_CRYPTO_GF128MUL=m | 1050 | CONFIG_CRYPTO_GF128MUL=m |
| 1034 | # CONFIG_CRYPTO_NULL is not set | 1051 | # CONFIG_CRYPTO_NULL is not set |
| 1052 | # CONFIG_CRYPTO_PCRYPT is not set | ||
| 1035 | CONFIG_CRYPTO_WORKQUEUE=y | 1053 | CONFIG_CRYPTO_WORKQUEUE=y |
| 1036 | # CONFIG_CRYPTO_CRYPTD is not set | 1054 | # CONFIG_CRYPTO_CRYPTD is not set |
| 1037 | CONFIG_CRYPTO_AUTHENC=m | 1055 | CONFIG_CRYPTO_AUTHENC=m |
| @@ -1119,7 +1137,7 @@ CONFIG_CRYPTO_SHA512_S390=m | |||
| 1119 | # CONFIG_CRYPTO_DES_S390 is not set | 1137 | # CONFIG_CRYPTO_DES_S390 is not set |
| 1120 | # CONFIG_CRYPTO_AES_S390 is not set | 1138 | # CONFIG_CRYPTO_AES_S390 is not set |
| 1121 | CONFIG_S390_PRNG=m | 1139 | CONFIG_S390_PRNG=m |
| 1122 | # CONFIG_BINARY_PRINTF is not set | 1140 | CONFIG_BINARY_PRINTF=y |
| 1123 | 1141 | ||
| 1124 | # | 1142 | # |
| 1125 | # Library routines | 1143 | # Library routines |
| @@ -1136,14 +1154,16 @@ CONFIG_LIBCRC32C=m | |||
| 1136 | CONFIG_ZLIB_INFLATE=y | 1154 | CONFIG_ZLIB_INFLATE=y |
| 1137 | CONFIG_ZLIB_DEFLATE=m | 1155 | CONFIG_ZLIB_DEFLATE=m |
| 1138 | CONFIG_LZO_COMPRESS=m | 1156 | CONFIG_LZO_COMPRESS=m |
| 1139 | CONFIG_LZO_DECOMPRESS=m | 1157 | CONFIG_LZO_DECOMPRESS=y |
| 1140 | CONFIG_DECOMPRESS_GZIP=y | 1158 | CONFIG_DECOMPRESS_GZIP=y |
| 1141 | CONFIG_DECOMPRESS_BZIP2=y | 1159 | CONFIG_DECOMPRESS_BZIP2=y |
| 1142 | CONFIG_DECOMPRESS_LZMA=y | 1160 | CONFIG_DECOMPRESS_LZMA=y |
| 1161 | CONFIG_DECOMPRESS_LZO=y | ||
| 1143 | CONFIG_NLATTR=y | 1162 | CONFIG_NLATTR=y |
| 1144 | CONFIG_HAVE_KVM=y | 1163 | CONFIG_HAVE_KVM=y |
| 1145 | CONFIG_VIRTUALIZATION=y | 1164 | CONFIG_VIRTUALIZATION=y |
| 1146 | CONFIG_KVM=m | 1165 | CONFIG_KVM=m |
| 1166 | # CONFIG_VHOST_NET is not set | ||
| 1147 | CONFIG_VIRTIO=y | 1167 | CONFIG_VIRTIO=y |
| 1148 | CONFIG_VIRTIO_RING=y | 1168 | CONFIG_VIRTIO_RING=y |
| 1149 | CONFIG_VIRTIO_BALLOON=m | 1169 | CONFIG_VIRTIO_BALLOON=m |
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 9b5b9189c15e..89a504c3f12e 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
| @@ -105,7 +105,7 @@ extern char empty_zero_page[PAGE_SIZE]; | |||
| 105 | #ifndef __ASSEMBLY__ | 105 | #ifndef __ASSEMBLY__ |
| 106 | /* | 106 | /* |
| 107 | * The vmalloc area will always be on the topmost area of the kernel | 107 | * The vmalloc area will always be on the topmost area of the kernel |
| 108 | * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc, | 108 | * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc, |
| 109 | * which should be enough for any sane case. | 109 | * which should be enough for any sane case. |
| 110 | * By putting vmalloc at the top, we maximise the gap between physical | 110 | * By putting vmalloc at the top, we maximise the gap between physical |
| 111 | * memory and vmalloc to catch misplaced memory accesses. As a side | 111 | * memory and vmalloc to catch misplaced memory accesses. As a side |
| @@ -120,8 +120,8 @@ extern unsigned long VMALLOC_START; | |||
| 120 | #define VMALLOC_END 0x7e000000UL | 120 | #define VMALLOC_END 0x7e000000UL |
| 121 | #define VMEM_MAP_END 0x80000000UL | 121 | #define VMEM_MAP_END 0x80000000UL |
| 122 | #else /* __s390x__ */ | 122 | #else /* __s390x__ */ |
| 123 | #define VMALLOC_SIZE (1UL << 30) | 123 | #define VMALLOC_SIZE (128UL << 30) |
| 124 | #define VMALLOC_END 0x3e040000000UL | 124 | #define VMALLOC_END 0x3e000000000UL |
| 125 | #define VMEM_MAP_END 0x40000000000UL | 125 | #define VMEM_MAP_END 0x40000000000UL |
| 126 | #endif /* __s390x__ */ | 126 | #endif /* __s390x__ */ |
| 127 | 127 | ||
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 31d618a443af..2d92c2cf92d7 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
| @@ -82,7 +82,8 @@ asm( | |||
| 82 | " lm 6,15,24(15)\n" | 82 | " lm 6,15,24(15)\n" |
| 83 | #endif | 83 | #endif |
| 84 | " br 14\n" | 84 | " br 14\n" |
| 85 | " .size savesys_ipl_nss, .-savesys_ipl_nss\n"); | 85 | " .size savesys_ipl_nss, .-savesys_ipl_nss\n" |
| 86 | " .previous\n"); | ||
| 86 | 87 | ||
| 87 | static __initdata char upper_command_line[COMMAND_LINE_SIZE]; | 88 | static __initdata char upper_command_line[COMMAND_LINE_SIZE]; |
| 88 | 89 | ||
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 4348f9bc5393..6af7045280a8 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
| @@ -964,7 +964,7 @@ cleanup_critical: | |||
| 964 | clc 4(4,%r12),BASED(cleanup_table_io_work_loop) | 964 | clc 4(4,%r12),BASED(cleanup_table_io_work_loop) |
| 965 | bl BASED(0f) | 965 | bl BASED(0f) |
| 966 | clc 4(4,%r12),BASED(cleanup_table_io_work_loop+4) | 966 | clc 4(4,%r12),BASED(cleanup_table_io_work_loop+4) |
| 967 | bl BASED(cleanup_io_return) | 967 | bl BASED(cleanup_io_work_loop) |
| 968 | 0: | 968 | 0: |
| 969 | br %r14 | 969 | br %r14 |
| 970 | 970 | ||
| @@ -1039,6 +1039,12 @@ cleanup_sysc_leave_insn: | |||
| 1039 | 1039 | ||
| 1040 | cleanup_io_return: | 1040 | cleanup_io_return: |
| 1041 | mvc __LC_RETURN_PSW(4),0(%r12) | 1041 | mvc __LC_RETURN_PSW(4),0(%r12) |
| 1042 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_return) | ||
| 1043 | la %r12,__LC_RETURN_PSW | ||
| 1044 | br %r14 | ||
| 1045 | |||
| 1046 | cleanup_io_work_loop: | ||
| 1047 | mvc __LC_RETURN_PSW(4),0(%r12) | ||
| 1042 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop) | 1048 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop) |
| 1043 | la %r12,__LC_RETURN_PSW | 1049 | la %r12,__LC_RETURN_PSW |
| 1044 | br %r14 | 1050 | br %r14 |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 29fd0f1e6ec4..52106d53271c 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
| @@ -946,7 +946,7 @@ cleanup_critical: | |||
| 946 | clc 8(8,%r12),BASED(cleanup_table_io_work_loop) | 946 | clc 8(8,%r12),BASED(cleanup_table_io_work_loop) |
| 947 | jl 0f | 947 | jl 0f |
| 948 | clc 8(8,%r12),BASED(cleanup_table_io_work_loop+8) | 948 | clc 8(8,%r12),BASED(cleanup_table_io_work_loop+8) |
| 949 | jl cleanup_io_return | 949 | jl cleanup_io_work_loop |
| 950 | 0: | 950 | 0: |
| 951 | br %r14 | 951 | br %r14 |
| 952 | 952 | ||
| @@ -1021,6 +1021,12 @@ cleanup_sysc_leave_insn: | |||
| 1021 | 1021 | ||
| 1022 | cleanup_io_return: | 1022 | cleanup_io_return: |
| 1023 | mvc __LC_RETURN_PSW(8),0(%r12) | 1023 | mvc __LC_RETURN_PSW(8),0(%r12) |
| 1024 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_return) | ||
| 1025 | la %r12,__LC_RETURN_PSW | ||
| 1026 | br %r14 | ||
| 1027 | |||
| 1028 | cleanup_io_work_loop: | ||
| 1029 | mvc __LC_RETURN_PSW(8),0(%r12) | ||
| 1024 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop) | 1030 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop) |
| 1025 | la %r12,__LC_RETURN_PSW | 1031 | la %r12,__LC_RETURN_PSW |
| 1026 | br %r14 | 1032 | br %r14 |
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 14ef6f05e432..247b4c2d1e51 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c | |||
| @@ -165,10 +165,11 @@ static void tl_to_cores(struct tl_info *info) | |||
| 165 | default: | 165 | default: |
| 166 | clear_cores(); | 166 | clear_cores(); |
| 167 | machine_has_topology = 0; | 167 | machine_has_topology = 0; |
| 168 | return; | 168 | goto out; |
| 169 | } | 169 | } |
| 170 | tle = next_tle(tle); | 170 | tle = next_tle(tle); |
| 171 | } | 171 | } |
| 172 | out: | ||
| 172 | spin_unlock_irq(&topology_lock); | 173 | spin_unlock_irq(&topology_lock); |
| 173 | } | 174 | } |
| 174 | 175 | ||
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 8ea3144b45b8..90165e7ca04e 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c | |||
| @@ -71,12 +71,8 @@ static pte_t __ref *vmem_pte_alloc(void) | |||
| 71 | pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); | 71 | pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); |
| 72 | if (!pte) | 72 | if (!pte) |
| 73 | return NULL; | 73 | return NULL; |
| 74 | if (MACHINE_HAS_HPAGE) | 74 | clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, |
| 75 | clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO, | 75 | PTRS_PER_PTE * sizeof(pte_t)); |
| 76 | PTRS_PER_PTE * sizeof(pte_t)); | ||
| 77 | else | ||
| 78 | clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, | ||
| 79 | PTRS_PER_PTE * sizeof(pte_t)); | ||
| 80 | return pte; | 76 | return pte; |
| 81 | } | 77 | } |
| 82 | 78 | ||
| @@ -117,8 +113,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) | |||
| 117 | if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) && | 113 | if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) && |
| 118 | (address + HPAGE_SIZE <= start + size) && | 114 | (address + HPAGE_SIZE <= start + size) && |
| 119 | (address >= HPAGE_SIZE)) { | 115 | (address >= HPAGE_SIZE)) { |
| 120 | pte_val(pte) |= _SEGMENT_ENTRY_LARGE | | 116 | pte_val(pte) |= _SEGMENT_ENTRY_LARGE; |
| 121 | _SEGMENT_ENTRY_CO; | ||
| 122 | pmd_val(*pm_dir) = pte_val(pte); | 117 | pmd_val(*pm_dir) = pte_val(pte); |
| 123 | address += HPAGE_SIZE - PAGE_SIZE; | 118 | address += HPAGE_SIZE - PAGE_SIZE; |
| 124 | continue; | 119 | continue; |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 6db513674050..9908d477ccd9 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
| @@ -37,6 +37,9 @@ config SPARC64 | |||
| 37 | def_bool 64BIT | 37 | def_bool 64BIT |
| 38 | select ARCH_SUPPORTS_MSI | 38 | select ARCH_SUPPORTS_MSI |
| 39 | select HAVE_FUNCTION_TRACER | 39 | select HAVE_FUNCTION_TRACER |
| 40 | select HAVE_FUNCTION_GRAPH_TRACER | ||
| 41 | select HAVE_FUNCTION_GRAPH_FP_TEST | ||
| 42 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
| 40 | select HAVE_KRETPROBES | 43 | select HAVE_KRETPROBES |
| 41 | select HAVE_KPROBES | 44 | select HAVE_KPROBES |
| 42 | select HAVE_LMB | 45 | select HAVE_LMB |
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug index 9d3c889718ac..1b4a831565f9 100644 --- a/arch/sparc/Kconfig.debug +++ b/arch/sparc/Kconfig.debug | |||
| @@ -19,13 +19,10 @@ config DEBUG_DCFLUSH | |||
| 19 | bool "D-cache flush debugging" | 19 | bool "D-cache flush debugging" |
| 20 | depends on SPARC64 && DEBUG_KERNEL | 20 | depends on SPARC64 && DEBUG_KERNEL |
| 21 | 21 | ||
| 22 | config STACK_DEBUG | ||
| 23 | bool "Stack Overflow Detection Support" | ||
| 24 | |||
| 25 | config MCOUNT | 22 | config MCOUNT |
| 26 | bool | 23 | bool |
| 27 | depends on SPARC64 | 24 | depends on SPARC64 |
| 28 | depends on STACK_DEBUG || FUNCTION_TRACER | 25 | depends on FUNCTION_TRACER |
| 29 | default y | 26 | default y |
| 30 | 27 | ||
| 31 | config FRAME_POINTER | 28 | config FRAME_POINTER |
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h index 926397d345ff..050ef35b9dcf 100644 --- a/arch/sparc/include/asm/cpudata_64.h +++ b/arch/sparc/include/asm/cpudata_64.h | |||
| @@ -17,7 +17,7 @@ typedef struct { | |||
| 17 | unsigned int __nmi_count; | 17 | unsigned int __nmi_count; |
| 18 | unsigned long clock_tick; /* %tick's per second */ | 18 | unsigned long clock_tick; /* %tick's per second */ |
| 19 | unsigned long __pad; | 19 | unsigned long __pad; |
| 20 | unsigned int __pad1; | 20 | unsigned int irq0_irqs; |
| 21 | unsigned int __pad2; | 21 | unsigned int __pad2; |
| 22 | 22 | ||
| 23 | /* Dcache line 2, rarely used */ | 23 | /* Dcache line 2, rarely used */ |
diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h index 8b49bf920df3..bfa1ea45b4cd 100644 --- a/arch/sparc/include/asm/irqflags_64.h +++ b/arch/sparc/include/asm/irqflags_64.h | |||
| @@ -76,9 +76,26 @@ static inline int raw_irqs_disabled(void) | |||
| 76 | */ | 76 | */ |
| 77 | static inline unsigned long __raw_local_irq_save(void) | 77 | static inline unsigned long __raw_local_irq_save(void) |
| 78 | { | 78 | { |
| 79 | unsigned long flags = __raw_local_save_flags(); | 79 | unsigned long flags, tmp; |
| 80 | 80 | ||
| 81 | raw_local_irq_disable(); | 81 | /* Disable interrupts to PIL_NORMAL_MAX unless we already |
| 82 | * are using PIL_NMI, in which case PIL_NMI is retained. | ||
| 83 | * | ||
| 84 | * The only values we ever program into the %pil are 0, | ||
| 85 | * PIL_NORMAL_MAX and PIL_NMI. | ||
| 86 | * | ||
| 87 | * Since PIL_NMI is the largest %pil value and all bits are | ||
| 88 | * set in it (0xf), it doesn't matter what PIL_NORMAL_MAX | ||
| 89 | * actually is. | ||
| 90 | */ | ||
| 91 | __asm__ __volatile__( | ||
| 92 | "rdpr %%pil, %0\n\t" | ||
| 93 | "or %0, %2, %1\n\t" | ||
| 94 | "wrpr %1, 0x0, %%pil" | ||
| 95 | : "=r" (flags), "=r" (tmp) | ||
| 96 | : "i" (PIL_NORMAL_MAX) | ||
| 97 | : "memory" | ||
| 98 | ); | ||
| 82 | 99 | ||
| 83 | return flags; | 100 | return flags; |
| 84 | } | 101 | } |
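For readers less fluent in the inline assembly, the new __raw_local_irq_save() reads %pil, raises it to at least PIL_NORMAL_MAX (which leaves PIL_NMI untouched, since all of its bits are already set), and returns the old value for the later restore. A hedged C-level rendering, with read_pil()/write_pil() as imaginary stand-ins for the rdpr/wrpr instructions:

/*
 * Hedged C-level sketch of the asm above; read_pil()/write_pil() are
 * hypothetical helpers standing in for "rdpr %pil" and "wrpr ..., %pil".
 */
static inline unsigned long raw_local_irq_save_sketch(void)
{
	unsigned long flags = read_pil();	/* rdpr %%pil, %0          */

	write_pil(flags | PIL_NORMAL_MAX);	/* or + wrpr %1, 0x0, %%pil */
	return flags;				/* old %pil, for restore    */
}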
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index c6316142db4e..0c2dc1f24a9a 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
| @@ -13,6 +13,14 @@ extra-y += init_task.o | |||
| 13 | CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS) | 13 | CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS) |
| 14 | extra-y += vmlinux.lds | 14 | extra-y += vmlinux.lds |
| 15 | 15 | ||
| 16 | ifdef CONFIG_FUNCTION_TRACER | ||
| 17 | # Do not profile debug and lowlevel utilities | ||
| 18 | CFLAGS_REMOVE_ftrace.o := -pg | ||
| 19 | CFLAGS_REMOVE_time_$(BITS).o := -pg | ||
| 20 | CFLAGS_REMOVE_perf_event.o := -pg | ||
| 21 | CFLAGS_REMOVE_pcr.o := -pg | ||
| 22 | endif | ||
| 23 | |||
| 16 | obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o | 24 | obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o |
| 17 | obj-$(CONFIG_SPARC32) += etrap_32.o | 25 | obj-$(CONFIG_SPARC32) += etrap_32.o |
| 18 | obj-$(CONFIG_SPARC32) += rtrap_32.o | 26 | obj-$(CONFIG_SPARC32) += rtrap_32.o |
| @@ -85,7 +93,7 @@ obj-$(CONFIG_KGDB) += kgdb_$(BITS).o | |||
| 85 | 93 | ||
| 86 | 94 | ||
| 87 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 95 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
| 88 | CFLAGS_REMOVE_ftrace.o := -pg | 96 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
| 89 | 97 | ||
| 90 | obj-$(CONFIG_EARLYFB) += btext.o | 98 | obj-$(CONFIG_EARLYFB) += btext.o |
| 91 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 99 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c index 9103a56b39e8..03ab022e51c5 100644 --- a/arch/sparc/kernel/ftrace.c +++ b/arch/sparc/kernel/ftrace.c | |||
| @@ -13,7 +13,7 @@ static const u32 ftrace_nop = 0x01000000; | |||
| 13 | 13 | ||
| 14 | static u32 ftrace_call_replace(unsigned long ip, unsigned long addr) | 14 | static u32 ftrace_call_replace(unsigned long ip, unsigned long addr) |
| 15 | { | 15 | { |
| 16 | static u32 call; | 16 | u32 call; |
| 17 | s32 off; | 17 | s32 off; |
| 18 | 18 | ||
| 19 | off = ((s32)addr - (s32)ip); | 19 | off = ((s32)addr - (s32)ip); |
| @@ -91,3 +91,61 @@ int __init ftrace_dyn_arch_init(void *data) | |||
| 91 | return 0; | 91 | return 0; |
| 92 | } | 92 | } |
| 93 | #endif | 93 | #endif |
| 94 | |||
| 95 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
| 96 | |||
| 97 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
| 98 | extern void ftrace_graph_call(void); | ||
| 99 | |||
| 100 | int ftrace_enable_ftrace_graph_caller(void) | ||
| 101 | { | ||
| 102 | unsigned long ip = (unsigned long)(&ftrace_graph_call); | ||
| 103 | u32 old, new; | ||
| 104 | |||
| 105 | old = *(u32 *) &ftrace_graph_call; | ||
| 106 | new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller); | ||
| 107 | return ftrace_modify_code(ip, old, new); | ||
| 108 | } | ||
| 109 | |||
| 110 | int ftrace_disable_ftrace_graph_caller(void) | ||
| 111 | { | ||
| 112 | unsigned long ip = (unsigned long)(&ftrace_graph_call); | ||
| 113 | u32 old, new; | ||
| 114 | |||
| 115 | old = *(u32 *) &ftrace_graph_call; | ||
| 116 | new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub); | ||
| 117 | |||
| 118 | return ftrace_modify_code(ip, old, new); | ||
| 119 | } | ||
| 120 | |||
| 121 | #endif /* !CONFIG_DYNAMIC_FTRACE */ | ||
| 122 | |||
| 123 | /* | ||
| 124 | * Hook the return address and push it in the stack of return addrs | ||
| 125 | * in current thread info. | ||
| 126 | */ | ||
| 127 | unsigned long prepare_ftrace_return(unsigned long parent, | ||
| 128 | unsigned long self_addr, | ||
| 129 | unsigned long frame_pointer) | ||
| 130 | { | ||
| 131 | unsigned long return_hooker = (unsigned long) &return_to_handler; | ||
| 132 | struct ftrace_graph_ent trace; | ||
| 133 | |||
| 134 | if (unlikely(atomic_read(&current->tracing_graph_pause))) | ||
| 135 | return parent + 8UL; | ||
| 136 | |||
| 137 | if (ftrace_push_return_trace(parent, self_addr, &trace.depth, | ||
| 138 | frame_pointer) == -EBUSY) | ||
| 139 | return parent + 8UL; | ||
| 140 | |||
| 141 | trace.func = self_addr; | ||
| 142 | |||
| 143 | /* Only trace if the calling function expects to */ | ||
| 144 | if (!ftrace_graph_entry(&trace)) { | ||
| 145 | current->curr_ret_stack--; | ||
| 146 | return parent + 8UL; | ||
| 147 | } | ||
| 148 | |||
| 149 | return return_hooker; | ||
| 150 | } | ||
| 151 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index e1cbdb94d97b..454ce3a25273 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
| 21 | #include <linux/proc_fs.h> | 21 | #include <linux/proc_fs.h> |
| 22 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
| 23 | #include <linux/ftrace.h> | ||
| 23 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
| 24 | 25 | ||
| 25 | #include <asm/ptrace.h> | 26 | #include <asm/ptrace.h> |
| @@ -647,6 +648,14 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) | |||
| 647 | bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); | 648 | bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); |
| 648 | if (unlikely(!bucket)) | 649 | if (unlikely(!bucket)) |
| 649 | return 0; | 650 | return 0; |
| 651 | |||
| 652 | /* The only reference we store to the IRQ bucket is | ||
| 653 | * by physical address which kmemleak can't see, tell | ||
| 654 | * it that this object explicitly is not a leak and | ||
| 655 | * should be scanned. | ||
| 656 | */ | ||
| 657 | kmemleak_not_leak(bucket); | ||
| 658 | |||
| 650 | __flush_dcache_range((unsigned long) bucket, | 659 | __flush_dcache_range((unsigned long) bucket, |
| 651 | ((unsigned long) bucket + | 660 | ((unsigned long) bucket + |
| 652 | sizeof(struct ino_bucket))); | 661 | sizeof(struct ino_bucket))); |
| @@ -721,7 +730,7 @@ static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp) | |||
| 721 | __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp)); | 730 | __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp)); |
| 722 | } | 731 | } |
| 723 | 732 | ||
| 724 | void handler_irq(int irq, struct pt_regs *regs) | 733 | void __irq_entry handler_irq(int irq, struct pt_regs *regs) |
| 725 | { | 734 | { |
| 726 | unsigned long pstate, bucket_pa; | 735 | unsigned long pstate, bucket_pa; |
| 727 | struct pt_regs *old_regs; | 736 | struct pt_regs *old_regs; |
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c index f5a0fd490b59..0a2bd0f99fc1 100644 --- a/arch/sparc/kernel/kgdb_64.c +++ b/arch/sparc/kernel/kgdb_64.c | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | 5 | ||
| 6 | #include <linux/kgdb.h> | 6 | #include <linux/kgdb.h> |
| 7 | #include <linux/kdebug.h> | 7 | #include <linux/kdebug.h> |
| 8 | #include <linux/ftrace.h> | ||
| 8 | 9 | ||
| 9 | #include <asm/kdebug.h> | 10 | #include <asm/kdebug.h> |
| 10 | #include <asm/ptrace.h> | 11 | #include <asm/ptrace.h> |
| @@ -108,7 +109,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
| 108 | } | 109 | } |
| 109 | 110 | ||
| 110 | #ifdef CONFIG_SMP | 111 | #ifdef CONFIG_SMP |
| 111 | void smp_kgdb_capture_client(int irq, struct pt_regs *regs) | 112 | void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs) |
| 112 | { | 113 | { |
| 113 | unsigned long flags; | 114 | unsigned long flags; |
| 114 | 115 | ||
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index b287b62c7ea3..75a3d1a25356 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c | |||
| @@ -92,7 +92,6 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic) | |||
| 92 | notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) | 92 | notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) |
| 93 | { | 93 | { |
| 94 | unsigned int sum, touched = 0; | 94 | unsigned int sum, touched = 0; |
| 95 | int cpu = smp_processor_id(); | ||
| 96 | 95 | ||
| 97 | clear_softint(1 << irq); | 96 | clear_softint(1 << irq); |
| 98 | 97 | ||
| @@ -106,7 +105,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) | |||
| 106 | else | 105 | else |
| 107 | pcr_ops->write(PCR_PIC_PRIV); | 106 | pcr_ops->write(PCR_PIC_PRIV); |
| 108 | 107 | ||
| 109 | sum = kstat_irqs_cpu(0, cpu); | 108 | sum = local_cpu_data().irq0_irqs; |
| 110 | if (__get_cpu_var(nmi_touch)) { | 109 | if (__get_cpu_var(nmi_touch)) { |
| 111 | __get_cpu_var(nmi_touch) = 0; | 110 | __get_cpu_var(nmi_touch) = 0; |
| 112 | touched = 1; | 111 | touched = 1; |
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c index b775658a927d..8a000583b5cf 100644 --- a/arch/sparc/kernel/pci_common.c +++ b/arch/sparc/kernel/pci_common.c | |||
| @@ -371,14 +371,19 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm) | |||
| 371 | struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL); | 371 | struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL); |
| 372 | 372 | ||
| 373 | if (!rp) { | 373 | if (!rp) { |
| 374 | prom_printf("Cannot allocate IOMMU resource.\n"); | 374 | pr_info("%s: Cannot allocate IOMMU resource.\n", |
| 375 | prom_halt(); | 375 | pbm->name); |
| 376 | return; | ||
| 376 | } | 377 | } |
| 377 | rp->name = "IOMMU"; | 378 | rp->name = "IOMMU"; |
| 378 | rp->start = pbm->mem_space.start + (unsigned long) vdma[0]; | 379 | rp->start = pbm->mem_space.start + (unsigned long) vdma[0]; |
| 379 | rp->end = rp->start + (unsigned long) vdma[1] - 1UL; | 380 | rp->end = rp->start + (unsigned long) vdma[1] - 1UL; |
| 380 | rp->flags = IORESOURCE_BUSY; | 381 | rp->flags = IORESOURCE_BUSY; |
| 381 | request_resource(&pbm->mem_space, rp); | 382 | if (request_resource(&pbm->mem_space, rp)) { |
| 383 | pr_info("%s: Unable to request IOMMU resource.\n", | ||
| 384 | pbm->name); | ||
| 385 | kfree(rp); | ||
| 386 | } | ||
| 382 | } | 387 | } |
| 383 | } | 388 | } |
| 384 | 389 | ||
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index 2d94e7a03af5..c4a6a50b4849 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include <linux/irq.h> | 8 | #include <linux/irq.h> |
| 9 | 9 | ||
| 10 | #include <linux/perf_event.h> | 10 | #include <linux/perf_event.h> |
| 11 | #include <linux/ftrace.h> | ||
| 11 | 12 | ||
| 12 | #include <asm/pil.h> | 13 | #include <asm/pil.h> |
| 13 | #include <asm/pcr.h> | 14 | #include <asm/pcr.h> |
| @@ -34,7 +35,7 @@ unsigned int picl_shift; | |||
| 34 | * Therefore in such situations we defer the work by signalling | 35 | * Therefore in such situations we defer the work by signalling |
| 35 | * a lower level cpu IRQ. | 36 | * a lower level cpu IRQ. |
| 36 | */ | 37 | */ |
| 37 | void deferred_pcr_work_irq(int irq, struct pt_regs *regs) | 38 | void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs) |
| 38 | { | 39 | { |
| 39 | struct pt_regs *old_regs; | 40 | struct pt_regs *old_regs; |
| 40 | 41 | ||
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 4c5334528109..b6a2b8f47040 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/profile.h> | 22 | #include <linux/profile.h> |
| 23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
| 24 | #include <linux/vmalloc.h> | 24 | #include <linux/vmalloc.h> |
| 25 | #include <linux/ftrace.h> | ||
| 25 | #include <linux/cpu.h> | 26 | #include <linux/cpu.h> |
| 26 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
| 27 | 28 | ||
| @@ -823,13 +824,13 @@ void arch_send_call_function_single_ipi(int cpu) | |||
| 823 | &cpumask_of_cpu(cpu)); | 824 | &cpumask_of_cpu(cpu)); |
| 824 | } | 825 | } |
| 825 | 826 | ||
| 826 | void smp_call_function_client(int irq, struct pt_regs *regs) | 827 | void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs) |
| 827 | { | 828 | { |
| 828 | clear_softint(1 << irq); | 829 | clear_softint(1 << irq); |
| 829 | generic_smp_call_function_interrupt(); | 830 | generic_smp_call_function_interrupt(); |
| 830 | } | 831 | } |
| 831 | 832 | ||
| 832 | void smp_call_function_single_client(int irq, struct pt_regs *regs) | 833 | void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs) |
| 833 | { | 834 | { |
| 834 | clear_softint(1 << irq); | 835 | clear_softint(1 << irq); |
| 835 | generic_smp_call_function_single_interrupt(); | 836 | generic_smp_call_function_single_interrupt(); |
| @@ -965,7 +966,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) | |||
| 965 | put_cpu(); | 966 | put_cpu(); |
| 966 | } | 967 | } |
| 967 | 968 | ||
| 968 | void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) | 969 | void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) |
| 969 | { | 970 | { |
| 970 | struct mm_struct *mm; | 971 | struct mm_struct *mm; |
| 971 | unsigned long flags; | 972 | unsigned long flags; |
| @@ -1149,7 +1150,7 @@ void smp_release(void) | |||
| 1149 | */ | 1150 | */ |
| 1150 | extern void prom_world(int); | 1151 | extern void prom_world(int); |
| 1151 | 1152 | ||
| 1152 | void smp_penguin_jailcell(int irq, struct pt_regs *regs) | 1153 | void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs) |
| 1153 | { | 1154 | { |
| 1154 | clear_softint(1 << irq); | 1155 | clear_softint(1 << irq); |
| 1155 | 1156 | ||
| @@ -1365,7 +1366,7 @@ void smp_send_reschedule(int cpu) | |||
| 1365 | &cpumask_of_cpu(cpu)); | 1366 | &cpumask_of_cpu(cpu)); |
| 1366 | } | 1367 | } |
| 1367 | 1368 | ||
| 1368 | void smp_receive_signal_client(int irq, struct pt_regs *regs) | 1369 | void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) |
| 1369 | { | 1370 | { |
| 1370 | clear_softint(1 << irq); | 1371 | clear_softint(1 << irq); |
| 1371 | } | 1372 | } |
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 67e165102885..c7bbe6cf7b85 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <linux/clocksource.h> | 35 | #include <linux/clocksource.h> |
| 36 | #include <linux/of_device.h> | 36 | #include <linux/of_device.h> |
| 37 | #include <linux/platform_device.h> | 37 | #include <linux/platform_device.h> |
| 38 | #include <linux/ftrace.h> | ||
| 38 | 39 | ||
| 39 | #include <asm/oplib.h> | 40 | #include <asm/oplib.h> |
| 40 | #include <asm/timer.h> | 41 | #include <asm/timer.h> |
| @@ -717,7 +718,7 @@ static struct clock_event_device sparc64_clockevent = { | |||
| 717 | }; | 718 | }; |
| 718 | static DEFINE_PER_CPU(struct clock_event_device, sparc64_events); | 719 | static DEFINE_PER_CPU(struct clock_event_device, sparc64_events); |
| 719 | 720 | ||
| 720 | void timer_interrupt(int irq, struct pt_regs *regs) | 721 | void __irq_entry timer_interrupt(int irq, struct pt_regs *regs) |
| 721 | { | 722 | { |
| 722 | struct pt_regs *old_regs = set_irq_regs(regs); | 723 | struct pt_regs *old_regs = set_irq_regs(regs); |
| 723 | unsigned long tick_mask = tick_ops->softint_mask; | 724 | unsigned long tick_mask = tick_ops->softint_mask; |
| @@ -728,6 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs) | |||
| 728 | 729 | ||
| 729 | irq_enter(); | 730 | irq_enter(); |
| 730 | 731 | ||
| 732 | local_cpu_data().irq0_irqs++; | ||
| 731 | kstat_incr_irqs_this_cpu(0, irq_to_desc(0)); | 733 | kstat_incr_irqs_this_cpu(0, irq_to_desc(0)); |
| 732 | 734 | ||
| 733 | if (unlikely(!evt->event_handler)) { | 735 | if (unlikely(!evt->event_handler)) { |
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 837dfc2390d6..9da57f032983 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c | |||
| @@ -2203,27 +2203,6 @@ void dump_stack(void) | |||
| 2203 | 2203 | ||
| 2204 | EXPORT_SYMBOL(dump_stack); | 2204 | EXPORT_SYMBOL(dump_stack); |
| 2205 | 2205 | ||
| 2206 | static inline int is_kernel_stack(struct task_struct *task, | ||
| 2207 | struct reg_window *rw) | ||
| 2208 | { | ||
| 2209 | unsigned long rw_addr = (unsigned long) rw; | ||
| 2210 | unsigned long thread_base, thread_end; | ||
| 2211 | |||
| 2212 | if (rw_addr < PAGE_OFFSET) { | ||
| 2213 | if (task != &init_task) | ||
| 2214 | return 0; | ||
| 2215 | } | ||
| 2216 | |||
| 2217 | thread_base = (unsigned long) task_stack_page(task); | ||
| 2218 | thread_end = thread_base + sizeof(union thread_union); | ||
| 2219 | if (rw_addr >= thread_base && | ||
| 2220 | rw_addr < thread_end && | ||
| 2221 | !(rw_addr & 0x7UL)) | ||
| 2222 | return 1; | ||
| 2223 | |||
| 2224 | return 0; | ||
| 2225 | } | ||
| 2226 | |||
| 2227 | static inline struct reg_window *kernel_stack_up(struct reg_window *rw) | 2206 | static inline struct reg_window *kernel_stack_up(struct reg_window *rw) |
| 2228 | { | 2207 | { |
| 2229 | unsigned long fp = rw->ins[6]; | 2208 | unsigned long fp = rw->ins[6]; |
| @@ -2252,6 +2231,7 @@ void die_if_kernel(char *str, struct pt_regs *regs) | |||
| 2252 | show_regs(regs); | 2231 | show_regs(regs); |
| 2253 | add_taint(TAINT_DIE); | 2232 | add_taint(TAINT_DIE); |
| 2254 | if (regs->tstate & TSTATE_PRIV) { | 2233 | if (regs->tstate & TSTATE_PRIV) { |
| 2234 | struct thread_info *tp = current_thread_info(); | ||
| 2255 | struct reg_window *rw = (struct reg_window *) | 2235 | struct reg_window *rw = (struct reg_window *) |
| 2256 | (regs->u_regs[UREG_FP] + STACK_BIAS); | 2236 | (regs->u_regs[UREG_FP] + STACK_BIAS); |
| 2257 | 2237 | ||
| @@ -2259,8 +2239,8 @@ void die_if_kernel(char *str, struct pt_regs *regs) | |||
| 2259 | * find some badly aligned kernel stack. | 2239 | * find some badly aligned kernel stack. |
| 2260 | */ | 2240 | */ |
| 2261 | while (rw && | 2241 | while (rw && |
| 2262 | count++ < 30&& | 2242 | count++ < 30 && |
| 2263 | is_kernel_stack(current, rw)) { | 2243 | kstack_valid(tp, (unsigned long) rw)) { |
| 2264 | printk("Caller[%016lx]: %pS\n", rw->ins[7], | 2244 | printk("Caller[%016lx]: %pS\n", rw->ins[7], |
| 2265 | (void *) rw->ins[7]); | 2245 | (void *) rw->ins[7]); |
| 2266 | 2246 | ||
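Editor's note: die_if_kernel() now delegates the frame-address sanity test to kstack_valid() instead of the removed local is_kernel_stack(), so the backtrace can also accept frames that live on the per-cpu hard/soft IRQ stacks. A hedged sketch of the kind of predicate this is; the real helper lives in the sparc64 kstack.h and the details below are assumptions:

    static inline bool stack_addr_plausible(struct thread_info *tp,
                                            unsigned long addr)
    {
            unsigned long base = (unsigned long) tp;

            if (addr & 0x7UL)                       /* must be 8-byte aligned */
                    return false;
            if (addr >= base && addr < base + THREAD_SIZE)
                    return true;                    /* on the task's own stack */
            /* The real check also accepts the hardirq_stack[cpu] and
             * softirq_stack[cpu] ranges; omitted in this sketch. */
            return false;
    }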
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 4e5992593967..0c1e6783657f 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S | |||
| @@ -46,11 +46,16 @@ SECTIONS | |||
| 46 | SCHED_TEXT | 46 | SCHED_TEXT |
| 47 | LOCK_TEXT | 47 | LOCK_TEXT |
| 48 | KPROBES_TEXT | 48 | KPROBES_TEXT |
| 49 | IRQENTRY_TEXT | ||
| 49 | *(.gnu.warning) | 50 | *(.gnu.warning) |
| 50 | } = 0 | 51 | } = 0 |
| 51 | _etext = .; | 52 | _etext = .; |
| 52 | 53 | ||
| 53 | RO_DATA(PAGE_SIZE) | 54 | RO_DATA(PAGE_SIZE) |
| 55 | |||
| 56 | /* Start of data section */ | ||
| 57 | _sdata = .; | ||
| 58 | |||
| 54 | .data1 : { | 59 | .data1 : { |
| 55 | *(.data1) | 60 | *(.data1) |
| 56 | } | 61 | } |
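Editor's note: the __irq_entry annotations above only take effect because the linker script now collects .irqentry.text; IRQENTRY_TEXT is the shared macro from include/asm-generic/vmlinux.lds.h that does so, and _sdata gives generic code a symbol for the start of the data section. Roughly, the macro has this shape (reproduced from memory for orientation only; it expands to nothing unless the function graph tracer is configured):

    #define IRQENTRY_TEXT                                           \
                    ALIGN_FUNCTION();                               \
                    VMLINUX_SYMBOL(__irqentry_text_start) = .;      \
                    *(.irqentry.text)                               \
                    VMLINUX_SYMBOL(__irqentry_text_end) = .;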
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S index 24b8b12deed2..3753e3c6e176 100644 --- a/arch/sparc/lib/mcount.S +++ b/arch/sparc/lib/mcount.S | |||
| @@ -7,26 +7,11 @@ | |||
| 7 | 7 | ||
| 8 | #include <linux/linkage.h> | 8 | #include <linux/linkage.h> |
| 9 | 9 | ||
| 10 | #include <asm/ptrace.h> | ||
| 11 | #include <asm/thread_info.h> | ||
| 12 | |||
| 13 | /* | 10 | /* |
| 14 | * This is the main variant and is called by C code. GCC's -pg option | 11 | * This is the main variant and is called by C code. GCC's -pg option |
| 15 | * automatically instruments every C function with a call to this. | 12 | * automatically instruments every C function with a call to this. |
| 16 | */ | 13 | */ |
| 17 | 14 | ||
| 18 | #ifdef CONFIG_STACK_DEBUG | ||
| 19 | |||
| 20 | #define OVSTACKSIZE 4096 /* lets hope this is enough */ | ||
| 21 | |||
| 22 | .data | ||
| 23 | .align 8 | ||
| 24 | panicstring: | ||
| 25 | .asciz "Stack overflow\n" | ||
| 26 | .align 8 | ||
| 27 | ovstack: | ||
| 28 | .skip OVSTACKSIZE | ||
| 29 | #endif | ||
| 30 | .text | 15 | .text |
| 31 | .align 32 | 16 | .align 32 |
| 32 | .globl _mcount | 17 | .globl _mcount |
| @@ -35,84 +20,48 @@ ovstack: | |||
| 35 | .type mcount,#function | 20 | .type mcount,#function |
| 36 | _mcount: | 21 | _mcount: |
| 37 | mcount: | 22 | mcount: |
| 38 | #ifdef CONFIG_STACK_DEBUG | ||
| 39 | /* | ||
| 40 | * Check whether %sp is dangerously low. | ||
| 41 | */ | ||
| 42 | ldub [%g6 + TI_FPDEPTH], %g1 | ||
| 43 | srl %g1, 1, %g3 | ||
| 44 | add %g3, 1, %g3 | ||
| 45 | sllx %g3, 8, %g3 ! each fpregs frame is 256b | ||
| 46 | add %g3, 192, %g3 | ||
| 47 | add %g6, %g3, %g3 ! where does task_struct+frame end? | ||
| 48 | sub %g3, STACK_BIAS, %g3 | ||
| 49 | cmp %sp, %g3 | ||
| 50 | bg,pt %xcc, 1f | ||
| 51 | nop | ||
| 52 | lduh [%g6 + TI_CPU], %g1 | ||
| 53 | sethi %hi(hardirq_stack), %g3 | ||
| 54 | or %g3, %lo(hardirq_stack), %g3 | ||
| 55 | sllx %g1, 3, %g1 | ||
| 56 | ldx [%g3 + %g1], %g7 | ||
| 57 | sub %g7, STACK_BIAS, %g7 | ||
| 58 | cmp %sp, %g7 | ||
| 59 | bleu,pt %xcc, 2f | ||
| 60 | sethi %hi(THREAD_SIZE), %g3 | ||
| 61 | add %g7, %g3, %g7 | ||
| 62 | cmp %sp, %g7 | ||
| 63 | blu,pn %xcc, 1f | ||
| 64 | 2: sethi %hi(softirq_stack), %g3 | ||
| 65 | or %g3, %lo(softirq_stack), %g3 | ||
| 66 | ldx [%g3 + %g1], %g7 | ||
| 67 | sub %g7, STACK_BIAS, %g7 | ||
| 68 | cmp %sp, %g7 | ||
| 69 | bleu,pt %xcc, 3f | ||
| 70 | sethi %hi(THREAD_SIZE), %g3 | ||
| 71 | add %g7, %g3, %g7 | ||
| 72 | cmp %sp, %g7 | ||
| 73 | blu,pn %xcc, 1f | ||
| 74 | nop | ||
| 75 | /* If we are already on ovstack, don't hop onto it | ||
| 76 | * again, we are already trying to output the stack overflow | ||
| 77 | * message. | ||
| 78 | */ | ||
| 79 | 3: sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough | ||
| 80 | or %g7, %lo(ovstack), %g7 | ||
| 81 | add %g7, OVSTACKSIZE, %g3 | ||
| 82 | sub %g3, STACK_BIAS + 192, %g3 | ||
| 83 | sub %g7, STACK_BIAS, %g7 | ||
| 84 | cmp %sp, %g7 | ||
| 85 | blu,pn %xcc, 2f | ||
| 86 | cmp %sp, %g3 | ||
| 87 | bleu,pn %xcc, 1f | ||
| 88 | nop | ||
| 89 | 2: mov %g3, %sp | ||
| 90 | sethi %hi(panicstring), %g3 | ||
| 91 | call prom_printf | ||
| 92 | or %g3, %lo(panicstring), %o0 | ||
| 93 | call prom_halt | ||
| 94 | nop | ||
| 95 | 1: | ||
| 96 | #endif | ||
| 97 | #ifdef CONFIG_FUNCTION_TRACER | 23 | #ifdef CONFIG_FUNCTION_TRACER |
| 98 | #ifdef CONFIG_DYNAMIC_FTRACE | 24 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 99 | mov %o7, %o0 | 25 | /* Do nothing, the retl/nop below is all we need. */ |
| 100 | .globl mcount_call | ||
| 101 | mcount_call: | ||
| 102 | call ftrace_stub | ||
| 103 | mov %o0, %o7 | ||
| 104 | #else | 26 | #else |
| 105 | sethi %hi(ftrace_trace_function), %g1 | 27 | sethi %hi(function_trace_stop), %g1 |
| 28 | lduw [%g1 + %lo(function_trace_stop)], %g2 | ||
| 29 | brnz,pn %g2, 2f | ||
| 30 | sethi %hi(ftrace_trace_function), %g1 | ||
| 106 | sethi %hi(ftrace_stub), %g2 | 31 | sethi %hi(ftrace_stub), %g2 |
| 107 | ldx [%g1 + %lo(ftrace_trace_function)], %g1 | 32 | ldx [%g1 + %lo(ftrace_trace_function)], %g1 |
| 108 | or %g2, %lo(ftrace_stub), %g2 | 33 | or %g2, %lo(ftrace_stub), %g2 |
| 109 | cmp %g1, %g2 | 34 | cmp %g1, %g2 |
| 110 | be,pn %icc, 1f | 35 | be,pn %icc, 1f |
| 111 | mov %i7, %o1 | 36 | mov %i7, %g3 |
| 112 | jmpl %g1, %g0 | 37 | save %sp, -128, %sp |
| 113 | mov %o7, %o0 | 38 | mov %g3, %o1 |
| 39 | jmpl %g1, %o7 | ||
| 40 | mov %i7, %o0 | ||
| 41 | ret | ||
| 42 | restore | ||
| 114 | /* not reached */ | 43 | /* not reached */ |
| 115 | 1: | 44 | 1: |
| 45 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
| 46 | sethi %hi(ftrace_graph_return), %g1 | ||
| 47 | ldx [%g1 + %lo(ftrace_graph_return)], %g3 | ||
| 48 | cmp %g2, %g3 | ||
| 49 | bne,pn %xcc, 5f | ||
| 50 | sethi %hi(ftrace_graph_entry_stub), %g2 | ||
| 51 | sethi %hi(ftrace_graph_entry), %g1 | ||
| 52 | or %g2, %lo(ftrace_graph_entry_stub), %g2 | ||
| 53 | ldx [%g1 + %lo(ftrace_graph_entry)], %g1 | ||
| 54 | cmp %g1, %g2 | ||
| 55 | be,pt %xcc, 2f | ||
| 56 | nop | ||
| 57 | 5: mov %i7, %g2 | ||
| 58 | mov %fp, %g3 | ||
| 59 | save %sp, -128, %sp | ||
| 60 | mov %g2, %l0 | ||
| 61 | ba,pt %xcc, ftrace_graph_caller | ||
| 62 | mov %g3, %l1 | ||
| 63 | #endif | ||
| 64 | 2: | ||
| 116 | #endif | 65 | #endif |
| 117 | #endif | 66 | #endif |
| 118 | retl | 67 | retl |
| @@ -131,14 +80,50 @@ ftrace_stub: | |||
| 131 | .globl ftrace_caller | 80 | .globl ftrace_caller |
| 132 | .type ftrace_caller,#function | 81 | .type ftrace_caller,#function |
| 133 | ftrace_caller: | 82 | ftrace_caller: |
| 134 | mov %i7, %o1 | 83 | sethi %hi(function_trace_stop), %g1 |
| 135 | mov %o7, %o0 | 84 | mov %i7, %g2 |
| 85 | lduw [%g1 + %lo(function_trace_stop)], %g1 | ||
| 86 | brnz,pn %g1, ftrace_stub | ||
| 87 | mov %fp, %g3 | ||
| 88 | save %sp, -128, %sp | ||
| 89 | mov %g2, %o1 | ||
| 90 | mov %g2, %l0 | ||
| 91 | mov %g3, %l1 | ||
| 136 | .globl ftrace_call | 92 | .globl ftrace_call |
| 137 | ftrace_call: | 93 | ftrace_call: |
| 138 | call ftrace_stub | 94 | call ftrace_stub |
| 139 | mov %o0, %o7 | 95 | mov %i7, %o0 |
| 140 | retl | 96 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 97 | .globl ftrace_graph_call | ||
| 98 | ftrace_graph_call: | ||
| 99 | call ftrace_stub | ||
| 141 | nop | 100 | nop |
| 101 | #endif | ||
| 102 | ret | ||
| 103 | restore | ||
| 104 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
| 105 | .size ftrace_graph_call,.-ftrace_graph_call | ||
| 106 | #endif | ||
| 107 | .size ftrace_call,.-ftrace_call | ||
| 142 | .size ftrace_caller,.-ftrace_caller | 108 | .size ftrace_caller,.-ftrace_caller |
| 143 | #endif | 109 | #endif |
| 144 | #endif | 110 | #endif |
| 111 | |||
| 112 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
| 113 | ENTRY(ftrace_graph_caller) | ||
| 114 | mov %l0, %o0 | ||
| 115 | mov %i7, %o1 | ||
| 116 | call prepare_ftrace_return | ||
| 117 | mov %l1, %o2 | ||
| 118 | ret | ||
| 119 | restore %o0, -8, %i7 | ||
| 120 | END(ftrace_graph_caller) | ||
| 121 | |||
| 122 | ENTRY(return_to_handler) | ||
| 123 | save %sp, -128, %sp | ||
| 124 | call ftrace_return_to_handler | ||
| 125 | mov %fp, %o0 | ||
| 126 | jmpl %o0 + 8, %g0 | ||
| 127 | restore | ||
| 128 | END(return_to_handler) | ||
| 129 | #endif | ||
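Editor's note: the rewritten mcount.S is the sparc64 side of function graph tracing: ftrace_graph_caller hands the saved return address, the callee address and the frame pointer to prepare_ftrace_return(), and return_to_handler later asks ftrace_return_to_handler() for the original return address. Below is a C companion sketch of prepare_ftrace_return(), reconstructed from how the assembly uses it and how other architectures implement it; the real sparc version lives in arch/sparc/kernel/ftrace.c and may differ in detail:

    #include <linux/ftrace.h>
    #include <linux/sched.h>
    #include <linux/errno.h>

    extern void return_to_handler(void);

    /*
     * The asm stub does "restore %o0, -8, %i7", so whatever this returns ends
     * up (minus 8) in the traced function's return-address register; returning
     * parent + 8 therefore means "leave the return address alone".
     */
    unsigned long prepare_ftrace_return(unsigned long parent,
                                        unsigned long self_addr,
                                        unsigned long frame_pointer)
    {
            unsigned long return_hooker = (unsigned long) &return_to_handler;
            struct ftrace_graph_ent trace;

            if (unlikely(atomic_read(&current->tracing_graph_pause)))
                    return parent + 8UL;

            if (ftrace_push_return_trace(parent, self_addr,
                                         &trace.depth, frame_pointer) == -EBUSY)
                    return parent + 8UL;

            trace.func = self_addr;
            if (!ftrace_graph_entry(&trace)) {
                    /* The subscriber declined this function: undo the push. */
                    current->curr_ret_stack--;
                    return parent + 8UL;
            }

            return return_hooker;
    }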
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0eacb1ffb421..9458685902bd 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -1216,8 +1216,8 @@ config NUMA_EMU | |||
| 1216 | 1216 | ||
| 1217 | config NODES_SHIFT | 1217 | config NODES_SHIFT |
| 1218 | int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP | 1218 | int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP |
| 1219 | range 1 9 | 1219 | range 1 10 |
| 1220 | default "9" if MAXSMP | 1220 | default "10" if MAXSMP |
| 1221 | default "6" if X86_64 | 1221 | default "6" if X86_64 |
| 1222 | default "4" if X86_NUMAQ | 1222 | default "4" if X86_NUMAQ |
| 1223 | default "3" | 1223 | default "3" |
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 59b4556a5b92..e790bc1fbfa3 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S | |||
| @@ -626,7 +626,7 @@ ia32_sys_call_table: | |||
| 626 | .quad stub32_sigreturn | 626 | .quad stub32_sigreturn |
| 627 | .quad stub32_clone /* 120 */ | 627 | .quad stub32_clone /* 120 */ |
| 628 | .quad sys_setdomainname | 628 | .quad sys_setdomainname |
| 629 | .quad sys_uname | 629 | .quad sys_newuname |
| 630 | .quad sys_modify_ldt | 630 | .quad sys_modify_ldt |
| 631 | .quad compat_sys_adjtimex | 631 | .quad compat_sys_adjtimex |
| 632 | .quad sys32_mprotect /* 125 */ | 632 | .quad sys32_mprotect /* 125 */ |
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index ba19ad4c47d0..86a0ff0aeac7 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #define _ASM_X86_AMD_IOMMU_TYPES_H | 21 | #define _ASM_X86_AMD_IOMMU_TYPES_H |
| 22 | 22 | ||
| 23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
| 24 | #include <linux/mutex.h> | ||
| 24 | #include <linux/list.h> | 25 | #include <linux/list.h> |
| 25 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
| 26 | 27 | ||
| @@ -140,6 +141,7 @@ | |||
| 140 | 141 | ||
| 141 | /* constants to configure the command buffer */ | 142 | /* constants to configure the command buffer */ |
| 142 | #define CMD_BUFFER_SIZE 8192 | 143 | #define CMD_BUFFER_SIZE 8192 |
| 144 | #define CMD_BUFFER_UNINITIALIZED 1 | ||
| 143 | #define CMD_BUFFER_ENTRIES 512 | 145 | #define CMD_BUFFER_ENTRIES 512 |
| 144 | #define MMIO_CMD_SIZE_SHIFT 56 | 146 | #define MMIO_CMD_SIZE_SHIFT 56 |
| 145 | #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT) | 147 | #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT) |
| @@ -237,6 +239,7 @@ struct protection_domain { | |||
| 237 | struct list_head list; /* for list of all protection domains */ | 239 | struct list_head list; /* for list of all protection domains */ |
| 238 | struct list_head dev_list; /* List of all devices in this domain */ | 240 | struct list_head dev_list; /* List of all devices in this domain */ |
| 239 | spinlock_t lock; /* mostly used to lock the page table*/ | 241 | spinlock_t lock; /* mostly used to lock the page table*/ |
| 242 | struct mutex api_lock; /* protect page tables in the iommu-api path */ | ||
| 240 | u16 id; /* the domain id written to the device table */ | 243 | u16 id; /* the domain id written to the device table */ |
| 241 | int mode; /* paging mode (0-6 levels) */ | 244 | int mode; /* paging mode (0-6 levels) */ |
| 242 | u64 *pt_root; /* page table root pointer */ | 245 | u64 *pt_root; /* page table root pointer */ |
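Editor's note: CMD_BUFFER_UNINITIALIZED is packed into the low bit of iommu->cmd_buf_size (the size is a power of two, so bit 0 is free); __iommu_queue_command can then warn if a command is queued before iommu_enable_command_buffer() clears the flag, and free_command_buffer masks it off before computing the allocation order. Two small helpers showing the idiom (the helper names are hypothetical; the constants are the ones added in this header):

    static inline bool cmd_buf_ready(u32 cmd_buf_size)
    {
            return !(cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
    }

    static inline u32 cmd_buf_real_size(u32 cmd_buf_size)
    {
            return cmd_buf_size & ~CMD_BUFFER_UNINITIALIZED;
    }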
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h index ba0eed8aa1a6..b60f2924c413 100644 --- a/arch/x86/include/asm/lguest_hcall.h +++ b/arch/x86/include/asm/lguest_hcall.h | |||
| @@ -28,22 +28,39 @@ | |||
| 28 | 28 | ||
| 29 | #ifndef __ASSEMBLY__ | 29 | #ifndef __ASSEMBLY__ |
| 30 | #include <asm/hw_irq.h> | 30 | #include <asm/hw_irq.h> |
| 31 | #include <asm/kvm_para.h> | ||
| 32 | 31 | ||
| 33 | /*G:030 | 32 | /*G:030 |
| 34 | * But first, how does our Guest contact the Host to ask for privileged | 33 | * But first, how does our Guest contact the Host to ask for privileged |
| 35 | * operations? There are two ways: the direct way is to make a "hypercall", | 34 | * operations? There are two ways: the direct way is to make a "hypercall", |
| 36 | * to make requests of the Host Itself. | 35 | * to make requests of the Host Itself. |
| 37 | * | 36 | * |
| 38 | * We use the KVM hypercall mechanism, though completely different hypercall | 37 | * Our hypercall mechanism uses the highest unused trap code (traps 32 and |
| 39 | * numbers. Seventeen hypercalls are available: the hypercall number is put in | 38 | * above are used by real hardware interrupts). Seventeen hypercalls are |
| 40 | * the %eax register, and the arguments (when required) are placed in %ebx, | 39 | * available: the hypercall number is put in the %eax register, and the |
| 41 | * %ecx, %edx and %esi. If a return value makes sense, it's returned in %eax. | 40 | * arguments (when required) are placed in %ebx, %ecx, %edx and %esi. |
| 41 | * If a return value makes sense, it's returned in %eax. | ||
| 42 | * | 42 | * |
| 43 | * Grossly invalid calls result in Sudden Death at the hands of the vengeful | 43 | * Grossly invalid calls result in Sudden Death at the hands of the vengeful |
| 44 | * Host, rather than returning failure. This reflects Winston Churchill's | 44 | * Host, rather than returning failure. This reflects Winston Churchill's |
| 45 | * definition of a gentleman: "someone who is only rude intentionally". | 45 | * definition of a gentleman: "someone who is only rude intentionally". |
| 46 | :*/ | 46 | */ |
| 47 | static inline unsigned long | ||
| 48 | hcall(unsigned long call, | ||
| 49 | unsigned long arg1, unsigned long arg2, unsigned long arg3, | ||
| 50 | unsigned long arg4) | ||
| 51 | { | ||
| 52 | /* "int" is the Intel instruction to trigger a trap. */ | ||
| 53 | asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) | ||
| 54 | /* The call in %eax (aka "a") might be overwritten */ | ||
| 55 | : "=a"(call) | ||
| 56 | /* The arguments are in %eax, %ebx, %ecx, %edx & %esi */ | ||
| 57 | : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3), "S"(arg4) | ||
| 58 | /* "memory" means this might write somewhere in memory. | ||
| 59 | * This isn't true for all calls, but it's safe to tell | ||
| 60 | * gcc that it might happen so it doesn't get clever. */ | ||
| 61 | : "memory"); | ||
| 62 | return call; | ||
| 63 | } | ||
| 47 | 64 | ||
| 48 | /* Can't use our min() macro here: needs to be a constant */ | 65 | /* Can't use our min() macro here: needs to be a constant */ |
| 49 | #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) | 66 | #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) |
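Editor's note: with this change the guest reaches the host through a software interrupt (LGUEST_TRAP_ENTRY) instead of the KVM vmcall mechanism, and every caller goes through the new hcall() inline. A short usage example, mirroring the call sites converted in arch/x86/lguest/boot.c further down; unused argument slots are simply zero:

    #include <asm/lguest_hcall.h>
    #include <asm/page.h>           /* __pa() */

    static void tell_host_about(void *buf)
    {
            /* One-argument hypercall: pass the buffer's physical address. */
            hcall(LHCALL_NOTIFY, __pa(buf), 0, 0, 0);
    }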
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index f3dadb571d9b..f854d89b7edf 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
| @@ -118,7 +118,7 @@ static bool check_device(struct device *dev) | |||
| 118 | return false; | 118 | return false; |
| 119 | 119 | ||
| 120 | /* No device or no PCI device */ | 120 | /* No device or no PCI device */ |
| 121 | if (!dev || dev->bus != &pci_bus_type) | 121 | if (dev->bus != &pci_bus_type) |
| 122 | return false; | 122 | return false; |
| 123 | 123 | ||
| 124 | devid = get_device_id(dev); | 124 | devid = get_device_id(dev); |
| @@ -392,6 +392,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) | |||
| 392 | u32 tail, head; | 392 | u32 tail, head; |
| 393 | u8 *target; | 393 | u8 *target; |
| 394 | 394 | ||
| 395 | WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); | ||
| 395 | tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | 396 | tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); |
| 396 | target = iommu->cmd_buf + tail; | 397 | target = iommu->cmd_buf + tail; |
| 397 | memcpy_toio(target, cmd, sizeof(*cmd)); | 398 | memcpy_toio(target, cmd, sizeof(*cmd)); |
| @@ -2186,7 +2187,7 @@ static void prealloc_protection_domains(void) | |||
| 2186 | struct dma_ops_domain *dma_dom; | 2187 | struct dma_ops_domain *dma_dom; |
| 2187 | u16 devid; | 2188 | u16 devid; |
| 2188 | 2189 | ||
| 2189 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 2190 | for_each_pci_dev(dev) { |
| 2190 | 2191 | ||
| 2191 | /* Do we handle this device? */ | 2192 | /* Do we handle this device? */ |
| 2192 | if (!check_device(&dev->dev)) | 2193 | if (!check_device(&dev->dev)) |
| @@ -2298,7 +2299,7 @@ static void cleanup_domain(struct protection_domain *domain) | |||
| 2298 | list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { | 2299 | list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { |
| 2299 | struct device *dev = dev_data->dev; | 2300 | struct device *dev = dev_data->dev; |
| 2300 | 2301 | ||
| 2301 | do_detach(dev); | 2302 | __detach_device(dev); |
| 2302 | atomic_set(&dev_data->bind, 0); | 2303 | atomic_set(&dev_data->bind, 0); |
| 2303 | } | 2304 | } |
| 2304 | 2305 | ||
| @@ -2327,6 +2328,7 @@ static struct protection_domain *protection_domain_alloc(void) | |||
| 2327 | return NULL; | 2328 | return NULL; |
| 2328 | 2329 | ||
| 2329 | spin_lock_init(&domain->lock); | 2330 | spin_lock_init(&domain->lock); |
| 2331 | mutex_init(&domain->api_lock); | ||
| 2330 | domain->id = domain_id_alloc(); | 2332 | domain->id = domain_id_alloc(); |
| 2331 | if (!domain->id) | 2333 | if (!domain->id) |
| 2332 | goto out_err; | 2334 | goto out_err; |
| @@ -2379,9 +2381,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom) | |||
| 2379 | 2381 | ||
| 2380 | free_pagetable(domain); | 2382 | free_pagetable(domain); |
| 2381 | 2383 | ||
| 2382 | domain_id_free(domain->id); | 2384 | protection_domain_free(domain); |
| 2383 | |||
| 2384 | kfree(domain); | ||
| 2385 | 2385 | ||
| 2386 | dom->priv = NULL; | 2386 | dom->priv = NULL; |
| 2387 | } | 2387 | } |
| @@ -2456,6 +2456,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom, | |||
| 2456 | iova &= PAGE_MASK; | 2456 | iova &= PAGE_MASK; |
| 2457 | paddr &= PAGE_MASK; | 2457 | paddr &= PAGE_MASK; |
| 2458 | 2458 | ||
| 2459 | mutex_lock(&domain->api_lock); | ||
| 2460 | |||
| 2459 | for (i = 0; i < npages; ++i) { | 2461 | for (i = 0; i < npages; ++i) { |
| 2460 | ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k); | 2462 | ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k); |
| 2461 | if (ret) | 2463 | if (ret) |
| @@ -2465,6 +2467,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom, | |||
| 2465 | paddr += PAGE_SIZE; | 2467 | paddr += PAGE_SIZE; |
| 2466 | } | 2468 | } |
| 2467 | 2469 | ||
| 2470 | mutex_unlock(&domain->api_lock); | ||
| 2471 | |||
| 2468 | return 0; | 2472 | return 0; |
| 2469 | } | 2473 | } |
| 2470 | 2474 | ||
| @@ -2477,12 +2481,16 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom, | |||
| 2477 | 2481 | ||
| 2478 | iova &= PAGE_MASK; | 2482 | iova &= PAGE_MASK; |
| 2479 | 2483 | ||
| 2484 | mutex_lock(&domain->api_lock); | ||
| 2485 | |||
| 2480 | for (i = 0; i < npages; ++i) { | 2486 | for (i = 0; i < npages; ++i) { |
| 2481 | iommu_unmap_page(domain, iova, PM_MAP_4k); | 2487 | iommu_unmap_page(domain, iova, PM_MAP_4k); |
| 2482 | iova += PAGE_SIZE; | 2488 | iova += PAGE_SIZE; |
| 2483 | } | 2489 | } |
| 2484 | 2490 | ||
| 2485 | iommu_flush_tlb_pde(domain); | 2491 | iommu_flush_tlb_pde(domain); |
| 2492 | |||
| 2493 | mutex_unlock(&domain->api_lock); | ||
| 2486 | } | 2494 | } |
| 2487 | 2495 | ||
| 2488 | static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, | 2496 | static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, |
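Editor's note: the iommu-api map/unmap paths now take domain->api_lock around the whole multi-page loop, presumably because page-table population can allocate (and therefore sleep), which rules out the existing spinlock for this outer serialization. The pattern in isolation, using the same helpers the hunk calls:

    static int map_pages_serialized(struct protection_domain *domain,
                                    unsigned long iova, phys_addr_t paddr,
                                    int npages, int prot)
    {
            int i, ret = 0;

            mutex_lock(&domain->api_lock);
            for (i = 0; i < npages; ++i) {
                    ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
                    if (ret)
                            break;
                    iova  += PAGE_SIZE;
                    paddr += PAGE_SIZE;
            }
            mutex_unlock(&domain->api_lock);

            return ret;
    }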
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 42f5350b908f..6360abf993d4 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c | |||
| @@ -138,9 +138,9 @@ int amd_iommus_present; | |||
| 138 | bool amd_iommu_np_cache __read_mostly; | 138 | bool amd_iommu_np_cache __read_mostly; |
| 139 | 139 | ||
| 140 | /* | 140 | /* |
| 141 | * Set to true if ACPI table parsing and hardware intialization went properly | 141 | * The ACPI table parsing functions set this variable on an error |
| 142 | */ | 142 | */ |
| 143 | static bool amd_iommu_initialized; | 143 | static int __initdata amd_iommu_init_err; |
| 144 | 144 | ||
| 145 | /* | 145 | /* |
| 146 | * List of protection domains - used during resume | 146 | * List of protection domains - used during resume |
| @@ -391,9 +391,11 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table) | |||
| 391 | */ | 391 | */ |
| 392 | for (i = 0; i < table->length; ++i) | 392 | for (i = 0; i < table->length; ++i) |
| 393 | checksum += p[i]; | 393 | checksum += p[i]; |
| 394 | if (checksum != 0) | 394 | if (checksum != 0) { |
| 395 | /* ACPI table corrupt */ | 395 | /* ACPI table corrupt */ |
| 396 | return -ENODEV; | 396 | amd_iommu_init_err = -ENODEV; |
| 397 | return 0; | ||
| 398 | } | ||
| 397 | 399 | ||
| 398 | p += IVRS_HEADER_LENGTH; | 400 | p += IVRS_HEADER_LENGTH; |
| 399 | 401 | ||
| @@ -436,7 +438,7 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) | |||
| 436 | if (cmd_buf == NULL) | 438 | if (cmd_buf == NULL) |
| 437 | return NULL; | 439 | return NULL; |
| 438 | 440 | ||
| 439 | iommu->cmd_buf_size = CMD_BUFFER_SIZE; | 441 | iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED; |
| 440 | 442 | ||
| 441 | return cmd_buf; | 443 | return cmd_buf; |
| 442 | } | 444 | } |
| @@ -472,12 +474,13 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu) | |||
| 472 | &entry, sizeof(entry)); | 474 | &entry, sizeof(entry)); |
| 473 | 475 | ||
| 474 | amd_iommu_reset_cmd_buffer(iommu); | 476 | amd_iommu_reset_cmd_buffer(iommu); |
| 477 | iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED); | ||
| 475 | } | 478 | } |
| 476 | 479 | ||
| 477 | static void __init free_command_buffer(struct amd_iommu *iommu) | 480 | static void __init free_command_buffer(struct amd_iommu *iommu) |
| 478 | { | 481 | { |
| 479 | free_pages((unsigned long)iommu->cmd_buf, | 482 | free_pages((unsigned long)iommu->cmd_buf, |
| 480 | get_order(iommu->cmd_buf_size)); | 483 | get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED))); |
| 481 | } | 484 | } |
| 482 | 485 | ||
| 483 | /* allocates the memory where the IOMMU will log its events to */ | 486 | /* allocates the memory where the IOMMU will log its events to */ |
| @@ -920,11 +923,16 @@ static int __init init_iommu_all(struct acpi_table_header *table) | |||
| 920 | h->mmio_phys); | 923 | h->mmio_phys); |
| 921 | 924 | ||
| 922 | iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); | 925 | iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); |
| 923 | if (iommu == NULL) | 926 | if (iommu == NULL) { |
| 924 | return -ENOMEM; | 927 | amd_iommu_init_err = -ENOMEM; |
| 928 | return 0; | ||
| 929 | } | ||
| 930 | |||
| 925 | ret = init_iommu_one(iommu, h); | 931 | ret = init_iommu_one(iommu, h); |
| 926 | if (ret) | 932 | if (ret) { |
| 927 | return ret; | 933 | amd_iommu_init_err = ret; |
| 934 | return 0; | ||
| 935 | } | ||
| 928 | break; | 936 | break; |
| 929 | default: | 937 | default: |
| 930 | break; | 938 | break; |
| @@ -934,8 +942,6 @@ static int __init init_iommu_all(struct acpi_table_header *table) | |||
| 934 | } | 942 | } |
| 935 | WARN_ON(p != end); | 943 | WARN_ON(p != end); |
| 936 | 944 | ||
| 937 | amd_iommu_initialized = true; | ||
| 938 | |||
| 939 | return 0; | 945 | return 0; |
| 940 | } | 946 | } |
| 941 | 947 | ||
| @@ -1211,6 +1217,10 @@ static int __init amd_iommu_init(void) | |||
| 1211 | if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0) | 1217 | if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0) |
| 1212 | return -ENODEV; | 1218 | return -ENODEV; |
| 1213 | 1219 | ||
| 1220 | ret = amd_iommu_init_err; | ||
| 1221 | if (ret) | ||
| 1222 | goto out; | ||
| 1223 | |||
| 1214 | dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); | 1224 | dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); |
| 1215 | alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); | 1225 | alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); |
| 1216 | rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); | 1226 | rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); |
| @@ -1270,12 +1280,19 @@ static int __init amd_iommu_init(void) | |||
| 1270 | if (acpi_table_parse("IVRS", init_iommu_all) != 0) | 1280 | if (acpi_table_parse("IVRS", init_iommu_all) != 0) |
| 1271 | goto free; | 1281 | goto free; |
| 1272 | 1282 | ||
| 1273 | if (!amd_iommu_initialized) | 1283 | if (amd_iommu_init_err) { |
| 1284 | ret = amd_iommu_init_err; | ||
| 1274 | goto free; | 1285 | goto free; |
| 1286 | } | ||
| 1275 | 1287 | ||
| 1276 | if (acpi_table_parse("IVRS", init_memory_definitions) != 0) | 1288 | if (acpi_table_parse("IVRS", init_memory_definitions) != 0) |
| 1277 | goto free; | 1289 | goto free; |
| 1278 | 1290 | ||
| 1291 | if (amd_iommu_init_err) { | ||
| 1292 | ret = amd_iommu_init_err; | ||
| 1293 | goto free; | ||
| 1294 | } | ||
| 1295 | |||
| 1279 | ret = sysdev_class_register(&amd_iommu_sysdev_class); | 1296 | ret = sysdev_class_register(&amd_iommu_sysdev_class); |
| 1280 | if (ret) | 1297 | if (ret) |
| 1281 | goto free; | 1298 | goto free; |
| @@ -1288,6 +1305,8 @@ static int __init amd_iommu_init(void) | |||
| 1288 | if (ret) | 1305 | if (ret) |
| 1289 | goto free; | 1306 | goto free; |
| 1290 | 1307 | ||
| 1308 | enable_iommus(); | ||
| 1309 | |||
| 1291 | if (iommu_pass_through) | 1310 | if (iommu_pass_through) |
| 1292 | ret = amd_iommu_init_passthrough(); | 1311 | ret = amd_iommu_init_passthrough(); |
| 1293 | else | 1312 | else |
| @@ -1300,8 +1319,6 @@ static int __init amd_iommu_init(void) | |||
| 1300 | 1319 | ||
| 1301 | amd_iommu_init_notifier(); | 1320 | amd_iommu_init_notifier(); |
| 1302 | 1321 | ||
| 1303 | enable_iommus(); | ||
| 1304 | |||
| 1305 | if (iommu_pass_through) | 1322 | if (iommu_pass_through) |
| 1306 | goto out; | 1323 | goto out; |
| 1307 | 1324 | ||
| @@ -1315,6 +1332,7 @@ out: | |||
| 1315 | return ret; | 1332 | return ret; |
| 1316 | 1333 | ||
| 1317 | free: | 1334 | free: |
| 1335 | disable_iommus(); | ||
| 1318 | 1336 | ||
| 1319 | amd_iommu_uninit_devices(); | 1337 | amd_iommu_uninit_devices(); |
| 1320 | 1338 | ||
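Editor's note: amd_iommu_init_err exists because acpi_table_parse() only reports whether the table was found and the handler ran; the handler's own error code is discarded. The callbacks therefore latch failures in a file-scope __initdata variable and return 0, and amd_iommu_init() checks the latch after each parse. The pattern in miniature (function names and the validity check are illustrative):

    static int __initdata parse_err;

    static int __init my_table_cb(struct acpi_table_header *table)
    {
            if (!table_looks_sane(table)) { /* hypothetical validity check    */
                    parse_err = -ENODEV;    /* remember the failure ...       */
                    return 0;               /* ... this return value is lost  */
            }
            return 0;
    }

    static int __init my_driver_init(void)
    {
            if (acpi_table_parse("IVRS", my_table_cb) != 0)
                    return -ENODEV;         /* table not present at all */
            if (parse_err)
                    return parse_err;       /* table present but rejected */
            return 0;
    }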
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 3704997e8b25..b5d8b0bcf235 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
| @@ -393,6 +393,7 @@ void __init gart_iommu_hole_init(void) | |||
| 393 | for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { | 393 | for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { |
| 394 | int bus; | 394 | int bus; |
| 395 | int dev_base, dev_limit; | 395 | int dev_base, dev_limit; |
| 396 | u32 ctl; | ||
| 396 | 397 | ||
| 397 | bus = bus_dev_ranges[i].bus; | 398 | bus = bus_dev_ranges[i].bus; |
| 398 | dev_base = bus_dev_ranges[i].dev_base; | 399 | dev_base = bus_dev_ranges[i].dev_base; |
| @@ -406,7 +407,19 @@ void __init gart_iommu_hole_init(void) | |||
| 406 | gart_iommu_aperture = 1; | 407 | gart_iommu_aperture = 1; |
| 407 | x86_init.iommu.iommu_init = gart_iommu_init; | 408 | x86_init.iommu.iommu_init = gart_iommu_init; |
| 408 | 409 | ||
| 409 | aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7; | 410 | ctl = read_pci_config(bus, slot, 3, |
| 411 | AMD64_GARTAPERTURECTL); | ||
| 412 | |||
| 413 | /* | ||
| 414 | * Before we do anything else disable the GART. It may | ||
| 415 | * still be enabled if we boot into a crash-kernel here. | ||
| 416 | * Reconfiguring the GART while it is enabled could have | ||
| 417 | * unknown side-effects. | ||
| 418 | */ | ||
| 419 | ctl &= ~GARTEN; | ||
| 420 | write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl); | ||
| 421 | |||
| 422 | aper_order = (ctl >> 1) & 7; | ||
| 410 | aper_size = (32 * 1024 * 1024) << aper_order; | 423 | aper_size = (32 * 1024 * 1024) << aper_order; |
| 411 | aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; | 424 | aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; |
| 412 | aper_base <<= 25; | 425 | aper_base <<= 25; |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 00187f1fcfb7..e5a4a1e01618 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
| @@ -1640,8 +1640,10 @@ int __init APIC_init_uniprocessor(void) | |||
| 1640 | } | 1640 | } |
| 1641 | #endif | 1641 | #endif |
| 1642 | 1642 | ||
| 1643 | #ifndef CONFIG_SMP | ||
| 1643 | enable_IR_x2apic(); | 1644 | enable_IR_x2apic(); |
| 1644 | default_setup_apic_routing(); | 1645 | default_setup_apic_routing(); |
| 1646 | #endif | ||
| 1645 | 1647 | ||
| 1646 | verify_local_APIC(); | 1648 | verify_local_APIC(); |
| 1647 | connect_bsp_APIC(); | 1649 | connect_bsp_APIC(); |
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index a4849c10a77e..ebd4c51d096a 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c | |||
| @@ -27,7 +27,6 @@ | |||
| 27 | #include <asm/cpu.h> | 27 | #include <asm/cpu.h> |
| 28 | #include <asm/reboot.h> | 28 | #include <asm/reboot.h> |
| 29 | #include <asm/virtext.h> | 29 | #include <asm/virtext.h> |
| 30 | #include <asm/x86_init.h> | ||
| 31 | 30 | ||
| 32 | #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) | 31 | #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) |
| 33 | 32 | ||
| @@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs) | |||
| 103 | #ifdef CONFIG_HPET_TIMER | 102 | #ifdef CONFIG_HPET_TIMER |
| 104 | hpet_disable(); | 103 | hpet_disable(); |
| 105 | #endif | 104 | #endif |
| 106 | |||
| 107 | #ifdef CONFIG_X86_64 | ||
| 108 | x86_platform.iommu_shutdown(); | ||
| 109 | #endif | ||
| 110 | |||
| 111 | crash_save_cpu(regs, safe_smp_processor_id()); | 105 | crash_save_cpu(regs, safe_smp_processor_id()); |
| 112 | } | 106 | } |
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 740b440fbd73..7bca3c6a02fb 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
| @@ -519,29 +519,45 @@ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type, | |||
| 519 | printk(KERN_DEBUG "e820 remove range: %016Lx - %016Lx ", | 519 | printk(KERN_DEBUG "e820 remove range: %016Lx - %016Lx ", |
| 520 | (unsigned long long) start, | 520 | (unsigned long long) start, |
| 521 | (unsigned long long) end); | 521 | (unsigned long long) end); |
| 522 | e820_print_type(old_type); | 522 | if (checktype) |
| 523 | e820_print_type(old_type); | ||
| 523 | printk(KERN_CONT "\n"); | 524 | printk(KERN_CONT "\n"); |
| 524 | 525 | ||
| 525 | for (i = 0; i < e820.nr_map; i++) { | 526 | for (i = 0; i < e820.nr_map; i++) { |
| 526 | struct e820entry *ei = &e820.map[i]; | 527 | struct e820entry *ei = &e820.map[i]; |
| 527 | u64 final_start, final_end; | 528 | u64 final_start, final_end; |
| 529 | u64 ei_end; | ||
| 528 | 530 | ||
| 529 | if (checktype && ei->type != old_type) | 531 | if (checktype && ei->type != old_type) |
| 530 | continue; | 532 | continue; |
| 533 | |||
| 534 | ei_end = ei->addr + ei->size; | ||
| 531 | /* totally covered? */ | 535 | /* totally covered? */ |
| 532 | if (ei->addr >= start && | 536 | if (ei->addr >= start && ei_end <= end) { |
| 533 | (ei->addr + ei->size) <= (start + size)) { | ||
| 534 | real_removed_size += ei->size; | 537 | real_removed_size += ei->size; |
| 535 | memset(ei, 0, sizeof(struct e820entry)); | 538 | memset(ei, 0, sizeof(struct e820entry)); |
| 536 | continue; | 539 | continue; |
| 537 | } | 540 | } |
| 541 | |||
| 542 | /* new range is totally covered? */ | ||
| 543 | if (ei->addr < start && ei_end > end) { | ||
| 544 | e820_add_region(end, ei_end - end, ei->type); | ||
| 545 | ei->size = start - ei->addr; | ||
| 546 | real_removed_size += size; | ||
| 547 | continue; | ||
| 548 | } | ||
| 549 | |||
| 538 | /* partially covered */ | 550 | /* partially covered */ |
| 539 | final_start = max(start, ei->addr); | 551 | final_start = max(start, ei->addr); |
| 540 | final_end = min(start + size, ei->addr + ei->size); | 552 | final_end = min(end, ei_end); |
| 541 | if (final_start >= final_end) | 553 | if (final_start >= final_end) |
| 542 | continue; | 554 | continue; |
| 543 | real_removed_size += final_end - final_start; | 555 | real_removed_size += final_end - final_start; |
| 544 | 556 | ||
| 557 | /* | ||
| 558 | * left range could be head or tail, so need to update | ||
| 559 | * size at first. | ||
| 560 | */ | ||
| 545 | ei->size -= final_end - final_start; | 561 | ei->size -= final_end - final_start; |
| 546 | if (ei->addr < final_start) | 562 | if (ei->addr < final_start) |
| 547 | continue; | 563 | continue; |
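Editor's note: the new "new range is totally covered?" branch fixes the case where the range being removed sits strictly inside one existing entry: the entry is split into a head (by shrinking its size) and a tail (by adding a fresh region of the same type). A small worked example with made-up addresses:

    /*
     * Remove [0x2000, 0x3000) from an entry covering [0x1000, 0x5000):
     *
     *   before:  { .addr = 0x1000, .size = 0x4000, .type = E820_RAM }
     *
     *   e820_add_region(0x3000, 0x2000, E820_RAM);   tail -> [0x3000, 0x5000)
     *   ei->size = 0x2000 - 0x1000;                  head -> [0x1000, 0x2000)
     *
     *   after:   { 0x1000, 0x1000, RAM } and { 0x3000, 0x2000, RAM },
     *            and real_removed_size grows by 0x1000 (the removed range).
     */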
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index d10a7e7294f4..23b4ecdffa9b 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
| @@ -400,9 +400,15 @@ static int hpet_next_event(unsigned long delta, | |||
| 400 | * then we might have a real hardware problem. We can not do | 400 | * then we might have a real hardware problem. We can not do |
| 401 | * much about it here, but at least alert the user/admin with | 401 | * much about it here, but at least alert the user/admin with |
| 402 | * a prominent warning. | 402 | * a prominent warning. |
| 403 | * An erratum on some chipsets (ICH9,..), results in comparator read | ||
| 404 | * immediately following a write returning old value. Workaround | ||
| 405 | * for this is to read this value second time, when first | ||
| 406 | * read returns old value. | ||
| 403 | */ | 407 | */ |
| 404 | WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt, | 408 | if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) { |
| 409 | WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt, | ||
| 405 | KERN_WARNING "hpet: compare register read back failed.\n"); | 410 | KERN_WARNING "hpet: compare register read back failed.\n"); |
| 411 | } | ||
| 406 | 412 | ||
| 407 | return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; | 413 | return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; |
| 408 | } | 414 | } |
| @@ -1144,6 +1150,7 @@ int hpet_set_periodic_freq(unsigned long freq) | |||
| 1144 | do_div(clc, freq); | 1150 | do_div(clc, freq); |
| 1145 | clc >>= hpet_clockevent.shift; | 1151 | clc >>= hpet_clockevent.shift; |
| 1146 | hpet_pie_delta = clc; | 1152 | hpet_pie_delta = clc; |
| 1153 | hpet_pie_limit = 0; | ||
| 1147 | } | 1154 | } |
| 1148 | return 1; | 1155 | return 1; |
| 1149 | } | 1156 | } |
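Editor's note: two independent fixes here. hpet_next_event() works around a chipset erratum (ICH9 and similar) where the first comparator read after a write can return the stale value, so it only warns if a second read still disagrees; hpet_set_periodic_freq() additionally clears hpet_pie_limit when it recomputes the delta so the new frequency actually takes effect. The readback check restated outside the diff markup:

    hpet_writel(cnt, HPET_Tn_CMP(timer));   /* program the next event */

    if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
            /* First read disagreed; complain only if a re-read still does. */
            WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
                      KERN_WARNING "hpet: compare register read back failed.\n");
    }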
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index a2c1edd2d3ac..e81030f71a8f 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
| @@ -664,7 +664,7 @@ static void __init smp_reserve_memory(struct mpf_intel *mpf) | |||
| 664 | { | 664 | { |
| 665 | unsigned long size = get_mpc_size(mpf->physptr); | 665 | unsigned long size = get_mpc_size(mpf->physptr); |
| 666 | 666 | ||
| 667 | reserve_early(mpf->physptr, mpf->physptr+size, "MP-table mpc"); | 667 | reserve_early_overlap_ok(mpf->physptr, mpf->physptr+size, "MP-table mpc"); |
| 668 | } | 668 | } |
| 669 | 669 | ||
| 670 | static int __init smp_scan_config(unsigned long base, unsigned long length) | 670 | static int __init smp_scan_config(unsigned long base, unsigned long length) |
| @@ -693,7 +693,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) | |||
| 693 | mpf, (u64)virt_to_phys(mpf)); | 693 | mpf, (u64)virt_to_phys(mpf)); |
| 694 | 694 | ||
| 695 | mem = virt_to_phys(mpf); | 695 | mem = virt_to_phys(mpf); |
| 696 | reserve_early(mem, mem + sizeof(*mpf), "MP-table mpf"); | 696 | reserve_early_overlap_ok(mem, mem + sizeof(*mpf), "MP-table mpf"); |
| 697 | if (mpf->physptr) | 697 | if (mpf->physptr) |
| 698 | smp_reserve_memory(mpf); | 698 | smp_reserve_memory(mpf); |
| 699 | 699 | ||
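Editor's note: the MP-table reservations switch from reserve_early() to reserve_early_overlap_ok(). The former treats a collision with an existing early reservation as a fatal error, while the latter tolerates overlap, which suits firmware tables that can legitimately sit inside memory something else has already claimed (the iBFT reservation added in setup.c below uses the same call). A contrast sketch with made-up addresses:

    static void __init example_early_reservations(void)
    {
            /* Normal reservation: overlapping a prior reservation is a bug. */
            reserve_early(0x6000, 0x7000, "TRAMPOLINE");

            /* Firmware table that may live inside already-reserved memory:
             * overlap is tolerated instead of being treated as an error. */
            reserve_early_overlap_ok(0x9fc00, 0x9fc00 + 0x400, "MP-table mpf");
    }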
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 68cd24f9deae..0f7f130caa67 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
| @@ -565,6 +565,9 @@ static void enable_gart_translations(void) | |||
| 565 | 565 | ||
| 566 | enable_gart_translation(dev, __pa(agp_gatt_table)); | 566 | enable_gart_translation(dev, __pa(agp_gatt_table)); |
| 567 | } | 567 | } |
| 568 | |||
| 569 | /* Flush the GART-TLB to remove stale entries */ | ||
| 570 | k8_flush_garts(); | ||
| 568 | } | 571 | } |
| 569 | 572 | ||
| 570 | /* | 573 | /* |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 9570541caf7c..c4851eff57b3 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
| @@ -607,6 +607,16 @@ static int __init setup_elfcorehdr(char *arg) | |||
| 607 | early_param("elfcorehdr", setup_elfcorehdr); | 607 | early_param("elfcorehdr", setup_elfcorehdr); |
| 608 | #endif | 608 | #endif |
| 609 | 609 | ||
| 610 | static __init void reserve_ibft_region(void) | ||
| 611 | { | ||
| 612 | unsigned long addr, size = 0; | ||
| 613 | |||
| 614 | addr = find_ibft_region(&size); | ||
| 615 | |||
| 616 | if (size) | ||
| 617 | reserve_early_overlap_ok(addr, addr + size, "ibft"); | ||
| 618 | } | ||
| 619 | |||
| 610 | #ifdef CONFIG_X86_RESERVE_LOW_64K | 620 | #ifdef CONFIG_X86_RESERVE_LOW_64K |
| 611 | static int __init dmi_low_memory_corruption(const struct dmi_system_id *d) | 621 | static int __init dmi_low_memory_corruption(const struct dmi_system_id *d) |
| 612 | { | 622 | { |
| @@ -909,6 +919,8 @@ void __init setup_arch(char **cmdline_p) | |||
| 909 | */ | 919 | */ |
| 910 | find_smp_config(); | 920 | find_smp_config(); |
| 911 | 921 | ||
| 922 | reserve_ibft_region(); | ||
| 923 | |||
| 912 | reserve_trampoline_memory(); | 924 | reserve_trampoline_memory(); |
| 913 | 925 | ||
| 914 | #ifdef CONFIG_ACPI_SLEEP | 926 | #ifdef CONFIG_ACPI_SLEEP |
| @@ -976,8 +988,6 @@ void __init setup_arch(char **cmdline_p) | |||
| 976 | 988 | ||
| 977 | dma32_reserve_bootmem(); | 989 | dma32_reserve_bootmem(); |
| 978 | 990 | ||
| 979 | reserve_ibft_region(); | ||
| 980 | |||
| 981 | #ifdef CONFIG_KVM_CLOCK | 991 | #ifdef CONFIG_KVM_CLOCK |
| 982 | kvmclock_init(); | 992 | kvmclock_init(); |
| 983 | #endif | 993 | #endif |
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 7e59dc1d3fc2..2bdf628066bd 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
| @@ -115,7 +115,7 @@ static void async_hcall(unsigned long call, unsigned long arg1, | |||
| 115 | local_irq_save(flags); | 115 | local_irq_save(flags); |
| 116 | if (lguest_data.hcall_status[next_call] != 0xFF) { | 116 | if (lguest_data.hcall_status[next_call] != 0xFF) { |
| 117 | /* Table full, so do normal hcall which will flush table. */ | 117 | /* Table full, so do normal hcall which will flush table. */ |
| 118 | kvm_hypercall4(call, arg1, arg2, arg3, arg4); | 118 | hcall(call, arg1, arg2, arg3, arg4); |
| 119 | } else { | 119 | } else { |
| 120 | lguest_data.hcalls[next_call].arg0 = call; | 120 | lguest_data.hcalls[next_call].arg0 = call; |
| 121 | lguest_data.hcalls[next_call].arg1 = arg1; | 121 | lguest_data.hcalls[next_call].arg1 = arg1; |
| @@ -145,46 +145,45 @@ static void async_hcall(unsigned long call, unsigned long arg1, | |||
| 145 | * So, when we're in lazy mode, we call async_hcall() to store the call for | 145 | * So, when we're in lazy mode, we call async_hcall() to store the call for |
| 146 | * future processing: | 146 | * future processing: |
| 147 | */ | 147 | */ |
| 148 | static void lazy_hcall1(unsigned long call, | 148 | static void lazy_hcall1(unsigned long call, unsigned long arg1) |
| 149 | unsigned long arg1) | ||
| 150 | { | 149 | { |
| 151 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) | 150 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) |
| 152 | kvm_hypercall1(call, arg1); | 151 | hcall(call, arg1, 0, 0, 0); |
| 153 | else | 152 | else |
| 154 | async_hcall(call, arg1, 0, 0, 0); | 153 | async_hcall(call, arg1, 0, 0, 0); |
| 155 | } | 154 | } |
| 156 | 155 | ||
| 157 | /* You can imagine what lazy_hcall2, 3 and 4 look like. :*/ | 156 | /* You can imagine what lazy_hcall2, 3 and 4 look like. :*/ |
| 158 | static void lazy_hcall2(unsigned long call, | 157 | static void lazy_hcall2(unsigned long call, |
| 159 | unsigned long arg1, | 158 | unsigned long arg1, |
| 160 | unsigned long arg2) | 159 | unsigned long arg2) |
| 161 | { | 160 | { |
| 162 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) | 161 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) |
| 163 | kvm_hypercall2(call, arg1, arg2); | 162 | hcall(call, arg1, arg2, 0, 0); |
| 164 | else | 163 | else |
| 165 | async_hcall(call, arg1, arg2, 0, 0); | 164 | async_hcall(call, arg1, arg2, 0, 0); |
| 166 | } | 165 | } |
| 167 | 166 | ||
| 168 | static void lazy_hcall3(unsigned long call, | 167 | static void lazy_hcall3(unsigned long call, |
| 169 | unsigned long arg1, | 168 | unsigned long arg1, |
| 170 | unsigned long arg2, | 169 | unsigned long arg2, |
| 171 | unsigned long arg3) | 170 | unsigned long arg3) |
| 172 | { | 171 | { |
| 173 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) | 172 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) |
| 174 | kvm_hypercall3(call, arg1, arg2, arg3); | 173 | hcall(call, arg1, arg2, arg3, 0); |
| 175 | else | 174 | else |
| 176 | async_hcall(call, arg1, arg2, arg3, 0); | 175 | async_hcall(call, arg1, arg2, arg3, 0); |
| 177 | } | 176 | } |
| 178 | 177 | ||
| 179 | #ifdef CONFIG_X86_PAE | 178 | #ifdef CONFIG_X86_PAE |
| 180 | static void lazy_hcall4(unsigned long call, | 179 | static void lazy_hcall4(unsigned long call, |
| 181 | unsigned long arg1, | 180 | unsigned long arg1, |
| 182 | unsigned long arg2, | 181 | unsigned long arg2, |
| 183 | unsigned long arg3, | 182 | unsigned long arg3, |
| 184 | unsigned long arg4) | 183 | unsigned long arg4) |
| 185 | { | 184 | { |
| 186 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) | 185 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) |
| 187 | kvm_hypercall4(call, arg1, arg2, arg3, arg4); | 186 | hcall(call, arg1, arg2, arg3, arg4); |
| 188 | else | 187 | else |
| 189 | async_hcall(call, arg1, arg2, arg3, arg4); | 188 | async_hcall(call, arg1, arg2, arg3, arg4); |
| 190 | } | 189 | } |
| @@ -196,13 +195,13 @@ static void lazy_hcall4(unsigned long call, | |||
| 196 | :*/ | 195 | :*/ |
| 197 | static void lguest_leave_lazy_mmu_mode(void) | 196 | static void lguest_leave_lazy_mmu_mode(void) |
| 198 | { | 197 | { |
| 199 | kvm_hypercall0(LHCALL_FLUSH_ASYNC); | 198 | hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0); |
| 200 | paravirt_leave_lazy_mmu(); | 199 | paravirt_leave_lazy_mmu(); |
| 201 | } | 200 | } |
| 202 | 201 | ||
| 203 | static void lguest_end_context_switch(struct task_struct *next) | 202 | static void lguest_end_context_switch(struct task_struct *next) |
| 204 | { | 203 | { |
| 205 | kvm_hypercall0(LHCALL_FLUSH_ASYNC); | 204 | hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0); |
| 206 | paravirt_end_context_switch(next); | 205 | paravirt_end_context_switch(next); |
| 207 | } | 206 | } |
| 208 | 207 | ||
| @@ -286,7 +285,7 @@ static void lguest_write_idt_entry(gate_desc *dt, | |||
| 286 | /* Keep the local copy up to date. */ | 285 | /* Keep the local copy up to date. */ |
| 287 | native_write_idt_entry(dt, entrynum, g); | 286 | native_write_idt_entry(dt, entrynum, g); |
| 288 | /* Tell Host about this new entry. */ | 287 | /* Tell Host about this new entry. */ |
| 289 | kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]); | 288 | hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1], 0); |
| 290 | } | 289 | } |
| 291 | 290 | ||
| 292 | /* | 291 | /* |
| @@ -300,7 +299,7 @@ static void lguest_load_idt(const struct desc_ptr *desc) | |||
| 300 | struct desc_struct *idt = (void *)desc->address; | 299 | struct desc_struct *idt = (void *)desc->address; |
| 301 | 300 | ||
| 302 | for (i = 0; i < (desc->size+1)/8; i++) | 301 | for (i = 0; i < (desc->size+1)/8; i++) |
| 303 | kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b); | 302 | hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b, 0); |
| 304 | } | 303 | } |
| 305 | 304 | ||
| 306 | /* | 305 | /* |
| @@ -321,7 +320,7 @@ static void lguest_load_gdt(const struct desc_ptr *desc) | |||
| 321 | struct desc_struct *gdt = (void *)desc->address; | 320 | struct desc_struct *gdt = (void *)desc->address; |
| 322 | 321 | ||
| 323 | for (i = 0; i < (desc->size+1)/8; i++) | 322 | for (i = 0; i < (desc->size+1)/8; i++) |
| 324 | kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b); | 323 | hcall(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b, 0); |
| 325 | } | 324 | } |
| 326 | 325 | ||
| 327 | /* | 326 | /* |
| @@ -334,8 +333,8 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, | |||
| 334 | { | 333 | { |
| 335 | native_write_gdt_entry(dt, entrynum, desc, type); | 334 | native_write_gdt_entry(dt, entrynum, desc, type); |
| 336 | /* Tell Host about this new entry. */ | 335 | /* Tell Host about this new entry. */ |
| 337 | kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum, | 336 | hcall(LHCALL_LOAD_GDT_ENTRY, entrynum, |
| 338 | dt[entrynum].a, dt[entrynum].b); | 337 | dt[entrynum].a, dt[entrynum].b, 0); |
| 339 | } | 338 | } |
| 340 | 339 | ||
| 341 | /* | 340 | /* |
| @@ -931,7 +930,7 @@ static int lguest_clockevent_set_next_event(unsigned long delta, | |||
| 931 | } | 930 | } |
| 932 | 931 | ||
| 933 | /* Please wake us this far in the future. */ | 932 | /* Please wake us this far in the future. */ |
| 934 | kvm_hypercall1(LHCALL_SET_CLOCKEVENT, delta); | 933 | hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0); |
| 935 | return 0; | 934 | return 0; |
| 936 | } | 935 | } |
| 937 | 936 | ||
| @@ -942,7 +941,7 @@ static void lguest_clockevent_set_mode(enum clock_event_mode mode, | |||
| 942 | case CLOCK_EVT_MODE_UNUSED: | 941 | case CLOCK_EVT_MODE_UNUSED: |
| 943 | case CLOCK_EVT_MODE_SHUTDOWN: | 942 | case CLOCK_EVT_MODE_SHUTDOWN: |
| 944 | /* A 0 argument shuts the clock down. */ | 943 | /* A 0 argument shuts the clock down. */ |
| 945 | kvm_hypercall0(LHCALL_SET_CLOCKEVENT); | 944 | hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0); |
| 946 | break; | 945 | break; |
| 947 | case CLOCK_EVT_MODE_ONESHOT: | 946 | case CLOCK_EVT_MODE_ONESHOT: |
| 948 | /* This is what we expect. */ | 947 | /* This is what we expect. */ |
| @@ -1100,7 +1099,7 @@ static void set_lguest_basic_apic_ops(void) | |||
| 1100 | /* STOP! Until an interrupt comes in. */ | 1099 | /* STOP! Until an interrupt comes in. */ |
| 1101 | static void lguest_safe_halt(void) | 1100 | static void lguest_safe_halt(void) |
| 1102 | { | 1101 | { |
| 1103 | kvm_hypercall0(LHCALL_HALT); | 1102 | hcall(LHCALL_HALT, 0, 0, 0, 0); |
| 1104 | } | 1103 | } |
| 1105 | 1104 | ||
| 1106 | /* | 1105 | /* |
| @@ -1112,8 +1111,8 @@ static void lguest_safe_halt(void) | |||
| 1112 | */ | 1111 | */ |
| 1113 | static void lguest_power_off(void) | 1112 | static void lguest_power_off(void) |
| 1114 | { | 1113 | { |
| 1115 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"), | 1114 | hcall(LHCALL_SHUTDOWN, __pa("Power down"), |
| 1116 | LGUEST_SHUTDOWN_POWEROFF); | 1115 | LGUEST_SHUTDOWN_POWEROFF, 0, 0); |
| 1117 | } | 1116 | } |
| 1118 | 1117 | ||
| 1119 | /* | 1118 | /* |
| @@ -1123,7 +1122,7 @@ static void lguest_power_off(void) | |||
| 1123 | */ | 1122 | */ |
| 1124 | static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p) | 1123 | static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p) |
| 1125 | { | 1124 | { |
| 1126 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF); | 1125 | hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0); |
| 1127 | /* The hcall won't return, but to keep gcc happy, we're "done". */ | 1126 | /* The hcall won't return, but to keep gcc happy, we're "done". */ |
| 1128 | return NOTIFY_DONE; | 1127 | return NOTIFY_DONE; |
| 1129 | } | 1128 | } |
| @@ -1162,7 +1161,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) | |||
| 1162 | len = sizeof(scratch) - 1; | 1161 | len = sizeof(scratch) - 1; |
| 1163 | scratch[len] = '\0'; | 1162 | scratch[len] = '\0'; |
| 1164 | memcpy(scratch, buf, len); | 1163 | memcpy(scratch, buf, len); |
| 1165 | kvm_hypercall1(LHCALL_NOTIFY, __pa(scratch)); | 1164 | hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0); |
| 1166 | 1165 | ||
| 1167 | /* This routine returns the number of bytes actually written. */ | 1166 | /* This routine returns the number of bytes actually written. */ |
| 1168 | return len; | 1167 | return len; |
| @@ -1174,7 +1173,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) | |||
| 1174 | */ | 1173 | */ |
| 1175 | static void lguest_restart(char *reason) | 1174 | static void lguest_restart(char *reason) |
| 1176 | { | 1175 | { |
| 1177 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART); | 1176 | hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0); |
| 1178 | } | 1177 | } |
| 1179 | 1178 | ||
| 1180 | /*G:050 | 1179 | /*G:050 |
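Editor's note: all of the boot.c call sites switch mechanically from kvm_hypercall{0..4}() to the five-argument hcall(), while the lazy_hcall helpers keep batching MMU-heavy calls through lguest_data.hcalls[] and only fall back to a synchronous hcall() when not in lazy mode or when the ring is full. An illustrative caller of the batching path; the pte plumbing is simplified and the real call sites in this file differ in detail:

    static void example_set_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pteval)
    {
            native_set_pte(ptep, pteval);
            /* Queued into lguest_data.hcalls[] during a lazy-MMU batch,
             * issued immediately via hcall() otherwise. */
            lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low);
    }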
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S index 27eac0faee48..4f420c2f2d55 100644 --- a/arch/x86/lguest/i386_head.S +++ b/arch/x86/lguest/i386_head.S | |||
| @@ -32,7 +32,7 @@ ENTRY(lguest_entry) | |||
| 32 | */ | 32 | */ |
| 33 | movl $LHCALL_LGUEST_INIT, %eax | 33 | movl $LHCALL_LGUEST_INIT, %eax |
| 34 | movl $lguest_data - __PAGE_OFFSET, %ebx | 34 | movl $lguest_data - __PAGE_OFFSET, %ebx |
| 35 | .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ | 35 | int $LGUEST_TRAP_ENTRY |
| 36 | 36 | ||
| 37 | /* Set up the initial stack so we can run C code. */ | 37 | /* Set up the initial stack so we can run C code. */ |
| 38 | movl $(init_thread_union+THREAD_SIZE),%esp | 38 | movl $(init_thread_union+THREAD_SIZE),%esp |
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S index b641388d8286..ad47daeafa4e 100644 --- a/arch/x86/power/hibernate_asm_32.S +++ b/arch/x86/power/hibernate_asm_32.S | |||
| @@ -27,10 +27,17 @@ ENTRY(swsusp_arch_suspend) | |||
| 27 | ret | 27 | ret |
| 28 | 28 | ||
| 29 | ENTRY(restore_image) | 29 | ENTRY(restore_image) |
| 30 | movl mmu_cr4_features, %ecx | ||
| 30 | movl resume_pg_dir, %eax | 31 | movl resume_pg_dir, %eax |
| 31 | subl $__PAGE_OFFSET, %eax | 32 | subl $__PAGE_OFFSET, %eax |
| 32 | movl %eax, %cr3 | 33 | movl %eax, %cr3 |
| 33 | 34 | ||
| 35 | jecxz 1f # cr4 Pentium and higher, skip if zero | ||
| 36 | andl $~(X86_CR4_PGE), %ecx | ||
| 37 | movl %ecx, %cr4; # turn off PGE | ||
| 38 | movl %cr3, %eax; # flush TLB | ||
| 39 | movl %eax, %cr3 | ||
| 40 | 1: | ||
| 34 | movl restore_pblist, %edx | 41 | movl restore_pblist, %edx |
| 35 | .p2align 4,,7 | 42 | .p2align 4,,7 |
| 36 | 43 | ||
| @@ -54,16 +61,8 @@ done: | |||
| 54 | movl $swapper_pg_dir, %eax | 61 | movl $swapper_pg_dir, %eax |
| 55 | subl $__PAGE_OFFSET, %eax | 62 | subl $__PAGE_OFFSET, %eax |
| 56 | movl %eax, %cr3 | 63 | movl %eax, %cr3 |
| 57 | /* Flush TLB, including "global" things (vmalloc) */ | ||
| 58 | movl mmu_cr4_features, %ecx | 64 | movl mmu_cr4_features, %ecx |
| 59 | jecxz 1f # cr4 Pentium and higher, skip if zero | 65 | jecxz 1f # cr4 Pentium and higher, skip if zero |
| 60 | movl %ecx, %edx | ||
| 61 | andl $~(X86_CR4_PGE), %edx | ||
| 62 | movl %edx, %cr4; # turn off PGE | ||
| 63 | 1: | ||
| 64 | movl %cr3, %eax; # flush TLB | ||
| 65 | movl %eax, %cr3 | ||
| 66 | jecxz 1f # cr4 Pentium and higher, skip if zero | ||
| 67 | movl %ecx, %cr4; # turn PGE back on | 66 | movl %ecx, %cr4; # turn PGE back on |
| 68 | 1: | 67 | 1: |
| 69 | 68 | ||
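The restore_image change above moves the PGE handling to the point where the resume page directory is loaded. With CR4.PGE set, global TLB entries survive a CR3 reload, so PGE is cleared (which flushes them) around the page-table switch and only turned back on at the end. A hedged C rendering of that control flow, with the privileged register accessors stubbed out since they cannot run in user space:

    #include <stdio.h>

    #define X86_CR4_PGE (1UL << 7)          /* page global enable */

    static unsigned long mmu_cr4_features = X86_CR4_PGE;   /* sample value */

    /* Stubs standing in for privileged register writes. */
    static void write_cr3(unsigned long v) { printf("cr3 <- %#lx\n", v); }
    static void write_cr4(unsigned long v) { printf("cr4 <- %#lx\n", v); }

    static void switch_page_dir(unsigned long pgdir_phys)
    {
            unsigned long cr4 = mmu_cr4_features;

            write_cr3(pgdir_phys);
            if (cr4) {                       /* CR4 exists (Pentium and later) */
                    write_cr4(cr4 & ~X86_CR4_PGE);  /* clearing PGE flushes global TLB entries */
                    write_cr3(pgdir_phys);           /* reload CR3 as well */
            }
            /* ... restore the image pages here ... */
            if (cr4)
                    write_cr4(cr4);          /* turn PGE back on */
    }

    int main(void)
    {
            switch_page_dir(0x1000);
            return 0;
    }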
diff --git a/block/Kconfig b/block/Kconfig index 62a5921321cd..f9e89f4d94bb 100644 --- a/block/Kconfig +++ b/block/Kconfig | |||
| @@ -78,8 +78,9 @@ config BLK_DEV_INTEGRITY | |||
| 78 | Protection. If in doubt, say N. | 78 | Protection. If in doubt, say N. |
| 79 | 79 | ||
| 80 | config BLK_CGROUP | 80 | config BLK_CGROUP |
| 81 | tristate | 81 | tristate "Block cgroup support" |
| 82 | depends on CGROUPS | 82 | depends on CGROUPS |
| 83 | depends on CFQ_GROUP_IOSCHED | ||
| 83 | default n | 84 | default n |
| 84 | ---help--- | 85 | ---help--- |
| 85 | Generic block IO controller cgroup interface. This is the common | 86 | Generic block IO controller cgroup interface. This is the common |
diff --git a/block/blk-settings.c b/block/blk-settings.c index d9a9db5f0a2b..f5ed5a1187ba 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include <linux/blkdev.h> | 8 | #include <linux/blkdev.h> |
| 9 | #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ | 9 | #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ |
| 10 | #include <linux/gcd.h> | 10 | #include <linux/gcd.h> |
| 11 | #include <linux/lcm.h> | ||
| 11 | #include <linux/jiffies.h> | 12 | #include <linux/jiffies.h> |
| 12 | #include <linux/gfp.h> | 13 | #include <linux/gfp.h> |
| 13 | 14 | ||
| @@ -462,16 +463,6 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) | |||
| 462 | } | 463 | } |
| 463 | EXPORT_SYMBOL(blk_queue_stack_limits); | 464 | EXPORT_SYMBOL(blk_queue_stack_limits); |
| 464 | 465 | ||
| 465 | static unsigned int lcm(unsigned int a, unsigned int b) | ||
| 466 | { | ||
| 467 | if (a && b) | ||
| 468 | return (a * b) / gcd(a, b); | ||
| 469 | else if (b) | ||
| 470 | return b; | ||
| 471 | |||
| 472 | return a; | ||
| 473 | } | ||
| 474 | |||
| 475 | /** | 466 | /** |
| 476 | * blk_stack_limits - adjust queue_limits for stacked devices | 467 | * blk_stack_limits - adjust queue_limits for stacked devices |
| 477 | * @t: the stacking driver limits (top device) | 468 | * @t: the stacking driver limits (top device) |
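The helper deleted above is the classic lcm-via-gcd identity, now supplied by <linux/lcm.h> instead of being open-coded in blk-settings.c. A standalone sketch of the same arithmetic (user-space, with a local gcd so it compiles outside the kernel):

    #include <stdio.h>

    static unsigned int gcd(unsigned int a, unsigned int b)
    {
            while (b) {
                    unsigned int t = a % b;
                    a = b;
                    b = t;
            }
            return a;
    }

    /* Same result as the helper removed from blk-settings.c. */
    static unsigned int lcm(unsigned int a, unsigned int b)
    {
            if (a && b)
                    return (a / gcd(a, b)) * b;   /* divide first to delay overflow */
            return a ? a : b;
    }

    int main(void)
    {
            printf("lcm(512, 4096) = %u\n", lcm(512, 4096));  /* prints 4096 */
            return 0;
    }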
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index c2b821fa324a..306759bbdf1b 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
| @@ -107,6 +107,19 @@ static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) | |||
| 107 | return queue_var_show(max_sectors_kb, (page)); | 107 | return queue_var_show(max_sectors_kb, (page)); |
| 108 | } | 108 | } |
| 109 | 109 | ||
| 110 | static ssize_t queue_max_segments_show(struct request_queue *q, char *page) | ||
| 111 | { | ||
| 112 | return queue_var_show(queue_max_segments(q), (page)); | ||
| 113 | } | ||
| 114 | |||
| 115 | static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) | ||
| 116 | { | ||
| 117 | if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags)) | ||
| 118 | return queue_var_show(queue_max_segment_size(q), (page)); | ||
| 119 | |||
| 120 | return queue_var_show(PAGE_CACHE_SIZE, (page)); | ||
| 121 | } | ||
| 122 | |||
| 110 | static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) | 123 | static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) |
| 111 | { | 124 | { |
| 112 | return queue_var_show(queue_logical_block_size(q), page); | 125 | return queue_var_show(queue_logical_block_size(q), page); |
| @@ -281,6 +294,16 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = { | |||
| 281 | .show = queue_max_hw_sectors_show, | 294 | .show = queue_max_hw_sectors_show, |
| 282 | }; | 295 | }; |
| 283 | 296 | ||
| 297 | static struct queue_sysfs_entry queue_max_segments_entry = { | ||
| 298 | .attr = {.name = "max_segments", .mode = S_IRUGO }, | ||
| 299 | .show = queue_max_segments_show, | ||
| 300 | }; | ||
| 301 | |||
| 302 | static struct queue_sysfs_entry queue_max_segment_size_entry = { | ||
| 303 | .attr = {.name = "max_segment_size", .mode = S_IRUGO }, | ||
| 304 | .show = queue_max_segment_size_show, | ||
| 305 | }; | ||
| 306 | |||
| 284 | static struct queue_sysfs_entry queue_iosched_entry = { | 307 | static struct queue_sysfs_entry queue_iosched_entry = { |
| 285 | .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR }, | 308 | .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR }, |
| 286 | .show = elv_iosched_show, | 309 | .show = elv_iosched_show, |
| @@ -356,6 +379,8 @@ static struct attribute *default_attrs[] = { | |||
| 356 | &queue_ra_entry.attr, | 379 | &queue_ra_entry.attr, |
| 357 | &queue_max_hw_sectors_entry.attr, | 380 | &queue_max_hw_sectors_entry.attr, |
| 358 | &queue_max_sectors_entry.attr, | 381 | &queue_max_sectors_entry.attr, |
| 382 | &queue_max_segments_entry.attr, | ||
| 383 | &queue_max_segment_size_entry.attr, | ||
| 359 | &queue_iosched_entry.attr, | 384 | &queue_iosched_entry.attr, |
| 360 | &queue_hw_sector_size_entry.attr, | 385 | &queue_hw_sector_size_entry.attr, |
| 361 | &queue_logical_block_size_entry.attr, | 386 | &queue_logical_block_size_entry.attr, |
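The two read-only attributes added above expose the segment limits under the queue directory in sysfs. A small sketch of how user space might read them; the device name sda is just an example:

    #include <stdio.h>

    static void show(const char *attr)
    {
            char path[128], buf[64];
            FILE *f;

            snprintf(path, sizeof(path), "/sys/block/sda/queue/%s", attr);
            f = fopen(path, "r");
            if (f && fgets(buf, sizeof(buf), f))
                    printf("%s: %s", attr, buf);
            if (f)
                    fclose(f);
    }

    int main(void)
    {
            show("max_segments");
            show("max_segment_size");
            return 0;
    }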
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index fc98a48554fd..838834be115b 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
| @@ -48,6 +48,7 @@ static const int cfq_hist_divisor = 4; | |||
| 48 | #define CFQ_SERVICE_SHIFT 12 | 48 | #define CFQ_SERVICE_SHIFT 12 |
| 49 | 49 | ||
| 50 | #define CFQQ_SEEK_THR (sector_t)(8 * 100) | 50 | #define CFQQ_SEEK_THR (sector_t)(8 * 100) |
| 51 | #define CFQQ_CLOSE_THR (sector_t)(8 * 1024) | ||
| 51 | #define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32) | 52 | #define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32) |
| 52 | #define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8) | 53 | #define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8) |
| 53 | 54 | ||
| @@ -948,6 +949,11 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create) | |||
| 948 | unsigned int major, minor; | 949 | unsigned int major, minor; |
| 949 | 950 | ||
| 950 | cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key)); | 951 | cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key)); |
| 952 | if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) { | ||
| 953 | sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor); | ||
| 954 | cfqg->blkg.dev = MKDEV(major, minor); | ||
| 955 | goto done; | ||
| 956 | } | ||
| 951 | if (cfqg || !create) | 957 | if (cfqg || !create) |
| 952 | goto done; | 958 | goto done; |
| 953 | 959 | ||
| @@ -1518,7 +1524,8 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd, | |||
| 1518 | struct cfq_queue *cfqq) | 1524 | struct cfq_queue *cfqq) |
| 1519 | { | 1525 | { |
| 1520 | if (cfqq) { | 1526 | if (cfqq) { |
| 1521 | cfq_log_cfqq(cfqd, cfqq, "set_active"); | 1527 | cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d", |
| 1528 | cfqd->serving_prio, cfqd->serving_type); | ||
| 1522 | cfqq->slice_start = 0; | 1529 | cfqq->slice_start = 0; |
| 1523 | cfqq->dispatch_start = jiffies; | 1530 | cfqq->dispatch_start = jiffies; |
| 1524 | cfqq->allocated_slice = 0; | 1531 | cfqq->allocated_slice = 0; |
| @@ -1661,9 +1668,9 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, | |||
| 1661 | } | 1668 | } |
| 1662 | 1669 | ||
| 1663 | static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, | 1670 | static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, |
| 1664 | struct request *rq, bool for_preempt) | 1671 | struct request *rq) |
| 1665 | { | 1672 | { |
| 1666 | return cfq_dist_from_last(cfqd, rq) <= CFQQ_SEEK_THR; | 1673 | return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR; |
| 1667 | } | 1674 | } |
| 1668 | 1675 | ||
| 1669 | static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, | 1676 | static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, |
| @@ -1690,7 +1697,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, | |||
| 1690 | * will contain the closest sector. | 1697 | * will contain the closest sector. |
| 1691 | */ | 1698 | */ |
| 1692 | __cfqq = rb_entry(parent, struct cfq_queue, p_node); | 1699 | __cfqq = rb_entry(parent, struct cfq_queue, p_node); |
| 1693 | if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false)) | 1700 | if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) |
| 1694 | return __cfqq; | 1701 | return __cfqq; |
| 1695 | 1702 | ||
| 1696 | if (blk_rq_pos(__cfqq->next_rq) < sector) | 1703 | if (blk_rq_pos(__cfqq->next_rq) < sector) |
| @@ -1701,7 +1708,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, | |||
| 1701 | return NULL; | 1708 | return NULL; |
| 1702 | 1709 | ||
| 1703 | __cfqq = rb_entry(node, struct cfq_queue, p_node); | 1710 | __cfqq = rb_entry(node, struct cfq_queue, p_node); |
| 1704 | if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false)) | 1711 | if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) |
| 1705 | return __cfqq; | 1712 | return __cfqq; |
| 1706 | 1713 | ||
| 1707 | return NULL; | 1714 | return NULL; |
| @@ -1722,6 +1729,8 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, | |||
| 1722 | { | 1729 | { |
| 1723 | struct cfq_queue *cfqq; | 1730 | struct cfq_queue *cfqq; |
| 1724 | 1731 | ||
| 1732 | if (cfq_class_idle(cur_cfqq)) | ||
| 1733 | return NULL; | ||
| 1725 | if (!cfq_cfqq_sync(cur_cfqq)) | 1734 | if (!cfq_cfqq_sync(cur_cfqq)) |
| 1726 | return NULL; | 1735 | return NULL; |
| 1727 | if (CFQQ_SEEKY(cur_cfqq)) | 1736 | if (CFQQ_SEEKY(cur_cfqq)) |
| @@ -1788,7 +1797,11 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
| 1788 | * Otherwise, we do only if they are the last ones | 1797 | * Otherwise, we do only if they are the last ones |
| 1789 | * in their service tree. | 1798 | * in their service tree. |
| 1790 | */ | 1799 | */ |
| 1791 | return service_tree->count == 1 && cfq_cfqq_sync(cfqq); | 1800 | if (service_tree->count == 1 && cfq_cfqq_sync(cfqq)) |
| 1801 | return 1; | ||
| 1802 | cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", | ||
| 1803 | service_tree->count); | ||
| 1804 | return 0; | ||
| 1792 | } | 1805 | } |
| 1793 | 1806 | ||
| 1794 | static void cfq_arm_slice_timer(struct cfq_data *cfqd) | 1807 | static void cfq_arm_slice_timer(struct cfq_data *cfqd) |
| @@ -1833,8 +1846,11 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) | |||
| 1833 | * time slice. | 1846 | * time slice. |
| 1834 | */ | 1847 | */ |
| 1835 | if (sample_valid(cic->ttime_samples) && | 1848 | if (sample_valid(cic->ttime_samples) && |
| 1836 | (cfqq->slice_end - jiffies < cic->ttime_mean)) | 1849 | (cfqq->slice_end - jiffies < cic->ttime_mean)) { |
| 1850 | cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d", | ||
| 1851 | cic->ttime_mean); | ||
| 1837 | return; | 1852 | return; |
| 1853 | } | ||
| 1838 | 1854 | ||
| 1839 | cfq_mark_cfqq_wait_request(cfqq); | 1855 | cfq_mark_cfqq_wait_request(cfqq); |
| 1840 | 1856 | ||
| @@ -2042,6 +2058,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg) | |||
| 2042 | slice = max(slice, 2 * cfqd->cfq_slice_idle); | 2058 | slice = max(slice, 2 * cfqd->cfq_slice_idle); |
| 2043 | 2059 | ||
| 2044 | slice = max_t(unsigned, slice, CFQ_MIN_TT); | 2060 | slice = max_t(unsigned, slice, CFQ_MIN_TT); |
| 2061 | cfq_log(cfqd, "workload slice:%d", slice); | ||
| 2045 | cfqd->workload_expires = jiffies + slice; | 2062 | cfqd->workload_expires = jiffies + slice; |
| 2046 | cfqd->noidle_tree_requires_idle = false; | 2063 | cfqd->noidle_tree_requires_idle = false; |
| 2047 | } | 2064 | } |
| @@ -2189,10 +2206,13 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd) | |||
| 2189 | struct cfq_queue *cfqq; | 2206 | struct cfq_queue *cfqq; |
| 2190 | int dispatched = 0; | 2207 | int dispatched = 0; |
| 2191 | 2208 | ||
| 2192 | while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) | 2209 | /* Expire the timeslice of the current active queue first */ |
| 2210 | cfq_slice_expired(cfqd, 0); | ||
| 2211 | while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) { | ||
| 2212 | __cfq_set_active_queue(cfqd, cfqq); | ||
| 2193 | dispatched += __cfq_forced_dispatch_cfqq(cfqq); | 2213 | dispatched += __cfq_forced_dispatch_cfqq(cfqq); |
| 2214 | } | ||
| 2194 | 2215 | ||
| 2195 | cfq_slice_expired(cfqd, 0); | ||
| 2196 | BUG_ON(cfqd->busy_queues); | 2216 | BUG_ON(cfqd->busy_queues); |
| 2197 | 2217 | ||
| 2198 | cfq_log(cfqd, "forced_dispatch=%d", dispatched); | 2218 | cfq_log(cfqd, "forced_dispatch=%d", dispatched); |
| @@ -3104,7 +3124,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, | |||
| 3104 | * if this request is as-good as one we would expect from the | 3124 | * if this request is as-good as one we would expect from the |
| 3105 | * current cfqq, let it preempt | 3125 | * current cfqq, let it preempt |
| 3106 | */ | 3126 | */ |
| 3107 | if (cfq_rq_close(cfqd, cfqq, rq, true)) | 3127 | if (cfq_rq_close(cfqd, cfqq, rq)) |
| 3108 | return true; | 3128 | return true; |
| 3109 | 3129 | ||
| 3110 | return false; | 3130 | return false; |
| @@ -3308,6 +3328,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) | |||
| 3308 | if (cfq_should_wait_busy(cfqd, cfqq)) { | 3328 | if (cfq_should_wait_busy(cfqd, cfqq)) { |
| 3309 | cfqq->slice_end = jiffies + cfqd->cfq_slice_idle; | 3329 | cfqq->slice_end = jiffies + cfqd->cfq_slice_idle; |
| 3310 | cfq_mark_cfqq_wait_busy(cfqq); | 3330 | cfq_mark_cfqq_wait_busy(cfqq); |
| 3331 | cfq_log_cfqq(cfqd, cfqq, "will busy wait"); | ||
| 3311 | } | 3332 | } |
| 3312 | 3333 | ||
| 3313 | /* | 3334 | /* |
diff --git a/block/elevator.c b/block/elevator.c index df75676f6671..76e3702d5381 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
| @@ -154,7 +154,7 @@ static struct elevator_type *elevator_get(const char *name) | |||
| 154 | 154 | ||
| 155 | spin_unlock(&elv_list_lock); | 155 | spin_unlock(&elv_list_lock); |
| 156 | 156 | ||
| 157 | sprintf(elv, "%s-iosched", name); | 157 | snprintf(elv, sizeof(elv), "%s-iosched", name); |
| 158 | 158 | ||
| 159 | request_module("%s", elv); | 159 | request_module("%s", elv); |
| 160 | spin_lock(&elv_list_lock); | 160 | spin_lock(&elv_list_lock); |
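The elevator.c hunk swaps sprintf() for snprintf() so an overlong scheduler name can no longer overrun the on-stack elv[] buffer. A minimal standalone demonstration of the difference; the buffer size and name here are made up for illustration:

    #include <stdio.h>

    int main(void)
    {
            char elv[16];
            const char *name = "a-very-long-scheduler-name";

            /* sprintf(elv, "%s-iosched", name) would write past the 16 bytes. */
            snprintf(elv, sizeof(elv), "%s-iosched", name);  /* safely truncated */
            printf("%s\n", elv);
            return 0;
    }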
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 837de669743a..78c55508aff5 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c | |||
| @@ -117,19 +117,14 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
| 117 | if (ACPI_FAILURE(status)) | 117 | if (ACPI_FAILURE(status)) |
| 118 | return_ACPI_STATUS(status); | 118 | return_ACPI_STATUS(status); |
| 119 | 119 | ||
| 120 | /* Mark wake-enabled or HW enable, or both */ | 120 | /* Clear the GPE (of stale events), then enable it */ |
| 121 | 121 | status = acpi_hw_clear_gpe(gpe_event_info); | |
| 122 | if (gpe_event_info->runtime_count) { | 122 | if (ACPI_FAILURE(status)) |
| 123 | /* Clear the GPE (of stale events), then enable it */ | 123 | return_ACPI_STATUS(status); |
| 124 | status = acpi_hw_clear_gpe(gpe_event_info); | ||
| 125 | if (ACPI_FAILURE(status)) | ||
| 126 | return_ACPI_STATUS(status); | ||
| 127 | |||
| 128 | /* Enable the requested runtime GPE */ | ||
| 129 | status = acpi_hw_write_gpe_enable_reg(gpe_event_info); | ||
| 130 | } | ||
| 131 | 124 | ||
| 132 | return_ACPI_STATUS(AE_OK); | 125 | /* Enable the requested GPE */ |
| 126 | status = acpi_hw_write_gpe_enable_reg(gpe_event_info); | ||
| 127 | return_ACPI_STATUS(status); | ||
| 133 | } | 128 | } |
| 134 | 129 | ||
| 135 | /******************************************************************************* | 130 | /******************************************************************************* |
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index edf62bf5b266..2fbfe51fb141 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c | |||
| @@ -468,6 +468,23 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) | |||
| 468 | 468 | ||
| 469 | acpi_ut_add_reference(obj_desc->field.region_obj); | 469 | acpi_ut_add_reference(obj_desc->field.region_obj); |
| 470 | 470 | ||
| 471 | /* allow full data read from EC address space */ | ||
| 472 | if (obj_desc->field.region_obj->region.space_id == | ||
| 473 | ACPI_ADR_SPACE_EC) { | ||
| 474 | if (obj_desc->common_field.bit_length > 8) { | ||
| 475 | unsigned width = | ||
| 476 | ACPI_ROUND_BITS_UP_TO_BYTES( | ||
| 477 | obj_desc->common_field.bit_length); | ||
| 478 | // access_bit_width is u8, don't overflow it | ||
| 479 | if (width > 8) | ||
| 480 | width = 8; | ||
| 481 | obj_desc->common_field.access_byte_width = | ||
| 482 | width; | ||
| 483 | obj_desc->common_field.access_bit_width = | ||
| 484 | 8 * width; | ||
| 485 | } | ||
| 486 | } | ||
| 487 | |||
| 471 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, | 488 | ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, |
| 472 | "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n", | 489 | "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n", |
| 473 | obj_desc->field.start_field_bit_offset, | 490 | obj_desc->field.start_field_bit_offset, |
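The exprep.c addition widens the access path for EC operation-region fields: a field longer than 8 bits gets a byte-granular access width, capped at 8 bytes so the u8 access_bit_width cannot overflow. A small sketch of that rounding-and-clamping, assuming the ACPI macro simply rounds a bit count up to whole bytes:

    #include <stdio.h>

    /* Assumed equivalent of ACPI_ROUND_BITS_UP_TO_BYTES(). */
    static unsigned round_bits_up_to_bytes(unsigned bits)
    {
            return (bits + 7) / 8;
    }

    static unsigned ec_access_byte_width(unsigned bit_length)
    {
            unsigned width;

            if (bit_length <= 8)
                    return 1;               /* default single-byte access */
            width = round_bits_up_to_bytes(bit_length);
            if (width > 8)
                    width = 8;              /* access_bit_width is a u8: cap at 8 bytes / 64 bits */
            return width;
    }

    int main(void)
    {
            printf("%u\n", ec_access_byte_width(8));    /* 1 */
            printf("%u\n", ec_access_byte_width(48));   /* 6 */
            printf("%u\n", ec_access_byte_width(128));  /* 8, clamped */
            return 0;
    }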
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 5717bd300869..3026e3fa83ef 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
| @@ -568,13 +568,13 @@ static int acpi_battery_update(struct acpi_battery *battery) | |||
| 568 | result = acpi_battery_get_status(battery); | 568 | result = acpi_battery_get_status(battery); |
| 569 | if (result) | 569 | if (result) |
| 570 | return result; | 570 | return result; |
| 571 | #ifdef CONFIG_ACPI_SYSFS_POWER | ||
| 572 | if (!acpi_battery_present(battery)) { | 571 | if (!acpi_battery_present(battery)) { |
| 572 | #ifdef CONFIG_ACPI_SYSFS_POWER | ||
| 573 | sysfs_remove_battery(battery); | 573 | sysfs_remove_battery(battery); |
| 574 | #endif | ||
| 574 | battery->update_time = 0; | 575 | battery->update_time = 0; |
| 575 | return 0; | 576 | return 0; |
| 576 | } | 577 | } |
| 577 | #endif | ||
| 578 | if (!battery->update_time || | 578 | if (!battery->update_time || |
| 579 | old_present != acpi_battery_present(battery)) { | 579 | old_present != acpi_battery_present(battery)) { |
| 580 | result = acpi_battery_get_info(battery); | 580 | result = acpi_battery_get_info(battery); |
| @@ -880,7 +880,7 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event) | |||
| 880 | #ifdef CONFIG_ACPI_SYSFS_POWER | 880 | #ifdef CONFIG_ACPI_SYSFS_POWER |
| 881 | /* acpi_battery_update could remove power_supply object */ | 881 | /* acpi_battery_update could remove power_supply object */ |
| 882 | if (battery->bat.dev) | 882 | if (battery->bat.dev) |
| 883 | kobject_uevent(&battery->bat.dev->kobj, KOBJ_CHANGE); | 883 | power_supply_changed(&battery->bat); |
| 884 | #endif | 884 | #endif |
| 885 | } | 885 | } |
| 886 | 886 | ||
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index a9c429c5d50f..3fe29e992be8 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c | |||
| @@ -1026,13 +1026,10 @@ static int dock_remove(struct dock_station *ds) | |||
| 1026 | static acpi_status | 1026 | static acpi_status |
| 1027 | find_dock(acpi_handle handle, u32 lvl, void *context, void **rv) | 1027 | find_dock(acpi_handle handle, u32 lvl, void *context, void **rv) |
| 1028 | { | 1028 | { |
| 1029 | acpi_status status = AE_OK; | ||
| 1030 | |||
| 1031 | if (is_dock(handle)) | 1029 | if (is_dock(handle)) |
| 1032 | if (dock_add(handle) >= 0) | 1030 | dock_add(handle); |
| 1033 | status = AE_CTRL_TERMINATE; | ||
| 1034 | 1031 | ||
| 1035 | return status; | 1032 | return AE_OK; |
| 1036 | } | 1033 | } |
| 1037 | 1034 | ||
| 1038 | static acpi_status | 1035 | static acpi_status |
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 35ba2547f544..f2234db85da0 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
| @@ -629,12 +629,12 @@ static u32 acpi_ec_gpe_handler(void *data) | |||
| 629 | 629 | ||
| 630 | static acpi_status | 630 | static acpi_status |
| 631 | acpi_ec_space_handler(u32 function, acpi_physical_address address, | 631 | acpi_ec_space_handler(u32 function, acpi_physical_address address, |
| 632 | u32 bits, u64 *value, | 632 | u32 bits, u64 *value64, |
| 633 | void *handler_context, void *region_context) | 633 | void *handler_context, void *region_context) |
| 634 | { | 634 | { |
| 635 | struct acpi_ec *ec = handler_context; | 635 | struct acpi_ec *ec = handler_context; |
| 636 | int result = 0, i; | 636 | int result = 0, i, bytes = bits / 8; |
| 637 | u8 temp = 0; | 637 | u8 *value = (u8 *)value64; |
| 638 | 638 | ||
| 639 | if ((address > 0xFF) || !value || !handler_context) | 639 | if ((address > 0xFF) || !value || !handler_context) |
| 640 | return AE_BAD_PARAMETER; | 640 | return AE_BAD_PARAMETER; |
| @@ -642,32 +642,15 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, | |||
| 642 | if (function != ACPI_READ && function != ACPI_WRITE) | 642 | if (function != ACPI_READ && function != ACPI_WRITE) |
| 643 | return AE_BAD_PARAMETER; | 643 | return AE_BAD_PARAMETER; |
| 644 | 644 | ||
| 645 | if (bits != 8 && acpi_strict) | 645 | if (EC_FLAGS_MSI || bits > 8) |
| 646 | return AE_BAD_PARAMETER; | ||
| 647 | |||
| 648 | if (EC_FLAGS_MSI) | ||
| 649 | acpi_ec_burst_enable(ec); | 646 | acpi_ec_burst_enable(ec); |
| 650 | 647 | ||
| 651 | if (function == ACPI_READ) { | 648 | for (i = 0; i < bytes; ++i, ++address, ++value) |
| 652 | result = acpi_ec_read(ec, address, &temp); | 649 | result = (function == ACPI_READ) ? |
| 653 | *value = temp; | 650 | acpi_ec_read(ec, address, value) : |
| 654 | } else { | 651 | acpi_ec_write(ec, address, *value); |
| 655 | temp = 0xff & (*value); | ||
| 656 | result = acpi_ec_write(ec, address, temp); | ||
| 657 | } | ||
| 658 | |||
| 659 | for (i = 8; unlikely(bits - i > 0); i += 8) { | ||
| 660 | ++address; | ||
| 661 | if (function == ACPI_READ) { | ||
| 662 | result = acpi_ec_read(ec, address, &temp); | ||
| 663 | (*value) |= ((u64)temp) << i; | ||
| 664 | } else { | ||
| 665 | temp = 0xff & ((*value) >> i); | ||
| 666 | result = acpi_ec_write(ec, address, temp); | ||
| 667 | } | ||
| 668 | } | ||
| 669 | 652 | ||
| 670 | if (EC_FLAGS_MSI) | 653 | if (EC_FLAGS_MSI || bits > 8) |
| 671 | acpi_ec_burst_disable(ec); | 654 | acpi_ec_burst_disable(ec); |
| 672 | 655 | ||
| 673 | switch (result) { | 656 | switch (result) { |
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index b8725461d887..b0337d314604 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c | |||
| @@ -61,8 +61,10 @@ int node_to_pxm(int node) | |||
| 61 | 61 | ||
| 62 | void __acpi_map_pxm_to_node(int pxm, int node) | 62 | void __acpi_map_pxm_to_node(int pxm, int node) |
| 63 | { | 63 | { |
| 64 | pxm_to_node_map[pxm] = node; | 64 | if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm]) |
| 65 | node_to_pxm_map[node] = pxm; | 65 | pxm_to_node_map[pxm] = node; |
| 66 | if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node]) | ||
| 67 | node_to_pxm_map[node] = pxm; | ||
| 66 | } | 68 | } |
| 67 | 69 | ||
| 68 | int acpi_map_pxm_to_node(int pxm) | 70 | int acpi_map_pxm_to_node(int pxm) |
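The numa.c change makes the pxm-to-node tables keep the smallest value seen in each direction rather than letting a later SRAT entry overwrite an earlier one. A small sketch of that rule; the table sizes and sentinel values are assumptions for illustration:

    #include <stdio.h>

    #define MAX_PXM      64
    #define MAX_NODES    64
    #define PXM_INVAL    (-1)    /* assumed sentinels for "no mapping yet" */
    #define NUMA_NO_NODE (-1)

    static int pxm_to_node_map[MAX_PXM];
    static int node_to_pxm_map[MAX_NODES];

    static void map_pxm_to_node(int pxm, int node)
    {
            if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm])
                    pxm_to_node_map[pxm] = node;
            if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node])
                    node_to_pxm_map[node] = pxm;
    }

    int main(void)
    {
            int i;

            for (i = 0; i < MAX_PXM; i++)
                    pxm_to_node_map[i] = NUMA_NO_NODE;
            for (i = 0; i < MAX_NODES; i++)
                    node_to_pxm_map[i] = PXM_INVAL;

            map_pxm_to_node(2, 0);
            map_pxm_to_node(2, 1);   /* ignored: pxm 2 already maps to the lower node 0 */
            printf("pxm 2 -> node %d\n", pxm_to_node_map[2]);   /* 0 */
            return 0;
    }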
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 8e6d8665f0ae..7594f65800cf 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
| @@ -758,7 +758,14 @@ static acpi_status __acpi_os_execute(acpi_execute_type type, | |||
| 758 | queue = hp ? kacpi_hotplug_wq : | 758 | queue = hp ? kacpi_hotplug_wq : |
| 759 | (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq); | 759 | (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq); |
| 760 | dpc->wait = hp ? 1 : 0; | 760 | dpc->wait = hp ? 1 : 0; |
| 761 | INIT_WORK(&dpc->work, acpi_os_execute_deferred); | 761 | |
| 762 | if (queue == kacpi_hotplug_wq) | ||
| 763 | INIT_WORK(&dpc->work, acpi_os_execute_deferred); | ||
| 764 | else if (queue == kacpi_notify_wq) | ||
| 765 | INIT_WORK(&dpc->work, acpi_os_execute_deferred); | ||
| 766 | else | ||
| 767 | INIT_WORK(&dpc->work, acpi_os_execute_deferred); | ||
| 768 | |||
| 762 | ret = queue_work(queue, &dpc->work); | 769 | ret = queue_work(queue, &dpc->work); |
| 763 | 770 | ||
| 764 | if (!ret) { | 771 | if (!ret) { |
| @@ -1151,16 +1158,10 @@ int acpi_check_resource_conflict(const struct resource *res) | |||
| 1151 | 1158 | ||
| 1152 | if (clash) { | 1159 | if (clash) { |
| 1153 | if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { | 1160 | if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { |
| 1154 | printk("%sACPI: %s resource %s [0x%llx-0x%llx]" | 1161 | printk(KERN_WARNING "ACPI: resource %s %pR" |
| 1155 | " conflicts with ACPI region %s" | 1162 | " conflicts with ACPI region %s %pR\n", |
| 1156 | " [0x%llx-0x%llx]\n", | 1163 | res->name, res, res_list_elem->name, |
| 1157 | acpi_enforce_resources == ENFORCE_RESOURCES_LAX | 1164 | res_list_elem); |
| 1158 | ? KERN_WARNING : KERN_ERR, | ||
| 1159 | ioport ? "I/O" : "Memory", res->name, | ||
| 1160 | (long long) res->start, (long long) res->end, | ||
| 1161 | res_list_elem->name, | ||
| 1162 | (long long) res_list_elem->start, | ||
| 1163 | (long long) res_list_elem->end); | ||
| 1164 | if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) | 1165 | if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) |
| 1165 | printk(KERN_NOTICE "ACPI: This conflict may" | 1166 | printk(KERN_NOTICE "ACPI: This conflict may" |
| 1166 | " cause random problems and system" | 1167 | " cause random problems and system" |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 0261b116d051..0338f513a010 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
| @@ -1081,12 +1081,6 @@ static void acpi_device_set_id(struct acpi_device *device) | |||
| 1081 | if (ACPI_IS_ROOT_DEVICE(device)) { | 1081 | if (ACPI_IS_ROOT_DEVICE(device)) { |
| 1082 | acpi_add_id(device, ACPI_SYSTEM_HID); | 1082 | acpi_add_id(device, ACPI_SYSTEM_HID); |
| 1083 | break; | 1083 | break; |
| 1084 | } else if (ACPI_IS_ROOT_DEVICE(device->parent)) { | ||
| 1085 | /* \_SB_, the only root-level namespace device */ | ||
| 1086 | acpi_add_id(device, ACPI_BUS_HID); | ||
| 1087 | strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME); | ||
| 1088 | strcpy(device->pnp.device_class, ACPI_BUS_CLASS); | ||
| 1089 | break; | ||
| 1090 | } | 1084 | } |
| 1091 | 1085 | ||
| 1092 | status = acpi_get_object_info(device->handle, &info); | 1086 | status = acpi_get_object_info(device->handle, &info); |
| @@ -1121,6 +1115,12 @@ static void acpi_device_set_id(struct acpi_device *device) | |||
| 1121 | acpi_add_id(device, ACPI_DOCK_HID); | 1115 | acpi_add_id(device, ACPI_DOCK_HID); |
| 1122 | else if (!acpi_ibm_smbus_match(device)) | 1116 | else if (!acpi_ibm_smbus_match(device)) |
| 1123 | acpi_add_id(device, ACPI_SMBUS_IBM_HID); | 1117 | acpi_add_id(device, ACPI_SMBUS_IBM_HID); |
| 1118 | else if (!acpi_device_hid(device) && | ||
| 1119 | ACPI_IS_ROOT_DEVICE(device->parent)) { | ||
| 1120 | acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */ | ||
| 1121 | strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME); | ||
| 1122 | strcpy(device->pnp.device_class, ACPI_BUS_CLASS); | ||
| 1123 | } | ||
| 1124 | 1124 | ||
| 1125 | break; | 1125 | break; |
| 1126 | case ACPI_BUS_TYPE_POWER: | 1126 | case ACPI_BUS_TYPE_POWER: |
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 6a0143796772..a0c93b321482 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
| @@ -44,6 +44,7 @@ | |||
| 44 | #include <linux/dmi.h> | 44 | #include <linux/dmi.h> |
| 45 | #include <acpi/acpi_bus.h> | 45 | #include <acpi/acpi_bus.h> |
| 46 | #include <acpi/acpi_drivers.h> | 46 | #include <acpi/acpi_drivers.h> |
| 47 | #include <linux/suspend.h> | ||
| 47 | 48 | ||
| 48 | #define PREFIX "ACPI: " | 49 | #define PREFIX "ACPI: " |
| 49 | 50 | ||
| @@ -89,7 +90,6 @@ module_param(allow_duplicates, bool, 0644); | |||
| 89 | static int register_count = 0; | 90 | static int register_count = 0; |
| 90 | static int acpi_video_bus_add(struct acpi_device *device); | 91 | static int acpi_video_bus_add(struct acpi_device *device); |
| 91 | static int acpi_video_bus_remove(struct acpi_device *device, int type); | 92 | static int acpi_video_bus_remove(struct acpi_device *device, int type); |
| 92 | static int acpi_video_resume(struct acpi_device *device); | ||
| 93 | static void acpi_video_bus_notify(struct acpi_device *device, u32 event); | 93 | static void acpi_video_bus_notify(struct acpi_device *device, u32 event); |
| 94 | 94 | ||
| 95 | static const struct acpi_device_id video_device_ids[] = { | 95 | static const struct acpi_device_id video_device_ids[] = { |
| @@ -105,7 +105,6 @@ static struct acpi_driver acpi_video_bus = { | |||
| 105 | .ops = { | 105 | .ops = { |
| 106 | .add = acpi_video_bus_add, | 106 | .add = acpi_video_bus_add, |
| 107 | .remove = acpi_video_bus_remove, | 107 | .remove = acpi_video_bus_remove, |
| 108 | .resume = acpi_video_resume, | ||
| 109 | .notify = acpi_video_bus_notify, | 108 | .notify = acpi_video_bus_notify, |
| 110 | }, | 109 | }, |
| 111 | }; | 110 | }; |
| @@ -160,6 +159,7 @@ struct acpi_video_bus { | |||
| 160 | struct proc_dir_entry *dir; | 159 | struct proc_dir_entry *dir; |
| 161 | struct input_dev *input; | 160 | struct input_dev *input; |
| 162 | char phys[32]; /* for input device */ | 161 | char phys[32]; /* for input device */ |
| 162 | struct notifier_block pm_nb; | ||
| 163 | }; | 163 | }; |
| 164 | 164 | ||
| 165 | struct acpi_video_device_flags { | 165 | struct acpi_video_device_flags { |
| @@ -1021,6 +1021,13 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
| 1021 | if (IS_ERR(device->backlight)) | 1021 | if (IS_ERR(device->backlight)) |
| 1022 | return; | 1022 | return; |
| 1023 | 1023 | ||
| 1024 | /* | ||
| 1025 | * Save current brightness level in case we have to restore it | ||
| 1026 | * before acpi_video_device_lcd_set_level() is called next time. | ||
| 1027 | */ | ||
| 1028 | device->backlight->props.brightness = | ||
| 1029 | acpi_video_get_brightness(device->backlight); | ||
| 1030 | |||
| 1024 | result = sysfs_create_link(&device->backlight->dev.kobj, | 1031 | result = sysfs_create_link(&device->backlight->dev.kobj, |
| 1025 | &device->dev->dev.kobj, "device"); | 1032 | &device->dev->dev.kobj, "device"); |
| 1026 | if (result) | 1033 | if (result) |
| @@ -2123,7 +2130,7 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event) | |||
| 2123 | { | 2130 | { |
| 2124 | struct acpi_video_bus *video = acpi_driver_data(device); | 2131 | struct acpi_video_bus *video = acpi_driver_data(device); |
| 2125 | struct input_dev *input; | 2132 | struct input_dev *input; |
| 2126 | int keycode; | 2133 | int keycode = 0; |
| 2127 | 2134 | ||
| 2128 | if (!video) | 2135 | if (!video) |
| 2129 | return; | 2136 | return; |
| @@ -2159,17 +2166,19 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event) | |||
| 2159 | break; | 2166 | break; |
| 2160 | 2167 | ||
| 2161 | default: | 2168 | default: |
| 2162 | keycode = KEY_UNKNOWN; | ||
| 2163 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 2169 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
| 2164 | "Unsupported event [0x%x]\n", event)); | 2170 | "Unsupported event [0x%x]\n", event)); |
| 2165 | break; | 2171 | break; |
| 2166 | } | 2172 | } |
| 2167 | 2173 | ||
| 2168 | acpi_notifier_call_chain(device, event, 0); | 2174 | acpi_notifier_call_chain(device, event, 0); |
| 2169 | input_report_key(input, keycode, 1); | 2175 | |
| 2170 | input_sync(input); | 2176 | if (keycode) { |
| 2171 | input_report_key(input, keycode, 0); | 2177 | input_report_key(input, keycode, 1); |
| 2172 | input_sync(input); | 2178 | input_sync(input); |
| 2179 | input_report_key(input, keycode, 0); | ||
| 2180 | input_sync(input); | ||
| 2181 | } | ||
| 2173 | 2182 | ||
| 2174 | return; | 2183 | return; |
| 2175 | } | 2184 | } |
| @@ -2180,7 +2189,7 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data) | |||
| 2180 | struct acpi_device *device = NULL; | 2189 | struct acpi_device *device = NULL; |
| 2181 | struct acpi_video_bus *bus; | 2190 | struct acpi_video_bus *bus; |
| 2182 | struct input_dev *input; | 2191 | struct input_dev *input; |
| 2183 | int keycode; | 2192 | int keycode = 0; |
| 2184 | 2193 | ||
| 2185 | if (!video_device) | 2194 | if (!video_device) |
| 2186 | return; | 2195 | return; |
| @@ -2221,39 +2230,48 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data) | |||
| 2221 | keycode = KEY_DISPLAY_OFF; | 2230 | keycode = KEY_DISPLAY_OFF; |
| 2222 | break; | 2231 | break; |
| 2223 | default: | 2232 | default: |
| 2224 | keycode = KEY_UNKNOWN; | ||
| 2225 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 2233 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
| 2226 | "Unsupported event [0x%x]\n", event)); | 2234 | "Unsupported event [0x%x]\n", event)); |
| 2227 | break; | 2235 | break; |
| 2228 | } | 2236 | } |
| 2229 | 2237 | ||
| 2230 | acpi_notifier_call_chain(device, event, 0); | 2238 | acpi_notifier_call_chain(device, event, 0); |
| 2231 | input_report_key(input, keycode, 1); | 2239 | |
| 2232 | input_sync(input); | 2240 | if (keycode) { |
| 2233 | input_report_key(input, keycode, 0); | 2241 | input_report_key(input, keycode, 1); |
| 2234 | input_sync(input); | 2242 | input_sync(input); |
| 2243 | input_report_key(input, keycode, 0); | ||
| 2244 | input_sync(input); | ||
| 2245 | } | ||
| 2235 | 2246 | ||
| 2236 | return; | 2247 | return; |
| 2237 | } | 2248 | } |
| 2238 | 2249 | ||
| 2239 | static int instance; | 2250 | static int acpi_video_resume(struct notifier_block *nb, |
| 2240 | static int acpi_video_resume(struct acpi_device *device) | 2251 | unsigned long val, void *ign) |
| 2241 | { | 2252 | { |
| 2242 | struct acpi_video_bus *video; | 2253 | struct acpi_video_bus *video; |
| 2243 | struct acpi_video_device *video_device; | 2254 | struct acpi_video_device *video_device; |
| 2244 | int i; | 2255 | int i; |
| 2245 | 2256 | ||
| 2246 | if (!device || !acpi_driver_data(device)) | 2257 | switch (val) { |
| 2247 | return -EINVAL; | 2258 | case PM_HIBERNATION_PREPARE: |
| 2259 | case PM_SUSPEND_PREPARE: | ||
| 2260 | case PM_RESTORE_PREPARE: | ||
| 2261 | return NOTIFY_DONE; | ||
| 2262 | } | ||
| 2248 | 2263 | ||
| 2249 | video = acpi_driver_data(device); | 2264 | video = container_of(nb, struct acpi_video_bus, pm_nb); |
| 2265 | |||
| 2266 | dev_info(&video->device->dev, "Restoring backlight state\n"); | ||
| 2250 | 2267 | ||
| 2251 | for (i = 0; i < video->attached_count; i++) { | 2268 | for (i = 0; i < video->attached_count; i++) { |
| 2252 | video_device = video->attached_array[i].bind_info; | 2269 | video_device = video->attached_array[i].bind_info; |
| 2253 | if (video_device && video_device->backlight) | 2270 | if (video_device && video_device->backlight) |
| 2254 | acpi_video_set_brightness(video_device->backlight); | 2271 | acpi_video_set_brightness(video_device->backlight); |
| 2255 | } | 2272 | } |
| 2256 | return AE_OK; | 2273 | |
| 2274 | return NOTIFY_OK; | ||
| 2257 | } | 2275 | } |
| 2258 | 2276 | ||
| 2259 | static acpi_status | 2277 | static acpi_status |
| @@ -2277,6 +2295,8 @@ acpi_video_bus_match(acpi_handle handle, u32 level, void *context, | |||
| 2277 | return AE_OK; | 2295 | return AE_OK; |
| 2278 | } | 2296 | } |
| 2279 | 2297 | ||
| 2298 | static int instance; | ||
| 2299 | |||
| 2280 | static int acpi_video_bus_add(struct acpi_device *device) | 2300 | static int acpi_video_bus_add(struct acpi_device *device) |
| 2281 | { | 2301 | { |
| 2282 | struct acpi_video_bus *video; | 2302 | struct acpi_video_bus *video; |
| @@ -2358,7 +2378,6 @@ static int acpi_video_bus_add(struct acpi_device *device) | |||
| 2358 | set_bit(KEY_BRIGHTNESSDOWN, input->keybit); | 2378 | set_bit(KEY_BRIGHTNESSDOWN, input->keybit); |
| 2359 | set_bit(KEY_BRIGHTNESS_ZERO, input->keybit); | 2379 | set_bit(KEY_BRIGHTNESS_ZERO, input->keybit); |
| 2360 | set_bit(KEY_DISPLAY_OFF, input->keybit); | 2380 | set_bit(KEY_DISPLAY_OFF, input->keybit); |
| 2361 | set_bit(KEY_UNKNOWN, input->keybit); | ||
| 2362 | 2381 | ||
| 2363 | error = input_register_device(input); | 2382 | error = input_register_device(input); |
| 2364 | if (error) | 2383 | if (error) |
| @@ -2370,6 +2389,10 @@ static int acpi_video_bus_add(struct acpi_device *device) | |||
| 2370 | video->flags.rom ? "yes" : "no", | 2389 | video->flags.rom ? "yes" : "no", |
| 2371 | video->flags.post ? "yes" : "no"); | 2390 | video->flags.post ? "yes" : "no"); |
| 2372 | 2391 | ||
| 2392 | video->pm_nb.notifier_call = acpi_video_resume; | ||
| 2393 | video->pm_nb.priority = 0; | ||
| 2394 | register_pm_notifier(&video->pm_nb); | ||
| 2395 | |||
| 2373 | return 0; | 2396 | return 0; |
| 2374 | 2397 | ||
| 2375 | err_free_input_dev: | 2398 | err_free_input_dev: |
| @@ -2396,6 +2419,8 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type) | |||
| 2396 | 2419 | ||
| 2397 | video = acpi_driver_data(device); | 2420 | video = acpi_driver_data(device); |
| 2398 | 2421 | ||
| 2422 | unregister_pm_notifier(&video->pm_nb); | ||
| 2423 | |||
| 2399 | acpi_video_bus_stop_devices(video); | 2424 | acpi_video_bus_stop_devices(video); |
| 2400 | acpi_video_bus_put_devices(video); | 2425 | acpi_video_bus_put_devices(video); |
| 2401 | acpi_video_bus_remove_fs(device); | 2426 | acpi_video_bus_remove_fs(device); |
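Both video notify handlers above now initialise keycode to 0 and only synthesise an input event when a known hotkey was decoded, instead of reporting KEY_UNKNOWN for every unrecognised notification. The shape of that guard, sketched with a stubbed report function; the event numbers and keycode values are illustrative:

    #include <stdio.h>

    #define KEY_BRIGHTNESSUP   225   /* illustrative keycode values */
    #define KEY_BRIGHTNESSDOWN 224

    static void report_key(int keycode, int pressed)
    {
            printf("key %d %s\n", keycode, pressed ? "down" : "up");
    }

    static void handle_event(unsigned event)
    {
            int keycode = 0;                 /* 0 means "nothing to report" */

            switch (event) {
            case 0x86: keycode = KEY_BRIGHTNESSUP;   break;
            case 0x87: keycode = KEY_BRIGHTNESSDOWN; break;
            default:   /* unsupported event: keycode stays 0 */ break;
            }

            if (keycode) {                   /* emit a press/release pair only for known events */
                    report_key(keycode, 1);
                    report_key(keycode, 0);
            }
    }

    int main(void)
    {
            handle_event(0x86);              /* reported */
            handle_event(0xff);              /* silently ignored */
            return 0;
    }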
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 3f6771e63230..49cffb6094a3 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
| @@ -1494,6 +1494,7 @@ static int ata_hpa_resize(struct ata_device *dev) | |||
| 1494 | { | 1494 | { |
| 1495 | struct ata_eh_context *ehc = &dev->link->eh_context; | 1495 | struct ata_eh_context *ehc = &dev->link->eh_context; |
| 1496 | int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; | 1496 | int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; |
| 1497 | bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA; | ||
| 1497 | u64 sectors = ata_id_n_sectors(dev->id); | 1498 | u64 sectors = ata_id_n_sectors(dev->id); |
| 1498 | u64 native_sectors; | 1499 | u64 native_sectors; |
| 1499 | int rc; | 1500 | int rc; |
| @@ -1510,7 +1511,7 @@ static int ata_hpa_resize(struct ata_device *dev) | |||
| 1510 | /* If device aborted the command or HPA isn't going to | 1511 | /* If device aborted the command or HPA isn't going to |
| 1511 | * be unlocked, skip HPA resizing. | 1512 | * be unlocked, skip HPA resizing. |
| 1512 | */ | 1513 | */ |
| 1513 | if (rc == -EACCES || !ata_ignore_hpa) { | 1514 | if (rc == -EACCES || !unlock_hpa) { |
| 1514 | ata_dev_printk(dev, KERN_WARNING, "HPA support seems " | 1515 | ata_dev_printk(dev, KERN_WARNING, "HPA support seems " |
| 1515 | "broken, skipping HPA handling\n"); | 1516 | "broken, skipping HPA handling\n"); |
| 1516 | dev->horkage |= ATA_HORKAGE_BROKEN_HPA; | 1517 | dev->horkage |= ATA_HORKAGE_BROKEN_HPA; |
| @@ -1525,7 +1526,7 @@ static int ata_hpa_resize(struct ata_device *dev) | |||
| 1525 | dev->n_native_sectors = native_sectors; | 1526 | dev->n_native_sectors = native_sectors; |
| 1526 | 1527 | ||
| 1527 | /* nothing to do? */ | 1528 | /* nothing to do? */ |
| 1528 | if (native_sectors <= sectors || !ata_ignore_hpa) { | 1529 | if (native_sectors <= sectors || !unlock_hpa) { |
| 1529 | if (!print_info || native_sectors == sectors) | 1530 | if (!print_info || native_sectors == sectors) |
| 1530 | return 0; | 1531 | return 0; |
| 1531 | 1532 | ||
| @@ -4186,36 +4187,51 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, | |||
| 4186 | goto fail; | 4187 | goto fail; |
| 4187 | 4188 | ||
| 4188 | /* verify n_sectors hasn't changed */ | 4189 | /* verify n_sectors hasn't changed */ |
| 4189 | if (dev->class == ATA_DEV_ATA && n_sectors && | 4190 | if (dev->class != ATA_DEV_ATA || !n_sectors || |
| 4190 | dev->n_sectors != n_sectors) { | 4191 | dev->n_sectors == n_sectors) |
| 4191 | ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch " | 4192 | return 0; |
| 4192 | "%llu != %llu\n", | 4193 | |
| 4193 | (unsigned long long)n_sectors, | 4194 | /* n_sectors has changed */ |
| 4194 | (unsigned long long)dev->n_sectors); | 4195 | ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch %llu != %llu\n", |
| 4195 | /* | 4196 | (unsigned long long)n_sectors, |
| 4196 | * Something could have caused HPA to be unlocked | 4197 | (unsigned long long)dev->n_sectors); |
| 4197 | * involuntarily. If n_native_sectors hasn't changed | 4198 | |
| 4198 | * and the new size matches it, keep the device. | 4199 | /* |
| 4199 | */ | 4200 | * Something could have caused HPA to be unlocked |
| 4200 | if (dev->n_native_sectors == n_native_sectors && | 4201 | * involuntarily. If n_native_sectors hasn't changed and the |
| 4201 | dev->n_sectors > n_sectors && | 4202 | * new size matches it, keep the device. |
| 4202 | dev->n_sectors == n_native_sectors) { | 4203 | */ |
| 4203 | ata_dev_printk(dev, KERN_WARNING, | 4204 | if (dev->n_native_sectors == n_native_sectors && |
| 4204 | "new n_sectors matches native, probably " | 4205 | dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { |
| 4205 | "late HPA unlock, continuing\n"); | 4206 | ata_dev_printk(dev, KERN_WARNING, |
| 4206 | /* keep using the old n_sectors */ | 4207 | "new n_sectors matches native, probably " |
| 4207 | dev->n_sectors = n_sectors; | 4208 | "late HPA unlock, continuing\n"); |
| 4208 | } else { | 4209 | /* keep using the old n_sectors */ |
| 4209 | /* restore original n_[native]_sectors and fail */ | 4210 | dev->n_sectors = n_sectors; |
| 4210 | dev->n_native_sectors = n_native_sectors; | 4211 | return 0; |
| 4211 | dev->n_sectors = n_sectors; | ||
| 4212 | rc = -ENODEV; | ||
| 4213 | goto fail; | ||
| 4214 | } | ||
| 4215 | } | 4212 | } |
| 4216 | 4213 | ||
| 4217 | return 0; | 4214 | /* |
| 4215 | * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try | ||
| 4216 | * unlocking HPA in those cases. | ||
| 4217 | * | ||
| 4218 | * https://bugzilla.kernel.org/show_bug.cgi?id=15396 | ||
| 4219 | */ | ||
| 4220 | if (dev->n_native_sectors == n_native_sectors && | ||
| 4221 | dev->n_sectors < n_sectors && n_sectors == n_native_sectors && | ||
| 4222 | !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { | ||
| 4223 | ata_dev_printk(dev, KERN_WARNING, | ||
| 4224 | "old n_sectors matches native, probably " | ||
| 4225 | "late HPA lock, will try to unlock HPA\n"); | ||
| 4226 | /* try unlocking HPA */ | ||
| 4227 | dev->flags |= ATA_DFLAG_UNLOCK_HPA; | ||
| 4228 | rc = -EIO; | ||
| 4229 | } else | ||
| 4230 | rc = -ENODEV; | ||
| 4218 | 4231 | ||
| 4232 | /* restore original n_[native_]sectors and fail */ | ||
| 4233 | dev->n_native_sectors = n_native_sectors; | ||
| 4234 | dev->n_sectors = n_sectors; | ||
| 4219 | fail: | 4235 | fail: |
| 4220 | ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc); | 4236 | ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc); |
| 4221 | return rc; | 4237 | return rc; |
| @@ -4354,6 +4370,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
| 4354 | { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, | 4370 | { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, |
| 4355 | { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, | 4371 | { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, |
| 4356 | 4372 | ||
| 4373 | /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ | ||
| 4374 | { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, | ||
| 4375 | |||
| 4357 | /* devices which puke on READ_NATIVE_MAX */ | 4376 | /* devices which puke on READ_NATIVE_MAX */ |
| 4358 | { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, | 4377 | { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, |
| 4359 | { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, | 4378 | { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, |
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 6411e0c7b9fe..e3877b6843c9 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
| @@ -1816,10 +1816,6 @@ retry: | |||
| 1816 | !ap->ops->sff_irq_check(ap)) | 1816 | !ap->ops->sff_irq_check(ap)) |
| 1817 | continue; | 1817 | continue; |
| 1818 | 1818 | ||
| 1819 | if (printk_ratelimit()) | ||
| 1820 | ata_port_printk(ap, KERN_INFO, | ||
| 1821 | "clearing spurious IRQ\n"); | ||
| 1822 | |||
| 1823 | if (idle & (1 << i)) { | 1819 | if (idle & (1 << i)) { |
| 1824 | ap->ops->sff_check_status(ap); | 1820 | ap->ops->sff_check_status(ap); |
| 1825 | ap->ops->sff_irq_clear(ap); | 1821 | ap->ops->sff_irq_clear(ap); |
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 4f4aa5897b4c..933442f40321 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
| @@ -313,7 +313,7 @@ static ssize_t | |||
| 313 | print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr, | 313 | print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr, |
| 314 | char *buf) | 314 | char *buf) |
| 315 | { | 315 | { |
| 316 | return sprintf(buf, "%#lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE); | 316 | return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE); |
| 317 | } | 317 | } |
| 318 | 318 | ||
| 319 | static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL); | 319 | static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL); |
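The one-character change above drops the alternate-form flag, so block_size_bytes is printed as a bare hex number without the 0x prefix, presumably to keep the format existing user space expects. For reference, the two formats side by side in a standalone program (the value is just an example):

    #include <stdio.h>

    int main(void)
    {
            unsigned long block_size = 128UL << 20;   /* e.g. a 128 MB memory section */

            printf("%#lx\n", block_size);   /* "0x8000000" - with the # flag */
            printf("%lx\n",  block_size);   /* "8000000"   - new sysfs format */
            return 0;
    }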
diff --git a/drivers/base/node.c b/drivers/base/node.c index 985abd7f49a7..057979a19eea 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
| @@ -15,7 +15,7 @@ | |||
| 15 | #include <linux/cpu.h> | 15 | #include <linux/cpu.h> |
| 16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
| 17 | #include <linux/swap.h> | 17 | #include <linux/swap.h> |
| 18 | #include <linux/gfp.h> | 18 | #include <linux/slab.h> |
| 19 | 19 | ||
| 20 | static struct sysdev_class_attribute *node_state_attrs[]; | 20 | static struct sysdev_class_attribute *node_state_attrs[]; |
| 21 | 21 | ||
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 459f1bc25a7b..c5f22bb0a48e 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c | |||
| @@ -2533,7 +2533,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller) | |||
| 2533 | Controller->RequestQueue[n] = RequestQueue; | 2533 | Controller->RequestQueue[n] = RequestQueue; |
| 2534 | blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit); | 2534 | blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit); |
| 2535 | RequestQueue->queuedata = Controller; | 2535 | RequestQueue->queuedata = Controller; |
| 2536 | blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit); | ||
| 2537 | blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit); | 2536 | blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit); |
| 2538 | blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand); | 2537 | blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand); |
| 2539 | disk->queue = RequestQueue; | 2538 | disk->queue = RequestQueue; |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 9e3af307aae1..eb5ff0531cfb 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
| @@ -3341,6 +3341,7 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id) | |||
| 3341 | printk(KERN_WARNING | 3341 | printk(KERN_WARNING |
| 3342 | "cciss: controller cciss%d failed, stopping.\n", | 3342 | "cciss: controller cciss%d failed, stopping.\n", |
| 3343 | h->ctlr); | 3343 | h->ctlr); |
| 3344 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
| 3344 | fail_all_cmds(h->ctlr); | 3345 | fail_all_cmds(h->ctlr); |
| 3345 | return IRQ_HANDLED; | 3346 | return IRQ_HANDLED; |
| 3346 | } | 3347 | } |
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 17956ff6a08d..df018990c422 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c | |||
| @@ -536,7 +536,9 @@ static void atodb_endio(struct bio *bio, int error) | |||
| 536 | put_ldev(mdev); | 536 | put_ldev(mdev); |
| 537 | } | 537 | } |
| 538 | 538 | ||
| 539 | /* sector to word */ | ||
| 539 | #define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL)) | 540 | #define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL)) |
| 541 | |||
| 540 | /* activity log to on disk bitmap -- prepare bio unless that sector | 542 | /* activity log to on disk bitmap -- prepare bio unless that sector |
| 541 | * is already covered by previously prepared bios */ | 543 | * is already covered by previously prepared bios */ |
| 542 | static int atodb_prepare_unless_covered(struct drbd_conf *mdev, | 544 | static int atodb_prepare_unless_covered(struct drbd_conf *mdev, |
| @@ -546,13 +548,20 @@ static int atodb_prepare_unless_covered(struct drbd_conf *mdev, | |||
| 546 | { | 548 | { |
| 547 | struct bio *bio; | 549 | struct bio *bio; |
| 548 | struct page *page; | 550 | struct page *page; |
| 549 | sector_t on_disk_sector = enr + mdev->ldev->md.md_offset | 551 | sector_t on_disk_sector; |
| 550 | + mdev->ldev->md.bm_offset; | ||
| 551 | unsigned int page_offset = PAGE_SIZE; | 552 | unsigned int page_offset = PAGE_SIZE; |
| 552 | int offset; | 553 | int offset; |
| 553 | int i = 0; | 554 | int i = 0; |
| 554 | int err = -ENOMEM; | 555 | int err = -ENOMEM; |
| 555 | 556 | ||
| 557 | /* We always write aligned, full 4k blocks, | ||
| 558 | * so we can ignore the logical_block_size (for now) */ | ||
| 559 | enr &= ~7U; | ||
| 560 | on_disk_sector = enr + mdev->ldev->md.md_offset | ||
| 561 | + mdev->ldev->md.bm_offset; | ||
| 562 | |||
| 563 | D_ASSERT(!(on_disk_sector & 7U)); | ||
| 564 | |||
| 556 | /* Check if that enr is already covered by an already created bio. | 565 | /* Check if that enr is already covered by an already created bio. |
| 557 | * Caution, bios[] is not NULL terminated, | 566 | * Caution, bios[] is not NULL terminated, |
| 558 | * but only initialized to all NULL. | 567 | * but only initialized to all NULL. |
| @@ -588,7 +597,7 @@ static int atodb_prepare_unless_covered(struct drbd_conf *mdev, | |||
| 588 | 597 | ||
| 589 | offset = S2W(enr); | 598 | offset = S2W(enr); |
| 590 | drbd_bm_get_lel(mdev, offset, | 599 | drbd_bm_get_lel(mdev, offset, |
| 591 | min_t(size_t, S2W(1), drbd_bm_words(mdev) - offset), | 600 | min_t(size_t, S2W(8), drbd_bm_words(mdev) - offset), |
| 592 | kmap(page) + page_offset); | 601 | kmap(page) + page_offset); |
| 593 | kunmap(page); | 602 | kunmap(page); |
| 594 | 603 | ||
| @@ -597,7 +606,7 @@ static int atodb_prepare_unless_covered(struct drbd_conf *mdev, | |||
| 597 | bio->bi_bdev = mdev->ldev->md_bdev; | 606 | bio->bi_bdev = mdev->ldev->md_bdev; |
| 598 | bio->bi_sector = on_disk_sector; | 607 | bio->bi_sector = on_disk_sector; |
| 599 | 608 | ||
| 600 | if (bio_add_page(bio, page, MD_SECTOR_SIZE, page_offset) != MD_SECTOR_SIZE) | 609 | if (bio_add_page(bio, page, 4096, page_offset) != 4096) |
| 601 | goto out_put_page; | 610 | goto out_put_page; |
| 602 | 611 | ||
| 603 | atomic_inc(&wc->count); | 612 | atomic_inc(&wc->count); |
| @@ -1327,7 +1336,7 @@ int drbd_rs_del_all(struct drbd_conf *mdev) | |||
| 1327 | /* ok, ->resync is there. */ | 1336 | /* ok, ->resync is there. */ |
| 1328 | for (i = 0; i < mdev->resync->nr_elements; i++) { | 1337 | for (i = 0; i < mdev->resync->nr_elements; i++) { |
| 1329 | e = lc_element_by_index(mdev->resync, i); | 1338 | e = lc_element_by_index(mdev->resync, i); |
| 1330 | bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL; | 1339 | bm_ext = lc_entry(e, struct bm_extent, lce); |
| 1331 | if (bm_ext->lce.lc_number == LC_FREE) | 1340 | if (bm_ext->lce.lc_number == LC_FREE) |
| 1332 | continue; | 1341 | continue; |
| 1333 | if (bm_ext->lce.lc_number == mdev->resync_wenr) { | 1342 | if (bm_ext->lce.lc_number == mdev->resync_wenr) { |
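The drbd_actlog.c hunk aligns the extent number down to a multiple of 8 so the on-disk bitmap write always covers a full, 4k-aligned block (8 x 512-byte sectors) rather than one MD sector. A toy illustration of that alignment arithmetic; the metadata offsets are invented, only the masking mirrors the patch:

    #include <stdio.h>

    int main(void)
    {
            unsigned int enr = 13;                  /* some activity-log extent number */
            unsigned long long md_offset = 1024;    /* invented, 4k-aligned metadata offsets */
            unsigned long long bm_offset = 64;
            unsigned long long on_disk_sector;

            enr &= ~7U;                              /* round down to an 8-sector boundary */
            on_disk_sector = enr + md_offset + bm_offset;

            printf("enr=%u sector=%llu aligned=%d\n",
                   enr, on_disk_sector, (on_disk_sector & 7) == 0);
            return 0;
    }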
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 3d6f3d988949..3390716898d5 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c | |||
| @@ -67,7 +67,7 @@ struct drbd_bitmap { | |||
| 67 | size_t bm_words; | 67 | size_t bm_words; |
| 68 | size_t bm_number_of_pages; | 68 | size_t bm_number_of_pages; |
| 69 | sector_t bm_dev_capacity; | 69 | sector_t bm_dev_capacity; |
| 70 | struct semaphore bm_change; /* serializes resize operations */ | 70 | struct mutex bm_change; /* serializes resize operations */ |
| 71 | 71 | ||
| 72 | atomic_t bm_async_io; | 72 | atomic_t bm_async_io; |
| 73 | wait_queue_head_t bm_io_wait; | 73 | wait_queue_head_t bm_io_wait; |
| @@ -115,7 +115,7 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why) | |||
| 115 | return; | 115 | return; |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | trylock_failed = down_trylock(&b->bm_change); | 118 | trylock_failed = !mutex_trylock(&b->bm_change); |
| 119 | 119 | ||
| 120 | if (trylock_failed) { | 120 | if (trylock_failed) { |
| 121 | dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n", | 121 | dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n", |
| @@ -126,7 +126,7 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why) | |||
| 126 | b->bm_task == mdev->receiver.task ? "receiver" : | 126 | b->bm_task == mdev->receiver.task ? "receiver" : |
| 127 | b->bm_task == mdev->asender.task ? "asender" : | 127 | b->bm_task == mdev->asender.task ? "asender" : |
| 128 | b->bm_task == mdev->worker.task ? "worker" : "?"); | 128 | b->bm_task == mdev->worker.task ? "worker" : "?"); |
| 129 | down(&b->bm_change); | 129 | mutex_lock(&b->bm_change); |
| 130 | } | 130 | } |
| 131 | if (__test_and_set_bit(BM_LOCKED, &b->bm_flags)) | 131 | if (__test_and_set_bit(BM_LOCKED, &b->bm_flags)) |
| 132 | dev_err(DEV, "FIXME bitmap already locked in bm_lock\n"); | 132 | dev_err(DEV, "FIXME bitmap already locked in bm_lock\n"); |
| @@ -148,7 +148,7 @@ void drbd_bm_unlock(struct drbd_conf *mdev) | |||
| 148 | 148 | ||
| 149 | b->bm_why = NULL; | 149 | b->bm_why = NULL; |
| 150 | b->bm_task = NULL; | 150 | b->bm_task = NULL; |
| 151 | up(&b->bm_change); | 151 | mutex_unlock(&b->bm_change); |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | /* word offset to long pointer */ | 154 | /* word offset to long pointer */ |
| @@ -296,7 +296,7 @@ int drbd_bm_init(struct drbd_conf *mdev) | |||
| 296 | if (!b) | 296 | if (!b) |
| 297 | return -ENOMEM; | 297 | return -ENOMEM; |
| 298 | spin_lock_init(&b->bm_lock); | 298 | spin_lock_init(&b->bm_lock); |
| 299 | init_MUTEX(&b->bm_change); | 299 | mutex_init(&b->bm_change); |
| 300 | init_waitqueue_head(&b->bm_io_wait); | 300 | init_waitqueue_head(&b->bm_io_wait); |
| 301 | 301 | ||
| 302 | mdev->bitmap = b; | 302 | mdev->bitmap = b; |
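[Note: the drbd_bitmap.c hunks above convert a binary semaphore into a struct mutex. A minimal sketch of that conversion pattern, with placeholder names standing in for drbd's struct drbd_bitmap; this is illustrative only, not part of the patch.]

#include <linux/mutex.h>

/* Placeholder state; in the patch this is drbd's struct drbd_bitmap. */
struct bm_state {
	struct mutex change;		/* was: struct semaphore change */
};

static void bm_state_init(struct bm_state *b)
{
	mutex_init(&b->change);		/* was: init_MUTEX(&b->change) */
}

static void bm_state_lock(struct bm_state *b)
{
	/* mutex_trylock() returns 1 on success, while down_trylock()
	 * returned 0 on success, so the failure test is inverted. */
	if (!mutex_trylock(&b->change))
		mutex_lock(&b->change);	/* was: down(&b->change) */
}

static void bm_state_unlock(struct bm_state *b)
{
	mutex_unlock(&b->change);	/* was: up(&b->change) */
}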
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index d9301e861d9f..e5e86a781820 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h | |||
| @@ -261,6 +261,9 @@ static inline const char *cmdname(enum drbd_packets cmd) | |||
| 261 | [P_OV_REQUEST] = "OVRequest", | 261 | [P_OV_REQUEST] = "OVRequest", |
| 262 | [P_OV_REPLY] = "OVReply", | 262 | [P_OV_REPLY] = "OVReply", |
| 263 | [P_OV_RESULT] = "OVResult", | 263 | [P_OV_RESULT] = "OVResult", |
| 264 | [P_CSUM_RS_REQUEST] = "CsumRSRequest", | ||
| 265 | [P_RS_IS_IN_SYNC] = "CsumRSIsInSync", | ||
| 266 | [P_COMPRESSED_BITMAP] = "CBitmap", | ||
| 264 | [P_MAX_CMD] = NULL, | 267 | [P_MAX_CMD] = NULL, |
| 265 | }; | 268 | }; |
| 266 | 269 | ||
| @@ -443,13 +446,18 @@ struct p_rs_param_89 { | |||
| 443 | char csums_alg[SHARED_SECRET_MAX]; | 446 | char csums_alg[SHARED_SECRET_MAX]; |
| 444 | } __packed; | 447 | } __packed; |
| 445 | 448 | ||
| 449 | enum drbd_conn_flags { | ||
| 450 | CF_WANT_LOSE = 1, | ||
| 451 | CF_DRY_RUN = 2, | ||
| 452 | }; | ||
| 453 | |||
| 446 | struct p_protocol { | 454 | struct p_protocol { |
| 447 | struct p_header head; | 455 | struct p_header head; |
| 448 | u32 protocol; | 456 | u32 protocol; |
| 449 | u32 after_sb_0p; | 457 | u32 after_sb_0p; |
| 450 | u32 after_sb_1p; | 458 | u32 after_sb_1p; |
| 451 | u32 after_sb_2p; | 459 | u32 after_sb_2p; |
| 452 | u32 want_lose; | 460 | u32 conn_flags; |
| 453 | u32 two_primaries; | 461 | u32 two_primaries; |
| 454 | 462 | ||
| 455 | /* Since protocol version 87 and higher. */ | 463 | /* Since protocol version 87 and higher. */ |
| @@ -791,6 +799,8 @@ enum { | |||
| 791 | * while this is set. */ | 799 | * while this is set. */ |
| 792 | RESIZE_PENDING, /* Size change detected locally, waiting for the response from | 800 | RESIZE_PENDING, /* Size change detected locally, waiting for the response from |
| 793 | * the peer, if it changed there as well. */ | 801 | * the peer, if it changed there as well. */ |
| 802 | CONN_DRY_RUN, /* Expect disconnect after resync handshake. */ | ||
| 803 | GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */ | ||
| 794 | }; | 804 | }; |
| 795 | 805 | ||
| 796 | struct drbd_bitmap; /* opaque for drbd_conf */ | 806 | struct drbd_bitmap; /* opaque for drbd_conf */ |
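[Note: the drbd_int.h change above replaces the single on-wire want_lose field with a generic conn_flags word carrying CF_WANT_LOSE and CF_DRY_RUN. A rough sketch of how such a flags word is packed for the wire and unpacked on receive; the helper names are made up for illustration.]

#include <linux/types.h>
#include <asm/byteorder.h>

enum drbd_conn_flags {
	CF_WANT_LOSE = 1,
	CF_DRY_RUN   = 2,
};

/* Pack the local booleans into one big-endian word for the wire. */
static __be32 pack_conn_flags(bool want_lose, bool dry_run)
{
	u32 cf = 0;

	if (want_lose)
		cf |= CF_WANT_LOSE;
	if (dry_run)
		cf |= CF_DRY_RUN;
	return cpu_to_be32(cf);
}

/* Unpack on the receiving side. */
static void unpack_conn_flags(__be32 wire, bool *want_lose, bool *dry_run)
{
	u32 cf = be32_to_cpu(wire);

	*want_lose = cf & CF_WANT_LOSE;
	*dry_run   = cf & CF_DRY_RUN;
}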
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index ab871e00ffc5..67e0fc542249 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c | |||
| @@ -1668,7 +1668,7 @@ int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc) | |||
| 1668 | int drbd_send_protocol(struct drbd_conf *mdev) | 1668 | int drbd_send_protocol(struct drbd_conf *mdev) |
| 1669 | { | 1669 | { |
| 1670 | struct p_protocol *p; | 1670 | struct p_protocol *p; |
| 1671 | int size, rv; | 1671 | int size, cf, rv; |
| 1672 | 1672 | ||
| 1673 | size = sizeof(struct p_protocol); | 1673 | size = sizeof(struct p_protocol); |
| 1674 | 1674 | ||
| @@ -1685,9 +1685,21 @@ int drbd_send_protocol(struct drbd_conf *mdev) | |||
| 1685 | p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p); | 1685 | p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p); |
| 1686 | p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p); | 1686 | p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p); |
| 1687 | p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p); | 1687 | p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p); |
| 1688 | p->want_lose = cpu_to_be32(mdev->net_conf->want_lose); | ||
| 1689 | p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries); | 1688 | p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries); |
| 1690 | 1689 | ||
| 1690 | cf = 0; | ||
| 1691 | if (mdev->net_conf->want_lose) | ||
| 1692 | cf |= CF_WANT_LOSE; | ||
| 1693 | if (mdev->net_conf->dry_run) { | ||
| 1694 | if (mdev->agreed_pro_version >= 92) | ||
| 1695 | cf |= CF_DRY_RUN; | ||
| 1696 | else { | ||
| 1697 | dev_err(DEV, "--dry-run is not supported by peer"); | ||
| 1698 | return 0; | ||
| 1699 | } | ||
| 1700 | } | ||
| 1701 | p->conn_flags = cpu_to_be32(cf); | ||
| 1702 | |||
| 1691 | if (mdev->agreed_pro_version >= 87) | 1703 | if (mdev->agreed_pro_version >= 87) |
| 1692 | strcpy(p->integrity_alg, mdev->net_conf->integrity_alg); | 1704 | strcpy(p->integrity_alg, mdev->net_conf->integrity_alg); |
| 1693 | 1705 | ||
| @@ -3161,14 +3173,18 @@ void drbd_free_bc(struct drbd_backing_dev *ldev) | |||
| 3161 | void drbd_free_sock(struct drbd_conf *mdev) | 3173 | void drbd_free_sock(struct drbd_conf *mdev) |
| 3162 | { | 3174 | { |
| 3163 | if (mdev->data.socket) { | 3175 | if (mdev->data.socket) { |
| 3176 | mutex_lock(&mdev->data.mutex); | ||
| 3164 | kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR); | 3177 | kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR); |
| 3165 | sock_release(mdev->data.socket); | 3178 | sock_release(mdev->data.socket); |
| 3166 | mdev->data.socket = NULL; | 3179 | mdev->data.socket = NULL; |
| 3180 | mutex_unlock(&mdev->data.mutex); | ||
| 3167 | } | 3181 | } |
| 3168 | if (mdev->meta.socket) { | 3182 | if (mdev->meta.socket) { |
| 3183 | mutex_lock(&mdev->meta.mutex); | ||
| 3169 | kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR); | 3184 | kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR); |
| 3170 | sock_release(mdev->meta.socket); | 3185 | sock_release(mdev->meta.socket); |
| 3171 | mdev->meta.socket = NULL; | 3186 | mdev->meta.socket = NULL; |
| 3187 | mutex_unlock(&mdev->meta.mutex); | ||
| 3172 | } | 3188 | } |
| 3173 | } | 3189 | } |
| 3174 | 3190 | ||
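[Note: drbd_free_sock() above now shuts each socket down under its connection mutex, so no concurrent sender can race with the teardown. A schematic of the pattern with placeholder field names; a sketch, not the driver's actual structures.]

#include <linux/mutex.h>
#include <linux/net.h>

struct conn {
	struct mutex  mutex;	/* serializes users of 'socket' */
	struct socket *socket;
};

static void conn_free_sock(struct conn *c)
{
	if (!c->socket)
		return;

	mutex_lock(&c->mutex);
	kernel_sock_shutdown(c->socket, SHUT_RDWR);
	sock_release(c->socket);
	c->socket = NULL;	/* users must check this under the mutex */
	mutex_unlock(&c->mutex);
}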
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 4df3b40b1057..6429d2b19e06 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c | |||
| @@ -285,8 +285,8 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
| 285 | } | 285 | } |
| 286 | 286 | ||
| 287 | if (r == SS_NO_UP_TO_DATE_DISK && force && | 287 | if (r == SS_NO_UP_TO_DATE_DISK && force && |
| 288 | (mdev->state.disk == D_INCONSISTENT || | 288 | (mdev->state.disk < D_UP_TO_DATE && |
| 289 | mdev->state.disk == D_OUTDATED)) { | 289 | mdev->state.disk >= D_INCONSISTENT)) { |
| 290 | mask.disk = D_MASK; | 290 | mask.disk = D_MASK; |
| 291 | val.disk = D_UP_TO_DATE; | 291 | val.disk = D_UP_TO_DATE; |
| 292 | forced = 1; | 292 | forced = 1; |
| @@ -407,7 +407,7 @@ static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | |||
| 407 | } | 407 | } |
| 408 | 408 | ||
| 409 | reply->ret_code = | 409 | reply->ret_code = |
| 410 | drbd_set_role(mdev, R_PRIMARY, primary_args.overwrite_peer); | 410 | drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force); |
| 411 | 411 | ||
| 412 | return 0; | 412 | return 0; |
| 413 | } | 413 | } |
| @@ -941,6 +941,25 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
| 941 | 941 | ||
| 942 | drbd_md_set_sector_offsets(mdev, nbc); | 942 | drbd_md_set_sector_offsets(mdev, nbc); |
| 943 | 943 | ||
| 944 | /* allocate a second IO page if logical_block_size != 512 */ | ||
| 945 | logical_block_size = bdev_logical_block_size(nbc->md_bdev); | ||
| 946 | if (logical_block_size == 0) | ||
| 947 | logical_block_size = MD_SECTOR_SIZE; | ||
| 948 | |||
| 949 | if (logical_block_size != MD_SECTOR_SIZE) { | ||
| 950 | if (!mdev->md_io_tmpp) { | ||
| 951 | struct page *page = alloc_page(GFP_NOIO); | ||
| 952 | if (!page) | ||
| 953 | goto force_diskless_dec; | ||
| 954 | |||
| 955 | dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n", | ||
| 956 | logical_block_size, MD_SECTOR_SIZE); | ||
| 957 | dev_warn(DEV, "Workaround engaged (has performance impact).\n"); | ||
| 958 | |||
| 959 | mdev->md_io_tmpp = page; | ||
| 960 | } | ||
| 961 | } | ||
| 962 | |||
| 944 | if (!mdev->bitmap) { | 963 | if (!mdev->bitmap) { |
| 945 | if (drbd_bm_init(mdev)) { | 964 | if (drbd_bm_init(mdev)) { |
| 946 | retcode = ERR_NOMEM; | 965 | retcode = ERR_NOMEM; |
| @@ -980,25 +999,6 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
| 980 | goto force_diskless_dec; | 999 | goto force_diskless_dec; |
| 981 | } | 1000 | } |
| 982 | 1001 | ||
| 983 | /* allocate a second IO page if logical_block_size != 512 */ | ||
| 984 | logical_block_size = bdev_logical_block_size(nbc->md_bdev); | ||
| 985 | if (logical_block_size == 0) | ||
| 986 | logical_block_size = MD_SECTOR_SIZE; | ||
| 987 | |||
| 988 | if (logical_block_size != MD_SECTOR_SIZE) { | ||
| 989 | if (!mdev->md_io_tmpp) { | ||
| 990 | struct page *page = alloc_page(GFP_NOIO); | ||
| 991 | if (!page) | ||
| 992 | goto force_diskless_dec; | ||
| 993 | |||
| 994 | dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n", | ||
| 995 | logical_block_size, MD_SECTOR_SIZE); | ||
| 996 | dev_warn(DEV, "Workaround engaged (has performance impact).\n"); | ||
| 997 | |||
| 998 | mdev->md_io_tmpp = page; | ||
| 999 | } | ||
| 1000 | } | ||
| 1001 | |||
| 1002 | /* Reset the "barriers don't work" bits here, then force meta data to | 1002 | /* Reset the "barriers don't work" bits here, then force meta data to |
| 1003 | * be written, to ensure we determine if barriers are supported. */ | 1003 | * be written, to ensure we determine if barriers are supported. */ |
| 1004 | if (nbc->dc.no_md_flush) | 1004 | if (nbc->dc.no_md_flush) |
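[Note: the first drbd_nl.c hunk widens the forced-promotion test from two exact disk states to a range comparison over the ordered disk-state enum. Expressed as a standalone predicate, using a simplified stand-in enum assumed to be declared in ascending order of freshness, as the real drbd.h enum is.]

/* Simplified stand-in for DRBD's ordered disk states. */
enum disk_state {
	DISKLESS,
	INCONSISTENT,
	OUTDATED,
	CONSISTENT,
	UP_TO_DATE,
};

/* Old test: only two specific states could be forced up to date. */
static int may_force_old(enum disk_state d)
{
	return d == INCONSISTENT || d == OUTDATED;
}

/* New test: any state in [INCONSISTENT, UP_TO_DATE) qualifies, which
 * also covers states sitting between OUTDATED and UP_TO_DATE. */
static int may_force_new(enum disk_state d)
{
	return d >= INCONSISTENT && d < UP_TO_DATE;
}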
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index d065c646b35a..ed9f1de24a71 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c | |||
| @@ -2513,6 +2513,10 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol | |||
| 2513 | } | 2513 | } |
| 2514 | 2514 | ||
| 2515 | if (hg == -100) { | 2515 | if (hg == -100) { |
| 2516 | /* FIXME this log message is not correct if we end up here | ||
| 2517 | * after an attempted attach on a diskless node. | ||
| 2518 | * We just refuse to attach -- well, we drop the "connection" | ||
| 2519 | * to that disk, in a way... */ | ||
| 2516 | dev_alert(DEV, "Split-Brain detected, dropping connection!\n"); | 2520 | dev_alert(DEV, "Split-Brain detected, dropping connection!\n"); |
| 2517 | drbd_khelper(mdev, "split-brain"); | 2521 | drbd_khelper(mdev, "split-brain"); |
| 2518 | return C_MASK; | 2522 | return C_MASK; |
| @@ -2538,6 +2542,16 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol | |||
| 2538 | } | 2542 | } |
| 2539 | } | 2543 | } |
| 2540 | 2544 | ||
| 2545 | if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) { | ||
| 2546 | if (hg == 0) | ||
| 2547 | dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n"); | ||
| 2548 | else | ||
| 2549 | dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.", | ||
| 2550 | drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET), | ||
| 2551 | abs(hg) >= 2 ? "full" : "bit-map based"); | ||
| 2552 | return C_MASK; | ||
| 2553 | } | ||
| 2554 | |||
| 2541 | if (abs(hg) >= 2) { | 2555 | if (abs(hg) >= 2) { |
| 2542 | dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); | 2556 | dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); |
| 2543 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake")) | 2557 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake")) |
| @@ -2585,7 +2599,7 @@ static int receive_protocol(struct drbd_conf *mdev, struct p_header *h) | |||
| 2585 | struct p_protocol *p = (struct p_protocol *)h; | 2599 | struct p_protocol *p = (struct p_protocol *)h; |
| 2586 | int header_size, data_size; | 2600 | int header_size, data_size; |
| 2587 | int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p; | 2601 | int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p; |
| 2588 | int p_want_lose, p_two_primaries; | 2602 | int p_want_lose, p_two_primaries, cf; |
| 2589 | char p_integrity_alg[SHARED_SECRET_MAX] = ""; | 2603 | char p_integrity_alg[SHARED_SECRET_MAX] = ""; |
| 2590 | 2604 | ||
| 2591 | header_size = sizeof(*p) - sizeof(*h); | 2605 | header_size = sizeof(*p) - sizeof(*h); |
| @@ -2598,8 +2612,14 @@ static int receive_protocol(struct drbd_conf *mdev, struct p_header *h) | |||
| 2598 | p_after_sb_0p = be32_to_cpu(p->after_sb_0p); | 2612 | p_after_sb_0p = be32_to_cpu(p->after_sb_0p); |
| 2599 | p_after_sb_1p = be32_to_cpu(p->after_sb_1p); | 2613 | p_after_sb_1p = be32_to_cpu(p->after_sb_1p); |
| 2600 | p_after_sb_2p = be32_to_cpu(p->after_sb_2p); | 2614 | p_after_sb_2p = be32_to_cpu(p->after_sb_2p); |
| 2601 | p_want_lose = be32_to_cpu(p->want_lose); | ||
| 2602 | p_two_primaries = be32_to_cpu(p->two_primaries); | 2615 | p_two_primaries = be32_to_cpu(p->two_primaries); |
| 2616 | cf = be32_to_cpu(p->conn_flags); | ||
| 2617 | p_want_lose = cf & CF_WANT_LOSE; | ||
| 2618 | |||
| 2619 | clear_bit(CONN_DRY_RUN, &mdev->flags); | ||
| 2620 | |||
| 2621 | if (cf & CF_DRY_RUN) | ||
| 2622 | set_bit(CONN_DRY_RUN, &mdev->flags); | ||
| 2603 | 2623 | ||
| 2604 | if (p_proto != mdev->net_conf->wire_protocol) { | 2624 | if (p_proto != mdev->net_conf->wire_protocol) { |
| 2605 | dev_err(DEV, "incompatible communication protocols\n"); | 2625 | dev_err(DEV, "incompatible communication protocols\n"); |
| @@ -3118,13 +3138,16 @@ static int receive_state(struct drbd_conf *mdev, struct p_header *h) | |||
| 3118 | 3138 | ||
| 3119 | put_ldev(mdev); | 3139 | put_ldev(mdev); |
| 3120 | if (nconn == C_MASK) { | 3140 | if (nconn == C_MASK) { |
| 3141 | nconn = C_CONNECTED; | ||
| 3121 | if (mdev->state.disk == D_NEGOTIATING) { | 3142 | if (mdev->state.disk == D_NEGOTIATING) { |
| 3122 | drbd_force_state(mdev, NS(disk, D_DISKLESS)); | 3143 | drbd_force_state(mdev, NS(disk, D_DISKLESS)); |
| 3123 | nconn = C_CONNECTED; | ||
| 3124 | } else if (peer_state.disk == D_NEGOTIATING) { | 3144 | } else if (peer_state.disk == D_NEGOTIATING) { |
| 3125 | dev_err(DEV, "Disk attach process on the peer node was aborted.\n"); | 3145 | dev_err(DEV, "Disk attach process on the peer node was aborted.\n"); |
| 3126 | peer_state.disk = D_DISKLESS; | 3146 | peer_state.disk = D_DISKLESS; |
| 3147 | real_peer_disk = D_DISKLESS; | ||
| 3127 | } else { | 3148 | } else { |
| 3149 | if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags)) | ||
| 3150 | return FALSE; | ||
| 3128 | D_ASSERT(oconn == C_WF_REPORT_PARAMS); | 3151 | D_ASSERT(oconn == C_WF_REPORT_PARAMS); |
| 3129 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 3152 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
| 3130 | return FALSE; | 3153 | return FALSE; |
| @@ -3594,10 +3617,7 @@ static void drbd_disconnect(struct drbd_conf *mdev) | |||
| 3594 | 3617 | ||
| 3595 | /* asender does not clean up anything. it must not interfere, either */ | 3618 | /* asender does not clean up anything. it must not interfere, either */ |
| 3596 | drbd_thread_stop(&mdev->asender); | 3619 | drbd_thread_stop(&mdev->asender); |
| 3597 | |||
| 3598 | mutex_lock(&mdev->data.mutex); | ||
| 3599 | drbd_free_sock(mdev); | 3620 | drbd_free_sock(mdev); |
| 3600 | mutex_unlock(&mdev->data.mutex); | ||
| 3601 | 3621 | ||
| 3602 | spin_lock_irq(&mdev->req_lock); | 3622 | spin_lock_irq(&mdev->req_lock); |
| 3603 | _drbd_wait_ee_list_empty(mdev, &mdev->active_ee); | 3623 | _drbd_wait_ee_list_empty(mdev, &mdev->active_ee); |
| @@ -4054,6 +4074,8 @@ static int got_PingAck(struct drbd_conf *mdev, struct p_header *h) | |||
| 4054 | { | 4074 | { |
| 4055 | /* restore idle timeout */ | 4075 | /* restore idle timeout */ |
| 4056 | mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ; | 4076 | mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ; |
| 4077 | if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags)) | ||
| 4078 | wake_up(&mdev->misc_wait); | ||
| 4057 | 4079 | ||
| 4058 | return TRUE; | 4080 | return TRUE; |
| 4059 | } | 4081 | } |
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index b453c2bca3be..44bf6d11197e 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c | |||
| @@ -938,7 +938,8 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
| 938 | 938 | ||
| 939 | if (eq) { | 939 | if (eq) { |
| 940 | drbd_set_in_sync(mdev, e->sector, e->size); | 940 | drbd_set_in_sync(mdev, e->sector, e->size); |
| 941 | mdev->rs_same_csum++; | 941 | /* rs_same_csums unit is BM_BLOCK_SIZE */ |
| 942 | mdev->rs_same_csum += e->size >> BM_BLOCK_SHIFT; | ||
| 942 | ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e); | 943 | ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e); |
| 943 | } else { | 944 | } else { |
| 944 | inc_rs_pending(mdev); | 945 | inc_rs_pending(mdev); |
| @@ -1288,6 +1289,14 @@ int drbd_alter_sa(struct drbd_conf *mdev, int na) | |||
| 1288 | return retcode; | 1289 | return retcode; |
| 1289 | } | 1290 | } |
| 1290 | 1291 | ||
| 1292 | static void ping_peer(struct drbd_conf *mdev) | ||
| 1293 | { | ||
| 1294 | clear_bit(GOT_PING_ACK, &mdev->flags); | ||
| 1295 | request_ping(mdev); | ||
| 1296 | wait_event(mdev->misc_wait, | ||
| 1297 | test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED); | ||
| 1298 | } | ||
| 1299 | |||
| 1291 | /** | 1300 | /** |
| 1292 | * drbd_start_resync() - Start the resync process | 1301 | * drbd_start_resync() - Start the resync process |
| 1293 | * @mdev: DRBD device. | 1302 | * @mdev: DRBD device. |
| @@ -1371,7 +1380,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) | |||
| 1371 | _drbd_pause_after(mdev); | 1380 | _drbd_pause_after(mdev); |
| 1372 | } | 1381 | } |
| 1373 | write_unlock_irq(&global_state_lock); | 1382 | write_unlock_irq(&global_state_lock); |
| 1374 | drbd_state_unlock(mdev); | ||
| 1375 | put_ldev(mdev); | 1383 | put_ldev(mdev); |
| 1376 | 1384 | ||
| 1377 | if (r == SS_SUCCESS) { | 1385 | if (r == SS_SUCCESS) { |
| @@ -1382,11 +1390,8 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) | |||
| 1382 | 1390 | ||
| 1383 | if (mdev->rs_total == 0) { | 1391 | if (mdev->rs_total == 0) { |
| 1384 | /* Peer still reachable? Beware of failing before-resync-target handlers! */ | 1392 | /* Peer still reachable? Beware of failing before-resync-target handlers! */ |
| 1385 | request_ping(mdev); | 1393 | ping_peer(mdev); |
| 1386 | __set_current_state(TASK_INTERRUPTIBLE); | ||
| 1387 | schedule_timeout(mdev->net_conf->ping_timeo*HZ/9); /* 9 instead 10 */ | ||
| 1388 | drbd_resync_finished(mdev); | 1394 | drbd_resync_finished(mdev); |
| 1389 | return; | ||
| 1390 | } | 1395 | } |
| 1391 | 1396 | ||
| 1392 | /* ns.conn may already be != mdev->state.conn, | 1397 | /* ns.conn may already be != mdev->state.conn, |
| @@ -1398,6 +1403,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) | |||
| 1398 | 1403 | ||
| 1399 | drbd_md_sync(mdev); | 1404 | drbd_md_sync(mdev); |
| 1400 | } | 1405 | } |
| 1406 | drbd_state_unlock(mdev); | ||
| 1401 | } | 1407 | } |
| 1402 | 1408 | ||
| 1403 | int drbd_worker(struct drbd_thread *thi) | 1409 | int drbd_worker(struct drbd_thread *thi) |
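[Note: the got_PingAck()/ping_peer() pair above replaces a fixed schedule_timeout() with an event wait: the receiving thread sets GOT_PING_ACK and wakes misc_wait, and the worker blocks until the ack arrives or the connection drops. A condensed sketch of that handshake with placeholder names; misc_wait is assumed to have been set up with init_waitqueue_head().]

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/wait.h>

#define GOT_PING_ACK 0		/* bit number in ->flags */

struct peer {
	unsigned long	  flags;
	wait_queue_head_t misc_wait;
	bool		  connected;
};

/* Called from the receiving thread when a ping-ack packet arrives. */
static void on_ping_ack(struct peer *p)
{
	if (!test_and_set_bit(GOT_PING_ACK, &p->flags))
		wake_up(&p->misc_wait);
}

/* Called from the worker: send a ping, then wait for ack or disconnect. */
static void ping_peer(struct peer *p)
{
	clear_bit(GOT_PING_ACK, &p->flags);
	/* request_ping(p); -- send the ping packet here */
	wait_event(p->misc_wait,
		   test_bit(GOT_PING_ACK, &p->flags) || !p->connected);
}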
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index cb69929d917a..8546d123b9a7 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
| @@ -237,6 +237,8 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec, | |||
| 237 | if (ret) | 237 | if (ret) |
| 238 | goto fail; | 238 | goto fail; |
| 239 | 239 | ||
| 240 | file_update_time(file); | ||
| 241 | |||
| 240 | transfer_result = lo_do_transfer(lo, WRITE, page, offset, | 242 | transfer_result = lo_do_transfer(lo, WRITE, page, offset, |
| 241 | bvec->bv_page, bv_offs, size, IV); | 243 | bvec->bv_page, bv_offs, size, IV); |
| 242 | copied = size; | 244 | copied = size; |
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 8866ca369d5e..71acf4e53356 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c | |||
| @@ -341,11 +341,11 @@ static int pcd_wait(struct pcd_unit *cd, int go, int stop, char *fun, char *msg) | |||
| 341 | && (j++ < PCD_SPIN)) | 341 | && (j++ < PCD_SPIN)) |
| 342 | udelay(PCD_DELAY); | 342 | udelay(PCD_DELAY); |
| 343 | 343 | ||
| 344 | if ((r & (IDE_ERR & stop)) || (j >= PCD_SPIN)) { | 344 | if ((r & (IDE_ERR & stop)) || (j > PCD_SPIN)) { |
| 345 | s = read_reg(cd, 7); | 345 | s = read_reg(cd, 7); |
| 346 | e = read_reg(cd, 1); | 346 | e = read_reg(cd, 1); |
| 347 | p = read_reg(cd, 2); | 347 | p = read_reg(cd, 2); |
| 348 | if (j >= PCD_SPIN) | 348 | if (j > PCD_SPIN) |
| 349 | e |= 0x100; | 349 | e |= 0x100; |
| 350 | if (fun) | 350 | if (fun) |
| 351 | printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x" | 351 | printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x" |
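[Note: the pcd hunk above, and the identical pf and pt hunks below, fix the same off-by-one. The poll loop post-increments j in its test, so success on the very last poll leaves j equal to the spin budget, while only a genuine timeout pushes j one past it; the timeout check therefore has to be ">" rather than ">=". A minimal model of the corrected loop, with illustrative constants.]

#include <linux/delay.h>

#define SPIN_COUNT 1000		/* illustrative poll budget */

/* Poll until ready() says yes or the budget is exhausted.
 * Returns 0 on success, -1 on timeout. */
static int poll_ready(int (*ready)(void))
{
	int j = 0;

	while (!ready() && (j++ < SPIN_COUNT))
		udelay(10);

	/* Success on the final poll exits with j == SPIN_COUNT; only the
	 * failed "j++ < SPIN_COUNT" test pushes j to SPIN_COUNT + 1, so
	 * the timeout test must be "j > SPIN_COUNT", not ">=". */
	return (j > SPIN_COUNT) ? -1 : 0;
}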
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index ddb4f9abd480..c059aab3006b 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c | |||
| @@ -391,11 +391,11 @@ static int pf_wait(struct pf_unit *pf, int go, int stop, char *fun, char *msg) | |||
| 391 | && (j++ < PF_SPIN)) | 391 | && (j++ < PF_SPIN)) |
| 392 | udelay(PF_SPIN_DEL); | 392 | udelay(PF_SPIN_DEL); |
| 393 | 393 | ||
| 394 | if ((r & (STAT_ERR & stop)) || (j >= PF_SPIN)) { | 394 | if ((r & (STAT_ERR & stop)) || (j > PF_SPIN)) { |
| 395 | s = read_reg(pf, 7); | 395 | s = read_reg(pf, 7); |
| 396 | e = read_reg(pf, 1); | 396 | e = read_reg(pf, 1); |
| 397 | p = read_reg(pf, 2); | 397 | p = read_reg(pf, 2); |
| 398 | if (j >= PF_SPIN) | 398 | if (j > PF_SPIN) |
| 399 | e |= 0x100; | 399 | e |= 0x100; |
| 400 | if (fun) | 400 | if (fun) |
| 401 | printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x" | 401 | printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x" |
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c index 1e4006e18f03..bc5825fdeaab 100644 --- a/drivers/block/paride/pt.c +++ b/drivers/block/paride/pt.c | |||
| @@ -274,11 +274,11 @@ static int pt_wait(struct pt_unit *tape, int go, int stop, char *fun, char *msg) | |||
| 274 | && (j++ < PT_SPIN)) | 274 | && (j++ < PT_SPIN)) |
| 275 | udelay(PT_SPIN_DEL); | 275 | udelay(PT_SPIN_DEL); |
| 276 | 276 | ||
| 277 | if ((r & (STAT_ERR & stop)) || (j >= PT_SPIN)) { | 277 | if ((r & (STAT_ERR & stop)) || (j > PT_SPIN)) { |
| 278 | s = read_reg(pi, 7); | 278 | s = read_reg(pi, 7); |
| 279 | e = read_reg(pi, 1); | 279 | e = read_reg(pi, 1); |
| 280 | p = read_reg(pi, 2); | 280 | p = read_reg(pi, 2); |
| 281 | if (j >= PT_SPIN) | 281 | if (j > PT_SPIN) |
| 282 | e |= 0x100; | 282 | e |= 0x100; |
| 283 | if (fun) | 283 | if (fun) |
| 284 | printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x" | 284 | printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x" |
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 4b12b820c9a6..2138a7ae050c 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
| @@ -348,14 +348,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) | |||
| 348 | set_capacity(vblk->disk, cap); | 348 | set_capacity(vblk->disk, cap); |
| 349 | 349 | ||
| 350 | /* We can handle whatever the host told us to handle. */ | 350 | /* We can handle whatever the host told us to handle. */ |
| 351 | blk_queue_max_phys_segments(q, vblk->sg_elems-2); | 351 | blk_queue_max_segments(q, vblk->sg_elems-2); |
| 352 | blk_queue_max_hw_segments(q, vblk->sg_elems-2); | ||
| 353 | 352 | ||
| 354 | /* No need to bounce any requests */ | 353 | /* No need to bounce any requests */ |
| 355 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); | 354 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); |
| 356 | 355 | ||
| 357 | /* No real sector limit. */ | 356 | /* No real sector limit. */ |
| 358 | blk_queue_max_sectors(q, -1U); | 357 | blk_queue_max_hw_sectors(q, -1U); |
| 359 | 358 | ||
| 360 | /* Host can optionally specify maximum segment size and number of | 359 | /* Host can optionally specify maximum segment size and number of |
| 361 | * segments. */ | 360 | * segments. */ |
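[Note: the virtio_blk hunk follows the block-layer API consolidation in this release: the separate phys/hw segment limits collapse into blk_queue_max_segments(), and blk_queue_max_sectors() becomes blk_queue_max_hw_sectors(). A sketch of the new-style queue setup; the function and values are placeholders, not the driver's actual probe path.]

#include <linux/blkdev.h>

static void setup_queue_limits(struct request_queue *q, unsigned int sg_elems)
{
	/* One call now sets the segment limit that previously required both
	 * blk_queue_max_phys_segments() and blk_queue_max_hw_segments(). */
	blk_queue_max_segments(q, sg_elems - 2);

	/* No bouncing needed for this hypothetical device. */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	/* "No real sector limit": pass the largest representable value. */
	blk_queue_max_hw_sectors(q, -1U);
}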
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index d41331bc2aa7..aa4248efc5d8 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
| @@ -1817,8 +1817,6 @@ static int intel_845_configure(void) | |||
| 1817 | pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1)); | 1817 | pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1)); |
| 1818 | /* clear any possible error conditions */ | 1818 | /* clear any possible error conditions */ |
| 1819 | pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c); | 1819 | pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c); |
| 1820 | |||
| 1821 | intel_i830_setup_flush(); | ||
| 1822 | return 0; | 1820 | return 0; |
| 1823 | } | 1821 | } |
| 1824 | 1822 | ||
| @@ -2188,7 +2186,6 @@ static const struct agp_bridge_driver intel_845_driver = { | |||
| 2188 | .agp_destroy_page = agp_generic_destroy_page, | 2186 | .agp_destroy_page = agp_generic_destroy_page, |
| 2189 | .agp_destroy_pages = agp_generic_destroy_pages, | 2187 | .agp_destroy_pages = agp_generic_destroy_pages, |
| 2190 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 2188 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
| 2191 | .chipset_flush = intel_i830_chipset_flush, | ||
| 2192 | }; | 2189 | }; |
| 2193 | 2190 | ||
| 2194 | static const struct agp_bridge_driver intel_850_driver = { | 2191 | static const struct agp_bridge_driver intel_850_driver = { |
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c index 6c32fbf07164..56b27671adc4 100644 --- a/drivers/char/amiserial.c +++ b/drivers/char/amiserial.c | |||
| @@ -2021,8 +2021,6 @@ static int __init rs_init(void) | |||
| 2021 | state->baud_base = amiga_colorclock; | 2021 | state->baud_base = amiga_colorclock; |
| 2022 | state->xmit_fifo_size = 1; | 2022 | state->xmit_fifo_size = 1; |
| 2023 | 2023 | ||
| 2024 | local_irq_save(flags); | ||
| 2025 | |||
| 2026 | /* set ISRs, and then disable the rx interrupts */ | 2024 | /* set ISRs, and then disable the rx interrupts */ |
| 2027 | error = request_irq(IRQ_AMIGA_TBE, ser_tx_int, 0, "serial TX", state); | 2025 | error = request_irq(IRQ_AMIGA_TBE, ser_tx_int, 0, "serial TX", state); |
| 2028 | if (error) | 2026 | if (error) |
| @@ -2033,6 +2031,8 @@ static int __init rs_init(void) | |||
| 2033 | if (error) | 2031 | if (error) |
| 2034 | goto fail_free_irq; | 2032 | goto fail_free_irq; |
| 2035 | 2033 | ||
| 2034 | local_irq_save(flags); | ||
| 2035 | |||
| 2036 | /* turn off Rx and Tx interrupts */ | 2036 | /* turn off Rx and Tx interrupts */ |
| 2037 | custom.intena = IF_RBF | IF_TBE; | 2037 | custom.intena = IF_RBF | IF_TBE; |
| 2038 | mb(); | 2038 | mb(); |
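[Note: the amiserial hunks move local_irq_save() so it no longer wraps the request_irq() calls, presumably because request_irq() may sleep and so must not run with local interrupts disabled. The corrected ordering, sketched with a hypothetical IRQ number and handler.]

#include <linux/interrupt.h>

static irqreturn_t example_tx_int(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_setup(void *state)
{
	unsigned long flags;
	int error;

	/* May sleep: must be called with interrupts enabled. */
	error = request_irq(42 /* hypothetical IRQ */, example_tx_int, 0,
			    "example TX", state);
	if (error)
		return error;

	/* Only now enter the interrupts-off region for register setup. */
	local_irq_save(flags);
	/* ... program the hardware ... */
	local_irq_restore(flags);
	return 0;
}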
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index d3890e8d30e1..35cca4c7fb18 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c | |||
| @@ -368,16 +368,12 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) | |||
| 368 | hp = tty->driver_data; | 368 | hp = tty->driver_data; |
| 369 | 369 | ||
| 370 | spin_lock_irqsave(&hp->lock, flags); | 370 | spin_lock_irqsave(&hp->lock, flags); |
| 371 | tty_kref_get(tty); | ||
| 372 | 371 | ||
| 373 | if (--hp->count == 0) { | 372 | if (--hp->count == 0) { |
| 374 | /* We are done with the tty pointer now. */ | 373 | /* We are done with the tty pointer now. */ |
| 375 | hp->tty = NULL; | 374 | hp->tty = NULL; |
| 376 | spin_unlock_irqrestore(&hp->lock, flags); | 375 | spin_unlock_irqrestore(&hp->lock, flags); |
| 377 | 376 | ||
| 378 | /* Put the ref obtained in hvc_open() */ | ||
| 379 | tty_kref_put(tty); | ||
| 380 | |||
| 381 | if (hp->ops->notifier_del) | 377 | if (hp->ops->notifier_del) |
| 382 | hp->ops->notifier_del(hp, hp->data); | 378 | hp->ops->notifier_del(hp, hp->data); |
| 383 | 379 | ||
diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 1f3215ac085b..f54dab8acdcd 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c | |||
| @@ -225,6 +225,7 @@ int __weak phys_mem_access_prot_allowed(struct file *file, | |||
| 225 | * outside of main memory. | 225 | * outside of main memory. |
| 226 | * | 226 | * |
| 227 | */ | 227 | */ |
| 228 | #ifdef pgprot_noncached | ||
| 228 | static int uncached_access(struct file *file, unsigned long addr) | 229 | static int uncached_access(struct file *file, unsigned long addr) |
| 229 | { | 230 | { |
| 230 | #if defined(CONFIG_IA64) | 231 | #if defined(CONFIG_IA64) |
| @@ -251,6 +252,7 @@ static int uncached_access(struct file *file, unsigned long addr) | |||
| 251 | return addr >= __pa(high_memory); | 252 | return addr >= __pa(high_memory); |
| 252 | #endif | 253 | #endif |
| 253 | } | 254 | } |
| 255 | #endif | ||
| 254 | 256 | ||
| 255 | static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | 257 | static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, |
| 256 | unsigned long size, pgprot_t vma_prot) | 258 | unsigned long size, pgprot_t vma_prot) |
| @@ -710,11 +712,6 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig) | |||
| 710 | switch (orig) { | 712 | switch (orig) { |
| 711 | case SEEK_CUR: | 713 | case SEEK_CUR: |
| 712 | offset += file->f_pos; | 714 | offset += file->f_pos; |
| 713 | if ((unsigned long long)offset < | ||
| 714 | (unsigned long long)file->f_pos) { | ||
| 715 | ret = -EOVERFLOW; | ||
| 716 | break; | ||
| 717 | } | ||
| 718 | case SEEK_SET: | 715 | case SEEK_SET: |
| 719 | /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */ | 716 | /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */ |
| 720 | if ((unsigned long long)offset >= ~0xFFFULL) { | 717 | if ((unsigned long long)offset >= ~0xFFFULL) { |
| @@ -908,6 +905,9 @@ static int __init chr_dev_init(void) | |||
| 908 | printk("unable to get major %d for memory devs\n", MEM_MAJOR); | 905 | printk("unable to get major %d for memory devs\n", MEM_MAJOR); |
| 909 | 906 | ||
| 910 | mem_class = class_create(THIS_MODULE, "mem"); | 907 | mem_class = class_create(THIS_MODULE, "mem"); |
| 908 | if (IS_ERR(mem_class)) | ||
| 909 | return PTR_ERR(mem_class); | ||
| 910 | |||
| 911 | mem_class->devnode = mem_devnode; | 911 | mem_class->devnode = mem_devnode; |
| 912 | for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) { | 912 | for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) { |
| 913 | if (!devlist[minor].name) | 913 | if (!devlist[minor].name) |
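[Note: chr_dev_init() above now checks class_create() before dereferencing the result. class_create() reports failure through the ERR_PTR convention rather than by returning NULL, so the check must use IS_ERR(). The general pattern, in a stand-alone illustrative init function.]

#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>

static struct class *example_class;

static int __init example_init(void)
{
	/* class_create() returns an ERR_PTR() on failure, never NULL,
	 * so a NULL test would miss the error entirely. */
	example_class = class_create(THIS_MODULE, "example");
	if (IS_ERR(example_class))
		return PTR_ERR(example_class);

	/* Safe to dereference example_class from here on. */
	return 0;
}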
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c index 95c9f54f3d30..47023053ee85 100644 --- a/drivers/char/mxser.c +++ b/drivers/char/mxser.c | |||
| @@ -1768,7 +1768,7 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, | |||
| 1768 | int len, lsr; | 1768 | int len, lsr; |
| 1769 | 1769 | ||
| 1770 | len = mxser_chars_in_buffer(tty); | 1770 | len = mxser_chars_in_buffer(tty); |
| 1771 | spin_lock(&info->slock); | 1771 | spin_lock_irq(&info->slock); |
| 1772 | lsr = inb(info->ioaddr + UART_LSR) & UART_LSR_THRE; | 1772 | lsr = inb(info->ioaddr + UART_LSR) & UART_LSR_THRE; |
| 1773 | spin_unlock_irq(&info->slock); | 1773 | spin_unlock_irq(&info->slock); |
| 1774 | len += (lsr ? 0 : 1); | 1774 | len += (lsr ? 0 : 1); |
| @@ -1778,12 +1778,12 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, | |||
| 1778 | case MOXA_ASPP_MON: { | 1778 | case MOXA_ASPP_MON: { |
| 1779 | int mcr, status; | 1779 | int mcr, status; |
| 1780 | 1780 | ||
| 1781 | spin_lock(&info->slock); | 1781 | spin_lock_irq(&info->slock); |
| 1782 | status = mxser_get_msr(info->ioaddr, 1, tty->index); | 1782 | status = mxser_get_msr(info->ioaddr, 1, tty->index); |
| 1783 | mxser_check_modem_status(tty, info, status); | 1783 | mxser_check_modem_status(tty, info, status); |
| 1784 | 1784 | ||
| 1785 | mcr = inb(info->ioaddr + UART_MCR); | 1785 | mcr = inb(info->ioaddr + UART_MCR); |
| 1786 | spin_unlock(&info->slock); | 1786 | spin_unlock_irq(&info->slock); |
| 1787 | 1787 | ||
| 1788 | if (mcr & MOXA_MUST_MCR_XON_FLAG) | 1788 | if (mcr & MOXA_MUST_MCR_XON_FLAG) |
| 1789 | info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFHOLD; | 1789 | info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFHOLD; |
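[Note: both mxser hunks fix mismatched lock/unlock flavours: a plain spin_lock() paired with spin_unlock_irq() would re-enable interrupts that the critical section never disabled. Acquisition and release variants must always match; a short illustration with a placeholder lock.]

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void matched_irq_locking(void)
{
	unsigned long flags;

	/* Correct: _irq acquire pairs with _irq release. */
	spin_lock_irq(&example_lock);
	/* ... touch data also used from an interrupt handler ... */
	spin_unlock_irq(&example_lock);

	/* Also correct, and safer where the caller's IRQ state is
	 * unknown: save and restore the flags. */
	spin_lock_irqsave(&example_lock, flags);
	/* ... */
	spin_unlock_irqrestore(&example_lock, flags);
}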
diff --git a/drivers/char/raw.c b/drivers/char/raw.c index d331c59b571c..8756ab0daa8b 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c | |||
| @@ -248,6 +248,7 @@ static const struct file_operations raw_fops = { | |||
| 248 | .aio_read = generic_file_aio_read, | 248 | .aio_read = generic_file_aio_read, |
| 249 | .write = do_sync_write, | 249 | .write = do_sync_write, |
| 250 | .aio_write = blkdev_aio_write, | 250 | .aio_write = blkdev_aio_write, |
| 251 | .fsync = blkdev_fsync, | ||
| 251 | .open = raw_open, | 252 | .open = raw_open, |
| 252 | .release= raw_release, | 253 | .release= raw_release, |
| 253 | .ioctl = raw_ioctl, | 254 | .ioctl = raw_ioctl, |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 026ea6c27e07..196428c2287a 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
| @@ -33,6 +33,35 @@ | |||
| 33 | #include <linux/workqueue.h> | 33 | #include <linux/workqueue.h> |
| 34 | #include "hvc_console.h" | 34 | #include "hvc_console.h" |
| 35 | 35 | ||
| 36 | /* Moved here from .h file in order to disable MULTIPORT. */ | ||
| 37 | #define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */ | ||
| 38 | |||
| 39 | struct virtio_console_multiport_conf { | ||
| 40 | struct virtio_console_config config; | ||
| 41 | /* max. number of ports this device can hold */ | ||
| 42 | __u32 max_nr_ports; | ||
| 43 | /* number of ports added so far */ | ||
| 44 | __u32 nr_ports; | ||
| 45 | } __attribute__((packed)); | ||
| 46 | |||
| 47 | /* | ||
| 48 | * A message that's passed between the Host and the Guest for a | ||
| 49 | * particular port. | ||
| 50 | */ | ||
| 51 | struct virtio_console_control { | ||
| 52 | __u32 id; /* Port number */ | ||
| 53 | __u16 event; /* The kind of control event (see below) */ | ||
| 54 | __u16 value; /* Extra information for the key */ | ||
| 55 | }; | ||
| 56 | |||
| 57 | /* Some events for control messages */ | ||
| 58 | #define VIRTIO_CONSOLE_PORT_READY 0 | ||
| 59 | #define VIRTIO_CONSOLE_CONSOLE_PORT 1 | ||
| 60 | #define VIRTIO_CONSOLE_RESIZE 2 | ||
| 61 | #define VIRTIO_CONSOLE_PORT_OPEN 3 | ||
| 62 | #define VIRTIO_CONSOLE_PORT_NAME 4 | ||
| 63 | #define VIRTIO_CONSOLE_PORT_REMOVE 5 | ||
| 64 | |||
| 36 | /* | 65 | /* |
| 37 | * This is a global struct for storing common data for all the devices | 66 | * This is a global struct for storing common data for all the devices |
| 38 | * this driver handles. | 67 | * this driver handles. |
| @@ -121,7 +150,7 @@ struct ports_device { | |||
| 121 | spinlock_t cvq_lock; | 150 | spinlock_t cvq_lock; |
| 122 | 151 | ||
| 123 | /* The current config space is stored here */ | 152 | /* The current config space is stored here */ |
| 124 | struct virtio_console_config config; | 153 | struct virtio_console_multiport_conf config; |
| 125 | 154 | ||
| 126 | /* The virtio device we're associated with */ | 155 | /* The virtio device we're associated with */ |
| 127 | struct virtio_device *vdev; | 156 | struct virtio_device *vdev; |
| @@ -416,20 +445,16 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) | |||
| 416 | out_vq->vq_ops->kick(out_vq); | 445 | out_vq->vq_ops->kick(out_vq); |
| 417 | 446 | ||
| 418 | if (ret < 0) { | 447 | if (ret < 0) { |
| 419 | len = 0; | 448 | in_count = 0; |
| 420 | goto fail; | 449 | goto fail; |
| 421 | } | 450 | } |
| 422 | 451 | ||
| 423 | /* | 452 | /* Wait till the host acknowledges it pushed out the data we sent. */ |
| 424 | * Wait till the host acknowledges it pushed out the data we | ||
| 425 | * sent. Also ensure we return to userspace the number of | ||
| 426 | * bytes that were successfully consumed by the host. | ||
| 427 | */ | ||
| 428 | while (!out_vq->vq_ops->get_buf(out_vq, &len)) | 453 | while (!out_vq->vq_ops->get_buf(out_vq, &len)) |
| 429 | cpu_relax(); | 454 | cpu_relax(); |
| 430 | fail: | 455 | fail: |
| 431 | /* We're expected to return the amount of data we wrote */ | 456 | /* We're expected to return the amount of data we wrote */ |
| 432 | return len; | 457 | return in_count; |
| 433 | } | 458 | } |
| 434 | 459 | ||
| 435 | /* | 460 | /* |
| @@ -646,13 +671,13 @@ static int put_chars(u32 vtermno, const char *buf, int count) | |||
| 646 | { | 671 | { |
| 647 | struct port *port; | 672 | struct port *port; |
| 648 | 673 | ||
| 674 | if (unlikely(early_put_chars)) | ||
| 675 | return early_put_chars(vtermno, buf, count); | ||
| 676 | |||
| 649 | port = find_port_by_vtermno(vtermno); | 677 | port = find_port_by_vtermno(vtermno); |
| 650 | if (!port) | 678 | if (!port) |
| 651 | return 0; | 679 | return 0; |
| 652 | 680 | ||
| 653 | if (unlikely(early_put_chars)) | ||
| 654 | return early_put_chars(vtermno, buf, count); | ||
| 655 | |||
| 656 | return send_buf(port, (void *)buf, count); | 681 | return send_buf(port, (void *)buf, count); |
| 657 | } | 682 | } |
| 658 | 683 | ||
| @@ -1218,7 +1243,7 @@ fail: | |||
| 1218 | */ | 1243 | */ |
| 1219 | static void config_work_handler(struct work_struct *work) | 1244 | static void config_work_handler(struct work_struct *work) |
| 1220 | { | 1245 | { |
| 1221 | struct virtio_console_config virtconconf; | 1246 | struct virtio_console_multiport_conf virtconconf; |
| 1222 | struct ports_device *portdev; | 1247 | struct ports_device *portdev; |
| 1223 | struct virtio_device *vdev; | 1248 | struct virtio_device *vdev; |
| 1224 | int err; | 1249 | int err; |
| @@ -1227,7 +1252,8 @@ static void config_work_handler(struct work_struct *work) | |||
| 1227 | 1252 | ||
| 1228 | vdev = portdev->vdev; | 1253 | vdev = portdev->vdev; |
| 1229 | vdev->config->get(vdev, | 1254 | vdev->config->get(vdev, |
| 1230 | offsetof(struct virtio_console_config, nr_ports), | 1255 | offsetof(struct virtio_console_multiport_conf, |
| 1256 | nr_ports), | ||
| 1231 | &virtconconf.nr_ports, | 1257 | &virtconconf.nr_ports, |
| 1232 | sizeof(virtconconf.nr_ports)); | 1258 | sizeof(virtconconf.nr_ports)); |
| 1233 | 1259 | ||
| @@ -1419,16 +1445,19 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) | |||
| 1419 | multiport = false; | 1445 | multiport = false; |
| 1420 | portdev->config.nr_ports = 1; | 1446 | portdev->config.nr_ports = 1; |
| 1421 | portdev->config.max_nr_ports = 1; | 1447 | portdev->config.max_nr_ports = 1; |
| 1448 | #if 0 /* Multiport is not quite ready yet --RR */ | ||
| 1422 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { | 1449 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { |
| 1423 | multiport = true; | 1450 | multiport = true; |
| 1424 | vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; | 1451 | vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; |
| 1425 | 1452 | ||
| 1426 | vdev->config->get(vdev, offsetof(struct virtio_console_config, | 1453 | vdev->config->get(vdev, |
| 1427 | nr_ports), | 1454 | offsetof(struct virtio_console_multiport_conf, |
| 1455 | nr_ports), | ||
| 1428 | &portdev->config.nr_ports, | 1456 | &portdev->config.nr_ports, |
| 1429 | sizeof(portdev->config.nr_ports)); | 1457 | sizeof(portdev->config.nr_ports)); |
| 1430 | vdev->config->get(vdev, offsetof(struct virtio_console_config, | 1458 | vdev->config->get(vdev, |
| 1431 | max_nr_ports), | 1459 | offsetof(struct virtio_console_multiport_conf, |
| 1460 | max_nr_ports), | ||
| 1432 | &portdev->config.max_nr_ports, | 1461 | &portdev->config.max_nr_ports, |
| 1433 | sizeof(portdev->config.max_nr_ports)); | 1462 | sizeof(portdev->config.max_nr_ports)); |
| 1434 | if (portdev->config.nr_ports > portdev->config.max_nr_ports) { | 1463 | if (portdev->config.nr_ports > portdev->config.max_nr_ports) { |
| @@ -1444,6 +1473,7 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) | |||
| 1444 | 1473 | ||
| 1445 | /* Let the Host know we support multiple ports.*/ | 1474 | /* Let the Host know we support multiple ports.*/ |
| 1446 | vdev->config->finalize_features(vdev); | 1475 | vdev->config->finalize_features(vdev); |
| 1476 | #endif | ||
| 1447 | 1477 | ||
| 1448 | err = init_vqs(portdev); | 1478 | err = init_vqs(portdev); |
| 1449 | if (err < 0) { | 1479 | if (err < 0) { |
| @@ -1526,7 +1556,6 @@ static struct virtio_device_id id_table[] = { | |||
| 1526 | 1556 | ||
| 1527 | static unsigned int features[] = { | 1557 | static unsigned int features[] = { |
| 1528 | VIRTIO_CONSOLE_F_SIZE, | 1558 | VIRTIO_CONSOLE_F_SIZE, |
| 1529 | VIRTIO_CONSOLE_F_MULTIPORT, | ||
| 1530 | }; | 1559 | }; |
| 1531 | 1560 | ||
| 1532 | static struct virtio_driver virtio_console = { | 1561 | static struct virtio_driver virtio_console = { |
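[Note: in the virtio_console changes the multiport fields move into a driver-local virtio_console_multiport_conf wrapper, and every config-space read switches to offsetof() on that wrapper so the byte offsets passed to the host match the layout the driver actually uses. A schematic of that access pattern; the struct mirrors the patch but the surrounding function is illustrative.]

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/virtio.h>
#include <linux/virtio_console.h>

/* Driver-local extension of the standard config layout. */
struct multiport_conf {
	struct virtio_console_config config;
	__u32 max_nr_ports;
	__u32 nr_ports;
} __attribute__((packed));

static u32 read_nr_ports(struct virtio_device *vdev)
{
	u32 nr_ports;

	/* The offset must come from the same struct the driver uses,
	 * otherwise the read lands on the wrong config-space bytes. */
	vdev->config->get(vdev,
			  offsetof(struct multiport_conf, nr_ports),
			  &nr_ports, sizeof(nr_ports));
	return nr_ports;
}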
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 702dcc98c074..14a34d99eea2 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c | |||
| @@ -960,6 +960,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) | |||
| 960 | u.packet.header_length = GET_HEADER_LENGTH(control); | 960 | u.packet.header_length = GET_HEADER_LENGTH(control); |
| 961 | 961 | ||
| 962 | if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) { | 962 | if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) { |
| 963 | if (u.packet.header_length % 4 != 0) | ||
| 964 | return -EINVAL; | ||
| 963 | header_length = u.packet.header_length; | 965 | header_length = u.packet.header_length; |
| 964 | } else { | 966 | } else { |
| 965 | /* | 967 | /* |
| @@ -969,7 +971,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) | |||
| 969 | if (ctx->header_size == 0) { | 971 | if (ctx->header_size == 0) { |
| 970 | if (u.packet.header_length > 0) | 972 | if (u.packet.header_length > 0) |
| 971 | return -EINVAL; | 973 | return -EINVAL; |
| 972 | } else if (u.packet.header_length % ctx->header_size != 0) { | 974 | } else if (u.packet.header_length == 0 || |
| 975 | u.packet.header_length % ctx->header_size != 0) { | ||
| 973 | return -EINVAL; | 976 | return -EINVAL; |
| 974 | } | 977 | } |
| 975 | header_length = 0; | 978 | header_length = 0; |
| @@ -1354,24 +1357,24 @@ static int dispatch_ioctl(struct client *client, | |||
| 1354 | return -ENODEV; | 1357 | return -ENODEV; |
| 1355 | 1358 | ||
| 1356 | if (_IOC_TYPE(cmd) != '#' || | 1359 | if (_IOC_TYPE(cmd) != '#' || |
| 1357 | _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) | 1360 | _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) || |
| 1361 | _IOC_SIZE(cmd) > sizeof(buffer)) | ||
| 1358 | return -EINVAL; | 1362 | return -EINVAL; |
| 1359 | 1363 | ||
| 1360 | if (_IOC_DIR(cmd) & _IOC_WRITE) { | 1364 | if (_IOC_DIR(cmd) == _IOC_READ) |
| 1361 | if (_IOC_SIZE(cmd) > sizeof(buffer) || | 1365 | memset(&buffer, 0, _IOC_SIZE(cmd)); |
| 1362 | copy_from_user(&buffer, arg, _IOC_SIZE(cmd))) | 1366 | |
| 1367 | if (_IOC_DIR(cmd) & _IOC_WRITE) | ||
| 1368 | if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd))) | ||
| 1363 | return -EFAULT; | 1369 | return -EFAULT; |
| 1364 | } | ||
| 1365 | 1370 | ||
| 1366 | ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer); | 1371 | ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer); |
| 1367 | if (ret < 0) | 1372 | if (ret < 0) |
| 1368 | return ret; | 1373 | return ret; |
| 1369 | 1374 | ||
| 1370 | if (_IOC_DIR(cmd) & _IOC_READ) { | 1375 | if (_IOC_DIR(cmd) & _IOC_READ) |
| 1371 | if (_IOC_SIZE(cmd) > sizeof(buffer) || | 1376 | if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd))) |
| 1372 | copy_to_user(arg, &buffer, _IOC_SIZE(cmd))) | ||
| 1373 | return -EFAULT; | 1377 | return -EFAULT; |
| 1374 | } | ||
| 1375 | 1378 | ||
| 1376 | return ret; | 1379 | return ret; |
| 1377 | } | 1380 | } |
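[Note: the dispatch_ioctl() rework above moves the size check ahead of the direction-specific copies and zeroes the buffer for read-only commands, so no stale kernel stack data can leak back to user space. A schematic dispatcher of the same shape; the handler table, buffer size, and signatures here are hypothetical.]

#include <linux/errno.h>
#include <linux/ioctl.h>
#include <linux/string.h>
#include <linux/uaccess.h>

typedef int (*ioctl_handler_t)(void *client, void *buffer);

static int dispatch(void *client, ioctl_handler_t *handlers,
		    size_t nr_handlers, unsigned int cmd, void __user *arg)
{
	char buffer[256];
	int ret;

	/* Validate command number and size before touching user memory. */
	if (_IOC_NR(cmd) >= nr_handlers || _IOC_SIZE(cmd) > sizeof(buffer))
		return -EINVAL;

	/* Read-only commands copy nothing in, so clear the buffer to
	 * avoid leaking stack contents on the copy out. */
	if (_IOC_DIR(cmd) == _IOC_READ)
		memset(buffer, 0, _IOC_SIZE(cmd));

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	ret = handlers[_IOC_NR(cmd)](client, buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

	return ret;
}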
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c index 134dd7328397..d6470ef36e4a 100644 --- a/drivers/firmware/iscsi_ibft_find.c +++ b/drivers/firmware/iscsi_ibft_find.c | |||
| @@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(ibft_addr); | |||
| 51 | * Routine used to find the iSCSI Boot Format Table. The logical | 51 | * Routine used to find the iSCSI Boot Format Table. The logical |
| 52 | * kernel address is set in the ibft_addr global variable. | 52 | * kernel address is set in the ibft_addr global variable. |
| 53 | */ | 53 | */ |
| 54 | void __init reserve_ibft_region(void) | 54 | unsigned long __init find_ibft_region(unsigned long *sizep) |
| 55 | { | 55 | { |
| 56 | unsigned long pos; | 56 | unsigned long pos; |
| 57 | unsigned int len = 0; | 57 | unsigned int len = 0; |
| @@ -77,6 +77,11 @@ void __init reserve_ibft_region(void) | |||
| 77 | } | 77 | } |
| 78 | } | 78 | } |
| 79 | } | 79 | } |
| 80 | if (ibft_addr) | 80 | if (ibft_addr) { |
| 81 | reserve_bootmem(pos, PAGE_ALIGN(len), BOOTMEM_DEFAULT); | 81 | *sizep = PAGE_ALIGN(len); |
| 82 | return pos; | ||
| 83 | } | ||
| 84 | |||
| 85 | *sizep = 0; | ||
| 86 | return 0; | ||
| 82 | } | 87 | } |
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c index ac4d0f0ea02b..ddd053108a13 100644 --- a/drivers/gpio/timbgpio.c +++ b/drivers/gpio/timbgpio.c | |||
| @@ -131,6 +131,7 @@ static int timbgpio_irq_type(unsigned irq, unsigned trigger) | |||
| 131 | unsigned long flags; | 131 | unsigned long flags; |
| 132 | u32 lvr, flr, bflr = 0; | 132 | u32 lvr, flr, bflr = 0; |
| 133 | u32 ver; | 133 | u32 ver; |
| 134 | int ret = 0; | ||
| 134 | 135 | ||
| 135 | if (offset < 0 || offset > tgpio->gpio.ngpio) | 136 | if (offset < 0 || offset > tgpio->gpio.ngpio) |
| 136 | return -EINVAL; | 137 | return -EINVAL; |
| @@ -154,8 +155,10 @@ static int timbgpio_irq_type(unsigned irq, unsigned trigger) | |||
| 154 | } | 155 | } |
| 155 | 156 | ||
| 156 | if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) { | 157 | if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) { |
| 157 | if (ver < 3) | 158 | if (ver < 3) { |
| 158 | return -EINVAL; | 159 | ret = -EINVAL; |
| 160 | goto out; | ||
| 161 | } | ||
| 159 | else { | 162 | else { |
| 160 | flr |= 1 << offset; | 163 | flr |= 1 << offset; |
| 161 | bflr |= 1 << offset; | 164 | bflr |= 1 << offset; |
| @@ -175,9 +178,10 @@ static int timbgpio_irq_type(unsigned irq, unsigned trigger) | |||
| 175 | iowrite32(bflr, tgpio->membase + TGPIO_BFLR); | 178 | iowrite32(bflr, tgpio->membase + TGPIO_BFLR); |
| 176 | 179 | ||
| 177 | iowrite32(1 << offset, tgpio->membase + TGPIO_ICR); | 180 | iowrite32(1 << offset, tgpio->membase + TGPIO_ICR); |
| 178 | spin_unlock_irqrestore(&tgpio->lock, flags); | ||
| 179 | 181 | ||
| 180 | return 0; | 182 | out: |
| 183 | spin_unlock_irqrestore(&tgpio->lock, flags); | ||
| 184 | return ret; | ||
| 181 | } | 185 | } |
| 182 | 186 | ||
| 183 | static void timbgpio_irq(unsigned int irq, struct irq_desc *desc) | 187 | static void timbgpio_irq(unsigned int irq, struct irq_desc *desc) |
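[Note: the timbgpio fix above is the classic single-exit pattern: an early "return -EINVAL" inside the locked region would have left the spinlock held with interrupts off, so the error path now jumps to a common label that does the unlock. A compact illustration with a placeholder lock and check.]

#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(cfg_lock);

static int configure(int version)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cfg_lock, flags);

	if (version < 3) {
		/* Never "return" here: that would leak the lock. */
		ret = -EINVAL;
		goto out;
	}

	/* ... program the hardware under the lock ... */

out:
	spin_unlock_irqrestore(&cfg_lock, flags);
	return ret;
}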
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 2cc6e87d849d..18f41d7061f0 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
| @@ -85,6 +85,8 @@ static struct edid_quirk { | |||
| 85 | 85 | ||
| 86 | /* Envision Peripherals, Inc. EN-7100e */ | 86 | /* Envision Peripherals, Inc. EN-7100e */ |
| 87 | { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, | 87 | { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, |
| 88 | /* Envision EN2028 */ | ||
| 89 | { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 }, | ||
| 88 | 90 | ||
| 89 | /* Funai Electronics PM36B */ | 91 | /* Funai Electronics PM36B */ |
| 90 | { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | | 92 | { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | |
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index b743411d8144..a0c365f2e521 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c | |||
| @@ -516,8 +516,6 @@ void drm_put_dev(struct drm_device *dev) | |||
| 516 | } | 516 | } |
| 517 | driver = dev->driver; | 517 | driver = dev->driver; |
| 518 | 518 | ||
| 519 | drm_vblank_cleanup(dev); | ||
| 520 | |||
| 521 | drm_lastclose(dev); | 519 | drm_lastclose(dev); |
| 522 | 520 | ||
| 523 | if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && | 521 | if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && |
| @@ -537,6 +535,8 @@ void drm_put_dev(struct drm_device *dev) | |||
| 537 | dev->agp = NULL; | 535 | dev->agp = NULL; |
| 538 | } | 536 | } |
| 539 | 537 | ||
| 538 | drm_vblank_cleanup(dev); | ||
| 539 | |||
| 540 | list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) | 540 | list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) |
| 541 | drm_rmmap(dev, r_list->map); | 541 | drm_rmmap(dev, r_list->map); |
| 542 | drm_ht_remove(&dev->map_hash); | 542 | drm_ht_remove(&dev->map_hash); |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b574503dddd0..a0b8447b06e7 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
| @@ -226,7 +226,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) | |||
| 226 | } else { | 226 | } else { |
| 227 | struct drm_i915_gem_object *obj_priv; | 227 | struct drm_i915_gem_object *obj_priv; |
| 228 | 228 | ||
| 229 | obj_priv = obj->driver_private; | 229 | obj_priv = to_intel_bo(obj); |
| 230 | seq_printf(m, "Fenced object[%2d] = %p: %s " | 230 | seq_printf(m, "Fenced object[%2d] = %p: %s " |
| 231 | "%08x %08zx %08x %s %08x %08x %d", | 231 | "%08x %08zx %08x %s %08x %08x %d", |
| 232 | i, obj, get_pin_flag(obj_priv), | 232 | i, obj, get_pin_flag(obj_priv), |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 4b26919abdb2..0af3dcc85ce9 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -80,14 +80,14 @@ const static struct intel_device_info intel_i915g_info = { | |||
| 80 | .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, | 80 | .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, |
| 81 | }; | 81 | }; |
| 82 | const static struct intel_device_info intel_i915gm_info = { | 82 | const static struct intel_device_info intel_i915gm_info = { |
| 83 | .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, | 83 | .is_i9xx = 1, .is_mobile = 1, |
| 84 | .cursor_needs_physical = 1, | 84 | .cursor_needs_physical = 1, |
| 85 | }; | 85 | }; |
| 86 | const static struct intel_device_info intel_i945g_info = { | 86 | const static struct intel_device_info intel_i945g_info = { |
| 87 | .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, | 87 | .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, |
| 88 | }; | 88 | }; |
| 89 | const static struct intel_device_info intel_i945gm_info = { | 89 | const static struct intel_device_info intel_i945gm_info = { |
| 90 | .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, | 90 | .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, |
| 91 | .has_hotplug = 1, .cursor_needs_physical = 1, | 91 | .has_hotplug = 1, .cursor_needs_physical = 1, |
| 92 | }; | 92 | }; |
| 93 | 93 | ||
| @@ -361,7 +361,7 @@ int i965_reset(struct drm_device *dev, u8 flags) | |||
| 361 | !dev_priv->mm.suspended) { | 361 | !dev_priv->mm.suspended) { |
| 362 | drm_i915_ring_buffer_t *ring = &dev_priv->ring; | 362 | drm_i915_ring_buffer_t *ring = &dev_priv->ring; |
| 363 | struct drm_gem_object *obj = ring->ring_obj; | 363 | struct drm_gem_object *obj = ring->ring_obj; |
| 364 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 364 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 365 | dev_priv->mm.suspended = 0; | 365 | dev_priv->mm.suspended = 0; |
| 366 | 366 | ||
| 367 | /* Stop the ring if it's running. */ | 367 | /* Stop the ring if it's running. */ |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index aba8260fbc5e..6960849522f8 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -611,6 +611,8 @@ typedef struct drm_i915_private { | |||
| 611 | /* Reclocking support */ | 611 | /* Reclocking support */ |
| 612 | bool render_reclock_avail; | 612 | bool render_reclock_avail; |
| 613 | bool lvds_downclock_avail; | 613 | bool lvds_downclock_avail; |
| 614 | /* indicate whether the LVDS EDID is OK */ | ||
| 615 | bool lvds_edid_good; | ||
| 614 | /* indicates the reduced downclock for LVDS*/ | 616 | /* indicates the reduced downclock for LVDS*/ |
| 615 | int lvds_downclock; | 617 | int lvds_downclock; |
| 616 | struct work_struct idle_work; | 618 | struct work_struct idle_work; |
| @@ -731,6 +733,8 @@ struct drm_i915_gem_object { | |||
| 731 | atomic_t pending_flip; | 733 | atomic_t pending_flip; |
| 732 | }; | 734 | }; |
| 733 | 735 | ||
| 736 | #define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private) | ||
| 737 | |||
| 734 | /** | 738 | /** |
| 735 | * Request queue structure. | 739 | * Request queue structure. |
| 736 | * | 740 | * |
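[Note: the new to_intel_bo() macro above centralizes the obj->driver_private cast that the following i915 hunks then use at every call site, so the conversion from a GEM object to the driver's private object lives in one place. The general accessor-macro idiom, sketched with made-up structure names and a simplified field set.]

/* Generic object carrying an opaque per-driver pointer. */
struct gem_object {
	void *driver_private;
};

/* Driver-private bookkeeping hung off every gem_object. */
struct intel_bo {
	struct gem_object *obj;
	int tiling_mode;
};

/* One well-named accessor instead of open-coded casts everywhere. */
#define to_intel_bo(x) ((struct intel_bo *)(x)->driver_private)

static int needs_swizzle(struct gem_object *obj)
{
	struct intel_bo *bo = to_intel_bo(obj);	/* was: obj->driver_private */

	return bo->tiling_mode != 0;
}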
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 368d726853d1..80871c62a571 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -163,7 +163,7 @@ fast_shmem_read(struct page **pages, | |||
| 163 | static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) | 163 | static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) |
| 164 | { | 164 | { |
| 165 | drm_i915_private_t *dev_priv = obj->dev->dev_private; | 165 | drm_i915_private_t *dev_priv = obj->dev->dev_private; |
| 166 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 166 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 167 | 167 | ||
| 168 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && | 168 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && |
| 169 | obj_priv->tiling_mode != I915_TILING_NONE; | 169 | obj_priv->tiling_mode != I915_TILING_NONE; |
| @@ -264,7 +264,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 264 | struct drm_i915_gem_pread *args, | 264 | struct drm_i915_gem_pread *args, |
| 265 | struct drm_file *file_priv) | 265 | struct drm_file *file_priv) |
| 266 | { | 266 | { |
| 267 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 267 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 268 | ssize_t remain; | 268 | ssize_t remain; |
| 269 | loff_t offset, page_base; | 269 | loff_t offset, page_base; |
| 270 | char __user *user_data; | 270 | char __user *user_data; |
| @@ -285,7 +285,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 285 | if (ret != 0) | 285 | if (ret != 0) |
| 286 | goto fail_put_pages; | 286 | goto fail_put_pages; |
| 287 | 287 | ||
| 288 | obj_priv = obj->driver_private; | 288 | obj_priv = to_intel_bo(obj); |
| 289 | offset = args->offset; | 289 | offset = args->offset; |
| 290 | 290 | ||
| 291 | while (remain > 0) { | 291 | while (remain > 0) { |
| @@ -354,7 +354,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 354 | struct drm_i915_gem_pread *args, | 354 | struct drm_i915_gem_pread *args, |
| 355 | struct drm_file *file_priv) | 355 | struct drm_file *file_priv) |
| 356 | { | 356 | { |
| 357 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 357 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 358 | struct mm_struct *mm = current->mm; | 358 | struct mm_struct *mm = current->mm; |
| 359 | struct page **user_pages; | 359 | struct page **user_pages; |
| 360 | ssize_t remain; | 360 | ssize_t remain; |
| @@ -403,7 +403,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 403 | if (ret != 0) | 403 | if (ret != 0) |
| 404 | goto fail_put_pages; | 404 | goto fail_put_pages; |
| 405 | 405 | ||
| 406 | obj_priv = obj->driver_private; | 406 | obj_priv = to_intel_bo(obj); |
| 407 | offset = args->offset; | 407 | offset = args->offset; |
| 408 | 408 | ||
| 409 | while (remain > 0) { | 409 | while (remain > 0) { |
| @@ -479,7 +479,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
| 479 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 479 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
| 480 | if (obj == NULL) | 480 | if (obj == NULL) |
| 481 | return -EBADF; | 481 | return -EBADF; |
| 482 | obj_priv = obj->driver_private; | 482 | obj_priv = to_intel_bo(obj); |
| 483 | 483 | ||
| 484 | /* Bounds check source. | 484 | /* Bounds check source. |
| 485 | * | 485 | * |
| @@ -581,7 +581,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 581 | struct drm_i915_gem_pwrite *args, | 581 | struct drm_i915_gem_pwrite *args, |
| 582 | struct drm_file *file_priv) | 582 | struct drm_file *file_priv) |
| 583 | { | 583 | { |
| 584 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 584 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 585 | drm_i915_private_t *dev_priv = dev->dev_private; | 585 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 586 | ssize_t remain; | 586 | ssize_t remain; |
| 587 | loff_t offset, page_base; | 587 | loff_t offset, page_base; |
| @@ -605,7 +605,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 605 | if (ret) | 605 | if (ret) |
| 606 | goto fail; | 606 | goto fail; |
| 607 | 607 | ||
| 608 | obj_priv = obj->driver_private; | 608 | obj_priv = to_intel_bo(obj); |
| 609 | offset = obj_priv->gtt_offset + args->offset; | 609 | offset = obj_priv->gtt_offset + args->offset; |
| 610 | 610 | ||
| 611 | while (remain > 0) { | 611 | while (remain > 0) { |
| @@ -655,7 +655,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 655 | struct drm_i915_gem_pwrite *args, | 655 | struct drm_i915_gem_pwrite *args, |
| 656 | struct drm_file *file_priv) | 656 | struct drm_file *file_priv) |
| 657 | { | 657 | { |
| 658 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 658 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 659 | drm_i915_private_t *dev_priv = dev->dev_private; | 659 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 660 | ssize_t remain; | 660 | ssize_t remain; |
| 661 | loff_t gtt_page_base, offset; | 661 | loff_t gtt_page_base, offset; |
| @@ -699,7 +699,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 699 | if (ret) | 699 | if (ret) |
| 700 | goto out_unpin_object; | 700 | goto out_unpin_object; |
| 701 | 701 | ||
| 702 | obj_priv = obj->driver_private; | 702 | obj_priv = to_intel_bo(obj); |
| 703 | offset = obj_priv->gtt_offset + args->offset; | 703 | offset = obj_priv->gtt_offset + args->offset; |
| 704 | 704 | ||
| 705 | while (remain > 0) { | 705 | while (remain > 0) { |
| @@ -761,7 +761,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 761 | struct drm_i915_gem_pwrite *args, | 761 | struct drm_i915_gem_pwrite *args, |
| 762 | struct drm_file *file_priv) | 762 | struct drm_file *file_priv) |
| 763 | { | 763 | { |
| 764 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 764 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 765 | ssize_t remain; | 765 | ssize_t remain; |
| 766 | loff_t offset, page_base; | 766 | loff_t offset, page_base; |
| 767 | char __user *user_data; | 767 | char __user *user_data; |
| @@ -781,7 +781,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 781 | if (ret != 0) | 781 | if (ret != 0) |
| 782 | goto fail_put_pages; | 782 | goto fail_put_pages; |
| 783 | 783 | ||
| 784 | obj_priv = obj->driver_private; | 784 | obj_priv = to_intel_bo(obj); |
| 785 | offset = args->offset; | 785 | offset = args->offset; |
| 786 | obj_priv->dirty = 1; | 786 | obj_priv->dirty = 1; |
| 787 | 787 | ||
| @@ -829,7 +829,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 829 | struct drm_i915_gem_pwrite *args, | 829 | struct drm_i915_gem_pwrite *args, |
| 830 | struct drm_file *file_priv) | 830 | struct drm_file *file_priv) |
| 831 | { | 831 | { |
| 832 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 832 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 833 | struct mm_struct *mm = current->mm; | 833 | struct mm_struct *mm = current->mm; |
| 834 | struct page **user_pages; | 834 | struct page **user_pages; |
| 835 | ssize_t remain; | 835 | ssize_t remain; |
| @@ -877,7 +877,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 877 | if (ret != 0) | 877 | if (ret != 0) |
| 878 | goto fail_put_pages; | 878 | goto fail_put_pages; |
| 879 | 879 | ||
| 880 | obj_priv = obj->driver_private; | 880 | obj_priv = to_intel_bo(obj); |
| 881 | offset = args->offset; | 881 | offset = args->offset; |
| 882 | obj_priv->dirty = 1; | 882 | obj_priv->dirty = 1; |
| 883 | 883 | ||
| @@ -952,7 +952,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
| 952 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 952 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
| 953 | if (obj == NULL) | 953 | if (obj == NULL) |
| 954 | return -EBADF; | 954 | return -EBADF; |
| 955 | obj_priv = obj->driver_private; | 955 | obj_priv = to_intel_bo(obj); |
| 956 | 956 | ||
| 957 | /* Bounds check destination. | 957 | /* Bounds check destination. |
| 958 | * | 958 | * |
| @@ -1034,7 +1034,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
| 1034 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1034 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
| 1035 | if (obj == NULL) | 1035 | if (obj == NULL) |
| 1036 | return -EBADF; | 1036 | return -EBADF; |
| 1037 | obj_priv = obj->driver_private; | 1037 | obj_priv = to_intel_bo(obj); |
| 1038 | 1038 | ||
| 1039 | mutex_lock(&dev->struct_mutex); | 1039 | mutex_lock(&dev->struct_mutex); |
| 1040 | 1040 | ||
| @@ -1096,7 +1096,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
| 1096 | DRM_INFO("%s: sw_finish %d (%p %zd)\n", | 1096 | DRM_INFO("%s: sw_finish %d (%p %zd)\n", |
| 1097 | __func__, args->handle, obj, obj->size); | 1097 | __func__, args->handle, obj, obj->size); |
| 1098 | #endif | 1098 | #endif |
| 1099 | obj_priv = obj->driver_private; | 1099 | obj_priv = to_intel_bo(obj); |
| 1100 | 1100 | ||
| 1101 | /* Pinned buffers may be scanout, so flush the cache */ | 1101 | /* Pinned buffers may be scanout, so flush the cache */ |
| 1102 | if (obj_priv->pin_count) | 1102 | if (obj_priv->pin_count) |
| @@ -1167,7 +1167,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 1167 | struct drm_gem_object *obj = vma->vm_private_data; | 1167 | struct drm_gem_object *obj = vma->vm_private_data; |
| 1168 | struct drm_device *dev = obj->dev; | 1168 | struct drm_device *dev = obj->dev; |
| 1169 | struct drm_i915_private *dev_priv = dev->dev_private; | 1169 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1170 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1170 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 1171 | pgoff_t page_offset; | 1171 | pgoff_t page_offset; |
| 1172 | unsigned long pfn; | 1172 | unsigned long pfn; |
| 1173 | int ret = 0; | 1173 | int ret = 0; |
| @@ -1234,7 +1234,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) | |||
| 1234 | { | 1234 | { |
| 1235 | struct drm_device *dev = obj->dev; | 1235 | struct drm_device *dev = obj->dev; |
| 1236 | struct drm_gem_mm *mm = dev->mm_private; | 1236 | struct drm_gem_mm *mm = dev->mm_private; |
| 1237 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1237 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 1238 | struct drm_map_list *list; | 1238 | struct drm_map_list *list; |
| 1239 | struct drm_local_map *map; | 1239 | struct drm_local_map *map; |
| 1240 | int ret = 0; | 1240 | int ret = 0; |
| @@ -1305,7 +1305,7 @@ void | |||
| 1305 | i915_gem_release_mmap(struct drm_gem_object *obj) | 1305 | i915_gem_release_mmap(struct drm_gem_object *obj) |
| 1306 | { | 1306 | { |
| 1307 | struct drm_device *dev = obj->dev; | 1307 | struct drm_device *dev = obj->dev; |
| 1308 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1308 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 1309 | 1309 | ||
| 1310 | if (dev->dev_mapping) | 1310 | if (dev->dev_mapping) |
| 1311 | unmap_mapping_range(dev->dev_mapping, | 1311 | unmap_mapping_range(dev->dev_mapping, |
| @@ -1316,7 +1316,7 @@ static void | |||
| 1316 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) | 1316 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) |
| 1317 | { | 1317 | { |
| 1318 | struct drm_device *dev = obj->dev; | 1318 | struct drm_device *dev = obj->dev; |
| 1319 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1319 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 1320 | struct drm_gem_mm *mm = dev->mm_private; | 1320 | struct drm_gem_mm *mm = dev->mm_private; |
| 1321 | struct drm_map_list *list; | 1321 | struct drm_map_list *list; |
| 1322 | 1322 | ||
| @@ -1347,7 +1347,7 @@ static uint32_t | |||
| 1347 | i915_gem_get_gtt_alignment(struct drm_gem_object *obj) | 1347 | i915_gem_get_gtt_alignment(struct drm_gem_object *obj) |
| 1348 | { | 1348 | { |
| 1349 | struct drm_device *dev = obj->dev; | 1349 | struct drm_device *dev = obj->dev; |
| 1350 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1350 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 1351 | int start, i; | 1351 | int start, i; |
| 1352 | 1352 | ||
| 1353 | /* | 1353 | /* |
| @@ -1406,7 +1406,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
| 1406 | 1406 | ||
| 1407 | mutex_lock(&dev->struct_mutex); | 1407 | mutex_lock(&dev->struct_mutex); |
| 1408 | 1408 | ||
| 1409 | obj_priv = obj->driver_private; | 1409 | obj_priv = to_intel_bo(obj); |
| 1410 | 1410 | ||
| 1411 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 1411 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
| 1412 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); | 1412 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); |
| @@ -1450,7 +1450,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
| 1450 | void | 1450 | void |
| 1451 | i915_gem_object_put_pages(struct drm_gem_object *obj) | 1451 | i915_gem_object_put_pages(struct drm_gem_object *obj) |
| 1452 | { | 1452 | { |
| 1453 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1453 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 1454 | int page_count = obj->size / PAGE_SIZE; | 1454 | int page_count = obj->size / PAGE_SIZE; |
| 1455 | int i; | 1455 | int i; |
| 1456 | 1456 | ||
| @@ -1486,7 +1486,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) | |||
| 1486 | { | 1486 | { |
| 1487 | struct drm_device *dev = obj->dev; | 1487 | struct drm_device *dev = obj->dev; |
| 1488 | drm_i915_private_t *dev_priv = dev->dev_private; | 1488 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1489 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1489 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 1490 | 1490 | ||
| 1491 | /* Add a reference if we're newly entering the active list. */ | 1491 | /* Add a reference if we're newly entering the active list. */ |
| 1492 | if (!obj_priv->active) { | 1492 | if (!obj_priv->active) { |
| @@ -1506,7 +1506,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | |||
| 1506 | { | 1506 | { |
| 1507 | struct drm_device *dev = obj->dev; | 1507 | struct drm_device *dev = obj->dev; |
| 1508 | drm_i915_private_t *dev_priv = dev->dev_private; | 1508 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1509 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1509 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 1510 | 1510 | ||
| 1511 | BUG_ON(!obj_priv->active); | 1511 | BUG_ON(!obj_priv->active); |
| 1512 | list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); | 1512 | list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); |
| @@ -1517,7 +1517,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | |||
| 1517 | static void | 1517 | static void |
| 1518 | i915_gem_object_truncate(struct drm_gem_object *obj) | 1518 | i915_gem_object_truncate(struct drm_gem_object *obj) |
| 1519 | { | 1519 | { |
| 1520 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1520 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 1521 | struct inode *inode; | 1521 | struct inode *inode; |
| 1522 | 1522 | ||
| 1523 | inode = obj->filp->f_path.dentry->d_inode; | 1523 | inode = obj->filp->f_path.dentry->d_inode; |
| @@ -1538,7 +1538,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | |||
| 1538 | { | 1538 | { |
| 1539 | struct drm_device *dev = obj->dev; | 1539 | struct drm_device *dev = obj->dev; |
| 1540 | drm_i915_private_t *dev_priv = dev->dev_private; | 1540 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1541 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1541 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 1542 | 1542 | ||
| 1543 | i915_verify_inactive(dev, __FILE__, __LINE__); | 1543 | i915_verify_inactive(dev, __FILE__, __LINE__); |
| 1544 | if (obj_priv->pin_count != 0) | 1544 | if (obj_priv->pin_count != 0) |
| @@ -1965,7 +1965,7 @@ static int | |||
| 1965 | i915_gem_object_wait_rendering(struct drm_gem_object *obj) | 1965 | i915_gem_object_wait_rendering(struct drm_gem_object *obj) |
| 1966 | { | 1966 | { |
| 1967 | struct drm_device *dev = obj->dev; | 1967 | struct drm_device *dev = obj->dev; |
| 1968 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1968 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 1969 | int ret; | 1969 | int ret; |
| 1970 | 1970 | ||
| 1971 | /* This function only exists to support waiting for existing rendering, | 1971 | /* This function only exists to support waiting for existing rendering, |
| @@ -1997,7 +1997,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
| 1997 | { | 1997 | { |
| 1998 | struct drm_device *dev = obj->dev; | 1998 | struct drm_device *dev = obj->dev; |
| 1999 | drm_i915_private_t *dev_priv = dev->dev_private; | 1999 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 2000 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2000 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 2001 | int ret = 0; | 2001 | int ret = 0; |
| 2002 | 2002 | ||
| 2003 | #if WATCH_BUF | 2003 | #if WATCH_BUF |
| @@ -2173,7 +2173,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) | |||
| 2173 | #if WATCH_LRU | 2173 | #if WATCH_LRU |
| 2174 | DRM_INFO("%s: evicting %p\n", __func__, obj); | 2174 | DRM_INFO("%s: evicting %p\n", __func__, obj); |
| 2175 | #endif | 2175 | #endif |
| 2176 | obj_priv = obj->driver_private; | 2176 | obj_priv = to_intel_bo(obj); |
| 2177 | BUG_ON(obj_priv->pin_count != 0); | 2177 | BUG_ON(obj_priv->pin_count != 0); |
| 2178 | BUG_ON(obj_priv->active); | 2178 | BUG_ON(obj_priv->active); |
| 2179 | 2179 | ||
| @@ -2244,7 +2244,7 @@ int | |||
| 2244 | i915_gem_object_get_pages(struct drm_gem_object *obj, | 2244 | i915_gem_object_get_pages(struct drm_gem_object *obj, |
| 2245 | gfp_t gfpmask) | 2245 | gfp_t gfpmask) |
| 2246 | { | 2246 | { |
| 2247 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2247 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 2248 | int page_count, i; | 2248 | int page_count, i; |
| 2249 | struct address_space *mapping; | 2249 | struct address_space *mapping; |
| 2250 | struct inode *inode; | 2250 | struct inode *inode; |
| @@ -2297,7 +2297,7 @@ static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
| 2297 | struct drm_gem_object *obj = reg->obj; | 2297 | struct drm_gem_object *obj = reg->obj; |
| 2298 | struct drm_device *dev = obj->dev; | 2298 | struct drm_device *dev = obj->dev; |
| 2299 | drm_i915_private_t *dev_priv = dev->dev_private; | 2299 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 2300 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2300 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 2301 | int regnum = obj_priv->fence_reg; | 2301 | int regnum = obj_priv->fence_reg; |
| 2302 | uint64_t val; | 2302 | uint64_t val; |
| 2303 | 2303 | ||
| @@ -2319,7 +2319,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
| 2319 | struct drm_gem_object *obj = reg->obj; | 2319 | struct drm_gem_object *obj = reg->obj; |
| 2320 | struct drm_device *dev = obj->dev; | 2320 | struct drm_device *dev = obj->dev; |
| 2321 | drm_i915_private_t *dev_priv = dev->dev_private; | 2321 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 2322 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2322 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 2323 | int regnum = obj_priv->fence_reg; | 2323 | int regnum = obj_priv->fence_reg; |
| 2324 | uint64_t val; | 2324 | uint64_t val; |
| 2325 | 2325 | ||
| @@ -2339,7 +2339,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
| 2339 | struct drm_gem_object *obj = reg->obj; | 2339 | struct drm_gem_object *obj = reg->obj; |
| 2340 | struct drm_device *dev = obj->dev; | 2340 | struct drm_device *dev = obj->dev; |
| 2341 | drm_i915_private_t *dev_priv = dev->dev_private; | 2341 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 2342 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2342 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 2343 | int regnum = obj_priv->fence_reg; | 2343 | int regnum = obj_priv->fence_reg; |
| 2344 | int tile_width; | 2344 | int tile_width; |
| 2345 | uint32_t fence_reg, val; | 2345 | uint32_t fence_reg, val; |
| @@ -2381,7 +2381,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
| 2381 | struct drm_gem_object *obj = reg->obj; | 2381 | struct drm_gem_object *obj = reg->obj; |
| 2382 | struct drm_device *dev = obj->dev; | 2382 | struct drm_device *dev = obj->dev; |
| 2383 | drm_i915_private_t *dev_priv = dev->dev_private; | 2383 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 2384 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2384 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 2385 | int regnum = obj_priv->fence_reg; | 2385 | int regnum = obj_priv->fence_reg; |
| 2386 | uint32_t val; | 2386 | uint32_t val; |
| 2387 | uint32_t pitch_val; | 2387 | uint32_t pitch_val; |
| @@ -2425,7 +2425,7 @@ static int i915_find_fence_reg(struct drm_device *dev) | |||
| 2425 | if (!reg->obj) | 2425 | if (!reg->obj) |
| 2426 | return i; | 2426 | return i; |
| 2427 | 2427 | ||
| 2428 | obj_priv = reg->obj->driver_private; | 2428 | obj_priv = to_intel_bo(reg->obj); |
| 2429 | if (!obj_priv->pin_count) | 2429 | if (!obj_priv->pin_count) |
| 2430 | avail++; | 2430 | avail++; |
| 2431 | } | 2431 | } |
| @@ -2480,7 +2480,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | |||
| 2480 | { | 2480 | { |
| 2481 | struct drm_device *dev = obj->dev; | 2481 | struct drm_device *dev = obj->dev; |
| 2482 | struct drm_i915_private *dev_priv = dev->dev_private; | 2482 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2483 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2483 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 2484 | struct drm_i915_fence_reg *reg = NULL; | 2484 | struct drm_i915_fence_reg *reg = NULL; |
| 2485 | int ret; | 2485 | int ret; |
| 2486 | 2486 | ||
| @@ -2547,7 +2547,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) | |||
| 2547 | { | 2547 | { |
| 2548 | struct drm_device *dev = obj->dev; | 2548 | struct drm_device *dev = obj->dev; |
| 2549 | drm_i915_private_t *dev_priv = dev->dev_private; | 2549 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 2550 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2550 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 2551 | 2551 | ||
| 2552 | if (IS_GEN6(dev)) { | 2552 | if (IS_GEN6(dev)) { |
| 2553 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + | 2553 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + |
| @@ -2583,7 +2583,7 @@ int | |||
| 2583 | i915_gem_object_put_fence_reg(struct drm_gem_object *obj) | 2583 | i915_gem_object_put_fence_reg(struct drm_gem_object *obj) |
| 2584 | { | 2584 | { |
| 2585 | struct drm_device *dev = obj->dev; | 2585 | struct drm_device *dev = obj->dev; |
| 2586 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2586 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 2587 | 2587 | ||
| 2588 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) | 2588 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) |
| 2589 | return 0; | 2589 | return 0; |
| @@ -2621,7 +2621,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
| 2621 | { | 2621 | { |
| 2622 | struct drm_device *dev = obj->dev; | 2622 | struct drm_device *dev = obj->dev; |
| 2623 | drm_i915_private_t *dev_priv = dev->dev_private; | 2623 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 2624 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2624 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 2625 | struct drm_mm_node *free_space; | 2625 | struct drm_mm_node *free_space; |
| 2626 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; | 2626 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; |
| 2627 | int ret; | 2627 | int ret; |
| @@ -2728,7 +2728,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
| 2728 | void | 2728 | void |
| 2729 | i915_gem_clflush_object(struct drm_gem_object *obj) | 2729 | i915_gem_clflush_object(struct drm_gem_object *obj) |
| 2730 | { | 2730 | { |
| 2731 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2731 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 2732 | 2732 | ||
| 2733 | /* If we don't have a page list set up, then we're not pinned | 2733 | /* If we don't have a page list set up, then we're not pinned |
| 2734 | * to GPU, and we can ignore the cache flush because it'll happen | 2734 | * to GPU, and we can ignore the cache flush because it'll happen |
| @@ -2829,7 +2829,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj) | |||
| 2829 | int | 2829 | int |
| 2830 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | 2830 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) |
| 2831 | { | 2831 | { |
| 2832 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2832 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 2833 | uint32_t old_write_domain, old_read_domains; | 2833 | uint32_t old_write_domain, old_read_domains; |
| 2834 | int ret; | 2834 | int ret; |
| 2835 | 2835 | ||
| @@ -2879,7 +2879,7 @@ int | |||
| 2879 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) | 2879 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) |
| 2880 | { | 2880 | { |
| 2881 | struct drm_device *dev = obj->dev; | 2881 | struct drm_device *dev = obj->dev; |
| 2882 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2882 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 2883 | uint32_t old_write_domain, old_read_domains; | 2883 | uint32_t old_write_domain, old_read_domains; |
| 2884 | int ret; | 2884 | int ret; |
| 2885 | 2885 | ||
| @@ -3092,7 +3092,7 @@ static void | |||
| 3092 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | 3092 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) |
| 3093 | { | 3093 | { |
| 3094 | struct drm_device *dev = obj->dev; | 3094 | struct drm_device *dev = obj->dev; |
| 3095 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3095 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 3096 | uint32_t invalidate_domains = 0; | 3096 | uint32_t invalidate_domains = 0; |
| 3097 | uint32_t flush_domains = 0; | 3097 | uint32_t flush_domains = 0; |
| 3098 | uint32_t old_read_domains; | 3098 | uint32_t old_read_domains; |
| @@ -3177,7 +3177,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
| 3177 | static void | 3177 | static void |
| 3178 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) | 3178 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) |
| 3179 | { | 3179 | { |
| 3180 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3180 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 3181 | 3181 | ||
| 3182 | if (!obj_priv->page_cpu_valid) | 3182 | if (!obj_priv->page_cpu_valid) |
| 3183 | return; | 3183 | return; |
| @@ -3217,7 +3217,7 @@ static int | |||
| 3217 | i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | 3217 | i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, |
| 3218 | uint64_t offset, uint64_t size) | 3218 | uint64_t offset, uint64_t size) |
| 3219 | { | 3219 | { |
| 3220 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3220 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 3221 | uint32_t old_read_domains; | 3221 | uint32_t old_read_domains; |
| 3222 | int i, ret; | 3222 | int i, ret; |
| 3223 | 3223 | ||
| @@ -3286,7 +3286,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
| 3286 | { | 3286 | { |
| 3287 | struct drm_device *dev = obj->dev; | 3287 | struct drm_device *dev = obj->dev; |
| 3288 | drm_i915_private_t *dev_priv = dev->dev_private; | 3288 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 3289 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3289 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 3290 | int i, ret; | 3290 | int i, ret; |
| 3291 | void __iomem *reloc_page; | 3291 | void __iomem *reloc_page; |
| 3292 | bool need_fence; | 3292 | bool need_fence; |
| @@ -3337,7 +3337,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
| 3337 | i915_gem_object_unpin(obj); | 3337 | i915_gem_object_unpin(obj); |
| 3338 | return -EBADF; | 3338 | return -EBADF; |
| 3339 | } | 3339 | } |
| 3340 | target_obj_priv = target_obj->driver_private; | 3340 | target_obj_priv = to_intel_bo(target_obj); |
| 3341 | 3341 | ||
| 3342 | #if WATCH_RELOC | 3342 | #if WATCH_RELOC |
| 3343 | DRM_INFO("%s: obj %p offset %08x target %d " | 3343 | DRM_INFO("%s: obj %p offset %08x target %d " |
| @@ -3689,7 +3689,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev, | |||
| 3689 | prepare_to_wait(&dev_priv->pending_flip_queue, | 3689 | prepare_to_wait(&dev_priv->pending_flip_queue, |
| 3690 | &wait, TASK_INTERRUPTIBLE); | 3690 | &wait, TASK_INTERRUPTIBLE); |
| 3691 | for (i = 0; i < count; i++) { | 3691 | for (i = 0; i < count; i++) { |
| 3692 | obj_priv = object_list[i]->driver_private; | 3692 | obj_priv = to_intel_bo(object_list[i]); |
| 3693 | if (atomic_read(&obj_priv->pending_flip) > 0) | 3693 | if (atomic_read(&obj_priv->pending_flip) > 0) |
| 3694 | break; | 3694 | break; |
| 3695 | } | 3695 | } |
| @@ -3798,7 +3798,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 3798 | goto err; | 3798 | goto err; |
| 3799 | } | 3799 | } |
| 3800 | 3800 | ||
| 3801 | obj_priv = object_list[i]->driver_private; | 3801 | obj_priv = to_intel_bo(object_list[i]); |
| 3802 | if (obj_priv->in_execbuffer) { | 3802 | if (obj_priv->in_execbuffer) { |
| 3803 | DRM_ERROR("Object %p appears more than once in object list\n", | 3803 | DRM_ERROR("Object %p appears more than once in object list\n", |
| 3804 | object_list[i]); | 3804 | object_list[i]); |
| @@ -3924,7 +3924,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
| 3924 | 3924 | ||
| 3925 | for (i = 0; i < args->buffer_count; i++) { | 3925 | for (i = 0; i < args->buffer_count; i++) { |
| 3926 | struct drm_gem_object *obj = object_list[i]; | 3926 | struct drm_gem_object *obj = object_list[i]; |
| 3927 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3927 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 3928 | uint32_t old_write_domain = obj->write_domain; | 3928 | uint32_t old_write_domain = obj->write_domain; |
| 3929 | 3929 | ||
| 3930 | obj->write_domain = obj->pending_write_domain; | 3930 | obj->write_domain = obj->pending_write_domain; |
| @@ -3999,7 +3999,7 @@ err: | |||
| 3999 | 3999 | ||
| 4000 | for (i = 0; i < args->buffer_count; i++) { | 4000 | for (i = 0; i < args->buffer_count; i++) { |
| 4001 | if (object_list[i]) { | 4001 | if (object_list[i]) { |
| 4002 | obj_priv = object_list[i]->driver_private; | 4002 | obj_priv = to_intel_bo(object_list[i]); |
| 4003 | obj_priv->in_execbuffer = false; | 4003 | obj_priv->in_execbuffer = false; |
| 4004 | } | 4004 | } |
| 4005 | drm_gem_object_unreference(object_list[i]); | 4005 | drm_gem_object_unreference(object_list[i]); |
| @@ -4177,7 +4177,7 @@ int | |||
| 4177 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | 4177 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) |
| 4178 | { | 4178 | { |
| 4179 | struct drm_device *dev = obj->dev; | 4179 | struct drm_device *dev = obj->dev; |
| 4180 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 4180 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 4181 | int ret; | 4181 | int ret; |
| 4182 | 4182 | ||
| 4183 | i915_verify_inactive(dev, __FILE__, __LINE__); | 4183 | i915_verify_inactive(dev, __FILE__, __LINE__); |
| @@ -4210,7 +4210,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) | |||
| 4210 | { | 4210 | { |
| 4211 | struct drm_device *dev = obj->dev; | 4211 | struct drm_device *dev = obj->dev; |
| 4212 | drm_i915_private_t *dev_priv = dev->dev_private; | 4212 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 4213 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 4213 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 4214 | 4214 | ||
| 4215 | i915_verify_inactive(dev, __FILE__, __LINE__); | 4215 | i915_verify_inactive(dev, __FILE__, __LINE__); |
| 4216 | obj_priv->pin_count--; | 4216 | obj_priv->pin_count--; |
| @@ -4250,7 +4250,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
| 4250 | mutex_unlock(&dev->struct_mutex); | 4250 | mutex_unlock(&dev->struct_mutex); |
| 4251 | return -EBADF; | 4251 | return -EBADF; |
| 4252 | } | 4252 | } |
| 4253 | obj_priv = obj->driver_private; | 4253 | obj_priv = to_intel_bo(obj); |
| 4254 | 4254 | ||
| 4255 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 4255 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
| 4256 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); | 4256 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); |
| @@ -4307,7 +4307,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | |||
| 4307 | return -EBADF; | 4307 | return -EBADF; |
| 4308 | } | 4308 | } |
| 4309 | 4309 | ||
| 4310 | obj_priv = obj->driver_private; | 4310 | obj_priv = to_intel_bo(obj); |
| 4311 | if (obj_priv->pin_filp != file_priv) { | 4311 | if (obj_priv->pin_filp != file_priv) { |
| 4312 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", | 4312 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", |
| 4313 | args->handle); | 4313 | args->handle); |
| @@ -4349,7 +4349,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
| 4349 | */ | 4349 | */ |
| 4350 | i915_gem_retire_requests(dev); | 4350 | i915_gem_retire_requests(dev); |
| 4351 | 4351 | ||
| 4352 | obj_priv = obj->driver_private; | 4352 | obj_priv = to_intel_bo(obj); |
| 4353 | /* Don't count being on the flushing list against the object being | 4353 | /* Don't count being on the flushing list against the object being |
| 4354 | * done. Otherwise, a buffer left on the flushing list but not getting | 4354 | * done. Otherwise, a buffer left on the flushing list but not getting |
| 4355 | * flushed (because nobody's flushing that domain) won't ever return | 4355 | * flushed (because nobody's flushing that domain) won't ever return |
| @@ -4395,7 +4395,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
| 4395 | } | 4395 | } |
| 4396 | 4396 | ||
| 4397 | mutex_lock(&dev->struct_mutex); | 4397 | mutex_lock(&dev->struct_mutex); |
| 4398 | obj_priv = obj->driver_private; | 4398 | obj_priv = to_intel_bo(obj); |
| 4399 | 4399 | ||
| 4400 | if (obj_priv->pin_count) { | 4400 | if (obj_priv->pin_count) { |
| 4401 | drm_gem_object_unreference(obj); | 4401 | drm_gem_object_unreference(obj); |
| @@ -4456,7 +4456,7 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
| 4456 | void i915_gem_free_object(struct drm_gem_object *obj) | 4456 | void i915_gem_free_object(struct drm_gem_object *obj) |
| 4457 | { | 4457 | { |
| 4458 | struct drm_device *dev = obj->dev; | 4458 | struct drm_device *dev = obj->dev; |
| 4459 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 4459 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 4460 | 4460 | ||
| 4461 | trace_i915_gem_object_destroy(obj); | 4461 | trace_i915_gem_object_destroy(obj); |
| 4462 | 4462 | ||
| @@ -4565,7 +4565,7 @@ i915_gem_init_hws(struct drm_device *dev) | |||
| 4565 | DRM_ERROR("Failed to allocate status page\n"); | 4565 | DRM_ERROR("Failed to allocate status page\n"); |
| 4566 | return -ENOMEM; | 4566 | return -ENOMEM; |
| 4567 | } | 4567 | } |
| 4568 | obj_priv = obj->driver_private; | 4568 | obj_priv = to_intel_bo(obj); |
| 4569 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | 4569 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; |
| 4570 | 4570 | ||
| 4571 | ret = i915_gem_object_pin(obj, 4096); | 4571 | ret = i915_gem_object_pin(obj, 4096); |
| @@ -4609,7 +4609,7 @@ i915_gem_cleanup_hws(struct drm_device *dev) | |||
| 4609 | return; | 4609 | return; |
| 4610 | 4610 | ||
| 4611 | obj = dev_priv->hws_obj; | 4611 | obj = dev_priv->hws_obj; |
| 4612 | obj_priv = obj->driver_private; | 4612 | obj_priv = to_intel_bo(obj); |
| 4613 | 4613 | ||
| 4614 | kunmap(obj_priv->pages[0]); | 4614 | kunmap(obj_priv->pages[0]); |
| 4615 | i915_gem_object_unpin(obj); | 4615 | i915_gem_object_unpin(obj); |
| @@ -4643,7 +4643,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev) | |||
| 4643 | i915_gem_cleanup_hws(dev); | 4643 | i915_gem_cleanup_hws(dev); |
| 4644 | return -ENOMEM; | 4644 | return -ENOMEM; |
| 4645 | } | 4645 | } |
| 4646 | obj_priv = obj->driver_private; | 4646 | obj_priv = to_intel_bo(obj); |
| 4647 | 4647 | ||
| 4648 | ret = i915_gem_object_pin(obj, 4096); | 4648 | ret = i915_gem_object_pin(obj, 4096); |
| 4649 | if (ret != 0) { | 4649 | if (ret != 0) { |
| @@ -4936,7 +4936,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
| 4936 | int ret; | 4936 | int ret; |
| 4937 | int page_count; | 4937 | int page_count; |
| 4938 | 4938 | ||
| 4939 | obj_priv = obj->driver_private; | 4939 | obj_priv = to_intel_bo(obj); |
| 4940 | if (!obj_priv->phys_obj) | 4940 | if (!obj_priv->phys_obj) |
| 4941 | return; | 4941 | return; |
| 4942 | 4942 | ||
| @@ -4975,7 +4975,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
| 4975 | if (id > I915_MAX_PHYS_OBJECT) | 4975 | if (id > I915_MAX_PHYS_OBJECT) |
| 4976 | return -EINVAL; | 4976 | return -EINVAL; |
| 4977 | 4977 | ||
| 4978 | obj_priv = obj->driver_private; | 4978 | obj_priv = to_intel_bo(obj); |
| 4979 | 4979 | ||
| 4980 | if (obj_priv->phys_obj) { | 4980 | if (obj_priv->phys_obj) { |
| 4981 | if (obj_priv->phys_obj->id == id) | 4981 | if (obj_priv->phys_obj->id == id) |
| @@ -5026,7 +5026,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | |||
| 5026 | struct drm_i915_gem_pwrite *args, | 5026 | struct drm_i915_gem_pwrite *args, |
| 5027 | struct drm_file *file_priv) | 5027 | struct drm_file *file_priv) |
| 5028 | { | 5028 | { |
| 5029 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 5029 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 5030 | void *obj_addr; | 5030 | void *obj_addr; |
| 5031 | int ret; | 5031 | int ret; |
| 5032 | char __user *user_data; | 5032 | char __user *user_data; |
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index e602614bd3f8..35507cf53fa3 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c | |||
| @@ -72,7 +72,7 @@ void | |||
| 72 | i915_gem_dump_object(struct drm_gem_object *obj, int len, | 72 | i915_gem_dump_object(struct drm_gem_object *obj, int len, |
| 73 | const char *where, uint32_t mark) | 73 | const char *where, uint32_t mark) |
| 74 | { | 74 | { |
| 75 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 75 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 76 | int page; | 76 | int page; |
| 77 | 77 | ||
| 78 | DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); | 78 | DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); |
| @@ -137,7 +137,7 @@ void | |||
| 137 | i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | 137 | i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) |
| 138 | { | 138 | { |
| 139 | struct drm_device *dev = obj->dev; | 139 | struct drm_device *dev = obj->dev; |
| 140 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 140 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 141 | int page; | 141 | int page; |
| 142 | uint32_t *gtt_mapping; | 142 | uint32_t *gtt_mapping; |
| 143 | uint32_t *backing_map = NULL; | 143 | uint32_t *backing_map = NULL; |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index c01c878e51ba..449157f71610 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
| @@ -240,7 +240,7 @@ bool | |||
| 240 | i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) | 240 | i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) |
| 241 | { | 241 | { |
| 242 | struct drm_device *dev = obj->dev; | 242 | struct drm_device *dev = obj->dev; |
| 243 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 243 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 244 | 244 | ||
| 245 | if (obj_priv->gtt_space == NULL) | 245 | if (obj_priv->gtt_space == NULL) |
| 246 | return true; | 246 | return true; |
| @@ -280,7 +280,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
| 280 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 280 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
| 281 | if (obj == NULL) | 281 | if (obj == NULL) |
| 282 | return -EINVAL; | 282 | return -EINVAL; |
| 283 | obj_priv = obj->driver_private; | 283 | obj_priv = to_intel_bo(obj); |
| 284 | 284 | ||
| 285 | if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { | 285 | if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { |
| 286 | drm_gem_object_unreference_unlocked(obj); | 286 | drm_gem_object_unreference_unlocked(obj); |
| @@ -364,7 +364,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, | |||
| 364 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 364 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
| 365 | if (obj == NULL) | 365 | if (obj == NULL) |
| 366 | return -EINVAL; | 366 | return -EINVAL; |
| 367 | obj_priv = obj->driver_private; | 367 | obj_priv = to_intel_bo(obj); |
| 368 | 368 | ||
| 369 | mutex_lock(&dev->struct_mutex); | 369 | mutex_lock(&dev->struct_mutex); |
| 370 | 370 | ||
| @@ -427,7 +427,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) | |||
| 427 | { | 427 | { |
| 428 | struct drm_device *dev = obj->dev; | 428 | struct drm_device *dev = obj->dev; |
| 429 | drm_i915_private_t *dev_priv = dev->dev_private; | 429 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 430 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 430 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 431 | int page_count = obj->size >> PAGE_SHIFT; | 431 | int page_count = obj->size >> PAGE_SHIFT; |
| 432 | int i; | 432 | int i; |
| 433 | 433 | ||
| @@ -456,7 +456,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) | |||
| 456 | { | 456 | { |
| 457 | struct drm_device *dev = obj->dev; | 457 | struct drm_device *dev = obj->dev; |
| 458 | drm_i915_private_t *dev_priv = dev->dev_private; | 458 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 459 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 459 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 460 | int page_count = obj->size >> PAGE_SHIFT; | 460 | int page_count = obj->size >> PAGE_SHIFT; |
| 461 | int i; | 461 | int i; |
| 462 | 462 | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 49c458bc6502..6421481d6222 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -260,10 +260,10 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
| 260 | 260 | ||
| 261 | if (mode_config->num_connector) { | 261 | if (mode_config->num_connector) { |
| 262 | list_for_each_entry(connector, &mode_config->connector_list, head) { | 262 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
| 263 | struct intel_output *intel_output = to_intel_output(connector); | 263 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 264 | 264 | ||
| 265 | if (intel_output->hot_plug) | 265 | if (intel_encoder->hot_plug) |
| 266 | (*intel_output->hot_plug) (intel_output); | 266 | (*intel_encoder->hot_plug) (intel_encoder); |
| 267 | } | 267 | } |
| 268 | } | 268 | } |
| 269 | /* Just fire off a uevent and let userspace tell us what to do */ | 269 | /* Just fire off a uevent and let userspace tell us what to do */ |
| @@ -444,7 +444,7 @@ i915_error_object_create(struct drm_device *dev, | |||
| 444 | if (src == NULL) | 444 | if (src == NULL) |
| 445 | return NULL; | 445 | return NULL; |
| 446 | 446 | ||
| 447 | src_priv = src->driver_private; | 447 | src_priv = to_intel_bo(src); |
| 448 | if (src_priv->pages == NULL) | 448 | if (src_priv->pages == NULL) |
| 449 | return NULL; | 449 | return NULL; |
| 450 | 450 | ||
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 38110ce742a5..759c2ef72eff 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
| @@ -247,19 +247,19 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
| 247 | 247 | ||
| 248 | static bool intel_crt_detect_ddc(struct drm_connector *connector) | 248 | static bool intel_crt_detect_ddc(struct drm_connector *connector) |
| 249 | { | 249 | { |
| 250 | struct intel_output *intel_output = to_intel_output(connector); | 250 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 251 | 251 | ||
| 252 | /* CRT should always be at 0, but check anyway */ | 252 | /* CRT should always be at 0, but check anyway */ |
| 253 | if (intel_output->type != INTEL_OUTPUT_ANALOG) | 253 | if (intel_encoder->type != INTEL_OUTPUT_ANALOG) |
| 254 | return false; | 254 | return false; |
| 255 | 255 | ||
| 256 | return intel_ddc_probe(intel_output); | 256 | return intel_ddc_probe(intel_encoder); |
| 257 | } | 257 | } |
| 258 | 258 | ||
| 259 | static enum drm_connector_status | 259 | static enum drm_connector_status |
| 260 | intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output) | 260 | intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder) |
| 261 | { | 261 | { |
| 262 | struct drm_encoder *encoder = &intel_output->enc; | 262 | struct drm_encoder *encoder = &intel_encoder->enc; |
| 263 | struct drm_device *dev = encoder->dev; | 263 | struct drm_device *dev = encoder->dev; |
| 264 | struct drm_i915_private *dev_priv = dev->dev_private; | 264 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 265 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 265 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| @@ -387,8 +387,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output) | |||
| 387 | static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) | 387 | static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) |
| 388 | { | 388 | { |
| 389 | struct drm_device *dev = connector->dev; | 389 | struct drm_device *dev = connector->dev; |
| 390 | struct intel_output *intel_output = to_intel_output(connector); | 390 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 391 | struct drm_encoder *encoder = &intel_output->enc; | 391 | struct drm_encoder *encoder = &intel_encoder->enc; |
| 392 | struct drm_crtc *crtc; | 392 | struct drm_crtc *crtc; |
| 393 | int dpms_mode; | 393 | int dpms_mode; |
| 394 | enum drm_connector_status status; | 394 | enum drm_connector_status status; |
| @@ -405,13 +405,13 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto | |||
| 405 | 405 | ||
| 406 | /* for pre-945g platforms use load detect */ | 406 | /* for pre-945g platforms use load detect */ |
| 407 | if (encoder->crtc && encoder->crtc->enabled) { | 407 | if (encoder->crtc && encoder->crtc->enabled) { |
| 408 | status = intel_crt_load_detect(encoder->crtc, intel_output); | 408 | status = intel_crt_load_detect(encoder->crtc, intel_encoder); |
| 409 | } else { | 409 | } else { |
| 410 | crtc = intel_get_load_detect_pipe(intel_output, | 410 | crtc = intel_get_load_detect_pipe(intel_encoder, |
| 411 | NULL, &dpms_mode); | 411 | NULL, &dpms_mode); |
| 412 | if (crtc) { | 412 | if (crtc) { |
| 413 | status = intel_crt_load_detect(crtc, intel_output); | 413 | status = intel_crt_load_detect(crtc, intel_encoder); |
| 414 | intel_release_load_detect_pipe(intel_output, dpms_mode); | 414 | intel_release_load_detect_pipe(intel_encoder, dpms_mode); |
| 415 | } else | 415 | } else |
| 416 | status = connector_status_unknown; | 416 | status = connector_status_unknown; |
| 417 | } | 417 | } |
| @@ -421,9 +421,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto | |||
| 421 | 421 | ||
| 422 | static void intel_crt_destroy(struct drm_connector *connector) | 422 | static void intel_crt_destroy(struct drm_connector *connector) |
| 423 | { | 423 | { |
| 424 | struct intel_output *intel_output = to_intel_output(connector); | 424 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 425 | 425 | ||
| 426 | intel_i2c_destroy(intel_output->ddc_bus); | 426 | intel_i2c_destroy(intel_encoder->ddc_bus); |
| 427 | drm_sysfs_connector_remove(connector); | 427 | drm_sysfs_connector_remove(connector); |
| 428 | drm_connector_cleanup(connector); | 428 | drm_connector_cleanup(connector); |
| 429 | kfree(connector); | 429 | kfree(connector); |
| @@ -432,28 +432,28 @@ static void intel_crt_destroy(struct drm_connector *connector) | |||
| 432 | static int intel_crt_get_modes(struct drm_connector *connector) | 432 | static int intel_crt_get_modes(struct drm_connector *connector) |
| 433 | { | 433 | { |
| 434 | int ret; | 434 | int ret; |
| 435 | struct intel_output *intel_output = to_intel_output(connector); | 435 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 436 | struct i2c_adapter *ddcbus; | 436 | struct i2c_adapter *ddcbus; |
| 437 | struct drm_device *dev = connector->dev; | 437 | struct drm_device *dev = connector->dev; |
| 438 | 438 | ||
| 439 | 439 | ||
| 440 | ret = intel_ddc_get_modes(intel_output); | 440 | ret = intel_ddc_get_modes(intel_encoder); |
| 441 | if (ret || !IS_G4X(dev)) | 441 | if (ret || !IS_G4X(dev)) |
| 442 | goto end; | 442 | goto end; |
| 443 | 443 | ||
| 444 | ddcbus = intel_output->ddc_bus; | 444 | ddcbus = intel_encoder->ddc_bus; |
| 445 | /* Try to probe digital port for output in DVI-I -> VGA mode. */ | 445 | /* Try to probe digital port for output in DVI-I -> VGA mode. */ |
| 446 | intel_output->ddc_bus = | 446 | intel_encoder->ddc_bus = |
| 447 | intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); | 447 | intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); |
| 448 | 448 | ||
| 449 | if (!intel_output->ddc_bus) { | 449 | if (!intel_encoder->ddc_bus) { |
| 450 | intel_output->ddc_bus = ddcbus; | 450 | intel_encoder->ddc_bus = ddcbus; |
| 451 | dev_printk(KERN_ERR, &connector->dev->pdev->dev, | 451 | dev_printk(KERN_ERR, &connector->dev->pdev->dev, |
| 452 | "DDC bus registration failed for CRTDDC_D.\n"); | 452 | "DDC bus registration failed for CRTDDC_D.\n"); |
| 453 | goto end; | 453 | goto end; |
| 454 | } | 454 | } |
| 455 | /* Try to get modes by GPIOD port */ | 455 | /* Try to get modes by GPIOD port */ |
| 456 | ret = intel_ddc_get_modes(intel_output); | 456 | ret = intel_ddc_get_modes(intel_encoder); |
| 457 | intel_i2c_destroy(ddcbus); | 457 | intel_i2c_destroy(ddcbus); |
| 458 | 458 | ||
| 459 | end: | 459 | end: |
| @@ -506,23 +506,23 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { | |||
| 506 | void intel_crt_init(struct drm_device *dev) | 506 | void intel_crt_init(struct drm_device *dev) |
| 507 | { | 507 | { |
| 508 | struct drm_connector *connector; | 508 | struct drm_connector *connector; |
| 509 | struct intel_output *intel_output; | 509 | struct intel_encoder *intel_encoder; |
| 510 | struct drm_i915_private *dev_priv = dev->dev_private; | 510 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 511 | u32 i2c_reg; | 511 | u32 i2c_reg; |
| 512 | 512 | ||
| 513 | intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); | 513 | intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL); |
| 514 | if (!intel_output) | 514 | if (!intel_encoder) |
| 515 | return; | 515 | return; |
| 516 | 516 | ||
| 517 | connector = &intel_output->base; | 517 | connector = &intel_encoder->base; |
| 518 | drm_connector_init(dev, &intel_output->base, | 518 | drm_connector_init(dev, &intel_encoder->base, |
| 519 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); | 519 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); |
| 520 | 520 | ||
| 521 | drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs, | 521 | drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs, |
| 522 | DRM_MODE_ENCODER_DAC); | 522 | DRM_MODE_ENCODER_DAC); |
| 523 | 523 | ||
| 524 | drm_mode_connector_attach_encoder(&intel_output->base, | 524 | drm_mode_connector_attach_encoder(&intel_encoder->base, |
| 525 | &intel_output->enc); | 525 | &intel_encoder->enc); |
| 526 | 526 | ||
| 527 | /* Set up the DDC bus. */ | 527 | /* Set up the DDC bus. */ |
| 528 | if (HAS_PCH_SPLIT(dev)) | 528 | if (HAS_PCH_SPLIT(dev)) |
| @@ -533,22 +533,22 @@ void intel_crt_init(struct drm_device *dev) | |||
| 533 | if (dev_priv->crt_ddc_bus != 0) | 533 | if (dev_priv->crt_ddc_bus != 0) |
| 534 | i2c_reg = dev_priv->crt_ddc_bus; | 534 | i2c_reg = dev_priv->crt_ddc_bus; |
| 535 | } | 535 | } |
| 536 | intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); | 536 | intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); |
| 537 | if (!intel_output->ddc_bus) { | 537 | if (!intel_encoder->ddc_bus) { |
| 538 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " | 538 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " |
| 539 | "failed.\n"); | 539 | "failed.\n"); |
| 540 | return; | 540 | return; |
| 541 | } | 541 | } |
| 542 | 542 | ||
| 543 | intel_output->type = INTEL_OUTPUT_ANALOG; | 543 | intel_encoder->type = INTEL_OUTPUT_ANALOG; |
| 544 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 544 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
| 545 | (1 << INTEL_ANALOG_CLONE_BIT) | | 545 | (1 << INTEL_ANALOG_CLONE_BIT) | |
| 546 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | 546 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); |
| 547 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 547 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
| 548 | connector->interlace_allowed = 0; | 548 | connector->interlace_allowed = 0; |
| 549 | connector->doublescan_allowed = 0; | 549 | connector->doublescan_allowed = 0; |
| 550 | 550 | ||
| 551 | drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs); | 551 | drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs); |
| 552 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); | 552 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); |
| 553 | 553 | ||
| 554 | drm_sysfs_connector_add(connector); | 554 | drm_sysfs_connector_add(connector); |
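The intel_crt.c hunks above, together with the earlier i915_irq.c hunk, carry the second rename in this diff: struct intel_output becomes struct intel_encoder, and to_intel_output() becomes to_intel_encoder(), with no behavioural change at the call sites. Those call sites show that the wrapper still embeds the drm_connector as its "base" member (connector = &intel_encoder->base) and the drm_encoder as "enc". A hedged sketch of what that implies, assuming a conventional container_of() upcast from the embedded connector (neither the struct layout nor the macro body is quoted from this diff), is:

	#include <linux/kernel.h>	/* container_of() */
	/* struct drm_connector, drm_encoder and i2c_adapter come from the DRM
	 * core and i2c headers; only fields visible in the hunks are listed. */

	struct intel_encoder {
		struct drm_connector base;	/* exposed connector, &intel_encoder->base */
		struct drm_encoder enc;		/* encoder handed to drm_encoder_init() */
		int type;			/* e.g. INTEL_OUTPUT_ANALOG */
		struct i2c_adapter *ddc_bus;	/* DDC bus used for EDID probing */
		/* clone_mask, crtc_mask, hot_plug, needs_tv_clock, ... */
	};

	/* Assumed reverse mapping from the embedded connector back to the wrapper. */
	#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)

Whatever the exact definition, the rename itself is purely mechanical; it only changes the name under which the same encoder-side state is reached from a connector.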
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e7e753b2845f..e7356fb6c918 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -747,16 +747,16 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) | |||
| 747 | list_for_each_entry(l_entry, &mode_config->connector_list, head) { | 747 | list_for_each_entry(l_entry, &mode_config->connector_list, head) { |
| 748 | if (l_entry->encoder && | 748 | if (l_entry->encoder && |
| 749 | l_entry->encoder->crtc == crtc) { | 749 | l_entry->encoder->crtc == crtc) { |
| 750 | struct intel_output *intel_output = to_intel_output(l_entry); | 750 | struct intel_encoder *intel_encoder = to_intel_encoder(l_entry); |
| 751 | if (intel_output->type == type) | 751 | if (intel_encoder->type == type) |
| 752 | return true; | 752 | return true; |
| 753 | } | 753 | } |
| 754 | } | 754 | } |
| 755 | return false; | 755 | return false; |
| 756 | } | 756 | } |
| 757 | 757 | ||
| 758 | struct drm_connector * | 758 | static struct drm_connector * |
| 759 | intel_pipe_get_output (struct drm_crtc *crtc) | 759 | intel_pipe_get_connector (struct drm_crtc *crtc) |
| 760 | { | 760 | { |
| 761 | struct drm_device *dev = crtc->dev; | 761 | struct drm_device *dev = crtc->dev; |
| 762 | struct drm_mode_config *mode_config = &dev->mode_config; | 762 | struct drm_mode_config *mode_config = &dev->mode_config; |
| @@ -1003,7 +1003,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
| 1003 | struct drm_i915_private *dev_priv = dev->dev_private; | 1003 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1004 | struct drm_framebuffer *fb = crtc->fb; | 1004 | struct drm_framebuffer *fb = crtc->fb; |
| 1005 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1005 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
| 1006 | struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; | 1006 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); |
| 1007 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1007 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 1008 | int plane, i; | 1008 | int plane, i; |
| 1009 | u32 fbc_ctl, fbc_ctl2; | 1009 | u32 fbc_ctl, fbc_ctl2; |
| @@ -1080,7 +1080,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
| 1080 | struct drm_i915_private *dev_priv = dev->dev_private; | 1080 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1081 | struct drm_framebuffer *fb = crtc->fb; | 1081 | struct drm_framebuffer *fb = crtc->fb; |
| 1082 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1082 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
| 1083 | struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; | 1083 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); |
| 1084 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1084 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 1085 | int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : | 1085 | int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : |
| 1086 | DPFC_CTL_PLANEB); | 1086 | DPFC_CTL_PLANEB); |
| @@ -1176,7 +1176,7 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
| 1176 | return; | 1176 | return; |
| 1177 | 1177 | ||
| 1178 | intel_fb = to_intel_framebuffer(fb); | 1178 | intel_fb = to_intel_framebuffer(fb); |
| 1179 | obj_priv = intel_fb->obj->driver_private; | 1179 | obj_priv = to_intel_bo(intel_fb->obj); |
| 1180 | 1180 | ||
| 1181 | /* | 1181 | /* |
| 1182 | * If FBC is already on, we just have to verify that we can | 1182 | * If FBC is already on, we just have to verify that we can |
| @@ -1243,7 +1243,7 @@ out_disable: | |||
| 1243 | static int | 1243 | static int |
| 1244 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) | 1244 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) |
| 1245 | { | 1245 | { |
| 1246 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1246 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
| 1247 | u32 alignment; | 1247 | u32 alignment; |
| 1248 | int ret; | 1248 | int ret; |
| 1249 | 1249 | ||
| @@ -1323,7 +1323,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 1323 | 1323 | ||
| 1324 | intel_fb = to_intel_framebuffer(crtc->fb); | 1324 | intel_fb = to_intel_framebuffer(crtc->fb); |
| 1325 | obj = intel_fb->obj; | 1325 | obj = intel_fb->obj; |
| 1326 | obj_priv = obj->driver_private; | 1326 | obj_priv = to_intel_bo(obj); |
| 1327 | 1327 | ||
| 1328 | mutex_lock(&dev->struct_mutex); | 1328 | mutex_lock(&dev->struct_mutex); |
| 1329 | ret = intel_pin_and_fence_fb_obj(dev, obj); | 1329 | ret = intel_pin_and_fence_fb_obj(dev, obj); |
| @@ -1401,7 +1401,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 1401 | 1401 | ||
| 1402 | if (old_fb) { | 1402 | if (old_fb) { |
| 1403 | intel_fb = to_intel_framebuffer(old_fb); | 1403 | intel_fb = to_intel_framebuffer(old_fb); |
| 1404 | obj_priv = intel_fb->obj->driver_private; | 1404 | obj_priv = to_intel_bo(intel_fb->obj); |
| 1405 | i915_gem_object_unpin(intel_fb->obj); | 1405 | i915_gem_object_unpin(intel_fb->obj); |
| 1406 | } | 1406 | } |
| 1407 | intel_increase_pllclock(crtc, true); | 1407 | intel_increase_pllclock(crtc, true); |
| @@ -2917,7 +2917,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
| 2917 | int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; | 2917 | int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; |
| 2918 | int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; | 2918 | int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; |
| 2919 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; | 2919 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; |
| 2920 | int refclk, num_outputs = 0; | 2920 | int refclk, num_connectors = 0; |
| 2921 | intel_clock_t clock, reduced_clock; | 2921 | intel_clock_t clock, reduced_clock; |
| 2922 | u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; | 2922 | u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; |
| 2923 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; | 2923 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; |
| @@ -2943,19 +2943,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
| 2943 | drm_vblank_pre_modeset(dev, pipe); | 2943 | drm_vblank_pre_modeset(dev, pipe); |
| 2944 | 2944 | ||
| 2945 | list_for_each_entry(connector, &mode_config->connector_list, head) { | 2945 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
| 2946 | struct intel_output *intel_output = to_intel_output(connector); | 2946 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 2947 | 2947 | ||
| 2948 | if (!connector->encoder || connector->encoder->crtc != crtc) | 2948 | if (!connector->encoder || connector->encoder->crtc != crtc) |
| 2949 | continue; | 2949 | continue; |
| 2950 | 2950 | ||
| 2951 | switch (intel_output->type) { | 2951 | switch (intel_encoder->type) { |
| 2952 | case INTEL_OUTPUT_LVDS: | 2952 | case INTEL_OUTPUT_LVDS: |
| 2953 | is_lvds = true; | 2953 | is_lvds = true; |
| 2954 | break; | 2954 | break; |
| 2955 | case INTEL_OUTPUT_SDVO: | 2955 | case INTEL_OUTPUT_SDVO: |
| 2956 | case INTEL_OUTPUT_HDMI: | 2956 | case INTEL_OUTPUT_HDMI: |
| 2957 | is_sdvo = true; | 2957 | is_sdvo = true; |
| 2958 | if (intel_output->needs_tv_clock) | 2958 | if (intel_encoder->needs_tv_clock) |
| 2959 | is_tv = true; | 2959 | is_tv = true; |
| 2960 | break; | 2960 | break; |
| 2961 | case INTEL_OUTPUT_DVO: | 2961 | case INTEL_OUTPUT_DVO: |
| @@ -2975,10 +2975,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
| 2975 | break; | 2975 | break; |
| 2976 | } | 2976 | } |
| 2977 | 2977 | ||
| 2978 | num_outputs++; | 2978 | num_connectors++; |
| 2979 | } | 2979 | } |
| 2980 | 2980 | ||
| 2981 | if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) { | 2981 | if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) { |
| 2982 | refclk = dev_priv->lvds_ssc_freq * 1000; | 2982 | refclk = dev_priv->lvds_ssc_freq * 1000; |
| 2983 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | 2983 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
| 2984 | refclk / 1000); | 2984 | refclk / 1000); |
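The hunk above only renames num_outputs to num_connectors, but the decision it feeds is worth spelling out: the spread-spectrum clock is used as the PLL reference only when LVDS is the sole connector on the pipe. A hedged sketch of that choice; judging by the *1000 and the /1000 in the debug print, lvds_ssc_freq appears to be in MHz and refclk in kHz, and the 96000 kHz fallback here is a placeholder, not the driver's actual per-chipset value:

#include <stdio.h>

static int pick_refclk_khz(int is_lvds, int use_ssc, int num_connectors,
                           int lvds_ssc_freq_mhz)
{
        if (is_lvds && use_ssc && num_connectors < 2)
                return lvds_ssc_freq_mhz * 1000;   /* SSC reference clock */
        return 96000;                              /* placeholder default */
}

int main(void)
{
        printf("%d kHz\n", pick_refclk_khz(1, 1, 1, 96));
        return 0;
}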
| @@ -3049,8 +3049,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
| 3049 | if (is_edp) { | 3049 | if (is_edp) { |
| 3050 | struct drm_connector *edp; | 3050 | struct drm_connector *edp; |
| 3051 | target_clock = mode->clock; | 3051 | target_clock = mode->clock; |
| 3052 | edp = intel_pipe_get_output(crtc); | 3052 | edp = intel_pipe_get_connector(crtc); |
| 3053 | intel_edp_link_config(to_intel_output(edp), | 3053 | intel_edp_link_config(to_intel_encoder(edp), |
| 3054 | &lane, &link_bw); | 3054 | &lane, &link_bw); |
| 3055 | } else { | 3055 | } else { |
| 3056 | /* DP over FDI requires target mode clock | 3056 | /* DP over FDI requires target mode clock |
| @@ -3231,7 +3231,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
| 3231 | /* XXX: just matching BIOS for now */ | 3231 | /* XXX: just matching BIOS for now */ |
| 3232 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ | 3232 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ |
| 3233 | dpll |= 3; | 3233 | dpll |= 3; |
| 3234 | else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) | 3234 | else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) |
| 3235 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; | 3235 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; |
| 3236 | else | 3236 | else |
| 3237 | dpll |= PLL_REF_INPUT_DREFCLK; | 3237 | dpll |= PLL_REF_INPUT_DREFCLK; |
| @@ -3511,7 +3511,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 3511 | if (!bo) | 3511 | if (!bo) |
| 3512 | return -ENOENT; | 3512 | return -ENOENT; |
| 3513 | 3513 | ||
| 3514 | obj_priv = bo->driver_private; | 3514 | obj_priv = to_intel_bo(bo); |
| 3515 | 3515 | ||
| 3516 | if (bo->size < width * height * 4) { | 3516 | if (bo->size < width * height * 4) { |
| 3517 | DRM_ERROR("buffer is to small\n"); | 3517 | DRM_ERROR("buffer is to small\n"); |
| @@ -3655,9 +3655,9 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | |||
| 3655 | * detection. | 3655 | * detection. |
| 3656 | * | 3656 | * |
| 3657 | * It will be up to the load-detect code to adjust the pipe as appropriate for | 3657 | * It will be up to the load-detect code to adjust the pipe as appropriate for |
| 3658 | * its requirements. The pipe will be connected to no other outputs. | 3658 | * its requirements. The pipe will be connected to no other encoders. |
| 3659 | * | 3659 | * |
| 3660 | * Currently this code will only succeed if there is a pipe with no outputs | 3660 | * Currently this code will only succeed if there is a pipe with no encoders |
| 3661 | * configured for it. In the future, it could choose to temporarily disable | 3661 | * configured for it. In the future, it could choose to temporarily disable |
| 3662 | * some outputs to free up a pipe for its use. | 3662 | * some outputs to free up a pipe for its use. |
| 3663 | * | 3663 | * |
| @@ -3670,14 +3670,14 @@ static struct drm_display_mode load_detect_mode = { | |||
| 3670 | 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 3670 | 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
| 3671 | }; | 3671 | }; |
| 3672 | 3672 | ||
| 3673 | struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, | 3673 | struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, |
| 3674 | struct drm_display_mode *mode, | 3674 | struct drm_display_mode *mode, |
| 3675 | int *dpms_mode) | 3675 | int *dpms_mode) |
| 3676 | { | 3676 | { |
| 3677 | struct intel_crtc *intel_crtc; | 3677 | struct intel_crtc *intel_crtc; |
| 3678 | struct drm_crtc *possible_crtc; | 3678 | struct drm_crtc *possible_crtc; |
| 3679 | struct drm_crtc *supported_crtc =NULL; | 3679 | struct drm_crtc *supported_crtc =NULL; |
| 3680 | struct drm_encoder *encoder = &intel_output->enc; | 3680 | struct drm_encoder *encoder = &intel_encoder->enc; |
| 3681 | struct drm_crtc *crtc = NULL; | 3681 | struct drm_crtc *crtc = NULL; |
| 3682 | struct drm_device *dev = encoder->dev; | 3682 | struct drm_device *dev = encoder->dev; |
| 3683 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | 3683 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
| @@ -3729,8 +3729,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, | |||
| 3729 | } | 3729 | } |
| 3730 | 3730 | ||
| 3731 | encoder->crtc = crtc; | 3731 | encoder->crtc = crtc; |
| 3732 | intel_output->base.encoder = encoder; | 3732 | intel_encoder->base.encoder = encoder; |
| 3733 | intel_output->load_detect_temp = true; | 3733 | intel_encoder->load_detect_temp = true; |
| 3734 | 3734 | ||
| 3735 | intel_crtc = to_intel_crtc(crtc); | 3735 | intel_crtc = to_intel_crtc(crtc); |
| 3736 | *dpms_mode = intel_crtc->dpms_mode; | 3736 | *dpms_mode = intel_crtc->dpms_mode; |
| @@ -3755,23 +3755,23 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, | |||
| 3755 | return crtc; | 3755 | return crtc; |
| 3756 | } | 3756 | } |
| 3757 | 3757 | ||
| 3758 | void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode) | 3758 | void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode) |
| 3759 | { | 3759 | { |
| 3760 | struct drm_encoder *encoder = &intel_output->enc; | 3760 | struct drm_encoder *encoder = &intel_encoder->enc; |
| 3761 | struct drm_device *dev = encoder->dev; | 3761 | struct drm_device *dev = encoder->dev; |
| 3762 | struct drm_crtc *crtc = encoder->crtc; | 3762 | struct drm_crtc *crtc = encoder->crtc; |
| 3763 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | 3763 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
| 3764 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | 3764 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; |
| 3765 | 3765 | ||
| 3766 | if (intel_output->load_detect_temp) { | 3766 | if (intel_encoder->load_detect_temp) { |
| 3767 | encoder->crtc = NULL; | 3767 | encoder->crtc = NULL; |
| 3768 | intel_output->base.encoder = NULL; | 3768 | intel_encoder->base.encoder = NULL; |
| 3769 | intel_output->load_detect_temp = false; | 3769 | intel_encoder->load_detect_temp = false; |
| 3770 | crtc->enabled = drm_helper_crtc_in_use(crtc); | 3770 | crtc->enabled = drm_helper_crtc_in_use(crtc); |
| 3771 | drm_helper_disable_unused_functions(dev); | 3771 | drm_helper_disable_unused_functions(dev); |
| 3772 | } | 3772 | } |
| 3773 | 3773 | ||
| 3774 | /* Switch crtc and output back off if necessary */ | 3774 | /* Switch crtc and encoder back off if necessary */ |
| 3775 | if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { | 3775 | if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { |
| 3776 | if (encoder->crtc == crtc) | 3776 | if (encoder->crtc == crtc) |
| 3777 | encoder_funcs->dpms(encoder, dpms_mode); | 3777 | encoder_funcs->dpms(encoder, dpms_mode); |
| @@ -4156,7 +4156,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
| 4156 | work = intel_crtc->unpin_work; | 4156 | work = intel_crtc->unpin_work; |
| 4157 | if (work == NULL || !work->pending) { | 4157 | if (work == NULL || !work->pending) { |
| 4158 | if (work && !work->pending) { | 4158 | if (work && !work->pending) { |
| 4159 | obj_priv = work->pending_flip_obj->driver_private; | 4159 | obj_priv = to_intel_bo(work->pending_flip_obj); |
| 4160 | DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", | 4160 | DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", |
| 4161 | obj_priv, | 4161 | obj_priv, |
| 4162 | atomic_read(&obj_priv->pending_flip)); | 4162 | atomic_read(&obj_priv->pending_flip)); |
| @@ -4181,7 +4181,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
| 4181 | 4181 | ||
| 4182 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4182 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 4183 | 4183 | ||
| 4184 | obj_priv = work->pending_flip_obj->driver_private; | 4184 | obj_priv = to_intel_bo(work->pending_flip_obj); |
| 4185 | 4185 | ||
| 4186 | /* Initial scanout buffer will have a 0 pending flip count */ | 4186 | /* Initial scanout buffer will have a 0 pending flip count */ |
| 4187 | if ((atomic_read(&obj_priv->pending_flip) == 0) || | 4187 | if ((atomic_read(&obj_priv->pending_flip) == 0) || |
| @@ -4252,7 +4252,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
| 4252 | ret = intel_pin_and_fence_fb_obj(dev, obj); | 4252 | ret = intel_pin_and_fence_fb_obj(dev, obj); |
| 4253 | if (ret != 0) { | 4253 | if (ret != 0) { |
| 4254 | DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", | 4254 | DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", |
| 4255 | obj->driver_private); | 4255 | to_intel_bo(obj)); |
| 4256 | kfree(work); | 4256 | kfree(work); |
| 4257 | intel_crtc->unpin_work = NULL; | 4257 | intel_crtc->unpin_work = NULL; |
| 4258 | mutex_unlock(&dev->struct_mutex); | 4258 | mutex_unlock(&dev->struct_mutex); |
| @@ -4266,7 +4266,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
| 4266 | crtc->fb = fb; | 4266 | crtc->fb = fb; |
| 4267 | i915_gem_object_flush_write_domain(obj); | 4267 | i915_gem_object_flush_write_domain(obj); |
| 4268 | drm_vblank_get(dev, intel_crtc->pipe); | 4268 | drm_vblank_get(dev, intel_crtc->pipe); |
| 4269 | obj_priv = obj->driver_private; | 4269 | obj_priv = to_intel_bo(obj); |
| 4270 | atomic_inc(&obj_priv->pending_flip); | 4270 | atomic_inc(&obj_priv->pending_flip); |
| 4271 | work->pending_flip_obj = obj; | 4271 | work->pending_flip_obj = obj; |
| 4272 | 4272 | ||
| @@ -4399,8 +4399,8 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask) | |||
| 4399 | int entry = 0; | 4399 | int entry = 0; |
| 4400 | 4400 | ||
| 4401 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 4401 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 4402 | struct intel_output *intel_output = to_intel_output(connector); | 4402 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 4403 | if (type_mask & intel_output->clone_mask) | 4403 | if (type_mask & intel_encoder->clone_mask) |
| 4404 | index_mask |= (1 << entry); | 4404 | index_mask |= (1 << entry); |
| 4405 | entry++; | 4405 | entry++; |
| 4406 | } | 4406 | } |
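intel_connector_clones(), touched above, builds a bitmask of list positions whose clone_mask overlaps the requested type mask; that bitmask later becomes encoder->possible_clones. A self-contained sketch of the same bit manipulation, with a plain array standing in for the kernel's connector list and made-up mask values:

#include <stdio.h>

/* Each entry mirrors intel_encoder->clone_mask: the set of output
 * types this encoder may be cloned with. */
static unsigned int clone_masks[] = { 0x1, 0x6, 0x6, 0x8 };

/* Return a bitmask of list positions whose clone_mask intersects
 * type_mask, i.e. the possible_clones value for that type. */
static unsigned int connector_clones(unsigned int type_mask)
{
        unsigned int index_mask = 0;
        int entry;
        int n = (int)(sizeof(clone_masks) / sizeof(clone_masks[0]));

        for (entry = 0; entry < n; entry++)
                if (type_mask & clone_masks[entry])
                        index_mask |= 1u << entry;
        return index_mask;
}

int main(void)
{
        printf("clones of type 0x6: 0x%x\n", connector_clones(0x6));
        return 0;
}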
| @@ -4495,12 +4495,12 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
| 4495 | intel_tv_init(dev); | 4495 | intel_tv_init(dev); |
| 4496 | 4496 | ||
| 4497 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 4497 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 4498 | struct intel_output *intel_output = to_intel_output(connector); | 4498 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 4499 | struct drm_encoder *encoder = &intel_output->enc; | 4499 | struct drm_encoder *encoder = &intel_encoder->enc; |
| 4500 | 4500 | ||
| 4501 | encoder->possible_crtcs = intel_output->crtc_mask; | 4501 | encoder->possible_crtcs = intel_encoder->crtc_mask; |
| 4502 | encoder->possible_clones = intel_connector_clones(dev, | 4502 | encoder->possible_clones = intel_connector_clones(dev, |
| 4503 | intel_output->clone_mask); | 4503 | intel_encoder->clone_mask); |
| 4504 | } | 4504 | } |
| 4505 | } | 4505 | } |
| 4506 | 4506 | ||
| @@ -4779,14 +4779,14 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
| 4779 | struct drm_i915_gem_object *obj_priv = NULL; | 4779 | struct drm_i915_gem_object *obj_priv = NULL; |
| 4780 | 4780 | ||
| 4781 | if (dev_priv->pwrctx) { | 4781 | if (dev_priv->pwrctx) { |
| 4782 | obj_priv = dev_priv->pwrctx->driver_private; | 4782 | obj_priv = to_intel_bo(dev_priv->pwrctx); |
| 4783 | } else { | 4783 | } else { |
| 4784 | struct drm_gem_object *pwrctx; | 4784 | struct drm_gem_object *pwrctx; |
| 4785 | 4785 | ||
| 4786 | pwrctx = intel_alloc_power_context(dev); | 4786 | pwrctx = intel_alloc_power_context(dev); |
| 4787 | if (pwrctx) { | 4787 | if (pwrctx) { |
| 4788 | dev_priv->pwrctx = pwrctx; | 4788 | dev_priv->pwrctx = pwrctx; |
| 4789 | obj_priv = pwrctx->driver_private; | 4789 | obj_priv = to_intel_bo(pwrctx); |
| 4790 | } | 4790 | } |
| 4791 | } | 4791 | } |
| 4792 | 4792 | ||
| @@ -4815,7 +4815,7 @@ static void intel_init_display(struct drm_device *dev) | |||
| 4815 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; | 4815 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; |
| 4816 | dev_priv->display.enable_fbc = g4x_enable_fbc; | 4816 | dev_priv->display.enable_fbc = g4x_enable_fbc; |
| 4817 | dev_priv->display.disable_fbc = g4x_disable_fbc; | 4817 | dev_priv->display.disable_fbc = g4x_disable_fbc; |
| 4818 | } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) { | 4818 | } else if (IS_I965GM(dev)) { |
| 4819 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; | 4819 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; |
| 4820 | dev_priv->display.enable_fbc = i8xx_enable_fbc; | 4820 | dev_priv->display.enable_fbc = i8xx_enable_fbc; |
| 4821 | dev_priv->display.disable_fbc = i8xx_disable_fbc; | 4821 | dev_priv->display.disable_fbc = i8xx_disable_fbc; |
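The hunk above narrows the i8xx-style FBC hooks to I965GM only while leaving the G4X path alone; the underlying pattern is a small per-generation vtable of function pointers filled in once at init time. A minimal sketch of that dispatch pattern, with hypothetical names and a boolean standing in for the chipset check:

#include <stdio.h>
#include <stdbool.h>

/* Per-generation display hooks, selected once at init. */
struct display_funcs {
        bool (*fbc_enabled)(void);
        void (*enable_fbc)(void);
        void (*disable_fbc)(void);
};

static bool g4x_fbc_enabled(void) { return true; }
static void g4x_enable_fbc(void)  { puts("g4x enable"); }
static void g4x_disable_fbc(void) { puts("g4x disable"); }

static bool i8xx_fbc_enabled(void) { return false; }
static void i8xx_enable_fbc(void)  { puts("i8xx enable"); }
static void i8xx_disable_fbc(void) { puts("i8xx disable"); }

static void init_display(struct display_funcs *d, bool is_g4x)
{
        if (is_g4x) {
                d->fbc_enabled = g4x_fbc_enabled;
                d->enable_fbc  = g4x_enable_fbc;
                d->disable_fbc = g4x_disable_fbc;
        } else {
                d->fbc_enabled = i8xx_fbc_enabled;
                d->enable_fbc  = i8xx_enable_fbc;
                d->disable_fbc = i8xx_disable_fbc;
        }
}

int main(void)
{
        struct display_funcs d;

        init_display(&d, true);
        d.enable_fbc();          /* dispatches to the g4x variant */
        return 0;
}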
| @@ -4957,7 +4957,7 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
| 4957 | if (dev_priv->pwrctx) { | 4957 | if (dev_priv->pwrctx) { |
| 4958 | struct drm_i915_gem_object *obj_priv; | 4958 | struct drm_i915_gem_object *obj_priv; |
| 4959 | 4959 | ||
| 4960 | obj_priv = dev_priv->pwrctx->driver_private; | 4960 | obj_priv = to_intel_bo(dev_priv->pwrctx); |
| 4961 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); | 4961 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); |
| 4962 | I915_READ(PWRCTXA); | 4962 | I915_READ(PWRCTXA); |
| 4963 | i915_gem_object_unpin(dev_priv->pwrctx); | 4963 | i915_gem_object_unpin(dev_priv->pwrctx); |
| @@ -4978,9 +4978,9 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
| 4978 | */ | 4978 | */ |
| 4979 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector) | 4979 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector) |
| 4980 | { | 4980 | { |
| 4981 | struct intel_output *intel_output = to_intel_output(connector); | 4981 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 4982 | 4982 | ||
| 4983 | return &intel_output->enc; | 4983 | return &intel_encoder->enc; |
| 4984 | } | 4984 | } |
| 4985 | 4985 | ||
| 4986 | /* | 4986 | /* |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 8e283f75941d..77e40cfcf216 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -55,23 +55,23 @@ struct intel_dp_priv { | |||
| 55 | uint8_t link_bw; | 55 | uint8_t link_bw; |
| 56 | uint8_t lane_count; | 56 | uint8_t lane_count; |
| 57 | uint8_t dpcd[4]; | 57 | uint8_t dpcd[4]; |
| 58 | struct intel_output *intel_output; | 58 | struct intel_encoder *intel_encoder; |
| 59 | struct i2c_adapter adapter; | 59 | struct i2c_adapter adapter; |
| 60 | struct i2c_algo_dp_aux_data algo; | 60 | struct i2c_algo_dp_aux_data algo; |
| 61 | }; | 61 | }; |
| 62 | 62 | ||
| 63 | static void | 63 | static void |
| 64 | intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | 64 | intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, |
| 65 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); | 65 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); |
| 66 | 66 | ||
| 67 | static void | 67 | static void |
| 68 | intel_dp_link_down(struct intel_output *intel_output, uint32_t DP); | 68 | intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP); |
| 69 | 69 | ||
| 70 | void | 70 | void |
| 71 | intel_edp_link_config (struct intel_output *intel_output, | 71 | intel_edp_link_config (struct intel_encoder *intel_encoder, |
| 72 | int *lane_num, int *link_bw) | 72 | int *lane_num, int *link_bw) |
| 73 | { | 73 | { |
| 74 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 74 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 75 | 75 | ||
| 76 | *lane_num = dp_priv->lane_count; | 76 | *lane_num = dp_priv->lane_count; |
| 77 | if (dp_priv->link_bw == DP_LINK_BW_1_62) | 77 | if (dp_priv->link_bw == DP_LINK_BW_1_62) |
| @@ -81,9 +81,9 @@ intel_edp_link_config (struct intel_output *intel_output, | |||
| 81 | } | 81 | } |
| 82 | 82 | ||
| 83 | static int | 83 | static int |
| 84 | intel_dp_max_lane_count(struct intel_output *intel_output) | 84 | intel_dp_max_lane_count(struct intel_encoder *intel_encoder) |
| 85 | { | 85 | { |
| 86 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 86 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 87 | int max_lane_count = 4; | 87 | int max_lane_count = 4; |
| 88 | 88 | ||
| 89 | if (dp_priv->dpcd[0] >= 0x11) { | 89 | if (dp_priv->dpcd[0] >= 0x11) { |
| @@ -99,9 +99,9 @@ intel_dp_max_lane_count(struct intel_output *intel_output) | |||
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | static int | 101 | static int |
| 102 | intel_dp_max_link_bw(struct intel_output *intel_output) | 102 | intel_dp_max_link_bw(struct intel_encoder *intel_encoder) |
| 103 | { | 103 | { |
| 104 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 104 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 105 | int max_link_bw = dp_priv->dpcd[1]; | 105 | int max_link_bw = dp_priv->dpcd[1]; |
| 106 | 106 | ||
| 107 | switch (max_link_bw) { | 107 | switch (max_link_bw) { |
| @@ -127,11 +127,11 @@ intel_dp_link_clock(uint8_t link_bw) | |||
| 127 | /* I think this is a fiction */ | 127 | /* I think this is a fiction */ |
| 128 | static int | 128 | static int |
| 129 | intel_dp_link_required(struct drm_device *dev, | 129 | intel_dp_link_required(struct drm_device *dev, |
| 130 | struct intel_output *intel_output, int pixel_clock) | 130 | struct intel_encoder *intel_encoder, int pixel_clock) |
| 131 | { | 131 | { |
| 132 | struct drm_i915_private *dev_priv = dev->dev_private; | 132 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 133 | 133 | ||
| 134 | if (IS_eDP(intel_output)) | 134 | if (IS_eDP(intel_encoder)) |
| 135 | return (pixel_clock * dev_priv->edp_bpp) / 8; | 135 | return (pixel_clock * dev_priv->edp_bpp) / 8; |
| 136 | else | 136 | else |
| 137 | return pixel_clock * 3; | 137 | return pixel_clock * 3; |
| @@ -141,11 +141,11 @@ static int | |||
| 141 | intel_dp_mode_valid(struct drm_connector *connector, | 141 | intel_dp_mode_valid(struct drm_connector *connector, |
| 142 | struct drm_display_mode *mode) | 142 | struct drm_display_mode *mode) |
| 143 | { | 143 | { |
| 144 | struct intel_output *intel_output = to_intel_output(connector); | 144 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 145 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); | 145 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); |
| 146 | int max_lanes = intel_dp_max_lane_count(intel_output); | 146 | int max_lanes = intel_dp_max_lane_count(intel_encoder); |
| 147 | 147 | ||
| 148 | if (intel_dp_link_required(connector->dev, intel_output, mode->clock) | 148 | if (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) |
| 149 | > max_link_clock * max_lanes) | 149 | > max_link_clock * max_lanes) |
| 150 | return MODE_CLOCK_HIGH; | 150 | return MODE_CLOCK_HIGH; |
| 151 | 151 | ||
| @@ -209,13 +209,13 @@ intel_hrawclk(struct drm_device *dev) | |||
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | static int | 211 | static int |
| 212 | intel_dp_aux_ch(struct intel_output *intel_output, | 212 | intel_dp_aux_ch(struct intel_encoder *intel_encoder, |
| 213 | uint8_t *send, int send_bytes, | 213 | uint8_t *send, int send_bytes, |
| 214 | uint8_t *recv, int recv_size) | 214 | uint8_t *recv, int recv_size) |
| 215 | { | 215 | { |
| 216 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 216 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 217 | uint32_t output_reg = dp_priv->output_reg; | 217 | uint32_t output_reg = dp_priv->output_reg; |
| 218 | struct drm_device *dev = intel_output->base.dev; | 218 | struct drm_device *dev = intel_encoder->base.dev; |
| 219 | struct drm_i915_private *dev_priv = dev->dev_private; | 219 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 220 | uint32_t ch_ctl = output_reg + 0x10; | 220 | uint32_t ch_ctl = output_reg + 0x10; |
| 221 | uint32_t ch_data = ch_ctl + 4; | 221 | uint32_t ch_data = ch_ctl + 4; |
| @@ -230,7 +230,7 @@ intel_dp_aux_ch(struct intel_output *intel_output, | |||
| 230 | * and would like to run at 2MHz. So, take the | 230 | * and would like to run at 2MHz. So, take the |
| 231 | * hrawclk value and divide by 2 and use that | 231 | * hrawclk value and divide by 2 and use that |
| 232 | */ | 232 | */ |
| 233 | if (IS_eDP(intel_output)) | 233 | if (IS_eDP(intel_encoder)) |
| 234 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ | 234 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ |
| 235 | else if (HAS_PCH_SPLIT(dev)) | 235 | else if (HAS_PCH_SPLIT(dev)) |
| 236 | aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ | 236 | aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ |
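The aux-channel hunk above picks a clock divider so the AUX engine runs near 2 MHz: eDP uses a fixed 450 MHz input (divider 225), PCH platforms a fixed 125 MHz input (divider 62), and otherwise the divider is derived from hrawclk. A tiny sketch of that selection; the non-fixed case is simplified to the divide-by-two rule described in the comment above, and the function name is illustrative:

#include <stdio.h>

/* Pick the divisor that brings the AUX engine's input clock (in MHz)
 * down to roughly 2 MHz. */
static int aux_clock_divider(int is_edp, int has_pch_split, int hrawclk_mhz)
{
        if (is_edp)
                return 225;             /* 450 MHz eDP input clock */
        if (has_pch_split)
                return 62;              /* 125 MHz fixed input clock */
        return hrawclk_mhz / 2;         /* e.g. 200 MHz / 100 ~= 2 MHz */
}

int main(void)
{
        printf("divider = %d\n", aux_clock_divider(0, 0, 200));
        return 0;
}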
| @@ -313,7 +313,7 @@ intel_dp_aux_ch(struct intel_output *intel_output, | |||
| 313 | 313 | ||
| 314 | /* Write data to the aux channel in native mode */ | 314 | /* Write data to the aux channel in native mode */ |
| 315 | static int | 315 | static int |
| 316 | intel_dp_aux_native_write(struct intel_output *intel_output, | 316 | intel_dp_aux_native_write(struct intel_encoder *intel_encoder, |
| 317 | uint16_t address, uint8_t *send, int send_bytes) | 317 | uint16_t address, uint8_t *send, int send_bytes) |
| 318 | { | 318 | { |
| 319 | int ret; | 319 | int ret; |
| @@ -330,7 +330,7 @@ intel_dp_aux_native_write(struct intel_output *intel_output, | |||
| 330 | memcpy(&msg[4], send, send_bytes); | 330 | memcpy(&msg[4], send, send_bytes); |
| 331 | msg_bytes = send_bytes + 4; | 331 | msg_bytes = send_bytes + 4; |
| 332 | for (;;) { | 332 | for (;;) { |
| 333 | ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1); | 333 | ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1); |
| 334 | if (ret < 0) | 334 | if (ret < 0) |
| 335 | return ret; | 335 | return ret; |
| 336 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | 336 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) |
| @@ -345,15 +345,15 @@ intel_dp_aux_native_write(struct intel_output *intel_output, | |||
| 345 | 345 | ||
| 346 | /* Write a single byte to the aux channel in native mode */ | 346 | /* Write a single byte to the aux channel in native mode */ |
| 347 | static int | 347 | static int |
| 348 | intel_dp_aux_native_write_1(struct intel_output *intel_output, | 348 | intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder, |
| 349 | uint16_t address, uint8_t byte) | 349 | uint16_t address, uint8_t byte) |
| 350 | { | 350 | { |
| 351 | return intel_dp_aux_native_write(intel_output, address, &byte, 1); | 351 | return intel_dp_aux_native_write(intel_encoder, address, &byte, 1); |
| 352 | } | 352 | } |
| 353 | 353 | ||
| 354 | /* read bytes from a native aux channel */ | 354 | /* read bytes from a native aux channel */ |
| 355 | static int | 355 | static int |
| 356 | intel_dp_aux_native_read(struct intel_output *intel_output, | 356 | intel_dp_aux_native_read(struct intel_encoder *intel_encoder, |
| 357 | uint16_t address, uint8_t *recv, int recv_bytes) | 357 | uint16_t address, uint8_t *recv, int recv_bytes) |
| 358 | { | 358 | { |
| 359 | uint8_t msg[4]; | 359 | uint8_t msg[4]; |
| @@ -372,7 +372,7 @@ intel_dp_aux_native_read(struct intel_output *intel_output, | |||
| 372 | reply_bytes = recv_bytes + 1; | 372 | reply_bytes = recv_bytes + 1; |
| 373 | 373 | ||
| 374 | for (;;) { | 374 | for (;;) { |
| 375 | ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, | 375 | ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, |
| 376 | reply, reply_bytes); | 376 | reply, reply_bytes); |
| 377 | if (ret == 0) | 377 | if (ret == 0) |
| 378 | return -EPROTO; | 378 | return -EPROTO; |
| @@ -398,7 +398,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
| 398 | struct intel_dp_priv *dp_priv = container_of(adapter, | 398 | struct intel_dp_priv *dp_priv = container_of(adapter, |
| 399 | struct intel_dp_priv, | 399 | struct intel_dp_priv, |
| 400 | adapter); | 400 | adapter); |
| 401 | struct intel_output *intel_output = dp_priv->intel_output; | 401 | struct intel_encoder *intel_encoder = dp_priv->intel_encoder; |
| 402 | uint16_t address = algo_data->address; | 402 | uint16_t address = algo_data->address; |
| 403 | uint8_t msg[5]; | 403 | uint8_t msg[5]; |
| 404 | uint8_t reply[2]; | 404 | uint8_t reply[2]; |
| @@ -437,7 +437,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
| 437 | } | 437 | } |
| 438 | 438 | ||
| 439 | for (;;) { | 439 | for (;;) { |
| 440 | ret = intel_dp_aux_ch(intel_output, | 440 | ret = intel_dp_aux_ch(intel_encoder, |
| 441 | msg, msg_bytes, | 441 | msg, msg_bytes, |
| 442 | reply, reply_bytes); | 442 | reply, reply_bytes); |
| 443 | if (ret < 0) { | 443 | if (ret < 0) { |
| @@ -465,9 +465,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
| 465 | } | 465 | } |
| 466 | 466 | ||
| 467 | static int | 467 | static int |
| 468 | intel_dp_i2c_init(struct intel_output *intel_output, const char *name) | 468 | intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name) |
| 469 | { | 469 | { |
| 470 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 470 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 471 | 471 | ||
| 472 | DRM_DEBUG_KMS("i2c_init %s\n", name); | 472 | DRM_DEBUG_KMS("i2c_init %s\n", name); |
| 473 | dp_priv->algo.running = false; | 473 | dp_priv->algo.running = false; |
| @@ -480,7 +480,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name) | |||
| 480 | strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); | 480 | strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); |
| 481 | dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; | 481 | dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; |
| 482 | dp_priv->adapter.algo_data = &dp_priv->algo; | 482 | dp_priv->adapter.algo_data = &dp_priv->algo; |
| 483 | dp_priv->adapter.dev.parent = &intel_output->base.kdev; | 483 | dp_priv->adapter.dev.parent = &intel_encoder->base.kdev; |
| 484 | 484 | ||
| 485 | return i2c_dp_aux_add_bus(&dp_priv->adapter); | 485 | return i2c_dp_aux_add_bus(&dp_priv->adapter); |
| 486 | } | 486 | } |
| @@ -489,18 +489,18 @@ static bool | |||
| 489 | intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | 489 | intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, |
| 490 | struct drm_display_mode *adjusted_mode) | 490 | struct drm_display_mode *adjusted_mode) |
| 491 | { | 491 | { |
| 492 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 492 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
| 493 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 493 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 494 | int lane_count, clock; | 494 | int lane_count, clock; |
| 495 | int max_lane_count = intel_dp_max_lane_count(intel_output); | 495 | int max_lane_count = intel_dp_max_lane_count(intel_encoder); |
| 496 | int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0; | 496 | int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0; |
| 497 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; | 497 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; |
| 498 | 498 | ||
| 499 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 499 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
| 500 | for (clock = 0; clock <= max_clock; clock++) { | 500 | for (clock = 0; clock <= max_clock; clock++) { |
| 501 | int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; | 501 | int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; |
| 502 | 502 | ||
| 503 | if (intel_dp_link_required(encoder->dev, intel_output, mode->clock) | 503 | if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) |
| 504 | <= link_avail) { | 504 | <= link_avail) { |
| 505 | dp_priv->link_bw = bws[clock]; | 505 | dp_priv->link_bw = bws[clock]; |
| 506 | dp_priv->lane_count = lane_count; | 506 | dp_priv->lane_count = lane_count; |
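The mode_fixup hunk above walks the lane counts (1, 2, 4) and the two link rates, picking the first combination whose bandwidth covers the mode. A standalone sketch of that search, using the same convention the driver appears to use (link symbol clock in kHz, roughly one byte per lane per symbol after 8b/10b, 24 bpp assumed); the function and variable names are stand-ins:

#include <stdio.h>

/* Link symbol clocks in kHz for the 1.62 and 2.7 Gbps DP rates. */
static const int link_clocks[] = { 162000, 270000 };

/*
 * Find the smallest lane count / link rate pair that can carry a mode.
 * required = pixel_clock_khz * 3 mirrors the 24 bpp estimate: 3 bytes
 * per pixel against ~1 byte per lane per link symbol.
 */
static int pick_link(int pixel_clock_khz, int *lanes_out, int *clock_out)
{
        int required = pixel_clock_khz * 3;
        int lanes, clock;

        for (lanes = 1; lanes <= 4; lanes <<= 1) {
                for (clock = 0; clock < 2; clock++) {
                        if (required <= link_clocks[clock] * lanes) {
                                *lanes_out = lanes;
                                *clock_out = link_clocks[clock];
                                return 0;
                        }
                }
        }
        return -1;      /* mode does not fit on this link */
}

int main(void)
{
        int lanes, clock;

        if (pick_link(148500 /* 1080p60 */, &lanes, &clock) == 0)
                printf("%d lane(s) at %d kHz\n", lanes, clock);
        return 0;
}

Called with a 1080p pixel clock, the search lands on 2 lanes at 270000 kHz, which is how dp_priv->lane_count and dp_priv->link_bw get filled in above.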
| @@ -562,16 +562,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
| 562 | struct intel_dp_m_n m_n; | 562 | struct intel_dp_m_n m_n; |
| 563 | 563 | ||
| 564 | /* | 564 | /* |
| 565 | * Find the lane count in the intel_output private | 565 | * Find the lane count in the intel_encoder private |
| 566 | */ | 566 | */ |
| 567 | list_for_each_entry(connector, &mode_config->connector_list, head) { | 567 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
| 568 | struct intel_output *intel_output = to_intel_output(connector); | 568 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 569 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 569 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 570 | 570 | ||
| 571 | if (!connector->encoder || connector->encoder->crtc != crtc) | 571 | if (!connector->encoder || connector->encoder->crtc != crtc) |
| 572 | continue; | 572 | continue; |
| 573 | 573 | ||
| 574 | if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) { | 574 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { |
| 575 | lane_count = dp_priv->lane_count; | 575 | lane_count = dp_priv->lane_count; |
| 576 | break; | 576 | break; |
| 577 | } | 577 | } |
| @@ -626,9 +626,9 @@ static void | |||
| 626 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | 626 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, |
| 627 | struct drm_display_mode *adjusted_mode) | 627 | struct drm_display_mode *adjusted_mode) |
| 628 | { | 628 | { |
| 629 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 629 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
| 630 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 630 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 631 | struct drm_crtc *crtc = intel_output->enc.crtc; | 631 | struct drm_crtc *crtc = intel_encoder->enc.crtc; |
| 632 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 632 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 633 | 633 | ||
| 634 | dp_priv->DP = (DP_LINK_TRAIN_OFF | | 634 | dp_priv->DP = (DP_LINK_TRAIN_OFF | |
| @@ -667,7 +667,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
| 667 | if (intel_crtc->pipe == 1) | 667 | if (intel_crtc->pipe == 1) |
| 668 | dp_priv->DP |= DP_PIPEB_SELECT; | 668 | dp_priv->DP |= DP_PIPEB_SELECT; |
| 669 | 669 | ||
| 670 | if (IS_eDP(intel_output)) { | 670 | if (IS_eDP(intel_encoder)) { |
| 671 | /* don't miss out required setting for eDP */ | 671 | /* don't miss out required setting for eDP */ |
| 672 | dp_priv->DP |= DP_PLL_ENABLE; | 672 | dp_priv->DP |= DP_PLL_ENABLE; |
| 673 | if (adjusted_mode->clock < 200000) | 673 | if (adjusted_mode->clock < 200000) |
| @@ -702,22 +702,22 @@ static void ironlake_edp_backlight_off (struct drm_device *dev) | |||
| 702 | static void | 702 | static void |
| 703 | intel_dp_dpms(struct drm_encoder *encoder, int mode) | 703 | intel_dp_dpms(struct drm_encoder *encoder, int mode) |
| 704 | { | 704 | { |
| 705 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 705 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
| 706 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 706 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 707 | struct drm_device *dev = intel_output->base.dev; | 707 | struct drm_device *dev = intel_encoder->base.dev; |
| 708 | struct drm_i915_private *dev_priv = dev->dev_private; | 708 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 709 | uint32_t dp_reg = I915_READ(dp_priv->output_reg); | 709 | uint32_t dp_reg = I915_READ(dp_priv->output_reg); |
| 710 | 710 | ||
| 711 | if (mode != DRM_MODE_DPMS_ON) { | 711 | if (mode != DRM_MODE_DPMS_ON) { |
| 712 | if (dp_reg & DP_PORT_EN) { | 712 | if (dp_reg & DP_PORT_EN) { |
| 713 | intel_dp_link_down(intel_output, dp_priv->DP); | 713 | intel_dp_link_down(intel_encoder, dp_priv->DP); |
| 714 | if (IS_eDP(intel_output)) | 714 | if (IS_eDP(intel_encoder)) |
| 715 | ironlake_edp_backlight_off(dev); | 715 | ironlake_edp_backlight_off(dev); |
| 716 | } | 716 | } |
| 717 | } else { | 717 | } else { |
| 718 | if (!(dp_reg & DP_PORT_EN)) { | 718 | if (!(dp_reg & DP_PORT_EN)) { |
| 719 | intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); | 719 | intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); |
| 720 | if (IS_eDP(intel_output)) | 720 | if (IS_eDP(intel_encoder)) |
| 721 | ironlake_edp_backlight_on(dev); | 721 | ironlake_edp_backlight_on(dev); |
| 722 | } | 722 | } |
| 723 | } | 723 | } |
| @@ -729,12 +729,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
| 729 | * link status information | 729 | * link status information |
| 730 | */ | 730 | */ |
| 731 | static bool | 731 | static bool |
| 732 | intel_dp_get_link_status(struct intel_output *intel_output, | 732 | intel_dp_get_link_status(struct intel_encoder *intel_encoder, |
| 733 | uint8_t link_status[DP_LINK_STATUS_SIZE]) | 733 | uint8_t link_status[DP_LINK_STATUS_SIZE]) |
| 734 | { | 734 | { |
| 735 | int ret; | 735 | int ret; |
| 736 | 736 | ||
| 737 | ret = intel_dp_aux_native_read(intel_output, | 737 | ret = intel_dp_aux_native_read(intel_encoder, |
| 738 | DP_LANE0_1_STATUS, | 738 | DP_LANE0_1_STATUS, |
| 739 | link_status, DP_LINK_STATUS_SIZE); | 739 | link_status, DP_LINK_STATUS_SIZE); |
| 740 | if (ret != DP_LINK_STATUS_SIZE) | 740 | if (ret != DP_LINK_STATUS_SIZE) |
| @@ -752,13 +752,13 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], | |||
| 752 | static void | 752 | static void |
| 753 | intel_dp_save(struct drm_connector *connector) | 753 | intel_dp_save(struct drm_connector *connector) |
| 754 | { | 754 | { |
| 755 | struct intel_output *intel_output = to_intel_output(connector); | 755 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 756 | struct drm_device *dev = intel_output->base.dev; | 756 | struct drm_device *dev = intel_encoder->base.dev; |
| 757 | struct drm_i915_private *dev_priv = dev->dev_private; | 757 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 758 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 758 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 759 | 759 | ||
| 760 | dp_priv->save_DP = I915_READ(dp_priv->output_reg); | 760 | dp_priv->save_DP = I915_READ(dp_priv->output_reg); |
| 761 | intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET, | 761 | intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET, |
| 762 | dp_priv->save_link_configuration, | 762 | dp_priv->save_link_configuration, |
| 763 | sizeof (dp_priv->save_link_configuration)); | 763 | sizeof (dp_priv->save_link_configuration)); |
| 764 | } | 764 | } |
| @@ -825,7 +825,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing) | |||
| 825 | } | 825 | } |
| 826 | 826 | ||
| 827 | static void | 827 | static void |
| 828 | intel_get_adjust_train(struct intel_output *intel_output, | 828 | intel_get_adjust_train(struct intel_encoder *intel_encoder, |
| 829 | uint8_t link_status[DP_LINK_STATUS_SIZE], | 829 | uint8_t link_status[DP_LINK_STATUS_SIZE], |
| 830 | int lane_count, | 830 | int lane_count, |
| 831 | uint8_t train_set[4]) | 831 | uint8_t train_set[4]) |
| @@ -942,15 +942,15 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) | |||
| 942 | } | 942 | } |
| 943 | 943 | ||
| 944 | static bool | 944 | static bool |
| 945 | intel_dp_set_link_train(struct intel_output *intel_output, | 945 | intel_dp_set_link_train(struct intel_encoder *intel_encoder, |
| 946 | uint32_t dp_reg_value, | 946 | uint32_t dp_reg_value, |
| 947 | uint8_t dp_train_pat, | 947 | uint8_t dp_train_pat, |
| 948 | uint8_t train_set[4], | 948 | uint8_t train_set[4], |
| 949 | bool first) | 949 | bool first) |
| 950 | { | 950 | { |
| 951 | struct drm_device *dev = intel_output->base.dev; | 951 | struct drm_device *dev = intel_encoder->base.dev; |
| 952 | struct drm_i915_private *dev_priv = dev->dev_private; | 952 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 953 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 953 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 954 | int ret; | 954 | int ret; |
| 955 | 955 | ||
| 956 | I915_WRITE(dp_priv->output_reg, dp_reg_value); | 956 | I915_WRITE(dp_priv->output_reg, dp_reg_value); |
| @@ -958,11 +958,11 @@ intel_dp_set_link_train(struct intel_output *intel_output, | |||
| 958 | if (first) | 958 | if (first) |
| 959 | intel_wait_for_vblank(dev); | 959 | intel_wait_for_vblank(dev); |
| 960 | 960 | ||
| 961 | intel_dp_aux_native_write_1(intel_output, | 961 | intel_dp_aux_native_write_1(intel_encoder, |
| 962 | DP_TRAINING_PATTERN_SET, | 962 | DP_TRAINING_PATTERN_SET, |
| 963 | dp_train_pat); | 963 | dp_train_pat); |
| 964 | 964 | ||
| 965 | ret = intel_dp_aux_native_write(intel_output, | 965 | ret = intel_dp_aux_native_write(intel_encoder, |
| 966 | DP_TRAINING_LANE0_SET, train_set, 4); | 966 | DP_TRAINING_LANE0_SET, train_set, 4); |
| 967 | if (ret != 4) | 967 | if (ret != 4) |
| 968 | return false; | 968 | return false; |
| @@ -971,12 +971,12 @@ intel_dp_set_link_train(struct intel_output *intel_output, | |||
| 971 | } | 971 | } |
| 972 | 972 | ||
| 973 | static void | 973 | static void |
| 974 | intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | 974 | intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, |
| 975 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) | 975 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) |
| 976 | { | 976 | { |
| 977 | struct drm_device *dev = intel_output->base.dev; | 977 | struct drm_device *dev = intel_encoder->base.dev; |
| 978 | struct drm_i915_private *dev_priv = dev->dev_private; | 978 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 979 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 979 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 980 | uint8_t train_set[4]; | 980 | uint8_t train_set[4]; |
| 981 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | 981 | uint8_t link_status[DP_LINK_STATUS_SIZE]; |
| 982 | int i; | 982 | int i; |
| @@ -987,7 +987,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | |||
| 987 | int tries; | 987 | int tries; |
| 988 | 988 | ||
| 989 | /* Write the link configuration data */ | 989 | /* Write the link configuration data */ |
| 990 | intel_dp_aux_native_write(intel_output, 0x100, | 990 | intel_dp_aux_native_write(intel_encoder, 0x100, |
| 991 | link_configuration, DP_LINK_CONFIGURATION_SIZE); | 991 | link_configuration, DP_LINK_CONFIGURATION_SIZE); |
| 992 | 992 | ||
| 993 | DP |= DP_PORT_EN; | 993 | DP |= DP_PORT_EN; |
| @@ -1001,14 +1001,14 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | |||
| 1001 | uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); | 1001 | uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); |
| 1002 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1002 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
| 1003 | 1003 | ||
| 1004 | if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1, | 1004 | if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1, |
| 1005 | DP_TRAINING_PATTERN_1, train_set, first)) | 1005 | DP_TRAINING_PATTERN_1, train_set, first)) |
| 1006 | break; | 1006 | break; |
| 1007 | first = false; | 1007 | first = false; |
| 1008 | /* Set training pattern 1 */ | 1008 | /* Set training pattern 1 */ |
| 1009 | 1009 | ||
| 1010 | udelay(100); | 1010 | udelay(100); |
| 1011 | if (!intel_dp_get_link_status(intel_output, link_status)) | 1011 | if (!intel_dp_get_link_status(intel_encoder, link_status)) |
| 1012 | break; | 1012 | break; |
| 1013 | 1013 | ||
| 1014 | if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { | 1014 | if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { |
| @@ -1033,7 +1033,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | |||
| 1033 | voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | 1033 | voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; |
| 1034 | 1034 | ||
| 1035 | /* Compute new train_set as requested by target */ | 1035 | /* Compute new train_set as requested by target */ |
| 1036 | intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); | 1036 | intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set); |
| 1037 | } | 1037 | } |
| 1038 | 1038 | ||
| 1039 | /* channel equalization */ | 1039 | /* channel equalization */ |
| @@ -1045,13 +1045,13 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | |||
| 1045 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1045 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
| 1046 | 1046 | ||
| 1047 | /* channel eq pattern */ | 1047 | /* channel eq pattern */ |
| 1048 | if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2, | 1048 | if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2, |
| 1049 | DP_TRAINING_PATTERN_2, train_set, | 1049 | DP_TRAINING_PATTERN_2, train_set, |
| 1050 | false)) | 1050 | false)) |
| 1051 | break; | 1051 | break; |
| 1052 | 1052 | ||
| 1053 | udelay(400); | 1053 | udelay(400); |
| 1054 | if (!intel_dp_get_link_status(intel_output, link_status)) | 1054 | if (!intel_dp_get_link_status(intel_encoder, link_status)) |
| 1055 | break; | 1055 | break; |
| 1056 | 1056 | ||
| 1057 | if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { | 1057 | if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { |
| @@ -1064,26 +1064,26 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | |||
| 1064 | break; | 1064 | break; |
| 1065 | 1065 | ||
| 1066 | /* Compute new train_set as requested by target */ | 1066 | /* Compute new train_set as requested by target */ |
| 1067 | intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); | 1067 | intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set); |
| 1068 | ++tries; | 1068 | ++tries; |
| 1069 | } | 1069 | } |
| 1070 | 1070 | ||
| 1071 | I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); | 1071 | I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); |
| 1072 | POSTING_READ(dp_priv->output_reg); | 1072 | POSTING_READ(dp_priv->output_reg); |
| 1073 | intel_dp_aux_native_write_1(intel_output, | 1073 | intel_dp_aux_native_write_1(intel_encoder, |
| 1074 | DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); | 1074 | DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); |
| 1075 | } | 1075 | } |
| 1076 | 1076 | ||
| 1077 | static void | 1077 | static void |
| 1078 | intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) | 1078 | intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) |
| 1079 | { | 1079 | { |
| 1080 | struct drm_device *dev = intel_output->base.dev; | 1080 | struct drm_device *dev = intel_encoder->base.dev; |
| 1081 | struct drm_i915_private *dev_priv = dev->dev_private; | 1081 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1082 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1082 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 1083 | 1083 | ||
| 1084 | DRM_DEBUG_KMS("\n"); | 1084 | DRM_DEBUG_KMS("\n"); |
| 1085 | 1085 | ||
| 1086 | if (IS_eDP(intel_output)) { | 1086 | if (IS_eDP(intel_encoder)) { |
| 1087 | DP &= ~DP_PLL_ENABLE; | 1087 | DP &= ~DP_PLL_ENABLE; |
| 1088 | I915_WRITE(dp_priv->output_reg, DP); | 1088 | I915_WRITE(dp_priv->output_reg, DP); |
| 1089 | POSTING_READ(dp_priv->output_reg); | 1089 | POSTING_READ(dp_priv->output_reg); |
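The training hunks above keep the two-phase flow: drive training pattern 1 until the sink reports clock recovery, then pattern 2 until channel equalization passes, re-reading the sink's requested voltage swing and pre-emphasis between attempts via intel_get_adjust_train(). The sketch below is schematic only; the sink_* and set_pattern() stubs fake the AUX/DPCD traffic and are not real driver functions:

#include <stdio.h>
#include <stdbool.h>
#include <string.h>

static int attempts;

static bool sink_ready(void)
{
        return ++attempts >= 2;     /* pretend the sink locks on try 2 */
}

static void sink_get_adjust_request(unsigned char train_set[4])
{
        memset(train_set, 0x01, 4); /* pretend it asks for more swing */
}

static void set_pattern(int pattern, const unsigned char train_set[4])
{
        printf("pattern %d, swing 0x%02x\n", pattern, train_set[0]);
}

static bool train_link(void)
{
        unsigned char train_set[4] = { 0 };
        int tries;

        /* Phase 1: clock recovery on training pattern 1. */
        for (tries = 0; tries < 5; tries++) {
                set_pattern(1, train_set);
                if (sink_ready())
                        break;
                sink_get_adjust_request(train_set);
        }
        if (tries == 5)
                return false;

        /* Phase 2: channel equalization on training pattern 2. */
        attempts = 0;
        for (tries = 0; tries < 5; tries++) {
                set_pattern(2, train_set);
                if (sink_ready())
                        return true;
                sink_get_adjust_request(train_set);
        }
        return false;
}

int main(void)
{
        printf("link %s\n", train_link() ? "trained" : "failed");
        return 0;
}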
| @@ -1096,7 +1096,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) | |||
| 1096 | 1096 | ||
| 1097 | udelay(17000); | 1097 | udelay(17000); |
| 1098 | 1098 | ||
| 1099 | if (IS_eDP(intel_output)) | 1099 | if (IS_eDP(intel_encoder)) |
| 1100 | DP |= DP_LINK_TRAIN_OFF; | 1100 | DP |= DP_LINK_TRAIN_OFF; |
| 1101 | I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); | 1101 | I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); |
| 1102 | POSTING_READ(dp_priv->output_reg); | 1102 | POSTING_READ(dp_priv->output_reg); |
| @@ -1105,13 +1105,13 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) | |||
| 1105 | static void | 1105 | static void |
| 1106 | intel_dp_restore(struct drm_connector *connector) | 1106 | intel_dp_restore(struct drm_connector *connector) |
| 1107 | { | 1107 | { |
| 1108 | struct intel_output *intel_output = to_intel_output(connector); | 1108 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1109 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1109 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 1110 | 1110 | ||
| 1111 | if (dp_priv->save_DP & DP_PORT_EN) | 1111 | if (dp_priv->save_DP & DP_PORT_EN) |
| 1112 | intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration); | 1112 | intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration); |
| 1113 | else | 1113 | else |
| 1114 | intel_dp_link_down(intel_output, dp_priv->save_DP); | 1114 | intel_dp_link_down(intel_encoder, dp_priv->save_DP); |
| 1115 | } | 1115 | } |
| 1116 | 1116 | ||
| 1117 | /* | 1117 | /* |
| @@ -1124,32 +1124,32 @@ intel_dp_restore(struct drm_connector *connector) | |||
| 1124 | */ | 1124 | */ |
| 1125 | 1125 | ||
| 1126 | static void | 1126 | static void |
| 1127 | intel_dp_check_link_status(struct intel_output *intel_output) | 1127 | intel_dp_check_link_status(struct intel_encoder *intel_encoder) |
| 1128 | { | 1128 | { |
| 1129 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1129 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 1130 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | 1130 | uint8_t link_status[DP_LINK_STATUS_SIZE]; |
| 1131 | 1131 | ||
| 1132 | if (!intel_output->enc.crtc) | 1132 | if (!intel_encoder->enc.crtc) |
| 1133 | return; | 1133 | return; |
| 1134 | 1134 | ||
| 1135 | if (!intel_dp_get_link_status(intel_output, link_status)) { | 1135 | if (!intel_dp_get_link_status(intel_encoder, link_status)) { |
| 1136 | intel_dp_link_down(intel_output, dp_priv->DP); | 1136 | intel_dp_link_down(intel_encoder, dp_priv->DP); |
| 1137 | return; | 1137 | return; |
| 1138 | } | 1138 | } |
| 1139 | 1139 | ||
| 1140 | if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) | 1140 | if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) |
| 1141 | intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); | 1141 | intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); |
| 1142 | } | 1142 | } |
| 1143 | 1143 | ||
| 1144 | static enum drm_connector_status | 1144 | static enum drm_connector_status |
| 1145 | ironlake_dp_detect(struct drm_connector *connector) | 1145 | ironlake_dp_detect(struct drm_connector *connector) |
| 1146 | { | 1146 | { |
| 1147 | struct intel_output *intel_output = to_intel_output(connector); | 1147 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1148 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1148 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 1149 | enum drm_connector_status status; | 1149 | enum drm_connector_status status; |
| 1150 | 1150 | ||
| 1151 | status = connector_status_disconnected; | 1151 | status = connector_status_disconnected; |
| 1152 | if (intel_dp_aux_native_read(intel_output, | 1152 | if (intel_dp_aux_native_read(intel_encoder, |
| 1153 | 0x000, dp_priv->dpcd, | 1153 | 0x000, dp_priv->dpcd, |
| 1154 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) | 1154 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) |
| 1155 | { | 1155 | { |
| @@ -1168,10 +1168,10 @@ ironlake_dp_detect(struct drm_connector *connector) | |||
| 1168 | static enum drm_connector_status | 1168 | static enum drm_connector_status |
| 1169 | intel_dp_detect(struct drm_connector *connector) | 1169 | intel_dp_detect(struct drm_connector *connector) |
| 1170 | { | 1170 | { |
| 1171 | struct intel_output *intel_output = to_intel_output(connector); | 1171 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1172 | struct drm_device *dev = intel_output->base.dev; | 1172 | struct drm_device *dev = intel_encoder->base.dev; |
| 1173 | struct drm_i915_private *dev_priv = dev->dev_private; | 1173 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1174 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1174 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 1175 | uint32_t temp, bit; | 1175 | uint32_t temp, bit; |
| 1176 | enum drm_connector_status status; | 1176 | enum drm_connector_status status; |
| 1177 | 1177 | ||
| @@ -1210,7 +1210,7 @@ intel_dp_detect(struct drm_connector *connector) | |||
| 1210 | return connector_status_disconnected; | 1210 | return connector_status_disconnected; |
| 1211 | 1211 | ||
| 1212 | status = connector_status_disconnected; | 1212 | status = connector_status_disconnected; |
| 1213 | if (intel_dp_aux_native_read(intel_output, | 1213 | if (intel_dp_aux_native_read(intel_encoder, |
| 1214 | 0x000, dp_priv->dpcd, | 1214 | 0x000, dp_priv->dpcd, |
| 1215 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) | 1215 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) |
| 1216 | { | 1216 | { |
| @@ -1222,20 +1222,20 @@ intel_dp_detect(struct drm_connector *connector) | |||
| 1222 | 1222 | ||
| 1223 | static int intel_dp_get_modes(struct drm_connector *connector) | 1223 | static int intel_dp_get_modes(struct drm_connector *connector) |
| 1224 | { | 1224 | { |
| 1225 | struct intel_output *intel_output = to_intel_output(connector); | 1225 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1226 | struct drm_device *dev = intel_output->base.dev; | 1226 | struct drm_device *dev = intel_encoder->base.dev; |
| 1227 | struct drm_i915_private *dev_priv = dev->dev_private; | 1227 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1228 | int ret; | 1228 | int ret; |
| 1229 | 1229 | ||
| 1230 | /* We should parse the EDID data and find out if it has an audio sink | 1230 | /* We should parse the EDID data and find out if it has an audio sink |
| 1231 | */ | 1231 | */ |
| 1232 | 1232 | ||
| 1233 | ret = intel_ddc_get_modes(intel_output); | 1233 | ret = intel_ddc_get_modes(intel_encoder); |
| 1234 | if (ret) | 1234 | if (ret) |
| 1235 | return ret; | 1235 | return ret; |
| 1236 | 1236 | ||
| 1237 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ | 1237 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ |
| 1238 | if (IS_eDP(intel_output)) { | 1238 | if (IS_eDP(intel_encoder)) { |
| 1239 | if (dev_priv->panel_fixed_mode != NULL) { | 1239 | if (dev_priv->panel_fixed_mode != NULL) { |
| 1240 | struct drm_display_mode *mode; | 1240 | struct drm_display_mode *mode; |
| 1241 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); | 1241 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); |
| @@ -1249,13 +1249,13 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
| 1249 | static void | 1249 | static void |
| 1250 | intel_dp_destroy (struct drm_connector *connector) | 1250 | intel_dp_destroy (struct drm_connector *connector) |
| 1251 | { | 1251 | { |
| 1252 | struct intel_output *intel_output = to_intel_output(connector); | 1252 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1253 | 1253 | ||
| 1254 | if (intel_output->i2c_bus) | 1254 | if (intel_encoder->i2c_bus) |
| 1255 | intel_i2c_destroy(intel_output->i2c_bus); | 1255 | intel_i2c_destroy(intel_encoder->i2c_bus); |
| 1256 | drm_sysfs_connector_remove(connector); | 1256 | drm_sysfs_connector_remove(connector); |
| 1257 | drm_connector_cleanup(connector); | 1257 | drm_connector_cleanup(connector); |
| 1258 | kfree(intel_output); | 1258 | kfree(intel_encoder); |
| 1259 | } | 1259 | } |
| 1260 | 1260 | ||
| 1261 | static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { | 1261 | static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { |
| @@ -1291,12 +1291,12 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { | |||
| 1291 | }; | 1291 | }; |
| 1292 | 1292 | ||
| 1293 | void | 1293 | void |
| 1294 | intel_dp_hot_plug(struct intel_output *intel_output) | 1294 | intel_dp_hot_plug(struct intel_encoder *intel_encoder) |
| 1295 | { | 1295 | { |
| 1296 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1296 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
| 1297 | 1297 | ||
| 1298 | if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) | 1298 | if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) |
| 1299 | intel_dp_check_link_status(intel_output); | 1299 | intel_dp_check_link_status(intel_encoder); |
| 1300 | } | 1300 | } |
| 1301 | 1301 | ||
| 1302 | void | 1302 | void |
| @@ -1304,53 +1304,53 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
| 1304 | { | 1304 | { |
| 1305 | struct drm_i915_private *dev_priv = dev->dev_private; | 1305 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1306 | struct drm_connector *connector; | 1306 | struct drm_connector *connector; |
| 1307 | struct intel_output *intel_output; | 1307 | struct intel_encoder *intel_encoder; |
| 1308 | struct intel_dp_priv *dp_priv; | 1308 | struct intel_dp_priv *dp_priv; |
| 1309 | const char *name = NULL; | 1309 | const char *name = NULL; |
| 1310 | 1310 | ||
| 1311 | intel_output = kcalloc(sizeof(struct intel_output) + | 1311 | intel_encoder = kcalloc(sizeof(struct intel_encoder) + |
| 1312 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); | 1312 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); |
| 1313 | if (!intel_output) | 1313 | if (!intel_encoder) |
| 1314 | return; | 1314 | return; |
| 1315 | 1315 | ||
| 1316 | dp_priv = (struct intel_dp_priv *)(intel_output + 1); | 1316 | dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); |
| 1317 | 1317 | ||
| 1318 | connector = &intel_output->base; | 1318 | connector = &intel_encoder->base; |
| 1319 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, | 1319 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, |
| 1320 | DRM_MODE_CONNECTOR_DisplayPort); | 1320 | DRM_MODE_CONNECTOR_DisplayPort); |
| 1321 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); | 1321 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); |
| 1322 | 1322 | ||
| 1323 | if (output_reg == DP_A) | 1323 | if (output_reg == DP_A) |
| 1324 | intel_output->type = INTEL_OUTPUT_EDP; | 1324 | intel_encoder->type = INTEL_OUTPUT_EDP; |
| 1325 | else | 1325 | else |
| 1326 | intel_output->type = INTEL_OUTPUT_DISPLAYPORT; | 1326 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; |
| 1327 | 1327 | ||
| 1328 | if (output_reg == DP_B || output_reg == PCH_DP_B) | 1328 | if (output_reg == DP_B || output_reg == PCH_DP_B) |
| 1329 | intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); | 1329 | intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); |
| 1330 | else if (output_reg == DP_C || output_reg == PCH_DP_C) | 1330 | else if (output_reg == DP_C || output_reg == PCH_DP_C) |
| 1331 | intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); | 1331 | intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); |
| 1332 | else if (output_reg == DP_D || output_reg == PCH_DP_D) | 1332 | else if (output_reg == DP_D || output_reg == PCH_DP_D) |
| 1333 | intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); | 1333 | intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); |
| 1334 | 1334 | ||
| 1335 | if (IS_eDP(intel_output)) | 1335 | if (IS_eDP(intel_encoder)) |
| 1336 | intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); | 1336 | intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); |
| 1337 | 1337 | ||
| 1338 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 1338 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
| 1339 | connector->interlace_allowed = true; | 1339 | connector->interlace_allowed = true; |
| 1340 | connector->doublescan_allowed = 0; | 1340 | connector->doublescan_allowed = 0; |
| 1341 | 1341 | ||
| 1342 | dp_priv->intel_output = intel_output; | 1342 | dp_priv->intel_encoder = intel_encoder; |
| 1343 | dp_priv->output_reg = output_reg; | 1343 | dp_priv->output_reg = output_reg; |
| 1344 | dp_priv->has_audio = false; | 1344 | dp_priv->has_audio = false; |
| 1345 | dp_priv->dpms_mode = DRM_MODE_DPMS_ON; | 1345 | dp_priv->dpms_mode = DRM_MODE_DPMS_ON; |
| 1346 | intel_output->dev_priv = dp_priv; | 1346 | intel_encoder->dev_priv = dp_priv; |
| 1347 | 1347 | ||
| 1348 | drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs, | 1348 | drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs, |
| 1349 | DRM_MODE_ENCODER_TMDS); | 1349 | DRM_MODE_ENCODER_TMDS); |
| 1350 | drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs); | 1350 | drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); |
| 1351 | 1351 | ||
| 1352 | drm_mode_connector_attach_encoder(&intel_output->base, | 1352 | drm_mode_connector_attach_encoder(&intel_encoder->base, |
| 1353 | &intel_output->enc); | 1353 | &intel_encoder->enc); |
| 1354 | drm_sysfs_connector_add(connector); | 1354 | drm_sysfs_connector_add(connector); |
| 1355 | 1355 | ||
| 1356 | /* Set up the DDC bus. */ | 1356 | /* Set up the DDC bus. */ |
| @@ -1378,10 +1378,10 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
| 1378 | break; | 1378 | break; |
| 1379 | } | 1379 | } |
| 1380 | 1380 | ||
| 1381 | intel_dp_i2c_init(intel_output, name); | 1381 | intel_dp_i2c_init(intel_encoder, name); |
| 1382 | 1382 | ||
| 1383 | intel_output->ddc_bus = &dp_priv->adapter; | 1383 | intel_encoder->ddc_bus = &dp_priv->adapter; |
| 1384 | intel_output->hot_plug = intel_dp_hot_plug; | 1384 | intel_encoder->hot_plug = intel_dp_hot_plug; |
| 1385 | 1385 | ||
| 1386 | if (output_reg == DP_A) { | 1386 | if (output_reg == DP_A) { |
| 1387 | /* initialize panel mode from VBT if available for eDP */ | 1387 | /* initialize panel mode from VBT if available for eDP */ |
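The DP initialization path above allocates the encoder wrapper and its private data as a single block and then carves the private structure out of the tail of that block. A minimal sketch of the layout, using only names from the hunk; note that kcalloc() is declared as kcalloc(n, size, flags), so the conventional spelling passes the element count first, although swapping the first two arguments, as the hunk does, requests the same number of bytes.

	/* One allocation covers both structures; the DP private data
	 * begins immediately after the struct intel_encoder.
	 */
	intel_encoder = kcalloc(1, sizeof(struct intel_encoder) +
				   sizeof(struct intel_dp_priv), GFP_KERNEL);
	if (!intel_encoder)
		return;

	/* (intel_encoder + 1) points one whole struct past the base,
	 * i.e. at the start of the private area.
	 */
	dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
	dp_priv->intel_encoder = intel_encoder;
	intel_encoder->dev_priv = dp_priv;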
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 3a467ca57857..e30253755f12 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -95,7 +95,7 @@ struct intel_framebuffer { | |||
| 95 | }; | 95 | }; |
| 96 | 96 | ||
| 97 | 97 | ||
| 98 | struct intel_output { | 98 | struct intel_encoder { |
| 99 | struct drm_connector base; | 99 | struct drm_connector base; |
| 100 | 100 | ||
| 101 | struct drm_encoder enc; | 101 | struct drm_encoder enc; |
| @@ -105,7 +105,7 @@ struct intel_output { | |||
| 105 | bool load_detect_temp; | 105 | bool load_detect_temp; |
| 106 | bool needs_tv_clock; | 106 | bool needs_tv_clock; |
| 107 | void *dev_priv; | 107 | void *dev_priv; |
| 108 | void (*hot_plug)(struct intel_output *); | 108 | void (*hot_plug)(struct intel_encoder *); |
| 109 | int crtc_mask; | 109 | int crtc_mask; |
| 110 | int clone_mask; | 110 | int clone_mask; |
| 111 | }; | 111 | }; |
| @@ -152,15 +152,15 @@ struct intel_crtc { | |||
| 152 | }; | 152 | }; |
| 153 | 153 | ||
| 154 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) | 154 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) |
| 155 | #define to_intel_output(x) container_of(x, struct intel_output, base) | 155 | #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) |
| 156 | #define enc_to_intel_output(x) container_of(x, struct intel_output, enc) | 156 | #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) |
| 157 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) | 157 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) |
| 158 | 158 | ||
| 159 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, | 159 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, |
| 160 | const char *name); | 160 | const char *name); |
| 161 | void intel_i2c_destroy(struct i2c_adapter *adapter); | 161 | void intel_i2c_destroy(struct i2c_adapter *adapter); |
| 162 | int intel_ddc_get_modes(struct intel_output *intel_output); | 162 | int intel_ddc_get_modes(struct intel_encoder *intel_encoder); |
| 163 | extern bool intel_ddc_probe(struct intel_output *intel_output); | 163 | extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); |
| 164 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable); | 164 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable); |
| 165 | void intel_i2c_reset_gmbus(struct drm_device *dev); | 165 | void intel_i2c_reset_gmbus(struct drm_device *dev); |
| 166 | 166 | ||
| @@ -175,7 +175,7 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); | |||
| 175 | void | 175 | void |
| 176 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 176 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
| 177 | struct drm_display_mode *adjusted_mode); | 177 | struct drm_display_mode *adjusted_mode); |
| 178 | extern void intel_edp_link_config (struct intel_output *, int *, int *); | 178 | extern void intel_edp_link_config (struct intel_encoder *, int *, int *); |
| 179 | 179 | ||
| 180 | 180 | ||
| 181 | extern int intel_panel_fitter_pipe (struct drm_device *dev); | 181 | extern int intel_panel_fitter_pipe (struct drm_device *dev); |
| @@ -191,10 +191,10 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | |||
| 191 | struct drm_file *file_priv); | 191 | struct drm_file *file_priv); |
| 192 | extern void intel_wait_for_vblank(struct drm_device *dev); | 192 | extern void intel_wait_for_vblank(struct drm_device *dev); |
| 193 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); | 193 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); |
| 194 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, | 194 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, |
| 195 | struct drm_display_mode *mode, | 195 | struct drm_display_mode *mode, |
| 196 | int *dpms_mode); | 196 | int *dpms_mode); |
| 197 | extern void intel_release_load_detect_pipe(struct intel_output *intel_output, | 197 | extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, |
| 198 | int dpms_mode); | 198 | int dpms_mode); |
| 199 | 199 | ||
| 200 | extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); | 200 | extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); |
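struct intel_encoder embeds both the drm_connector (base) and the drm_encoder (enc), so the two conversion macros above recover the same wrapper from whichever embedded object a DRM callback hands over, via container_of(). A hedged illustration of a connector-side caller; the function name is a placeholder, not driver code:

	/* to_intel_encoder(connector) expands to
	 *     container_of(connector, struct intel_encoder, base)
	 * and enc_to_intel_encoder(encoder) to
	 *     container_of(encoder, struct intel_encoder, enc);
	 * both subtract the offset of the embedded member to reach the
	 * enclosing struct intel_encoder.
	 */
	static int example_get_modes(struct drm_connector *connector)
	{
		struct intel_encoder *intel_encoder = to_intel_encoder(connector);

		return intel_ddc_get_modes(intel_encoder);
	}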
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 0427ca5a2514..ebf213c96b9c 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
| @@ -80,8 +80,8 @@ static struct intel_dvo_device intel_dvo_devices[] = { | |||
| 80 | static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) | 80 | static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) |
| 81 | { | 81 | { |
| 82 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; | 82 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; |
| 83 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 83 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
| 84 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 84 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
| 85 | u32 dvo_reg = dvo->dvo_reg; | 85 | u32 dvo_reg = dvo->dvo_reg; |
| 86 | u32 temp = I915_READ(dvo_reg); | 86 | u32 temp = I915_READ(dvo_reg); |
| 87 | 87 | ||
| @@ -99,8 +99,8 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) | |||
| 99 | static void intel_dvo_save(struct drm_connector *connector) | 99 | static void intel_dvo_save(struct drm_connector *connector) |
| 100 | { | 100 | { |
| 101 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 101 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
| 102 | struct intel_output *intel_output = to_intel_output(connector); | 102 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 103 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 103 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
| 104 | 104 | ||
| 105 | /* Each output should probably just save the registers it touches, | 105 | /* Each output should probably just save the registers it touches, |
| 106 | * but for now, use more overkill. | 106 | * but for now, use more overkill. |
| @@ -115,8 +115,8 @@ static void intel_dvo_save(struct drm_connector *connector) | |||
| 115 | static void intel_dvo_restore(struct drm_connector *connector) | 115 | static void intel_dvo_restore(struct drm_connector *connector) |
| 116 | { | 116 | { |
| 117 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 117 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
| 118 | struct intel_output *intel_output = to_intel_output(connector); | 118 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 119 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 119 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
| 120 | 120 | ||
| 121 | dvo->dev_ops->restore(dvo); | 121 | dvo->dev_ops->restore(dvo); |
| 122 | 122 | ||
| @@ -128,8 +128,8 @@ static void intel_dvo_restore(struct drm_connector *connector) | |||
| 128 | static int intel_dvo_mode_valid(struct drm_connector *connector, | 128 | static int intel_dvo_mode_valid(struct drm_connector *connector, |
| 129 | struct drm_display_mode *mode) | 129 | struct drm_display_mode *mode) |
| 130 | { | 130 | { |
| 131 | struct intel_output *intel_output = to_intel_output(connector); | 131 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 132 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 132 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
| 133 | 133 | ||
| 134 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 134 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
| 135 | return MODE_NO_DBLESCAN; | 135 | return MODE_NO_DBLESCAN; |
| @@ -150,8 +150,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, | |||
| 150 | struct drm_display_mode *mode, | 150 | struct drm_display_mode *mode, |
| 151 | struct drm_display_mode *adjusted_mode) | 151 | struct drm_display_mode *adjusted_mode) |
| 152 | { | 152 | { |
| 153 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 153 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
| 154 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 154 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
| 155 | 155 | ||
| 156 | /* If we have timings from the BIOS for the panel, put them in | 156 | /* If we have timings from the BIOS for the panel, put them in |
| 157 | * to the adjusted mode. The CRTC will be set up for this mode, | 157 | * to the adjusted mode. The CRTC will be set up for this mode, |
| @@ -186,8 +186,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, | |||
| 186 | struct drm_device *dev = encoder->dev; | 186 | struct drm_device *dev = encoder->dev; |
| 187 | struct drm_i915_private *dev_priv = dev->dev_private; | 187 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 188 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 188 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
| 189 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 189 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
| 190 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 190 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
| 191 | int pipe = intel_crtc->pipe; | 191 | int pipe = intel_crtc->pipe; |
| 192 | u32 dvo_val; | 192 | u32 dvo_val; |
| 193 | u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; | 193 | u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; |
| @@ -241,23 +241,23 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, | |||
| 241 | */ | 241 | */ |
| 242 | static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) | 242 | static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) |
| 243 | { | 243 | { |
| 244 | struct intel_output *intel_output = to_intel_output(connector); | 244 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 245 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 245 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
| 246 | 246 | ||
| 247 | return dvo->dev_ops->detect(dvo); | 247 | return dvo->dev_ops->detect(dvo); |
| 248 | } | 248 | } |
| 249 | 249 | ||
| 250 | static int intel_dvo_get_modes(struct drm_connector *connector) | 250 | static int intel_dvo_get_modes(struct drm_connector *connector) |
| 251 | { | 251 | { |
| 252 | struct intel_output *intel_output = to_intel_output(connector); | 252 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 253 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 253 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
| 254 | 254 | ||
| 255 | /* We should probably have an i2c driver get_modes function for those | 255 | /* We should probably have an i2c driver get_modes function for those |
| 256 | * devices which will have a fixed set of modes determined by the chip | 256 | * devices which will have a fixed set of modes determined by the chip |
| 257 | * (TV-out, for example), but for now with just TMDS and LVDS, | 257 | * (TV-out, for example), but for now with just TMDS and LVDS, |
| 258 | * that's not the case. | 258 | * that's not the case. |
| 259 | */ | 259 | */ |
| 260 | intel_ddc_get_modes(intel_output); | 260 | intel_ddc_get_modes(intel_encoder); |
| 261 | if (!list_empty(&connector->probed_modes)) | 261 | if (!list_empty(&connector->probed_modes)) |
| 262 | return 1; | 262 | return 1; |
| 263 | 263 | ||
| @@ -275,8 +275,8 @@ static int intel_dvo_get_modes(struct drm_connector *connector) | |||
| 275 | 275 | ||
| 276 | static void intel_dvo_destroy (struct drm_connector *connector) | 276 | static void intel_dvo_destroy (struct drm_connector *connector) |
| 277 | { | 277 | { |
| 278 | struct intel_output *intel_output = to_intel_output(connector); | 278 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 279 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 279 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
| 280 | 280 | ||
| 281 | if (dvo) { | 281 | if (dvo) { |
| 282 | if (dvo->dev_ops->destroy) | 282 | if (dvo->dev_ops->destroy) |
| @@ -286,13 +286,13 @@ static void intel_dvo_destroy (struct drm_connector *connector) | |||
| 286 | /* no need, in i830_dvoices[] now */ | 286 | /* no need, in i830_dvoices[] now */ |
| 287 | //kfree(dvo); | 287 | //kfree(dvo); |
| 288 | } | 288 | } |
| 289 | if (intel_output->i2c_bus) | 289 | if (intel_encoder->i2c_bus) |
| 290 | intel_i2c_destroy(intel_output->i2c_bus); | 290 | intel_i2c_destroy(intel_encoder->i2c_bus); |
| 291 | if (intel_output->ddc_bus) | 291 | if (intel_encoder->ddc_bus) |
| 292 | intel_i2c_destroy(intel_output->ddc_bus); | 292 | intel_i2c_destroy(intel_encoder->ddc_bus); |
| 293 | drm_sysfs_connector_remove(connector); | 293 | drm_sysfs_connector_remove(connector); |
| 294 | drm_connector_cleanup(connector); | 294 | drm_connector_cleanup(connector); |
| 295 | kfree(intel_output); | 295 | kfree(intel_encoder); |
| 296 | } | 296 | } |
| 297 | 297 | ||
| 298 | #ifdef RANDR_GET_CRTC_INTERFACE | 298 | #ifdef RANDR_GET_CRTC_INTERFACE |
| @@ -300,8 +300,8 @@ static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector) | |||
| 300 | { | 300 | { |
| 301 | struct drm_device *dev = connector->dev; | 301 | struct drm_device *dev = connector->dev; |
| 302 | struct drm_i915_private *dev_priv = dev->dev_private; | 302 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 303 | struct intel_output *intel_output = to_intel_output(connector); | 303 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 304 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 304 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
| 305 | int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); | 305 | int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); |
| 306 | 306 | ||
| 307 | return intel_pipe_to_crtc(pScrn, pipe); | 307 | return intel_pipe_to_crtc(pScrn, pipe); |
| @@ -352,8 +352,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector) | |||
| 352 | { | 352 | { |
| 353 | struct drm_device *dev = connector->dev; | 353 | struct drm_device *dev = connector->dev; |
| 354 | struct drm_i915_private *dev_priv = dev->dev_private; | 354 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 355 | struct intel_output *intel_output = to_intel_output(connector); | 355 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 356 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 356 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
| 357 | uint32_t dvo_reg = dvo->dvo_reg; | 357 | uint32_t dvo_reg = dvo->dvo_reg; |
| 358 | uint32_t dvo_val = I915_READ(dvo_reg); | 358 | uint32_t dvo_val = I915_READ(dvo_reg); |
| 359 | struct drm_display_mode *mode = NULL; | 359 | struct drm_display_mode *mode = NULL; |
| @@ -383,24 +383,24 @@ intel_dvo_get_current_mode (struct drm_connector *connector) | |||
| 383 | 383 | ||
| 384 | void intel_dvo_init(struct drm_device *dev) | 384 | void intel_dvo_init(struct drm_device *dev) |
| 385 | { | 385 | { |
| 386 | struct intel_output *intel_output; | 386 | struct intel_encoder *intel_encoder; |
| 387 | struct intel_dvo_device *dvo; | 387 | struct intel_dvo_device *dvo; |
| 388 | struct i2c_adapter *i2cbus = NULL; | 388 | struct i2c_adapter *i2cbus = NULL; |
| 389 | int ret = 0; | 389 | int ret = 0; |
| 390 | int i; | 390 | int i; |
| 391 | int encoder_type = DRM_MODE_ENCODER_NONE; | 391 | int encoder_type = DRM_MODE_ENCODER_NONE; |
| 392 | intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); | 392 | intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL); |
| 393 | if (!intel_output) | 393 | if (!intel_encoder) |
| 394 | return; | 394 | return; |
| 395 | 395 | ||
| 396 | /* Set up the DDC bus */ | 396 | /* Set up the DDC bus */ |
| 397 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); | 397 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); |
| 398 | if (!intel_output->ddc_bus) | 398 | if (!intel_encoder->ddc_bus) |
| 399 | goto free_intel; | 399 | goto free_intel; |
| 400 | 400 | ||
| 401 | /* Now, try to find a controller */ | 401 | /* Now, try to find a controller */ |
| 402 | for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { | 402 | for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { |
| 403 | struct drm_connector *connector = &intel_output->base; | 403 | struct drm_connector *connector = &intel_encoder->base; |
| 404 | int gpio; | 404 | int gpio; |
| 405 | 405 | ||
| 406 | dvo = &intel_dvo_devices[i]; | 406 | dvo = &intel_dvo_devices[i]; |
| @@ -435,11 +435,11 @@ void intel_dvo_init(struct drm_device *dev) | |||
| 435 | if (!ret) | 435 | if (!ret) |
| 436 | continue; | 436 | continue; |
| 437 | 437 | ||
| 438 | intel_output->type = INTEL_OUTPUT_DVO; | 438 | intel_encoder->type = INTEL_OUTPUT_DVO; |
| 439 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 439 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
| 440 | switch (dvo->type) { | 440 | switch (dvo->type) { |
| 441 | case INTEL_DVO_CHIP_TMDS: | 441 | case INTEL_DVO_CHIP_TMDS: |
| 442 | intel_output->clone_mask = | 442 | intel_encoder->clone_mask = |
| 443 | (1 << INTEL_DVO_TMDS_CLONE_BIT) | | 443 | (1 << INTEL_DVO_TMDS_CLONE_BIT) | |
| 444 | (1 << INTEL_ANALOG_CLONE_BIT); | 444 | (1 << INTEL_ANALOG_CLONE_BIT); |
| 445 | drm_connector_init(dev, connector, | 445 | drm_connector_init(dev, connector, |
| @@ -448,7 +448,7 @@ void intel_dvo_init(struct drm_device *dev) | |||
| 448 | encoder_type = DRM_MODE_ENCODER_TMDS; | 448 | encoder_type = DRM_MODE_ENCODER_TMDS; |
| 449 | break; | 449 | break; |
| 450 | case INTEL_DVO_CHIP_LVDS: | 450 | case INTEL_DVO_CHIP_LVDS: |
| 451 | intel_output->clone_mask = | 451 | intel_encoder->clone_mask = |
| 452 | (1 << INTEL_DVO_LVDS_CLONE_BIT); | 452 | (1 << INTEL_DVO_LVDS_CLONE_BIT); |
| 453 | drm_connector_init(dev, connector, | 453 | drm_connector_init(dev, connector, |
| 454 | &intel_dvo_connector_funcs, | 454 | &intel_dvo_connector_funcs, |
| @@ -463,16 +463,16 @@ void intel_dvo_init(struct drm_device *dev) | |||
| 463 | connector->interlace_allowed = false; | 463 | connector->interlace_allowed = false; |
| 464 | connector->doublescan_allowed = false; | 464 | connector->doublescan_allowed = false; |
| 465 | 465 | ||
| 466 | intel_output->dev_priv = dvo; | 466 | intel_encoder->dev_priv = dvo; |
| 467 | intel_output->i2c_bus = i2cbus; | 467 | intel_encoder->i2c_bus = i2cbus; |
| 468 | 468 | ||
| 469 | drm_encoder_init(dev, &intel_output->enc, | 469 | drm_encoder_init(dev, &intel_encoder->enc, |
| 470 | &intel_dvo_enc_funcs, encoder_type); | 470 | &intel_dvo_enc_funcs, encoder_type); |
| 471 | drm_encoder_helper_add(&intel_output->enc, | 471 | drm_encoder_helper_add(&intel_encoder->enc, |
| 472 | &intel_dvo_helper_funcs); | 472 | &intel_dvo_helper_funcs); |
| 473 | 473 | ||
| 474 | drm_mode_connector_attach_encoder(&intel_output->base, | 474 | drm_mode_connector_attach_encoder(&intel_encoder->base, |
| 475 | &intel_output->enc); | 475 | &intel_encoder->enc); |
| 476 | if (dvo->type == INTEL_DVO_CHIP_LVDS) { | 476 | if (dvo->type == INTEL_DVO_CHIP_LVDS) { |
| 477 | /* For our LVDS chipsets, we should hopefully be able | 477 | /* For our LVDS chipsets, we should hopefully be able |
| 478 | * to dig the fixed panel mode out of the BIOS data. | 478 | * to dig the fixed panel mode out of the BIOS data. |
| @@ -490,10 +490,10 @@ void intel_dvo_init(struct drm_device *dev) | |||
| 490 | return; | 490 | return; |
| 491 | } | 491 | } |
| 492 | 492 | ||
| 493 | intel_i2c_destroy(intel_output->ddc_bus); | 493 | intel_i2c_destroy(intel_encoder->ddc_bus); |
| 494 | /* Didn't find a chip, so tear down. */ | 494 | /* Didn't find a chip, so tear down. */ |
| 495 | if (i2cbus != NULL) | 495 | if (i2cbus != NULL) |
| 496 | intel_i2c_destroy(i2cbus); | 496 | intel_i2c_destroy(i2cbus); |
| 497 | free_intel: | 497 | free_intel: |
| 498 | kfree(intel_output); | 498 | kfree(intel_encoder); |
| 499 | } | 499 | } |
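The tail of intel_dvo_init() follows the usual kernel unwind pattern: resources acquired before the failing step are released in reverse order, and the final kfree() pairs with the kzalloc() at the top. A condensed sketch of that control flow, with labels and calls taken from the hunk:

	intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
	if (!intel_encoder)
		return;

	intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
	if (!intel_encoder->ddc_bus)
		goto free_intel;

	/* ... probe the DVO controllers; on success the function returns ... */

	intel_i2c_destroy(intel_encoder->ddc_bus);	/* no chip found */
	if (i2cbus != NULL)
		intel_i2c_destroy(i2cbus);
free_intel:
	kfree(intel_encoder);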
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 69bbef92f130..8a0b3bcdc7b1 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
| @@ -144,7 +144,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
| 144 | ret = -ENOMEM; | 144 | ret = -ENOMEM; |
| 145 | goto out; | 145 | goto out; |
| 146 | } | 146 | } |
| 147 | obj_priv = fbo->driver_private; | 147 | obj_priv = to_intel_bo(fbo); |
| 148 | 148 | ||
| 149 | mutex_lock(&dev->struct_mutex); | 149 | mutex_lock(&dev->struct_mutex); |
| 150 | 150 | ||
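Here and in intel_overlay.c below, raw fbo->driver_private dereferences are replaced by the to_intel_bo() helper. Its definition is not part of these hunks; a plausible minimal form, consistent with the call sites and offered only as an assumption, is a typed accessor around the same pointer:

	/* Assumed shape of the helper; only its call sites appear in this diff. */
	#define to_intel_bo(obj) \
		((struct drm_i915_gem_object *)(obj)->driver_private)

Wrapping the cast in one place keeps the conversion policy in a single definition, so a later change to how GEM objects carry their i915 state only has to touch the helper, not every caller.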
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 1ed02f641258..48cade0cf7b1 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -51,8 +51,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
| 51 | struct drm_i915_private *dev_priv = dev->dev_private; | 51 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 52 | struct drm_crtc *crtc = encoder->crtc; | 52 | struct drm_crtc *crtc = encoder->crtc; |
| 53 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 53 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 54 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 54 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
| 55 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 55 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
| 56 | u32 sdvox; | 56 | u32 sdvox; |
| 57 | 57 | ||
| 58 | sdvox = SDVO_ENCODING_HDMI | | 58 | sdvox = SDVO_ENCODING_HDMI | |
| @@ -74,8 +74,8 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) | |||
| 74 | { | 74 | { |
| 75 | struct drm_device *dev = encoder->dev; | 75 | struct drm_device *dev = encoder->dev; |
| 76 | struct drm_i915_private *dev_priv = dev->dev_private; | 76 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 77 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 77 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
| 78 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 78 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
| 79 | u32 temp; | 79 | u32 temp; |
| 80 | 80 | ||
| 81 | temp = I915_READ(hdmi_priv->sdvox_reg); | 81 | temp = I915_READ(hdmi_priv->sdvox_reg); |
| @@ -110,8 +110,8 @@ static void intel_hdmi_save(struct drm_connector *connector) | |||
| 110 | { | 110 | { |
| 111 | struct drm_device *dev = connector->dev; | 111 | struct drm_device *dev = connector->dev; |
| 112 | struct drm_i915_private *dev_priv = dev->dev_private; | 112 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 113 | struct intel_output *intel_output = to_intel_output(connector); | 113 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 114 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 114 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
| 115 | 115 | ||
| 116 | hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg); | 116 | hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg); |
| 117 | } | 117 | } |
| @@ -120,8 +120,8 @@ static void intel_hdmi_restore(struct drm_connector *connector) | |||
| 120 | { | 120 | { |
| 121 | struct drm_device *dev = connector->dev; | 121 | struct drm_device *dev = connector->dev; |
| 122 | struct drm_i915_private *dev_priv = dev->dev_private; | 122 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 123 | struct intel_output *intel_output = to_intel_output(connector); | 123 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 124 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 124 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
| 125 | 125 | ||
| 126 | I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX); | 126 | I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX); |
| 127 | POSTING_READ(hdmi_priv->sdvox_reg); | 127 | POSTING_READ(hdmi_priv->sdvox_reg); |
| @@ -151,21 +151,21 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, | |||
| 151 | static enum drm_connector_status | 151 | static enum drm_connector_status |
| 152 | intel_hdmi_detect(struct drm_connector *connector) | 152 | intel_hdmi_detect(struct drm_connector *connector) |
| 153 | { | 153 | { |
| 154 | struct intel_output *intel_output = to_intel_output(connector); | 154 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 155 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 155 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
| 156 | struct edid *edid = NULL; | 156 | struct edid *edid = NULL; |
| 157 | enum drm_connector_status status = connector_status_disconnected; | 157 | enum drm_connector_status status = connector_status_disconnected; |
| 158 | 158 | ||
| 159 | hdmi_priv->has_hdmi_sink = false; | 159 | hdmi_priv->has_hdmi_sink = false; |
| 160 | edid = drm_get_edid(&intel_output->base, | 160 | edid = drm_get_edid(&intel_encoder->base, |
| 161 | intel_output->ddc_bus); | 161 | intel_encoder->ddc_bus); |
| 162 | 162 | ||
| 163 | if (edid) { | 163 | if (edid) { |
| 164 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { | 164 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { |
| 165 | status = connector_status_connected; | 165 | status = connector_status_connected; |
| 166 | hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); | 166 | hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); |
| 167 | } | 167 | } |
| 168 | intel_output->base.display_info.raw_edid = NULL; | 168 | intel_encoder->base.display_info.raw_edid = NULL; |
| 169 | kfree(edid); | 169 | kfree(edid); |
| 170 | } | 170 | } |
| 171 | 171 | ||
| @@ -174,24 +174,24 @@ intel_hdmi_detect(struct drm_connector *connector) | |||
| 174 | 174 | ||
| 175 | static int intel_hdmi_get_modes(struct drm_connector *connector) | 175 | static int intel_hdmi_get_modes(struct drm_connector *connector) |
| 176 | { | 176 | { |
| 177 | struct intel_output *intel_output = to_intel_output(connector); | 177 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 178 | 178 | ||
| 179 | /* We should parse the EDID data and find out if it's an HDMI sink so | 179 | /* We should parse the EDID data and find out if it's an HDMI sink so |
| 180 | * we can send audio to it. | 180 | * we can send audio to it. |
| 181 | */ | 181 | */ |
| 182 | 182 | ||
| 183 | return intel_ddc_get_modes(intel_output); | 183 | return intel_ddc_get_modes(intel_encoder); |
| 184 | } | 184 | } |
| 185 | 185 | ||
| 186 | static void intel_hdmi_destroy(struct drm_connector *connector) | 186 | static void intel_hdmi_destroy(struct drm_connector *connector) |
| 187 | { | 187 | { |
| 188 | struct intel_output *intel_output = to_intel_output(connector); | 188 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 189 | 189 | ||
| 190 | if (intel_output->i2c_bus) | 190 | if (intel_encoder->i2c_bus) |
| 191 | intel_i2c_destroy(intel_output->i2c_bus); | 191 | intel_i2c_destroy(intel_encoder->i2c_bus); |
| 192 | drm_sysfs_connector_remove(connector); | 192 | drm_sysfs_connector_remove(connector); |
| 193 | drm_connector_cleanup(connector); | 193 | drm_connector_cleanup(connector); |
| 194 | kfree(intel_output); | 194 | kfree(intel_encoder); |
| 195 | } | 195 | } |
| 196 | 196 | ||
| 197 | static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { | 197 | static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { |
| @@ -230,63 +230,63 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
| 230 | { | 230 | { |
| 231 | struct drm_i915_private *dev_priv = dev->dev_private; | 231 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 232 | struct drm_connector *connector; | 232 | struct drm_connector *connector; |
| 233 | struct intel_output *intel_output; | 233 | struct intel_encoder *intel_encoder; |
| 234 | struct intel_hdmi_priv *hdmi_priv; | 234 | struct intel_hdmi_priv *hdmi_priv; |
| 235 | 235 | ||
| 236 | intel_output = kcalloc(sizeof(struct intel_output) + | 236 | intel_encoder = kcalloc(sizeof(struct intel_encoder) + |
| 237 | sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); | 237 | sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); |
| 238 | if (!intel_output) | 238 | if (!intel_encoder) |
| 239 | return; | 239 | return; |
| 240 | hdmi_priv = (struct intel_hdmi_priv *)(intel_output + 1); | 240 | hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1); |
| 241 | 241 | ||
| 242 | connector = &intel_output->base; | 242 | connector = &intel_encoder->base; |
| 243 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, | 243 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, |
| 244 | DRM_MODE_CONNECTOR_HDMIA); | 244 | DRM_MODE_CONNECTOR_HDMIA); |
| 245 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); | 245 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); |
| 246 | 246 | ||
| 247 | intel_output->type = INTEL_OUTPUT_HDMI; | 247 | intel_encoder->type = INTEL_OUTPUT_HDMI; |
| 248 | 248 | ||
| 249 | connector->interlace_allowed = 0; | 249 | connector->interlace_allowed = 0; |
| 250 | connector->doublescan_allowed = 0; | 250 | connector->doublescan_allowed = 0; |
| 251 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 251 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
| 252 | 252 | ||
| 253 | /* Set up the DDC bus. */ | 253 | /* Set up the DDC bus. */ |
| 254 | if (sdvox_reg == SDVOB) { | 254 | if (sdvox_reg == SDVOB) { |
| 255 | intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); | 255 | intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); |
| 256 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); | 256 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); |
| 257 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | 257 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; |
| 258 | } else if (sdvox_reg == SDVOC) { | 258 | } else if (sdvox_reg == SDVOC) { |
| 259 | intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); | 259 | intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); |
| 260 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); | 260 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); |
| 261 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | 261 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; |
| 262 | } else if (sdvox_reg == HDMIB) { | 262 | } else if (sdvox_reg == HDMIB) { |
| 263 | intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); | 263 | intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); |
| 264 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, | 264 | intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, |
| 265 | "HDMIB"); | 265 | "HDMIB"); |
| 266 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | 266 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; |
| 267 | } else if (sdvox_reg == HDMIC) { | 267 | } else if (sdvox_reg == HDMIC) { |
| 268 | intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); | 268 | intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); |
| 269 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, | 269 | intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, |
| 270 | "HDMIC"); | 270 | "HDMIC"); |
| 271 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | 271 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; |
| 272 | } else if (sdvox_reg == HDMID) { | 272 | } else if (sdvox_reg == HDMID) { |
| 273 | intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); | 273 | intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); |
| 274 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, | 274 | intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, |
| 275 | "HDMID"); | 275 | "HDMID"); |
| 276 | dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; | 276 | dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; |
| 277 | } | 277 | } |
| 278 | if (!intel_output->ddc_bus) | 278 | if (!intel_encoder->ddc_bus) |
| 279 | goto err_connector; | 279 | goto err_connector; |
| 280 | 280 | ||
| 281 | hdmi_priv->sdvox_reg = sdvox_reg; | 281 | hdmi_priv->sdvox_reg = sdvox_reg; |
| 282 | intel_output->dev_priv = hdmi_priv; | 282 | intel_encoder->dev_priv = hdmi_priv; |
| 283 | 283 | ||
| 284 | drm_encoder_init(dev, &intel_output->enc, &intel_hdmi_enc_funcs, | 284 | drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs, |
| 285 | DRM_MODE_ENCODER_TMDS); | 285 | DRM_MODE_ENCODER_TMDS); |
| 286 | drm_encoder_helper_add(&intel_output->enc, &intel_hdmi_helper_funcs); | 286 | drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs); |
| 287 | 287 | ||
| 288 | drm_mode_connector_attach_encoder(&intel_output->base, | 288 | drm_mode_connector_attach_encoder(&intel_encoder->base, |
| 289 | &intel_output->enc); | 289 | &intel_encoder->enc); |
| 290 | drm_sysfs_connector_add(connector); | 290 | drm_sysfs_connector_add(connector); |
| 291 | 291 | ||
| 292 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written | 292 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written |
| @@ -302,7 +302,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
| 302 | 302 | ||
| 303 | err_connector: | 303 | err_connector: |
| 304 | drm_connector_cleanup(connector); | 304 | drm_connector_cleanup(connector); |
| 305 | kfree(intel_output); | 305 | kfree(intel_encoder); |
| 306 | 306 | ||
| 307 | return; | 307 | return; |
| 308 | } | 308 | } |
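intel_hdmi_init() derives the clone bit, the DDC GPIO pin, and the hotplug status bit from the register it is handed; the PCH-based ports (HDMIB/HDMIC/HDMID) use the PCH GPIO pins while SDVOB/SDVOC use the legacy ones. The mapping encoded by the if/else chain above, collected in one place (values copied from the hunk):

	/*
	 * sdvox_reg  clone bit               DDC pin    hotplug mask
	 * SDVOB      INTEL_HDMIB_CLONE_BIT   GPIOE      HDMIB_HOTPLUG_INT_STATUS
	 * SDVOC      INTEL_HDMIC_CLONE_BIT   GPIOD      HDMIC_HOTPLUG_INT_STATUS
	 * HDMIB      INTEL_HDMID_CLONE_BIT   PCH_GPIOE  HDMIB_HOTPLUG_INT_STATUS
	 * HDMIC      INTEL_HDMIE_CLONE_BIT   PCH_GPIOD  HDMIC_HOTPLUG_INT_STATUS
	 * HDMID      INTEL_HDMIF_CLONE_BIT   PCH_GPIOF  HDMID_HOTPLUG_INT_STATUS
	 */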
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 216e9f52b6e0..b66806a37d37 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
| @@ -239,8 +239,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
| 239 | struct drm_i915_private *dev_priv = dev->dev_private; | 239 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 240 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 240 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
| 241 | struct drm_encoder *tmp_encoder; | 241 | struct drm_encoder *tmp_encoder; |
| 242 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 242 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
| 243 | struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; | 243 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; |
| 244 | u32 pfit_control = 0, pfit_pgm_ratios = 0; | 244 | u32 pfit_control = 0, pfit_pgm_ratios = 0; |
| 245 | int left_border = 0, right_border = 0, top_border = 0; | 245 | int left_border = 0, right_border = 0, top_border = 0; |
| 246 | int bottom_border = 0; | 246 | int bottom_border = 0; |
| @@ -587,8 +587,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
| 587 | { | 587 | { |
| 588 | struct drm_device *dev = encoder->dev; | 588 | struct drm_device *dev = encoder->dev; |
| 589 | struct drm_i915_private *dev_priv = dev->dev_private; | 589 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 590 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 590 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
| 591 | struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; | 591 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; |
| 592 | 592 | ||
| 593 | /* | 593 | /* |
| 594 | * The LVDS pin pair will already have been turned on in the | 594 | * The LVDS pin pair will already have been turned on in the |
| @@ -635,14 +635,16 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect | |||
| 635 | static int intel_lvds_get_modes(struct drm_connector *connector) | 635 | static int intel_lvds_get_modes(struct drm_connector *connector) |
| 636 | { | 636 | { |
| 637 | struct drm_device *dev = connector->dev; | 637 | struct drm_device *dev = connector->dev; |
| 638 | struct intel_output *intel_output = to_intel_output(connector); | 638 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 639 | struct drm_i915_private *dev_priv = dev->dev_private; | 639 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 640 | int ret = 0; | 640 | int ret = 0; |
| 641 | 641 | ||
| 642 | ret = intel_ddc_get_modes(intel_output); | 642 | if (dev_priv->lvds_edid_good) { |
| 643 | ret = intel_ddc_get_modes(intel_encoder); | ||
| 643 | 644 | ||
| 644 | if (ret) | 645 | if (ret) |
| 645 | return ret; | 646 | return ret; |
| 647 | } | ||
| 646 | 648 | ||
| 647 | /* Didn't get an EDID, so | 649 | /* Didn't get an EDID, so |
| 648 | * Set wide sync ranges so we get all modes | 650 | * Set wide sync ranges so we get all modes |
| @@ -715,11 +717,11 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
| 715 | static void intel_lvds_destroy(struct drm_connector *connector) | 717 | static void intel_lvds_destroy(struct drm_connector *connector) |
| 716 | { | 718 | { |
| 717 | struct drm_device *dev = connector->dev; | 719 | struct drm_device *dev = connector->dev; |
| 718 | struct intel_output *intel_output = to_intel_output(connector); | 720 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 719 | struct drm_i915_private *dev_priv = dev->dev_private; | 721 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 720 | 722 | ||
| 721 | if (intel_output->ddc_bus) | 723 | if (intel_encoder->ddc_bus) |
| 722 | intel_i2c_destroy(intel_output->ddc_bus); | 724 | intel_i2c_destroy(intel_encoder->ddc_bus); |
| 723 | if (dev_priv->lid_notifier.notifier_call) | 725 | if (dev_priv->lid_notifier.notifier_call) |
| 724 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); | 726 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); |
| 725 | drm_sysfs_connector_remove(connector); | 727 | drm_sysfs_connector_remove(connector); |
| @@ -732,13 +734,13 @@ static int intel_lvds_set_property(struct drm_connector *connector, | |||
| 732 | uint64_t value) | 734 | uint64_t value) |
| 733 | { | 735 | { |
| 734 | struct drm_device *dev = connector->dev; | 736 | struct drm_device *dev = connector->dev; |
| 735 | struct intel_output *intel_output = | 737 | struct intel_encoder *intel_encoder = |
| 736 | to_intel_output(connector); | 738 | to_intel_encoder(connector); |
| 737 | 739 | ||
| 738 | if (property == dev->mode_config.scaling_mode_property && | 740 | if (property == dev->mode_config.scaling_mode_property && |
| 739 | connector->encoder) { | 741 | connector->encoder) { |
| 740 | struct drm_crtc *crtc = connector->encoder->crtc; | 742 | struct drm_crtc *crtc = connector->encoder->crtc; |
| 741 | struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; | 743 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; |
| 742 | if (value == DRM_MODE_SCALE_NONE) { | 744 | if (value == DRM_MODE_SCALE_NONE) { |
| 743 | DRM_DEBUG_KMS("no scaling not supported\n"); | 745 | DRM_DEBUG_KMS("no scaling not supported\n"); |
| 744 | return 0; | 746 | return 0; |
| @@ -858,6 +860,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
| 858 | DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), | 860 | DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), |
| 859 | }, | 861 | }, |
| 860 | }, | 862 | }, |
| 863 | { | ||
| 864 | .callback = intel_no_lvds_dmi_callback, | ||
| 865 | .ident = "Clientron U800", | ||
| 866 | .matches = { | ||
| 867 | DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), | ||
| 868 | DMI_MATCH(DMI_PRODUCT_NAME, "U800"), | ||
| 869 | }, | ||
| 870 | }, | ||
| 861 | 871 | ||
| 862 | { } /* terminating entry */ | 872 | { } /* terminating entry */ |
| 863 | }; | 873 | }; |
| @@ -968,7 +978,7 @@ static int lvds_is_present_in_vbt(struct drm_device *dev) | |||
| 968 | void intel_lvds_init(struct drm_device *dev) | 978 | void intel_lvds_init(struct drm_device *dev) |
| 969 | { | 979 | { |
| 970 | struct drm_i915_private *dev_priv = dev->dev_private; | 980 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 971 | struct intel_output *intel_output; | 981 | struct intel_encoder *intel_encoder; |
| 972 | struct drm_connector *connector; | 982 | struct drm_connector *connector; |
| 973 | struct drm_encoder *encoder; | 983 | struct drm_encoder *encoder; |
| 974 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ | 984 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ |
| @@ -996,40 +1006,40 @@ void intel_lvds_init(struct drm_device *dev) | |||
| 996 | gpio = PCH_GPIOC; | 1006 | gpio = PCH_GPIOC; |
| 997 | } | 1007 | } |
| 998 | 1008 | ||
| 999 | intel_output = kzalloc(sizeof(struct intel_output) + | 1009 | intel_encoder = kzalloc(sizeof(struct intel_encoder) + |
| 1000 | sizeof(struct intel_lvds_priv), GFP_KERNEL); | 1010 | sizeof(struct intel_lvds_priv), GFP_KERNEL); |
| 1001 | if (!intel_output) { | 1011 | if (!intel_encoder) { |
| 1002 | return; | 1012 | return; |
| 1003 | } | 1013 | } |
| 1004 | 1014 | ||
| 1005 | connector = &intel_output->base; | 1015 | connector = &intel_encoder->base; |
| 1006 | encoder = &intel_output->enc; | 1016 | encoder = &intel_encoder->enc; |
| 1007 | drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs, | 1017 | drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs, |
| 1008 | DRM_MODE_CONNECTOR_LVDS); | 1018 | DRM_MODE_CONNECTOR_LVDS); |
| 1009 | 1019 | ||
| 1010 | drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs, | 1020 | drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, |
| 1011 | DRM_MODE_ENCODER_LVDS); | 1021 | DRM_MODE_ENCODER_LVDS); |
| 1012 | 1022 | ||
| 1013 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | 1023 | drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); |
| 1014 | intel_output->type = INTEL_OUTPUT_LVDS; | 1024 | intel_encoder->type = INTEL_OUTPUT_LVDS; |
| 1015 | 1025 | ||
| 1016 | intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); | 1026 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); |
| 1017 | intel_output->crtc_mask = (1 << 1); | 1027 | intel_encoder->crtc_mask = (1 << 1); |
| 1018 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); | 1028 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); |
| 1019 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); | 1029 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); |
| 1020 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 1030 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
| 1021 | connector->interlace_allowed = false; | 1031 | connector->interlace_allowed = false; |
| 1022 | connector->doublescan_allowed = false; | 1032 | connector->doublescan_allowed = false; |
| 1023 | 1033 | ||
| 1024 | lvds_priv = (struct intel_lvds_priv *)(intel_output + 1); | 1034 | lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1); |
| 1025 | intel_output->dev_priv = lvds_priv; | 1035 | intel_encoder->dev_priv = lvds_priv; |
| 1026 | /* create the scaling mode property */ | 1036 | /* create the scaling mode property */ |
| 1027 | drm_mode_create_scaling_mode_property(dev); | 1037 | drm_mode_create_scaling_mode_property(dev); |
| 1028 | /* | 1038 | /* |
| 1029 | * the initial panel fitting mode will be FULL_SCREEN. | 1039 | * the initial panel fitting mode will be FULL_SCREEN. |
| 1030 | */ | 1040 | */ |
| 1031 | 1041 | ||
| 1032 | drm_connector_attach_property(&intel_output->base, | 1042 | drm_connector_attach_property(&intel_encoder->base, |
| 1033 | dev->mode_config.scaling_mode_property, | 1043 | dev->mode_config.scaling_mode_property, |
| 1034 | DRM_MODE_SCALE_FULLSCREEN); | 1044 | DRM_MODE_SCALE_FULLSCREEN); |
| 1035 | lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; | 1045 | lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; |
| @@ -1044,8 +1054,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
| 1044 | */ | 1054 | */ |
| 1045 | 1055 | ||
| 1046 | /* Set up the DDC bus. */ | 1056 | /* Set up the DDC bus. */ |
| 1047 | intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); | 1057 | intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); |
| 1048 | if (!intel_output->ddc_bus) { | 1058 | if (!intel_encoder->ddc_bus) { |
| 1049 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " | 1059 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " |
| 1050 | "failed.\n"); | 1060 | "failed.\n"); |
| 1051 | goto failed; | 1061 | goto failed; |
| @@ -1055,7 +1065,10 @@ void intel_lvds_init(struct drm_device *dev) | |||
| 1055 | * Attempt to get the fixed panel mode from DDC. Assume that the | 1065 | * Attempt to get the fixed panel mode from DDC. Assume that the |
| 1056 | * preferred mode is the right one. | 1066 | * preferred mode is the right one. |
| 1057 | */ | 1067 | */ |
| 1058 | intel_ddc_get_modes(intel_output); | 1068 | dev_priv->lvds_edid_good = true; |
| 1069 | |||
| 1070 | if (!intel_ddc_get_modes(intel_encoder)) | ||
| 1071 | dev_priv->lvds_edid_good = false; | ||
| 1059 | 1072 | ||
| 1060 | list_for_each_entry(scan, &connector->probed_modes, head) { | 1073 | list_for_each_entry(scan, &connector->probed_modes, head) { |
| 1061 | mutex_lock(&dev->mode_config.mutex); | 1074 | mutex_lock(&dev->mode_config.mutex); |
| @@ -1133,9 +1146,9 @@ out: | |||
| 1133 | 1146 | ||
| 1134 | failed: | 1147 | failed: |
| 1135 | DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); | 1148 | DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); |
| 1136 | if (intel_output->ddc_bus) | 1149 | if (intel_encoder->ddc_bus) |
| 1137 | intel_i2c_destroy(intel_output->ddc_bus); | 1150 | intel_i2c_destroy(intel_encoder->ddc_bus); |
| 1138 | drm_connector_cleanup(connector); | 1151 | drm_connector_cleanup(connector); |
| 1139 | drm_encoder_cleanup(encoder); | 1152 | drm_encoder_cleanup(encoder); |
| 1140 | kfree(intel_output); | 1153 | kfree(intel_encoder); |
| 1141 | } | 1154 | } |
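The LVDS changes add a dev_priv->lvds_edid_good flag: intel_lvds_init() records whether the initial DDC probe produced any modes, and intel_lvds_get_modes() then skips re-reading EDID from panels whose DDC is known not to answer, falling back to the fixed panel mode instead. The field itself is declared outside these hunks; a minimal sketch of the assumed declaration and the resulting flow:

	/* Assumed addition to struct drm_i915_private (declared elsewhere): */
	bool lvds_edid_good;	/* EDID was readable when LVDS was initialized */

	/* Init path (from the hunk): remember whether DDC yielded any modes. */
	dev_priv->lvds_edid_good = true;
	if (!intel_ddc_get_modes(intel_encoder))
		dev_priv->lvds_edid_good = false;

	/* get_modes path (from the hunk): only retry DDC when it worked before. */
	if (dev_priv->lvds_edid_good) {
		ret = intel_ddc_get_modes(intel_encoder);
		if (ret)
			return ret;
	}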
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 89d303d1d3fb..8e5c83b2d120 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c | |||
| @@ -34,7 +34,7 @@ | |||
| 34 | * intel_ddc_probe | 34 | * intel_ddc_probe |
| 35 | * | 35 | * |
| 36 | */ | 36 | */ |
| 37 | bool intel_ddc_probe(struct intel_output *intel_output) | 37 | bool intel_ddc_probe(struct intel_encoder *intel_encoder) |
| 38 | { | 38 | { |
| 39 | u8 out_buf[] = { 0x0, 0x0}; | 39 | u8 out_buf[] = { 0x0, 0x0}; |
| 40 | u8 buf[2]; | 40 | u8 buf[2]; |
| @@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_output *intel_output) | |||
| 54 | } | 54 | } |
| 55 | }; | 55 | }; |
| 56 | 56 | ||
| 57 | intel_i2c_quirk_set(intel_output->base.dev, true); | 57 | intel_i2c_quirk_set(intel_encoder->base.dev, true); |
| 58 | ret = i2c_transfer(intel_output->ddc_bus, msgs, 2); | 58 | ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2); |
| 59 | intel_i2c_quirk_set(intel_output->base.dev, false); | 59 | intel_i2c_quirk_set(intel_encoder->base.dev, false); |
| 60 | if (ret == 2) | 60 | if (ret == 2) |
| 61 | return true; | 61 | return true; |
| 62 | 62 | ||
| @@ -69,19 +69,19 @@ bool intel_ddc_probe(struct intel_output *intel_output) | |||
| 69 | * | 69 | * |
| 70 | * Fetch the EDID information from @connector using the DDC bus. | 70 | * Fetch the EDID information from @connector using the DDC bus. |
| 71 | */ | 71 | */ |
| 72 | int intel_ddc_get_modes(struct intel_output *intel_output) | 72 | int intel_ddc_get_modes(struct intel_encoder *intel_encoder) |
| 73 | { | 73 | { |
| 74 | struct edid *edid; | 74 | struct edid *edid; |
| 75 | int ret = 0; | 75 | int ret = 0; |
| 76 | 76 | ||
| 77 | intel_i2c_quirk_set(intel_output->base.dev, true); | 77 | intel_i2c_quirk_set(intel_encoder->base.dev, true); |
| 78 | edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus); | 78 | edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus); |
| 79 | intel_i2c_quirk_set(intel_output->base.dev, false); | 79 | intel_i2c_quirk_set(intel_encoder->base.dev, false); |
| 80 | if (edid) { | 80 | if (edid) { |
| 81 | drm_mode_connector_update_edid_property(&intel_output->base, | 81 | drm_mode_connector_update_edid_property(&intel_encoder->base, |
| 82 | edid); | 82 | edid); |
| 83 | ret = drm_add_edid_modes(&intel_output->base, edid); | 83 | ret = drm_add_edid_modes(&intel_encoder->base, edid); |
| 84 | intel_output->base.display_info.raw_edid = NULL; | 84 | intel_encoder->base.display_info.raw_edid = NULL; |
| 85 | kfree(edid); | 85 | kfree(edid); |
| 86 | } | 86 | } |
| 87 | 87 | ||
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 60595fc26fdd..6d524a1fc271 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
| @@ -724,7 +724,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
| 724 | int ret, tmp_width; | 724 | int ret, tmp_width; |
| 725 | struct overlay_registers *regs; | 725 | struct overlay_registers *regs; |
| 726 | bool scale_changed = false; | 726 | bool scale_changed = false; |
| 727 | struct drm_i915_gem_object *bo_priv = new_bo->driver_private; | 727 | struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo); |
| 728 | struct drm_device *dev = overlay->dev; | 728 | struct drm_device *dev = overlay->dev; |
| 729 | 729 | ||
| 730 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 730 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
| @@ -809,7 +809,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
| 809 | intel_overlay_continue(overlay, scale_changed); | 809 | intel_overlay_continue(overlay, scale_changed); |
| 810 | 810 | ||
| 811 | overlay->old_vid_bo = overlay->vid_bo; | 811 | overlay->old_vid_bo = overlay->vid_bo; |
| 812 | overlay->vid_bo = new_bo->driver_private; | 812 | overlay->vid_bo = to_intel_bo(new_bo); |
| 813 | 813 | ||
| 814 | return 0; | 814 | return 0; |
| 815 | 815 | ||
| @@ -1344,7 +1344,7 @@ void intel_setup_overlay(struct drm_device *dev) | |||
| 1344 | reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); | 1344 | reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); |
| 1345 | if (!reg_bo) | 1345 | if (!reg_bo) |
| 1346 | goto out_free; | 1346 | goto out_free; |
| 1347 | overlay->reg_bo = reg_bo->driver_private; | 1347 | overlay->reg_bo = to_intel_bo(reg_bo); |
| 1348 | 1348 | ||
| 1349 | if (OVERLAY_NONPHYSICAL(dev)) { | 1349 | if (OVERLAY_NONPHYSICAL(dev)) { |
| 1350 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); | 1350 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); |
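(The intel_overlay.c hunks above replace open-coded new_bo->driver_private accesses with a to_intel_bo() accessor. The helper itself lives in the i915 headers and is not part of this diff; the standalone sketch below uses hypothetical stand-in types purely to illustrate the pattern: one named accessor instead of the same cast repeated at every call site.)

    /*
     * Hypothetical sketch of the accessor pattern -- not the real i915
     * definition, which this diff does not show.  The point of the
     * change is to give the "GEM object -> driver private" conversion
     * a single, named home.
     */
    #include <stdio.h>

    struct gem_object {
            void *driver_private;   /* opaque pointer owned by the driver */
    };

    struct intel_bo {
            int pin_count;
    };

    /* one named accessor instead of an open-coded cast at every call site */
    static inline struct intel_bo *to_intel_bo(struct gem_object *obj)
    {
            return (struct intel_bo *)obj->driver_private;
    }

    int main(void)
    {
            struct intel_bo bo = { .pin_count = 1 };
            struct gem_object obj = { .driver_private = &bo };

            /* mirrors the call sites rewritten above, e.g.
             * overlay->reg_bo = to_intel_bo(reg_bo); */
            printf("pin_count=%d\n", to_intel_bo(&obj)->pin_count);
            return 0;
    }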
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 26e13a0bf30b..87d953664cb0 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
| @@ -54,7 +54,7 @@ struct intel_sdvo_priv { | |||
| 54 | u8 slave_addr; | 54 | u8 slave_addr; |
| 55 | 55 | ||
| 56 | /* Register for the SDVO device: SDVOB or SDVOC */ | 56 | /* Register for the SDVO device: SDVOB or SDVOC */ |
| 57 | int output_device; | 57 | int sdvo_reg; |
| 58 | 58 | ||
| 59 | /* Active outputs controlled by this SDVO output */ | 59 | /* Active outputs controlled by this SDVO output */ |
| 60 | uint16_t controlled_output; | 60 | uint16_t controlled_output; |
| @@ -124,7 +124,7 @@ struct intel_sdvo_priv { | |||
| 124 | */ | 124 | */ |
| 125 | struct intel_sdvo_encode encode; | 125 | struct intel_sdvo_encode encode; |
| 126 | 126 | ||
| 127 | /* DDC bus used by this SDVO output */ | 127 | /* DDC bus used by this SDVO encoder */ |
| 128 | uint8_t ddc_bus; | 128 | uint8_t ddc_bus; |
| 129 | 129 | ||
| 130 | /* Mac mini hack -- use the same DDC as the analog connector */ | 130 | /* Mac mini hack -- use the same DDC as the analog connector */ |
| @@ -162,22 +162,22 @@ struct intel_sdvo_priv { | |||
| 162 | }; | 162 | }; |
| 163 | 163 | ||
| 164 | static bool | 164 | static bool |
| 165 | intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags); | 165 | intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags); |
| 166 | 166 | ||
| 167 | /** | 167 | /** |
| 168 | * Writes the SDVOB or SDVOC with the given value, but always writes both | 168 | * Writes the SDVOB or SDVOC with the given value, but always writes both |
| 169 | * SDVOB and SDVOC to work around apparent hardware issues (according to | 169 | * SDVOB and SDVOC to work around apparent hardware issues (according to |
| 170 | * comments in the BIOS). | 170 | * comments in the BIOS). |
| 171 | */ | 171 | */ |
| 172 | static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) | 172 | static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) |
| 173 | { | 173 | { |
| 174 | struct drm_device *dev = intel_output->base.dev; | 174 | struct drm_device *dev = intel_encoder->base.dev; |
| 175 | struct drm_i915_private *dev_priv = dev->dev_private; | 175 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 176 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 176 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 177 | u32 bval = val, cval = val; | 177 | u32 bval = val, cval = val; |
| 178 | int i; | 178 | int i; |
| 179 | 179 | ||
| 180 | if (sdvo_priv->output_device == SDVOB) { | 180 | if (sdvo_priv->sdvo_reg == SDVOB) { |
| 181 | cval = I915_READ(SDVOC); | 181 | cval = I915_READ(SDVOC); |
| 182 | } else { | 182 | } else { |
| 183 | bval = I915_READ(SDVOB); | 183 | bval = I915_READ(SDVOB); |
| @@ -196,10 +196,10 @@ static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) | |||
| 196 | } | 196 | } |
| 197 | } | 197 | } |
| 198 | 198 | ||
| 199 | static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, | 199 | static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr, |
| 200 | u8 *ch) | 200 | u8 *ch) |
| 201 | { | 201 | { |
| 202 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 202 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 203 | u8 out_buf[2]; | 203 | u8 out_buf[2]; |
| 204 | u8 buf[2]; | 204 | u8 buf[2]; |
| 205 | int ret; | 205 | int ret; |
| @@ -222,7 +222,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, | |||
| 222 | out_buf[0] = addr; | 222 | out_buf[0] = addr; |
| 223 | out_buf[1] = 0; | 223 | out_buf[1] = 0; |
| 224 | 224 | ||
| 225 | if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2) | 225 | if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2) |
| 226 | { | 226 | { |
| 227 | *ch = buf[0]; | 227 | *ch = buf[0]; |
| 228 | return true; | 228 | return true; |
| @@ -232,10 +232,10 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, | |||
| 232 | return false; | 232 | return false; |
| 233 | } | 233 | } |
| 234 | 234 | ||
| 235 | static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, | 235 | static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr, |
| 236 | u8 ch) | 236 | u8 ch) |
| 237 | { | 237 | { |
| 238 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 238 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 239 | u8 out_buf[2]; | 239 | u8 out_buf[2]; |
| 240 | struct i2c_msg msgs[] = { | 240 | struct i2c_msg msgs[] = { |
| 241 | { | 241 | { |
| @@ -249,7 +249,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, | |||
| 249 | out_buf[0] = addr; | 249 | out_buf[0] = addr; |
| 250 | out_buf[1] = ch; | 250 | out_buf[1] = ch; |
| 251 | 251 | ||
| 252 | if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1) | 252 | if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1) |
| 253 | { | 253 | { |
| 254 | return true; | 254 | return true; |
| 255 | } | 255 | } |
| @@ -353,13 +353,13 @@ static const struct _sdvo_cmd_name { | |||
| 353 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), | 353 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), |
| 354 | }; | 354 | }; |
| 355 | 355 | ||
| 356 | #define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") | 356 | #define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC") |
| 357 | #define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv) | 357 | #define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv) |
| 358 | 358 | ||
| 359 | static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, | 359 | static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, |
| 360 | void *args, int args_len) | 360 | void *args, int args_len) |
| 361 | { | 361 | { |
| 362 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 362 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 363 | int i; | 363 | int i; |
| 364 | 364 | ||
| 365 | DRM_DEBUG_KMS("%s: W: %02X ", | 365 | DRM_DEBUG_KMS("%s: W: %02X ", |
| @@ -379,19 +379,19 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, | |||
| 379 | DRM_LOG_KMS("\n"); | 379 | DRM_LOG_KMS("\n"); |
| 380 | } | 380 | } |
| 381 | 381 | ||
| 382 | static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd, | 382 | static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd, |
| 383 | void *args, int args_len) | 383 | void *args, int args_len) |
| 384 | { | 384 | { |
| 385 | int i; | 385 | int i; |
| 386 | 386 | ||
| 387 | intel_sdvo_debug_write(intel_output, cmd, args, args_len); | 387 | intel_sdvo_debug_write(intel_encoder, cmd, args, args_len); |
| 388 | 388 | ||
| 389 | for (i = 0; i < args_len; i++) { | 389 | for (i = 0; i < args_len; i++) { |
| 390 | intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i, | 390 | intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i, |
| 391 | ((u8*)args)[i]); | 391 | ((u8*)args)[i]); |
| 392 | } | 392 | } |
| 393 | 393 | ||
| 394 | intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd); | 394 | intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd); |
| 395 | } | 395 | } |
| 396 | 396 | ||
| 397 | static const char *cmd_status_names[] = { | 397 | static const char *cmd_status_names[] = { |
| @@ -404,11 +404,11 @@ static const char *cmd_status_names[] = { | |||
| 404 | "Scaling not supported" | 404 | "Scaling not supported" |
| 405 | }; | 405 | }; |
| 406 | 406 | ||
| 407 | static void intel_sdvo_debug_response(struct intel_output *intel_output, | 407 | static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder, |
| 408 | void *response, int response_len, | 408 | void *response, int response_len, |
| 409 | u8 status) | 409 | u8 status) |
| 410 | { | 410 | { |
| 411 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 411 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 412 | int i; | 412 | int i; |
| 413 | 413 | ||
| 414 | DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv)); | 414 | DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv)); |
| @@ -423,7 +423,7 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output, | |||
| 423 | DRM_LOG_KMS("\n"); | 423 | DRM_LOG_KMS("\n"); |
| 424 | } | 424 | } |
| 425 | 425 | ||
| 426 | static u8 intel_sdvo_read_response(struct intel_output *intel_output, | 426 | static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder, |
| 427 | void *response, int response_len) | 427 | void *response, int response_len) |
| 428 | { | 428 | { |
| 429 | int i; | 429 | int i; |
| @@ -433,16 +433,16 @@ static u8 intel_sdvo_read_response(struct intel_output *intel_output, | |||
| 433 | while (retry--) { | 433 | while (retry--) { |
| 434 | /* Read the command response */ | 434 | /* Read the command response */ |
| 435 | for (i = 0; i < response_len; i++) { | 435 | for (i = 0; i < response_len; i++) { |
| 436 | intel_sdvo_read_byte(intel_output, | 436 | intel_sdvo_read_byte(intel_encoder, |
| 437 | SDVO_I2C_RETURN_0 + i, | 437 | SDVO_I2C_RETURN_0 + i, |
| 438 | &((u8 *)response)[i]); | 438 | &((u8 *)response)[i]); |
| 439 | } | 439 | } |
| 440 | 440 | ||
| 441 | /* read the return status */ | 441 | /* read the return status */ |
| 442 | intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS, | 442 | intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS, |
| 443 | &status); | 443 | &status); |
| 444 | 444 | ||
| 445 | intel_sdvo_debug_response(intel_output, response, response_len, | 445 | intel_sdvo_debug_response(intel_encoder, response, response_len, |
| 446 | status); | 446 | status); |
| 447 | if (status != SDVO_CMD_STATUS_PENDING) | 447 | if (status != SDVO_CMD_STATUS_PENDING) |
| 448 | return status; | 448 | return status; |
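(The intel_sdvo_write_cmd and intel_sdvo_read_response hunks above are the core of every SDVO transaction in this file: arguments are written at SDVO_I2C_ARG_0 counting downward, the opcode write starts the command, and the caller then polls SDVO_I2C_CMD_STATUS and pulls the reply out of SDVO_I2C_RETURN_0 onward. The sketch below models that round trip against a fake register file; the register offsets, retry count and fake device are illustrative stand-ins, not the real hardware interface.)

    /*
     * Illustrative model of the write-cmd / read-response handshake.
     * All offsets and the fake device are assumptions for the sketch.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define REG_ARG_0      0x07    /* arguments are written at ARG_0 - i */
    #define REG_OPCODE     0x08
    #define REG_STATUS     0x09
    #define REG_RETURN_0   0x0a
    #define STATUS_PENDING 5
    #define STATUS_SUCCESS 1

    static uint8_t regs[32];       /* stand-in for the SDVO register file */

    static void write_byte(uint8_t addr, uint8_t val) { regs[addr] = val; }
    static uint8_t read_byte(uint8_t addr) { return regs[addr]; }

    /* fake device: echo the first argument back, then report success */
    static void device_step(void)
    {
            regs[REG_RETURN_0] = regs[REG_ARG_0];
            regs[REG_STATUS] = STATUS_SUCCESS;
    }

    static void write_cmd(uint8_t cmd, const uint8_t *args, int len)
    {
            for (int i = 0; i < len; i++)
                    write_byte(REG_ARG_0 - i, args[i]);
            write_byte(REG_OPCODE, cmd);    /* opcode write kicks the command off */
    }

    static uint8_t read_response(uint8_t *resp, int len)
    {
            uint8_t status;
            int retry = 50;

            do {
                    device_step();  /* in the driver, the hardware does this */
                    for (int i = 0; i < len; i++)
                            resp[i] = read_byte(REG_RETURN_0 + i);
                    status = read_byte(REG_STATUS);
            } while (status == STATUS_PENDING && retry--);

            return status;
    }

    int main(void)
    {
            uint8_t arg = 0x42, reply = 0;

            write_cmd(0x10, &arg, 1);
            if (read_response(&reply, 1) == STATUS_SUCCESS)
                    printf("reply = 0x%02x\n", reply);
            return 0;
    }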
| @@ -470,10 +470,10 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) | |||
| 470 | * another I2C transaction after issuing the DDC bus switch, it will be | 470 | * another I2C transaction after issuing the DDC bus switch, it will be |
| 471 | * switched to the internal SDVO register. | 471 | * switched to the internal SDVO register. |
| 472 | */ | 472 | */ |
| 473 | static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, | 473 | static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder, |
| 474 | u8 target) | 474 | u8 target) |
| 475 | { | 475 | { |
| 476 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 476 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 477 | u8 out_buf[2], cmd_buf[2], ret_value[2], ret; | 477 | u8 out_buf[2], cmd_buf[2], ret_value[2], ret; |
| 478 | struct i2c_msg msgs[] = { | 478 | struct i2c_msg msgs[] = { |
| 479 | { | 479 | { |
| @@ -497,10 +497,10 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, | |||
| 497 | }, | 497 | }, |
| 498 | }; | 498 | }; |
| 499 | 499 | ||
| 500 | intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, | 500 | intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH, |
| 501 | &target, 1); | 501 | &target, 1); |
| 502 | /* write the DDC switch command argument */ | 502 | /* write the DDC switch command argument */ |
| 503 | intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target); | 503 | intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target); |
| 504 | 504 | ||
| 505 | out_buf[0] = SDVO_I2C_OPCODE; | 505 | out_buf[0] = SDVO_I2C_OPCODE; |
| 506 | out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; | 506 | out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; |
| @@ -509,7 +509,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, | |||
| 509 | ret_value[0] = 0; | 509 | ret_value[0] = 0; |
| 510 | ret_value[1] = 0; | 510 | ret_value[1] = 0; |
| 511 | 511 | ||
| 512 | ret = i2c_transfer(intel_output->i2c_bus, msgs, 3); | 512 | ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3); |
| 513 | if (ret != 3) { | 513 | if (ret != 3) { |
| 514 | /* failure in I2C transfer */ | 514 | /* failure in I2C transfer */ |
| 515 | DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); | 515 | DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); |
| @@ -523,7 +523,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, | |||
| 523 | return; | 523 | return; |
| 524 | } | 524 | } |
| 525 | 525 | ||
| 526 | static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) | 526 | static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1) |
| 527 | { | 527 | { |
| 528 | struct intel_sdvo_set_target_input_args targets = {0}; | 528 | struct intel_sdvo_set_target_input_args targets = {0}; |
| 529 | u8 status; | 529 | u8 status; |
| @@ -534,10 +534,10 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool | |||
| 534 | if (target_1) | 534 | if (target_1) |
| 535 | targets.target_1 = 1; | 535 | targets.target_1 = 1; |
| 536 | 536 | ||
| 537 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets, | 537 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets, |
| 538 | sizeof(targets)); | 538 | sizeof(targets)); |
| 539 | 539 | ||
| 540 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 540 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
| 541 | 541 | ||
| 542 | return (status == SDVO_CMD_STATUS_SUCCESS); | 542 | return (status == SDVO_CMD_STATUS_SUCCESS); |
| 543 | } | 543 | } |
| @@ -548,13 +548,13 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool | |||
| 548 | * This function is making an assumption about the layout of the response, | 548 | * This function is making an assumption about the layout of the response, |
| 549 | * which should be checked against the docs. | 549 | * which should be checked against the docs. |
| 550 | */ | 550 | */ |
| 551 | static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2) | 551 | static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2) |
| 552 | { | 552 | { |
| 553 | struct intel_sdvo_get_trained_inputs_response response; | 553 | struct intel_sdvo_get_trained_inputs_response response; |
| 554 | u8 status; | 554 | u8 status; |
| 555 | 555 | ||
| 556 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); | 556 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); |
| 557 | status = intel_sdvo_read_response(intel_output, &response, sizeof(response)); | 557 | status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response)); |
| 558 | if (status != SDVO_CMD_STATUS_SUCCESS) | 558 | if (status != SDVO_CMD_STATUS_SUCCESS) |
| 559 | return false; | 559 | return false; |
| 560 | 560 | ||
| @@ -563,29 +563,29 @@ static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, boo | |||
| 563 | return true; | 563 | return true; |
| 564 | } | 564 | } |
| 565 | 565 | ||
| 566 | static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output, | 566 | static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder, |
| 567 | u16 *outputs) | 567 | u16 *outputs) |
| 568 | { | 568 | { |
| 569 | u8 status; | 569 | u8 status; |
| 570 | 570 | ||
| 571 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); | 571 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); |
| 572 | status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs)); | 572 | status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs)); |
| 573 | 573 | ||
| 574 | return (status == SDVO_CMD_STATUS_SUCCESS); | 574 | return (status == SDVO_CMD_STATUS_SUCCESS); |
| 575 | } | 575 | } |
| 576 | 576 | ||
| 577 | static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output, | 577 | static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder, |
| 578 | u16 outputs) | 578 | u16 outputs) |
| 579 | { | 579 | { |
| 580 | u8 status; | 580 | u8 status; |
| 581 | 581 | ||
| 582 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, | 582 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, |
| 583 | sizeof(outputs)); | 583 | sizeof(outputs)); |
| 584 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 584 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
| 585 | return (status == SDVO_CMD_STATUS_SUCCESS); | 585 | return (status == SDVO_CMD_STATUS_SUCCESS); |
| 586 | } | 586 | } |
| 587 | 587 | ||
| 588 | static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output, | 588 | static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder, |
| 589 | int mode) | 589 | int mode) |
| 590 | { | 590 | { |
| 591 | u8 status, state = SDVO_ENCODER_STATE_ON; | 591 | u8 status, state = SDVO_ENCODER_STATE_ON; |
| @@ -605,24 +605,24 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output | |||
| 605 | break; | 605 | break; |
| 606 | } | 606 | } |
| 607 | 607 | ||
| 608 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, | 608 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, |
| 609 | sizeof(state)); | 609 | sizeof(state)); |
| 610 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 610 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
| 611 | 611 | ||
| 612 | return (status == SDVO_CMD_STATUS_SUCCESS); | 612 | return (status == SDVO_CMD_STATUS_SUCCESS); |
| 613 | } | 613 | } |
| 614 | 614 | ||
| 615 | static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output, | 615 | static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder, |
| 616 | int *clock_min, | 616 | int *clock_min, |
| 617 | int *clock_max) | 617 | int *clock_max) |
| 618 | { | 618 | { |
| 619 | struct intel_sdvo_pixel_clock_range clocks; | 619 | struct intel_sdvo_pixel_clock_range clocks; |
| 620 | u8 status; | 620 | u8 status; |
| 621 | 621 | ||
| 622 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, | 622 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, |
| 623 | NULL, 0); | 623 | NULL, 0); |
| 624 | 624 | ||
| 625 | status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks)); | 625 | status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks)); |
| 626 | 626 | ||
| 627 | if (status != SDVO_CMD_STATUS_SUCCESS) | 627 | if (status != SDVO_CMD_STATUS_SUCCESS) |
| 628 | return false; | 628 | return false; |
| @@ -634,31 +634,31 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_ou | |||
| 634 | return true; | 634 | return true; |
| 635 | } | 635 | } |
| 636 | 636 | ||
| 637 | static bool intel_sdvo_set_target_output(struct intel_output *intel_output, | 637 | static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder, |
| 638 | u16 outputs) | 638 | u16 outputs) |
| 639 | { | 639 | { |
| 640 | u8 status; | 640 | u8 status; |
| 641 | 641 | ||
| 642 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, | 642 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, |
| 643 | sizeof(outputs)); | 643 | sizeof(outputs)); |
| 644 | 644 | ||
| 645 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 645 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
| 646 | return (status == SDVO_CMD_STATUS_SUCCESS); | 646 | return (status == SDVO_CMD_STATUS_SUCCESS); |
| 647 | } | 647 | } |
| 648 | 648 | ||
| 649 | static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd, | 649 | static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd, |
| 650 | struct intel_sdvo_dtd *dtd) | 650 | struct intel_sdvo_dtd *dtd) |
| 651 | { | 651 | { |
| 652 | u8 status; | 652 | u8 status; |
| 653 | 653 | ||
| 654 | intel_sdvo_write_cmd(intel_output, cmd, NULL, 0); | 654 | intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0); |
| 655 | status = intel_sdvo_read_response(intel_output, &dtd->part1, | 655 | status = intel_sdvo_read_response(intel_encoder, &dtd->part1, |
| 656 | sizeof(dtd->part1)); | 656 | sizeof(dtd->part1)); |
| 657 | if (status != SDVO_CMD_STATUS_SUCCESS) | 657 | if (status != SDVO_CMD_STATUS_SUCCESS) |
| 658 | return false; | 658 | return false; |
| 659 | 659 | ||
| 660 | intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0); | 660 | intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0); |
| 661 | status = intel_sdvo_read_response(intel_output, &dtd->part2, | 661 | status = intel_sdvo_read_response(intel_encoder, &dtd->part2, |
| 662 | sizeof(dtd->part2)); | 662 | sizeof(dtd->part2)); |
| 663 | if (status != SDVO_CMD_STATUS_SUCCESS) | 663 | if (status != SDVO_CMD_STATUS_SUCCESS) |
| 664 | return false; | 664 | return false; |
| @@ -666,60 +666,60 @@ static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd, | |||
| 666 | return true; | 666 | return true; |
| 667 | } | 667 | } |
| 668 | 668 | ||
| 669 | static bool intel_sdvo_get_input_timing(struct intel_output *intel_output, | 669 | static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder, |
| 670 | struct intel_sdvo_dtd *dtd) | 670 | struct intel_sdvo_dtd *dtd) |
| 671 | { | 671 | { |
| 672 | return intel_sdvo_get_timing(intel_output, | 672 | return intel_sdvo_get_timing(intel_encoder, |
| 673 | SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); | 673 | SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); |
| 674 | } | 674 | } |
| 675 | 675 | ||
| 676 | static bool intel_sdvo_get_output_timing(struct intel_output *intel_output, | 676 | static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder, |
| 677 | struct intel_sdvo_dtd *dtd) | 677 | struct intel_sdvo_dtd *dtd) |
| 678 | { | 678 | { |
| 679 | return intel_sdvo_get_timing(intel_output, | 679 | return intel_sdvo_get_timing(intel_encoder, |
| 680 | SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); | 680 | SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); |
| 681 | } | 681 | } |
| 682 | 682 | ||
| 683 | static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd, | 683 | static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd, |
| 684 | struct intel_sdvo_dtd *dtd) | 684 | struct intel_sdvo_dtd *dtd) |
| 685 | { | 685 | { |
| 686 | u8 status; | 686 | u8 status; |
| 687 | 687 | ||
| 688 | intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1)); | 688 | intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1)); |
| 689 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 689 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
| 690 | if (status != SDVO_CMD_STATUS_SUCCESS) | 690 | if (status != SDVO_CMD_STATUS_SUCCESS) |
| 691 | return false; | 691 | return false; |
| 692 | 692 | ||
| 693 | intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2)); | 693 | intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2)); |
| 694 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 694 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
| 695 | if (status != SDVO_CMD_STATUS_SUCCESS) | 695 | if (status != SDVO_CMD_STATUS_SUCCESS) |
| 696 | return false; | 696 | return false; |
| 697 | 697 | ||
| 698 | return true; | 698 | return true; |
| 699 | } | 699 | } |
| 700 | 700 | ||
| 701 | static bool intel_sdvo_set_input_timing(struct intel_output *intel_output, | 701 | static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder, |
| 702 | struct intel_sdvo_dtd *dtd) | 702 | struct intel_sdvo_dtd *dtd) |
| 703 | { | 703 | { |
| 704 | return intel_sdvo_set_timing(intel_output, | 704 | return intel_sdvo_set_timing(intel_encoder, |
| 705 | SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); | 705 | SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); |
| 706 | } | 706 | } |
| 707 | 707 | ||
| 708 | static bool intel_sdvo_set_output_timing(struct intel_output *intel_output, | 708 | static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder, |
| 709 | struct intel_sdvo_dtd *dtd) | 709 | struct intel_sdvo_dtd *dtd) |
| 710 | { | 710 | { |
| 711 | return intel_sdvo_set_timing(intel_output, | 711 | return intel_sdvo_set_timing(intel_encoder, |
| 712 | SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); | 712 | SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); |
| 713 | } | 713 | } |
| 714 | 714 | ||
| 715 | static bool | 715 | static bool |
| 716 | intel_sdvo_create_preferred_input_timing(struct intel_output *output, | 716 | intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder, |
| 717 | uint16_t clock, | 717 | uint16_t clock, |
| 718 | uint16_t width, | 718 | uint16_t width, |
| 719 | uint16_t height) | 719 | uint16_t height) |
| 720 | { | 720 | { |
| 721 | struct intel_sdvo_preferred_input_timing_args args; | 721 | struct intel_sdvo_preferred_input_timing_args args; |
| 722 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 722 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 723 | uint8_t status; | 723 | uint8_t status; |
| 724 | 724 | ||
| 725 | memset(&args, 0, sizeof(args)); | 725 | memset(&args, 0, sizeof(args)); |
| @@ -733,32 +733,33 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output, | |||
| 733 | sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) | 733 | sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) |
| 734 | args.scaled = 1; | 734 | args.scaled = 1; |
| 735 | 735 | ||
| 736 | intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, | 736 | intel_sdvo_write_cmd(intel_encoder, |
| 737 | SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, | ||
| 737 | &args, sizeof(args)); | 738 | &args, sizeof(args)); |
| 738 | status = intel_sdvo_read_response(output, NULL, 0); | 739 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
| 739 | if (status != SDVO_CMD_STATUS_SUCCESS) | 740 | if (status != SDVO_CMD_STATUS_SUCCESS) |
| 740 | return false; | 741 | return false; |
| 741 | 742 | ||
| 742 | return true; | 743 | return true; |
| 743 | } | 744 | } |
| 744 | 745 | ||
| 745 | static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, | 746 | static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder, |
| 746 | struct intel_sdvo_dtd *dtd) | 747 | struct intel_sdvo_dtd *dtd) |
| 747 | { | 748 | { |
| 748 | bool status; | 749 | bool status; |
| 749 | 750 | ||
| 750 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, | 751 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, |
| 751 | NULL, 0); | 752 | NULL, 0); |
| 752 | 753 | ||
| 753 | status = intel_sdvo_read_response(output, &dtd->part1, | 754 | status = intel_sdvo_read_response(intel_encoder, &dtd->part1, |
| 754 | sizeof(dtd->part1)); | 755 | sizeof(dtd->part1)); |
| 755 | if (status != SDVO_CMD_STATUS_SUCCESS) | 756 | if (status != SDVO_CMD_STATUS_SUCCESS) |
| 756 | return false; | 757 | return false; |
| 757 | 758 | ||
| 758 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, | 759 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, |
| 759 | NULL, 0); | 760 | NULL, 0); |
| 760 | 761 | ||
| 761 | status = intel_sdvo_read_response(output, &dtd->part2, | 762 | status = intel_sdvo_read_response(intel_encoder, &dtd->part2, |
| 762 | sizeof(dtd->part2)); | 763 | sizeof(dtd->part2)); |
| 763 | if (status != SDVO_CMD_STATUS_SUCCESS) | 764 | if (status != SDVO_CMD_STATUS_SUCCESS) |
| 764 | return false; | 765 | return false; |
| @@ -766,12 +767,12 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, | |||
| 766 | return false; | 767 | return false; |
| 767 | } | 768 | } |
| 768 | 769 | ||
| 769 | static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) | 770 | static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder) |
| 770 | { | 771 | { |
| 771 | u8 response, status; | 772 | u8 response, status; |
| 772 | 773 | ||
| 773 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); | 774 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); |
| 774 | status = intel_sdvo_read_response(intel_output, &response, 1); | 775 | status = intel_sdvo_read_response(intel_encoder, &response, 1); |
| 775 | 776 | ||
| 776 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 777 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 777 | DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n"); | 778 | DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n"); |
| @@ -783,12 +784,12 @@ static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) | |||
| 783 | return response; | 784 | return response; |
| 784 | } | 785 | } |
| 785 | 786 | ||
| 786 | static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val) | 787 | static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val) |
| 787 | { | 788 | { |
| 788 | u8 status; | 789 | u8 status; |
| 789 | 790 | ||
| 790 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); | 791 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); |
| 791 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 792 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
| 792 | if (status != SDVO_CMD_STATUS_SUCCESS) | 793 | if (status != SDVO_CMD_STATUS_SUCCESS) |
| 793 | return false; | 794 | return false; |
| 794 | 795 | ||
| @@ -877,13 +878,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, | |||
| 877 | mode->flags |= DRM_MODE_FLAG_PVSYNC; | 878 | mode->flags |= DRM_MODE_FLAG_PVSYNC; |
| 878 | } | 879 | } |
| 879 | 880 | ||
| 880 | static bool intel_sdvo_get_supp_encode(struct intel_output *output, | 881 | static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder, |
| 881 | struct intel_sdvo_encode *encode) | 882 | struct intel_sdvo_encode *encode) |
| 882 | { | 883 | { |
| 883 | uint8_t status; | 884 | uint8_t status; |
| 884 | 885 | ||
| 885 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); | 886 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); |
| 886 | status = intel_sdvo_read_response(output, encode, sizeof(*encode)); | 887 | status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode)); |
| 887 | if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */ | 888 | if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */ |
| 888 | memset(encode, 0, sizeof(*encode)); | 889 | memset(encode, 0, sizeof(*encode)); |
| 889 | return false; | 890 | return false; |
| @@ -892,29 +893,30 @@ static bool intel_sdvo_get_supp_encode(struct intel_output *output, | |||
| 892 | return true; | 893 | return true; |
| 893 | } | 894 | } |
| 894 | 895 | ||
| 895 | static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode) | 896 | static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder, |
| 897 | uint8_t mode) | ||
| 896 | { | 898 | { |
| 897 | uint8_t status; | 899 | uint8_t status; |
| 898 | 900 | ||
| 899 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1); | 901 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1); |
| 900 | status = intel_sdvo_read_response(output, NULL, 0); | 902 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
| 901 | 903 | ||
| 902 | return (status == SDVO_CMD_STATUS_SUCCESS); | 904 | return (status == SDVO_CMD_STATUS_SUCCESS); |
| 903 | } | 905 | } |
| 904 | 906 | ||
| 905 | static bool intel_sdvo_set_colorimetry(struct intel_output *output, | 907 | static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder, |
| 906 | uint8_t mode) | 908 | uint8_t mode) |
| 907 | { | 909 | { |
| 908 | uint8_t status; | 910 | uint8_t status; |
| 909 | 911 | ||
| 910 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1); | 912 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1); |
| 911 | status = intel_sdvo_read_response(output, NULL, 0); | 913 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
| 912 | 914 | ||
| 913 | return (status == SDVO_CMD_STATUS_SUCCESS); | 915 | return (status == SDVO_CMD_STATUS_SUCCESS); |
| 914 | } | 916 | } |
| 915 | 917 | ||
| 916 | #if 0 | 918 | #if 0 |
| 917 | static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) | 919 | static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder) |
| 918 | { | 920 | { |
| 919 | int i, j; | 921 | int i, j; |
| 920 | uint8_t set_buf_index[2]; | 922 | uint8_t set_buf_index[2]; |
| @@ -923,43 +925,45 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) | |||
| 923 | uint8_t buf[48]; | 925 | uint8_t buf[48]; |
| 924 | uint8_t *pos; | 926 | uint8_t *pos; |
| 925 | 927 | ||
| 926 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); | 928 | intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); |
| 927 | intel_sdvo_read_response(output, &av_split, 1); | 929 | intel_sdvo_read_response(encoder, &av_split, 1); |
| 928 | 930 | ||
| 929 | for (i = 0; i <= av_split; i++) { | 931 | for (i = 0; i <= av_split; i++) { |
| 930 | set_buf_index[0] = i; set_buf_index[1] = 0; | 932 | set_buf_index[0] = i; set_buf_index[1] = 0; |
| 931 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, | 933 | intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX, |
| 932 | set_buf_index, 2); | 934 | set_buf_index, 2); |
| 933 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0); | 935 | intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0); |
| 934 | intel_sdvo_read_response(output, &buf_size, 1); | 936 | intel_sdvo_read_response(encoder, &buf_size, 1); |
| 935 | 937 | ||
| 936 | pos = buf; | 938 | pos = buf; |
| 937 | for (j = 0; j <= buf_size; j += 8) { | 939 | for (j = 0; j <= buf_size; j += 8) { |
| 938 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA, | 940 | intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA, |
| 939 | NULL, 0); | 941 | NULL, 0); |
| 940 | intel_sdvo_read_response(output, pos, 8); | 942 | intel_sdvo_read_response(encoder, pos, 8); |
| 941 | pos += 8; | 943 | pos += 8; |
| 942 | } | 944 | } |
| 943 | } | 945 | } |
| 944 | } | 946 | } |
| 945 | #endif | 947 | #endif |
| 946 | 948 | ||
| 947 | static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index, | 949 | static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder, |
| 948 | uint8_t *data, int8_t size, uint8_t tx_rate) | 950 | int index, |
| 951 | uint8_t *data, int8_t size, uint8_t tx_rate) | ||
| 949 | { | 952 | { |
| 950 | uint8_t set_buf_index[2]; | 953 | uint8_t set_buf_index[2]; |
| 951 | 954 | ||
| 952 | set_buf_index[0] = index; | 955 | set_buf_index[0] = index; |
| 953 | set_buf_index[1] = 0; | 956 | set_buf_index[1] = 0; |
| 954 | 957 | ||
| 955 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2); | 958 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX, |
| 959 | set_buf_index, 2); | ||
| 956 | 960 | ||
| 957 | for (; size > 0; size -= 8) { | 961 | for (; size > 0; size -= 8) { |
| 958 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8); | 962 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8); |
| 959 | data += 8; | 963 | data += 8; |
| 960 | } | 964 | } |
| 961 | 965 | ||
| 962 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); | 966 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); |
| 963 | } | 967 | } |
| 964 | 968 | ||
| 965 | static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) | 969 | static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) |
| @@ -1034,7 +1038,7 @@ struct dip_infoframe { | |||
| 1034 | } __attribute__ ((packed)) u; | 1038 | } __attribute__ ((packed)) u; |
| 1035 | } __attribute__((packed)); | 1039 | } __attribute__((packed)); |
| 1036 | 1040 | ||
| 1037 | static void intel_sdvo_set_avi_infoframe(struct intel_output *output, | 1041 | static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder, |
| 1038 | struct drm_display_mode * mode) | 1042 | struct drm_display_mode * mode) |
| 1039 | { | 1043 | { |
| 1040 | struct dip_infoframe avi_if = { | 1044 | struct dip_infoframe avi_if = { |
| @@ -1045,15 +1049,16 @@ static void intel_sdvo_set_avi_infoframe(struct intel_output *output, | |||
| 1045 | 1049 | ||
| 1046 | avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, | 1050 | avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, |
| 1047 | 4 + avi_if.len); | 1051 | 4 + avi_if.len); |
| 1048 | intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len, | 1052 | intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if, |
| 1053 | 4 + avi_if.len, | ||
| 1049 | SDVO_HBUF_TX_VSYNC); | 1054 | SDVO_HBUF_TX_VSYNC); |
| 1050 | } | 1055 | } |
| 1051 | 1056 | ||
| 1052 | static void intel_sdvo_set_tv_format(struct intel_output *output) | 1057 | static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder) |
| 1053 | { | 1058 | { |
| 1054 | 1059 | ||
| 1055 | struct intel_sdvo_tv_format format; | 1060 | struct intel_sdvo_tv_format format; |
| 1056 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 1061 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 1057 | uint32_t format_map, i; | 1062 | uint32_t format_map, i; |
| 1058 | uint8_t status; | 1063 | uint8_t status; |
| 1059 | 1064 | ||
| @@ -1066,10 +1071,10 @@ static void intel_sdvo_set_tv_format(struct intel_output *output) | |||
| 1066 | memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? | 1071 | memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? |
| 1067 | sizeof(format) : sizeof(format_map)); | 1072 | sizeof(format) : sizeof(format_map)); |
| 1068 | 1073 | ||
| 1069 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format_map, | 1074 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map, |
| 1070 | sizeof(format)); | 1075 | sizeof(format)); |
| 1071 | 1076 | ||
| 1072 | status = intel_sdvo_read_response(output, NULL, 0); | 1077 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
| 1073 | if (status != SDVO_CMD_STATUS_SUCCESS) | 1078 | if (status != SDVO_CMD_STATUS_SUCCESS) |
| 1074 | DRM_DEBUG_KMS("%s: Failed to set TV format\n", | 1079 | DRM_DEBUG_KMS("%s: Failed to set TV format\n", |
| 1075 | SDVO_NAME(sdvo_priv)); | 1080 | SDVO_NAME(sdvo_priv)); |
| @@ -1079,8 +1084,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
| 1079 | struct drm_display_mode *mode, | 1084 | struct drm_display_mode *mode, |
| 1080 | struct drm_display_mode *adjusted_mode) | 1085 | struct drm_display_mode *adjusted_mode) |
| 1081 | { | 1086 | { |
| 1082 | struct intel_output *output = enc_to_intel_output(encoder); | 1087 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
| 1083 | struct intel_sdvo_priv *dev_priv = output->dev_priv; | 1088 | struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv; |
| 1084 | 1089 | ||
| 1085 | if (dev_priv->is_tv) { | 1090 | if (dev_priv->is_tv) { |
| 1086 | struct intel_sdvo_dtd output_dtd; | 1091 | struct intel_sdvo_dtd output_dtd; |
| @@ -1095,22 +1100,22 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
| 1095 | 1100 | ||
| 1096 | /* Set output timings */ | 1101 | /* Set output timings */ |
| 1097 | intel_sdvo_get_dtd_from_mode(&output_dtd, mode); | 1102 | intel_sdvo_get_dtd_from_mode(&output_dtd, mode); |
| 1098 | intel_sdvo_set_target_output(output, | 1103 | intel_sdvo_set_target_output(intel_encoder, |
| 1099 | dev_priv->controlled_output); | 1104 | dev_priv->controlled_output); |
| 1100 | intel_sdvo_set_output_timing(output, &output_dtd); | 1105 | intel_sdvo_set_output_timing(intel_encoder, &output_dtd); |
| 1101 | 1106 | ||
| 1102 | /* Set the input timing to the screen. Assume always input 0. */ | 1107 | /* Set the input timing to the screen. Assume always input 0. */ |
| 1103 | intel_sdvo_set_target_input(output, true, false); | 1108 | intel_sdvo_set_target_input(intel_encoder, true, false); |
| 1104 | 1109 | ||
| 1105 | 1110 | ||
| 1106 | success = intel_sdvo_create_preferred_input_timing(output, | 1111 | success = intel_sdvo_create_preferred_input_timing(intel_encoder, |
| 1107 | mode->clock / 10, | 1112 | mode->clock / 10, |
| 1108 | mode->hdisplay, | 1113 | mode->hdisplay, |
| 1109 | mode->vdisplay); | 1114 | mode->vdisplay); |
| 1110 | if (success) { | 1115 | if (success) { |
| 1111 | struct intel_sdvo_dtd input_dtd; | 1116 | struct intel_sdvo_dtd input_dtd; |
| 1112 | 1117 | ||
| 1113 | intel_sdvo_get_preferred_input_timing(output, | 1118 | intel_sdvo_get_preferred_input_timing(intel_encoder, |
| 1114 | &input_dtd); | 1119 | &input_dtd); |
| 1115 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); | 1120 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); |
| 1116 | dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; | 1121 | dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; |
| @@ -1133,16 +1138,16 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
| 1133 | intel_sdvo_get_dtd_from_mode(&output_dtd, | 1138 | intel_sdvo_get_dtd_from_mode(&output_dtd, |
| 1134 | dev_priv->sdvo_lvds_fixed_mode); | 1139 | dev_priv->sdvo_lvds_fixed_mode); |
| 1135 | 1140 | ||
| 1136 | intel_sdvo_set_target_output(output, | 1141 | intel_sdvo_set_target_output(intel_encoder, |
| 1137 | dev_priv->controlled_output); | 1142 | dev_priv->controlled_output); |
| 1138 | intel_sdvo_set_output_timing(output, &output_dtd); | 1143 | intel_sdvo_set_output_timing(intel_encoder, &output_dtd); |
| 1139 | 1144 | ||
| 1140 | /* Set the input timing to the screen. Assume always input 0. */ | 1145 | /* Set the input timing to the screen. Assume always input 0. */ |
| 1141 | intel_sdvo_set_target_input(output, true, false); | 1146 | intel_sdvo_set_target_input(intel_encoder, true, false); |
| 1142 | 1147 | ||
| 1143 | 1148 | ||
| 1144 | success = intel_sdvo_create_preferred_input_timing( | 1149 | success = intel_sdvo_create_preferred_input_timing( |
| 1145 | output, | 1150 | intel_encoder, |
| 1146 | mode->clock / 10, | 1151 | mode->clock / 10, |
| 1147 | mode->hdisplay, | 1152 | mode->hdisplay, |
| 1148 | mode->vdisplay); | 1153 | mode->vdisplay); |
| @@ -1150,7 +1155,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
| 1150 | if (success) { | 1155 | if (success) { |
| 1151 | struct intel_sdvo_dtd input_dtd; | 1156 | struct intel_sdvo_dtd input_dtd; |
| 1152 | 1157 | ||
| 1153 | intel_sdvo_get_preferred_input_timing(output, | 1158 | intel_sdvo_get_preferred_input_timing(intel_encoder, |
| 1154 | &input_dtd); | 1159 | &input_dtd); |
| 1155 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); | 1160 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); |
| 1156 | dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; | 1161 | dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; |
| @@ -1182,8 +1187,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
| 1182 | struct drm_i915_private *dev_priv = dev->dev_private; | 1187 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1183 | struct drm_crtc *crtc = encoder->crtc; | 1188 | struct drm_crtc *crtc = encoder->crtc; |
| 1184 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1189 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 1185 | struct intel_output *output = enc_to_intel_output(encoder); | 1190 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
| 1186 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 1191 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 1187 | u32 sdvox = 0; | 1192 | u32 sdvox = 0; |
| 1188 | int sdvo_pixel_multiply; | 1193 | int sdvo_pixel_multiply; |
| 1189 | struct intel_sdvo_in_out_map in_out; | 1194 | struct intel_sdvo_in_out_map in_out; |
| @@ -1202,12 +1207,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
| 1202 | in_out.in0 = sdvo_priv->controlled_output; | 1207 | in_out.in0 = sdvo_priv->controlled_output; |
| 1203 | in_out.in1 = 0; | 1208 | in_out.in1 = 0; |
| 1204 | 1209 | ||
| 1205 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, | 1210 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP, |
| 1206 | &in_out, sizeof(in_out)); | 1211 | &in_out, sizeof(in_out)); |
| 1207 | status = intel_sdvo_read_response(output, NULL, 0); | 1212 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
| 1208 | 1213 | ||
| 1209 | if (sdvo_priv->is_hdmi) { | 1214 | if (sdvo_priv->is_hdmi) { |
| 1210 | intel_sdvo_set_avi_infoframe(output, mode); | 1215 | intel_sdvo_set_avi_infoframe(intel_encoder, mode); |
| 1211 | sdvox |= SDVO_AUDIO_ENABLE; | 1216 | sdvox |= SDVO_AUDIO_ENABLE; |
| 1212 | } | 1217 | } |
| 1213 | 1218 | ||
| @@ -1224,16 +1229,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
| 1224 | */ | 1229 | */ |
| 1225 | if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { | 1230 | if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { |
| 1226 | /* Set the output timing to the screen */ | 1231 | /* Set the output timing to the screen */ |
| 1227 | intel_sdvo_set_target_output(output, | 1232 | intel_sdvo_set_target_output(intel_encoder, |
| 1228 | sdvo_priv->controlled_output); | 1233 | sdvo_priv->controlled_output); |
| 1229 | intel_sdvo_set_output_timing(output, &input_dtd); | 1234 | intel_sdvo_set_output_timing(intel_encoder, &input_dtd); |
| 1230 | } | 1235 | } |
| 1231 | 1236 | ||
| 1232 | /* Set the input timing to the screen. Assume always input 0. */ | 1237 | /* Set the input timing to the screen. Assume always input 0. */ |
| 1233 | intel_sdvo_set_target_input(output, true, false); | 1238 | intel_sdvo_set_target_input(intel_encoder, true, false); |
| 1234 | 1239 | ||
| 1235 | if (sdvo_priv->is_tv) | 1240 | if (sdvo_priv->is_tv) |
| 1236 | intel_sdvo_set_tv_format(output); | 1241 | intel_sdvo_set_tv_format(intel_encoder); |
| 1237 | 1242 | ||
| 1238 | /* We would like to use intel_sdvo_create_preferred_input_timing() to | 1243 | /* We would like to use intel_sdvo_create_preferred_input_timing() to |
| 1239 | * provide the device with a timing it can support, if it supports that | 1244 | * provide the device with a timing it can support, if it supports that |
| @@ -1241,29 +1246,29 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
| 1241 | * output the preferred timing, and we don't support that currently. | 1246 | * output the preferred timing, and we don't support that currently. |
| 1242 | */ | 1247 | */ |
| 1243 | #if 0 | 1248 | #if 0 |
| 1244 | success = intel_sdvo_create_preferred_input_timing(output, clock, | 1249 | success = intel_sdvo_create_preferred_input_timing(encoder, clock, |
| 1245 | width, height); | 1250 | width, height); |
| 1246 | if (success) { | 1251 | if (success) { |
| 1247 | struct intel_sdvo_dtd *input_dtd; | 1252 | struct intel_sdvo_dtd *input_dtd; |
| 1248 | 1253 | ||
| 1249 | intel_sdvo_get_preferred_input_timing(output, &input_dtd); | 1254 | intel_sdvo_get_preferred_input_timing(encoder, &input_dtd); |
| 1250 | intel_sdvo_set_input_timing(output, &input_dtd); | 1255 | intel_sdvo_set_input_timing(encoder, &input_dtd); |
| 1251 | } | 1256 | } |
| 1252 | #else | 1257 | #else |
| 1253 | intel_sdvo_set_input_timing(output, &input_dtd); | 1258 | intel_sdvo_set_input_timing(intel_encoder, &input_dtd); |
| 1254 | #endif | 1259 | #endif |
| 1255 | 1260 | ||
| 1256 | switch (intel_sdvo_get_pixel_multiplier(mode)) { | 1261 | switch (intel_sdvo_get_pixel_multiplier(mode)) { |
| 1257 | case 1: | 1262 | case 1: |
| 1258 | intel_sdvo_set_clock_rate_mult(output, | 1263 | intel_sdvo_set_clock_rate_mult(intel_encoder, |
| 1259 | SDVO_CLOCK_RATE_MULT_1X); | 1264 | SDVO_CLOCK_RATE_MULT_1X); |
| 1260 | break; | 1265 | break; |
| 1261 | case 2: | 1266 | case 2: |
| 1262 | intel_sdvo_set_clock_rate_mult(output, | 1267 | intel_sdvo_set_clock_rate_mult(intel_encoder, |
| 1263 | SDVO_CLOCK_RATE_MULT_2X); | 1268 | SDVO_CLOCK_RATE_MULT_2X); |
| 1264 | break; | 1269 | break; |
| 1265 | case 4: | 1270 | case 4: |
| 1266 | intel_sdvo_set_clock_rate_mult(output, | 1271 | intel_sdvo_set_clock_rate_mult(intel_encoder, |
| 1267 | SDVO_CLOCK_RATE_MULT_4X); | 1272 | SDVO_CLOCK_RATE_MULT_4X); |
| 1268 | break; | 1273 | break; |
| 1269 | } | 1274 | } |
| @@ -1274,8 +1279,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
| 1274 | SDVO_VSYNC_ACTIVE_HIGH | | 1279 | SDVO_VSYNC_ACTIVE_HIGH | |
| 1275 | SDVO_HSYNC_ACTIVE_HIGH; | 1280 | SDVO_HSYNC_ACTIVE_HIGH; |
| 1276 | } else { | 1281 | } else { |
| 1277 | sdvox |= I915_READ(sdvo_priv->output_device); | 1282 | sdvox |= I915_READ(sdvo_priv->sdvo_reg); |
| 1278 | switch (sdvo_priv->output_device) { | 1283 | switch (sdvo_priv->sdvo_reg) { |
| 1279 | case SDVOB: | 1284 | case SDVOB: |
| 1280 | sdvox &= SDVOB_PRESERVE_MASK; | 1285 | sdvox &= SDVOB_PRESERVE_MASK; |
| 1281 | break; | 1286 | break; |
| @@ -1299,26 +1304,26 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
| 1299 | 1304 | ||
| 1300 | if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) | 1305 | if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) |
| 1301 | sdvox |= SDVO_STALL_SELECT; | 1306 | sdvox |= SDVO_STALL_SELECT; |
| 1302 | intel_sdvo_write_sdvox(output, sdvox); | 1307 | intel_sdvo_write_sdvox(intel_encoder, sdvox); |
| 1303 | } | 1308 | } |
| 1304 | 1309 | ||
| 1305 | static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | 1310 | static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) |
| 1306 | { | 1311 | { |
| 1307 | struct drm_device *dev = encoder->dev; | 1312 | struct drm_device *dev = encoder->dev; |
| 1308 | struct drm_i915_private *dev_priv = dev->dev_private; | 1313 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1309 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 1314 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
| 1310 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1315 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 1311 | u32 temp; | 1316 | u32 temp; |
| 1312 | 1317 | ||
| 1313 | if (mode != DRM_MODE_DPMS_ON) { | 1318 | if (mode != DRM_MODE_DPMS_ON) { |
| 1314 | intel_sdvo_set_active_outputs(intel_output, 0); | 1319 | intel_sdvo_set_active_outputs(intel_encoder, 0); |
| 1315 | if (0) | 1320 | if (0) |
| 1316 | intel_sdvo_set_encoder_power_state(intel_output, mode); | 1321 | intel_sdvo_set_encoder_power_state(intel_encoder, mode); |
| 1317 | 1322 | ||
| 1318 | if (mode == DRM_MODE_DPMS_OFF) { | 1323 | if (mode == DRM_MODE_DPMS_OFF) { |
| 1319 | temp = I915_READ(sdvo_priv->output_device); | 1324 | temp = I915_READ(sdvo_priv->sdvo_reg); |
| 1320 | if ((temp & SDVO_ENABLE) != 0) { | 1325 | if ((temp & SDVO_ENABLE) != 0) { |
| 1321 | intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE); | 1326 | intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE); |
| 1322 | } | 1327 | } |
| 1323 | } | 1328 | } |
| 1324 | } else { | 1329 | } else { |
| @@ -1326,13 +1331,13 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | |||
| 1326 | int i; | 1331 | int i; |
| 1327 | u8 status; | 1332 | u8 status; |
| 1328 | 1333 | ||
| 1329 | temp = I915_READ(sdvo_priv->output_device); | 1334 | temp = I915_READ(sdvo_priv->sdvo_reg); |
| 1330 | if ((temp & SDVO_ENABLE) == 0) | 1335 | if ((temp & SDVO_ENABLE) == 0) |
| 1331 | intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE); | 1336 | intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE); |
| 1332 | for (i = 0; i < 2; i++) | 1337 | for (i = 0; i < 2; i++) |
| 1333 | intel_wait_for_vblank(dev); | 1338 | intel_wait_for_vblank(dev); |
| 1334 | 1339 | ||
| 1335 | status = intel_sdvo_get_trained_inputs(intel_output, &input1, | 1340 | status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, |
| 1336 | &input2); | 1341 | &input2); |
| 1337 | 1342 | ||
| 1338 | 1343 | ||
| @@ -1346,8 +1351,8 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | |||
| 1346 | } | 1351 | } |
| 1347 | 1352 | ||
| 1348 | if (0) | 1353 | if (0) |
| 1349 | intel_sdvo_set_encoder_power_state(intel_output, mode); | 1354 | intel_sdvo_set_encoder_power_state(intel_encoder, mode); |
| 1350 | intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output); | 1355 | intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output); |
| 1351 | } | 1356 | } |
| 1352 | return; | 1357 | return; |
| 1353 | } | 1358 | } |
| @@ -1356,22 +1361,22 @@ static void intel_sdvo_save(struct drm_connector *connector) | |||
| 1356 | { | 1361 | { |
| 1357 | struct drm_device *dev = connector->dev; | 1362 | struct drm_device *dev = connector->dev; |
| 1358 | struct drm_i915_private *dev_priv = dev->dev_private; | 1363 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1359 | struct intel_output *intel_output = to_intel_output(connector); | 1364 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1360 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1365 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 1361 | int o; | 1366 | int o; |
| 1362 | 1367 | ||
| 1363 | sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output); | 1368 | sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder); |
| 1364 | intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs); | 1369 | intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs); |
| 1365 | 1370 | ||
| 1366 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { | 1371 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { |
| 1367 | intel_sdvo_set_target_input(intel_output, true, false); | 1372 | intel_sdvo_set_target_input(intel_encoder, true, false); |
| 1368 | intel_sdvo_get_input_timing(intel_output, | 1373 | intel_sdvo_get_input_timing(intel_encoder, |
| 1369 | &sdvo_priv->save_input_dtd_1); | 1374 | &sdvo_priv->save_input_dtd_1); |
| 1370 | } | 1375 | } |
| 1371 | 1376 | ||
| 1372 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { | 1377 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { |
| 1373 | intel_sdvo_set_target_input(intel_output, false, true); | 1378 | intel_sdvo_set_target_input(intel_encoder, false, true); |
| 1374 | intel_sdvo_get_input_timing(intel_output, | 1379 | intel_sdvo_get_input_timing(intel_encoder, |
| 1375 | &sdvo_priv->save_input_dtd_2); | 1380 | &sdvo_priv->save_input_dtd_2); |
| 1376 | } | 1381 | } |
| 1377 | 1382 | ||
| @@ -1380,8 +1385,8 @@ static void intel_sdvo_save(struct drm_connector *connector) | |||
| 1380 | u16 this_output = (1 << o); | 1385 | u16 this_output = (1 << o); |
| 1381 | if (sdvo_priv->caps.output_flags & this_output) | 1386 | if (sdvo_priv->caps.output_flags & this_output) |
| 1382 | { | 1387 | { |
| 1383 | intel_sdvo_set_target_output(intel_output, this_output); | 1388 | intel_sdvo_set_target_output(intel_encoder, this_output); |
| 1384 | intel_sdvo_get_output_timing(intel_output, | 1389 | intel_sdvo_get_output_timing(intel_encoder, |
| 1385 | &sdvo_priv->save_output_dtd[o]); | 1390 | &sdvo_priv->save_output_dtd[o]); |
| 1386 | } | 1391 | } |
| 1387 | } | 1392 | } |
| @@ -1389,66 +1394,66 @@ static void intel_sdvo_save(struct drm_connector *connector) | |||
| 1389 | /* XXX: Save TV format/enhancements. */ | 1394 | /* XXX: Save TV format/enhancements. */ |
| 1390 | } | 1395 | } |
| 1391 | 1396 | ||
| 1392 | sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device); | 1397 | sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg); |
| 1393 | } | 1398 | } |
| 1394 | 1399 | ||
| 1395 | static void intel_sdvo_restore(struct drm_connector *connector) | 1400 | static void intel_sdvo_restore(struct drm_connector *connector) |
| 1396 | { | 1401 | { |
| 1397 | struct drm_device *dev = connector->dev; | 1402 | struct drm_device *dev = connector->dev; |
| 1398 | struct intel_output *intel_output = to_intel_output(connector); | 1403 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1399 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1404 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 1400 | int o; | 1405 | int o; |
| 1401 | int i; | 1406 | int i; |
| 1402 | bool input1, input2; | 1407 | bool input1, input2; |
| 1403 | u8 status; | 1408 | u8 status; |
| 1404 | 1409 | ||
| 1405 | intel_sdvo_set_active_outputs(intel_output, 0); | 1410 | intel_sdvo_set_active_outputs(intel_encoder, 0); |
| 1406 | 1411 | ||
| 1407 | for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) | 1412 | for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) |
| 1408 | { | 1413 | { |
| 1409 | u16 this_output = (1 << o); | 1414 | u16 this_output = (1 << o); |
| 1410 | if (sdvo_priv->caps.output_flags & this_output) { | 1415 | if (sdvo_priv->caps.output_flags & this_output) { |
| 1411 | intel_sdvo_set_target_output(intel_output, this_output); | 1416 | intel_sdvo_set_target_output(intel_encoder, this_output); |
| 1412 | intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]); | 1417 | intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]); |
| 1413 | } | 1418 | } |
| 1414 | } | 1419 | } |
| 1415 | 1420 | ||
| 1416 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { | 1421 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { |
| 1417 | intel_sdvo_set_target_input(intel_output, true, false); | 1422 | intel_sdvo_set_target_input(intel_encoder, true, false); |
| 1418 | intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1); | 1423 | intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1); |
| 1419 | } | 1424 | } |
| 1420 | 1425 | ||
| 1421 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { | 1426 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { |
| 1422 | intel_sdvo_set_target_input(intel_output, false, true); | 1427 | intel_sdvo_set_target_input(intel_encoder, false, true); |
| 1423 | intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2); | 1428 | intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2); |
| 1424 | } | 1429 | } |
| 1425 | 1430 | ||
| 1426 | intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult); | 1431 | intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult); |
| 1427 | 1432 | ||
| 1428 | if (sdvo_priv->is_tv) { | 1433 | if (sdvo_priv->is_tv) { |
| 1429 | /* XXX: Restore TV format/enhancements. */ | 1434 | /* XXX: Restore TV format/enhancements. */ |
| 1430 | } | 1435 | } |
| 1431 | 1436 | ||
| 1432 | intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX); | 1437 | intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX); |
| 1433 | 1438 | ||
| 1434 | if (sdvo_priv->save_SDVOX & SDVO_ENABLE) | 1439 | if (sdvo_priv->save_SDVOX & SDVO_ENABLE) |
| 1435 | { | 1440 | { |
| 1436 | for (i = 0; i < 2; i++) | 1441 | for (i = 0; i < 2; i++) |
| 1437 | intel_wait_for_vblank(dev); | 1442 | intel_wait_for_vblank(dev); |
| 1438 | status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2); | 1443 | status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2); |
| 1439 | if (status == SDVO_CMD_STATUS_SUCCESS && !input1) | 1444 | if (status == SDVO_CMD_STATUS_SUCCESS && !input1) |
| 1440 | DRM_DEBUG_KMS("First %s output reported failure to " | 1445 | DRM_DEBUG_KMS("First %s output reported failure to " |
| 1441 | "sync\n", SDVO_NAME(sdvo_priv)); | 1446 | "sync\n", SDVO_NAME(sdvo_priv)); |
| 1442 | } | 1447 | } |
| 1443 | 1448 | ||
| 1444 | intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs); | 1449 | intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs); |
| 1445 | } | 1450 | } |
| 1446 | 1451 | ||
| 1447 | static int intel_sdvo_mode_valid(struct drm_connector *connector, | 1452 | static int intel_sdvo_mode_valid(struct drm_connector *connector, |
| 1448 | struct drm_display_mode *mode) | 1453 | struct drm_display_mode *mode) |
| 1449 | { | 1454 | { |
| 1450 | struct intel_output *intel_output = to_intel_output(connector); | 1455 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1451 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1456 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 1452 | 1457 | ||
| 1453 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 1458 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
| 1454 | return MODE_NO_DBLESCAN; | 1459 | return MODE_NO_DBLESCAN; |
| @@ -1473,12 +1478,12 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector, | |||
| 1473 | return MODE_OK; | 1478 | return MODE_OK; |
| 1474 | } | 1479 | } |
| 1475 | 1480 | ||
| 1476 | static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps) | 1481 | static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps) |
| 1477 | { | 1482 | { |
| 1478 | u8 status; | 1483 | u8 status; |
| 1479 | 1484 | ||
| 1480 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); | 1485 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); |
| 1481 | status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps)); | 1486 | status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps)); |
| 1482 | if (status != SDVO_CMD_STATUS_SUCCESS) | 1487 | if (status != SDVO_CMD_STATUS_SUCCESS) |
| 1483 | return false; | 1488 | return false; |
| 1484 | 1489 | ||
| @@ -1488,22 +1493,22 @@ static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struc | |||
| 1488 | struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) | 1493 | struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) |
| 1489 | { | 1494 | { |
| 1490 | struct drm_connector *connector = NULL; | 1495 | struct drm_connector *connector = NULL; |
| 1491 | struct intel_output *iout = NULL; | 1496 | struct intel_encoder *iout = NULL; |
| 1492 | struct intel_sdvo_priv *sdvo; | 1497 | struct intel_sdvo_priv *sdvo; |
| 1493 | 1498 | ||
| 1494 | /* find the sdvo connector */ | 1499 | /* find the sdvo connector */ |
| 1495 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1500 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 1496 | iout = to_intel_output(connector); | 1501 | iout = to_intel_encoder(connector); |
| 1497 | 1502 | ||
| 1498 | if (iout->type != INTEL_OUTPUT_SDVO) | 1503 | if (iout->type != INTEL_OUTPUT_SDVO) |
| 1499 | continue; | 1504 | continue; |
| 1500 | 1505 | ||
| 1501 | sdvo = iout->dev_priv; | 1506 | sdvo = iout->dev_priv; |
| 1502 | 1507 | ||
| 1503 | if (sdvo->output_device == SDVOB && sdvoB) | 1508 | if (sdvo->sdvo_reg == SDVOB && sdvoB) |
| 1504 | return connector; | 1509 | return connector; |
| 1505 | 1510 | ||
| 1506 | if (sdvo->output_device == SDVOC && !sdvoB) | 1511 | if (sdvo->sdvo_reg == SDVOC && !sdvoB) |
| 1507 | return connector; | 1512 | return connector; |
| 1508 | 1513 | ||
| 1509 | } | 1514 | } |
| @@ -1515,16 +1520,16 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector) | |||
| 1515 | { | 1520 | { |
| 1516 | u8 response[2]; | 1521 | u8 response[2]; |
| 1517 | u8 status; | 1522 | u8 status; |
| 1518 | struct intel_output *intel_output; | 1523 | struct intel_encoder *intel_encoder; |
| 1519 | DRM_DEBUG_KMS("\n"); | 1524 | DRM_DEBUG_KMS("\n"); |
| 1520 | 1525 | ||
| 1521 | if (!connector) | 1526 | if (!connector) |
| 1522 | return 0; | 1527 | return 0; |
| 1523 | 1528 | ||
| 1524 | intel_output = to_intel_output(connector); | 1529 | intel_encoder = to_intel_encoder(connector); |
| 1525 | 1530 | ||
| 1526 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); | 1531 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); |
| 1527 | status = intel_sdvo_read_response(intel_output, &response, 2); | 1532 | status = intel_sdvo_read_response(intel_encoder, &response, 2); |
| 1528 | 1533 | ||
| 1529 | if (response[0] !=0) | 1534 | if (response[0] !=0) |
| 1530 | return 1; | 1535 | return 1; |
| @@ -1536,30 +1541,30 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) | |||
| 1536 | { | 1541 | { |
| 1537 | u8 response[2]; | 1542 | u8 response[2]; |
| 1538 | u8 status; | 1543 | u8 status; |
| 1539 | struct intel_output *intel_output = to_intel_output(connector); | 1544 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1540 | 1545 | ||
| 1541 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | 1546 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); |
| 1542 | intel_sdvo_read_response(intel_output, &response, 2); | 1547 | intel_sdvo_read_response(intel_encoder, &response, 2); |
| 1543 | 1548 | ||
| 1544 | if (on) { | 1549 | if (on) { |
| 1545 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); | 1550 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); |
| 1546 | status = intel_sdvo_read_response(intel_output, &response, 2); | 1551 | status = intel_sdvo_read_response(intel_encoder, &response, 2); |
| 1547 | 1552 | ||
| 1548 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | 1553 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); |
| 1549 | } else { | 1554 | } else { |
| 1550 | response[0] = 0; | 1555 | response[0] = 0; |
| 1551 | response[1] = 0; | 1556 | response[1] = 0; |
| 1552 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | 1557 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); |
| 1553 | } | 1558 | } |
| 1554 | 1559 | ||
| 1555 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | 1560 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); |
| 1556 | intel_sdvo_read_response(intel_output, &response, 2); | 1561 | intel_sdvo_read_response(intel_encoder, &response, 2); |
| 1557 | } | 1562 | } |
| 1558 | 1563 | ||
| 1559 | static bool | 1564 | static bool |
| 1560 | intel_sdvo_multifunc_encoder(struct intel_output *intel_output) | 1565 | intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder) |
| 1561 | { | 1566 | { |
| 1562 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1567 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 1563 | int caps = 0; | 1568 | int caps = 0; |
| 1564 | 1569 | ||
| 1565 | if (sdvo_priv->caps.output_flags & | 1570 | if (sdvo_priv->caps.output_flags & |
| @@ -1593,11 +1598,11 @@ static struct drm_connector * | |||
| 1593 | intel_find_analog_connector(struct drm_device *dev) | 1598 | intel_find_analog_connector(struct drm_device *dev) |
| 1594 | { | 1599 | { |
| 1595 | struct drm_connector *connector; | 1600 | struct drm_connector *connector; |
| 1596 | struct intel_output *intel_output; | 1601 | struct intel_encoder *intel_encoder; |
| 1597 | 1602 | ||
| 1598 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1603 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 1599 | intel_output = to_intel_output(connector); | 1604 | intel_encoder = to_intel_encoder(connector); |
| 1600 | if (intel_output->type == INTEL_OUTPUT_ANALOG) | 1605 | if (intel_encoder->type == INTEL_OUTPUT_ANALOG) |
| 1601 | return connector; | 1606 | return connector; |
| 1602 | } | 1607 | } |
| 1603 | return NULL; | 1608 | return NULL; |
| @@ -1622,16 +1627,16 @@ intel_analog_is_connected(struct drm_device *dev) | |||
| 1622 | enum drm_connector_status | 1627 | enum drm_connector_status |
| 1623 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | 1628 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) |
| 1624 | { | 1629 | { |
| 1625 | struct intel_output *intel_output = to_intel_output(connector); | 1630 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1626 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1631 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 1627 | enum drm_connector_status status = connector_status_connected; | 1632 | enum drm_connector_status status = connector_status_connected; |
| 1628 | struct edid *edid = NULL; | 1633 | struct edid *edid = NULL; |
| 1629 | 1634 | ||
| 1630 | edid = drm_get_edid(&intel_output->base, | 1635 | edid = drm_get_edid(&intel_encoder->base, |
| 1631 | intel_output->ddc_bus); | 1636 | intel_encoder->ddc_bus); |
| 1632 | 1637 | ||
| 1633 | /* This is only applied to SDVO cards with multiple outputs */ | 1638 | /* This is only applied to SDVO cards with multiple outputs */ |
| 1634 | if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) { | 1639 | if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) { |
| 1635 | uint8_t saved_ddc, temp_ddc; | 1640 | uint8_t saved_ddc, temp_ddc; |
| 1636 | saved_ddc = sdvo_priv->ddc_bus; | 1641 | saved_ddc = sdvo_priv->ddc_bus; |
| 1637 | temp_ddc = sdvo_priv->ddc_bus >> 1; | 1642 | temp_ddc = sdvo_priv->ddc_bus >> 1; |
| @@ -1641,8 +1646,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | |||
| 1641 | */ | 1646 | */ |
| 1642 | while(temp_ddc > 1) { | 1647 | while(temp_ddc > 1) { |
| 1643 | sdvo_priv->ddc_bus = temp_ddc; | 1648 | sdvo_priv->ddc_bus = temp_ddc; |
| 1644 | edid = drm_get_edid(&intel_output->base, | 1649 | edid = drm_get_edid(&intel_encoder->base, |
| 1645 | intel_output->ddc_bus); | 1650 | intel_encoder->ddc_bus); |
| 1646 | if (edid) { | 1651 | if (edid) { |
| 1647 | /* | 1652 | /* |
| 1648 | * When we can get the EDID, maybe it is the | 1653 | * When we can get the EDID, maybe it is the |
| @@ -1661,8 +1666,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | |||
| 1661 | */ | 1666 | */ |
| 1662 | if (edid == NULL && | 1667 | if (edid == NULL && |
| 1663 | sdvo_priv->analog_ddc_bus && | 1668 | sdvo_priv->analog_ddc_bus && |
| 1664 | !intel_analog_is_connected(intel_output->base.dev)) | 1669 | !intel_analog_is_connected(intel_encoder->base.dev)) |
| 1665 | edid = drm_get_edid(&intel_output->base, | 1670 | edid = drm_get_edid(&intel_encoder->base, |
| 1666 | sdvo_priv->analog_ddc_bus); | 1671 | sdvo_priv->analog_ddc_bus); |
| 1667 | if (edid != NULL) { | 1672 | if (edid != NULL) { |
| 1668 | /* Don't report the output as connected if it's a DVI-I | 1673 | /* Don't report the output as connected if it's a DVI-I |
| @@ -1677,7 +1682,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | |||
| 1677 | } | 1682 | } |
| 1678 | 1683 | ||
| 1679 | kfree(edid); | 1684 | kfree(edid); |
| 1680 | intel_output->base.display_info.raw_edid = NULL; | 1685 | intel_encoder->base.display_info.raw_edid = NULL; |
| 1681 | 1686 | ||
| 1682 | } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) | 1687 | } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) |
| 1683 | status = connector_status_disconnected; | 1688 | status = connector_status_disconnected; |
| @@ -1689,16 +1694,16 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect | |||
| 1689 | { | 1694 | { |
| 1690 | uint16_t response; | 1695 | uint16_t response; |
| 1691 | u8 status; | 1696 | u8 status; |
| 1692 | struct intel_output *intel_output = to_intel_output(connector); | 1697 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1693 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1698 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 1694 | 1699 | ||
| 1695 | intel_sdvo_write_cmd(intel_output, | 1700 | intel_sdvo_write_cmd(intel_encoder, |
| 1696 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); | 1701 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); |
| 1697 | if (sdvo_priv->is_tv) { | 1702 | if (sdvo_priv->is_tv) { |
| 1698 | /* add 30ms delay when the output type is SDVO-TV */ | 1703 | /* add 30ms delay when the output type is SDVO-TV */ |
| 1699 | mdelay(30); | 1704 | mdelay(30); |
| 1700 | } | 1705 | } |
| 1701 | status = intel_sdvo_read_response(intel_output, &response, 2); | 1706 | status = intel_sdvo_read_response(intel_encoder, &response, 2); |
| 1702 | 1707 | ||
| 1703 | DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); | 1708 | DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); |
| 1704 | 1709 | ||
| @@ -1708,10 +1713,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect | |||
| 1708 | if (response == 0) | 1713 | if (response == 0) |
| 1709 | return connector_status_disconnected; | 1714 | return connector_status_disconnected; |
| 1710 | 1715 | ||
| 1711 | if (intel_sdvo_multifunc_encoder(intel_output) && | 1716 | if (intel_sdvo_multifunc_encoder(intel_encoder) && |
| 1712 | sdvo_priv->attached_output != response) { | 1717 | sdvo_priv->attached_output != response) { |
| 1713 | if (sdvo_priv->controlled_output != response && | 1718 | if (sdvo_priv->controlled_output != response && |
| 1714 | intel_sdvo_output_setup(intel_output, response) != true) | 1719 | intel_sdvo_output_setup(intel_encoder, response) != true) |
| 1715 | return connector_status_unknown; | 1720 | return connector_status_unknown; |
| 1716 | sdvo_priv->attached_output = response; | 1721 | sdvo_priv->attached_output = response; |
| 1717 | } | 1722 | } |
| @@ -1720,12 +1725,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect | |||
| 1720 | 1725 | ||
| 1721 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | 1726 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) |
| 1722 | { | 1727 | { |
| 1723 | struct intel_output *intel_output = to_intel_output(connector); | 1728 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1724 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1729 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 1725 | int num_modes; | 1730 | int num_modes; |
| 1726 | 1731 | ||
| 1727 | /* set the bus switch and get the modes */ | 1732 | /* set the bus switch and get the modes */ |
| 1728 | num_modes = intel_ddc_get_modes(intel_output); | 1733 | num_modes = intel_ddc_get_modes(intel_encoder); |
| 1729 | 1734 | ||
| 1730 | /* | 1735 | /* |
| 1731 | * Mac mini hack. On this device, the DVI-I connector shares one DDC | 1736 | * Mac mini hack. On this device, the DVI-I connector shares one DDC |
| @@ -1735,17 +1740,17 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | |||
| 1735 | */ | 1740 | */ |
| 1736 | if (num_modes == 0 && | 1741 | if (num_modes == 0 && |
| 1737 | sdvo_priv->analog_ddc_bus && | 1742 | sdvo_priv->analog_ddc_bus && |
| 1738 | !intel_analog_is_connected(intel_output->base.dev)) { | 1743 | !intel_analog_is_connected(intel_encoder->base.dev)) { |
| 1739 | struct i2c_adapter *digital_ddc_bus; | 1744 | struct i2c_adapter *digital_ddc_bus; |
| 1740 | 1745 | ||
| 1741 | /* Switch to the analog ddc bus and try that | 1746 | /* Switch to the analog ddc bus and try that |
| 1742 | */ | 1747 | */ |
| 1743 | digital_ddc_bus = intel_output->ddc_bus; | 1748 | digital_ddc_bus = intel_encoder->ddc_bus; |
| 1744 | intel_output->ddc_bus = sdvo_priv->analog_ddc_bus; | 1749 | intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus; |
| 1745 | 1750 | ||
| 1746 | (void) intel_ddc_get_modes(intel_output); | 1751 | (void) intel_ddc_get_modes(intel_encoder); |
| 1747 | 1752 | ||
| 1748 | intel_output->ddc_bus = digital_ddc_bus; | 1753 | intel_encoder->ddc_bus = digital_ddc_bus; |
| 1749 | } | 1754 | } |
| 1750 | } | 1755 | } |
| 1751 | 1756 | ||
| @@ -1816,7 +1821,7 @@ struct drm_display_mode sdvo_tv_modes[] = { | |||
| 1816 | 1821 | ||
| 1817 | static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | 1822 | static void intel_sdvo_get_tv_modes(struct drm_connector *connector) |
| 1818 | { | 1823 | { |
| 1819 | struct intel_output *output = to_intel_output(connector); | 1824 | struct intel_encoder *output = to_intel_encoder(connector); |
| 1820 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 1825 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; |
| 1821 | struct intel_sdvo_sdtv_resolution_request tv_res; | 1826 | struct intel_sdvo_sdtv_resolution_request tv_res; |
| 1822 | uint32_t reply = 0, format_map = 0; | 1827 | uint32_t reply = 0, format_map = 0; |
| @@ -1858,9 +1863,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | |||
| 1858 | 1863 | ||
| 1859 | static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | 1864 | static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) |
| 1860 | { | 1865 | { |
| 1861 | struct intel_output *intel_output = to_intel_output(connector); | 1866 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1862 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 1867 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
| 1863 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1868 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 1864 | struct drm_display_mode *newmode; | 1869 | struct drm_display_mode *newmode; |
| 1865 | 1870 | ||
| 1866 | /* | 1871 | /* |
| @@ -1868,7 +1873,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | |||
| 1868 | * Assume that the preferred modes are | 1873 | * Assume that the preferred modes are |
| 1869 | * arranged in priority order. | 1874 | * arranged in priority order. |
| 1870 | */ | 1875 | */ |
| 1871 | intel_ddc_get_modes(intel_output); | 1876 | intel_ddc_get_modes(intel_encoder); |
| 1872 | if (list_empty(&connector->probed_modes) == false) | 1877 | if (list_empty(&connector->probed_modes) == false) |
| 1873 | goto end; | 1878 | goto end; |
| 1874 | 1879 | ||
| @@ -1897,7 +1902,7 @@ end: | |||
| 1897 | 1902 | ||
| 1898 | static int intel_sdvo_get_modes(struct drm_connector *connector) | 1903 | static int intel_sdvo_get_modes(struct drm_connector *connector) |
| 1899 | { | 1904 | { |
| 1900 | struct intel_output *output = to_intel_output(connector); | 1905 | struct intel_encoder *output = to_intel_encoder(connector); |
| 1901 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 1906 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; |
| 1902 | 1907 | ||
| 1903 | if (sdvo_priv->is_tv) | 1908 | if (sdvo_priv->is_tv) |
| @@ -1915,8 +1920,8 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) | |||
| 1915 | static | 1920 | static |
| 1916 | void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) | 1921 | void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) |
| 1917 | { | 1922 | { |
| 1918 | struct intel_output *intel_output = to_intel_output(connector); | 1923 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1919 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1924 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 1920 | struct drm_device *dev = connector->dev; | 1925 | struct drm_device *dev = connector->dev; |
| 1921 | 1926 | ||
| 1922 | if (sdvo_priv->is_tv) { | 1927 | if (sdvo_priv->is_tv) { |
| @@ -1953,13 +1958,13 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) | |||
| 1953 | 1958 | ||
| 1954 | static void intel_sdvo_destroy(struct drm_connector *connector) | 1959 | static void intel_sdvo_destroy(struct drm_connector *connector) |
| 1955 | { | 1960 | { |
| 1956 | struct intel_output *intel_output = to_intel_output(connector); | 1961 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1957 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1962 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 1958 | 1963 | ||
| 1959 | if (intel_output->i2c_bus) | 1964 | if (intel_encoder->i2c_bus) |
| 1960 | intel_i2c_destroy(intel_output->i2c_bus); | 1965 | intel_i2c_destroy(intel_encoder->i2c_bus); |
| 1961 | if (intel_output->ddc_bus) | 1966 | if (intel_encoder->ddc_bus) |
| 1962 | intel_i2c_destroy(intel_output->ddc_bus); | 1967 | intel_i2c_destroy(intel_encoder->ddc_bus); |
| 1963 | if (sdvo_priv->analog_ddc_bus) | 1968 | if (sdvo_priv->analog_ddc_bus) |
| 1964 | intel_i2c_destroy(sdvo_priv->analog_ddc_bus); | 1969 | intel_i2c_destroy(sdvo_priv->analog_ddc_bus); |
| 1965 | 1970 | ||
| @@ -1977,7 +1982,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector) | |||
| 1977 | drm_sysfs_connector_remove(connector); | 1982 | drm_sysfs_connector_remove(connector); |
| 1978 | drm_connector_cleanup(connector); | 1983 | drm_connector_cleanup(connector); |
| 1979 | 1984 | ||
| 1980 | kfree(intel_output); | 1985 | kfree(intel_encoder); |
| 1981 | } | 1986 | } |
| 1982 | 1987 | ||
| 1983 | static int | 1988 | static int |
| @@ -1985,9 +1990,9 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
| 1985 | struct drm_property *property, | 1990 | struct drm_property *property, |
| 1986 | uint64_t val) | 1991 | uint64_t val) |
| 1987 | { | 1992 | { |
| 1988 | struct intel_output *intel_output = to_intel_output(connector); | 1993 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1989 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1994 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 1990 | struct drm_encoder *encoder = &intel_output->enc; | 1995 | struct drm_encoder *encoder = &intel_encoder->enc; |
| 1991 | struct drm_crtc *crtc = encoder->crtc; | 1996 | struct drm_crtc *crtc = encoder->crtc; |
| 1992 | int ret = 0; | 1997 | int ret = 0; |
| 1993 | bool changed = false; | 1998 | bool changed = false; |
| @@ -2095,8 +2100,8 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
| 2095 | sdvo_priv->cur_brightness = temp_value; | 2100 | sdvo_priv->cur_brightness = temp_value; |
| 2096 | } | 2101 | } |
| 2097 | if (cmd) { | 2102 | if (cmd) { |
| 2098 | intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2); | 2103 | intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2); |
| 2099 | status = intel_sdvo_read_response(intel_output, | 2104 | status = intel_sdvo_read_response(intel_encoder, |
| 2100 | NULL, 0); | 2105 | NULL, 0); |
| 2101 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2106 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2102 | DRM_DEBUG_KMS("Incorrect SDVO command \n"); | 2107 | DRM_DEBUG_KMS("Incorrect SDVO command \n"); |
| @@ -2191,7 +2196,7 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv) | |||
| 2191 | } | 2196 | } |
| 2192 | 2197 | ||
| 2193 | static bool | 2198 | static bool |
| 2194 | intel_sdvo_get_digital_encoding_mode(struct intel_output *output) | 2199 | intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output) |
| 2195 | { | 2200 | { |
| 2196 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 2201 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; |
| 2197 | uint8_t status; | 2202 | uint8_t status; |
| @@ -2205,42 +2210,42 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output) | |||
| 2205 | return true; | 2210 | return true; |
| 2206 | } | 2211 | } |
| 2207 | 2212 | ||
| 2208 | static struct intel_output * | 2213 | static struct intel_encoder * |
| 2209 | intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan) | 2214 | intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan) |
| 2210 | { | 2215 | { |
| 2211 | struct drm_device *dev = chan->drm_dev; | 2216 | struct drm_device *dev = chan->drm_dev; |
| 2212 | struct drm_connector *connector; | 2217 | struct drm_connector *connector; |
| 2213 | struct intel_output *intel_output = NULL; | 2218 | struct intel_encoder *intel_encoder = NULL; |
| 2214 | 2219 | ||
| 2215 | list_for_each_entry(connector, | 2220 | list_for_each_entry(connector, |
| 2216 | &dev->mode_config.connector_list, head) { | 2221 | &dev->mode_config.connector_list, head) { |
| 2217 | if (to_intel_output(connector)->ddc_bus == &chan->adapter) { | 2222 | if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) { |
| 2218 | intel_output = to_intel_output(connector); | 2223 | intel_encoder = to_intel_encoder(connector); |
| 2219 | break; | 2224 | break; |
| 2220 | } | 2225 | } |
| 2221 | } | 2226 | } |
| 2222 | return intel_output; | 2227 | return intel_encoder; |
| 2223 | } | 2228 | } |
| 2224 | 2229 | ||
| 2225 | static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, | 2230 | static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, |
| 2226 | struct i2c_msg msgs[], int num) | 2231 | struct i2c_msg msgs[], int num) |
| 2227 | { | 2232 | { |
| 2228 | struct intel_output *intel_output; | 2233 | struct intel_encoder *intel_encoder; |
| 2229 | struct intel_sdvo_priv *sdvo_priv; | 2234 | struct intel_sdvo_priv *sdvo_priv; |
| 2230 | struct i2c_algo_bit_data *algo_data; | 2235 | struct i2c_algo_bit_data *algo_data; |
| 2231 | const struct i2c_algorithm *algo; | 2236 | const struct i2c_algorithm *algo; |
| 2232 | 2237 | ||
| 2233 | algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; | 2238 | algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; |
| 2234 | intel_output = | 2239 | intel_encoder = |
| 2235 | intel_sdvo_chan_to_intel_output( | 2240 | intel_sdvo_chan_to_intel_encoder( |
| 2236 | (struct intel_i2c_chan *)(algo_data->data)); | 2241 | (struct intel_i2c_chan *)(algo_data->data)); |
| 2237 | if (intel_output == NULL) | 2242 | if (intel_encoder == NULL) |
| 2238 | return -EINVAL; | 2243 | return -EINVAL; |
| 2239 | 2244 | ||
| 2240 | sdvo_priv = intel_output->dev_priv; | 2245 | sdvo_priv = intel_encoder->dev_priv; |
| 2241 | algo = intel_output->i2c_bus->algo; | 2246 | algo = intel_encoder->i2c_bus->algo; |
| 2242 | 2247 | ||
| 2243 | intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); | 2248 | intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus); |
| 2244 | return algo->master_xfer(i2c_adap, msgs, num); | 2249 | return algo->master_xfer(i2c_adap, msgs, num); |
| 2245 | } | 2250 | } |
| 2246 | 2251 | ||
| @@ -2249,12 +2254,12 @@ static struct i2c_algorithm intel_sdvo_i2c_bit_algo = { | |||
| 2249 | }; | 2254 | }; |
| 2250 | 2255 | ||
| 2251 | static u8 | 2256 | static u8 |
| 2252 | intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) | 2257 | intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) |
| 2253 | { | 2258 | { |
| 2254 | struct drm_i915_private *dev_priv = dev->dev_private; | 2259 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2255 | struct sdvo_device_mapping *my_mapping, *other_mapping; | 2260 | struct sdvo_device_mapping *my_mapping, *other_mapping; |
| 2256 | 2261 | ||
| 2257 | if (output_device == SDVOB) { | 2262 | if (sdvo_reg == SDVOB) { |
| 2258 | my_mapping = &dev_priv->sdvo_mappings[0]; | 2263 | my_mapping = &dev_priv->sdvo_mappings[0]; |
| 2259 | other_mapping = &dev_priv->sdvo_mappings[1]; | 2264 | other_mapping = &dev_priv->sdvo_mappings[1]; |
| 2260 | } else { | 2265 | } else { |
| @@ -2279,7 +2284,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) | |||
| 2279 | /* No SDVO device info is found for another DVO port, | 2284 | /* No SDVO device info is found for another DVO port, |
| 2280 | * so use mapping assumption we had before BIOS parsing. | 2285 | * so use mapping assumption we had before BIOS parsing. |
| 2281 | */ | 2286 | */ |
| 2282 | if (output_device == SDVOB) | 2287 | if (sdvo_reg == SDVOB) |
| 2283 | return 0x70; | 2288 | return 0x70; |
| 2284 | else | 2289 | else |
| 2285 | return 0x72; | 2290 | return 0x72; |
| @@ -2305,15 +2310,15 @@ static struct dmi_system_id intel_sdvo_bad_tv[] = { | |||
| 2305 | }; | 2310 | }; |
| 2306 | 2311 | ||
| 2307 | static bool | 2312 | static bool |
| 2308 | intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | 2313 | intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) |
| 2309 | { | 2314 | { |
| 2310 | struct drm_connector *connector = &intel_output->base; | 2315 | struct drm_connector *connector = &intel_encoder->base; |
| 2311 | struct drm_encoder *encoder = &intel_output->enc; | 2316 | struct drm_encoder *encoder = &intel_encoder->enc; |
| 2312 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 2317 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 2313 | bool ret = true, registered = false; | 2318 | bool ret = true, registered = false; |
| 2314 | 2319 | ||
| 2315 | sdvo_priv->is_tv = false; | 2320 | sdvo_priv->is_tv = false; |
| 2316 | intel_output->needs_tv_clock = false; | 2321 | intel_encoder->needs_tv_clock = false; |
| 2317 | sdvo_priv->is_lvds = false; | 2322 | sdvo_priv->is_lvds = false; |
| 2318 | 2323 | ||
| 2319 | if (device_is_registered(&connector->kdev)) { | 2324 | if (device_is_registered(&connector->kdev)) { |
| @@ -2331,16 +2336,16 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
| 2331 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; | 2336 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; |
| 2332 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | 2337 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; |
| 2333 | 2338 | ||
| 2334 | if (intel_sdvo_get_supp_encode(intel_output, | 2339 | if (intel_sdvo_get_supp_encode(intel_encoder, |
| 2335 | &sdvo_priv->encode) && | 2340 | &sdvo_priv->encode) && |
| 2336 | intel_sdvo_get_digital_encoding_mode(intel_output) && | 2341 | intel_sdvo_get_digital_encoding_mode(intel_encoder) && |
| 2337 | sdvo_priv->is_hdmi) { | 2342 | sdvo_priv->is_hdmi) { |
| 2338 | /* enable hdmi encoding mode if supported */ | 2343 | /* enable hdmi encoding mode if supported */ |
| 2339 | intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); | 2344 | intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI); |
| 2340 | intel_sdvo_set_colorimetry(intel_output, | 2345 | intel_sdvo_set_colorimetry(intel_encoder, |
| 2341 | SDVO_COLORIMETRY_RGB256); | 2346 | SDVO_COLORIMETRY_RGB256); |
| 2342 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; | 2347 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; |
| 2343 | intel_output->clone_mask = | 2348 | intel_encoder->clone_mask = |
| 2344 | (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2349 | (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
| 2345 | (1 << INTEL_ANALOG_CLONE_BIT); | 2350 | (1 << INTEL_ANALOG_CLONE_BIT); |
| 2346 | } | 2351 | } |
| @@ -2351,21 +2356,21 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
| 2351 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | 2356 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; |
| 2352 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | 2357 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; |
| 2353 | sdvo_priv->is_tv = true; | 2358 | sdvo_priv->is_tv = true; |
| 2354 | intel_output->needs_tv_clock = true; | 2359 | intel_encoder->needs_tv_clock = true; |
| 2355 | intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | 2360 | intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; |
| 2356 | } else if (flags & SDVO_OUTPUT_RGB0) { | 2361 | } else if (flags & SDVO_OUTPUT_RGB0) { |
| 2357 | 2362 | ||
| 2358 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; | 2363 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; |
| 2359 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | 2364 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; |
| 2360 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 2365 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
| 2361 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2366 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
| 2362 | (1 << INTEL_ANALOG_CLONE_BIT); | 2367 | (1 << INTEL_ANALOG_CLONE_BIT); |
| 2363 | } else if (flags & SDVO_OUTPUT_RGB1) { | 2368 | } else if (flags & SDVO_OUTPUT_RGB1) { |
| 2364 | 2369 | ||
| 2365 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; | 2370 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; |
| 2366 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | 2371 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; |
| 2367 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 2372 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
| 2368 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2373 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
| 2369 | (1 << INTEL_ANALOG_CLONE_BIT); | 2374 | (1 << INTEL_ANALOG_CLONE_BIT); |
| 2370 | } else if (flags & SDVO_OUTPUT_CVBS0) { | 2375 | } else if (flags & SDVO_OUTPUT_CVBS0) { |
| 2371 | 2376 | ||
| @@ -2373,15 +2378,15 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
| 2373 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | 2378 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; |
| 2374 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | 2379 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; |
| 2375 | sdvo_priv->is_tv = true; | 2380 | sdvo_priv->is_tv = true; |
| 2376 | intel_output->needs_tv_clock = true; | 2381 | intel_encoder->needs_tv_clock = true; |
| 2377 | intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | 2382 | intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; |
| 2378 | } else if (flags & SDVO_OUTPUT_LVDS0) { | 2383 | } else if (flags & SDVO_OUTPUT_LVDS0) { |
| 2379 | 2384 | ||
| 2380 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; | 2385 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; |
| 2381 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | 2386 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; |
| 2382 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | 2387 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; |
| 2383 | sdvo_priv->is_lvds = true; | 2388 | sdvo_priv->is_lvds = true; |
| 2384 | intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | | 2389 | intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | |
| 2385 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | 2390 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); |
| 2386 | } else if (flags & SDVO_OUTPUT_LVDS1) { | 2391 | } else if (flags & SDVO_OUTPUT_LVDS1) { |
| 2387 | 2392 | ||
| @@ -2389,7 +2394,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
| 2389 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | 2394 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; |
| 2390 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | 2395 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; |
| 2391 | sdvo_priv->is_lvds = true; | 2396 | sdvo_priv->is_lvds = true; |
| 2392 | intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | | 2397 | intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | |
| 2393 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | 2398 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); |
| 2394 | } else { | 2399 | } else { |
| 2395 | 2400 | ||
| @@ -2402,7 +2407,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
| 2402 | bytes[0], bytes[1]); | 2407 | bytes[0], bytes[1]); |
| 2403 | ret = false; | 2408 | ret = false; |
| 2404 | } | 2409 | } |
| 2405 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 2410 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
| 2406 | 2411 | ||
| 2407 | if (ret && registered) | 2412 | if (ret && registered) |
| 2408 | ret = drm_sysfs_connector_add(connector) == 0 ? true : false; | 2413 | ret = drm_sysfs_connector_add(connector) == 0 ? true : false; |
| @@ -2414,18 +2419,18 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
| 2414 | 2419 | ||
| 2415 | static void intel_sdvo_tv_create_property(struct drm_connector *connector) | 2420 | static void intel_sdvo_tv_create_property(struct drm_connector *connector) |
| 2416 | { | 2421 | { |
| 2417 | struct intel_output *intel_output = to_intel_output(connector); | 2422 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 2418 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 2423 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 2419 | struct intel_sdvo_tv_format format; | 2424 | struct intel_sdvo_tv_format format; |
| 2420 | uint32_t format_map, i; | 2425 | uint32_t format_map, i; |
| 2421 | uint8_t status; | 2426 | uint8_t status; |
| 2422 | 2427 | ||
| 2423 | intel_sdvo_set_target_output(intel_output, | 2428 | intel_sdvo_set_target_output(intel_encoder, |
| 2424 | sdvo_priv->controlled_output); | 2429 | sdvo_priv->controlled_output); |
| 2425 | 2430 | ||
| 2426 | intel_sdvo_write_cmd(intel_output, | 2431 | intel_sdvo_write_cmd(intel_encoder, |
| 2427 | SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); | 2432 | SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); |
| 2428 | status = intel_sdvo_read_response(intel_output, | 2433 | status = intel_sdvo_read_response(intel_encoder, |
| 2429 | &format, sizeof(format)); | 2434 | &format, sizeof(format)); |
| 2430 | if (status != SDVO_CMD_STATUS_SUCCESS) | 2435 | if (status != SDVO_CMD_STATUS_SUCCESS) |
| 2431 | return; | 2436 | return; |
| @@ -2463,16 +2468,16 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector) | |||
| 2463 | 2468 | ||
| 2464 | static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | 2469 | static void intel_sdvo_create_enhance_property(struct drm_connector *connector) |
| 2465 | { | 2470 | { |
| 2466 | struct intel_output *intel_output = to_intel_output(connector); | 2471 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 2467 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 2472 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
| 2468 | struct intel_sdvo_enhancements_reply sdvo_data; | 2473 | struct intel_sdvo_enhancements_reply sdvo_data; |
| 2469 | struct drm_device *dev = connector->dev; | 2474 | struct drm_device *dev = connector->dev; |
| 2470 | uint8_t status; | 2475 | uint8_t status; |
| 2471 | uint16_t response, data_value[2]; | 2476 | uint16_t response, data_value[2]; |
| 2472 | 2477 | ||
| 2473 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, | 2478 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, |
| 2474 | NULL, 0); | 2479 | NULL, 0); |
| 2475 | status = intel_sdvo_read_response(intel_output, &sdvo_data, | 2480 | status = intel_sdvo_read_response(intel_encoder, &sdvo_data, |
| 2476 | sizeof(sdvo_data)); | 2481 | sizeof(sdvo_data)); |
| 2477 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2482 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2478 | DRM_DEBUG_KMS(" incorrect response is returned\n"); | 2483 | DRM_DEBUG_KMS(" incorrect response is returned\n"); |
| @@ -2488,18 +2493,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
| 2488 | * property | 2493 | * property |
| 2489 | */ | 2494 | */ |
| 2490 | if (sdvo_data.overscan_h) { | 2495 | if (sdvo_data.overscan_h) { |
| 2491 | intel_sdvo_write_cmd(intel_output, | 2496 | intel_sdvo_write_cmd(intel_encoder, |
| 2492 | SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); | 2497 | SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); |
| 2493 | status = intel_sdvo_read_response(intel_output, | 2498 | status = intel_sdvo_read_response(intel_encoder, |
| 2494 | &data_value, 4); | 2499 | &data_value, 4); |
| 2495 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2500 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2496 | DRM_DEBUG_KMS("Incorrect SDVO max " | 2501 | DRM_DEBUG_KMS("Incorrect SDVO max " |
| 2497 | "h_overscan\n"); | 2502 | "h_overscan\n"); |
| 2498 | return; | 2503 | return; |
| 2499 | } | 2504 | } |
| 2500 | intel_sdvo_write_cmd(intel_output, | 2505 | intel_sdvo_write_cmd(intel_encoder, |
| 2501 | SDVO_CMD_GET_OVERSCAN_H, NULL, 0); | 2506 | SDVO_CMD_GET_OVERSCAN_H, NULL, 0); |
| 2502 | status = intel_sdvo_read_response(intel_output, | 2507 | status = intel_sdvo_read_response(intel_encoder, |
| 2503 | &response, 2); | 2508 | &response, 2); |
| 2504 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2509 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2505 | DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); | 2510 | DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); |
| @@ -2529,18 +2534,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
| 2529 | data_value[0], data_value[1], response); | 2534 | data_value[0], data_value[1], response); |
| 2530 | } | 2535 | } |
| 2531 | if (sdvo_data.overscan_v) { | 2536 | if (sdvo_data.overscan_v) { |
| 2532 | intel_sdvo_write_cmd(intel_output, | 2537 | intel_sdvo_write_cmd(intel_encoder, |
| 2533 | SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); | 2538 | SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); |
| 2534 | status = intel_sdvo_read_response(intel_output, | 2539 | status = intel_sdvo_read_response(intel_encoder, |
| 2535 | &data_value, 4); | 2540 | &data_value, 4); |
| 2536 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2541 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2537 | DRM_DEBUG_KMS("Incorrect SDVO max " | 2542 | DRM_DEBUG_KMS("Incorrect SDVO max " |
| 2538 | "v_overscan\n"); | 2543 | "v_overscan\n"); |
| 2539 | return; | 2544 | return; |
| 2540 | } | 2545 | } |
| 2541 | intel_sdvo_write_cmd(intel_output, | 2546 | intel_sdvo_write_cmd(intel_encoder, |
| 2542 | SDVO_CMD_GET_OVERSCAN_V, NULL, 0); | 2547 | SDVO_CMD_GET_OVERSCAN_V, NULL, 0); |
| 2543 | status = intel_sdvo_read_response(intel_output, | 2548 | status = intel_sdvo_read_response(intel_encoder, |
| 2544 | &response, 2); | 2549 | &response, 2); |
| 2545 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2550 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2546 | DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); | 2551 | DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); |
| @@ -2570,17 +2575,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
| 2570 | data_value[0], data_value[1], response); | 2575 | data_value[0], data_value[1], response); |
| 2571 | } | 2576 | } |
| 2572 | if (sdvo_data.position_h) { | 2577 | if (sdvo_data.position_h) { |
| 2573 | intel_sdvo_write_cmd(intel_output, | 2578 | intel_sdvo_write_cmd(intel_encoder, |
| 2574 | SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); | 2579 | SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); |
| 2575 | status = intel_sdvo_read_response(intel_output, | 2580 | status = intel_sdvo_read_response(intel_encoder, |
| 2576 | &data_value, 4); | 2581 | &data_value, 4); |
| 2577 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2582 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2578 | DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); | 2583 | DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); |
| 2579 | return; | 2584 | return; |
| 2580 | } | 2585 | } |
| 2581 | intel_sdvo_write_cmd(intel_output, | 2586 | intel_sdvo_write_cmd(intel_encoder, |
| 2582 | SDVO_CMD_GET_POSITION_H, NULL, 0); | 2587 | SDVO_CMD_GET_POSITION_H, NULL, 0); |
| 2583 | status = intel_sdvo_read_response(intel_output, | 2588 | status = intel_sdvo_read_response(intel_encoder, |
| 2584 | &response, 2); | 2589 | &response, 2); |
| 2585 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2590 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2586 | DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n"); | 2591 | DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n"); |
| @@ -2601,17 +2606,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
| 2601 | data_value[0], data_value[1], response); | 2606 | data_value[0], data_value[1], response); |
| 2602 | } | 2607 | } |
| 2603 | if (sdvo_data.position_v) { | 2608 | if (sdvo_data.position_v) { |
| 2604 | intel_sdvo_write_cmd(intel_output, | 2609 | intel_sdvo_write_cmd(intel_encoder, |
| 2605 | SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); | 2610 | SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); |
| 2606 | status = intel_sdvo_read_response(intel_output, | 2611 | status = intel_sdvo_read_response(intel_encoder, |
| 2607 | &data_value, 4); | 2612 | &data_value, 4); |
| 2608 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2613 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2609 | DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); | 2614 | DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); |
| 2610 | return; | 2615 | return; |
| 2611 | } | 2616 | } |
| 2612 | intel_sdvo_write_cmd(intel_output, | 2617 | intel_sdvo_write_cmd(intel_encoder, |
| 2613 | SDVO_CMD_GET_POSITION_V, NULL, 0); | 2618 | SDVO_CMD_GET_POSITION_V, NULL, 0); |
| 2614 | status = intel_sdvo_read_response(intel_output, | 2619 | status = intel_sdvo_read_response(intel_encoder, |
| 2615 | &response, 2); | 2620 | &response, 2); |
| 2616 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2621 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2617 | DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n"); | 2622 | DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n"); |
| @@ -2634,17 +2639,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
| 2634 | } | 2639 | } |
| 2635 | if (sdvo_priv->is_tv) { | 2640 | if (sdvo_priv->is_tv) { |
| 2636 | if (sdvo_data.saturation) { | 2641 | if (sdvo_data.saturation) { |
| 2637 | intel_sdvo_write_cmd(intel_output, | 2642 | intel_sdvo_write_cmd(intel_encoder, |
| 2638 | SDVO_CMD_GET_MAX_SATURATION, NULL, 0); | 2643 | SDVO_CMD_GET_MAX_SATURATION, NULL, 0); |
| 2639 | status = intel_sdvo_read_response(intel_output, | 2644 | status = intel_sdvo_read_response(intel_encoder, |
| 2640 | &data_value, 4); | 2645 | &data_value, 4); |
| 2641 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2646 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2642 | DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); | 2647 | DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); |
| 2643 | return; | 2648 | return; |
| 2644 | } | 2649 | } |
| 2645 | intel_sdvo_write_cmd(intel_output, | 2650 | intel_sdvo_write_cmd(intel_encoder, |
| 2646 | SDVO_CMD_GET_SATURATION, NULL, 0); | 2651 | SDVO_CMD_GET_SATURATION, NULL, 0); |
| 2647 | status = intel_sdvo_read_response(intel_output, | 2652 | status = intel_sdvo_read_response(intel_encoder, |
| 2648 | &response, 2); | 2653 | &response, 2); |
| 2649 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2654 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2650 | DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); | 2655 | DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); |
| @@ -2666,17 +2671,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
| 2666 | data_value[0], data_value[1], response); | 2671 | data_value[0], data_value[1], response); |
| 2667 | } | 2672 | } |
| 2668 | if (sdvo_data.contrast) { | 2673 | if (sdvo_data.contrast) { |
| 2669 | intel_sdvo_write_cmd(intel_output, | 2674 | intel_sdvo_write_cmd(intel_encoder, |
| 2670 | SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); | 2675 | SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); |
| 2671 | status = intel_sdvo_read_response(intel_output, | 2676 | status = intel_sdvo_read_response(intel_encoder, |
| 2672 | &data_value, 4); | 2677 | &data_value, 4); |
| 2673 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2678 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2674 | DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); | 2679 | DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); |
| 2675 | return; | 2680 | return; |
| 2676 | } | 2681 | } |
| 2677 | intel_sdvo_write_cmd(intel_output, | 2682 | intel_sdvo_write_cmd(intel_encoder, |
| 2678 | SDVO_CMD_GET_CONTRAST, NULL, 0); | 2683 | SDVO_CMD_GET_CONTRAST, NULL, 0); |
| 2679 | status = intel_sdvo_read_response(intel_output, | 2684 | status = intel_sdvo_read_response(intel_encoder, |
| 2680 | &response, 2); | 2685 | &response, 2); |
| 2681 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2686 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2682 | DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); | 2687 | DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); |
| @@ -2697,17 +2702,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
| 2697 | data_value[0], data_value[1], response); | 2702 | data_value[0], data_value[1], response); |
| 2698 | } | 2703 | } |
| 2699 | if (sdvo_data.hue) { | 2704 | if (sdvo_data.hue) { |
| 2700 | intel_sdvo_write_cmd(intel_output, | 2705 | intel_sdvo_write_cmd(intel_encoder, |
| 2701 | SDVO_CMD_GET_MAX_HUE, NULL, 0); | 2706 | SDVO_CMD_GET_MAX_HUE, NULL, 0); |
| 2702 | status = intel_sdvo_read_response(intel_output, | 2707 | status = intel_sdvo_read_response(intel_encoder, |
| 2703 | &data_value, 4); | 2708 | &data_value, 4); |
| 2704 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2709 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2705 | DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); | 2710 | DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); |
| 2706 | return; | 2711 | return; |
| 2707 | } | 2712 | } |
| 2708 | intel_sdvo_write_cmd(intel_output, | 2713 | intel_sdvo_write_cmd(intel_encoder, |
| 2709 | SDVO_CMD_GET_HUE, NULL, 0); | 2714 | SDVO_CMD_GET_HUE, NULL, 0); |
| 2710 | status = intel_sdvo_read_response(intel_output, | 2715 | status = intel_sdvo_read_response(intel_encoder, |
| 2711 | &response, 2); | 2716 | &response, 2); |
| 2712 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2717 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2713 | DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); | 2718 | DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); |
| @@ -2730,17 +2735,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
| 2730 | } | 2735 | } |
| 2731 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { | 2736 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { |
| 2732 | if (sdvo_data.brightness) { | 2737 | if (sdvo_data.brightness) { |
| 2733 | intel_sdvo_write_cmd(intel_output, | 2738 | intel_sdvo_write_cmd(intel_encoder, |
| 2734 | SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); | 2739 | SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); |
| 2735 | status = intel_sdvo_read_response(intel_output, | 2740 | status = intel_sdvo_read_response(intel_encoder, |
| 2736 | &data_value, 4); | 2741 | &data_value, 4); |
| 2737 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2742 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2738 | DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); | 2743 | DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); |
| 2739 | return; | 2744 | return; |
| 2740 | } | 2745 | } |
| 2741 | intel_sdvo_write_cmd(intel_output, | 2746 | intel_sdvo_write_cmd(intel_encoder, |
| 2742 | SDVO_CMD_GET_BRIGHTNESS, NULL, 0); | 2747 | SDVO_CMD_GET_BRIGHTNESS, NULL, 0); |
| 2743 | status = intel_sdvo_read_response(intel_output, | 2748 | status = intel_sdvo_read_response(intel_encoder, |
| 2744 | &response, 2); | 2749 | &response, 2); |
| 2745 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2750 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
| 2746 | DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); | 2751 | DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); |
| @@ -2765,81 +2770,81 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
| 2765 | return; | 2770 | return; |
| 2766 | } | 2771 | } |
| 2767 | 2772 | ||
| 2768 | bool intel_sdvo_init(struct drm_device *dev, int output_device) | 2773 | bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) |
| 2769 | { | 2774 | { |
| 2770 | struct drm_i915_private *dev_priv = dev->dev_private; | 2775 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2771 | struct drm_connector *connector; | 2776 | struct drm_connector *connector; |
| 2772 | struct intel_output *intel_output; | 2777 | struct intel_encoder *intel_encoder; |
| 2773 | struct intel_sdvo_priv *sdvo_priv; | 2778 | struct intel_sdvo_priv *sdvo_priv; |
| 2774 | 2779 | ||
| 2775 | u8 ch[0x40]; | 2780 | u8 ch[0x40]; |
| 2776 | int i; | 2781 | int i; |
| 2777 | 2782 | ||
| 2778 | intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); | 2783 | intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); |
| 2779 | if (!intel_output) { | 2784 | if (!intel_encoder) { |
| 2780 | return false; | 2785 | return false; |
| 2781 | } | 2786 | } |
| 2782 | 2787 | ||
| 2783 | sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); | 2788 | sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1); |
| 2784 | sdvo_priv->output_device = output_device; | 2789 | sdvo_priv->sdvo_reg = sdvo_reg; |
| 2785 | 2790 | ||
| 2786 | intel_output->dev_priv = sdvo_priv; | 2791 | intel_encoder->dev_priv = sdvo_priv; |
| 2787 | intel_output->type = INTEL_OUTPUT_SDVO; | 2792 | intel_encoder->type = INTEL_OUTPUT_SDVO; |
| 2788 | 2793 | ||
| 2789 | /* setup the DDC bus. */ | 2794 | /* setup the DDC bus. */ |
| 2790 | if (output_device == SDVOB) | 2795 | if (sdvo_reg == SDVOB) |
| 2791 | intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); | 2796 | intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); |
| 2792 | else | 2797 | else |
| 2793 | intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); | 2798 | intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); |
| 2794 | 2799 | ||
| 2795 | if (!intel_output->i2c_bus) | 2800 | if (!intel_encoder->i2c_bus) |
| 2796 | goto err_inteloutput; | 2801 | goto err_inteloutput; |
| 2797 | 2802 | ||
| 2798 | sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device); | 2803 | sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg); |
| 2799 | 2804 | ||
| 2800 | /* Save the bit-banging i2c functionality for use by the DDC wrapper */ | 2805 | /* Save the bit-banging i2c functionality for use by the DDC wrapper */ |
| 2801 | intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality; | 2806 | intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality; |
| 2802 | 2807 | ||
| 2803 | /* Read the regs to test if we can talk to the device */ | 2808 | /* Read the regs to test if we can talk to the device */ |
| 2804 | for (i = 0; i < 0x40; i++) { | 2809 | for (i = 0; i < 0x40; i++) { |
| 2805 | if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { | 2810 | if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) { |
| 2806 | DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", | 2811 | DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", |
| 2807 | output_device == SDVOB ? 'B' : 'C'); | 2812 | sdvo_reg == SDVOB ? 'B' : 'C'); |
| 2808 | goto err_i2c; | 2813 | goto err_i2c; |
| 2809 | } | 2814 | } |
| 2810 | } | 2815 | } |
| 2811 | 2816 | ||
| 2812 | /* setup the DDC bus. */ | 2817 | /* setup the DDC bus. */ |
| 2813 | if (output_device == SDVOB) { | 2818 | if (sdvo_reg == SDVOB) { |
| 2814 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); | 2819 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); |
| 2815 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | 2820 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, |
| 2816 | "SDVOB/VGA DDC BUS"); | 2821 | "SDVOB/VGA DDC BUS"); |
| 2817 | dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; | 2822 | dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; |
| 2818 | } else { | 2823 | } else { |
| 2819 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); | 2824 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); |
| 2820 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | 2825 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, |
| 2821 | "SDVOC/VGA DDC BUS"); | 2826 | "SDVOC/VGA DDC BUS"); |
| 2822 | dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; | 2827 | dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; |
| 2823 | } | 2828 | } |
| 2824 | 2829 | ||
| 2825 | if (intel_output->ddc_bus == NULL) | 2830 | if (intel_encoder->ddc_bus == NULL) |
| 2826 | goto err_i2c; | 2831 | goto err_i2c; |
| 2827 | 2832 | ||
| 2828 | /* Wrap with our custom algo which switches to DDC mode */ | 2833 | /* Wrap with our custom algo which switches to DDC mode */ |
| 2829 | intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; | 2834 | intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; |
| 2830 | 2835 | ||
| 2831 | /* In default case sdvo lvds is false */ | 2836 | /* In default case sdvo lvds is false */ |
| 2832 | intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); | 2837 | intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps); |
| 2833 | 2838 | ||
| 2834 | if (intel_sdvo_output_setup(intel_output, | 2839 | if (intel_sdvo_output_setup(intel_encoder, |
| 2835 | sdvo_priv->caps.output_flags) != true) { | 2840 | sdvo_priv->caps.output_flags) != true) { |
| 2836 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", | 2841 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", |
| 2837 | output_device == SDVOB ? 'B' : 'C'); | 2842 | sdvo_reg == SDVOB ? 'B' : 'C'); |
| 2838 | goto err_i2c; | 2843 | goto err_i2c; |
| 2839 | } | 2844 | } |
| 2840 | 2845 | ||
| 2841 | 2846 | ||
| 2842 | connector = &intel_output->base; | 2847 | connector = &intel_encoder->base; |
| 2843 | drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, | 2848 | drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, |
| 2844 | connector->connector_type); | 2849 | connector->connector_type); |
| 2845 | 2850 | ||
| @@ -2848,12 +2853,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
| 2848 | connector->doublescan_allowed = 0; | 2853 | connector->doublescan_allowed = 0; |
| 2849 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 2854 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
| 2850 | 2855 | ||
| 2851 | drm_encoder_init(dev, &intel_output->enc, | 2856 | drm_encoder_init(dev, &intel_encoder->enc, |
| 2852 | &intel_sdvo_enc_funcs, intel_output->enc.encoder_type); | 2857 | &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type); |
| 2853 | 2858 | ||
| 2854 | drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); | 2859 | drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); |
| 2855 | 2860 | ||
| 2856 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | 2861 | drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); |
| 2857 | if (sdvo_priv->is_tv) | 2862 | if (sdvo_priv->is_tv) |
| 2858 | intel_sdvo_tv_create_property(connector); | 2863 | intel_sdvo_tv_create_property(connector); |
| 2859 | 2864 | ||
| @@ -2865,9 +2870,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
| 2865 | intel_sdvo_select_ddc_bus(sdvo_priv); | 2870 | intel_sdvo_select_ddc_bus(sdvo_priv); |
| 2866 | 2871 | ||
| 2867 | /* Set the input timing to the screen. Assume always input 0. */ | 2872 | /* Set the input timing to the screen. Assume always input 0. */ |
| 2868 | intel_sdvo_set_target_input(intel_output, true, false); | 2873 | intel_sdvo_set_target_input(intel_encoder, true, false); |
| 2869 | 2874 | ||
| 2870 | intel_sdvo_get_input_pixel_clock_range(intel_output, | 2875 | intel_sdvo_get_input_pixel_clock_range(intel_encoder, |
| 2871 | &sdvo_priv->pixel_clock_min, | 2876 | &sdvo_priv->pixel_clock_min, |
| 2872 | &sdvo_priv->pixel_clock_max); | 2877 | &sdvo_priv->pixel_clock_max); |
| 2873 | 2878 | ||
| @@ -2894,12 +2899,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
| 2894 | err_i2c: | 2899 | err_i2c: |
| 2895 | if (sdvo_priv->analog_ddc_bus != NULL) | 2900 | if (sdvo_priv->analog_ddc_bus != NULL) |
| 2896 | intel_i2c_destroy(sdvo_priv->analog_ddc_bus); | 2901 | intel_i2c_destroy(sdvo_priv->analog_ddc_bus); |
| 2897 | if (intel_output->ddc_bus != NULL) | 2902 | if (intel_encoder->ddc_bus != NULL) |
| 2898 | intel_i2c_destroy(intel_output->ddc_bus); | 2903 | intel_i2c_destroy(intel_encoder->ddc_bus); |
| 2899 | if (intel_output->i2c_bus != NULL) | 2904 | if (intel_encoder->i2c_bus != NULL) |
| 2900 | intel_i2c_destroy(intel_output->i2c_bus); | 2905 | intel_i2c_destroy(intel_encoder->i2c_bus); |
| 2901 | err_inteloutput: | 2906 | err_inteloutput: |
| 2902 | kfree(intel_output); | 2907 | kfree(intel_encoder); |
| 2903 | 2908 | ||
| 2904 | return false; | 2909 | return false; |
| 2905 | } | 2910 | } |
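The i915 hunks above are a mechanical rename (struct intel_output becomes struct intel_encoder, output_device becomes sdvo_reg); the one structural idiom worth noting is the single allocation that places the SDVO private data directly behind the encoder and recovers it as (priv *)(encoder + 1). Below is a minimal, stand-alone sketch of that idiom. It is not part of the patch; the fake_encoder/fake_sdvo_priv names and the register value are invented for illustration.

/* Illustrative only: allocate a structure plus its trailing private data
 * in one block, the way the SDVO/TV init paths above do. */
#include <stdio.h>
#include <stdlib.h>

struct fake_encoder   { int type; void *dev_priv; };
struct fake_sdvo_priv { int sdvo_reg; };

int main(void)
{
        struct fake_encoder *enc;
        struct fake_sdvo_priv *priv;

        /* one allocation holds both structures, back to back */
        enc = calloc(1, sizeof(*enc) + sizeof(*priv));
        if (!enc)
                return 1;

        priv = (struct fake_sdvo_priv *)(enc + 1); /* just past enc */
        priv->sdvo_reg = 0x61140;                  /* made-up register value */
        enc->dev_priv = priv;

        printf("priv lives %td bytes after enc\n",
               (char *)priv - (char *)enc);
        free(enc);                                 /* frees both at once */
        return 0;
}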
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 552ec110b741..d7d39b2327df 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
| @@ -921,8 +921,8 @@ intel_tv_save(struct drm_connector *connector) | |||
| 921 | { | 921 | { |
| 922 | struct drm_device *dev = connector->dev; | 922 | struct drm_device *dev = connector->dev; |
| 923 | struct drm_i915_private *dev_priv = dev->dev_private; | 923 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 924 | struct intel_output *intel_output = to_intel_output(connector); | 924 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 925 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 925 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
| 926 | int i; | 926 | int i; |
| 927 | 927 | ||
| 928 | tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); | 928 | tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); |
| @@ -971,8 +971,8 @@ intel_tv_restore(struct drm_connector *connector) | |||
| 971 | { | 971 | { |
| 972 | struct drm_device *dev = connector->dev; | 972 | struct drm_device *dev = connector->dev; |
| 973 | struct drm_i915_private *dev_priv = dev->dev_private; | 973 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 974 | struct intel_output *intel_output = to_intel_output(connector); | 974 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 975 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 975 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
| 976 | struct drm_crtc *crtc = connector->encoder->crtc; | 976 | struct drm_crtc *crtc = connector->encoder->crtc; |
| 977 | struct intel_crtc *intel_crtc; | 977 | struct intel_crtc *intel_crtc; |
| 978 | int i; | 978 | int i; |
| @@ -1068,9 +1068,9 @@ intel_tv_mode_lookup (char *tv_format) | |||
| 1068 | } | 1068 | } |
| 1069 | 1069 | ||
| 1070 | static const struct tv_mode * | 1070 | static const struct tv_mode * |
| 1071 | intel_tv_mode_find (struct intel_output *intel_output) | 1071 | intel_tv_mode_find (struct intel_encoder *intel_encoder) |
| 1072 | { | 1072 | { |
| 1073 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 1073 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
| 1074 | 1074 | ||
| 1075 | return intel_tv_mode_lookup(tv_priv->tv_format); | 1075 | return intel_tv_mode_lookup(tv_priv->tv_format); |
| 1076 | } | 1076 | } |
| @@ -1078,8 +1078,8 @@ intel_tv_mode_find (struct intel_output *intel_output) | |||
| 1078 | static enum drm_mode_status | 1078 | static enum drm_mode_status |
| 1079 | intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) | 1079 | intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) |
| 1080 | { | 1080 | { |
| 1081 | struct intel_output *intel_output = to_intel_output(connector); | 1081 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1082 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1082 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
| 1083 | 1083 | ||
| 1084 | /* Ensure TV refresh is close to desired refresh */ | 1084 | /* Ensure TV refresh is close to desired refresh */ |
| 1085 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) | 1085 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) |
| @@ -1095,8 +1095,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
| 1095 | { | 1095 | { |
| 1096 | struct drm_device *dev = encoder->dev; | 1096 | struct drm_device *dev = encoder->dev; |
| 1097 | struct drm_mode_config *drm_config = &dev->mode_config; | 1097 | struct drm_mode_config *drm_config = &dev->mode_config; |
| 1098 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 1098 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
| 1099 | const struct tv_mode *tv_mode = intel_tv_mode_find (intel_output); | 1099 | const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder); |
| 1100 | struct drm_encoder *other_encoder; | 1100 | struct drm_encoder *other_encoder; |
| 1101 | 1101 | ||
| 1102 | if (!tv_mode) | 1102 | if (!tv_mode) |
| @@ -1121,9 +1121,9 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
| 1121 | struct drm_i915_private *dev_priv = dev->dev_private; | 1121 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1122 | struct drm_crtc *crtc = encoder->crtc; | 1122 | struct drm_crtc *crtc = encoder->crtc; |
| 1123 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1123 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 1124 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 1124 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
| 1125 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 1125 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
| 1126 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1126 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
| 1127 | u32 tv_ctl; | 1127 | u32 tv_ctl; |
| 1128 | u32 hctl1, hctl2, hctl3; | 1128 | u32 hctl1, hctl2, hctl3; |
| 1129 | u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; | 1129 | u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; |
| @@ -1360,9 +1360,9 @@ static const struct drm_display_mode reported_modes[] = { | |||
| 1360 | * \return false if TV is disconnected. | 1360 | * \return false if TV is disconnected. |
| 1361 | */ | 1361 | */ |
| 1362 | static int | 1362 | static int |
| 1363 | intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) | 1363 | intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder) |
| 1364 | { | 1364 | { |
| 1365 | struct drm_encoder *encoder = &intel_output->enc; | 1365 | struct drm_encoder *encoder = &intel_encoder->enc; |
| 1366 | struct drm_device *dev = encoder->dev; | 1366 | struct drm_device *dev = encoder->dev; |
| 1367 | struct drm_i915_private *dev_priv = dev->dev_private; | 1367 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1368 | unsigned long irqflags; | 1368 | unsigned long irqflags; |
| @@ -1441,9 +1441,9 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) | |||
| 1441 | */ | 1441 | */ |
| 1442 | static void intel_tv_find_better_format(struct drm_connector *connector) | 1442 | static void intel_tv_find_better_format(struct drm_connector *connector) |
| 1443 | { | 1443 | { |
| 1444 | struct intel_output *intel_output = to_intel_output(connector); | 1444 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1445 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 1445 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
| 1446 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1446 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
| 1447 | int i; | 1447 | int i; |
| 1448 | 1448 | ||
| 1449 | if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == | 1449 | if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == |
| @@ -1475,9 +1475,9 @@ intel_tv_detect(struct drm_connector *connector) | |||
| 1475 | { | 1475 | { |
| 1476 | struct drm_crtc *crtc; | 1476 | struct drm_crtc *crtc; |
| 1477 | struct drm_display_mode mode; | 1477 | struct drm_display_mode mode; |
| 1478 | struct intel_output *intel_output = to_intel_output(connector); | 1478 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1479 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 1479 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
| 1480 | struct drm_encoder *encoder = &intel_output->enc; | 1480 | struct drm_encoder *encoder = &intel_encoder->enc; |
| 1481 | int dpms_mode; | 1481 | int dpms_mode; |
| 1482 | int type = tv_priv->type; | 1482 | int type = tv_priv->type; |
| 1483 | 1483 | ||
| @@ -1485,12 +1485,12 @@ intel_tv_detect(struct drm_connector *connector) | |||
| 1485 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); | 1485 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); |
| 1486 | 1486 | ||
| 1487 | if (encoder->crtc && encoder->crtc->enabled) { | 1487 | if (encoder->crtc && encoder->crtc->enabled) { |
| 1488 | type = intel_tv_detect_type(encoder->crtc, intel_output); | 1488 | type = intel_tv_detect_type(encoder->crtc, intel_encoder); |
| 1489 | } else { | 1489 | } else { |
| 1490 | crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); | 1490 | crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode); |
| 1491 | if (crtc) { | 1491 | if (crtc) { |
| 1492 | type = intel_tv_detect_type(crtc, intel_output); | 1492 | type = intel_tv_detect_type(crtc, intel_encoder); |
| 1493 | intel_release_load_detect_pipe(intel_output, dpms_mode); | 1493 | intel_release_load_detect_pipe(intel_encoder, dpms_mode); |
| 1494 | } else | 1494 | } else |
| 1495 | type = -1; | 1495 | type = -1; |
| 1496 | } | 1496 | } |
| @@ -1525,8 +1525,8 @@ static void | |||
| 1525 | intel_tv_chose_preferred_modes(struct drm_connector *connector, | 1525 | intel_tv_chose_preferred_modes(struct drm_connector *connector, |
| 1526 | struct drm_display_mode *mode_ptr) | 1526 | struct drm_display_mode *mode_ptr) |
| 1527 | { | 1527 | { |
| 1528 | struct intel_output *intel_output = to_intel_output(connector); | 1528 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1529 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1529 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
| 1530 | 1530 | ||
| 1531 | if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) | 1531 | if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) |
| 1532 | mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; | 1532 | mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; |
| @@ -1550,8 +1550,8 @@ static int | |||
| 1550 | intel_tv_get_modes(struct drm_connector *connector) | 1550 | intel_tv_get_modes(struct drm_connector *connector) |
| 1551 | { | 1551 | { |
| 1552 | struct drm_display_mode *mode_ptr; | 1552 | struct drm_display_mode *mode_ptr; |
| 1553 | struct intel_output *intel_output = to_intel_output(connector); | 1553 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1554 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1554 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
| 1555 | int j, count = 0; | 1555 | int j, count = 0; |
| 1556 | u64 tmp; | 1556 | u64 tmp; |
| 1557 | 1557 | ||
| @@ -1604,11 +1604,11 @@ intel_tv_get_modes(struct drm_connector *connector) | |||
| 1604 | static void | 1604 | static void |
| 1605 | intel_tv_destroy (struct drm_connector *connector) | 1605 | intel_tv_destroy (struct drm_connector *connector) |
| 1606 | { | 1606 | { |
| 1607 | struct intel_output *intel_output = to_intel_output(connector); | 1607 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1608 | 1608 | ||
| 1609 | drm_sysfs_connector_remove(connector); | 1609 | drm_sysfs_connector_remove(connector); |
| 1610 | drm_connector_cleanup(connector); | 1610 | drm_connector_cleanup(connector); |
| 1611 | kfree(intel_output); | 1611 | kfree(intel_encoder); |
| 1612 | } | 1612 | } |
| 1613 | 1613 | ||
| 1614 | 1614 | ||
| @@ -1617,9 +1617,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop | |||
| 1617 | uint64_t val) | 1617 | uint64_t val) |
| 1618 | { | 1618 | { |
| 1619 | struct drm_device *dev = connector->dev; | 1619 | struct drm_device *dev = connector->dev; |
| 1620 | struct intel_output *intel_output = to_intel_output(connector); | 1620 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
| 1621 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 1621 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
| 1622 | struct drm_encoder *encoder = &intel_output->enc; | 1622 | struct drm_encoder *encoder = &intel_encoder->enc; |
| 1623 | struct drm_crtc *crtc = encoder->crtc; | 1623 | struct drm_crtc *crtc = encoder->crtc; |
| 1624 | int ret = 0; | 1624 | int ret = 0; |
| 1625 | bool changed = false; | 1625 | bool changed = false; |
| @@ -1740,7 +1740,7 @@ intel_tv_init(struct drm_device *dev) | |||
| 1740 | { | 1740 | { |
| 1741 | struct drm_i915_private *dev_priv = dev->dev_private; | 1741 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1742 | struct drm_connector *connector; | 1742 | struct drm_connector *connector; |
| 1743 | struct intel_output *intel_output; | 1743 | struct intel_encoder *intel_encoder; |
| 1744 | struct intel_tv_priv *tv_priv; | 1744 | struct intel_tv_priv *tv_priv; |
| 1745 | u32 tv_dac_on, tv_dac_off, save_tv_dac; | 1745 | u32 tv_dac_on, tv_dac_off, save_tv_dac; |
| 1746 | char **tv_format_names; | 1746 | char **tv_format_names; |
| @@ -1780,28 +1780,28 @@ intel_tv_init(struct drm_device *dev) | |||
| 1780 | (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) | 1780 | (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) |
| 1781 | return; | 1781 | return; |
| 1782 | 1782 | ||
| 1783 | intel_output = kzalloc(sizeof(struct intel_output) + | 1783 | intel_encoder = kzalloc(sizeof(struct intel_encoder) + |
| 1784 | sizeof(struct intel_tv_priv), GFP_KERNEL); | 1784 | sizeof(struct intel_tv_priv), GFP_KERNEL); |
| 1785 | if (!intel_output) { | 1785 | if (!intel_encoder) { |
| 1786 | return; | 1786 | return; |
| 1787 | } | 1787 | } |
| 1788 | 1788 | ||
| 1789 | connector = &intel_output->base; | 1789 | connector = &intel_encoder->base; |
| 1790 | 1790 | ||
| 1791 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, | 1791 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, |
| 1792 | DRM_MODE_CONNECTOR_SVIDEO); | 1792 | DRM_MODE_CONNECTOR_SVIDEO); |
| 1793 | 1793 | ||
| 1794 | drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs, | 1794 | drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs, |
| 1795 | DRM_MODE_ENCODER_TVDAC); | 1795 | DRM_MODE_ENCODER_TVDAC); |
| 1796 | 1796 | ||
| 1797 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | 1797 | drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); |
| 1798 | tv_priv = (struct intel_tv_priv *)(intel_output + 1); | 1798 | tv_priv = (struct intel_tv_priv *)(intel_encoder + 1); |
| 1799 | intel_output->type = INTEL_OUTPUT_TVOUT; | 1799 | intel_encoder->type = INTEL_OUTPUT_TVOUT; |
| 1800 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 1800 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
| 1801 | intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT); | 1801 | intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); |
| 1802 | intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); | 1802 | intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1)); |
| 1803 | intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); | 1803 | intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); |
| 1804 | intel_output->dev_priv = tv_priv; | 1804 | intel_encoder->dev_priv = tv_priv; |
| 1805 | tv_priv->type = DRM_MODE_CONNECTOR_Unknown; | 1805 | tv_priv->type = DRM_MODE_CONNECTOR_Unknown; |
| 1806 | 1806 | ||
| 1807 | /* BIOS margin values */ | 1807 | /* BIOS margin values */ |
| @@ -1812,7 +1812,7 @@ intel_tv_init(struct drm_device *dev) | |||
| 1812 | 1812 | ||
| 1813 | tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); | 1813 | tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); |
| 1814 | 1814 | ||
| 1815 | drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs); | 1815 | drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs); |
| 1816 | drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); | 1816 | drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); |
| 1817 | connector->interlace_allowed = false; | 1817 | connector->interlace_allowed = false; |
| 1818 | connector->doublescan_allowed = false; | 1818 | connector->doublescan_allowed = false; |
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index 7f0d807a0d0d..453df3f6053f 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile | |||
| @@ -22,7 +22,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ | |||
| 22 | nv50_cursor.o nv50_display.o nv50_fbcon.o \ | 22 | nv50_cursor.o nv50_display.o nv50_fbcon.o \ |
| 23 | nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ | 23 | nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ |
| 24 | nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ | 24 | nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ |
| 25 | nv17_gpio.o | 25 | nv17_gpio.o nv50_gpio.o |
| 26 | 26 | ||
| 27 | nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o | 27 | nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o |
| 28 | nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o | 28 | nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index b5a9336a2e88..abc382a9918b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
| @@ -2573,48 +2573,34 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2573 | * each GPIO according to various values listed in each entry | 2573 | * each GPIO according to various values listed in each entry |
| 2574 | */ | 2574 | */ |
| 2575 | 2575 | ||
| 2576 | const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; | 2576 | struct drm_nouveau_private *dev_priv = bios->dev->dev_private; |
| 2577 | const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c }; | 2577 | const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c }; |
| 2578 | const uint8_t *gpio_table = &bios->data[bios->dcb.gpio_table_ptr]; | ||
| 2579 | const uint8_t *gpio_entry; | ||
| 2580 | int i; | 2578 | int i; |
| 2581 | 2579 | ||
| 2582 | if (!iexec->execute) | 2580 | if (dev_priv->card_type != NV_50) { |
| 2583 | return 1; | 2581 | NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n"); |
| 2584 | 2582 | return -ENODEV; | |
| 2585 | if (bios->dcb.version != 0x40) { | ||
| 2586 | NV_ERROR(bios->dev, "DCB table not version 4.0\n"); | ||
| 2587 | return 0; | ||
| 2588 | } | ||
| 2589 | |||
| 2590 | if (!bios->dcb.gpio_table_ptr) { | ||
| 2591 | NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n"); | ||
| 2592 | return 0; | ||
| 2593 | } | 2583 | } |
| 2594 | 2584 | ||
| 2595 | gpio_entry = gpio_table + gpio_table[1]; | 2585 | if (!iexec->execute) |
| 2596 | for (i = 0; i < gpio_table[2]; i++, gpio_entry += gpio_table[3]) { | 2586 | return 1; |
| 2597 | uint32_t entry = ROM32(gpio_entry[0]), r, s, v; | ||
| 2598 | int line = (entry & 0x0000001f); | ||
| 2599 | 2587 | ||
| 2600 | BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, entry); | 2588 | for (i = 0; i < bios->dcb.gpio.entries; i++) { |
| 2589 | struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i]; | ||
| 2590 | uint32_t r, s, v; | ||
| 2601 | 2591 | ||
| 2602 | if ((entry & 0x0000ff00) == 0x0000ff00) | 2592 | BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry); |
| 2603 | continue; | ||
| 2604 | 2593 | ||
| 2605 | r = nv50_gpio_reg[line >> 3]; | 2594 | nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default); |
| 2606 | s = (line & 0x07) << 2; | ||
| 2607 | v = bios_rd32(bios, r) & ~(0x00000003 << s); | ||
| 2608 | if (entry & 0x01000000) | ||
| 2609 | v |= (((entry & 0x60000000) >> 29) ^ 2) << s; | ||
| 2610 | else | ||
| 2611 | v |= (((entry & 0x18000000) >> 27) ^ 2) << s; | ||
| 2612 | bios_wr32(bios, r, v); | ||
| 2613 | 2595 | ||
| 2614 | r = nv50_gpio_ctl[line >> 4]; | 2596 | /* The NVIDIA binary driver doesn't appear to actually do |
| 2615 | s = (line & 0x0f); | 2597 | * any of this, my VBIOS does however. |
| 2598 | */ | ||
| 2599 | /* Not a clue, needs de-magicing */ | ||
| 2600 | r = nv50_gpio_ctl[gpio->line >> 4]; | ||
| 2601 | s = (gpio->line & 0x0f); | ||
| 2616 | v = bios_rd32(bios, r) & ~(0x00010001 << s); | 2602 | v = bios_rd32(bios, r) & ~(0x00010001 << s); |
| 2617 | switch ((entry & 0x06000000) >> 25) { | 2603 | switch ((gpio->entry & 0x06000000) >> 25) { |
| 2618 | case 1: | 2604 | case 1: |
| 2619 | v |= (0x00000001 << s); | 2605 | v |= (0x00000001 << s); |
| 2620 | break; | 2606 | break; |
| @@ -3198,7 +3184,6 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int | |||
| 3198 | struct nvbios *bios = &dev_priv->vbios; | 3184 | struct nvbios *bios = &dev_priv->vbios; |
| 3199 | unsigned int outputset = (dcbent->or == 4) ? 1 : 0; | 3185 | unsigned int outputset = (dcbent->or == 4) ? 1 : 0; |
| 3200 | uint16_t scriptptr = 0, clktable; | 3186 | uint16_t scriptptr = 0, clktable; |
| 3201 | uint8_t clktableptr = 0; | ||
| 3202 | 3187 | ||
| 3203 | /* | 3188 | /* |
| 3204 | * For now we assume version 3.0 table - g80 support will need some | 3189 | * For now we assume version 3.0 table - g80 support will need some |
| @@ -3217,26 +3202,29 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int | |||
| 3217 | scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]); | 3202 | scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]); |
| 3218 | break; | 3203 | break; |
| 3219 | case LVDS_RESET: | 3204 | case LVDS_RESET: |
| 3205 | clktable = bios->fp.lvdsmanufacturerpointer + 15; | ||
| 3206 | if (dcbent->or == 4) | ||
| 3207 | clktable += 8; | ||
| 3208 | |||
| 3220 | if (dcbent->lvdsconf.use_straps_for_mode) { | 3209 | if (dcbent->lvdsconf.use_straps_for_mode) { |
| 3221 | if (bios->fp.dual_link) | 3210 | if (bios->fp.dual_link) |
| 3222 | clktableptr += 2; | 3211 | clktable += 4; |
| 3223 | if (bios->fp.BITbit1) | 3212 | if (bios->fp.if_is_24bit) |
| 3224 | clktableptr++; | 3213 | clktable += 2; |
| 3225 | } else { | 3214 | } else { |
| 3226 | /* using EDID */ | 3215 | /* using EDID */ |
| 3227 | uint8_t fallback = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; | 3216 | int cmpval_24bit = (dcbent->or == 4) ? 4 : 1; |
| 3228 | int fallbackcmpval = (dcbent->or == 4) ? 4 : 1; | ||
| 3229 | 3217 | ||
| 3230 | if (bios->fp.dual_link) { | 3218 | if (bios->fp.dual_link) { |
| 3231 | clktableptr += 2; | 3219 | clktable += 4; |
| 3232 | fallbackcmpval *= 2; | 3220 | cmpval_24bit <<= 1; |
| 3233 | } | 3221 | } |
| 3234 | if (fallbackcmpval & fallback) | 3222 | |
| 3235 | clktableptr++; | 3223 | if (bios->fp.strapless_is_24bit & cmpval_24bit) |
| 3224 | clktable += 2; | ||
| 3236 | } | 3225 | } |
| 3237 | 3226 | ||
| 3238 | /* adding outputset * 8 may not be correct */ | 3227 | clktable = ROM16(bios->data[clktable]); |
| 3239 | clktable = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 15 + clktableptr * 2 + outputset * 8]); | ||
| 3240 | if (!clktable) { | 3228 | if (!clktable) { |
| 3241 | NV_ERROR(dev, "Pixel clock comparison table not found\n"); | 3229 | NV_ERROR(dev, "Pixel clock comparison table not found\n"); |
| 3242 | return -ENOENT; | 3230 | return -ENOENT; |
| @@ -3638,37 +3626,40 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b | |||
| 3638 | *if_is_24bit = bios->data[lvdsofs] & 16; | 3626 | *if_is_24bit = bios->data[lvdsofs] & 16; |
| 3639 | break; | 3627 | break; |
| 3640 | case 0x30: | 3628 | case 0x30: |
| 3641 | /* | 3629 | case 0x40: |
| 3642 | * My money would be on there being a 24 bit interface bit in | ||
| 3643 | * this table, but I have no example of a laptop bios with a | ||
| 3644 | * 24 bit panel to confirm that. Hence we shout loudly if any | ||
| 3645 | * bit other than bit 0 is set (I've not even seen bit 1) | ||
| 3646 | */ | ||
| 3647 | if (bios->data[lvdsofs] > 1) | ||
| 3648 | NV_ERROR(dev, | ||
| 3649 | "You have a very unusual laptop display; please report it\n"); | ||
| 3650 | /* | 3630 | /* |
| 3651 | * No sign of the "power off for reset" or "reset for panel | 3631 | * No sign of the "power off for reset" or "reset for panel |
| 3652 | * on" bits, but it's safer to assume we should | 3632 | * on" bits, but it's safer to assume we should |
| 3653 | */ | 3633 | */ |
| 3654 | bios->fp.power_off_for_reset = true; | 3634 | bios->fp.power_off_for_reset = true; |
| 3655 | bios->fp.reset_after_pclk_change = true; | 3635 | bios->fp.reset_after_pclk_change = true; |
| 3636 | |||
| 3656 | /* | 3637 | /* |
| 3657 | * It's ok lvdsofs is wrong for nv4x edid case; dual_link is | 3638 | * It's ok lvdsofs is wrong for nv4x edid case; dual_link is |
| 3658 | * over-written, and BITbit1 isn't used | 3639 | * over-written, and if_is_24bit isn't used |
| 3659 | */ | 3640 | */ |
| 3660 | bios->fp.dual_link = bios->data[lvdsofs] & 1; | 3641 | bios->fp.dual_link = bios->data[lvdsofs] & 1; |
| 3661 | bios->fp.BITbit1 = bios->data[lvdsofs] & 2; | ||
| 3662 | bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10; | ||
| 3663 | break; | ||
| 3664 | case 0x40: | ||
| 3665 | bios->fp.dual_link = bios->data[lvdsofs] & 1; | ||
| 3666 | bios->fp.if_is_24bit = bios->data[lvdsofs] & 2; | 3642 | bios->fp.if_is_24bit = bios->data[lvdsofs] & 2; |
| 3667 | bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; | 3643 | bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; |
| 3668 | bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10; | 3644 | bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10; |
| 3669 | break; | 3645 | break; |
| 3670 | } | 3646 | } |
| 3671 | 3647 | ||
| 3648 | /* Dell Latitude D620 reports a too-high value for the dual-link | ||
| 3649 | * transition freq, causing us to program the panel incorrectly. | ||
| 3650 | * | ||
| 3651 | * It doesn't appear the VBIOS actually uses its transition freq | ||
| 3652 | * (90000kHz), instead it uses the "Number of LVDS channels" field | ||
| 3653 | * out of the panel ID structure (http://www.spwg.org/). | ||
| 3654 | * | ||
| 3655 | * For the moment, a quirk will do :) | ||
| 3656 | */ | ||
| 3657 | if ((dev->pdev->device == 0x01d7) && | ||
| 3658 | (dev->pdev->subsystem_vendor == 0x1028) && | ||
| 3659 | (dev->pdev->subsystem_device == 0x01c2)) { | ||
| 3660 | bios->fp.duallink_transition_clk = 80000; | ||
| 3661 | } | ||
| 3662 | |||
| 3672 | /* set dual_link flag for EDID case */ | 3663 | /* set dual_link flag for EDID case */ |
| 3673 | if (pxclk && (chip_version < 0x25 || chip_version > 0x28)) | 3664 | if (pxclk && (chip_version < 0x25 || chip_version > 0x28)) |
| 3674 | bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk); | 3665 | bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk); |
| @@ -5077,25 +5068,25 @@ parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset) | |||
| 5077 | gpio->tag = tag; | 5068 | gpio->tag = tag; |
| 5078 | gpio->line = line; | 5069 | gpio->line = line; |
| 5079 | gpio->invert = flags != 4; | 5070 | gpio->invert = flags != 4; |
| 5071 | gpio->entry = ent; | ||
| 5080 | } | 5072 | } |
| 5081 | 5073 | ||
| 5082 | static void | 5074 | static void |
| 5083 | parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset) | 5075 | parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset) |
| 5084 | { | 5076 | { |
| 5077 | uint32_t entry = ROM32(bios->data[offset]); | ||
| 5085 | struct dcb_gpio_entry *gpio; | 5078 | struct dcb_gpio_entry *gpio; |
| 5086 | uint32_t ent = ROM32(bios->data[offset]); | ||
| 5087 | uint8_t line = ent & 0x1f, | ||
| 5088 | tag = ent >> 8 & 0xff; | ||
| 5089 | 5079 | ||
| 5090 | if (tag == 0xff) | 5080 | if ((entry & 0x0000ff00) == 0x0000ff00) |
| 5091 | return; | 5081 | return; |
| 5092 | 5082 | ||
| 5093 | gpio = new_gpio_entry(bios); | 5083 | gpio = new_gpio_entry(bios); |
| 5094 | 5084 | gpio->tag = (entry & 0x0000ff00) >> 8; | |
| 5095 | /* Currently unused, we may need more fields parsed at some | 5085 | gpio->line = (entry & 0x0000001f) >> 0; |
| 5096 | * point. */ | 5086 | gpio->state_default = (entry & 0x01000000) >> 24; |
| 5097 | gpio->tag = tag; | 5087 | gpio->state[0] = (entry & 0x18000000) >> 27; |
| 5098 | gpio->line = line; | 5088 | gpio->state[1] = (entry & 0x60000000) >> 29; |
| 5089 | gpio->entry = entry; | ||
| 5099 | } | 5090 | } |
| 5100 | 5091 | ||
| 5101 | static void | 5092 | static void |
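Not part of the patch, but a stand-alone sketch of the LVDS_RESET offset arithmetic the nouveau_bios.c hunk above switches to: rather than accumulating an index (clktableptr) and scaling it later, the new code walks a byte offset straight into the pixel-clock comparison table (+8 for the second output set, +4 for dual link, +2 for a 24-bit panel). The helper name and sample values are invented; the strap and EDID paths take the 24-bit decision from different VBIOS fields, which this sketch glosses over.

/* Illustrative only: compute the clock-comparison table offset. */
#include <stdint.h>
#include <stdio.h>

static uint16_t clk_compare_offset(uint16_t lvdsmanufacturerpointer,
                                   int output_or, int dual_link, int is_24bit)
{
        uint16_t clktable = lvdsmanufacturerpointer + 15;

        if (output_or == 4)
                clktable += 8;  /* second output set */
        if (dual_link)
                clktable += 4;  /* dual-link entries follow single-link */
        if (is_24bit)
                clktable += 2;  /* 24-bit entry follows 18-bit */
        return clktable;
}

int main(void)
{
        printf("offset = 0x%04x\n", clk_compare_offset(0x1000, 4, 1, 0));
        return 0;
}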
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index 4f88e6924d27..c0d7b0a3ece0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h | |||
| @@ -49,6 +49,9 @@ struct dcb_gpio_entry { | |||
| 49 | enum dcb_gpio_tag tag; | 49 | enum dcb_gpio_tag tag; |
| 50 | int line; | 50 | int line; |
| 51 | bool invert; | 51 | bool invert; |
| 52 | uint32_t entry; | ||
| 53 | uint8_t state_default; | ||
| 54 | uint8_t state[2]; | ||
| 52 | }; | 55 | }; |
| 53 | 56 | ||
| 54 | struct dcb_gpio_table { | 57 | struct dcb_gpio_table { |
| @@ -267,7 +270,6 @@ struct nvbios { | |||
| 267 | bool reset_after_pclk_change; | 270 | bool reset_after_pclk_change; |
| 268 | bool dual_link; | 271 | bool dual_link; |
| 269 | bool link_c_increment; | 272 | bool link_c_increment; |
| 270 | bool BITbit1; | ||
| 271 | bool if_is_24bit; | 273 | bool if_is_24bit; |
| 272 | int duallink_transition_clk; | 274 | int duallink_transition_clk; |
| 273 | uint8_t strapless_is_24bit; | 275 | uint8_t strapless_is_24bit; |
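A stand-alone decode of the DCB 4.0 GPIO entry layout that the parse_dcb40_gpio_entry() and nouveau_bios.h changes above start recording (tag, line, default state and the two per-state values). Illustration only, reusing the same masks as the patch; the sample word is arbitrary.

/* Illustrative only: unpack a DCB 4.0 GPIO entry word. */
#include <stdint.h>
#include <stdio.h>

struct gpio_fields {
        uint8_t tag, line, state_default, state[2];
};

static int decode_dcb40_gpio(uint32_t entry, struct gpio_fields *g)
{
        if ((entry & 0x0000ff00) == 0x0000ff00)
                return -1;                       /* tag 0xff: unused slot */

        g->tag           = (entry & 0x0000ff00) >> 8;
        g->line          = (entry & 0x0000001f) >> 0;
        g->state_default = (entry & 0x01000000) >> 24;
        g->state[0]      = (entry & 0x18000000) >> 27;
        g->state[1]      = (entry & 0x60000000) >> 29;
        return 0;
}

int main(void)
{
        struct gpio_fields g;

        if (!decode_dcb40_gpio(0x31000d07, &g))
                printf("tag 0x%02x line %u default %u states %u/%u\n",
                       g.tag, g.line, g.state_default, g.state[0], g.state[1]);
        return 0;
}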
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 9042dd7fb058..957d17629840 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
| @@ -72,7 +72,7 @@ nouveau_bo_fixup_align(struct drm_device *dev, | |||
| 72 | * many small buffers. | 72 | * many small buffers. |
| 73 | */ | 73 | */ |
| 74 | if (dev_priv->card_type == NV_50) { | 74 | if (dev_priv->card_type == NV_50) { |
| 75 | uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; | 75 | uint32_t block_size = dev_priv->vram_size >> 15; |
| 76 | int i; | 76 | int i; |
| 77 | 77 | ||
| 78 | switch (tile_flags) { | 78 | switch (tile_flags) { |
| @@ -154,7 +154,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
| 154 | 154 | ||
| 155 | nvbo->placement.fpfn = 0; | 155 | nvbo->placement.fpfn = 0; |
| 156 | nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0; | 156 | nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0; |
| 157 | nouveau_bo_placement_set(nvbo, flags); | 157 | nouveau_bo_placement_set(nvbo, flags, 0); |
| 158 | 158 | ||
| 159 | nvbo->channel = chan; | 159 | nvbo->channel = chan; |
| 160 | ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, | 160 | ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, |
| @@ -173,26 +173,33 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
| 173 | return 0; | 173 | return 0; |
| 174 | } | 174 | } |
| 175 | 175 | ||
| 176 | static void | ||
| 177 | set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags) | ||
| 178 | { | ||
| 179 | *n = 0; | ||
| 180 | |||
| 181 | if (type & TTM_PL_FLAG_VRAM) | ||
| 182 | pl[(*n)++] = TTM_PL_FLAG_VRAM | flags; | ||
| 183 | if (type & TTM_PL_FLAG_TT) | ||
| 184 | pl[(*n)++] = TTM_PL_FLAG_TT | flags; | ||
| 185 | if (type & TTM_PL_FLAG_SYSTEM) | ||
| 186 | pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; | ||
| 187 | } | ||
| 188 | |||
| 176 | void | 189 | void |
| 177 | nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype) | 190 | nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) |
| 178 | { | 191 | { |
| 179 | int n = 0; | 192 | struct ttm_placement *pl = &nvbo->placement; |
| 180 | 193 | uint32_t flags = TTM_PL_MASK_CACHING | | |
| 181 | if (memtype & TTM_PL_FLAG_VRAM) | 194 | (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0); |
| 182 | nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING; | 195 | |
| 183 | if (memtype & TTM_PL_FLAG_TT) | 196 | pl->placement = nvbo->placements; |
| 184 | nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; | 197 | set_placement_list(nvbo->placements, &pl->num_placement, |
| 185 | if (memtype & TTM_PL_FLAG_SYSTEM) | 198 | type, flags); |
| 186 | nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; | 199 | |
| 187 | nvbo->placement.placement = nvbo->placements; | 200 | pl->busy_placement = nvbo->busy_placements; |
| 188 | nvbo->placement.busy_placement = nvbo->placements; | 201 | set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, |
| 189 | nvbo->placement.num_placement = n; | 202 | type | busy, flags); |
| 190 | nvbo->placement.num_busy_placement = n; | ||
| 191 | |||
| 192 | if (nvbo->pin_refcnt) { | ||
| 193 | while (n--) | ||
| 194 | nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT; | ||
| 195 | } | ||
| 196 | } | 203 | } |
| 197 | 204 | ||
| 198 | int | 205 | int |
| @@ -200,7 +207,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) | |||
| 200 | { | 207 | { |
| 201 | struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); | 208 | struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); |
| 202 | struct ttm_buffer_object *bo = &nvbo->bo; | 209 | struct ttm_buffer_object *bo = &nvbo->bo; |
| 203 | int ret, i; | 210 | int ret; |
| 204 | 211 | ||
| 205 | if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { | 212 | if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { |
| 206 | NV_ERROR(nouveau_bdev(bo->bdev)->dev, | 213 | NV_ERROR(nouveau_bdev(bo->bdev)->dev, |
| @@ -216,9 +223,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) | |||
| 216 | if (ret) | 223 | if (ret) |
| 217 | goto out; | 224 | goto out; |
| 218 | 225 | ||
| 219 | nouveau_bo_placement_set(nvbo, memtype); | 226 | nouveau_bo_placement_set(nvbo, memtype, 0); |
| 220 | for (i = 0; i < nvbo->placement.num_placement; i++) | ||
| 221 | nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT; | ||
| 222 | 227 | ||
| 223 | ret = ttm_bo_validate(bo, &nvbo->placement, false, false); | 228 | ret = ttm_bo_validate(bo, &nvbo->placement, false, false); |
| 224 | if (ret == 0) { | 229 | if (ret == 0) { |
| @@ -245,7 +250,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) | |||
| 245 | { | 250 | { |
| 246 | struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); | 251 | struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); |
| 247 | struct ttm_buffer_object *bo = &nvbo->bo; | 252 | struct ttm_buffer_object *bo = &nvbo->bo; |
| 248 | int ret, i; | 253 | int ret; |
| 249 | 254 | ||
| 250 | if (--nvbo->pin_refcnt) | 255 | if (--nvbo->pin_refcnt) |
| 251 | return 0; | 256 | return 0; |
| @@ -254,8 +259,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) | |||
| 254 | if (ret) | 259 | if (ret) |
| 255 | return ret; | 260 | return ret; |
| 256 | 261 | ||
| 257 | for (i = 0; i < nvbo->placement.num_placement; i++) | 262 | nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); |
| 258 | nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; | ||
| 259 | 263 | ||
| 260 | ret = ttm_bo_validate(bo, &nvbo->placement, false, false); | 264 | ret = ttm_bo_validate(bo, &nvbo->placement, false, false); |
| 261 | if (ret == 0) { | 265 | if (ret == 0) { |
| @@ -396,8 +400,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
| 396 | man->io_addr = NULL; | 400 | man->io_addr = NULL; |
| 397 | man->io_offset = drm_get_resource_start(dev, 1); | 401 | man->io_offset = drm_get_resource_start(dev, 1); |
| 398 | man->io_size = drm_get_resource_len(dev, 1); | 402 | man->io_size = drm_get_resource_len(dev, 1); |
| 399 | if (man->io_size > nouveau_mem_fb_amount(dev)) | 403 | if (man->io_size > dev_priv->vram_size) |
| 400 | man->io_size = nouveau_mem_fb_amount(dev); | 404 | man->io_size = dev_priv->vram_size; |
| 401 | 405 | ||
| 402 | man->gpu_offset = dev_priv->vm_vram_base; | 406 | man->gpu_offset = dev_priv->vm_vram_base; |
| 403 | break; | 407 | break; |
| @@ -440,10 +444,11 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) | |||
| 440 | 444 | ||
| 441 | switch (bo->mem.mem_type) { | 445 | switch (bo->mem.mem_type) { |
| 442 | case TTM_PL_VRAM: | 446 | case TTM_PL_VRAM: |
| 443 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT); | 447 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, |
| 448 | TTM_PL_FLAG_SYSTEM); | ||
| 444 | break; | 449 | break; |
| 445 | default: | 450 | default: |
| 446 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM); | 451 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0); |
| 447 | break; | 452 | break; |
| 448 | } | 453 | } |
| 449 | 454 | ||
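The nouveau_bo.c rework above funnels all placement decisions through one helper that expands a type bitmask into an ordered flag array, so a separate "busy" list can simply be built from type | busy. A minimal user-space sketch of that helper follows; the PL_FLAG_* values are local stand-ins, not the real TTM constants.

/* Illustrative only: expand a placement-type mask into a flag list. */
#include <stdint.h>
#include <stdio.h>

#define PL_FLAG_VRAM   (1u << 0)
#define PL_FLAG_TT     (1u << 1)
#define PL_FLAG_SYSTEM (1u << 2)

static void set_placement_list(uint32_t *pl, unsigned *n,
                               uint32_t type, uint32_t flags)
{
        *n = 0;
        if (type & PL_FLAG_VRAM)
                pl[(*n)++] = PL_FLAG_VRAM | flags;
        if (type & PL_FLAG_TT)
                pl[(*n)++] = PL_FLAG_TT | flags;
        if (type & PL_FLAG_SYSTEM)
                pl[(*n)++] = PL_FLAG_SYSTEM | flags;
}

int main(void)
{
        uint32_t placements[3], busy[3];
        unsigned n, nb;

        /* preferred: TT only; acceptable when busy: TT or SYSTEM */
        set_placement_list(placements, &n, PL_FLAG_TT, 0);
        set_placement_list(busy, &nb, PL_FLAG_TT | PL_FLAG_SYSTEM, 0);
        printf("%u preferred placement(s), %u busy placement(s)\n", n, nb);
        return 0;
}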
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 6dfb425cbae9..1fc57ef58295 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c | |||
| @@ -142,7 +142,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, | |||
| 142 | GFP_KERNEL); | 142 | GFP_KERNEL); |
| 143 | if (!dev_priv->fifos[channel]) | 143 | if (!dev_priv->fifos[channel]) |
| 144 | return -ENOMEM; | 144 | return -ENOMEM; |
| 145 | dev_priv->fifo_alloc_count++; | ||
| 146 | chan = dev_priv->fifos[channel]; | 145 | chan = dev_priv->fifos[channel]; |
| 147 | INIT_LIST_HEAD(&chan->nvsw.vbl_wait); | 146 | INIT_LIST_HEAD(&chan->nvsw.vbl_wait); |
| 148 | INIT_LIST_HEAD(&chan->fence.pending); | 147 | INIT_LIST_HEAD(&chan->fence.pending); |
| @@ -321,7 +320,6 @@ nouveau_channel_free(struct nouveau_channel *chan) | |||
| 321 | iounmap(chan->user); | 320 | iounmap(chan->user); |
| 322 | 321 | ||
| 323 | dev_priv->fifos[chan->id] = NULL; | 322 | dev_priv->fifos[chan->id] = NULL; |
| 324 | dev_priv->fifo_alloc_count--; | ||
| 325 | kfree(chan); | 323 | kfree(chan); |
| 326 | } | 324 | } |
| 327 | 325 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 8ff9ef5d4b47..a251886a0ce6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c | |||
| @@ -137,10 +137,9 @@ nouveau_debugfs_memory_info(struct seq_file *m, void *data) | |||
| 137 | { | 137 | { |
| 138 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 138 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
| 139 | struct drm_minor *minor = node->minor; | 139 | struct drm_minor *minor = node->minor; |
| 140 | struct drm_device *dev = minor->dev; | 140 | struct drm_nouveau_private *dev_priv = minor->dev->dev_private; |
| 141 | 141 | ||
| 142 | seq_printf(m, "VRAM total: %dKiB\n", | 142 | seq_printf(m, "VRAM total: %dKiB\n", (int)(dev_priv->vram_size >> 10)); |
| 143 | (int)(nouveau_mem_fb_amount(dev) >> 10)); | ||
| 144 | return 0; | 143 | return 0; |
| 145 | } | 144 | } |
| 146 | 145 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index f954ad93e81f..deeb21c6865c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c | |||
| @@ -483,7 +483,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, | |||
| 483 | ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT); | 483 | ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT); |
| 484 | ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT); | 484 | ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT); |
| 485 | 485 | ||
| 486 | for (;;) { | 486 | for (i = 0; i < 16; i++) { |
| 487 | nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000); | 487 | nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000); |
| 488 | nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl); | 488 | nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl); |
| 489 | nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000); | 489 | nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000); |
| @@ -502,6 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, | |||
| 502 | break; | 502 | break; |
| 503 | } | 503 | } |
| 504 | 504 | ||
| 505 | if (i == 16) { | ||
| 506 | NV_ERROR(dev, "auxch DEFER too many times, bailing\n"); | ||
| 507 | ret = -EREMOTEIO; | ||
| 508 | goto out; | ||
| 509 | } | ||
| 510 | |||
| 505 | if (cmd & 1) { | 511 | if (cmd & 1) { |
| 506 | if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) { | 512 | if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) { |
| 507 | ret = -EREMOTEIO; | 513 | ret = -EREMOTEIO; |
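The nouveau_dp.c hunk above replaces an unbounded for (;;) with a 16-attempt loop and reports -EREMOTEIO when the sink keeps deferring. A tiny sketch of that bounded-retry shape, with try_transfer() as a hypothetical stand-in for the real AUX channel transaction:

/* Illustrative only: bound a retry loop and report when the limit hits. */
#include <stdio.h>

#define EREMOTEIO 121

static int try_transfer(int attempt)
{
        return attempt < 5 ? -1 : 0;    /* pretend the sink DEFERs 5 times */
}

static int auxch_xfer(void)
{
        int i;

        for (i = 0; i < 16; i++) {      /* was: for (;;) */
                if (try_transfer(i) == 0)
                        break;          /* success; hard errors handled inside */
        }
        if (i == 16) {
                fprintf(stderr, "auxch DEFER too many times, bailing\n");
                return -EREMOTEIO;
        }
        return 0;
}

int main(void)
{
        printf("auxch_xfer() = %d\n", auxch_xfer());
        return 0;
}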
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index d8b559011777..ace630aa89e1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
| @@ -76,6 +76,7 @@ struct nouveau_bo { | |||
| 76 | struct ttm_buffer_object bo; | 76 | struct ttm_buffer_object bo; |
| 77 | struct ttm_placement placement; | 77 | struct ttm_placement placement; |
| 78 | u32 placements[3]; | 78 | u32 placements[3]; |
| 79 | u32 busy_placements[3]; | ||
| 79 | struct ttm_bo_kmap_obj kmap; | 80 | struct ttm_bo_kmap_obj kmap; |
| 80 | struct list_head head; | 81 | struct list_head head; |
| 81 | 82 | ||
| @@ -519,6 +520,7 @@ struct drm_nouveau_private { | |||
| 519 | 520 | ||
| 520 | struct workqueue_struct *wq; | 521 | struct workqueue_struct *wq; |
| 521 | struct work_struct irq_work; | 522 | struct work_struct irq_work; |
| 523 | struct work_struct hpd_work; | ||
| 522 | 524 | ||
| 523 | struct list_head vbl_waiting; | 525 | struct list_head vbl_waiting; |
| 524 | 526 | ||
| @@ -533,7 +535,6 @@ struct drm_nouveau_private { | |||
| 533 | 535 | ||
| 534 | struct fb_info *fbdev_info; | 536 | struct fb_info *fbdev_info; |
| 535 | 537 | ||
| 536 | int fifo_alloc_count; | ||
| 537 | struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; | 538 | struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; |
| 538 | 539 | ||
| 539 | struct nouveau_engine engine; | 540 | struct nouveau_engine engine; |
| @@ -553,12 +554,6 @@ struct drm_nouveau_private { | |||
| 553 | uint32_t ramro_offset; | 554 | uint32_t ramro_offset; |
| 554 | uint32_t ramro_size; | 555 | uint32_t ramro_size; |
| 555 | 556 | ||
| 556 | /* base physical addresses */ | ||
| 557 | uint64_t fb_phys; | ||
| 558 | uint64_t fb_available_size; | ||
| 559 | uint64_t fb_mappable_pages; | ||
| 560 | uint64_t fb_aper_free; | ||
| 561 | |||
| 562 | struct { | 557 | struct { |
| 563 | enum { | 558 | enum { |
| 564 | NOUVEAU_GART_NONE = 0, | 559 | NOUVEAU_GART_NONE = 0, |
| @@ -572,10 +567,6 @@ struct drm_nouveau_private { | |||
| 572 | struct nouveau_gpuobj *sg_ctxdma; | 567 | struct nouveau_gpuobj *sg_ctxdma; |
| 573 | struct page *sg_dummy_page; | 568 | struct page *sg_dummy_page; |
| 574 | dma_addr_t sg_dummy_bus; | 569 | dma_addr_t sg_dummy_bus; |
| 575 | |||
| 576 | /* nottm hack */ | ||
| 577 | struct drm_ttm_backend *sg_be; | ||
| 578 | unsigned long sg_handle; | ||
| 579 | } gart_info; | 570 | } gart_info; |
| 580 | 571 | ||
| 581 | /* nv10-nv40 tiling regions */ | 572 | /* nv10-nv40 tiling regions */ |
| @@ -584,6 +575,16 @@ struct drm_nouveau_private { | |||
| 584 | spinlock_t lock; | 575 | spinlock_t lock; |
| 585 | } tile; | 576 | } tile; |
| 586 | 577 | ||
| 578 | /* VRAM/fb configuration */ | ||
| 579 | uint64_t vram_size; | ||
| 580 | uint64_t vram_sys_base; | ||
| 581 | |||
| 582 | uint64_t fb_phys; | ||
| 583 | uint64_t fb_available_size; | ||
| 584 | uint64_t fb_mappable_pages; | ||
| 585 | uint64_t fb_aper_free; | ||
| 586 | int fb_mtrr; | ||
| 587 | |||
| 587 | /* G8x/G9x virtual address space */ | 588 | /* G8x/G9x virtual address space */ |
| 588 | uint64_t vm_gart_base; | 589 | uint64_t vm_gart_base; |
| 589 | uint64_t vm_gart_size; | 590 | uint64_t vm_gart_size; |
| @@ -592,10 +593,6 @@ struct drm_nouveau_private { | |||
| 592 | uint64_t vm_end; | 593 | uint64_t vm_end; |
| 593 | struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; | 594 | struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; |
| 594 | int vm_vram_pt_nr; | 595 | int vm_vram_pt_nr; |
| 595 | uint64_t vram_sys_base; | ||
| 596 | |||
| 597 | /* the mtrr covering the FB */ | ||
| 598 | int fb_mtrr; | ||
| 599 | 596 | ||
| 600 | struct mem_block *ramin_heap; | 597 | struct mem_block *ramin_heap; |
| 601 | 598 | ||
| @@ -614,11 +611,7 @@ struct drm_nouveau_private { | |||
| 614 | uint32_t dac_users[4]; | 611 | uint32_t dac_users[4]; |
| 615 | 612 | ||
| 616 | struct nouveau_suspend_resume { | 613 | struct nouveau_suspend_resume { |
| 617 | uint32_t fifo_mode; | ||
| 618 | uint32_t graph_ctx_control; | ||
| 619 | uint32_t graph_state; | ||
| 620 | uint32_t *ramin_copy; | 614 | uint32_t *ramin_copy; |
| 621 | uint64_t ramin_size; | ||
| 622 | } susres; | 615 | } susres; |
| 623 | 616 | ||
| 624 | struct backlight_device *backlight; | 617 | struct backlight_device *backlight; |
| @@ -717,7 +710,7 @@ extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *, | |||
| 717 | struct drm_file *, int tail); | 710 | struct drm_file *, int tail); |
| 718 | extern void nouveau_mem_takedown(struct mem_block **heap); | 711 | extern void nouveau_mem_takedown(struct mem_block **heap); |
| 719 | extern void nouveau_mem_free_block(struct mem_block *); | 712 | extern void nouveau_mem_free_block(struct mem_block *); |
| 720 | extern uint64_t nouveau_mem_fb_amount(struct drm_device *); | 713 | extern int nouveau_mem_detect(struct drm_device *dev); |
| 721 | extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); | 714 | extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); |
| 722 | extern int nouveau_mem_init(struct drm_device *); | 715 | extern int nouveau_mem_init(struct drm_device *); |
| 723 | extern int nouveau_mem_init_agp(struct drm_device *); | 716 | extern int nouveau_mem_init_agp(struct drm_device *); |
| @@ -1124,7 +1117,8 @@ extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags); | |||
| 1124 | extern int nouveau_bo_unpin(struct nouveau_bo *); | 1117 | extern int nouveau_bo_unpin(struct nouveau_bo *); |
| 1125 | extern int nouveau_bo_map(struct nouveau_bo *); | 1118 | extern int nouveau_bo_map(struct nouveau_bo *); |
| 1126 | extern void nouveau_bo_unmap(struct nouveau_bo *); | 1119 | extern void nouveau_bo_unmap(struct nouveau_bo *); |
| 1127 | extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t memtype); | 1120 | extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t type, |
| 1121 | uint32_t busy); | ||
| 1128 | extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); | 1122 | extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); |
| 1129 | extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); | 1123 | extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); |
| 1130 | extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); | 1124 | extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); |
| @@ -1168,6 +1162,10 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *, | |||
| 1168 | int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); | 1162 | int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); |
| 1169 | int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); | 1163 | int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); |
| 1170 | 1164 | ||
| 1165 | /* nv50_gpio.c */ | ||
| 1166 | int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); | ||
| 1167 | int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); | ||
| 1168 | |||
| 1171 | #ifndef ioread32_native | 1169 | #ifndef ioread32_native |
| 1172 | #ifdef __BIG_ENDIAN | 1170 | #ifdef __BIG_ENDIAN |
| 1173 | #define ioread16_native ioread16be | 1171 | #define ioread16_native ioread16be |
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h index bc4a24029ed1..9f28b94e479b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_encoder.h +++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h | |||
| @@ -47,6 +47,7 @@ struct nouveau_encoder { | |||
| 47 | 47 | ||
| 48 | union { | 48 | union { |
| 49 | struct { | 49 | struct { |
| 50 | int mc_unknown; | ||
| 50 | int dpcd_version; | 51 | int dpcd_version; |
| 51 | int link_nr; | 52 | int link_nr; |
| 52 | int link_bw; | 53 | int link_bw; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 0d22f66f1c79..1bc0b38a5167 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
| @@ -180,40 +180,35 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, | |||
| 180 | { | 180 | { |
| 181 | struct nouveau_bo *nvbo = gem->driver_private; | 181 | struct nouveau_bo *nvbo = gem->driver_private; |
| 182 | struct ttm_buffer_object *bo = &nvbo->bo; | 182 | struct ttm_buffer_object *bo = &nvbo->bo; |
| 183 | uint64_t flags; | 183 | uint32_t domains = valid_domains & |
| 184 | (write_domains ? write_domains : read_domains); | ||
| 185 | uint32_t pref_flags = 0, valid_flags = 0; | ||
| 184 | 186 | ||
| 185 | if (!valid_domains || (!read_domains && !write_domains)) | 187 | if (!domains) |
| 186 | return -EINVAL; | 188 | return -EINVAL; |
| 187 | 189 | ||
| 188 | if (write_domains) { | 190 | if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) |
| 189 | if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && | 191 | valid_flags |= TTM_PL_FLAG_VRAM; |
| 190 | (write_domains & NOUVEAU_GEM_DOMAIN_VRAM)) | 192 | |
| 191 | flags = TTM_PL_FLAG_VRAM; | 193 | if (valid_domains & NOUVEAU_GEM_DOMAIN_GART) |
| 192 | else | 194 | valid_flags |= TTM_PL_FLAG_TT; |
| 193 | if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) && | 195 | |
| 194 | (write_domains & NOUVEAU_GEM_DOMAIN_GART)) | 196 | if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) && |
| 195 | flags = TTM_PL_FLAG_TT; | 197 | bo->mem.mem_type == TTM_PL_VRAM) |
| 196 | else | 198 | pref_flags |= TTM_PL_FLAG_VRAM; |
| 197 | return -EINVAL; | 199 | |
| 198 | } else { | 200 | else if ((domains & NOUVEAU_GEM_DOMAIN_GART) && |
| 199 | if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && | 201 | bo->mem.mem_type == TTM_PL_TT) |
| 200 | (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) && | 202 | pref_flags |= TTM_PL_FLAG_TT; |
| 201 | bo->mem.mem_type == TTM_PL_VRAM) | 203 | |
| 202 | flags = TTM_PL_FLAG_VRAM; | 204 | else if (domains & NOUVEAU_GEM_DOMAIN_VRAM) |
| 203 | else | 205 | pref_flags |= TTM_PL_FLAG_VRAM; |
| 204 | if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) && | 206 | |
| 205 | (read_domains & NOUVEAU_GEM_DOMAIN_GART) && | 207 | else |
| 206 | bo->mem.mem_type == TTM_PL_TT) | 208 | pref_flags |= TTM_PL_FLAG_TT; |
| 207 | flags = TTM_PL_FLAG_TT; | 209 | |
| 208 | else | 210 | nouveau_bo_placement_set(nvbo, pref_flags, valid_flags); |
| 209 | if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && | ||
| 210 | (read_domains & NOUVEAU_GEM_DOMAIN_VRAM)) | ||
| 211 | flags = TTM_PL_FLAG_VRAM; | ||
| 212 | else | ||
| 213 | flags = TTM_PL_FLAG_TT; | ||
| 214 | } | ||
| 215 | 211 | ||
| 216 | nouveau_bo_placement_set(nvbo, flags); | ||
| 217 | return 0; | 212 | return 0; |
| 218 | } | 213 | } |
| 219 | 214 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 2bd59a92fee5..13e73cee4c44 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c | |||
| @@ -51,6 +51,7 @@ nouveau_irq_preinstall(struct drm_device *dev) | |||
| 51 | 51 | ||
| 52 | if (dev_priv->card_type == NV_50) { | 52 | if (dev_priv->card_type == NV_50) { |
| 53 | INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); | 53 | INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); |
| 54 | INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); | ||
| 54 | INIT_LIST_HEAD(&dev_priv->vbl_waiting); | 55 | INIT_LIST_HEAD(&dev_priv->vbl_waiting); |
| 55 | } | 56 | } |
| 56 | } | 57 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 2dc09dbd817d..775a7017af64 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
| @@ -347,6 +347,20 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, | |||
| 347 | return -EBUSY; | 347 | return -EBUSY; |
| 348 | } | 348 | } |
| 349 | 349 | ||
| 350 | nv_wr32(dev, 0x100c80, 0x00040001); | ||
| 351 | if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { | ||
| 352 | NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); | ||
| 353 | NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); | ||
| 354 | return -EBUSY; | ||
| 355 | } | ||
| 356 | |||
| 357 | nv_wr32(dev, 0x100c80, 0x00060001); | ||
| 358 | if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { | ||
| 359 | NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); | ||
| 360 | NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); | ||
| 361 | return -EBUSY; | ||
| 362 | } | ||
| 363 | |||
| 350 | return 0; | 364 | return 0; |
| 351 | } | 365 | } |
| 352 | 366 | ||
| @@ -387,6 +401,20 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) | |||
| 387 | if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { | 401 | if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { |
| 388 | NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); | 402 | NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); |
| 389 | NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); | 403 | NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); |
| 404 | return; | ||
| 405 | } | ||
| 406 | |||
| 407 | nv_wr32(dev, 0x100c80, 0x00040001); | ||
| 408 | if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { | ||
| 409 | NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); | ||
| 410 | NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); | ||
| 411 | return; | ||
| 412 | } | ||
| 413 | |||
| 414 | nv_wr32(dev, 0x100c80, 0x00060001); | ||
| 415 | if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { | ||
| 416 | NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); | ||
| 417 | NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); | ||
| 390 | } | 418 | } |
| 391 | } | 419 | } |
| 392 | 420 | ||
| @@ -449,9 +477,30 @@ void nouveau_mem_close(struct drm_device *dev) | |||
| 449 | } | 477 | } |
| 450 | } | 478 | } |
| 451 | 479 | ||
| 452 | /*XXX won't work on BSD because of pci_read_config_dword */ | ||
| 453 | static uint32_t | 480 | static uint32_t |
| 454 | nouveau_mem_fb_amount_igp(struct drm_device *dev) | 481 | nouveau_mem_detect_nv04(struct drm_device *dev) |
| 482 | { | ||
| 483 | uint32_t boot0 = nv_rd32(dev, NV03_BOOT_0); | ||
| 484 | |||
| 485 | if (boot0 & 0x00000100) | ||
| 486 | return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024; | ||
| 487 | |||
| 488 | switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) { | ||
| 489 | case NV04_BOOT_0_RAM_AMOUNT_32MB: | ||
| 490 | return 32 * 1024 * 1024; | ||
| 491 | case NV04_BOOT_0_RAM_AMOUNT_16MB: | ||
| 492 | return 16 * 1024 * 1024; | ||
| 493 | case NV04_BOOT_0_RAM_AMOUNT_8MB: | ||
| 494 | return 8 * 1024 * 1024; | ||
| 495 | case NV04_BOOT_0_RAM_AMOUNT_4MB: | ||
| 496 | return 4 * 1024 * 1024; | ||
| 497 | } | ||
| 498 | |||
| 499 | return 0; | ||
| 500 | } | ||
| 501 | |||
| 502 | static uint32_t | ||
| 503 | nouveau_mem_detect_nforce(struct drm_device *dev) | ||
| 455 | { | 504 | { |
| 456 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 505 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 457 | struct pci_dev *bridge; | 506 | struct pci_dev *bridge; |
| @@ -463,11 +512,11 @@ nouveau_mem_fb_amount_igp(struct drm_device *dev) | |||
| 463 | return 0; | 512 | return 0; |
| 464 | } | 513 | } |
| 465 | 514 | ||
| 466 | if (dev_priv->flags&NV_NFORCE) { | 515 | if (dev_priv->flags & NV_NFORCE) { |
| 467 | pci_read_config_dword(bridge, 0x7C, &mem); | 516 | pci_read_config_dword(bridge, 0x7C, &mem); |
| 468 | return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; | 517 | return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; |
| 469 | } else | 518 | } else |
| 470 | if (dev_priv->flags&NV_NFORCE2) { | 519 | if (dev_priv->flags & NV_NFORCE2) { |
| 471 | pci_read_config_dword(bridge, 0x84, &mem); | 520 | pci_read_config_dword(bridge, 0x84, &mem); |
| 472 | return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; | 521 | return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; |
| 473 | } | 522 | } |
| @@ -477,50 +526,32 @@ nouveau_mem_fb_amount_igp(struct drm_device *dev) | |||
| 477 | } | 526 | } |
| 478 | 527 | ||
| 479 | /* returns the amount of FB ram in bytes */ | 528 | /* returns the amount of FB ram in bytes */ |
| 480 | uint64_t nouveau_mem_fb_amount(struct drm_device *dev) | 529 | int |
| 530 | nouveau_mem_detect(struct drm_device *dev) | ||
| 481 | { | 531 | { |
| 482 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 532 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 483 | uint32_t boot0; | 533 | |
| 484 | 534 | if (dev_priv->card_type == NV_04) { | |
| 485 | switch (dev_priv->card_type) { | 535 | dev_priv->vram_size = nouveau_mem_detect_nv04(dev); |
| 486 | case NV_04: | 536 | } else |
| 487 | boot0 = nv_rd32(dev, NV03_BOOT_0); | 537 | if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { |
| 488 | if (boot0 & 0x00000100) | 538 | dev_priv->vram_size = nouveau_mem_detect_nforce(dev); |
| 489 | return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024; | 539 | } else { |
| 490 | 540 | dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); | |
| 491 | switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) { | 541 | dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK; |
| 492 | case NV04_BOOT_0_RAM_AMOUNT_32MB: | 542 | if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) |
| 493 | return 32 * 1024 * 1024; | 543 | dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; |
| 494 | case NV04_BOOT_0_RAM_AMOUNT_16MB: | ||
| 495 | return 16 * 1024 * 1024; | ||
| 496 | case NV04_BOOT_0_RAM_AMOUNT_8MB: | ||
| 497 | return 8 * 1024 * 1024; | ||
| 498 | case NV04_BOOT_0_RAM_AMOUNT_4MB: | ||
| 499 | return 4 * 1024 * 1024; | ||
| 500 | } | ||
| 501 | break; | ||
| 502 | case NV_10: | ||
| 503 | case NV_20: | ||
| 504 | case NV_30: | ||
| 505 | case NV_40: | ||
| 506 | case NV_50: | ||
| 507 | default: | ||
| 508 | if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { | ||
| 509 | return nouveau_mem_fb_amount_igp(dev); | ||
| 510 | } else { | ||
| 511 | uint64_t mem; | ||
| 512 | mem = (nv_rd32(dev, NV04_FIFO_DATA) & | ||
| 513 | NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> | ||
| 514 | NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT; | ||
| 515 | return mem * 1024 * 1024; | ||
| 516 | } | ||
| 517 | break; | ||
| 518 | } | 544 | } |
| 519 | 545 | ||
| 520 | NV_ERROR(dev, | 546 | NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); |
| 521 | "Unable to detect video ram size. Please report your setup to " | 547 | if (dev_priv->vram_sys_base) { |
| 522 | DRIVER_EMAIL "\n"); | 548 | NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", |
| 523 | return 0; | 549 | dev_priv->vram_sys_base); |
| 550 | } | ||
| 551 | |||
| 552 | if (dev_priv->vram_size) | ||
| 553 | return 0; | ||
| 554 | return -ENOMEM; | ||
| 524 | } | 555 | } |
| 525 | 556 | ||
| 526 | #if __OS_HAS_AGP | 557 | #if __OS_HAS_AGP |
| @@ -631,15 +662,12 @@ nouveau_mem_init(struct drm_device *dev) | |||
| 631 | spin_lock_init(&dev_priv->ttm.bo_list_lock); | 662 | spin_lock_init(&dev_priv->ttm.bo_list_lock); |
| 632 | spin_lock_init(&dev_priv->tile.lock); | 663 | spin_lock_init(&dev_priv->tile.lock); |
| 633 | 664 | ||
| 634 | dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); | 665 | dev_priv->fb_available_size = dev_priv->vram_size; |
| 635 | |||
| 636 | dev_priv->fb_mappable_pages = dev_priv->fb_available_size; | 666 | dev_priv->fb_mappable_pages = dev_priv->fb_available_size; |
| 637 | if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1)) | 667 | if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1)) |
| 638 | dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1); | 668 | dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1); |
| 639 | dev_priv->fb_mappable_pages >>= PAGE_SHIFT; | 669 | dev_priv->fb_mappable_pages >>= PAGE_SHIFT; |
| 640 | 670 | ||
| 641 | NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20)); | ||
| 642 | |||
| 643 | /* remove reserved space at end of vram from available amount */ | 671 | /* remove reserved space at end of vram from available amount */ |
| 644 | dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; | 672 | dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; |
| 645 | dev_priv->fb_aper_free = dev_priv->fb_available_size; | 673 | dev_priv->fb_aper_free = dev_priv->fb_available_size; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 86785b8d42ed..1d6ee8b55154 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
| @@ -172,6 +172,24 @@ nouveau_sgdma_unbind(struct ttm_backend *be) | |||
| 172 | } | 172 | } |
| 173 | dev_priv->engine.instmem.finish_access(nvbe->dev); | 173 | dev_priv->engine.instmem.finish_access(nvbe->dev); |
| 174 | 174 | ||
| 175 | if (dev_priv->card_type == NV_50) { | ||
| 176 | nv_wr32(dev, 0x100c80, 0x00050001); | ||
| 177 | if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { | ||
| 178 | NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); | ||
| 179 | NV_ERROR(dev, "0x100c80 = 0x%08x\n", | ||
| 180 | nv_rd32(dev, 0x100c80)); | ||
| 181 | return -EBUSY; | ||
| 182 | } | ||
| 183 | |||
| 184 | nv_wr32(dev, 0x100c80, 0x00000001); | ||
| 185 | if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { | ||
| 186 | NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); | ||
| 187 | NV_ERROR(dev, "0x100c80 = 0x%08x\n", | ||
| 188 | nv_rd32(dev, 0x100c80)); | ||
| 189 | return -EBUSY; | ||
| 190 | } | ||
| 191 | } | ||
| 192 | |||
| 175 | nvbe->bound = false; | 193 | nvbe->bound = false; |
| 176 | return 0; | 194 | return 0; |
| 177 | } | 195 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 10656a6be8e6..e1710640a278 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
| @@ -341,7 +341,7 @@ nouveau_card_init_channel(struct drm_device *dev) | |||
| 341 | 341 | ||
| 342 | gpuobj = NULL; | 342 | gpuobj = NULL; |
| 343 | ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, | 343 | ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, |
| 344 | 0, nouveau_mem_fb_amount(dev), | 344 | 0, dev_priv->vram_size, |
| 345 | NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, | 345 | NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, |
| 346 | &gpuobj); | 346 | &gpuobj); |
| 347 | if (ret) | 347 | if (ret) |
| @@ -427,6 +427,10 @@ nouveau_card_init(struct drm_device *dev) | |||
| 427 | goto out; | 427 | goto out; |
| 428 | } | 428 | } |
| 429 | 429 | ||
| 430 | ret = nouveau_mem_detect(dev); | ||
| 431 | if (ret) | ||
| 432 | goto out_bios; | ||
| 433 | |||
| 430 | ret = nouveau_gpuobj_early_init(dev); | 434 | ret = nouveau_gpuobj_early_init(dev); |
| 431 | if (ret) | 435 | if (ret) |
| 432 | goto out_bios; | 436 | goto out_bios; |
| @@ -502,7 +506,7 @@ nouveau_card_init(struct drm_device *dev) | |||
| 502 | else | 506 | else |
| 503 | ret = nv04_display_create(dev); | 507 | ret = nv04_display_create(dev); |
| 504 | if (ret) | 508 | if (ret) |
| 505 | goto out_irq; | 509 | goto out_channel; |
| 506 | } | 510 | } |
| 507 | 511 | ||
| 508 | ret = nouveau_backlight_init(dev); | 512 | ret = nouveau_backlight_init(dev); |
| @@ -516,6 +520,11 @@ nouveau_card_init(struct drm_device *dev) | |||
| 516 | 520 | ||
| 517 | return 0; | 521 | return 0; |
| 518 | 522 | ||
| 523 | out_channel: | ||
| 524 | if (dev_priv->channel) { | ||
| 525 | nouveau_channel_free(dev_priv->channel); | ||
| 526 | dev_priv->channel = NULL; | ||
| 527 | } | ||
| 519 | out_irq: | 528 | out_irq: |
| 520 | drm_irq_uninstall(dev); | 529 | drm_irq_uninstall(dev); |
| 521 | out_fifo: | 530 | out_fifo: |
| @@ -533,6 +542,7 @@ out_mc: | |||
| 533 | out_gpuobj: | 542 | out_gpuobj: |
| 534 | nouveau_gpuobj_takedown(dev); | 543 | nouveau_gpuobj_takedown(dev); |
| 535 | out_mem: | 544 | out_mem: |
| 545 | nouveau_sgdma_takedown(dev); | ||
| 536 | nouveau_mem_close(dev); | 546 | nouveau_mem_close(dev); |
| 537 | out_instmem: | 547 | out_instmem: |
| 538 | engine->instmem.takedown(dev); | 548 | engine->instmem.takedown(dev); |
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index 6b2ef4a9fce1..500ccfd3a0b8 100644 --- a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c | |||
| @@ -278,7 +278,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev) | |||
| 278 | default: | 278 | default: |
| 279 | nv_wr32(dev, 0x2230, 0); | 279 | nv_wr32(dev, 0x2230, 0); |
| 280 | nv_wr32(dev, NV40_PFIFO_RAMFC, | 280 | nv_wr32(dev, NV40_PFIFO_RAMFC, |
| 281 | ((nouveau_mem_fb_amount(dev) - 512 * 1024 + | 281 | ((dev_priv->vram_size - 512 * 1024 + |
| 282 | dev_priv->ramfc_offset) >> 16) | (3 << 16)); | 282 | dev_priv->ramfc_offset) >> 16) | (3 << 16)); |
| 283 | break; | 283 | break; |
| 284 | } | 284 | } |
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 53e8afe1dcd1..0616c96e4b67 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c | |||
| @@ -335,6 +335,27 @@ nv40_graph_init(struct drm_device *dev) | |||
| 335 | nv_wr32(dev, 0x400b38, 0x2ffff800); | 335 | nv_wr32(dev, 0x400b38, 0x2ffff800); |
| 336 | nv_wr32(dev, 0x400b3c, 0x00006000); | 336 | nv_wr32(dev, 0x400b3c, 0x00006000); |
| 337 | 337 | ||
| 338 | /* Tiling-related setup. */ | ||
| 339 | switch (dev_priv->chipset) { | ||
| 340 | case 0x44: | ||
| 341 | case 0x4a: | ||
| 342 | nv_wr32(dev, 0x400bc4, 0x1003d888); | ||
| 343 | nv_wr32(dev, 0x400bbc, 0xb7a7b500); | ||
| 344 | break; | ||
| 345 | case 0x46: | ||
| 346 | nv_wr32(dev, 0x400bc4, 0x0000e024); | ||
| 347 | nv_wr32(dev, 0x400bbc, 0xb7a7b520); | ||
| 348 | break; | ||
| 349 | case 0x4c: | ||
| 350 | case 0x4e: | ||
| 351 | case 0x67: | ||
| 352 | nv_wr32(dev, 0x400bc4, 0x1003d888); | ||
| 353 | nv_wr32(dev, 0x400bbc, 0xb7a7b540); | ||
| 354 | break; | ||
| 355 | default: | ||
| 356 | break; | ||
| 357 | } | ||
| 358 | |||
| 338 | /* Turn all the tiling regions off. */ | 359 | /* Turn all the tiling regions off. */ |
| 339 | for (i = 0; i < pfb->num_tiles; i++) | 360 | for (i = 0; i < pfb->num_tiles; i++) |
| 340 | nv40_graph_set_region_tiling(dev, i, 0, 0, 0); | 361 | nv40_graph_set_region_tiling(dev, i, 0, 0, 0); |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index fac6c88a2b1f..649db4c1b690 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
| @@ -143,7 +143,7 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) | |||
| 143 | } | 143 | } |
| 144 | 144 | ||
| 145 | ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19, | 145 | ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19, |
| 146 | 0, nouveau_mem_fb_amount(dev)); | 146 | 0, dev_priv->vram_size); |
| 147 | if (ret) { | 147 | if (ret) { |
| 148 | nv50_evo_channel_del(pchan); | 148 | nv50_evo_channel_del(pchan); |
| 149 | return ret; | 149 | return ret; |
| @@ -231,7 +231,7 @@ nv50_display_init(struct drm_device *dev) | |||
| 231 | /* This used to be in crtc unblank, but seems out of place there. */ | 231 | /* This used to be in crtc unblank, but seems out of place there. */ |
| 232 | nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); | 232 | nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); |
| 233 | /* RAM is clamped to 256 MiB. */ | 233 | /* RAM is clamped to 256 MiB. */ |
| 234 | ram_amount = nouveau_mem_fb_amount(dev); | 234 | ram_amount = dev_priv->vram_size; |
| 235 | NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount); | 235 | NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount); |
| 236 | if (ram_amount > 256*1024*1024) | 236 | if (ram_amount > 256*1024*1024) |
| 237 | ram_amount = 256*1024*1024; | 237 | ram_amount = 256*1024*1024; |
| @@ -529,8 +529,10 @@ int nv50_display_create(struct drm_device *dev) | |||
| 529 | } | 529 | } |
| 530 | 530 | ||
| 531 | ret = nv50_display_init(dev); | 531 | ret = nv50_display_init(dev); |
| 532 | if (ret) | 532 | if (ret) { |
| 533 | nv50_display_destroy(dev); | ||
| 533 | return ret; | 534 | return ret; |
| 535 | } | ||
| 534 | 536 | ||
| 535 | return 0; | 537 | return 0; |
| 536 | } | 538 | } |
| @@ -885,10 +887,12 @@ nv50_display_error_handler(struct drm_device *dev) | |||
| 885 | nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000); | 887 | nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000); |
| 886 | } | 888 | } |
| 887 | 889 | ||
| 888 | static void | 890 | void |
| 889 | nv50_display_irq_hotplug(struct drm_device *dev) | 891 | nv50_display_irq_hotplug_bh(struct work_struct *work) |
| 890 | { | 892 | { |
| 891 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 893 | struct drm_nouveau_private *dev_priv = |
| 894 | container_of(work, struct drm_nouveau_private, hpd_work); | ||
| 895 | struct drm_device *dev = dev_priv->dev; | ||
| 892 | struct drm_connector *connector; | 896 | struct drm_connector *connector; |
| 893 | const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; | 897 | const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; |
| 894 | uint32_t unplug_mask, plug_mask, change_mask; | 898 | uint32_t unplug_mask, plug_mask, change_mask; |
| @@ -949,8 +953,10 @@ nv50_display_irq_handler(struct drm_device *dev) | |||
| 949 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 953 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 950 | uint32_t delayed = 0; | 954 | uint32_t delayed = 0; |
| 951 | 955 | ||
| 952 | while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) | 956 | if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { |
| 953 | nv50_display_irq_hotplug(dev); | 957 | if (!work_pending(&dev_priv->hpd_work)) |
| 958 | queue_work(dev_priv->wq, &dev_priv->hpd_work); | ||
| 959 | } | ||
| 954 | 960 | ||
| 955 | while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { | 961 | while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { |
| 956 | uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); | 962 | uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h index 3ae8d0725f63..581d405ac014 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.h +++ b/drivers/gpu/drm/nouveau/nv50_display.h | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | 37 | ||
| 38 | void nv50_display_irq_handler(struct drm_device *dev); | 38 | void nv50_display_irq_handler(struct drm_device *dev); |
| 39 | void nv50_display_irq_handler_bh(struct work_struct *work); | 39 | void nv50_display_irq_handler_bh(struct work_struct *work); |
| 40 | void nv50_display_irq_hotplug_bh(struct work_struct *work); | ||
| 40 | int nv50_display_init(struct drm_device *dev); | 41 | int nv50_display_init(struct drm_device *dev); |
| 41 | int nv50_display_create(struct drm_device *dev); | 42 | int nv50_display_create(struct drm_device *dev); |
| 42 | int nv50_display_destroy(struct drm_device *dev); | 43 | int nv50_display_destroy(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 25a3cd8794f9..a8c70e7e9184 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c | |||
| @@ -157,8 +157,11 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
| 157 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 157 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 158 | struct nouveau_channel *chan = dev_priv->channel; | 158 | struct nouveau_channel *chan = dev_priv->channel; |
| 159 | struct nouveau_gpuobj *eng2d = NULL; | 159 | struct nouveau_gpuobj *eng2d = NULL; |
| 160 | uint64_t fb; | ||
| 160 | int ret, format; | 161 | int ret, format; |
| 161 | 162 | ||
| 163 | fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base; | ||
| 164 | |||
| 162 | switch (info->var.bits_per_pixel) { | 165 | switch (info->var.bits_per_pixel) { |
| 163 | case 8: | 166 | case 8: |
| 164 | format = 0xf3; | 167 | format = 0xf3; |
| @@ -248,9 +251,8 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
| 248 | OUT_RING(chan, info->fix.line_length); | 251 | OUT_RING(chan, info->fix.line_length); |
| 249 | OUT_RING(chan, info->var.xres_virtual); | 252 | OUT_RING(chan, info->var.xres_virtual); |
| 250 | OUT_RING(chan, info->var.yres_virtual); | 253 | OUT_RING(chan, info->var.yres_virtual); |
| 251 | OUT_RING(chan, 0); | 254 | OUT_RING(chan, upper_32_bits(fb)); |
| 252 | OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + | 255 | OUT_RING(chan, lower_32_bits(fb)); |
| 253 | dev_priv->vm_vram_base); | ||
| 254 | BEGIN_RING(chan, NvSub2D, 0x0230, 2); | 256 | BEGIN_RING(chan, NvSub2D, 0x0230, 2); |
| 255 | OUT_RING(chan, format); | 257 | OUT_RING(chan, format); |
| 256 | OUT_RING(chan, 1); | 258 | OUT_RING(chan, 1); |
| @@ -258,9 +260,8 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
| 258 | OUT_RING(chan, info->fix.line_length); | 260 | OUT_RING(chan, info->fix.line_length); |
| 259 | OUT_RING(chan, info->var.xres_virtual); | 261 | OUT_RING(chan, info->var.xres_virtual); |
| 260 | OUT_RING(chan, info->var.yres_virtual); | 262 | OUT_RING(chan, info->var.yres_virtual); |
| 261 | OUT_RING(chan, 0); | 263 | OUT_RING(chan, upper_32_bits(fb)); |
| 262 | OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + | 264 | OUT_RING(chan, lower_32_bits(fb)); |
| 263 | dev_priv->vm_vram_base); | ||
| 264 | 265 | ||
| 265 | return 0; | 266 | return 0; |
| 266 | } | 267 | } |
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c new file mode 100644 index 000000000000..c61782b314e7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_gpio.c | |||
| @@ -0,0 +1,76 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2010 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include "drmP.h" | ||
| 26 | #include "nouveau_drv.h" | ||
| 27 | #include "nouveau_hw.h" | ||
| 28 | |||
| 29 | static int | ||
| 30 | nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) | ||
| 31 | { | ||
| 32 | const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; | ||
| 33 | |||
| 34 | if (gpio->line >= 32) | ||
| 35 | return -EINVAL; | ||
| 36 | |||
| 37 | *reg = nv50_gpio_reg[gpio->line >> 3]; | ||
| 38 | *shift = (gpio->line & 7) << 2; | ||
| 39 | return 0; | ||
| 40 | } | ||
| 41 | |||
| 42 | int | ||
| 43 | nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag) | ||
| 44 | { | ||
| 45 | struct dcb_gpio_entry *gpio; | ||
| 46 | uint32_t r, s, v; | ||
| 47 | |||
| 48 | gpio = nouveau_bios_gpio_entry(dev, tag); | ||
| 49 | if (!gpio) | ||
| 50 | return -ENOENT; | ||
| 51 | |||
| 52 | if (nv50_gpio_location(gpio, &r, &s)) | ||
| 53 | return -EINVAL; | ||
| 54 | |||
| 55 | v = nv_rd32(dev, r) >> (s + 2); | ||
| 56 | return ((v & 1) == (gpio->state[1] & 1)); | ||
| 57 | } | ||
| 58 | |||
| 59 | int | ||
| 60 | nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) | ||
| 61 | { | ||
| 62 | struct dcb_gpio_entry *gpio; | ||
| 63 | uint32_t r, s, v; | ||
| 64 | |||
| 65 | gpio = nouveau_bios_gpio_entry(dev, tag); | ||
| 66 | if (!gpio) | ||
| 67 | return -ENOENT; | ||
| 68 | |||
| 69 | if (nv50_gpio_location(gpio, &r, &s)) | ||
| 70 | return -EINVAL; | ||
| 71 | |||
| 72 | v = nv_rd32(dev, r) & ~(0x3 << s); | ||
| 73 | v |= (gpio->state[state] ^ 2) << s; | ||
| 74 | nv_wr32(dev, r, v); | ||
| 75 | return 0; | ||
| 76 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index c62b33a02f88..b203d06f601f 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
| @@ -410,9 +410,10 @@ struct nouveau_pgraph_object_class nv50_graph_grclass[] = { | |||
| 410 | { 0x5039, false, NULL }, /* m2mf */ | 410 | { 0x5039, false, NULL }, /* m2mf */ |
| 411 | { 0x502d, false, NULL }, /* 2d */ | 411 | { 0x502d, false, NULL }, /* 2d */ |
| 412 | { 0x50c0, false, NULL }, /* compute */ | 412 | { 0x50c0, false, NULL }, /* compute */ |
| 413 | { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */ | ||
| 413 | { 0x5097, false, NULL }, /* tesla (nv50) */ | 414 | { 0x5097, false, NULL }, /* tesla (nv50) */ |
| 414 | { 0x8297, false, NULL }, /* tesla (nv80/nv90) */ | 415 | { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */ |
| 415 | { 0x8397, false, NULL }, /* tesla (nva0) */ | 416 | { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */ |
| 416 | { 0x8597, false, NULL }, /* tesla (nva8) */ | 417 | { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ |
| 417 | {} | 418 | {} |
| 418 | }; | 419 | }; |
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c index 546b31949a30..42a8fb20c1e6 100644 --- a/drivers/gpu/drm/nouveau/nv50_grctx.c +++ b/drivers/gpu/drm/nouveau/nv50_grctx.c | |||
| @@ -55,12 +55,12 @@ | |||
| 55 | #define CP_FLAG_AUTO_LOAD ((2 * 32) + 5) | 55 | #define CP_FLAG_AUTO_LOAD ((2 * 32) + 5) |
| 56 | #define CP_FLAG_AUTO_LOAD_NOT_PENDING 0 | 56 | #define CP_FLAG_AUTO_LOAD_NOT_PENDING 0 |
| 57 | #define CP_FLAG_AUTO_LOAD_PENDING 1 | 57 | #define CP_FLAG_AUTO_LOAD_PENDING 1 |
| 58 | #define CP_FLAG_NEWCTX ((2 * 32) + 10) | ||
| 59 | #define CP_FLAG_NEWCTX_BUSY 0 | ||
| 60 | #define CP_FLAG_NEWCTX_DONE 1 | ||
| 58 | #define CP_FLAG_XFER ((2 * 32) + 11) | 61 | #define CP_FLAG_XFER ((2 * 32) + 11) |
| 59 | #define CP_FLAG_XFER_IDLE 0 | 62 | #define CP_FLAG_XFER_IDLE 0 |
| 60 | #define CP_FLAG_XFER_BUSY 1 | 63 | #define CP_FLAG_XFER_BUSY 1 |
| 61 | #define CP_FLAG_NEWCTX ((2 * 32) + 12) | ||
| 62 | #define CP_FLAG_NEWCTX_BUSY 0 | ||
| 63 | #define CP_FLAG_NEWCTX_DONE 1 | ||
| 64 | #define CP_FLAG_ALWAYS ((2 * 32) + 13) | 64 | #define CP_FLAG_ALWAYS ((2 * 32) + 13) |
| 65 | #define CP_FLAG_ALWAYS_FALSE 0 | 65 | #define CP_FLAG_ALWAYS_FALSE 0 |
| 66 | #define CP_FLAG_ALWAYS_TRUE 1 | 66 | #define CP_FLAG_ALWAYS_TRUE 1 |
| @@ -177,6 +177,7 @@ nv50_grctx_init(struct nouveau_grctx *ctx) | |||
| 177 | case 0x96: | 177 | case 0x96: |
| 178 | case 0x98: | 178 | case 0x98: |
| 179 | case 0xa0: | 179 | case 0xa0: |
| 180 | case 0xa3: | ||
| 180 | case 0xa5: | 181 | case 0xa5: |
| 181 | case 0xa8: | 182 | case 0xa8: |
| 182 | case 0xaa: | 183 | case 0xaa: |
| @@ -364,6 +365,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) | |||
| 364 | case 0xac: | 365 | case 0xac: |
| 365 | gr_def(ctx, 0x401c00, 0x042500df); | 366 | gr_def(ctx, 0x401c00, 0x042500df); |
| 366 | break; | 367 | break; |
| 368 | case 0xa3: | ||
| 367 | case 0xa5: | 369 | case 0xa5: |
| 368 | case 0xa8: | 370 | case 0xa8: |
| 369 | gr_def(ctx, 0x401c00, 0x142500df); | 371 | gr_def(ctx, 0x401c00, 0x142500df); |
| @@ -418,6 +420,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) | |||
| 418 | break; | 420 | break; |
| 419 | case 0x84: | 421 | case 0x84: |
| 420 | case 0xa0: | 422 | case 0xa0: |
| 423 | case 0xa3: | ||
| 421 | case 0xa5: | 424 | case 0xa5: |
| 422 | case 0xa8: | 425 | case 0xa8: |
| 423 | case 0xaa: | 426 | case 0xaa: |
| @@ -792,6 +795,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) | |||
| 792 | case 0xa5: | 795 | case 0xa5: |
| 793 | gr_def(ctx, offset + 0x1c, 0x310c0000); | 796 | gr_def(ctx, offset + 0x1c, 0x310c0000); |
| 794 | break; | 797 | break; |
| 798 | case 0xa3: | ||
| 795 | case 0xa8: | 799 | case 0xa8: |
| 796 | case 0xaa: | 800 | case 0xaa: |
| 797 | case 0xac: | 801 | case 0xac: |
| @@ -859,6 +863,8 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) | |||
| 859 | else | 863 | else |
| 860 | gr_def(ctx, offset + 0x8, 0x05010202); | 864 | gr_def(ctx, offset + 0x8, 0x05010202); |
| 861 | gr_def(ctx, offset + 0xc, 0x00030201); | 865 | gr_def(ctx, offset + 0xc, 0x00030201); |
| 866 | if (dev_priv->chipset == 0xa3) | ||
| 867 | cp_ctx(ctx, base + 0x36c, 1); | ||
| 862 | 868 | ||
| 863 | cp_ctx(ctx, base + 0x400, 2); | 869 | cp_ctx(ctx, base + 0x400, 2); |
| 864 | gr_def(ctx, base + 0x404, 0x00000040); | 870 | gr_def(ctx, base + 0x404, 0x00000040); |
| @@ -1159,7 +1165,9 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) | |||
| 1159 | nv50_graph_construct_gene_unk8(ctx); | 1165 | nv50_graph_construct_gene_unk8(ctx); |
| 1160 | if (dev_priv->chipset == 0xa0) | 1166 | if (dev_priv->chipset == 0xa0) |
| 1161 | xf_emit(ctx, 0x189, 0); | 1167 | xf_emit(ctx, 0x189, 0); |
| 1162 | else if (dev_priv->chipset < 0xa8) | 1168 | else if (dev_priv->chipset == 0xa3) |
| 1169 | xf_emit(ctx, 0xd5, 0); | ||
| 1170 | else if (dev_priv->chipset == 0xa5) | ||
| 1163 | xf_emit(ctx, 0x99, 0); | 1171 | xf_emit(ctx, 0x99, 0); |
| 1164 | else if (dev_priv->chipset == 0xaa) | 1172 | else if (dev_priv->chipset == 0xaa) |
| 1165 | xf_emit(ctx, 0x65, 0); | 1173 | xf_emit(ctx, 0x65, 0); |
| @@ -1197,6 +1205,8 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) | |||
| 1197 | ctx->ctxvals_pos = offset + 4; | 1205 | ctx->ctxvals_pos = offset + 4; |
| 1198 | if (dev_priv->chipset == 0xa0) | 1206 | if (dev_priv->chipset == 0xa0) |
| 1199 | xf_emit(ctx, 0xa80, 0); | 1207 | xf_emit(ctx, 0xa80, 0); |
| 1208 | else if (dev_priv->chipset == 0xa3) | ||
| 1209 | xf_emit(ctx, 0xa7c, 0); | ||
| 1200 | else | 1210 | else |
| 1201 | xf_emit(ctx, 0xa7a, 0); | 1211 | xf_emit(ctx, 0xa7a, 0); |
| 1202 | xf_emit(ctx, 1, 0x3fffff); | 1212 | xf_emit(ctx, 1, 0x3fffff); |
| @@ -1341,6 +1351,7 @@ nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx) | |||
| 1341 | xf_emit(ctx, 0x942, 0); | 1351 | xf_emit(ctx, 0x942, 0); |
| 1342 | break; | 1352 | break; |
| 1343 | case 0xa0: | 1353 | case 0xa0: |
| 1354 | case 0xa3: | ||
| 1344 | xf_emit(ctx, 0x2042, 0); | 1355 | xf_emit(ctx, 0x2042, 0); |
| 1345 | break; | 1356 | break; |
| 1346 | case 0xa5: | 1357 | case 0xa5: |
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index de1f5b0062c5..5f21df31f3aa 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
| @@ -63,9 +63,10 @@ nv50_instmem_init(struct drm_device *dev) | |||
| 63 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 63 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 64 | struct nouveau_channel *chan; | 64 | struct nouveau_channel *chan; |
| 65 | uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; | 65 | uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; |
| 66 | uint32_t save_nv001700; | ||
| 67 | uint64_t v; | ||
| 66 | struct nv50_instmem_priv *priv; | 68 | struct nv50_instmem_priv *priv; |
| 67 | int ret, i; | 69 | int ret, i; |
| 68 | uint32_t v, save_nv001700; | ||
| 69 | 70 | ||
| 70 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 71 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
| 71 | if (!priv) | 72 | if (!priv) |
| @@ -76,17 +77,12 @@ nv50_instmem_init(struct drm_device *dev) | |||
| 76 | for (i = 0x1700; i <= 0x1710; i += 4) | 77 | for (i = 0x1700; i <= 0x1710; i += 4) |
| 77 | priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); | 78 | priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); |
| 78 | 79 | ||
| 79 | if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) | ||
| 80 | dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; | ||
| 81 | else | ||
| 82 | dev_priv->vram_sys_base = 0; | ||
| 83 | |||
| 84 | /* Reserve the last MiB of VRAM, we should probably try to avoid | 80 | /* Reserve the last MiB of VRAM, we should probably try to avoid |
| 85 | * setting up the below tables over the top of the VBIOS image at | 81 | * setting up the below tables over the top of the VBIOS image at |
| 86 | * some point. | 82 | * some point. |
| 87 | */ | 83 | */ |
| 88 | dev_priv->ramin_rsvd_vram = 1 << 20; | 84 | dev_priv->ramin_rsvd_vram = 1 << 20; |
| 89 | c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram; | 85 | c_offset = dev_priv->vram_size - dev_priv->ramin_rsvd_vram; |
| 90 | c_size = 128 << 10; | 86 | c_size = 128 << 10; |
| 91 | c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; | 87 | c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; |
| 92 | c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20; | 88 | c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20; |
| @@ -106,7 +102,7 @@ nv50_instmem_init(struct drm_device *dev) | |||
| 106 | dev_priv->vm_gart_size = NV50_VM_BLOCK; | 102 | dev_priv->vm_gart_size = NV50_VM_BLOCK; |
| 107 | 103 | ||
| 108 | dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size; | 104 | dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size; |
| 109 | dev_priv->vm_vram_size = nouveau_mem_fb_amount(dev); | 105 | dev_priv->vm_vram_size = dev_priv->vram_size; |
| 110 | if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM) | 106 | if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM) |
| 111 | dev_priv->vm_vram_size = NV50_VM_MAX_VRAM; | 107 | dev_priv->vm_vram_size = NV50_VM_MAX_VRAM; |
| 112 | dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK); | 108 | dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK); |
| @@ -189,8 +185,8 @@ nv50_instmem_init(struct drm_device *dev) | |||
| 189 | 185 | ||
| 190 | i = 0; | 186 | i = 0; |
| 191 | while (v < dev_priv->vram_sys_base + c_offset + c_size) { | 187 | while (v < dev_priv->vram_sys_base + c_offset + c_size) { |
| 192 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v); | 188 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v)); |
| 193 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); | 189 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v)); |
| 194 | v += 0x1000; | 190 | v += 0x1000; |
| 195 | i += 8; | 191 | i += 8; |
| 196 | } | 192 | } |
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index c2fff543b06f..0c68698f23df 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c | |||
| @@ -211,7 +211,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
| 211 | mode_ctl = 0x0200; | 211 | mode_ctl = 0x0200; |
| 212 | break; | 212 | break; |
| 213 | case OUTPUT_DP: | 213 | case OUTPUT_DP: |
| 214 | mode_ctl |= 0x00050000; | 214 | mode_ctl |= (nv_encoder->dp.mc_unknown << 16); |
| 215 | if (nv_encoder->dcb->sorconf.link & 1) | 215 | if (nv_encoder->dcb->sorconf.link & 1) |
| 216 | mode_ctl |= 0x00000800; | 216 | mode_ctl |= 0x00000800; |
| 217 | else | 217 | else |
| @@ -274,6 +274,7 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = { | |||
| 274 | int | 274 | int |
| 275 | nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) | 275 | nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) |
| 276 | { | 276 | { |
| 277 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 277 | struct nouveau_encoder *nv_encoder = NULL; | 278 | struct nouveau_encoder *nv_encoder = NULL; |
| 278 | struct drm_encoder *encoder; | 279 | struct drm_encoder *encoder; |
| 279 | bool dum; | 280 | bool dum; |
| @@ -319,5 +320,27 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) | |||
| 319 | encoder->possible_crtcs = entry->heads; | 320 | encoder->possible_crtcs = entry->heads; |
| 320 | encoder->possible_clones = 0; | 321 | encoder->possible_clones = 0; |
| 321 | 322 | ||
| 323 | if (nv_encoder->dcb->type == OUTPUT_DP) { | ||
| 324 | uint32_t mc, or = nv_encoder->or; | ||
| 325 | |||
| 326 | if (dev_priv->chipset < 0x90 || | ||
| 327 | dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0) | ||
| 328 | mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or)); | ||
| 329 | else | ||
| 330 | mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or)); | ||
| 331 | |||
| 332 | switch ((mc & 0x00000f00) >> 8) { | ||
| 333 | case 8: | ||
| 334 | case 9: | ||
| 335 | nv_encoder->dp.mc_unknown = (mc & 0x000f0000) >> 16; | ||
| 336 | break; | ||
| 337 | default: | ||
| 338 | break; | ||
| 339 | } | ||
| 340 | |||
| 341 | if (!nv_encoder->dp.mc_unknown) | ||
| 342 | nv_encoder->dp.mc_unknown = 5; | ||
| 343 | } | ||
| 344 | |||
| 322 | return 0; | 345 | return 0; |
| 323 | } | 346 | } |
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 07b7ebf1f466..1d569830ed99 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
| @@ -908,11 +908,16 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) | |||
| 908 | uint8_t attr = U8((*ptr)++), shift; | 908 | uint8_t attr = U8((*ptr)++), shift; |
| 909 | uint32_t saved, dst; | 909 | uint32_t saved, dst; |
| 910 | int dptr = *ptr; | 910 | int dptr = *ptr; |
| 911 | uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; | ||
| 911 | SDEBUG(" dst: "); | 912 | SDEBUG(" dst: "); |
| 912 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 913 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
| 914 | /* op needs the full dst value */ | ||
| 915 | dst = saved; | ||
| 913 | shift = atom_get_src(ctx, attr, ptr); | 916 | shift = atom_get_src(ctx, attr, ptr); |
| 914 | SDEBUG(" shift: %d\n", shift); | 917 | SDEBUG(" shift: %d\n", shift); |
| 915 | dst <<= shift; | 918 | dst <<= shift; |
| 919 | dst &= atom_arg_mask[dst_align]; | ||
| 920 | dst >>= atom_arg_shift[dst_align]; | ||
| 916 | SDEBUG(" dst: "); | 921 | SDEBUG(" dst: "); |
| 917 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | 922 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
| 918 | } | 923 | } |
| @@ -922,11 +927,16 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) | |||
| 922 | uint8_t attr = U8((*ptr)++), shift; | 927 | uint8_t attr = U8((*ptr)++), shift; |
| 923 | uint32_t saved, dst; | 928 | uint32_t saved, dst; |
| 924 | int dptr = *ptr; | 929 | int dptr = *ptr; |
| 930 | uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; | ||
| 925 | SDEBUG(" dst: "); | 931 | SDEBUG(" dst: "); |
| 926 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 932 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
| 933 | /* op needs the full dst value */ | ||
| 934 | dst = saved; | ||
| 927 | shift = atom_get_src(ctx, attr, ptr); | 935 | shift = atom_get_src(ctx, attr, ptr); |
| 928 | SDEBUG(" shift: %d\n", shift); | 936 | SDEBUG(" shift: %d\n", shift); |
| 929 | dst >>= shift; | 937 | dst >>= shift; |
| 938 | dst &= atom_arg_mask[dst_align]; | ||
| 939 | dst >>= atom_arg_shift[dst_align]; | ||
| 930 | SDEBUG(" dst: "); | 940 | SDEBUG(" dst: "); |
| 931 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | 941 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
| 932 | } | 942 | } |
| @@ -1137,6 +1147,7 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32 | |||
| 1137 | int len, ws, ps, ptr; | 1147 | int len, ws, ps, ptr; |
| 1138 | unsigned char op; | 1148 | unsigned char op; |
| 1139 | atom_exec_context ectx; | 1149 | atom_exec_context ectx; |
| 1150 | int ret = 0; | ||
| 1140 | 1151 | ||
| 1141 | if (!base) | 1152 | if (!base) |
| 1142 | return -EINVAL; | 1153 | return -EINVAL; |
| @@ -1169,7 +1180,8 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32 | |||
| 1169 | if (ectx.abort) { | 1180 | if (ectx.abort) { |
| 1170 | DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n", | 1181 | DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n", |
| 1171 | base, len, ws, ps, ptr - 1); | 1182 | base, len, ws, ps, ptr - 1); |
| 1172 | return -EINVAL; | 1183 | ret = -EINVAL; |
| 1184 | goto free; | ||
| 1173 | } | 1185 | } |
| 1174 | 1186 | ||
| 1175 | if (op < ATOM_OP_CNT && op > 0) | 1187 | if (op < ATOM_OP_CNT && op > 0) |
| @@ -1184,9 +1196,10 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32 | |||
| 1184 | debug_depth--; | 1196 | debug_depth--; |
| 1185 | SDEBUG("<<\n"); | 1197 | SDEBUG("<<\n"); |
| 1186 | 1198 | ||
| 1199 | free: | ||
| 1187 | if (ws) | 1200 | if (ws) |
| 1188 | kfree(ectx.ws); | 1201 | kfree(ectx.ws); |
| 1189 | return 0; | 1202 | return ret; |
| 1190 | } | 1203 | } |
| 1191 | 1204 | ||
| 1192 | int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) | 1205 | int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index fd4ef6d18849..a87990b3ae84 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -521,6 +521,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 521 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | 521 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ |
| 522 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) | 522 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) |
| 523 | adjusted_clock = mode->clock * 2; | 523 | adjusted_clock = mode->clock * 2; |
| 524 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { | ||
| 525 | pll->algo = PLL_ALGO_LEGACY; | ||
| 526 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; | ||
| 527 | } | ||
| 524 | } else { | 528 | } else { |
| 525 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 529 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
| 526 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; | 530 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index c9580497ede4..d7388fdb6d0b 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -2891,7 +2891,7 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, | |||
| 2891 | { | 2891 | { |
| 2892 | struct radeon_bo *robj; | 2892 | struct radeon_bo *robj; |
| 2893 | unsigned long size; | 2893 | unsigned long size; |
| 2894 | unsigned u, i, w, h; | 2894 | unsigned u, i, w, h, d; |
| 2895 | int ret; | 2895 | int ret; |
| 2896 | 2896 | ||
| 2897 | for (u = 0; u < track->num_texture; u++) { | 2897 | for (u = 0; u < track->num_texture; u++) { |
| @@ -2923,20 +2923,25 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, | |||
| 2923 | h = h / (1 << i); | 2923 | h = h / (1 << i); |
| 2924 | if (track->textures[u].roundup_h) | 2924 | if (track->textures[u].roundup_h) |
| 2925 | h = roundup_pow_of_two(h); | 2925 | h = roundup_pow_of_two(h); |
| 2926 | if (track->textures[u].tex_coord_type == 1) { | ||
| 2927 | d = (1 << track->textures[u].txdepth) / (1 << i); | ||
| 2928 | if (!d) | ||
| 2929 | d = 1; | ||
| 2930 | } else { | ||
| 2931 | d = 1; | ||
| 2932 | } | ||
| 2926 | if (track->textures[u].compress_format) { | 2933 | if (track->textures[u].compress_format) { |
| 2927 | 2934 | ||
| 2928 | size += r100_track_compress_size(track->textures[u].compress_format, w, h); | 2935 | size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d; |
| 2929 | /* compressed textures are block based */ | 2936 | /* compressed textures are block based */ |
| 2930 | } else | 2937 | } else |
| 2931 | size += w * h; | 2938 | size += w * h * d; |
| 2932 | } | 2939 | } |
| 2933 | size *= track->textures[u].cpp; | 2940 | size *= track->textures[u].cpp; |
| 2934 | 2941 | ||
| 2935 | switch (track->textures[u].tex_coord_type) { | 2942 | switch (track->textures[u].tex_coord_type) { |
| 2936 | case 0: | 2943 | case 0: |
| 2937 | break; | ||
| 2938 | case 1: | 2944 | case 1: |
| 2939 | size *= (1 << track->textures[u].txdepth); | ||
| 2940 | break; | 2945 | break; |
| 2941 | case 2: | 2946 | case 2: |
| 2942 | if (track->separate_cube) { | 2947 | if (track->separate_cube) { |
| @@ -3007,7 +3012,11 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
| 3007 | } | 3012 | } |
| 3008 | } | 3013 | } |
| 3009 | prim_walk = (track->vap_vf_cntl >> 4) & 0x3; | 3014 | prim_walk = (track->vap_vf_cntl >> 4) & 0x3; |
| 3010 | nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; | 3015 | if (track->vap_vf_cntl & (1 << 14)) { |
| 3016 | nverts = track->vap_alt_nverts; | ||
| 3017 | } else { | ||
| 3018 | nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; | ||
| 3019 | } | ||
| 3011 | switch (prim_walk) { | 3020 | switch (prim_walk) { |
| 3012 | case 1: | 3021 | case 1: |
| 3013 | for (i = 0; i < track->num_arrays; i++) { | 3022 | for (i = 0; i < track->num_arrays; i++) { |
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index b27a6999d219..fadfe68de9cc 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h | |||
| @@ -64,6 +64,7 @@ struct r100_cs_track { | |||
| 64 | unsigned maxy; | 64 | unsigned maxy; |
| 65 | unsigned vtx_size; | 65 | unsigned vtx_size; |
| 66 | unsigned vap_vf_cntl; | 66 | unsigned vap_vf_cntl; |
| 67 | unsigned vap_alt_nverts; | ||
| 67 | unsigned immd_dwords; | 68 | unsigned immd_dwords; |
| 68 | unsigned num_arrays; | 69 | unsigned num_arrays; |
| 69 | unsigned max_indx; | 70 | unsigned max_indx; |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 561048a7c0a4..bd75f99bd65e 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
| @@ -325,11 +325,12 @@ void r300_gpu_init(struct radeon_device *rdev) | |||
| 325 | 325 | ||
| 326 | r100_hdp_reset(rdev); | 326 | r100_hdp_reset(rdev); |
| 327 | /* FIXME: rv380 one pipes ? */ | 327 | /* FIXME: rv380 one pipes ? */ |
| 328 | if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) { | 328 | if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) || |
| 329 | (rdev->family == CHIP_R350)) { | ||
| 329 | /* r300,r350 */ | 330 | /* r300,r350 */ |
| 330 | rdev->num_gb_pipes = 2; | 331 | rdev->num_gb_pipes = 2; |
| 331 | } else { | 332 | } else { |
| 332 | /* rv350,rv370,rv380 */ | 333 | /* rv350,rv370,rv380,r300 AD */ |
| 333 | rdev->num_gb_pipes = 1; | 334 | rdev->num_gb_pipes = 1; |
| 334 | } | 335 | } |
| 335 | rdev->num_z_pipes = 1; | 336 | rdev->num_z_pipes = 1; |
| @@ -729,6 +730,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
| 729 | /* VAP_VF_MAX_VTX_INDX */ | 730 | /* VAP_VF_MAX_VTX_INDX */ |
| 730 | track->max_indx = idx_value & 0x00FFFFFFUL; | 731 | track->max_indx = idx_value & 0x00FFFFFFUL; |
| 731 | break; | 732 | break; |
| 733 | case 0x2088: | ||
| 734 | /* VAP_ALT_NUM_VERTICES - only valid on r500 */ | ||
| 735 | if (p->rdev->family < CHIP_RV515) | ||
| 736 | goto fail; | ||
| 737 | track->vap_alt_nverts = idx_value & 0xFFFFFF; | ||
| 738 | break; | ||
| 732 | case 0x43E4: | 739 | case 0x43E4: |
| 733 | /* SC_SCISSOR1 */ | 740 | /* SC_SCISSOR1 */ |
| 734 | track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; | 741 | track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; |
| @@ -766,7 +773,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
| 766 | tmp = idx_value & ~(0x7 << 16); | 773 | tmp = idx_value & ~(0x7 << 16); |
| 767 | tmp |= tile_flags; | 774 | tmp |= tile_flags; |
| 768 | ib[idx] = tmp; | 775 | ib[idx] = tmp; |
| 769 | |||
| 770 | i = (reg - 0x4E38) >> 2; | 776 | i = (reg - 0x4E38) >> 2; |
| 771 | track->cb[i].pitch = idx_value & 0x3FFE; | 777 | track->cb[i].pitch = idx_value & 0x3FFE; |
| 772 | switch (((idx_value >> 21) & 0xF)) { | 778 | switch (((idx_value >> 21) & 0xF)) { |
| @@ -1051,11 +1057,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
| 1051 | break; | 1057 | break; |
| 1052 | /* fallthrough do not move */ | 1058 | /* fallthrough do not move */ |
| 1053 | default: | 1059 | default: |
| 1054 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | 1060 | goto fail; |
| 1055 | reg, idx); | ||
| 1056 | return -EINVAL; | ||
| 1057 | } | 1061 | } |
| 1058 | return 0; | 1062 | return 0; |
| 1063 | fail: | ||
| 1064 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | ||
| 1065 | reg, idx); | ||
| 1066 | return -EINVAL; | ||
| 1059 | } | 1067 | } |
| 1060 | 1068 | ||
| 1061 | static int r300_packet3_check(struct radeon_cs_parser *p, | 1069 | static int r300_packet3_check(struct radeon_cs_parser *p, |
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index dac7042b797e..1d898051c631 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c | |||
| @@ -35,7 +35,7 @@ | |||
| 35 | */ | 35 | */ |
| 36 | static int r600_audio_chipset_supported(struct radeon_device *rdev) | 36 | static int r600_audio_chipset_supported(struct radeon_device *rdev) |
| 37 | { | 37 | { |
| 38 | return rdev->family >= CHIP_R600 | 38 | return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR) |
| 39 | || rdev->family == CHIP_RS600 | 39 | || rdev->family == CHIP_RS600 |
| 40 | || rdev->family == CHIP_RS690 | 40 | || rdev->family == CHIP_RS690 |
| 41 | || rdev->family == CHIP_RS740; | 41 | || rdev->family == CHIP_RS740; |
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 029fa1406d1d..2616b822ba68 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
| @@ -314,6 +314,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod | |||
| 314 | struct radeon_device *rdev = dev->dev_private; | 314 | struct radeon_device *rdev = dev->dev_private; |
| 315 | uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; | 315 | uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; |
| 316 | 316 | ||
| 317 | if (ASIC_IS_DCE4(rdev)) | ||
| 318 | return; | ||
| 319 | |||
| 317 | if (!offset) | 320 | if (!offset) |
| 318 | return; | 321 | return; |
| 319 | 322 | ||
| @@ -484,6 +487,9 @@ void r600_hdmi_enable(struct drm_encoder *encoder) | |||
| 484 | struct radeon_device *rdev = dev->dev_private; | 487 | struct radeon_device *rdev = dev->dev_private; |
| 485 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 488 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 486 | 489 | ||
| 490 | if (ASIC_IS_DCE4(rdev)) | ||
| 491 | return; | ||
| 492 | |||
| 487 | if (!radeon_encoder->hdmi_offset) { | 493 | if (!radeon_encoder->hdmi_offset) { |
| 488 | r600_hdmi_assign_block(encoder); | 494 | r600_hdmi_assign_block(encoder); |
| 489 | if (!radeon_encoder->hdmi_offset) { | 495 | if (!radeon_encoder->hdmi_offset) { |
| @@ -525,6 +531,9 @@ void r600_hdmi_disable(struct drm_encoder *encoder) | |||
| 525 | struct radeon_device *rdev = dev->dev_private; | 531 | struct radeon_device *rdev = dev->dev_private; |
| 526 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 532 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 527 | 533 | ||
| 534 | if (ASIC_IS_DCE4(rdev)) | ||
| 535 | return; | ||
| 536 | |||
| 528 | if (!radeon_encoder->hdmi_offset) { | 537 | if (!radeon_encoder->hdmi_offset) { |
| 529 | dev_err(rdev->dev, "Disabling not enabled HDMI\n"); | 538 | dev_err(rdev->dev, "Disabling not enabled HDMI\n"); |
| 530 | return; | 539 | return; |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 1fff95505cf5..5673665ff216 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -69,16 +69,19 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev | |||
| 69 | struct radeon_i2c_bus_rec i2c; | 69 | struct radeon_i2c_bus_rec i2c; |
| 70 | int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); | 70 | int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); |
| 71 | struct _ATOM_GPIO_I2C_INFO *i2c_info; | 71 | struct _ATOM_GPIO_I2C_INFO *i2c_info; |
| 72 | uint16_t data_offset; | 72 | uint16_t data_offset, size; |
| 73 | int i; | 73 | int i, num_indices; |
| 74 | 74 | ||
| 75 | memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); | 75 | memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); |
| 76 | i2c.valid = false; | 76 | i2c.valid = false; |
| 77 | 77 | ||
| 78 | if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { | 78 | if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { |
| 79 | i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); | 79 | i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); |
| 80 | 80 | ||
| 81 | for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { | 81 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / |
| 82 | sizeof(ATOM_GPIO_I2C_ASSIGMENT); | ||
| 83 | |||
| 84 | for (i = 0; i < num_indices; i++) { | ||
| 82 | gpio = &i2c_info->asGPIO_Info[i]; | 85 | gpio = &i2c_info->asGPIO_Info[i]; |
| 83 | 86 | ||
| 84 | if (gpio->sucI2cId.ucAccess == id) { | 87 | if (gpio->sucI2cId.ucAccess == id) { |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 2becdeda68a3..37db8adb2748 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
| @@ -760,7 +760,9 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct | |||
| 760 | dac = RBIOS8(dac_info + 0x3) & 0xf; | 760 | dac = RBIOS8(dac_info + 0x3) & 0xf; |
| 761 | p_dac->ps2_pdac_adj = (bg << 8) | (dac); | 761 | p_dac->ps2_pdac_adj = (bg << 8) | (dac); |
| 762 | } | 762 | } |
| 763 | found = 1; | 763 | /* if the values are all zeros, use the table */ |
| 764 | if (p_dac->ps2_pdac_adj) | ||
| 765 | found = 1; | ||
| 764 | } | 766 | } |
| 765 | 767 | ||
| 766 | if (!found) /* fallback to defaults */ | 768 | if (!found) /* fallback to defaults */ |
| @@ -895,7 +897,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct | |||
| 895 | bg = RBIOS8(dac_info + 0x10) & 0xf; | 897 | bg = RBIOS8(dac_info + 0x10) & 0xf; |
| 896 | dac = RBIOS8(dac_info + 0x11) & 0xf; | 898 | dac = RBIOS8(dac_info + 0x11) & 0xf; |
| 897 | tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); | 899 | tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); |
| 898 | found = 1; | 900 | /* if the values are all zeros, use the table */ |
| 901 | if (tv_dac->ps2_tvdac_adj) | ||
| 902 | found = 1; | ||
| 899 | } else if (rev > 1) { | 903 | } else if (rev > 1) { |
| 900 | bg = RBIOS8(dac_info + 0xc) & 0xf; | 904 | bg = RBIOS8(dac_info + 0xc) & 0xf; |
| 901 | dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf; | 905 | dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf; |
| @@ -908,7 +912,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct | |||
| 908 | bg = RBIOS8(dac_info + 0xe) & 0xf; | 912 | bg = RBIOS8(dac_info + 0xe) & 0xf; |
| 909 | dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf; | 913 | dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf; |
| 910 | tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); | 914 | tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); |
| 911 | found = 1; | 915 | /* if the values are all zeros, use the table */ |
| 916 | if (tv_dac->ps2_tvdac_adj) | ||
| 917 | found = 1; | ||
| 912 | } | 918 | } |
| 913 | tv_dac->tv_std = radeon_combios_get_tv_info(rdev); | 919 | tv_dac->tv_std = radeon_combios_get_tv_info(rdev); |
| 914 | } | 920 | } |
| @@ -925,7 +931,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct | |||
| 925 | (bg << 16) | (dac << 20); | 931 | (bg << 16) | (dac << 20); |
| 926 | tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; | 932 | tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; |
| 927 | tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; | 933 | tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; |
| 928 | found = 1; | 934 | /* if the values are all zeros, use the table */ |
| 935 | if (tv_dac->ps2_tvdac_adj) | ||
| 936 | found = 1; | ||
| 929 | } else { | 937 | } else { |
| 930 | bg = RBIOS8(dac_info + 0x4) & 0xf; | 938 | bg = RBIOS8(dac_info + 0x4) & 0xf; |
| 931 | dac = RBIOS8(dac_info + 0x5) & 0xf; | 939 | dac = RBIOS8(dac_info + 0x5) & 0xf; |
| @@ -933,7 +941,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct | |||
| 933 | (bg << 16) | (dac << 20); | 941 | (bg << 16) | (dac << 20); |
| 934 | tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; | 942 | tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; |
| 935 | tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; | 943 | tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; |
| 936 | found = 1; | 944 | /* if the values are all zeros, use the table */ |
| 945 | if (tv_dac->ps2_tvdac_adj) | ||
| 946 | found = 1; | ||
| 937 | } | 947 | } |
| 938 | } else { | 948 | } else { |
| 939 | DRM_INFO("No TV DAC info found in BIOS\n"); | 949 | DRM_INFO("No TV DAC info found in BIOS\n"); |
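The repeated radeon_combios.c change only sets found when the BIOS-provided DAC adjustment value is non-zero, so an empty table entry falls through to the driver defaults further down. Reduced to a sketch with a hypothetical fallback constant:

    #include <stdio.h>
    #include <stdint.h>

    #define DEFAULT_TVDAC_ADJ 0x00880000u   /* hypothetical fallback value */

    static uint32_t pick_tvdac_adj(uint32_t bios_value)
    {
        /* An all-zero adjustment usually means the BIOS table was not
         * filled in, so fall back to a known-good default instead. */
        return bios_value ? bios_value : DEFAULT_TVDAC_ADJ;
    }

    int main(void)
    {
        printf("0x%08x\n", (unsigned)pick_tvdac_adj(0));          /* uses the default */
        printf("0x%08x\n", (unsigned)pick_tvdac_adj(0x00770000)); /* trusts the BIOS */
        return 0;
    }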
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 60d59816b94f..1331351c5178 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
| @@ -162,12 +162,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, | |||
| 162 | { | 162 | { |
| 163 | struct drm_device *dev = connector->dev; | 163 | struct drm_device *dev = connector->dev; |
| 164 | struct drm_connector *conflict; | 164 | struct drm_connector *conflict; |
| 165 | struct radeon_connector *radeon_conflict; | ||
| 165 | int i; | 166 | int i; |
| 166 | 167 | ||
| 167 | list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { | 168 | list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { |
| 168 | if (conflict == connector) | 169 | if (conflict == connector) |
| 169 | continue; | 170 | continue; |
| 170 | 171 | ||
| 172 | radeon_conflict = to_radeon_connector(conflict); | ||
| 171 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | 173 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { |
| 172 | if (conflict->encoder_ids[i] == 0) | 174 | if (conflict->encoder_ids[i] == 0) |
| 173 | break; | 175 | break; |
| @@ -177,6 +179,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, | |||
| 177 | if (conflict->status != connector_status_connected) | 179 | if (conflict->status != connector_status_connected) |
| 178 | continue; | 180 | continue; |
| 179 | 181 | ||
| 182 | if (radeon_conflict->use_digital) | ||
| 183 | continue; | ||
| 184 | |||
| 180 | if (priority == true) { | 185 | if (priority == true) { |
| 181 | DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); | 186 | DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); |
| 182 | DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); | 187 | DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); |
| @@ -287,6 +292,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr | |||
| 287 | 292 | ||
| 288 | if (property == rdev->mode_info.coherent_mode_property) { | 293 | if (property == rdev->mode_info.coherent_mode_property) { |
| 289 | struct radeon_encoder_atom_dig *dig; | 294 | struct radeon_encoder_atom_dig *dig; |
| 295 | bool new_coherent_mode; | ||
| 290 | 296 | ||
| 291 | /* need to find digital encoder on connector */ | 297 | /* need to find digital encoder on connector */ |
| 292 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); | 298 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); |
| @@ -299,8 +305,11 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr | |||
| 299 | return 0; | 305 | return 0; |
| 300 | 306 | ||
| 301 | dig = radeon_encoder->enc_priv; | 307 | dig = radeon_encoder->enc_priv; |
| 302 | dig->coherent_mode = val ? true : false; | 308 | new_coherent_mode = val ? true : false; |
| 303 | radeon_property_change_mode(&radeon_encoder->base); | 309 | if (dig->coherent_mode != new_coherent_mode) { |
| 310 | dig->coherent_mode = new_coherent_mode; | ||
| 311 | radeon_property_change_mode(&radeon_encoder->base); | ||
| 312 | } | ||
| 304 | } | 313 | } |
| 305 | 314 | ||
| 306 | if (property == rdev->mode_info.tv_std_property) { | 315 | if (property == rdev->mode_info.tv_std_property) { |
| @@ -315,7 +324,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr | |||
| 315 | radeon_encoder = to_radeon_encoder(encoder); | 324 | radeon_encoder = to_radeon_encoder(encoder); |
| 316 | if (!radeon_encoder->enc_priv) | 325 | if (!radeon_encoder->enc_priv) |
| 317 | return 0; | 326 | return 0; |
| 318 | if (rdev->is_atom_bios) { | 327 | if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) { |
| 319 | struct radeon_encoder_atom_dac *dac_int; | 328 | struct radeon_encoder_atom_dac *dac_int; |
| 320 | dac_int = radeon_encoder->enc_priv; | 329 | dac_int = radeon_encoder->enc_priv; |
| 321 | dac_int->tv_std = val; | 330 | dac_int->tv_std = val; |
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index dc6eba6b96dd..419630dd2075 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
| @@ -417,8 +417,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) | |||
| 417 | return -EBUSY; | 417 | return -EBUSY; |
| 418 | } | 418 | } |
| 419 | 419 | ||
| 420 | static void radeon_init_pipes(drm_radeon_private_t *dev_priv) | 420 | static void radeon_init_pipes(struct drm_device *dev) |
| 421 | { | 421 | { |
| 422 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
| 422 | uint32_t gb_tile_config, gb_pipe_sel = 0; | 423 | uint32_t gb_tile_config, gb_pipe_sel = 0; |
| 423 | 424 | ||
| 424 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) { | 425 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) { |
| @@ -436,11 +437,12 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv) | |||
| 436 | dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; | 437 | dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; |
| 437 | } else { | 438 | } else { |
| 438 | /* R3xx */ | 439 | /* R3xx */ |
| 439 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || | 440 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 && |
| 441 | dev->pdev->device != 0x4144) || | ||
| 440 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) { | 442 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) { |
| 441 | dev_priv->num_gb_pipes = 2; | 443 | dev_priv->num_gb_pipes = 2; |
| 442 | } else { | 444 | } else { |
| 443 | /* R3Vxx */ | 445 | /* RV3xx/R300 AD */ |
| 444 | dev_priv->num_gb_pipes = 1; | 446 | dev_priv->num_gb_pipes = 1; |
| 445 | } | 447 | } |
| 446 | } | 448 | } |
| @@ -736,7 +738,7 @@ static int radeon_do_engine_reset(struct drm_device * dev) | |||
| 736 | 738 | ||
| 737 | /* setup the raster pipes */ | 739 | /* setup the raster pipes */ |
| 738 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300) | 740 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300) |
| 739 | radeon_init_pipes(dev_priv); | 741 | radeon_init_pipes(dev); |
| 740 | 742 | ||
| 741 | /* Reset the CP ring */ | 743 | /* Reset the CP ring */ |
| 742 | radeon_do_cp_reset(dev_priv); | 744 | radeon_do_cp_reset(dev_priv); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index bddf17f97da8..7b629e305560 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -36,6 +36,54 @@ | |||
| 36 | #include "radeon.h" | 36 | #include "radeon.h" |
| 37 | #include "atom.h" | 37 | #include "atom.h" |
| 38 | 38 | ||
| 39 | static const char radeon_family_name[][16] = { | ||
| 40 | "R100", | ||
| 41 | "RV100", | ||
| 42 | "RS100", | ||
| 43 | "RV200", | ||
| 44 | "RS200", | ||
| 45 | "R200", | ||
| 46 | "RV250", | ||
| 47 | "RS300", | ||
| 48 | "RV280", | ||
| 49 | "R300", | ||
| 50 | "R350", | ||
| 51 | "RV350", | ||
| 52 | "RV380", | ||
| 53 | "R420", | ||
| 54 | "R423", | ||
| 55 | "RV410", | ||
| 56 | "RS400", | ||
| 57 | "RS480", | ||
| 58 | "RS600", | ||
| 59 | "RS690", | ||
| 60 | "RS740", | ||
| 61 | "RV515", | ||
| 62 | "R520", | ||
| 63 | "RV530", | ||
| 64 | "RV560", | ||
| 65 | "RV570", | ||
| 66 | "R580", | ||
| 67 | "R600", | ||
| 68 | "RV610", | ||
| 69 | "RV630", | ||
| 70 | "RV670", | ||
| 71 | "RV620", | ||
| 72 | "RV635", | ||
| 73 | "RS780", | ||
| 74 | "RS880", | ||
| 75 | "RV770", | ||
| 76 | "RV730", | ||
| 77 | "RV710", | ||
| 78 | "RV740", | ||
| 79 | "CEDAR", | ||
| 80 | "REDWOOD", | ||
| 81 | "JUNIPER", | ||
| 82 | "CYPRESS", | ||
| 83 | "HEMLOCK", | ||
| 84 | "LAST", | ||
| 85 | }; | ||
| 86 | |||
| 39 | /* | 87 | /* |
| 40 | * Clear GPU surface registers. | 88 | * Clear GPU surface registers. |
| 41 | */ | 89 | */ |
| @@ -526,7 +574,6 @@ int radeon_device_init(struct radeon_device *rdev, | |||
| 526 | int r; | 574 | int r; |
| 527 | int dma_bits; | 575 | int dma_bits; |
| 528 | 576 | ||
| 529 | DRM_INFO("radeon: Initializing kernel modesetting.\n"); | ||
| 530 | rdev->shutdown = false; | 577 | rdev->shutdown = false; |
| 531 | rdev->dev = &pdev->dev; | 578 | rdev->dev = &pdev->dev; |
| 532 | rdev->ddev = ddev; | 579 | rdev->ddev = ddev; |
| @@ -538,6 +585,10 @@ int radeon_device_init(struct radeon_device *rdev, | |||
| 538 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | 585 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
| 539 | rdev->gpu_lockup = false; | 586 | rdev->gpu_lockup = false; |
| 540 | rdev->accel_working = false; | 587 | rdev->accel_working = false; |
| 588 | |||
| 589 | DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n", | ||
| 590 | radeon_family_name[rdev->family], pdev->vendor, pdev->device); | ||
| 591 | |||
| 541 | /* mutex initialization are all done here so we | 592 | /* mutex initialization are all done here so we |
| 542 | * can recall function without having locking issues */ | 593 | * can recall function without having locking issues */ |
| 543 | mutex_init(&rdev->cs_mutex); | 594 | mutex_init(&rdev->cs_mutex); |
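The new radeon_family_name[] table is indexed directly by the radeon_family enum, which is why radeon_family.h below now spells out CHIP_R100 = 0, and the reworked DRM_INFO() line simply prints the entry for rdev->family along with the PCI ids. A trimmed, self-contained version of that lookup (shortened enum, made-up PCI ids):

    #include <stdio.h>

    enum chip_family { CHIP_R100 = 0, CHIP_RV100, CHIP_R300, CHIP_LAST };

    /* Must stay in the same order as the enum above. */
    static const char family_name[][16] = { "R100", "RV100", "R300", "LAST" };

    int main(void)
    {
        enum chip_family family = CHIP_R300;

        /* Same shape as the DRM_INFO() line added in the hunk above. */
        printf("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
               family_name[family], 0x1002, 0x4E44);
        return 0;
    }

The one design constraint is that the string array and the enum must stay in the same order; adding a family to one without the other shifts every later name.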
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 055a51732dcb..4b05563d99e1 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
| @@ -43,9 +43,10 @@ | |||
| 43 | * - 2.0.0 - initial interface | 43 | * - 2.0.0 - initial interface |
| 44 | * - 2.1.0 - add square tiling interface | 44 | * - 2.1.0 - add square tiling interface |
| 45 | * - 2.2.0 - add r6xx/r7xx const buffer support | 45 | * - 2.2.0 - add r6xx/r7xx const buffer support |
| 46 | * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs | ||
| 46 | */ | 47 | */ |
| 47 | #define KMS_DRIVER_MAJOR 2 | 48 | #define KMS_DRIVER_MAJOR 2 |
| 48 | #define KMS_DRIVER_MINOR 2 | 49 | #define KMS_DRIVER_MINOR 3 |
| 49 | #define KMS_DRIVER_PATCHLEVEL 0 | 50 | #define KMS_DRIVER_PATCHLEVEL 0 |
| 50 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 51 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
| 51 | int radeon_driver_unload_kms(struct drm_device *dev); | 52 | int radeon_driver_unload_kms(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 52d6f96f274b..30293bec0801 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
| @@ -317,12 +317,8 @@ atombios_dac_setup(struct drm_encoder *encoder, int action) | |||
| 317 | struct radeon_device *rdev = dev->dev_private; | 317 | struct radeon_device *rdev = dev->dev_private; |
| 318 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 318 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 319 | DAC_ENCODER_CONTROL_PS_ALLOCATION args; | 319 | DAC_ENCODER_CONTROL_PS_ALLOCATION args; |
| 320 | int index = 0, num = 0; | 320 | int index = 0; |
| 321 | struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; | 321 | struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; |
| 322 | enum radeon_tv_std tv_std = TV_STD_NTSC; | ||
| 323 | |||
| 324 | if (dac_info->tv_std) | ||
| 325 | tv_std = dac_info->tv_std; | ||
| 326 | 322 | ||
| 327 | memset(&args, 0, sizeof(args)); | 323 | memset(&args, 0, sizeof(args)); |
| 328 | 324 | ||
| @@ -330,12 +326,10 @@ atombios_dac_setup(struct drm_encoder *encoder, int action) | |||
| 330 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | 326 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: |
| 331 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | 327 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
| 332 | index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl); | 328 | index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl); |
| 333 | num = 1; | ||
| 334 | break; | 329 | break; |
| 335 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | 330 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: |
| 336 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | 331 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: |
| 337 | index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl); | 332 | index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl); |
| 338 | num = 2; | ||
| 339 | break; | 333 | break; |
| 340 | } | 334 | } |
| 341 | 335 | ||
| @@ -346,7 +340,7 @@ atombios_dac_setup(struct drm_encoder *encoder, int action) | |||
| 346 | else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | 340 | else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) |
| 347 | args.ucDacStandard = ATOM_DAC1_CV; | 341 | args.ucDacStandard = ATOM_DAC1_CV; |
| 348 | else { | 342 | else { |
| 349 | switch (tv_std) { | 343 | switch (dac_info->tv_std) { |
| 350 | case TV_STD_PAL: | 344 | case TV_STD_PAL: |
| 351 | case TV_STD_PAL_M: | 345 | case TV_STD_PAL_M: |
| 352 | case TV_STD_SCART_PAL: | 346 | case TV_STD_SCART_PAL: |
| @@ -377,10 +371,6 @@ atombios_tv_setup(struct drm_encoder *encoder, int action) | |||
| 377 | TV_ENCODER_CONTROL_PS_ALLOCATION args; | 371 | TV_ENCODER_CONTROL_PS_ALLOCATION args; |
| 378 | int index = 0; | 372 | int index = 0; |
| 379 | struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; | 373 | struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; |
| 380 | enum radeon_tv_std tv_std = TV_STD_NTSC; | ||
| 381 | |||
| 382 | if (dac_info->tv_std) | ||
| 383 | tv_std = dac_info->tv_std; | ||
| 384 | 374 | ||
| 385 | memset(&args, 0, sizeof(args)); | 375 | memset(&args, 0, sizeof(args)); |
| 386 | 376 | ||
| @@ -391,7 +381,7 @@ atombios_tv_setup(struct drm_encoder *encoder, int action) | |||
| 391 | if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | 381 | if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) |
| 392 | args.sTVEncoder.ucTvStandard = ATOM_TV_CV; | 382 | args.sTVEncoder.ucTvStandard = ATOM_TV_CV; |
| 393 | else { | 383 | else { |
| 394 | switch (tv_std) { | 384 | switch (dac_info->tv_std) { |
| 395 | case TV_STD_NTSC: | 385 | case TV_STD_NTSC: |
| 396 | args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; | 386 | args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; |
| 397 | break; | 387 | break; |
| @@ -875,6 +865,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
| 875 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 865 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { |
| 876 | if (dig->coherent_mode) | 866 | if (dig->coherent_mode) |
| 877 | args.v3.acConfig.fCoherentMode = 1; | 867 | args.v3.acConfig.fCoherentMode = 1; |
| 868 | if (radeon_encoder->pixel_clock > 165000) | ||
| 869 | args.v3.acConfig.fDualLinkConnector = 1; | ||
| 878 | } | 870 | } |
| 879 | } else if (ASIC_IS_DCE32(rdev)) { | 871 | } else if (ASIC_IS_DCE32(rdev)) { |
| 880 | args.v2.acConfig.ucEncoderSel = dig->dig_encoder; | 872 | args.v2.acConfig.ucEncoderSel = dig->dig_encoder; |
| @@ -898,6 +890,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
| 898 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 890 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { |
| 899 | if (dig->coherent_mode) | 891 | if (dig->coherent_mode) |
| 900 | args.v2.acConfig.fCoherentMode = 1; | 892 | args.v2.acConfig.fCoherentMode = 1; |
| 893 | if (radeon_encoder->pixel_clock > 165000) | ||
| 894 | args.v2.acConfig.fDualLinkConnector = 1; | ||
| 901 | } | 895 | } |
| 902 | } else { | 896 | } else { |
| 903 | args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; | 897 | args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; |
| @@ -1383,8 +1377,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
| 1383 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | 1377 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: |
| 1384 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | 1378 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: |
| 1385 | atombios_dac_setup(encoder, ATOM_ENABLE); | 1379 | atombios_dac_setup(encoder, ATOM_ENABLE); |
| 1386 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) | 1380 | if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) { |
| 1387 | atombios_tv_setup(encoder, ATOM_ENABLE); | 1381 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) |
| 1382 | atombios_tv_setup(encoder, ATOM_ENABLE); | ||
| 1383 | else | ||
| 1384 | atombios_tv_setup(encoder, ATOM_DISABLE); | ||
| 1385 | } | ||
| 1388 | break; | 1386 | break; |
| 1389 | } | 1387 | } |
| 1390 | atombios_apply_encoder_quirks(encoder, adjusted_mode); | 1388 | atombios_apply_encoder_quirks(encoder, adjusted_mode); |
| @@ -1558,12 +1556,14 @@ static const struct drm_encoder_funcs radeon_atom_enc_funcs = { | |||
| 1558 | struct radeon_encoder_atom_dac * | 1556 | struct radeon_encoder_atom_dac * |
| 1559 | radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) | 1557 | radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) |
| 1560 | { | 1558 | { |
| 1559 | struct drm_device *dev = radeon_encoder->base.dev; | ||
| 1560 | struct radeon_device *rdev = dev->dev_private; | ||
| 1561 | struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL); | 1561 | struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL); |
| 1562 | 1562 | ||
| 1563 | if (!dac) | 1563 | if (!dac) |
| 1564 | return NULL; | 1564 | return NULL; |
| 1565 | 1565 | ||
| 1566 | dac->tv_std = TV_STD_NTSC; | 1566 | dac->tv_std = radeon_atombios_get_tv_info(rdev); |
| 1567 | return dac; | 1567 | return dac; |
| 1568 | } | 1568 | } |
| 1569 | 1569 | ||
| @@ -1641,6 +1641,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su | |||
| 1641 | break; | 1641 | break; |
| 1642 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | 1642 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: |
| 1643 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); | 1643 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); |
| 1644 | radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); | ||
| 1644 | drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); | 1645 | drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); |
| 1645 | break; | 1646 | break; |
| 1646 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | 1647 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: |
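Among the radeon_encoders.c changes above, both DIG transmitter-setup paths now set fDualLinkConnector whenever the pixel clock exceeds 165 MHz, the single-link TMDS limit; pixel_clock is kept in kHz, hence the 165000 comparison. The threshold check on its own:

    #include <stdio.h>
    #include <stdbool.h>

    /* Single-link TMDS tops out at a 165 MHz pixel clock; the driver
     * stores pixel_clock in kHz, so the threshold is 165000. */
    static bool needs_dual_link(int pixel_clock_khz)
    {
        return pixel_clock_khz > 165000;
    }

    int main(void)
    {
        printf("1920x1200@60 (154 MHz): %s\n",
               needs_dual_link(154000) ? "dual link" : "single link");
        printf("2560x1600@60 (268 MHz): %s\n",
               needs_dual_link(268500) ? "dual link" : "single link");
        return 0;
    }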
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index 93c7d5d41914..e329066dcabd 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h | |||
| @@ -36,7 +36,7 @@ | |||
| 36 | * Radeon chip families | 36 | * Radeon chip families |
| 37 | */ | 37 | */ |
| 38 | enum radeon_family { | 38 | enum radeon_family { |
| 39 | CHIP_R100, | 39 | CHIP_R100 = 0, |
| 40 | CHIP_RV100, | 40 | CHIP_RV100, |
| 41 | CHIP_RS100, | 41 | CHIP_RS100, |
| 42 | CHIP_RV200, | 42 | CHIP_RV200, |
| @@ -99,4 +99,5 @@ enum radeon_chip_flags { | |||
| 99 | RADEON_IS_PCI = 0x00800000UL, | 99 | RADEON_IS_PCI = 0x00800000UL, |
| 100 | RADEON_IS_IGPGART = 0x01000000UL, | 100 | RADEON_IS_IGPGART = 0x01000000UL, |
| 101 | }; | 101 | }; |
| 102 | |||
| 102 | #endif | 103 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index cf389ce50a8a..2441cca7d775 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
| @@ -830,8 +830,8 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) | |||
| 830 | crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON; | 830 | crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON; |
| 831 | 831 | ||
| 832 | if (rdev->family == CHIP_R420 || | 832 | if (rdev->family == CHIP_R420 || |
| 833 | rdev->family == CHIP_R423 || | 833 | rdev->family == CHIP_R423 || |
| 834 | rdev->family == CHIP_RV410) | 834 | rdev->family == CHIP_RV410) |
| 835 | tv_dac_cntl |= (R420_TV_DAC_RDACPD | | 835 | tv_dac_cntl |= (R420_TV_DAC_RDACPD | |
| 836 | R420_TV_DAC_GDACPD | | 836 | R420_TV_DAC_GDACPD | |
| 837 | R420_TV_DAC_BDACPD | | 837 | R420_TV_DAC_BDACPD | |
| @@ -907,35 +907,43 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, | |||
| 907 | if (rdev->family != CHIP_R200) { | 907 | if (rdev->family != CHIP_R200) { |
| 908 | tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); | 908 | tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); |
| 909 | if (rdev->family == CHIP_R420 || | 909 | if (rdev->family == CHIP_R420 || |
| 910 | rdev->family == CHIP_R423 || | 910 | rdev->family == CHIP_R423 || |
| 911 | rdev->family == CHIP_RV410) { | 911 | rdev->family == CHIP_RV410) { |
| 912 | tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | | 912 | tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | |
| 913 | RADEON_TV_DAC_BGADJ_MASK | | 913 | RADEON_TV_DAC_BGADJ_MASK | |
| 914 | R420_TV_DAC_DACADJ_MASK | | 914 | R420_TV_DAC_DACADJ_MASK | |
| 915 | R420_TV_DAC_RDACPD | | 915 | R420_TV_DAC_RDACPD | |
| 916 | R420_TV_DAC_GDACPD | | 916 | R420_TV_DAC_GDACPD | |
| 917 | R420_TV_DAC_BDACPD | | 917 | R420_TV_DAC_BDACPD | |
| 918 | R420_TV_DAC_TVENABLE); | 918 | R420_TV_DAC_TVENABLE); |
| 919 | } else { | 919 | } else { |
| 920 | tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | | 920 | tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | |
| 921 | RADEON_TV_DAC_BGADJ_MASK | | 921 | RADEON_TV_DAC_BGADJ_MASK | |
| 922 | RADEON_TV_DAC_DACADJ_MASK | | 922 | RADEON_TV_DAC_DACADJ_MASK | |
| 923 | RADEON_TV_DAC_RDACPD | | 923 | RADEON_TV_DAC_RDACPD | |
| 924 | RADEON_TV_DAC_GDACPD | | 924 | RADEON_TV_DAC_GDACPD | |
| 925 | RADEON_TV_DAC_BDACPD); | 925 | RADEON_TV_DAC_BDACPD); |
| 926 | } | 926 | } |
| 927 | 927 | ||
| 928 | /* FIXME TV */ | 928 | tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD; |
| 929 | if (tv_dac) { | 929 | |
| 930 | struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; | 930 | if (is_tv) { |
| 931 | tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | | 931 | if (tv_dac->tv_std == TV_STD_NTSC || |
| 932 | RADEON_TV_DAC_NHOLD | | 932 | tv_dac->tv_std == TV_STD_NTSC_J || |
| 933 | RADEON_TV_DAC_STD_PS2 | | 933 | tv_dac->tv_std == TV_STD_PAL_M || |
| 934 | tv_dac->ps2_tvdac_adj); | 934 | tv_dac->tv_std == TV_STD_PAL_60) |
| 935 | tv_dac_cntl |= tv_dac->ntsc_tvdac_adj; | ||
| 936 | else | ||
| 937 | tv_dac_cntl |= tv_dac->pal_tvdac_adj; | ||
| 938 | |||
| 939 | if (tv_dac->tv_std == TV_STD_NTSC || | ||
| 940 | tv_dac->tv_std == TV_STD_NTSC_J) | ||
| 941 | tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC; | ||
| 942 | else | ||
| 943 | tv_dac_cntl |= RADEON_TV_DAC_STD_PAL; | ||
| 935 | } else | 944 | } else |
| 936 | tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | | 945 | tv_dac_cntl |= (RADEON_TV_DAC_STD_PS2 | |
| 937 | RADEON_TV_DAC_NHOLD | | 946 | tv_dac->ps2_tvdac_adj); |
| 938 | RADEON_TV_DAC_STD_PS2); | ||
| 939 | 947 | ||
| 940 | WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); | 948 | WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); |
| 941 | } | 949 | } |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300 index 19c4663fa9c6..1e97b2d129fd 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r300 +++ b/drivers/gpu/drm/radeon/reg_srcs/r300 | |||
| @@ -125,6 +125,8 @@ r300 0x4f60 | |||
| 125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 | 125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 |
| 126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 | 126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 |
| 127 | 0x4008 GB_ENABLE | 127 | 0x4008 GB_ENABLE |
| 128 | 0x4010 GB_MSPOS0 | ||
| 129 | 0x4014 GB_MSPOS1 | ||
| 128 | 0x401C GB_SELECT | 130 | 0x401C GB_SELECT |
| 129 | 0x4020 GB_AA_CONFIG | 131 | 0x4020 GB_AA_CONFIG |
| 130 | 0x4024 GB_FIFO_SIZE | 132 | 0x4024 GB_FIFO_SIZE |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420 index 989f7a020832..e958980d00f1 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r420 +++ b/drivers/gpu/drm/radeon/reg_srcs/r420 | |||
| @@ -125,6 +125,8 @@ r420 0x4f60 | |||
| 125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 | 125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 |
| 126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 | 126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 |
| 127 | 0x4008 GB_ENABLE | 127 | 0x4008 GB_ENABLE |
| 128 | 0x4010 GB_MSPOS0 | ||
| 129 | 0x4014 GB_MSPOS1 | ||
| 128 | 0x401C GB_SELECT | 130 | 0x401C GB_SELECT |
| 129 | 0x4020 GB_AA_CONFIG | 131 | 0x4020 GB_AA_CONFIG |
| 130 | 0x4024 GB_FIFO_SIZE | 132 | 0x4024 GB_FIFO_SIZE |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600 index 6801b865d1c4..83e8bc0c2bb2 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rs600 +++ b/drivers/gpu/drm/radeon/reg_srcs/rs600 | |||
| @@ -125,6 +125,8 @@ rs600 0x6d40 | |||
| 125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 | 125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 |
| 126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 | 126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 |
| 127 | 0x4008 GB_ENABLE | 127 | 0x4008 GB_ENABLE |
| 128 | 0x4010 GB_MSPOS0 | ||
| 129 | 0x4014 GB_MSPOS1 | ||
| 128 | 0x401C GB_SELECT | 130 | 0x401C GB_SELECT |
| 129 | 0x4020 GB_AA_CONFIG | 131 | 0x4020 GB_AA_CONFIG |
| 130 | 0x4024 GB_FIFO_SIZE | 132 | 0x4024 GB_FIFO_SIZE |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index 38abf63bf2cd..1e46233985eb 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ b/drivers/gpu/drm/radeon/reg_srcs/rv515 | |||
| @@ -35,6 +35,7 @@ rv515 0x6d40 | |||
| 35 | 0x1DA8 VAP_VPORT_ZSCALE | 35 | 0x1DA8 VAP_VPORT_ZSCALE |
| 36 | 0x1DAC VAP_VPORT_ZOFFSET | 36 | 0x1DAC VAP_VPORT_ZOFFSET |
| 37 | 0x2080 VAP_CNTL | 37 | 0x2080 VAP_CNTL |
| 38 | 0x208C VAP_INDEX_OFFSET | ||
| 38 | 0x2090 VAP_OUT_VTX_FMT_0 | 39 | 0x2090 VAP_OUT_VTX_FMT_0 |
| 39 | 0x2094 VAP_OUT_VTX_FMT_1 | 40 | 0x2094 VAP_OUT_VTX_FMT_1 |
| 40 | 0x20B0 VAP_VTE_CNTL | 41 | 0x20B0 VAP_VTE_CNTL |
| @@ -158,6 +159,8 @@ rv515 0x6d40 | |||
| 158 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 | 159 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 |
| 159 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 | 160 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 |
| 160 | 0x4008 GB_ENABLE | 161 | 0x4008 GB_ENABLE |
| 162 | 0x4010 GB_MSPOS0 | ||
| 163 | 0x4014 GB_MSPOS1 | ||
| 161 | 0x401C GB_SELECT | 164 | 0x401C GB_SELECT |
| 162 | 0x4020 GB_AA_CONFIG | 165 | 0x4020 GB_AA_CONFIG |
| 163 | 0x4024 GB_FIFO_SIZE | 166 | 0x4024 GB_FIFO_SIZE |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index abf824c2123d..a81bc7a21e14 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -159,7 +159,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev) | |||
| 159 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); | 159 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
| 160 | 160 | ||
| 161 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); | 161 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
| 162 | tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); | 162 | tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1); |
| 163 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); | 163 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
| 164 | 164 | ||
| 165 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); | 165 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
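The rs600.c one-liner is a bitmask slip: AND-ing two single-bit field values yields zero unless the bits happen to overlap, so neither invalidate request ever reached MC_PT0_CNTL, while OR combines independent flag bits as intended. A small demonstration with simplified macro names and made-up bit positions:

    #include <stdio.h>
    #include <stdint.h>

    #define INVALIDATE_ALL_L1_TLBS(x) ((uint32_t)(x) << 0)
    #define INVALIDATE_L2_CACHE(x)    ((uint32_t)(x) << 1)

    int main(void)
    {
        uint32_t tmp = 0;

        /* Buggy form: 0x1 & 0x2 == 0, so nothing gets set. */
        printf("AND: 0x%x\n",
               (unsigned)(tmp | (INVALIDATE_ALL_L1_TLBS(1) & INVALIDATE_L2_CACHE(1))));

        /* Fixed form: both flag bits end up in the register value. */
        printf("OR:  0x%x\n",
               (unsigned)(tmp | (INVALIDATE_ALL_L1_TLBS(1) | INVALIDATE_L2_CACHE(1))));
        return 0;
    }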
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index c1605b528e8f..0f28d91f29d8 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c | |||
| @@ -142,6 +142,12 @@ static const char *temperature_sensors_sets[][41] = { | |||
| 142 | "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S", | 142 | "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S", |
| 143 | "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S", | 143 | "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S", |
| 144 | NULL }, | 144 | NULL }, |
| 145 | /* Set 17: iMac 9,1 */ | ||
| 146 | { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TH0P", "TL0P", | ||
| 147 | "TN0D", "TN0H", "TN0P", "TO0P", "Tm0P", "Tp0P", NULL }, | ||
| 148 | /* Set 18: MacBook Pro 2,2 */ | ||
| 149 | { "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0", | ||
| 150 | "Th0H", "Th1H", "Tm0P", "Ts0P", NULL }, | ||
| 145 | }; | 151 | }; |
| 146 | 152 | ||
| 147 | /* List of keys used to read/write fan speeds */ | 153 | /* List of keys used to read/write fan speeds */ |
| @@ -1350,6 +1356,10 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = { | |||
| 1350 | { .accelerometer = 1, .light = 1, .temperature_set = 15 }, | 1356 | { .accelerometer = 1, .light = 1, .temperature_set = 15 }, |
| 1351 | /* MacPro3,1: temperature set 16 */ | 1357 | /* MacPro3,1: temperature set 16 */ |
| 1352 | { .accelerometer = 0, .light = 0, .temperature_set = 16 }, | 1358 | { .accelerometer = 0, .light = 0, .temperature_set = 16 }, |
| 1359 | /* iMac 9,1: light sensor only, temperature set 17 */ | ||
| 1360 | { .accelerometer = 0, .light = 0, .temperature_set = 17 }, | ||
| 1361 | /* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */ | ||
| 1362 | { .accelerometer = 1, .light = 1, .temperature_set = 18 }, | ||
| 1353 | }; | 1363 | }; |
| 1354 | 1364 | ||
| 1355 | /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". | 1365 | /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". |
| @@ -1375,6 +1385,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = { | |||
| 1375 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), | 1385 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), |
| 1376 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") }, | 1386 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") }, |
| 1377 | &applesmc_dmi_data[9]}, | 1387 | &applesmc_dmi_data[9]}, |
| 1388 | { applesmc_dmi_match, "Apple MacBook Pro 2,2", { | ||
| 1389 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple Computer, Inc."), | ||
| 1390 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2") }, | ||
| 1391 | &applesmc_dmi_data[18]}, | ||
| 1378 | { applesmc_dmi_match, "Apple MacBook Pro", { | 1392 | { applesmc_dmi_match, "Apple MacBook Pro", { |
| 1379 | DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), | 1393 | DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), |
| 1380 | DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") }, | 1394 | DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") }, |
| @@ -1415,6 +1429,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = { | |||
| 1415 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), | 1429 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), |
| 1416 | DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, | 1430 | DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, |
| 1417 | &applesmc_dmi_data[4]}, | 1431 | &applesmc_dmi_data[4]}, |
| 1432 | { applesmc_dmi_match, "Apple iMac 9,1", { | ||
| 1433 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), | ||
| 1434 | DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1") }, | ||
| 1435 | &applesmc_dmi_data[17]}, | ||
| 1418 | { applesmc_dmi_match, "Apple iMac 8", { | 1436 | { applesmc_dmi_match, "Apple iMac 8", { |
| 1419 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), | 1437 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), |
| 1420 | DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") }, | 1438 | DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") }, |
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index 1002befd87d5..5be09c048c5f 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c | |||
| @@ -539,14 +539,14 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr, | |||
| 539 | 539 | ||
| 540 | struct it87_data *data = dev_get_drvdata(dev); | 540 | struct it87_data *data = dev_get_drvdata(dev); |
| 541 | long val; | 541 | long val; |
| 542 | u8 reg; | ||
| 542 | 543 | ||
| 543 | if (strict_strtol(buf, 10, &val) < 0) | 544 | if (strict_strtol(buf, 10, &val) < 0) |
| 544 | return -EINVAL; | 545 | return -EINVAL; |
| 545 | 546 | ||
| 546 | mutex_lock(&data->update_lock); | 547 | reg = it87_read_value(data, IT87_REG_TEMP_ENABLE); |
| 547 | 548 | reg &= ~(1 << nr); | |
| 548 | data->sensor &= ~(1 << nr); | 549 | reg &= ~(8 << nr); |
| 549 | data->sensor &= ~(8 << nr); | ||
| 550 | if (val == 2) { /* backwards compatibility */ | 550 | if (val == 2) { /* backwards compatibility */ |
| 551 | dev_warn(dev, "Sensor type 2 is deprecated, please use 4 " | 551 | dev_warn(dev, "Sensor type 2 is deprecated, please use 4 " |
| 552 | "instead\n"); | 552 | "instead\n"); |
| @@ -554,14 +554,16 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr, | |||
| 554 | } | 554 | } |
| 555 | /* 3 = thermal diode; 4 = thermistor; 0 = disabled */ | 555 | /* 3 = thermal diode; 4 = thermistor; 0 = disabled */ |
| 556 | if (val == 3) | 556 | if (val == 3) |
| 557 | data->sensor |= 1 << nr; | 557 | reg |= 1 << nr; |
| 558 | else if (val == 4) | 558 | else if (val == 4) |
| 559 | data->sensor |= 8 << nr; | 559 | reg |= 8 << nr; |
| 560 | else if (val != 0) { | 560 | else if (val != 0) |
| 561 | mutex_unlock(&data->update_lock); | ||
| 562 | return -EINVAL; | 561 | return -EINVAL; |
| 563 | } | 562 | |
| 563 | mutex_lock(&data->update_lock); | ||
| 564 | data->sensor = reg; | ||
| 564 | it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor); | 565 | it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor); |
| 566 | data->valid = 0; /* Force cache refresh */ | ||
| 565 | mutex_unlock(&data->update_lock); | 567 | mutex_unlock(&data->update_lock); |
| 566 | return count; | 568 | return count; |
| 567 | } | 569 | } |
| @@ -1841,14 +1843,10 @@ static void __devinit it87_init_device(struct platform_device *pdev) | |||
| 1841 | it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127); | 1843 | it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127); |
| 1842 | } | 1844 | } |
| 1843 | 1845 | ||
| 1844 | /* Check if temperature channels are reset manually or by some reason */ | 1846 | /* Temperature channels are not forcibly enabled, as they can be |
| 1845 | tmp = it87_read_value(data, IT87_REG_TEMP_ENABLE); | 1847 | * set to two different sensor types and we can't guess which one |
| 1846 | if ((tmp & 0x3f) == 0) { | 1848 | * is correct for a given system. These channels can be enabled at |
| 1847 | /* Temp1,Temp3=thermistor; Temp2=thermal diode */ | 1849 | * run-time through the temp{1-3}_type sysfs accessors if needed. */ |
| 1848 | tmp = (tmp & 0xc0) | 0x2a; | ||
| 1849 | it87_write_value(data, IT87_REG_TEMP_ENABLE, tmp); | ||
| 1850 | } | ||
| 1851 | data->sensor = tmp; | ||
| 1852 | 1850 | ||
| 1853 | /* Check if voltage monitors are reset manually or by some reason */ | 1851 | /* Check if voltage monitors are reset manually or by some reason */ |
| 1854 | tmp = it87_read_value(data, IT87_REG_VIN_ENABLE); | 1852 | tmp = it87_read_value(data, IT87_REG_VIN_ENABLE); |
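The it87.c change rebuilds the sensor-type byte from a fresh read of IT87_REG_TEMP_ENABLE instead of the driver's cached copy, rejects invalid types before taking the lock, and marks the cache stale afterwards. The read-modify-write core, sketched with stub accessors standing in for it87_read_value() and it87_write_value():

    #include <stdio.h>
    #include <stdint.h>

    static uint8_t fake_reg = 0x2a;                /* pretend hardware register */
    static uint8_t reg_read(void)       { return fake_reg; }
    static void    reg_write(uint8_t v) { fake_reg = v; }

    /* Set temperature channel 'nr' to: 0 = disabled, 3 = diode, 4 = thermistor. */
    static int set_sensor_type(int nr, int type)
    {
        uint8_t reg = reg_read();                  /* start from live hardware state */

        reg &= ~(1 << nr);                         /* clear the diode bit ... */
        reg &= ~(8 << nr);                         /* ... and the thermistor bit */

        if (type == 3)
            reg |= 1 << nr;
        else if (type == 4)
            reg |= 8 << nr;
        else if (type != 0)
            return -1;                             /* reject before touching hardware */

        reg_write(reg);
        return 0;
    }

    int main(void)
    {
        set_sensor_type(1, 4);
        printf("register now 0x%02x\n", reg_read());
        return 0;
    }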
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c index 6b2d8ae64fe1..a610e7880fb3 100644 --- a/drivers/hwmon/sht15.c +++ b/drivers/hwmon/sht15.c | |||
| @@ -303,13 +303,13 @@ error_ret: | |||
| 303 | **/ | 303 | **/ |
| 304 | static inline int sht15_calc_temp(struct sht15_data *data) | 304 | static inline int sht15_calc_temp(struct sht15_data *data) |
| 305 | { | 305 | { |
| 306 | int d1 = 0; | 306 | int d1 = temppoints[0].d1; |
| 307 | int i; | 307 | int i; |
| 308 | 308 | ||
| 309 | for (i = 1; i < ARRAY_SIZE(temppoints); i++) | 309 | for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--) |
| 310 | /* Find pointer to interpolate */ | 310 | /* Find pointer to interpolate */ |
| 311 | if (data->supply_uV > temppoints[i - 1].vdd) { | 311 | if (data->supply_uV > temppoints[i - 1].vdd) { |
| 312 | d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd) | 312 | d1 = (data->supply_uV - temppoints[i - 1].vdd) |
| 313 | * (temppoints[i].d1 - temppoints[i - 1].d1) | 313 | * (temppoints[i].d1 - temppoints[i - 1].d1) |
| 314 | / (temppoints[i].vdd - temppoints[i - 1].vdd) | 314 | / (temppoints[i].vdd - temppoints[i - 1].vdd) |
| 315 | + temppoints[i - 1].d1; | 315 | + temppoints[i - 1].d1; |
| @@ -542,7 +542,12 @@ static int __devinit sht15_probe(struct platform_device *pdev) | |||
| 542 | /* If a regulator is available, query what the supply voltage actually is!*/ | 542 | /* If a regulator is available, query what the supply voltage actually is!*/ |
| 543 | data->reg = regulator_get(data->dev, "vcc"); | 543 | data->reg = regulator_get(data->dev, "vcc"); |
| 544 | if (!IS_ERR(data->reg)) { | 544 | if (!IS_ERR(data->reg)) { |
| 545 | data->supply_uV = regulator_get_voltage(data->reg); | 545 | int voltage; |
| 546 | |||
| 547 | voltage = regulator_get_voltage(data->reg); | ||
| 548 | if (voltage) | ||
| 549 | data->supply_uV = voltage; | ||
| 550 | |||
| 546 | regulator_enable(data->reg); | 551 | regulator_enable(data->reg); |
| 547 | /* setup a notifier block to update this if another device | 552 | /* setup a notifier block to update this if another device |
| 548 | * causes the voltage to change */ | 553 | * causes the voltage to change */ |
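The sht15.c fix seeds d1 with the first calibration point instead of 0, walks the table from the top down, and keeps the supply voltage in the same unit as the table entries before interpolating. The interpolation itself is ordinary piecewise-linear lookup; a standalone sketch with an invented table (the real driver's values differ):

    #include <stdio.h>

    /* Hypothetical calibration table: supply voltage (uV) -> offset d1.
     * Entries are ordered from lowest to highest voltage, as in the driver. */
    static const struct { int vdd; int d1; } temppoints[] = {
        { 2500000, -3940 },
        { 3000000, -3960 },
        { 4000000, -3980 },
        { 5000000, -4010 },
    };
    #define N (int)(sizeof(temppoints) / sizeof(temppoints[0]))

    static int calc_d1(int supply_uV)
    {
        int d1 = temppoints[0].d1;   /* sensible default if no bracket matches */
        int i;

        for (i = N - 1; i > 0; i--)
            if (supply_uV > temppoints[i - 1].vdd) {
                /* Linear interpolation between points i-1 and i,
                 * with both voltages in the same unit (uV). */
                d1 = (supply_uV - temppoints[i - 1].vdd)
                    * (temppoints[i].d1 - temppoints[i - 1].d1)
                    / (temppoints[i].vdd - temppoints[i - 1].vdd)
                    + temppoints[i - 1].d1;
                break;
            }
        return d1;
    }

    int main(void)
    {
        printf("d1 at 3.3 V: %d\n", calc_d1(3300000));
        return 0;
    }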
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index a4046e94158d..f9daffd7d0e3 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c | |||
| @@ -264,8 +264,8 @@ void ide_retry_pc(ide_drive_t *drive) | |||
| 264 | * of it. The failed command will be retried after sense data | 264 | * of it. The failed command will be retried after sense data |
| 265 | * is acquired. | 265 | * is acquired. |
| 266 | */ | 266 | */ |
| 267 | blk_requeue_request(failed_rq->q, failed_rq); | ||
| 268 | drive->hwif->rq = NULL; | 267 | drive->hwif->rq = NULL; |
| 268 | ide_requeue_and_plug(drive, failed_rq); | ||
| 269 | if (ide_queue_sense_rq(drive, pc)) { | 269 | if (ide_queue_sense_rq(drive, pc)) { |
| 270 | blk_start_request(failed_rq); | 270 | blk_start_request(failed_rq); |
| 271 | ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq)); | 271 | ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq)); |
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index 2c17e3fb43e3..06b14bc9a1d4 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c | |||
| @@ -493,6 +493,7 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) | |||
| 493 | if (rq) { | 493 | if (rq) { |
| 494 | hwif->rq = NULL; | 494 | hwif->rq = NULL; |
| 495 | rq->errors = 0; | 495 | rq->errors = 0; |
| 496 | ide_requeue_and_plug(drive, rq); | ||
| 496 | } | 497 | } |
| 497 | return ret; | 498 | return ret; |
| 498 | } | 499 | } |
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index db96138fefcd..172ac9218154 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c | |||
| @@ -566,7 +566,7 @@ plug_device_2: | |||
| 566 | blk_plug_device(q); | 566 | blk_plug_device(q); |
| 567 | } | 567 | } |
| 568 | 568 | ||
| 569 | static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) | 569 | void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) |
| 570 | { | 570 | { |
| 571 | struct request_queue *q = drive->queue; | 571 | struct request_queue *q = drive->queue; |
| 572 | unsigned long flags; | 572 | unsigned long flags; |
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index cc8633cbe133..67fb73559fd5 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c | |||
| @@ -428,13 +428,11 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf, | |||
| 428 | { | 428 | { |
| 429 | struct request *rq; | 429 | struct request *rq; |
| 430 | int error; | 430 | int error; |
| 431 | int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE; | ||
| 431 | 432 | ||
| 432 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); | 433 | rq = blk_get_request(drive->queue, rw, __GFP_WAIT); |
| 433 | rq->cmd_type = REQ_TYPE_ATA_TASKFILE; | 434 | rq->cmd_type = REQ_TYPE_ATA_TASKFILE; |
| 434 | 435 | ||
| 435 | if (cmd->tf_flags & IDE_TFLAG_WRITE) | ||
| 436 | rq->cmd_flags |= REQ_RW; | ||
| 437 | |||
| 438 | /* | 436 | /* |
| 439 | * (ks) We transfer currently only whole sectors. | 437 | * (ks) We transfer currently only whole sectors. |
| 440 | * This is sufficient for now. But it would be great, | 438 |
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index fc73d6ac11b6..ad63b79afac1 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
| @@ -3694,7 +3694,7 @@ static void cm_add_one(struct ib_device *ib_device) | |||
| 3694 | cm_dev->device = device_create(&cm_class, &ib_device->dev, | 3694 | cm_dev->device = device_create(&cm_class, &ib_device->dev, |
| 3695 | MKDEV(0, 0), NULL, | 3695 | MKDEV(0, 0), NULL, |
| 3696 | "%s", ib_device->name); | 3696 | "%s", ib_device->name); |
| 3697 | if (!cm_dev->device) { | 3697 | if (IS_ERR(cm_dev->device)) { |
| 3698 | kfree(cm_dev); | 3698 | kfree(cm_dev); |
| 3699 | return; | 3699 | return; |
| 3700 | } | 3700 | } |
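device_create() never returns NULL on failure; it returns an error value encoded in the pointer itself, so the old NULL test in cm_add_one() could not fire and IS_ERR() is the right check. A userspace imitation of the ERR_PTR()/IS_ERR() convention from linux/err.h:

    #include <stdio.h>
    #include <errno.h>

    /* Userspace stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers:
     * errors are small negative numbers squeezed into the pointer value. */
    #define MAX_ERRNO 4095
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
    #define PTR_ERR(ptr)  ((long)(ptr))

    static void *create_device(int fail)
    {
        static int dummy;
        return fail ? ERR_PTR(-ENOMEM) : &dummy;   /* never NULL on failure */
    }

    int main(void)
    {
        void *dev = create_device(1);

        if (IS_ERR(dev))            /* a NULL check here would silently pass */
            printf("create_device failed: %ld\n", PTR_ERR(dev));
        return 0;
    }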
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 7794249430ca..6d777069d86d 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
| @@ -1684,6 +1684,7 @@ int rdma_set_ib_paths(struct rdma_cm_id *id, | |||
| 1684 | } | 1684 | } |
| 1685 | 1685 | ||
| 1686 | memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths); | 1686 | memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths); |
| 1687 | id->route.num_paths = num_paths; | ||
| 1687 | return 0; | 1688 | return 0; |
| 1688 | err: | 1689 | err: |
| 1689 | cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED); | 1690 | cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED); |
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 56147b28a23a..1d27b9a8e2d6 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c | |||
| @@ -240,7 +240,7 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device | |||
| 240 | mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev, | 240 | mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev, |
| 241 | size, &mfrpl->map, | 241 | size, &mfrpl->map, |
| 242 | GFP_KERNEL); | 242 | GFP_KERNEL); |
| 243 | if (!mfrpl->ibfrpl.page_list) | 243 | if (!mfrpl->mapped_page_list) |
| 244 | goto err_free; | 244 | goto err_free; |
| 245 | 245 | ||
| 246 | WARN_ON(mfrpl->map & 0x3f); | 246 | WARN_ON(mfrpl->map & 0x3f); |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 5a076e8f116a..e54f312e4bdc 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
| @@ -2821,11 +2821,10 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
| 2821 | attr->cap.max_send_wr = nesqp->hwqp.sq_size; | 2821 | attr->cap.max_send_wr = nesqp->hwqp.sq_size; |
| 2822 | attr->cap.max_recv_wr = nesqp->hwqp.rq_size; | 2822 | attr->cap.max_recv_wr = nesqp->hwqp.rq_size; |
| 2823 | attr->cap.max_recv_sge = 1; | 2823 | attr->cap.max_recv_sge = 1; |
| 2824 | if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) { | 2824 | if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) |
| 2825 | init_attr->cap.max_inline_data = 0; | 2825 | attr->cap.max_inline_data = 0; |
| 2826 | } else { | 2826 | else |
| 2827 | init_attr->cap.max_inline_data = 64; | 2827 | attr->cap.max_inline_data = 64; |
| 2828 | } | ||
| 2829 | 2828 | ||
| 2830 | init_attr->event_handler = nesqp->ibqp.event_handler; | 2829 | init_attr->event_handler = nesqp->ibqp.event_handler; |
| 2831 | init_attr->qp_context = nesqp->ibqp.qp_context; | 2830 | init_attr->qp_context = nesqp->ibqp.qp_context; |
diff --git a/drivers/input/input.c b/drivers/input/input.c index afd4e2b7658c..9c79bd56b51a 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
| @@ -660,7 +660,14 @@ static int input_default_setkeycode(struct input_dev *dev, | |||
| 660 | int input_get_keycode(struct input_dev *dev, | 660 | int input_get_keycode(struct input_dev *dev, |
| 661 | unsigned int scancode, unsigned int *keycode) | 661 | unsigned int scancode, unsigned int *keycode) |
| 662 | { | 662 | { |
| 663 | return dev->getkeycode(dev, scancode, keycode); | 663 | unsigned long flags; |
| 664 | int retval; | ||
| 665 | |||
| 666 | spin_lock_irqsave(&dev->event_lock, flags); | ||
| 667 | retval = dev->getkeycode(dev, scancode, keycode); | ||
| 668 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 669 | |||
| 670 | return retval; | ||
| 664 | } | 671 | } |
| 665 | EXPORT_SYMBOL(input_get_keycode); | 672 | EXPORT_SYMBOL(input_get_keycode); |
| 666 | 673 | ||
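input_get_keycode() now takes dev->event_lock around the getkeycode() call so that a keymap being torn down concurrently (see the sparse-keymap changes further below) cannot disappear mid-lookup. The shape of the fix, sketched with a POSIX mutex in place of the kernel spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
    static int *keycode_table;                     /* may be freed by another path */

    static int get_keycode(unsigned int scancode, unsigned int *keycode)
    {
        int ret = -22;                             /* -EINVAL */

        pthread_mutex_lock(&event_lock);           /* spin_lock_irqsave() in the kernel */
        if (keycode_table) {                       /* table still present? */
            *keycode = keycode_table[scancode];
            ret = 0;
        }
        pthread_mutex_unlock(&event_lock);
        return ret;
    }

    static void free_keymap(void)
    {
        pthread_mutex_lock(&event_lock);           /* same lock as the readers */
        free(keycode_table);
        keycode_table = NULL;
        pthread_mutex_unlock(&event_lock);
    }

    int main(void)
    {
        unsigned int kc;

        keycode_table = calloc(16, sizeof(*keycode_table));
        keycode_table[3] = 30;                     /* arbitrary mapping */
        printf("get: %d (kc=%u)\n", get_keycode(3, &kc), kc);
        free_keymap();
        printf("get after free: %d\n", get_keycode(3, &kc));
        return 0;
    }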
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index ffc25cfcef7a..b443e088fd3c 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c | |||
| @@ -374,7 +374,9 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev) | |||
| 374 | input_dev->name = pdev->name; | 374 | input_dev->name = pdev->name; |
| 375 | input_dev->id.bustype = BUS_HOST; | 375 | input_dev->id.bustype = BUS_HOST; |
| 376 | input_dev->dev.parent = &pdev->dev; | 376 | input_dev->dev.parent = &pdev->dev; |
| 377 | input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); | 377 | input_dev->evbit[0] = BIT_MASK(EV_KEY); |
| 378 | if (!pdata->no_autorepeat) | ||
| 379 | input_dev->evbit[0] |= BIT_MASK(EV_REP); | ||
| 378 | input_dev->open = matrix_keypad_start; | 380 | input_dev->open = matrix_keypad_start; |
| 379 | input_dev->close = matrix_keypad_stop; | 381 | input_dev->close = matrix_keypad_stop; |
| 380 | 382 | ||
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 99d58764ef03..0d22cb9ce42e 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c | |||
| @@ -64,6 +64,7 @@ static const struct alps_model_info alps_model_data[] = { | |||
| 64 | { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, | 64 | { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, |
| 65 | ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, | 65 | ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, |
| 66 | { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ | 66 | { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ |
| 67 | { { 0x73, 0x02, 0x64 }, 0xf8, 0xf8, 0 }, /* HP Pavilion dm3 */ | ||
| 67 | { { 0x52, 0x01, 0x14 }, 0xff, 0xff, | 68 | { { 0x52, 0x01, 0x14 }, 0xff, 0xff, |
| 68 | ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ | 69 | ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ |
| 69 | }; | 70 | }; |
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 4f8fe0886b2a..b89879bd860f 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c | |||
| @@ -803,7 +803,6 @@ static struct usb_driver bcm5974_driver = { | |||
| 803 | .disconnect = bcm5974_disconnect, | 803 | .disconnect = bcm5974_disconnect, |
| 804 | .suspend = bcm5974_suspend, | 804 | .suspend = bcm5974_suspend, |
| 805 | .resume = bcm5974_resume, | 805 | .resume = bcm5974_resume, |
| 806 | .reset_resume = bcm5974_resume, | ||
| 807 | .id_table = bcm5974_table, | 806 | .id_table = bcm5974_table, |
| 808 | .supports_autosuspend = 1, | 807 | .supports_autosuspend = 1, |
| 809 | }; | 808 | }; |
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 577688b5b951..6440a8f55686 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c | |||
| @@ -39,7 +39,7 @@ MODULE_PARM_DESC(noaux, "Do not probe or use AUX (mouse) port."); | |||
| 39 | 39 | ||
| 40 | static bool i8042_nomux; | 40 | static bool i8042_nomux; |
| 41 | module_param_named(nomux, i8042_nomux, bool, 0); | 41 | module_param_named(nomux, i8042_nomux, bool, 0); |
| 42 | MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing conrtoller is present."); | 42 | MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing controller is present."); |
| 43 | 43 | ||
| 44 | static bool i8042_unlock; | 44 | static bool i8042_unlock; |
| 45 | module_param_named(unlock, i8042_unlock, bool, 0); | 45 | module_param_named(unlock, i8042_unlock, bool, 0); |
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c index 82ae18d29685..014248344763 100644 --- a/drivers/input/sparse-keymap.c +++ b/drivers/input/sparse-keymap.c | |||
| @@ -68,12 +68,14 @@ static int sparse_keymap_getkeycode(struct input_dev *dev, | |||
| 68 | unsigned int scancode, | 68 | unsigned int scancode, |
| 69 | unsigned int *keycode) | 69 | unsigned int *keycode) |
| 70 | { | 70 | { |
| 71 | const struct key_entry *key = | 71 | const struct key_entry *key; |
| 72 | sparse_keymap_entry_from_scancode(dev, scancode); | ||
| 73 | 72 | ||
| 74 | if (key && key->type == KE_KEY) { | 73 | if (dev->keycode) { |
| 75 | *keycode = key->keycode; | 74 | key = sparse_keymap_entry_from_scancode(dev, scancode); |
| 76 | return 0; | 75 | if (key && key->type == KE_KEY) { |
| 76 | *keycode = key->keycode; | ||
| 77 | return 0; | ||
| 78 | } | ||
| 77 | } | 79 | } |
| 78 | 80 | ||
| 79 | return -EINVAL; | 81 | return -EINVAL; |
| @@ -86,17 +88,16 @@ static int sparse_keymap_setkeycode(struct input_dev *dev, | |||
| 86 | struct key_entry *key; | 88 | struct key_entry *key; |
| 87 | int old_keycode; | 89 | int old_keycode; |
| 88 | 90 | ||
| 89 | if (keycode < 0 || keycode > KEY_MAX) | 91 | if (dev->keycode) { |
| 90 | return -EINVAL; | 92 | key = sparse_keymap_entry_from_scancode(dev, scancode); |
| 91 | 93 | if (key && key->type == KE_KEY) { | |
| 92 | key = sparse_keymap_entry_from_scancode(dev, scancode); | 94 | old_keycode = key->keycode; |
| 93 | if (key && key->type == KE_KEY) { | 95 | key->keycode = keycode; |
| 94 | old_keycode = key->keycode; | 96 | set_bit(keycode, dev->keybit); |
| 95 | key->keycode = keycode; | 97 | if (!sparse_keymap_entry_from_keycode(dev, old_keycode)) |
| 96 | set_bit(keycode, dev->keybit); | 98 | clear_bit(old_keycode, dev->keybit); |
| 97 | if (!sparse_keymap_entry_from_keycode(dev, old_keycode)) | 99 | return 0; |
| 98 | clear_bit(old_keycode, dev->keybit); | 100 | } |
| 99 | return 0; | ||
| 100 | } | 101 | } |
| 101 | 102 | ||
| 102 | return -EINVAL; | 103 | return -EINVAL; |
| @@ -164,7 +165,7 @@ int sparse_keymap_setup(struct input_dev *dev, | |||
| 164 | return 0; | 165 | return 0; |
| 165 | 166 | ||
| 166 | err_out: | 167 | err_out: |
| 167 | kfree(keymap); | 168 | kfree(map); |
| 168 | return error; | 169 | return error; |
| 169 | 170 | ||
| 170 | } | 171 | } |
| @@ -176,14 +177,27 @@ EXPORT_SYMBOL(sparse_keymap_setup); | |||
| 176 | * | 177 | * |
| 177 | * This function is used to free memory allocated by sparse keymap | 178 | * This function is used to free memory allocated by sparse keymap |
| 178 | * in an input device that was set up by sparse_keymap_setup(). | 179 | * in an input device that was set up by sparse_keymap_setup(). |
| 180 | * NOTE: It is safe to call this function while input device is | ||
| 181 | * still registered (however the drivers should take care not to try to | ||
| 182 | * use freed keymap and thus have to shut off interrupts/polling | ||
| 183 | * before freeing the keymap). | ||
| 179 | */ | 184 | */ |
| 180 | void sparse_keymap_free(struct input_dev *dev) | 185 | void sparse_keymap_free(struct input_dev *dev) |
| 181 | { | 186 | { |
| 187 | unsigned long flags; | ||
| 188 | |||
| 189 | /* | ||
| 190 | * Take event lock to prevent racing with input_get_keycode() | ||
| 191 | * and input_set_keycode() if we are called while input device | ||
| 192 | * is still registered. | ||
| 193 | */ | ||
| 194 | spin_lock_irqsave(&dev->event_lock, flags); | ||
| 195 | |||
| 182 | kfree(dev->keycode); | 196 | kfree(dev->keycode); |
| 183 | dev->keycode = NULL; | 197 | dev->keycode = NULL; |
| 184 | dev->keycodemax = 0; | 198 | dev->keycodemax = 0; |
| 185 | dev->getkeycode = NULL; | 199 | |
| 186 | dev->setkeycode = NULL; | 200 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 187 | } | 201 | } |
| 188 | EXPORT_SYMBOL(sparse_keymap_free); | 202 | EXPORT_SYMBOL(sparse_keymap_free); |
| 189 | 203 | ||
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index 8b5d2873f0c4..f46502589e4e 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c | |||
| @@ -673,13 +673,15 @@ static int wacom_resume(struct usb_interface *intf) | |||
| 673 | int rv; | 673 | int rv; |
| 674 | 674 | ||
| 675 | mutex_lock(&wacom->lock); | 675 | mutex_lock(&wacom->lock); |
| 676 | if (wacom->open) { | 676 | |
| 677 | /* switch to wacom mode first */ | ||
| 678 | wacom_query_tablet_data(intf, features); | ||
| 679 | |||
| 680 | if (wacom->open) | ||
| 677 | rv = usb_submit_urb(wacom->irq, GFP_NOIO); | 681 | rv = usb_submit_urb(wacom->irq, GFP_NOIO); |
| 678 | /* switch to wacom mode if needed */ | 682 | else |
| 679 | if (!wacom_retrieve_hid_descriptor(intf, features)) | ||
| 680 | wacom_query_tablet_data(intf, features); | ||
| 681 | } else | ||
| 682 | rv = 0; | 683 | rv = 0; |
| 684 | |||
| 683 | mutex_unlock(&wacom->lock); | 685 | mutex_unlock(&wacom->lock); |
| 684 | 686 | ||
| 685 | return rv; | 687 | return rv; |
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index b3ba3437a2eb..4a852d815c68 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
| @@ -155,19 +155,19 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) | |||
| 155 | { | 155 | { |
| 156 | struct wacom_features *features = &wacom->features; | 156 | struct wacom_features *features = &wacom->features; |
| 157 | unsigned char *data = wacom->data; | 157 | unsigned char *data = wacom->data; |
| 158 | int x, y, prox; | 158 | int x, y, rw; |
| 159 | int rw = 0; | 159 | static int penData = 0; |
| 160 | int retval = 0; | ||
| 161 | 160 | ||
| 162 | if (data[0] != WACOM_REPORT_PENABLED) { | 161 | if (data[0] != WACOM_REPORT_PENABLED) { |
| 163 | dbg("wacom_graphire_irq: received unknown report #%d", data[0]); | 162 | dbg("wacom_graphire_irq: received unknown report #%d", data[0]); |
| 164 | goto exit; | 163 | return 0; |
| 165 | } | 164 | } |
| 166 | 165 | ||
| 167 | prox = data[1] & 0x80; | 166 | if (data[1] & 0x80) { |
| 168 | if (prox || wacom->id[0]) { | 167 | /* in prox and not a pad data */ |
| 169 | if (prox) { | 168 | penData = 1; |
| 170 | switch ((data[1] >> 5) & 3) { | 169 | |
| 170 | switch ((data[1] >> 5) & 3) { | ||
| 171 | 171 | ||
| 172 | case 0: /* Pen */ | 172 | case 0: /* Pen */ |
| 173 | wacom->tool[0] = BTN_TOOL_PEN; | 173 | wacom->tool[0] = BTN_TOOL_PEN; |
| @@ -181,13 +181,23 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) | |||
| 181 | 181 | ||
| 182 | case 2: /* Mouse with wheel */ | 182 | case 2: /* Mouse with wheel */ |
| 183 | wacom_report_key(wcombo, BTN_MIDDLE, data[1] & 0x04); | 183 | wacom_report_key(wcombo, BTN_MIDDLE, data[1] & 0x04); |
| 184 | if (features->type == WACOM_G4 || features->type == WACOM_MO) { | ||
| 185 | rw = data[7] & 0x04 ? (data[7] & 0x03)-4 : (data[7] & 0x03); | ||
| 186 | wacom_report_rel(wcombo, REL_WHEEL, -rw); | ||
| 187 | } else | ||
| 188 | wacom_report_rel(wcombo, REL_WHEEL, -(signed char) data[6]); | ||
| 184 | /* fall through */ | 189 | /* fall through */ |
| 185 | 190 | ||
| 186 | case 3: /* Mouse without wheel */ | 191 | case 3: /* Mouse without wheel */ |
| 187 | wacom->tool[0] = BTN_TOOL_MOUSE; | 192 | wacom->tool[0] = BTN_TOOL_MOUSE; |
| 188 | wacom->id[0] = CURSOR_DEVICE_ID; | 193 | wacom->id[0] = CURSOR_DEVICE_ID; |
| 194 | wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01); | ||
| 195 | wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02); | ||
| 196 | if (features->type == WACOM_G4 || features->type == WACOM_MO) | ||
| 197 | wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f); | ||
| 198 | else | ||
| 199 | wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f); | ||
| 189 | break; | 200 | break; |
| 190 | } | ||
| 191 | } | 201 | } |
| 192 | x = wacom_le16_to_cpu(&data[2]); | 202 | x = wacom_le16_to_cpu(&data[2]); |
| 193 | y = wacom_le16_to_cpu(&data[4]); | 203 | y = wacom_le16_to_cpu(&data[4]); |
| @@ -198,32 +208,36 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) | |||
| 198 | wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01); | 208 | wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01); |
| 199 | wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); | 209 | wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); |
| 200 | wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x04); | 210 | wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x04); |
| 201 | } else { | ||
| 202 | wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01); | ||
| 203 | wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02); | ||
| 204 | if (features->type == WACOM_G4 || | ||
| 205 | features->type == WACOM_MO) { | ||
| 206 | wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f); | ||
| 207 | rw = (signed)(data[7] & 0x04) - (data[7] & 0x03); | ||
| 208 | } else { | ||
| 209 | wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f); | ||
| 210 | rw = -(signed)data[6]; | ||
| 211 | } | ||
| 212 | wacom_report_rel(wcombo, REL_WHEEL, rw); | ||
| 213 | } | 211 | } |
| 214 | |||
| 215 | if (!prox) | ||
| 216 | wacom->id[0] = 0; | ||
| 217 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */ | 212 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */ |
| 218 | wacom_report_key(wcombo, wacom->tool[0], prox); | 213 | wacom_report_key(wcombo, wacom->tool[0], 1); |
| 219 | wacom_input_sync(wcombo); /* sync last event */ | 214 | } else if (wacom->id[0]) { |
| 215 | wacom_report_abs(wcombo, ABS_X, 0); | ||
| 216 | wacom_report_abs(wcombo, ABS_Y, 0); | ||
| 217 | if (wacom->tool[0] == BTN_TOOL_MOUSE) { | ||
| 218 | wacom_report_key(wcombo, BTN_LEFT, 0); | ||
| 219 | wacom_report_key(wcombo, BTN_RIGHT, 0); | ||
| 220 | wacom_report_abs(wcombo, ABS_DISTANCE, 0); | ||
| 221 | } else { | ||
| 222 | wacom_report_abs(wcombo, ABS_PRESSURE, 0); | ||
| 223 | wacom_report_key(wcombo, BTN_TOUCH, 0); | ||
| 224 | wacom_report_key(wcombo, BTN_STYLUS, 0); | ||
| 225 | wacom_report_key(wcombo, BTN_STYLUS2, 0); | ||
| 226 | } | ||
| 227 | wacom->id[0] = 0; | ||
| 228 | wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */ | ||
| 229 | wacom_report_key(wcombo, wacom->tool[0], 0); | ||
| 220 | } | 230 | } |
| 221 | 231 | ||
| 222 | /* send pad data */ | 232 | /* send pad data */ |
| 223 | switch (features->type) { | 233 | switch (features->type) { |
| 224 | case WACOM_G4: | 234 | case WACOM_G4: |
| 225 | prox = data[7] & 0xf8; | 235 | if (data[7] & 0xf8) { |
| 226 | if (prox || wacom->id[1]) { | 236 | if (penData) { |
| 237 | wacom_input_sync(wcombo); /* sync last event */ | ||
| 238 | if (!wacom->id[0]) | ||
| 239 | penData = 0; | ||
| 240 | } | ||
| 227 | wacom->id[1] = PAD_DEVICE_ID; | 241 | wacom->id[1] = PAD_DEVICE_ID; |
| 228 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x40)); | 242 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x40)); |
| 229 | wacom_report_key(wcombo, BTN_4, (data[7] & 0x80)); | 243 | wacom_report_key(wcombo, BTN_4, (data[7] & 0x80)); |
| @@ -231,16 +245,29 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) | |||
| 231 | wacom_report_rel(wcombo, REL_WHEEL, rw); | 245 | wacom_report_rel(wcombo, REL_WHEEL, rw); |
| 232 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); | 246 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); |
| 233 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); | 247 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); |
| 234 | if (!prox) | 248 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); |
| 235 | wacom->id[1] = 0; | 249 | } else if (wacom->id[1]) { |
| 236 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); | 250 | if (penData) { |
| 251 | wacom_input_sync(wcombo); /* sync last event */ | ||
| 252 | if (!wacom->id[0]) | ||
| 253 | penData = 0; | ||
| 254 | } | ||
| 255 | wacom->id[1] = 0; | ||
| 256 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x40)); | ||
| 257 | wacom_report_key(wcombo, BTN_4, (data[7] & 0x80)); | ||
| 258 | wacom_report_rel(wcombo, REL_WHEEL, 0); | ||
| 259 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0); | ||
| 260 | wacom_report_abs(wcombo, ABS_MISC, 0); | ||
| 237 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); | 261 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); |
| 238 | } | 262 | } |
| 239 | retval = 1; | ||
| 240 | break; | 263 | break; |
| 241 | case WACOM_MO: | 264 | case WACOM_MO: |
| 242 | prox = (data[7] & 0xf8) || data[8]; | 265 | if ((data[7] & 0xf8) || (data[8] & 0xff)) { |
| 243 | if (prox || wacom->id[1]) { | 266 | if (penData) { |
| 267 | wacom_input_sync(wcombo); /* sync last event */ | ||
| 268 | if (!wacom->id[0]) | ||
| 269 | penData = 0; | ||
| 270 | } | ||
| 244 | wacom->id[1] = PAD_DEVICE_ID; | 271 | wacom->id[1] = PAD_DEVICE_ID; |
| 245 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x08)); | 272 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x08)); |
| 246 | wacom_report_key(wcombo, BTN_1, (data[7] & 0x20)); | 273 | wacom_report_key(wcombo, BTN_1, (data[7] & 0x20)); |
| @@ -248,16 +275,27 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) | |||
| 248 | wacom_report_key(wcombo, BTN_5, (data[7] & 0x40)); | 275 | wacom_report_key(wcombo, BTN_5, (data[7] & 0x40)); |
| 249 | wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f)); | 276 | wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f)); |
| 250 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); | 277 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); |
| 251 | if (!prox) | ||
| 252 | wacom->id[1] = 0; | ||
| 253 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); | 278 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); |
| 254 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); | 279 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); |
| 280 | } else if (wacom->id[1]) { | ||
| 281 | if (penData) { | ||
| 282 | wacom_input_sync(wcombo); /* sync last event */ | ||
| 283 | if (!wacom->id[0]) | ||
| 284 | penData = 0; | ||
| 285 | } | ||
| 286 | wacom->id[1] = 0; | ||
| 287 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x08)); | ||
| 288 | wacom_report_key(wcombo, BTN_1, (data[7] & 0x20)); | ||
| 289 | wacom_report_key(wcombo, BTN_4, (data[7] & 0x10)); | ||
| 290 | wacom_report_key(wcombo, BTN_5, (data[7] & 0x40)); | ||
| 291 | wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f)); | ||
| 292 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0); | ||
| 293 | wacom_report_abs(wcombo, ABS_MISC, 0); | ||
| 294 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); | ||
| 255 | } | 295 | } |
| 256 | retval = 1; | ||
| 257 | break; | 296 | break; |
| 258 | } | 297 | } |
| 259 | exit: | 298 | return 1; |
| 260 | return retval; | ||
| 261 | } | 299 | } |
| 262 | 300 | ||
| 263 | static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo) | 301 | static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo) |
| @@ -598,9 +636,9 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo) | |||
| 598 | static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx) | 636 | static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx) |
| 599 | { | 637 | { |
| 600 | wacom_report_abs(wcombo, ABS_X, | 638 | wacom_report_abs(wcombo, ABS_X, |
| 601 | data[2 + idx * 2] | ((data[3 + idx * 2] & 0x7f) << 8)); | 639 | (data[2 + idx * 2] & 0xff) | ((data[3 + idx * 2] & 0x7f) << 8)); |
| 602 | wacom_report_abs(wcombo, ABS_Y, | 640 | wacom_report_abs(wcombo, ABS_Y, |
| 603 | data[6 + idx * 2] | ((data[7 + idx * 2] & 0x7f) << 8)); | 641 | (data[6 + idx * 2] & 0xff) | ((data[7 + idx * 2] & 0x7f) << 8)); |
| 604 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); | 642 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); |
| 605 | wacom_report_key(wcombo, wacom->tool[idx], 1); | 643 | wacom_report_key(wcombo, wacom->tool[idx], 1); |
| 606 | if (idx) | 644 | if (idx) |
| @@ -744,24 +782,31 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo) | |||
| 744 | 782 | ||
| 745 | touchInProx = 0; | 783 | touchInProx = 0; |
| 746 | 784 | ||
| 747 | if (!wacom->id[0]) { /* first in prox */ | 785 | if (prox) { /* in prox */ |
| 748 | /* Going into proximity select tool */ | 786 | if (!wacom->id[0]) { |
| 749 | wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; | 787 | /* Going into proximity select tool */ |
| 750 | if (wacom->tool[0] == BTN_TOOL_PEN) | 788 | wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; |
| 751 | wacom->id[0] = STYLUS_DEVICE_ID; | 789 | if (wacom->tool[0] == BTN_TOOL_PEN) |
| 752 | else | 790 | wacom->id[0] = STYLUS_DEVICE_ID; |
| 753 | wacom->id[0] = ERASER_DEVICE_ID; | 791 | else |
| 754 | } | 792 | wacom->id[0] = ERASER_DEVICE_ID; |
| 755 | wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); | 793 | } |
| 756 | wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10); | 794 | wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); |
| 757 | wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2])); | 795 | wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10); |
| 758 | wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4])); | 796 | wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2])); |
| 759 | pressure = ((data[7] & 0x01) << 8) | data[6]; | 797 | wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4])); |
| 760 | if (pressure < 0) | 798 | pressure = ((data[7] & 0x01) << 8) | data[6]; |
| 761 | pressure = features->pressure_max + pressure + 1; | 799 | if (pressure < 0) |
| 762 | wacom_report_abs(wcombo, ABS_PRESSURE, pressure); | 800 | pressure = features->pressure_max + pressure + 1; |
| 763 | wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05); | 801 | wacom_report_abs(wcombo, ABS_PRESSURE, pressure); |
| 764 | if (!prox) { /* out-prox */ | 802 | wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05); |
| 803 | } else { | ||
| 804 | wacom_report_abs(wcombo, ABS_X, 0); | ||
| 805 | wacom_report_abs(wcombo, ABS_Y, 0); | ||
| 806 | wacom_report_abs(wcombo, ABS_PRESSURE, 0); | ||
| 807 | wacom_report_key(wcombo, BTN_STYLUS, 0); | ||
| 808 | wacom_report_key(wcombo, BTN_STYLUS2, 0); | ||
| 809 | wacom_report_key(wcombo, BTN_TOUCH, 0); | ||
| 765 | wacom->id[0] = 0; | 810 | wacom->id[0] = 0; |
| 766 | /* pen is out so touch can be enabled now */ | 811 | /* pen is out so touch can be enabled now */ |
| 767 | touchInProx = 1; | 812 | touchInProx = 1; |
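A recurring theme in the wacom_wac.c changes above is sending an explicit "everything released" report when a tool leaves proximity, instead of leaving stale axis and button state behind. A hedged illustration using the generic input API (not the driver's wacom_report_* wrappers):

#include <linux/input.h>

static void example_tool_out(struct input_dev *dev, unsigned int tool)
{
	/* Zero the axes and buttons reported while the tool was in
	 * range, clear the tool id, then release the tool itself. */
	input_report_abs(dev, ABS_X, 0);
	input_report_abs(dev, ABS_Y, 0);
	input_report_abs(dev, ABS_PRESSURE, 0);
	input_report_key(dev, BTN_TOUCH, 0);
	input_report_abs(dev, ABS_MISC, 0);	/* reset tool id */
	input_report_key(dev, tool, 0);
	input_sync(dev);
}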
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 0be15c70c16d..47a5ffec55a3 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c | |||
| @@ -14,11 +14,6 @@ | |||
| 14 | */ | 14 | */ |
| 15 | 15 | ||
| 16 | #include "gigaset.h" | 16 | #include "gigaset.h" |
| 17 | |||
| 18 | #include <linux/errno.h> | ||
| 19 | #include <linux/init.h> | ||
| 20 | #include <linux/slab.h> | ||
| 21 | #include <linux/timer.h> | ||
| 22 | #include <linux/usb.h> | 17 | #include <linux/usb.h> |
| 23 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 24 | #include <linux/moduleparam.h> | 19 | #include <linux/moduleparam.h> |
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c index eb7e27105a82..964a55fb1486 100644 --- a/drivers/isdn/gigaset/capi.c +++ b/drivers/isdn/gigaset/capi.c | |||
| @@ -12,8 +12,6 @@ | |||
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #include "gigaset.h" | 14 | #include "gigaset.h" |
| 15 | #include <linux/slab.h> | ||
| 16 | #include <linux/ctype.h> | ||
| 17 | #include <linux/proc_fs.h> | 15 | #include <linux/proc_fs.h> |
| 18 | #include <linux/seq_file.h> | 16 | #include <linux/seq_file.h> |
| 19 | #include <linux/isdn/capilli.h> | 17 | #include <linux/isdn/capilli.h> |
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c index 0b39b387c125..f6f45f221920 100644 --- a/drivers/isdn/gigaset/common.c +++ b/drivers/isdn/gigaset/common.c | |||
| @@ -14,10 +14,8 @@ | |||
| 14 | */ | 14 | */ |
| 15 | 15 | ||
| 16 | #include "gigaset.h" | 16 | #include "gigaset.h" |
| 17 | #include <linux/ctype.h> | ||
| 18 | #include <linux/module.h> | 17 | #include <linux/module.h> |
| 19 | #include <linux/moduleparam.h> | 18 | #include <linux/moduleparam.h> |
| 20 | #include <linux/slab.h> | ||
| 21 | 19 | ||
| 22 | /* Version Information */ | 20 | /* Version Information */ |
| 23 | #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" | 21 | #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" |
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h index 9ef5b0463fd5..05947f9c1849 100644 --- a/drivers/isdn/gigaset/gigaset.h +++ b/drivers/isdn/gigaset/gigaset.h | |||
| @@ -20,11 +20,12 @@ | |||
| 20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 21 | 21 | ||
| 22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
| 23 | #include <linux/sched.h> | ||
| 23 | #include <linux/compiler.h> | 24 | #include <linux/compiler.h> |
| 24 | #include <linux/types.h> | 25 | #include <linux/types.h> |
| 26 | #include <linux/ctype.h> | ||
| 25 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
| 26 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
| 27 | #include <linux/usb.h> | ||
| 28 | #include <linux/skbuff.h> | 29 | #include <linux/skbuff.h> |
| 29 | #include <linux/netdevice.h> | 30 | #include <linux/netdevice.h> |
| 30 | #include <linux/ppp_defs.h> | 31 | #include <linux/ppp_defs.h> |
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c index c99fb9790a13..c22e5ace8276 100644 --- a/drivers/isdn/gigaset/i4l.c +++ b/drivers/isdn/gigaset/i4l.c | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | 15 | ||
| 16 | #include "gigaset.h" | 16 | #include "gigaset.h" |
| 17 | #include <linux/isdnif.h> | 17 | #include <linux/isdnif.h> |
| 18 | #include <linux/slab.h> | ||
| 19 | 18 | ||
| 20 | #define HW_HDR_LEN 2 /* Header size used to store ack info */ | 19 | #define HW_HDR_LEN 2 /* Header size used to store ack info */ |
| 21 | 20 | ||
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c index f0dc6c9cc283..c9f28dd40d5c 100644 --- a/drivers/isdn/gigaset/interface.c +++ b/drivers/isdn/gigaset/interface.c | |||
| @@ -13,7 +13,6 @@ | |||
| 13 | 13 | ||
| 14 | #include "gigaset.h" | 14 | #include "gigaset.h" |
| 15 | #include <linux/gigaset_dev.h> | 15 | #include <linux/gigaset_dev.h> |
| 16 | #include <linux/tty.h> | ||
| 17 | #include <linux/tty_flip.h> | 16 | #include <linux/tty_flip.h> |
| 18 | 17 | ||
| 19 | /*** our ioctls ***/ | 18 | /*** our ioctls ***/ |
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c index b69f73a0668f..b943efbff44d 100644 --- a/drivers/isdn/gigaset/proc.c +++ b/drivers/isdn/gigaset/proc.c | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | */ | 14 | */ |
| 15 | 15 | ||
| 16 | #include "gigaset.h" | 16 | #include "gigaset.h" |
| 17 | #include <linux/ctype.h> | ||
| 18 | 17 | ||
| 19 | static ssize_t show_cidmode(struct device *dev, | 18 | static ssize_t show_cidmode(struct device *dev, |
| 20 | struct device_attribute *attr, char *buf) | 19 | struct device_attribute *attr, char *buf) |
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index 8b0afd203a07..e96c0586886c 100644 --- a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c | |||
| @@ -11,13 +11,10 @@ | |||
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #include "gigaset.h" | 13 | #include "gigaset.h" |
| 14 | |||
| 15 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 16 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
| 17 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
| 18 | #include <linux/tty.h> | ||
| 19 | #include <linux/completion.h> | 17 | #include <linux/completion.h> |
| 20 | #include <linux/slab.h> | ||
| 21 | 18 | ||
| 22 | /* Version Information */ | 19 | /* Version Information */ |
| 23 | #define DRIVER_AUTHOR "Tilman Schmidt" | 20 | #define DRIVER_AUTHOR "Tilman Schmidt" |
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index 9430a2bbb523..76dbb20f3065 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c | |||
| @@ -16,10 +16,6 @@ | |||
| 16 | */ | 16 | */ |
| 17 | 17 | ||
| 18 | #include "gigaset.h" | 18 | #include "gigaset.h" |
| 19 | |||
| 20 | #include <linux/errno.h> | ||
| 21 | #include <linux/init.h> | ||
| 22 | #include <linux/slab.h> | ||
| 23 | #include <linux/usb.h> | 19 | #include <linux/usb.h> |
| 24 | #include <linux/module.h> | 20 | #include <linux/module.h> |
| 25 | #include <linux/moduleparam.h> | 21 | #include <linux/moduleparam.h> |
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index 07090f379c63..69c84a1d88ea 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c | |||
| @@ -178,7 +178,7 @@ static void set_status(struct virtio_device *vdev, u8 status) | |||
| 178 | 178 | ||
| 179 | /* We set the status. */ | 179 | /* We set the status. */ |
| 180 | to_lgdev(vdev)->desc->status = status; | 180 | to_lgdev(vdev)->desc->status = status; |
| 181 | kvm_hypercall1(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset); | 181 | hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0); |
| 182 | } | 182 | } |
| 183 | 183 | ||
| 184 | static void lg_set_status(struct virtio_device *vdev, u8 status) | 184 | static void lg_set_status(struct virtio_device *vdev, u8 status) |
| @@ -229,7 +229,7 @@ static void lg_notify(struct virtqueue *vq) | |||
| 229 | */ | 229 | */ |
| 230 | struct lguest_vq_info *lvq = vq->priv; | 230 | struct lguest_vq_info *lvq = vq->priv; |
| 231 | 231 | ||
| 232 | kvm_hypercall1(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT); | 232 | hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0); |
| 233 | } | 233 | } |
| 234 | 234 | ||
| 235 | /* An extern declaration inside a C file is bad form. Don't do it. */ | 235 | /* An extern declaration inside a C file is bad form. Don't do it. */ |
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index fb2b7ef7868e..b4eb675a807e 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c | |||
| @@ -288,6 +288,18 @@ static int emulate_insn(struct lg_cpu *cpu) | |||
| 288 | insn = lgread(cpu, physaddr, u8); | 288 | insn = lgread(cpu, physaddr, u8); |
| 289 | 289 | ||
| 290 | /* | 290 | /* |
| 291 | * Around 2.6.33, the kernel started using an emulation for the | ||
| 292 | * cmpxchg8b instruction in early boot on many configurations. This | ||
| 293 | * code isn't paravirtualized, and it tries to disable interrupts. | ||
| 294 | * Ignore it, which will Mostly Work. | ||
| 295 | */ | ||
| 296 | if (insn == 0xfa) { | ||
| 297 | /* "cli", or Clear Interrupt Enable instruction. Skip it. */ | ||
| 298 | cpu->regs->eip++; | ||
| 299 | return 1; | ||
| 300 | } | ||
| 301 | |||
| 302 | /* | ||
| 291 | * 0x66 is an "operand prefix". It means it's using the upper 16 bits | 303 | * 0x66 is an "operand prefix". It means it's using the upper 16 bits |
| 292 | * of the eax register. | 304 | * of the eax register. |
| 293 | */ | 305 | */ |
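The new cmpxchg8b/cli handling above is an instance of trap-and-skip emulation: when the guest faults on a privileged opcode the hypervisor is happy to ignore, emulation just advances the saved instruction pointer and resumes. A minimal sketch under that assumption (the struct and names below are illustrative, not the lguest types):

struct example_guest_regs {
	unsigned long eip;
};

static int example_skip_insn(struct example_guest_regs *regs, unsigned char insn)
{
	if (insn == 0xfa) {		/* "cli": pretend it worked */
		regs->eip += 1;		/* one-byte opcode, no operands */
		return 1;		/* handled */
	}
	return 0;			/* not handled, fall back to a fault */
}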
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c index c092354591bb..ce8897933a84 100644 --- a/drivers/macintosh/windfarm_core.c +++ b/drivers/macintosh/windfarm_core.c | |||
| @@ -210,6 +210,7 @@ int wf_register_control(struct wf_control *new_ct) | |||
| 210 | kref_init(&new_ct->ref); | 210 | kref_init(&new_ct->ref); |
| 211 | list_add(&new_ct->link, &wf_controls); | 211 | list_add(&new_ct->link, &wf_controls); |
| 212 | 212 | ||
| 213 | sysfs_attr_init(&new_ct->attr.attr); | ||
| 213 | new_ct->attr.attr.name = new_ct->name; | 214 | new_ct->attr.attr.name = new_ct->name; |
| 214 | new_ct->attr.attr.mode = 0644; | 215 | new_ct->attr.attr.mode = 0644; |
| 215 | new_ct->attr.show = wf_show_control; | 216 | new_ct->attr.show = wf_show_control; |
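This hunk, and the pvrusb2 hunks that follow, add sysfs_attr_init() before registering attributes that live in dynamically allocated memory; the call initializes the attribute's lockdep class key. A hedged sketch of the pattern with illustrative names:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "example\n");
}

static int example_add_attr(struct device *dev, struct device_attribute *attr)
{
	/* Required for attributes not declared statically. */
	sysfs_attr_init(&attr->attr);
	attr->attr.name = "example";
	attr->attr.mode = 0444;
	attr->show = example_show;
	attr->store = NULL;
	return device_create_file(dev, attr);
}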
diff --git a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c index 6c23456e0bda..71f50565f637 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c +++ b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c | |||
| @@ -423,10 +423,12 @@ static void pvr2_sysfs_add_debugifc(struct pvr2_sysfs *sfp) | |||
| 423 | 423 | ||
| 424 | dip = kzalloc(sizeof(*dip),GFP_KERNEL); | 424 | dip = kzalloc(sizeof(*dip),GFP_KERNEL); |
| 425 | if (!dip) return; | 425 | if (!dip) return; |
| 426 | sysfs_attr_init(&dip->attr_debugcmd.attr); | ||
| 426 | dip->attr_debugcmd.attr.name = "debugcmd"; | 427 | dip->attr_debugcmd.attr.name = "debugcmd"; |
| 427 | dip->attr_debugcmd.attr.mode = S_IRUGO|S_IWUSR|S_IWGRP; | 428 | dip->attr_debugcmd.attr.mode = S_IRUGO|S_IWUSR|S_IWGRP; |
| 428 | dip->attr_debugcmd.show = debugcmd_show; | 429 | dip->attr_debugcmd.show = debugcmd_show; |
| 429 | dip->attr_debugcmd.store = debugcmd_store; | 430 | dip->attr_debugcmd.store = debugcmd_store; |
| 431 | sysfs_attr_init(&dip->attr_debuginfo.attr); | ||
| 430 | dip->attr_debuginfo.attr.name = "debuginfo"; | 432 | dip->attr_debuginfo.attr.name = "debuginfo"; |
| 431 | dip->attr_debuginfo.attr.mode = S_IRUGO; | 433 | dip->attr_debuginfo.attr.mode = S_IRUGO; |
| 432 | dip->attr_debuginfo.show = debuginfo_show; | 434 | dip->attr_debuginfo.show = debuginfo_show; |
| @@ -644,6 +646,7 @@ static void class_dev_create(struct pvr2_sysfs *sfp, | |||
| 644 | return; | 646 | return; |
| 645 | } | 647 | } |
| 646 | 648 | ||
| 649 | sysfs_attr_init(&sfp->attr_v4l_minor_number.attr); | ||
| 647 | sfp->attr_v4l_minor_number.attr.name = "v4l_minor_number"; | 650 | sfp->attr_v4l_minor_number.attr.name = "v4l_minor_number"; |
| 648 | sfp->attr_v4l_minor_number.attr.mode = S_IRUGO; | 651 | sfp->attr_v4l_minor_number.attr.mode = S_IRUGO; |
| 649 | sfp->attr_v4l_minor_number.show = v4l_minor_number_show; | 652 | sfp->attr_v4l_minor_number.show = v4l_minor_number_show; |
| @@ -658,6 +661,7 @@ static void class_dev_create(struct pvr2_sysfs *sfp, | |||
| 658 | sfp->v4l_minor_number_created_ok = !0; | 661 | sfp->v4l_minor_number_created_ok = !0; |
| 659 | } | 662 | } |
| 660 | 663 | ||
| 664 | sysfs_attr_init(&sfp->attr_v4l_radio_minor_number.attr); | ||
| 661 | sfp->attr_v4l_radio_minor_number.attr.name = "v4l_radio_minor_number"; | 665 | sfp->attr_v4l_radio_minor_number.attr.name = "v4l_radio_minor_number"; |
| 662 | sfp->attr_v4l_radio_minor_number.attr.mode = S_IRUGO; | 666 | sfp->attr_v4l_radio_minor_number.attr.mode = S_IRUGO; |
| 663 | sfp->attr_v4l_radio_minor_number.show = v4l_radio_minor_number_show; | 667 | sfp->attr_v4l_radio_minor_number.show = v4l_radio_minor_number_show; |
| @@ -672,6 +676,7 @@ static void class_dev_create(struct pvr2_sysfs *sfp, | |||
| 672 | sfp->v4l_radio_minor_number_created_ok = !0; | 676 | sfp->v4l_radio_minor_number_created_ok = !0; |
| 673 | } | 677 | } |
| 674 | 678 | ||
| 679 | sysfs_attr_init(&sfp->attr_unit_number.attr); | ||
| 675 | sfp->attr_unit_number.attr.name = "unit_number"; | 680 | sfp->attr_unit_number.attr.name = "unit_number"; |
| 676 | sfp->attr_unit_number.attr.mode = S_IRUGO; | 681 | sfp->attr_unit_number.attr.mode = S_IRUGO; |
| 677 | sfp->attr_unit_number.show = unit_number_show; | 682 | sfp->attr_unit_number.show = unit_number_show; |
| @@ -685,6 +690,7 @@ static void class_dev_create(struct pvr2_sysfs *sfp, | |||
| 685 | sfp->unit_number_created_ok = !0; | 690 | sfp->unit_number_created_ok = !0; |
| 686 | } | 691 | } |
| 687 | 692 | ||
| 693 | sysfs_attr_init(&sfp->attr_bus_info.attr); | ||
| 688 | sfp->attr_bus_info.attr.name = "bus_info_str"; | 694 | sfp->attr_bus_info.attr.name = "bus_info_str"; |
| 689 | sfp->attr_bus_info.attr.mode = S_IRUGO; | 695 | sfp->attr_bus_info.attr.mode = S_IRUGO; |
| 690 | sfp->attr_bus_info.show = bus_info_show; | 696 | sfp->attr_bus_info.show = bus_info_show; |
| @@ -699,6 +705,7 @@ static void class_dev_create(struct pvr2_sysfs *sfp, | |||
| 699 | sfp->bus_info_created_ok = !0; | 705 | sfp->bus_info_created_ok = !0; |
| 700 | } | 706 | } |
| 701 | 707 | ||
| 708 | sysfs_attr_init(&sfp->attr_hdw_name.attr); | ||
| 702 | sfp->attr_hdw_name.attr.name = "device_hardware_type"; | 709 | sfp->attr_hdw_name.attr.name = "device_hardware_type"; |
| 703 | sfp->attr_hdw_name.attr.mode = S_IRUGO; | 710 | sfp->attr_hdw_name.attr.mode = S_IRUGO; |
| 704 | sfp->attr_hdw_name.show = hdw_name_show; | 711 | sfp->attr_hdw_name.show = hdw_name_show; |
| @@ -713,6 +720,7 @@ static void class_dev_create(struct pvr2_sysfs *sfp, | |||
| 713 | sfp->hdw_name_created_ok = !0; | 720 | sfp->hdw_name_created_ok = !0; |
| 714 | } | 721 | } |
| 715 | 722 | ||
| 723 | sysfs_attr_init(&sfp->attr_hdw_desc.attr); | ||
| 716 | sfp->attr_hdw_desc.attr.name = "device_hardware_description"; | 724 | sfp->attr_hdw_desc.attr.name = "device_hardware_description"; |
| 717 | sfp->attr_hdw_desc.attr.mode = S_IRUGO; | 725 | sfp->attr_hdw_desc.attr.mode = S_IRUGO; |
| 718 | sfp->attr_hdw_desc.show = hdw_desc_show; | 726 | sfp->attr_hdw_desc.show = hdw_desc_show; |
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 83f0affadcae..e9caf694c59e 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c | |||
| @@ -1179,15 +1179,10 @@ static void omap_hsmmc_detect(struct work_struct *work) | |||
| 1179 | carddetect = -ENOSYS; | 1179 | carddetect = -ENOSYS; |
| 1180 | } | 1180 | } |
| 1181 | 1181 | ||
| 1182 | if (carddetect) { | 1182 | if (carddetect) |
| 1183 | mmc_detect_change(host->mmc, (HZ * 200) / 1000); | 1183 | mmc_detect_change(host->mmc, (HZ * 200) / 1000); |
| 1184 | } else { | 1184 | else |
| 1185 | mmc_host_enable(host->mmc); | ||
| 1186 | omap_hsmmc_reset_controller_fsm(host, SRD); | ||
| 1187 | mmc_host_lazy_disable(host->mmc); | ||
| 1188 | |||
| 1189 | mmc_detect_change(host->mmc, (HZ * 50) / 1000); | 1185 | mmc_detect_change(host->mmc, (HZ * 50) / 1000); |
| 1190 | } | ||
| 1191 | } | 1186 | } |
| 1192 | 1187 | ||
| 1193 | /* | 1188 | /* |
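The simplified detect path above ends up calling mmc_detect_change() in both branches, differing only in the debounce delay. A small sketch of that shape (parameter name mirrors the patch; the semantics of carddetect are the driver's):

#include <linux/mmc/host.h>
#include <linux/jiffies.h>

static void example_card_detect(struct mmc_host *mmc, int carddetect)
{
	/* Mirror the patch: roughly 200 ms debounce when carddetect is
	 * nonzero, 50 ms otherwise, then let the MMC core rescan. */
	if (carddetect)
		mmc_detect_change(mmc, msecs_to_jiffies(200));
	else
		mmc_detect_change(mmc, msecs_to_jiffies(50));
}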
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 0ba5b8e50a7c..7b832c727f87 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
| @@ -2582,6 +2582,31 @@ config CHELSIO_T3 | |||
| 2582 | To compile this driver as a module, choose M here: the module | 2582 | To compile this driver as a module, choose M here: the module |
| 2583 | will be called cxgb3. | 2583 | will be called cxgb3. |
| 2584 | 2584 | ||
| 2585 | config CHELSIO_T4_DEPENDS | ||
| 2586 | tristate | ||
| 2587 | depends on PCI && INET | ||
| 2588 | default y | ||
| 2589 | |||
| 2590 | config CHELSIO_T4 | ||
| 2591 | tristate "Chelsio Communications T4 Ethernet support" | ||
| 2592 | depends on CHELSIO_T4_DEPENDS | ||
| 2593 | select FW_LOADER | ||
| 2594 | select MDIO | ||
| 2595 | help | ||
| 2596 | This driver supports Chelsio T4-based gigabit and 10Gb Ethernet | ||
| 2597 | adapters. | ||
| 2598 | |||
| 2599 | For general information about Chelsio and our products, visit | ||
| 2600 | our website at <http://www.chelsio.com>. | ||
| 2601 | |||
| 2602 | For customer support, please visit our customer support page at | ||
| 2603 | <http://www.chelsio.com/support.htm>. | ||
| 2604 | |||
| 2605 | Please send feedback to <linux-bugs@chelsio.com>. | ||
| 2606 | |||
| 2607 | To compile this driver as a module choose M here; the module | ||
| 2608 | will be called cxgb4. | ||
| 2609 | |||
| 2585 | config EHEA | 2610 | config EHEA |
| 2586 | tristate "eHEA Ethernet support" | 2611 | tristate "eHEA Ethernet support" |
| 2587 | depends on IBMEBUS && INET && SPARSEMEM | 2612 | depends on IBMEBUS && INET && SPARSEMEM |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 478886234c28..a583b50d9de8 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
| @@ -19,6 +19,7 @@ obj-$(CONFIG_IXGB) += ixgb/ | |||
| 19 | obj-$(CONFIG_IP1000) += ipg.o | 19 | obj-$(CONFIG_IP1000) += ipg.o |
| 20 | obj-$(CONFIG_CHELSIO_T1) += chelsio/ | 20 | obj-$(CONFIG_CHELSIO_T1) += chelsio/ |
| 21 | obj-$(CONFIG_CHELSIO_T3) += cxgb3/ | 21 | obj-$(CONFIG_CHELSIO_T3) += cxgb3/ |
| 22 | obj-$(CONFIG_CHELSIO_T4) += cxgb4/ | ||
| 22 | obj-$(CONFIG_EHEA) += ehea/ | 23 | obj-$(CONFIG_EHEA) += ehea/ |
| 23 | obj-$(CONFIG_CAN) += can/ | 24 | obj-$(CONFIG_CAN) += can/ |
| 24 | obj-$(CONFIG_BONDING) += bonding/ | 25 | obj-$(CONFIG_BONDING) += bonding/ |
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 50e6259b50e4..d0ef4ac987cd 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c | |||
| @@ -1464,8 +1464,8 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, | |||
| 1464 | 1464 | ||
| 1465 | req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT); | 1465 | req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT); |
| 1466 | req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); | 1466 | req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); |
| 1467 | req->params.offset = offset; | 1467 | req->params.offset = cpu_to_le32(offset); |
| 1468 | req->params.data_buf_size = 0x4; | 1468 | req->params.data_buf_size = cpu_to_le32(0x4); |
| 1469 | 1469 | ||
| 1470 | status = be_mcc_notify_wait(adapter); | 1470 | status = be_mcc_notify_wait(adapter); |
| 1471 | if (!status) | 1471 | if (!status) |
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 43e8032f9236..ec6ace802256 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
| @@ -807,7 +807,7 @@ static void be_rx_compl_process(struct be_adapter *adapter, | |||
| 807 | return; | 807 | return; |
| 808 | } | 808 | } |
| 809 | vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); | 809 | vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); |
| 810 | vid = be16_to_cpu(vid); | 810 | vid = swab16(vid); |
| 811 | vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); | 811 | vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); |
| 812 | } else { | 812 | } else { |
| 813 | netif_receive_skb(skb); | 813 | netif_receive_skb(skb); |
| @@ -884,7 +884,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, | |||
| 884 | napi_gro_frags(&eq_obj->napi); | 884 | napi_gro_frags(&eq_obj->napi); |
| 885 | } else { | 885 | } else { |
| 886 | vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); | 886 | vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); |
| 887 | vid = be16_to_cpu(vid); | 887 | vid = swab16(vid); |
| 888 | 888 | ||
| 889 | if (!adapter->vlan_grp || adapter->vlans_added == 0) | 889 | if (!adapter->vlan_grp || adapter->vlans_added == 0) |
| 890 | return; | 890 | return; |
| @@ -1855,7 +1855,7 @@ static bool be_flash_redboot(struct be_adapter *adapter, | |||
| 1855 | p += crc_offset; | 1855 | p += crc_offset; |
| 1856 | 1856 | ||
| 1857 | status = be_cmd_get_flash_crc(adapter, flashed_crc, | 1857 | status = be_cmd_get_flash_crc(adapter, flashed_crc, |
| 1858 | (img_start + image_size - 4)); | 1858 | (image_size - 4)); |
| 1859 | if (status) { | 1859 | if (status) { |
| 1860 | dev_err(&adapter->pdev->dev, | 1860 | dev_err(&adapter->pdev->dev, |
| 1861 | "could not get crc from flash, not flashing redboot\n"); | 1861 | "could not get crc from flash, not flashing redboot\n"); |
| @@ -1991,7 +1991,7 @@ int be_load_fw(struct be_adapter *adapter, u8 *func) | |||
| 1991 | struct flash_file_hdr_g3 *fhdr3; | 1991 | struct flash_file_hdr_g3 *fhdr3; |
| 1992 | struct image_hdr *img_hdr_ptr = NULL; | 1992 | struct image_hdr *img_hdr_ptr = NULL; |
| 1993 | struct be_dma_mem flash_cmd; | 1993 | struct be_dma_mem flash_cmd; |
| 1994 | int status, i = 0; | 1994 | int status, i = 0, num_imgs = 0; |
| 1995 | const u8 *p; | 1995 | const u8 *p; |
| 1996 | 1996 | ||
| 1997 | strcpy(fw_file, func); | 1997 | strcpy(fw_file, func); |
| @@ -2017,15 +2017,14 @@ int be_load_fw(struct be_adapter *adapter, u8 *func) | |||
| 2017 | if ((adapter->generation == BE_GEN3) && | 2017 | if ((adapter->generation == BE_GEN3) && |
| 2018 | (get_ufigen_type(fhdr) == BE_GEN3)) { | 2018 | (get_ufigen_type(fhdr) == BE_GEN3)) { |
| 2019 | fhdr3 = (struct flash_file_hdr_g3 *) fw->data; | 2019 | fhdr3 = (struct flash_file_hdr_g3 *) fw->data; |
| 2020 | for (i = 0; i < fhdr3->num_imgs; i++) { | 2020 | num_imgs = le32_to_cpu(fhdr3->num_imgs); |
| 2021 | for (i = 0; i < num_imgs; i++) { | ||
| 2021 | img_hdr_ptr = (struct image_hdr *) (fw->data + | 2022 | img_hdr_ptr = (struct image_hdr *) (fw->data + |
| 2022 | (sizeof(struct flash_file_hdr_g3) + | 2023 | (sizeof(struct flash_file_hdr_g3) + |
| 2023 | i * sizeof(struct image_hdr))); | 2024 | i * sizeof(struct image_hdr))); |
| 2024 | if (img_hdr_ptr->imageid == 1) { | 2025 | if (le32_to_cpu(img_hdr_ptr->imageid) == 1) |
| 2025 | status = be_flash_data(adapter, fw, | 2026 | status = be_flash_data(adapter, fw, &flash_cmd, |
| 2026 | &flash_cmd, fhdr3->num_imgs); | 2027 | num_imgs); |
| 2027 | } | ||
| 2028 | |||
| 2029 | } | 2028 | } |
| 2030 | } else if ((adapter->generation == BE_GEN2) && | 2029 | } else if ((adapter->generation == BE_GEN2) && |
| 2031 | (get_ufigen_type(fhdr) == BE_GEN2)) { | 2030 | (get_ufigen_type(fhdr) == BE_GEN2)) { |
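The benet changes above (cpu_to_le32/le32_to_cpu, swab16) are all about byte order: fields in firmware and DMA structures are little-endian on the wire, so they must be converted explicitly in both directions rather than assumed to be in host order. A hedged illustration; the struct below is an example layout, not the real firmware request:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_fw_req {
	__le32 offset;
	__le32 data_buf_size;
};

static void example_fill_req(struct example_fw_req *req, u32 offset)
{
	req->offset = cpu_to_le32(offset);	/* CPU -> wire order */
	req->data_buf_size = cpu_to_le32(4);
}

static u32 example_read_offset(const struct example_fw_req *req)
{
	return le32_to_cpu(req->offset);	/* wire -> CPU order */
}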
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 5b92fbff431d..0075514bf32f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
| @@ -4156,7 +4156,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev | |||
| 4156 | * send the join/membership reports. The curr_active_slave found | 4156 | * send the join/membership reports. The curr_active_slave found |
| 4157 | * will send all of this type of traffic. | 4157 | * will send all of this type of traffic. |
| 4158 | */ | 4158 | */ |
| 4159 | if ((iph->protocol == htons(IPPROTO_IGMP)) && | 4159 | if ((iph->protocol == IPPROTO_IGMP) && |
| 4160 | (skb->protocol == htons(ETH_P_IP))) { | 4160 | (skb->protocol == htons(ETH_P_IP))) { |
| 4161 | 4161 | ||
| 4162 | read_lock(&bond->curr_slave_lock); | 4162 | read_lock(&bond->curr_slave_lock); |
| @@ -4450,6 +4450,14 @@ static const struct net_device_ops bond_netdev_ops = { | |||
| 4450 | .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, | 4450 | .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, |
| 4451 | }; | 4451 | }; |
| 4452 | 4452 | ||
| 4453 | static void bond_destructor(struct net_device *bond_dev) | ||
| 4454 | { | ||
| 4455 | struct bonding *bond = netdev_priv(bond_dev); | ||
| 4456 | if (bond->wq) | ||
| 4457 | destroy_workqueue(bond->wq); | ||
| 4458 | free_netdev(bond_dev); | ||
| 4459 | } | ||
| 4460 | |||
| 4453 | static void bond_setup(struct net_device *bond_dev) | 4461 | static void bond_setup(struct net_device *bond_dev) |
| 4454 | { | 4462 | { |
| 4455 | struct bonding *bond = netdev_priv(bond_dev); | 4463 | struct bonding *bond = netdev_priv(bond_dev); |
| @@ -4470,7 +4478,7 @@ static void bond_setup(struct net_device *bond_dev) | |||
| 4470 | bond_dev->ethtool_ops = &bond_ethtool_ops; | 4478 | bond_dev->ethtool_ops = &bond_ethtool_ops; |
| 4471 | bond_set_mode_ops(bond, bond->params.mode); | 4479 | bond_set_mode_ops(bond, bond->params.mode); |
| 4472 | 4480 | ||
| 4473 | bond_dev->destructor = free_netdev; | 4481 | bond_dev->destructor = bond_destructor; |
| 4474 | 4482 | ||
| 4475 | /* Initialize the device options */ | 4483 | /* Initialize the device options */ |
| 4476 | bond_dev->tx_queue_len = 0; | 4484 | bond_dev->tx_queue_len = 0; |
| @@ -4542,9 +4550,6 @@ static void bond_uninit(struct net_device *bond_dev) | |||
| 4542 | 4550 | ||
| 4543 | bond_remove_proc_entry(bond); | 4551 | bond_remove_proc_entry(bond); |
| 4544 | 4552 | ||
| 4545 | if (bond->wq) | ||
| 4546 | destroy_workqueue(bond->wq); | ||
| 4547 | |||
| 4548 | netif_addr_lock_bh(bond_dev); | 4553 | netif_addr_lock_bh(bond_dev); |
| 4549 | bond_mc_list_destroy(bond); | 4554 | bond_mc_list_destroy(bond); |
| 4550 | netif_addr_unlock_bh(bond_dev); | 4555 | netif_addr_unlock_bh(bond_dev); |
| @@ -4956,8 +4961,8 @@ int bond_create(struct net *net, const char *name) | |||
| 4956 | bond_setup); | 4961 | bond_setup); |
| 4957 | if (!bond_dev) { | 4962 | if (!bond_dev) { |
| 4958 | pr_err("%s: eek! can't alloc netdev!\n", name); | 4963 | pr_err("%s: eek! can't alloc netdev!\n", name); |
| 4959 | res = -ENOMEM; | 4964 | rtnl_unlock(); |
| 4960 | goto out; | 4965 | return -ENOMEM; |
| 4961 | } | 4966 | } |
| 4962 | 4967 | ||
| 4963 | dev_net_set(bond_dev, net); | 4968 | dev_net_set(bond_dev, net); |
| @@ -4966,19 +4971,16 @@ int bond_create(struct net *net, const char *name) | |||
| 4966 | if (!name) { | 4971 | if (!name) { |
| 4967 | res = dev_alloc_name(bond_dev, "bond%d"); | 4972 | res = dev_alloc_name(bond_dev, "bond%d"); |
| 4968 | if (res < 0) | 4973 | if (res < 0) |
| 4969 | goto out_netdev; | 4974 | goto out; |
| 4970 | } | 4975 | } |
| 4971 | 4976 | ||
| 4972 | res = register_netdevice(bond_dev); | 4977 | res = register_netdevice(bond_dev); |
| 4973 | if (res < 0) | ||
| 4974 | goto out_netdev; | ||
| 4975 | 4978 | ||
| 4976 | out: | 4979 | out: |
| 4977 | rtnl_unlock(); | 4980 | rtnl_unlock(); |
| 4981 | if (res < 0) | ||
| 4982 | bond_destructor(bond_dev); | ||
| 4978 | return res; | 4983 | return res; |
| 4979 | out_netdev: | ||
| 4980 | free_netdev(bond_dev); | ||
| 4981 | goto out; | ||
| 4982 | } | 4984 | } |
| 4983 | 4985 | ||
| 4984 | static int __net_init bond_net_init(struct net *net) | 4986 | static int __net_init bond_net_init(struct net *net) |
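The bonding change above moves private-resource teardown into the netdev destructor, so both normal unregistration and the error paths of the create routine free everything through one place. A minimal sketch of that pattern with illustrative names:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct example_priv {
	struct workqueue_struct *wq;
};

static void example_destructor(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* Tear down private resources before freeing the netdev. */
	if (priv->wq)
		destroy_workqueue(priv->wq);
	free_netdev(dev);
}

/* Wired up at setup time, mirroring bond_setup():
 *	dev->destructor = example_destructor;
 * so register_netdevice() failures are cleaned up the same way. */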
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 9781942992e9..4b451a7c03e9 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c | |||
| @@ -2334,13 +2334,13 @@ static int cnic_service_bnx2x(void *data, void *status_blk) | |||
| 2334 | struct cnic_local *cp = dev->cnic_priv; | 2334 | struct cnic_local *cp = dev->cnic_priv; |
| 2335 | u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; | 2335 | u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; |
| 2336 | 2336 | ||
| 2337 | prefetch(cp->status_blk.bnx2x); | 2337 | if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { |
| 2338 | prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); | 2338 | prefetch(cp->status_blk.bnx2x); |
| 2339 | prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); | ||
| 2339 | 2340 | ||
| 2340 | if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) | ||
| 2341 | tasklet_schedule(&cp->cnic_irq_task); | 2341 | tasklet_schedule(&cp->cnic_irq_task); |
| 2342 | 2342 | cnic_chk_pkt_rings(cp); | |
| 2343 | cnic_chk_pkt_rings(cp); | 2343 | } |
| 2344 | 2344 | ||
| 2345 | return 0; | 2345 | return 0; |
| 2346 | } | 2346 | } |
diff --git a/drivers/net/cxgb4/Makefile b/drivers/net/cxgb4/Makefile new file mode 100644 index 000000000000..498667487f52 --- /dev/null +++ b/drivers/net/cxgb4/Makefile | |||
| @@ -0,0 +1,7 @@ | |||
| 1 | # | ||
| 2 | # Chelsio T4 driver | ||
| 3 | # | ||
| 4 | |||
| 5 | obj-$(CONFIG_CHELSIO_T4) += cxgb4.o | ||
| 6 | |||
| 7 | cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o | ||
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h new file mode 100644 index 000000000000..3d8ff4889b56 --- /dev/null +++ b/drivers/net/cxgb4/cxgb4.h | |||
| @@ -0,0 +1,741 @@ | |||
| 1 | /* | ||
| 2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. | ||
| 5 | * | ||
| 6 | * This software is available to you under a choice of one of two | ||
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 8 | * General Public License (GPL) Version 2, available from the file | ||
| 9 | * COPYING in the main directory of this source tree, or the | ||
| 10 | * OpenIB.org BSD license below: | ||
| 11 | * | ||
| 12 | * Redistribution and use in source and binary forms, with or | ||
| 13 | * without modification, are permitted provided that the following | ||
| 14 | * conditions are met: | ||
| 15 | * | ||
| 16 | * - Redistributions of source code must retain the above | ||
| 17 | * copyright notice, this list of conditions and the following | ||
| 18 | * disclaimer. | ||
| 19 | * | ||
| 20 | * - Redistributions in binary form must reproduce the above | ||
| 21 | * copyright notice, this list of conditions and the following | ||
| 22 | * disclaimer in the documentation and/or other materials | ||
| 23 | * provided with the distribution. | ||
| 24 | * | ||
| 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 32 | * SOFTWARE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef __CXGB4_H__ | ||
| 36 | #define __CXGB4_H__ | ||
| 37 | |||
| 38 | #include <linux/bitops.h> | ||
| 39 | #include <linux/cache.h> | ||
| 40 | #include <linux/interrupt.h> | ||
| 41 | #include <linux/list.h> | ||
| 42 | #include <linux/netdevice.h> | ||
| 43 | #include <linux/pci.h> | ||
| 44 | #include <linux/spinlock.h> | ||
| 45 | #include <linux/timer.h> | ||
| 46 | #include <asm/io.h> | ||
| 47 | #include "cxgb4_uld.h" | ||
| 48 | #include "t4_hw.h" | ||
| 49 | |||
| 50 | #define FW_VERSION_MAJOR 1 | ||
| 51 | #define FW_VERSION_MINOR 1 | ||
| 52 | #define FW_VERSION_MICRO 0 | ||
| 53 | |||
| 54 | enum { | ||
| 55 | MAX_NPORTS = 4, /* max # of ports */ | ||
| 56 | SERNUM_LEN = 16, /* Serial # length */ | ||
| 57 | EC_LEN = 16, /* E/C length */ | ||
| 58 | ID_LEN = 16, /* ID length */ | ||
| 59 | }; | ||
| 60 | |||
| 61 | enum { | ||
| 62 | MEM_EDC0, | ||
| 63 | MEM_EDC1, | ||
| 64 | MEM_MC | ||
| 65 | }; | ||
| 66 | |||
| 67 | enum dev_master { | ||
| 68 | MASTER_CANT, | ||
| 69 | MASTER_MAY, | ||
| 70 | MASTER_MUST | ||
| 71 | }; | ||
| 72 | |||
| 73 | enum dev_state { | ||
| 74 | DEV_STATE_UNINIT, | ||
| 75 | DEV_STATE_INIT, | ||
| 76 | DEV_STATE_ERR | ||
| 77 | }; | ||
| 78 | |||
| 79 | enum { | ||
| 80 | PAUSE_RX = 1 << 0, | ||
| 81 | PAUSE_TX = 1 << 1, | ||
| 82 | PAUSE_AUTONEG = 1 << 2 | ||
| 83 | }; | ||
| 84 | |||
| 85 | struct port_stats { | ||
| 86 | u64 tx_octets; /* total # of octets in good frames */ | ||
| 87 | u64 tx_frames; /* all good frames */ | ||
| 88 | u64 tx_bcast_frames; /* all broadcast frames */ | ||
| 89 | u64 tx_mcast_frames; /* all multicast frames */ | ||
| 90 | u64 tx_ucast_frames; /* all unicast frames */ | ||
| 91 | u64 tx_error_frames; /* all error frames */ | ||
| 92 | |||
| 93 | u64 tx_frames_64; /* # of Tx frames in a particular range */ | ||
| 94 | u64 tx_frames_65_127; | ||
| 95 | u64 tx_frames_128_255; | ||
| 96 | u64 tx_frames_256_511; | ||
| 97 | u64 tx_frames_512_1023; | ||
| 98 | u64 tx_frames_1024_1518; | ||
| 99 | u64 tx_frames_1519_max; | ||
| 100 | |||
| 101 | u64 tx_drop; /* # of dropped Tx frames */ | ||
| 102 | u64 tx_pause; /* # of transmitted pause frames */ | ||
| 103 | u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */ | ||
| 104 | u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */ | ||
| 105 | u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */ | ||
| 106 | u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */ | ||
| 107 | u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */ | ||
| 108 | u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */ | ||
| 109 | u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */ | ||
| 110 | u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */ | ||
| 111 | |||
| 112 | u64 rx_octets; /* total # of octets in good frames */ | ||
| 113 | u64 rx_frames; /* all good frames */ | ||
| 114 | u64 rx_bcast_frames; /* all broadcast frames */ | ||
| 115 | u64 rx_mcast_frames; /* all multicast frames */ | ||
| 116 | u64 rx_ucast_frames; /* all unicast frames */ | ||
| 117 | u64 rx_too_long; /* # of frames exceeding MTU */ | ||
| 118 | u64 rx_jabber; /* # of jabber frames */ | ||
| 119 | u64 rx_fcs_err; /* # of received frames with bad FCS */ | ||
| 120 | u64 rx_len_err; /* # of received frames with length error */ | ||
| 121 | u64 rx_symbol_err; /* symbol errors */ | ||
| 122 | u64 rx_runt; /* # of short frames */ | ||
| 123 | |||
| 124 | u64 rx_frames_64; /* # of Rx frames in a particular range */ | ||
| 125 | u64 rx_frames_65_127; | ||
| 126 | u64 rx_frames_128_255; | ||
| 127 | u64 rx_frames_256_511; | ||
| 128 | u64 rx_frames_512_1023; | ||
| 129 | u64 rx_frames_1024_1518; | ||
| 130 | u64 rx_frames_1519_max; | ||
| 131 | |||
| 132 | u64 rx_pause; /* # of received pause frames */ | ||
| 133 | u64 rx_ppp0; /* # of received PPP prio 0 frames */ | ||
| 134 | u64 rx_ppp1; /* # of received PPP prio 1 frames */ | ||
| 135 | u64 rx_ppp2; /* # of received PPP prio 2 frames */ | ||
| 136 | u64 rx_ppp3; /* # of received PPP prio 3 frames */ | ||
| 137 | u64 rx_ppp4; /* # of received PPP prio 4 frames */ | ||
| 138 | u64 rx_ppp5; /* # of received PPP prio 5 frames */ | ||
| 139 | u64 rx_ppp6; /* # of received PPP prio 6 frames */ | ||
| 140 | u64 rx_ppp7; /* # of received PPP prio 7 frames */ | ||
| 141 | |||
| 142 | u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */ | ||
| 143 | u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */ | ||
| 144 | u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */ | ||
| 145 | u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */ | ||
| 146 | u64 rx_trunc0; /* buffer-group 0 truncated packets */ | ||
| 147 | u64 rx_trunc1; /* buffer-group 1 truncated packets */ | ||
| 148 | u64 rx_trunc2; /* buffer-group 2 truncated packets */ | ||
| 149 | u64 rx_trunc3; /* buffer-group 3 truncated packets */ | ||
| 150 | }; | ||
| 151 | |||
| 152 | struct lb_port_stats { | ||
| 153 | u64 octets; | ||
| 154 | u64 frames; | ||
| 155 | u64 bcast_frames; | ||
| 156 | u64 mcast_frames; | ||
| 157 | u64 ucast_frames; | ||
| 158 | u64 error_frames; | ||
| 159 | |||
| 160 | u64 frames_64; | ||
| 161 | u64 frames_65_127; | ||
| 162 | u64 frames_128_255; | ||
| 163 | u64 frames_256_511; | ||
| 164 | u64 frames_512_1023; | ||
| 165 | u64 frames_1024_1518; | ||
| 166 | u64 frames_1519_max; | ||
| 167 | |||
| 168 | u64 drop; | ||
| 169 | |||
| 170 | u64 ovflow0; | ||
| 171 | u64 ovflow1; | ||
| 172 | u64 ovflow2; | ||
| 173 | u64 ovflow3; | ||
| 174 | u64 trunc0; | ||
| 175 | u64 trunc1; | ||
| 176 | u64 trunc2; | ||
| 177 | u64 trunc3; | ||
| 178 | }; | ||
| 179 | |||
| 180 | struct tp_tcp_stats { | ||
| 181 | u32 tcpOutRsts; | ||
| 182 | u64 tcpInSegs; | ||
| 183 | u64 tcpOutSegs; | ||
| 184 | u64 tcpRetransSegs; | ||
| 185 | }; | ||
| 186 | |||
| 187 | struct tp_err_stats { | ||
| 188 | u32 macInErrs[4]; | ||
| 189 | u32 hdrInErrs[4]; | ||
| 190 | u32 tcpInErrs[4]; | ||
| 191 | u32 tnlCongDrops[4]; | ||
| 192 | u32 ofldChanDrops[4]; | ||
| 193 | u32 tnlTxDrops[4]; | ||
| 194 | u32 ofldVlanDrops[4]; | ||
| 195 | u32 tcp6InErrs[4]; | ||
| 196 | u32 ofldNoNeigh; | ||
| 197 | u32 ofldCongDefer; | ||
| 198 | }; | ||
| 199 | |||
| 200 | struct tp_params { | ||
| 201 | unsigned int ntxchan; /* # of Tx channels */ | ||
| 202 | unsigned int tre; /* log2 of core clocks per TP tick */ | ||
| 203 | }; | ||
| 204 | |||
| 205 | struct vpd_params { | ||
| 206 | unsigned int cclk; | ||
| 207 | u8 ec[EC_LEN + 1]; | ||
| 208 | u8 sn[SERNUM_LEN + 1]; | ||
| 209 | u8 id[ID_LEN + 1]; | ||
| 210 | }; | ||
| 211 | |||
| 212 | struct pci_params { | ||
| 213 | unsigned char speed; | ||
| 214 | unsigned char width; | ||
| 215 | }; | ||
| 216 | |||
| 217 | struct adapter_params { | ||
| 218 | struct tp_params tp; | ||
| 219 | struct vpd_params vpd; | ||
| 220 | struct pci_params pci; | ||
| 221 | |||
| 222 | unsigned int fw_vers; | ||
| 223 | unsigned int tp_vers; | ||
| 224 | u8 api_vers[7]; | ||
| 225 | |||
| 226 | unsigned short mtus[NMTUS]; | ||
| 227 | unsigned short a_wnd[NCCTRL_WIN]; | ||
| 228 | unsigned short b_wnd[NCCTRL_WIN]; | ||
| 229 | |||
| 230 | unsigned char nports; /* # of ethernet ports */ | ||
| 231 | unsigned char portvec; | ||
| 232 | unsigned char rev; /* chip revision */ | ||
| 233 | unsigned char offload; | ||
| 234 | |||
| 235 | unsigned int ofldq_wr_cred; | ||
| 236 | }; | ||
| 237 | |||
| 238 | struct trace_params { | ||
| 239 | u32 data[TRACE_LEN / 4]; | ||
| 240 | u32 mask[TRACE_LEN / 4]; | ||
| 241 | unsigned short snap_len; | ||
| 242 | unsigned short min_len; | ||
| 243 | unsigned char skip_ofst; | ||
| 244 | unsigned char skip_len; | ||
| 245 | unsigned char invert; | ||
| 246 | unsigned char port; | ||
| 247 | }; | ||
| 248 | |||
| 249 | struct link_config { | ||
| 250 | unsigned short supported; /* link capabilities */ | ||
| 251 | unsigned short advertising; /* advertised capabilities */ | ||
| 252 | unsigned short requested_speed; /* speed user has requested */ | ||
| 253 | unsigned short speed; /* actual link speed */ | ||
| 254 | unsigned char requested_fc; /* flow control user has requested */ | ||
| 255 | unsigned char fc; /* actual link flow control */ | ||
| 256 | unsigned char autoneg; /* autonegotiating? */ | ||
| 257 | unsigned char link_ok; /* link up? */ | ||
| 258 | }; | ||
| 259 | |||
| 260 | #define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) | ||
| 261 | |||
| 262 | enum { | ||
| 263 | MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */ | ||
| 264 | MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */ | ||
| 265 | MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ | ||
| 266 | MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */ | ||
| 267 | }; | ||
| 268 | |||
| 269 | enum { | ||
| 270 | MAX_EGRQ = 128, /* max # of egress queues, including FLs */ | ||
| 271 | MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */ | ||
| 272 | }; | ||
| 273 | |||
| 274 | struct adapter; | ||
| 275 | struct vlan_group; | ||
| 276 | struct sge_rspq; | ||
| 277 | |||
| 278 | struct port_info { | ||
| 279 | struct adapter *adapter; | ||
| 280 | struct vlan_group *vlan_grp; | ||
| 281 | u16 viid; | ||
| 282 | s16 xact_addr_filt; /* index of exact MAC address filter */ | ||
| 283 | u16 rss_size; /* size of VI's RSS table slice */ | ||
| 284 | s8 mdio_addr; | ||
| 285 | u8 port_type; | ||
| 286 | u8 mod_type; | ||
| 287 | u8 port_id; | ||
| 288 | u8 tx_chan; | ||
| 289 | u8 lport; /* associated offload logical port */ | ||
| 290 | u8 rx_offload; /* CSO, etc */ | ||
| 291 | u8 nqsets; /* # of qsets */ | ||
| 292 | u8 first_qset; /* index of first qset */ | ||
| 293 | struct link_config link_cfg; | ||
| 294 | }; | ||
| 295 | |||
| 296 | /* port_info.rx_offload flags */ | ||
| 297 | enum { | ||
| 298 | RX_CSO = 1 << 0, | ||
| 299 | }; | ||
| 300 | |||
| 301 | struct dentry; | ||
| 302 | struct work_struct; | ||
| 303 | |||
| 304 | enum { /* adapter flags */ | ||
| 305 | FULL_INIT_DONE = (1 << 0), | ||
| 306 | USING_MSI = (1 << 1), | ||
| 307 | USING_MSIX = (1 << 2), | ||
| 308 | QUEUES_BOUND = (1 << 3), | ||
| 309 | FW_OK = (1 << 4), | ||
| 310 | }; | ||
| 311 | |||
| 312 | struct rx_sw_desc; | ||
| 313 | |||
| 314 | struct sge_fl { /* SGE free-buffer queue state */ | ||
| 315 | unsigned int avail; /* # of available Rx buffers */ | ||
| 316 | unsigned int pend_cred; /* new buffers since last FL DB ring */ | ||
| 317 | unsigned int cidx; /* consumer index */ | ||
| 318 | unsigned int pidx; /* producer index */ | ||
| 319 | unsigned long alloc_failed; /* # of times buffer allocation failed */ | ||
| 320 | unsigned long large_alloc_failed; | ||
| 321 | unsigned long starving; | ||
| 322 | /* RO fields */ | ||
| 323 | unsigned int cntxt_id; /* SGE context id for the free list */ | ||
| 324 | unsigned int size; /* capacity of free list */ | ||
| 325 | struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ | ||
| 326 | __be64 *desc; /* address of HW Rx descriptor ring */ | ||
| 327 | dma_addr_t addr; /* bus address of HW ring start */ | ||
| 328 | }; | ||
| 329 | |||
| 330 | /* A packet gather list */ | ||
| 331 | struct pkt_gl { | ||
| 332 | skb_frag_t frags[MAX_SKB_FRAGS]; | ||
| 333 | void *va; /* virtual address of first byte */ | ||
| 334 | unsigned int nfrags; /* # of fragments */ | ||
| 335 | unsigned int tot_len; /* total length of fragments */ | ||
| 336 | }; | ||
| 337 | |||
| 338 | typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp, | ||
| 339 | const struct pkt_gl *gl); | ||
| 340 | |||
| 341 | struct sge_rspq { /* state for an SGE response queue */ | ||
| 342 | struct napi_struct napi; | ||
| 343 | const __be64 *cur_desc; /* current descriptor in queue */ | ||
| 344 | unsigned int cidx; /* consumer index */ | ||
| 345 | u8 gen; /* current generation bit */ | ||
| 346 | u8 intr_params; /* interrupt holdoff parameters */ | ||
| 347 | u8 next_intr_params; /* holdoff params for next interrupt */ | ||
| 348 | u8 pktcnt_idx; /* interrupt packet threshold */ | ||
| 349 | u8 uld; /* ULD handling this queue */ | ||
| 350 | u8 idx; /* queue index within its group */ | ||
| 351 | int offset; /* offset into current Rx buffer */ | ||
| 352 | u16 cntxt_id; /* SGE context id for the response q */ | ||
| 353 | u16 abs_id; /* absolute SGE id for the response q */ | ||
| 354 | __be64 *desc; /* address of HW response ring */ | ||
| 355 | dma_addr_t phys_addr; /* physical address of the ring */ | ||
| 356 | unsigned int iqe_len; /* entry size */ | ||
| 357 | unsigned int size; /* capacity of response queue */ | ||
| 358 | struct adapter *adap; | ||
| 359 | struct net_device *netdev; /* associated net device */ | ||
| 360 | rspq_handler_t handler; | ||
| 361 | }; | ||
| 362 | |||
| 363 | struct sge_eth_stats { /* Ethernet queue statistics */ | ||
| 364 | unsigned long pkts; /* # of ethernet packets */ | ||
| 365 | unsigned long lro_pkts; /* # of LRO super packets */ | ||
| 366 | unsigned long lro_merged; /* # of wire packets merged by LRO */ | ||
| 367 | unsigned long rx_cso; /* # of Rx checksum offloads */ | ||
| 368 | unsigned long vlan_ex; /* # of Rx VLAN extractions */ | ||
| 369 | unsigned long rx_drops; /* # of packets dropped due to no mem */ | ||
| 370 | }; | ||
| 371 | |||
| 372 | struct sge_eth_rxq { /* SW Ethernet Rx queue */ | ||
| 373 | struct sge_rspq rspq; | ||
| 374 | struct sge_fl fl; | ||
| 375 | struct sge_eth_stats stats; | ||
| 376 | } ____cacheline_aligned_in_smp; | ||
| 377 | |||
| 378 | struct sge_ofld_stats { /* offload queue statistics */ | ||
| 379 | unsigned long pkts; /* # of packets */ | ||
| 380 | unsigned long imm; /* # of immediate-data packets */ | ||
| 381 | unsigned long an; /* # of asynchronous notifications */ | ||
| 382 | unsigned long nomem; /* # of responses deferred due to no mem */ | ||
| 383 | }; | ||
| 384 | |||
| 385 | struct sge_ofld_rxq { /* SW offload Rx queue */ | ||
| 386 | struct sge_rspq rspq; | ||
| 387 | struct sge_fl fl; | ||
| 388 | struct sge_ofld_stats stats; | ||
| 389 | } ____cacheline_aligned_in_smp; | ||
| 390 | |||
| 391 | struct tx_desc { | ||
| 392 | __be64 flit[8]; | ||
| 393 | }; | ||
| 394 | |||
| 395 | struct tx_sw_desc; | ||
| 396 | |||
| 397 | struct sge_txq { | ||
| 398 | unsigned int in_use; /* # of in-use Tx descriptors */ | ||
| 399 | unsigned int size; /* # of descriptors */ | ||
| 400 | unsigned int cidx; /* SW consumer index */ | ||
| 401 | unsigned int pidx; /* producer index */ | ||
| 402 | unsigned long stops; /* # of times q has been stopped */ | ||
| 403 | unsigned long restarts; /* # of queue restarts */ | ||
| 404 | unsigned int cntxt_id; /* SGE context id for the Tx q */ | ||
| 405 | struct tx_desc *desc; /* address of HW Tx descriptor ring */ | ||
| 406 | struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ | ||
| 407 | struct sge_qstat *stat; /* queue status entry */ | ||
| 408 | dma_addr_t phys_addr; /* physical address of the ring */ | ||
| 409 | }; | ||
| 410 | |||
| 411 | struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ | ||
| 412 | struct sge_txq q; | ||
| 413 | struct netdev_queue *txq; /* associated netdev TX queue */ | ||
| 414 | unsigned long tso; /* # of TSO requests */ | ||
| 415 | unsigned long tx_cso; /* # of Tx checksum offloads */ | ||
| 416 | unsigned long vlan_ins; /* # of Tx VLAN insertions */ | ||
| 417 | unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ | ||
| 418 | } ____cacheline_aligned_in_smp; | ||
| 419 | |||
| 420 | struct sge_ofld_txq { /* state for an SGE offload Tx queue */ | ||
| 421 | struct sge_txq q; | ||
| 422 | struct adapter *adap; | ||
| 423 | struct sk_buff_head sendq; /* list of backpressured packets */ | ||
| 424 | struct tasklet_struct qresume_tsk; /* restarts the queue */ | ||
| 425 | u8 full; /* the Tx ring is full */ | ||
| 426 | unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ | ||
| 427 | } ____cacheline_aligned_in_smp; | ||
| 428 | |||
| 429 | struct sge_ctrl_txq { /* state for an SGE control Tx queue */ | ||
| 430 | struct sge_txq q; | ||
| 431 | struct adapter *adap; | ||
| 432 | struct sk_buff_head sendq; /* list of backpressured packets */ | ||
| 433 | struct tasklet_struct qresume_tsk; /* restarts the queue */ | ||
| 434 | u8 full; /* the Tx ring is full */ | ||
| 435 | } ____cacheline_aligned_in_smp; | ||
| 436 | |||
| 437 | struct sge { | ||
| 438 | struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; | ||
| 439 | struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS]; | ||
| 440 | struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES]; | ||
| 441 | |||
| 442 | struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; | ||
| 443 | struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS]; | ||
| 444 | struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; | ||
| 445 | struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; | ||
| 446 | |||
| 447 | struct sge_rspq intrq ____cacheline_aligned_in_smp; | ||
| 448 | spinlock_t intrq_lock; | ||
| 449 | |||
| 450 | u16 max_ethqsets; /* # of available Ethernet queue sets */ | ||
| 451 | u16 ethqsets; /* # of active Ethernet queue sets */ | ||
| 452 | u16 ethtxq_rover; /* Tx queue to clean up next */ | ||
| 453 | u16 ofldqsets; /* # of active offload queue sets */ | ||
| 454 | u16 rdmaqs; /* # of available RDMA Rx queues */ | ||
| 455 | u16 ofld_rxq[MAX_OFLD_QSETS]; | ||
| 456 | u16 rdma_rxq[NCHAN]; | ||
| 457 | u16 timer_val[SGE_NTIMERS]; | ||
| 458 | u8 counter_val[SGE_NCOUNTERS]; | ||
| 459 | unsigned int starve_thres; | ||
| 460 | u8 idma_state[2]; | ||
| 461 | void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */ | ||
| 462 | struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */ | ||
| 463 | DECLARE_BITMAP(starving_fl, MAX_EGRQ); | ||
| 464 | DECLARE_BITMAP(txq_maperr, MAX_EGRQ); | ||
| 465 | struct timer_list rx_timer; /* refills starving FLs */ | ||
| 466 | struct timer_list tx_timer; /* checks Tx queues */ | ||
| 467 | }; | ||
| 468 | |||
| 469 | #define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) | ||
| 470 | #define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++) | ||
| 471 | #define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++) | ||
| 472 | |||
| 473 | struct l2t_data; | ||
| 474 | |||
| 475 | struct adapter { | ||
| 476 | void __iomem *regs; | ||
| 477 | struct pci_dev *pdev; | ||
| 478 | struct device *pdev_dev; | ||
| 479 | unsigned long registered_device_map; | ||
| 480 | unsigned long open_device_map; | ||
| 481 | unsigned long flags; | ||
| 482 | |||
| 483 | const char *name; | ||
| 484 | int msg_enable; | ||
| 485 | |||
| 486 | struct adapter_params params; | ||
| 487 | struct cxgb4_virt_res vres; | ||
| 488 | unsigned int swintr; | ||
| 489 | |||
| 490 | unsigned int wol; | ||
| 491 | |||
| 492 | struct { | ||
| 493 | unsigned short vec; | ||
| 494 | char desc[14]; | ||
| 495 | } msix_info[MAX_INGQ + 1]; | ||
| 496 | |||
| 497 | struct sge sge; | ||
| 498 | |||
| 499 | struct net_device *port[MAX_NPORTS]; | ||
| 500 | u8 chan_map[NCHAN]; /* channel -> port map */ | ||
| 501 | |||
| 502 | struct l2t_data *l2t; | ||
| 503 | void *uld_handle[CXGB4_ULD_MAX]; | ||
| 504 | struct list_head list_node; | ||
| 505 | |||
| 506 | struct tid_info tids; | ||
| 507 | void **tid_release_head; | ||
| 508 | spinlock_t tid_release_lock; | ||
| 509 | struct work_struct tid_release_task; | ||
| 510 | bool tid_release_task_busy; | ||
| 511 | |||
| 512 | struct dentry *debugfs_root; | ||
| 513 | |||
| 514 | spinlock_t stats_lock; | ||
| 515 | }; | ||
| 516 | |||
| 517 | static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) | ||
| 518 | { | ||
| 519 | return readl(adap->regs + reg_addr); | ||
| 520 | } | ||
| 521 | |||
| 522 | static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val) | ||
| 523 | { | ||
| 524 | writel(val, adap->regs + reg_addr); | ||
| 525 | } | ||
| 526 | |||
| 527 | #ifndef readq | ||
| 528 | static inline u64 readq(const volatile void __iomem *addr) | ||
| 529 | { | ||
| 530 | return readl(addr) + ((u64)readl(addr + 4) << 32); | ||
| 531 | } | ||
| 532 | |||
| 533 | static inline void writeq(u64 val, volatile void __iomem *addr) | ||
| 534 | { | ||
| 535 | writel(val, addr); | ||
| 536 | writel(val >> 32, addr + 4); | ||
| 537 | } | ||
| 538 | #endif | ||
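When the architecture does not provide readq()/writeq(), the fallback above builds a 64-bit register access out of two 32-bit MMIO accesses: the low word at addr and the high word at addr + 4. A standalone sketch of the same split-and-recombine arithmetic on plain integers (no MMIO, not the kernel code itself):

    #include <stdio.h>
    #include <stdint.h>

    /* Combine two 32-bit halves the way the readq() fallback does:
     * low word first, high word shifted into the upper 32 bits. */
    static uint64_t combine64(uint32_t lo, uint32_t hi)
    {
        return (uint64_t)lo + ((uint64_t)hi << 32);
    }

    /* Split a 64-bit value the way the writeq() fallback does. */
    static void split64(uint64_t val, uint32_t *lo, uint32_t *hi)
    {
        *lo = (uint32_t)val;            /* written to addr     */
        *hi = (uint32_t)(val >> 32);    /* written to addr + 4 */
    }

    int main(void)
    {
        uint32_t lo, hi;

        split64(0x1122334455667788ULL, &lo, &hi);
        printf("lo=%#x hi=%#x recombined=%#llx\n", (unsigned)lo, (unsigned)hi,
               (unsigned long long)combine64(lo, hi));
        return 0;
    }

Because the two halves are accessed separately, the fallback access is not atomic; presumably the driver only relies on these 64-bit helpers where that is acceptable.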
| 539 | |||
| 540 | static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr) | ||
| 541 | { | ||
| 542 | return readq(adap->regs + reg_addr); | ||
| 543 | } | ||
| 544 | |||
| 545 | static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val) | ||
| 546 | { | ||
| 547 | writeq(val, adap->regs + reg_addr); | ||
| 548 | } | ||
| 549 | |||
| 550 | /** | ||
| 551 | * netdev2pinfo - return the port_info structure associated with a net_device | ||
| 552 | * @dev: the netdev | ||
| 553 | * | ||
| 554 | * Return the struct port_info associated with a net_device | ||
| 555 | */ | ||
| 556 | static inline struct port_info *netdev2pinfo(const struct net_device *dev) | ||
| 557 | { | ||
| 558 | return netdev_priv(dev); | ||
| 559 | } | ||
| 560 | |||
| 561 | /** | ||
| 562 | * adap2pinfo - return the port_info of a port | ||
| 563 | * @adap: the adapter | ||
| 564 | * @idx: the port index | ||
| 565 | * | ||
| 566 | * Return the port_info structure for the port of the given index. | ||
| 567 | */ | ||
| 568 | static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) | ||
| 569 | { | ||
| 570 | return netdev_priv(adap->port[idx]); | ||
| 571 | } | ||
| 572 | |||
| 573 | /** | ||
| 574 | * netdev2adap - return the adapter structure associated with a net_device | ||
| 575 | * @dev: the netdev | ||
| 576 | * | ||
| 577 | * Return the struct adapter associated with a net_device | ||
| 578 | */ | ||
| 579 | static inline struct adapter *netdev2adap(const struct net_device *dev) | ||
| 580 | { | ||
| 581 | return netdev2pinfo(dev)->adapter; | ||
| 582 | } | ||
| 583 | |||
| 584 | void t4_os_portmod_changed(const struct adapter *adap, int port_id); | ||
| 585 | void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); | ||
| 586 | |||
| 587 | void *t4_alloc_mem(size_t size); | ||
| 588 | void t4_free_mem(void *addr); | ||
| 589 | |||
| 590 | void t4_free_sge_resources(struct adapter *adap); | ||
| 591 | irq_handler_t t4_intr_handler(struct adapter *adap); | ||
| 592 | netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev); | ||
| 593 | int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, | ||
| 594 | const struct pkt_gl *gl); | ||
| 595 | int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb); | ||
| 596 | int t4_ofld_send(struct adapter *adap, struct sk_buff *skb); | ||
| 597 | int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, | ||
| 598 | struct net_device *dev, int intr_idx, | ||
| 599 | struct sge_fl *fl, rspq_handler_t hnd); | ||
| 600 | int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, | ||
| 601 | struct net_device *dev, struct netdev_queue *netdevq, | ||
| 602 | unsigned int iqid); | ||
| 603 | int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, | ||
| 604 | struct net_device *dev, unsigned int iqid, | ||
| 605 | unsigned int cmplqid); | ||
| 606 | int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, | ||
| 607 | struct net_device *dev, unsigned int iqid); | ||
| 608 | irqreturn_t t4_sge_intr_msix(int irq, void *cookie); | ||
| 609 | void t4_sge_init(struct adapter *adap); | ||
| 610 | void t4_sge_start(struct adapter *adap); | ||
| 611 | void t4_sge_stop(struct adapter *adap); | ||
| 612 | |||
| 613 | #define for_each_port(adapter, iter) \ | ||
| 614 | for (iter = 0; iter < (adapter)->params.nports; ++iter) | ||
| 615 | |||
| 616 | static inline unsigned int core_ticks_per_usec(const struct adapter *adap) | ||
| 617 | { | ||
| 618 | return adap->params.vpd.cclk / 1000; | ||
| 619 | } | ||
| 620 | |||
| 621 | static inline unsigned int us_to_core_ticks(const struct adapter *adap, | ||
| 622 | unsigned int us) | ||
| 623 | { | ||
| 624 | return (us * adap->params.vpd.cclk) / 1000; | ||
| 625 | } | ||
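core_ticks_per_usec() and us_to_core_ticks() only work out if params.vpd.cclk holds the core clock in kHz, so that dividing by 1000 gives ticks per microsecond. A small worked example under that assumption (the 250 MHz figure is made up):

    #include <stdio.h>

    /* Assumes cclk_khz is the core clock in kHz (250000 would be 250 MHz). */
    static unsigned int us_to_core_ticks(unsigned int cclk_khz, unsigned int us)
    {
        return (us * cclk_khz) / 1000;
    }

    int main(void)
    {
        unsigned int cclk = 250000;     /* hypothetical 250 MHz core clock */

        printf("ticks per usec = %u\n", cclk / 1000);                   /* 250   */
        printf("100 usec = %u ticks\n", us_to_core_ticks(cclk, 100));   /* 25000 */
        return 0;
    }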
| 626 | |||
| 627 | void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, | ||
| 628 | u32 val); | ||
| 629 | |||
| 630 | int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, | ||
| 631 | void *rpl, bool sleep_ok); | ||
| 632 | |||
| 633 | static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd, | ||
| 634 | int size, void *rpl) | ||
| 635 | { | ||
| 636 | return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true); | ||
| 637 | } | ||
| 638 | |||
| 639 | static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, | ||
| 640 | int size, void *rpl) | ||
| 641 | { | ||
| 642 | return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); | ||
| 643 | } | ||
| 644 | |||
| 645 | void t4_intr_enable(struct adapter *adapter); | ||
| 646 | void t4_intr_disable(struct adapter *adapter); | ||
| 647 | void t4_intr_clear(struct adapter *adapter); | ||
| 648 | int t4_slow_intr_handler(struct adapter *adapter); | ||
| 649 | |||
| 650 | int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, | ||
| 651 | struct link_config *lc); | ||
| 652 | int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); | ||
| 653 | int t4_seeprom_wp(struct adapter *adapter, bool enable); | ||
| 654 | int t4_read_flash(struct adapter *adapter, unsigned int addr, | ||
| 655 | unsigned int nwords, u32 *data, int byte_oriented); | ||
| 656 | int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); | ||
| 657 | int t4_check_fw_version(struct adapter *adapter); | ||
| 658 | int t4_prep_adapter(struct adapter *adapter); | ||
| 659 | int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); | ||
| 660 | void t4_fatal_err(struct adapter *adapter); | ||
| 661 | void t4_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on); | ||
| 662 | int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp, | ||
| 663 | int filter_index, int enable); | ||
| 664 | void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp, | ||
| 665 | int filter_index, int *enabled); | ||
| 666 | int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, | ||
| 667 | int start, int n, const u16 *rspq, unsigned int nrspq); | ||
| 668 | int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, | ||
| 669 | unsigned int flags); | ||
| 670 | int t4_read_rss(struct adapter *adapter, u16 *entries); | ||
| 671 | int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity); | ||
| 672 | int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, | ||
| 673 | u64 *parity); | ||
| 674 | |||
| 675 | void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); | ||
| 676 | void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p); | ||
| 677 | |||
| 678 | void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); | ||
| 679 | void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st); | ||
| 680 | void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, | ||
| 681 | struct tp_tcp_stats *v6); | ||
| 682 | void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, | ||
| 683 | const unsigned short *alpha, const unsigned short *beta); | ||
| 684 | |||
| 685 | void t4_wol_magic_enable(struct adapter *adap, unsigned int port, | ||
| 686 | const u8 *addr); | ||
| 687 | int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, | ||
| 688 | u64 mask0, u64 mask1, unsigned int crc, bool enable); | ||
| 689 | |||
| 690 | int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, | ||
| 691 | enum dev_master master, enum dev_state *state); | ||
| 692 | int t4_fw_bye(struct adapter *adap, unsigned int mbox); | ||
| 693 | int t4_early_init(struct adapter *adap, unsigned int mbox); | ||
| 694 | int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); | ||
| 695 | int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
| 696 | unsigned int vf, unsigned int nparams, const u32 *params, | ||
| 697 | u32 *val); | ||
| 698 | int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
| 699 | unsigned int vf, unsigned int nparams, const u32 *params, | ||
| 700 | const u32 *val); | ||
| 701 | int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
| 702 | unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, | ||
| 703 | unsigned int rxqi, unsigned int rxq, unsigned int tc, | ||
| 704 | unsigned int vi, unsigned int cmask, unsigned int pmask, | ||
| 705 | unsigned int nexact, unsigned int rcaps, unsigned int wxcaps); | ||
| 706 | int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, | ||
| 707 | unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, | ||
| 708 | unsigned int *rss_size); | ||
| 709 | int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
| 710 | unsigned int vf, unsigned int viid); | ||
| 711 | int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, | ||
| 712 | int mtu, int promisc, int all_multi, int bcast, bool sleep_ok); | ||
| 713 | int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, | ||
| 714 | unsigned int viid, bool free, unsigned int naddr, | ||
| 715 | const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); | ||
| 716 | int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, | ||
| 717 | int idx, const u8 *addr, bool persist, bool add_smt); | ||
| 718 | int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, | ||
| 719 | bool ucast, u64 vec, bool sleep_ok); | ||
| 720 | int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, | ||
| 721 | bool rx_en, bool tx_en); | ||
| 722 | int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, | ||
| 723 | unsigned int nblinks); | ||
| 724 | int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, | ||
| 725 | unsigned int mmd, unsigned int reg, u16 *valp); | ||
| 726 | int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, | ||
| 727 | unsigned int mmd, unsigned int reg, u16 val); | ||
| 728 | int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, | ||
| 729 | unsigned int pf, unsigned int vf, unsigned int iqid, | ||
| 730 | unsigned int fl0id, unsigned int fl1id); | ||
| 731 | int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
| 732 | unsigned int vf, unsigned int iqtype, unsigned int iqid, | ||
| 733 | unsigned int fl0id, unsigned int fl1id); | ||
| 734 | int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
| 735 | unsigned int vf, unsigned int eqid); | ||
| 736 | int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
| 737 | unsigned int vf, unsigned int eqid); | ||
| 738 | int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
| 739 | unsigned int vf, unsigned int eqid); | ||
| 740 | int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); | ||
| 741 | #endif /* __CXGB4_H__ */ | ||
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c new file mode 100644 index 000000000000..a7e30a23d322 --- /dev/null +++ b/drivers/net/cxgb4/cxgb4_main.c | |||
| @@ -0,0 +1,3388 @@ | |||
| 1 | /* | ||
| 2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. | ||
| 5 | * | ||
| 6 | * This software is available to you under a choice of one of two | ||
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 8 | * General Public License (GPL) Version 2, available from the file | ||
| 9 | * COPYING in the main directory of this source tree, or the | ||
| 10 | * OpenIB.org BSD license below: | ||
| 11 | * | ||
| 12 | * Redistribution and use in source and binary forms, with or | ||
| 13 | * without modification, are permitted provided that the following | ||
| 14 | * conditions are met: | ||
| 15 | * | ||
| 16 | * - Redistributions of source code must retain the above | ||
| 17 | * copyright notice, this list of conditions and the following | ||
| 18 | * disclaimer. | ||
| 19 | * | ||
| 20 | * - Redistributions in binary form must reproduce the above | ||
| 21 | * copyright notice, this list of conditions and the following | ||
| 22 | * disclaimer in the documentation and/or other materials | ||
| 23 | * provided with the distribution. | ||
| 24 | * | ||
| 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 32 | * SOFTWARE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 36 | |||
| 37 | #include <linux/bitmap.h> | ||
| 38 | #include <linux/crc32.h> | ||
| 39 | #include <linux/ctype.h> | ||
| 40 | #include <linux/debugfs.h> | ||
| 41 | #include <linux/err.h> | ||
| 42 | #include <linux/etherdevice.h> | ||
| 43 | #include <linux/firmware.h> | ||
| 44 | #include <linux/if_vlan.h> | ||
| 45 | #include <linux/init.h> | ||
| 46 | #include <linux/log2.h> | ||
| 47 | #include <linux/mdio.h> | ||
| 48 | #include <linux/module.h> | ||
| 49 | #include <linux/moduleparam.h> | ||
| 50 | #include <linux/mutex.h> | ||
| 51 | #include <linux/netdevice.h> | ||
| 52 | #include <linux/pci.h> | ||
| 53 | #include <linux/aer.h> | ||
| 54 | #include <linux/rtnetlink.h> | ||
| 55 | #include <linux/sched.h> | ||
| 56 | #include <linux/seq_file.h> | ||
| 57 | #include <linux/sockios.h> | ||
| 58 | #include <linux/vmalloc.h> | ||
| 59 | #include <linux/workqueue.h> | ||
| 60 | #include <net/neighbour.h> | ||
| 61 | #include <net/netevent.h> | ||
| 62 | #include <asm/uaccess.h> | ||
| 63 | |||
| 64 | #include "cxgb4.h" | ||
| 65 | #include "t4_regs.h" | ||
| 66 | #include "t4_msg.h" | ||
| 67 | #include "t4fw_api.h" | ||
| 68 | #include "l2t.h" | ||
| 69 | |||
| 70 | #define DRV_VERSION "1.0.0-ko" | ||
| 71 | #define DRV_DESC "Chelsio T4 Network Driver" | ||
| 72 | |||
| 73 | /* | ||
| 74 | * Max interrupt hold-off timer value in us. Queues fall back to this value | ||
| 75 | * under extreme memory pressure so it's largish to give the system time to | ||
| 76 | * recover. | ||
| 77 | */ | ||
| 78 | #define MAX_SGE_TIMERVAL 200U | ||
| 79 | |||
| 80 | enum { | ||
| 81 | MEMWIN0_APERTURE = 65536, | ||
| 82 | MEMWIN0_BASE = 0x30000, | ||
| 83 | MEMWIN1_APERTURE = 32768, | ||
| 84 | MEMWIN1_BASE = 0x28000, | ||
| 85 | MEMWIN2_APERTURE = 2048, | ||
| 86 | MEMWIN2_BASE = 0x1b800, | ||
| 87 | }; | ||
| 88 | |||
| 89 | enum { | ||
| 90 | MAX_TXQ_ENTRIES = 16384, | ||
| 91 | MAX_CTRL_TXQ_ENTRIES = 1024, | ||
| 92 | MAX_RSPQ_ENTRIES = 16384, | ||
| 93 | MAX_RX_BUFFERS = 16384, | ||
| 94 | MIN_TXQ_ENTRIES = 32, | ||
| 95 | MIN_CTRL_TXQ_ENTRIES = 32, | ||
| 96 | MIN_RSPQ_ENTRIES = 128, | ||
| 97 | MIN_FL_ENTRIES = 16 | ||
| 98 | }; | ||
| 99 | |||
| 100 | #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ | ||
| 101 | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ | ||
| 102 | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) | ||
| 103 | |||
| 104 | #define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 } | ||
| 105 | |||
| 106 | static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { | ||
| 107 | CH_DEVICE(0xa000), /* PE10K */ | ||
| 108 | { 0, } | ||
| 109 | }; | ||
| 110 | |||
| 111 | #define FW_FNAME "cxgb4/t4fw.bin" | ||
| 112 | |||
| 113 | MODULE_DESCRIPTION(DRV_DESC); | ||
| 114 | MODULE_AUTHOR("Chelsio Communications"); | ||
| 115 | MODULE_LICENSE("Dual BSD/GPL"); | ||
| 116 | MODULE_VERSION(DRV_VERSION); | ||
| 117 | MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); | ||
| 118 | MODULE_FIRMWARE(FW_FNAME); | ||
| 119 | |||
| 120 | static int dflt_msg_enable = DFLT_MSG_ENABLE; | ||
| 121 | |||
| 122 | module_param(dflt_msg_enable, int, 0644); | ||
| 123 | MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap"); | ||
| 124 | |||
| 125 | /* | ||
| 126 | * The driver uses the best interrupt scheme available on a platform in the | ||
| 127 | * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which | ||
| 128 | * of these schemes the driver may consider as follows: | ||
| 129 | * | ||
| 130 | * msi = 2: choose from among all three options | ||
| 131 | * msi = 1: only consider MSI and INTx interrupts | ||
| 132 | * msi = 0: force INTx interrupts | ||
| 133 | */ | ||
| 134 | static int msi = 2; | ||
| 135 | |||
| 136 | module_param(msi, int, 0644); | ||
| 137 | MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)"); | ||
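The msi parameter above only caps which interrupt schemes the driver may try; the actual probing (later in this file) falls back from MSI-X to MSI to legacy INTx within that cap. For example, loading the module with msi=1 would forbid MSI-X while still allowing MSI. A heavily simplified standalone sketch of that fallback order, using hypothetical enable_msix()/enable_msi() stubs rather than the real pci_enable_msix()/pci_enable_msi() calls:

    #include <stdio.h>
    #include <stdbool.h>

    /* Hypothetical stubs standing in for the real PCI MSI-X/MSI setup calls. */
    static bool enable_msix(void) { return false; } /* pretend MSI-X is unavailable */
    static bool enable_msi(void)  { return true;  }

    int main(void)
    {
        int msi = 2;    /* 2: try MSI-X, MSI, INTx;  1: MSI, INTx;  0: INTx only */
        const char *scheme = "INTx";

        if (msi > 1 && enable_msix())
            scheme = "MSI-X";
        else if (msi > 0 && enable_msi())
            scheme = "MSI";

        printf("using %s interrupts\n", scheme);
        return 0;
    }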
| 138 | |||
| 139 | /* | ||
| 140 | * Queue interrupt hold-off timer values. Queues default to the first of these | ||
| 141 | * upon creation. | ||
| 142 | */ | ||
| 143 | static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 }; | ||
| 144 | |||
| 145 | module_param_array(intr_holdoff, uint, NULL, 0644); | ||
| 146 | MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers " | ||
| 147 | "0..4 in microseconds"); | ||
| 148 | |||
| 149 | static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 }; | ||
| 150 | |||
| 151 | module_param_array(intr_cnt, uint, NULL, 0644); | ||
| 152 | MODULE_PARM_DESC(intr_cnt, | ||
| 153 | "thresholds 1..3 for queue interrupt packet counters"); | ||
| 154 | |||
| 155 | static int vf_acls; | ||
| 156 | |||
| 157 | #ifdef CONFIG_PCI_IOV | ||
| 158 | module_param(vf_acls, bool, 0644); | ||
| 159 | MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement"); | ||
| 160 | |||
| 161 | static unsigned int num_vf[4]; | ||
| 162 | |||
| 163 | module_param_array(num_vf, uint, NULL, 0644); | ||
| 164 | MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); | ||
| 165 | #endif | ||
| 166 | |||
| 167 | static struct dentry *cxgb4_debugfs_root; | ||
| 168 | |||
| 169 | static LIST_HEAD(adapter_list); | ||
| 170 | static DEFINE_MUTEX(uld_mutex); | ||
| 171 | static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX]; | ||
| 172 | static const char *uld_str[] = { "RDMA", "iSCSI" }; | ||
| 173 | |||
| 174 | static void link_report(struct net_device *dev) | ||
| 175 | { | ||
| 176 | if (!netif_carrier_ok(dev)) | ||
| 177 | netdev_info(dev, "link down\n"); | ||
| 178 | else { | ||
| 179 | static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" }; | ||
| 180 | |||
| 181 | const char *s = "10Mbps"; | ||
| 182 | const struct port_info *p = netdev_priv(dev); | ||
| 183 | |||
| 184 | switch (p->link_cfg.speed) { | ||
| 185 | case SPEED_10000: | ||
| 186 | s = "10Gbps"; | ||
| 187 | break; | ||
| 188 | case SPEED_1000: | ||
| 189 | s = "1000Mbps"; | ||
| 190 | break; | ||
| 191 | case SPEED_100: | ||
| 192 | s = "100Mbps"; | ||
| 193 | break; | ||
| 194 | } | ||
| 195 | |||
| 196 | netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, | ||
| 197 | fc[p->link_cfg.fc]); | ||
| 198 | } | ||
| 199 | } | ||
| 200 | |||
| 201 | void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat) | ||
| 202 | { | ||
| 203 | struct net_device *dev = adapter->port[port_id]; | ||
| 204 | |||
| 205 | /* Skip changes from disabled ports. */ | ||
| 206 | if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) { | ||
| 207 | if (link_stat) | ||
| 208 | netif_carrier_on(dev); | ||
| 209 | else | ||
| 210 | netif_carrier_off(dev); | ||
| 211 | |||
| 212 | link_report(dev); | ||
| 213 | } | ||
| 214 | } | ||
| 215 | |||
| 216 | void t4_os_portmod_changed(const struct adapter *adap, int port_id) | ||
| 217 | { | ||
| 218 | static const char *mod_str[] = { | ||
| 219 | NULL, "LR", "SR", "ER", "passive DA", "active DA" | ||
| 220 | }; | ||
| 221 | |||
| 222 | const struct net_device *dev = adap->port[port_id]; | ||
| 223 | const struct port_info *pi = netdev_priv(dev); | ||
| 224 | |||
| 225 | if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) | ||
| 226 | netdev_info(dev, "port module unplugged\n"); | ||
| 227 | else | ||
| 228 | netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); | ||
| 229 | } | ||
| 230 | |||
| 231 | /* | ||
| 232 | * Configure the exact and hash address filters to handle a port's multicast | ||
| 233 | * and secondary unicast MAC addresses. | ||
| 234 | */ | ||
| 235 | static int set_addr_filters(const struct net_device *dev, bool sleep) | ||
| 236 | { | ||
| 237 | u64 mhash = 0; | ||
| 238 | u64 uhash = 0; | ||
| 239 | bool free = true; | ||
| 240 | u16 filt_idx[7]; | ||
| 241 | const u8 *addr[7]; | ||
| 242 | int ret, naddr = 0; | ||
| 243 | const struct dev_addr_list *d; | ||
| 244 | const struct netdev_hw_addr *ha; | ||
| 245 | int uc_cnt = netdev_uc_count(dev); | ||
| 246 | const struct port_info *pi = netdev_priv(dev); | ||
| 247 | |||
| 248 | /* first do the secondary unicast addresses */ | ||
| 249 | netdev_for_each_uc_addr(ha, dev) { | ||
| 250 | addr[naddr++] = ha->addr; | ||
| 251 | if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { | ||
| 252 | ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free, | ||
| 253 | naddr, addr, filt_idx, &uhash, sleep); | ||
| 254 | if (ret < 0) | ||
| 255 | return ret; | ||
| 256 | |||
| 257 | free = false; | ||
| 258 | naddr = 0; | ||
| 259 | } | ||
| 260 | } | ||
| 261 | |||
| 262 | /* next set up the multicast addresses */ | ||
| 263 | netdev_for_each_mc_addr(d, dev) { | ||
| 264 | addr[naddr++] = d->dmi_addr; | ||
| 265 | if (naddr >= ARRAY_SIZE(addr) || d->next == NULL) { | ||
| 266 | ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free, | ||
| 267 | naddr, addr, filt_idx, &mhash, sleep); | ||
| 268 | if (ret < 0) | ||
| 269 | return ret; | ||
| 270 | |||
| 271 | free = false; | ||
| 272 | naddr = 0; | ||
| 273 | } | ||
| 274 | } | ||
| 275 | |||
| 276 | return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0, | ||
| 277 | uhash | mhash, sleep); | ||
| 278 | } | ||
| 279 | |||
| 280 | /* | ||
| 281 | * Set Rx properties of a port, such as promiscuity, address filters, and MTU. | ||
| 282 | * If @mtu is -1 it is left unchanged. | ||
| 283 | */ | ||
| 284 | static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) | ||
| 285 | { | ||
| 286 | int ret; | ||
| 287 | struct port_info *pi = netdev_priv(dev); | ||
| 288 | |||
| 289 | ret = set_addr_filters(dev, sleep_ok); | ||
| 290 | if (ret == 0) | ||
| 291 | ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu, | ||
| 292 | (dev->flags & IFF_PROMISC) ? 1 : 0, | ||
| 293 | (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, | ||
| 294 | sleep_ok); | ||
| 295 | return ret; | ||
| 296 | } | ||
| 297 | |||
| 298 | /** | ||
| 299 | * link_start - enable a port | ||
| 300 | * @dev: the port to enable | ||
| 301 | * | ||
| 302 | * Performs the MAC and PHY actions needed to enable a port. | ||
| 303 | */ | ||
| 304 | static int link_start(struct net_device *dev) | ||
| 305 | { | ||
| 306 | int ret; | ||
| 307 | struct port_info *pi = netdev_priv(dev); | ||
| 308 | |||
| 309 | /* | ||
| 310 | * We do not set address filters and promiscuity here; the stack does | ||
| 311 | * that step explicitly. | ||
| 312 | */ | ||
| 313 | ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1, | ||
| 314 | true); | ||
| 315 | if (ret == 0) { | ||
| 316 | ret = t4_change_mac(pi->adapter, 0, pi->viid, | ||
| 317 | pi->xact_addr_filt, dev->dev_addr, true, | ||
| 318 | false); | ||
| 319 | if (ret >= 0) { | ||
| 320 | pi->xact_addr_filt = ret; | ||
| 321 | ret = 0; | ||
| 322 | } | ||
| 323 | } | ||
| 324 | if (ret == 0) | ||
| 325 | ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg); | ||
| 326 | if (ret == 0) | ||
| 327 | ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true); | ||
| 328 | return ret; | ||
| 329 | } | ||
| 330 | |||
| 331 | /* | ||
| 332 | * Response queue handler for the FW event queue. | ||
| 333 | */ | ||
| 334 | static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, | ||
| 335 | const struct pkt_gl *gl) | ||
| 336 | { | ||
| 337 | u8 opcode = ((const struct rss_header *)rsp)->opcode; | ||
| 338 | |||
| 339 | rsp++; /* skip RSS header */ | ||
| 340 | if (likely(opcode == CPL_SGE_EGR_UPDATE)) { | ||
| 341 | const struct cpl_sge_egr_update *p = (void *)rsp; | ||
| 342 | unsigned int qid = EGR_QID(ntohl(p->opcode_qid)); | ||
| 343 | struct sge_txq *txq = q->adap->sge.egr_map[qid]; | ||
| 344 | |||
| 345 | txq->restarts++; | ||
| 346 | if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) { | ||
| 347 | struct sge_eth_txq *eq; | ||
| 348 | |||
| 349 | eq = container_of(txq, struct sge_eth_txq, q); | ||
| 350 | netif_tx_wake_queue(eq->txq); | ||
| 351 | } else { | ||
| 352 | struct sge_ofld_txq *oq; | ||
| 353 | |||
| 354 | oq = container_of(txq, struct sge_ofld_txq, q); | ||
| 355 | tasklet_schedule(&oq->qresume_tsk); | ||
| 356 | } | ||
| 357 | } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) { | ||
| 358 | const struct cpl_fw6_msg *p = (void *)rsp; | ||
| 359 | |||
| 360 | if (p->type == 0) | ||
| 361 | t4_handle_fw_rpl(q->adap, p->data); | ||
| 362 | } else if (opcode == CPL_L2T_WRITE_RPL) { | ||
| 363 | const struct cpl_l2t_write_rpl *p = (void *)rsp; | ||
| 364 | |||
| 365 | do_l2t_write_rpl(q->adap, p); | ||
| 366 | } else | ||
| 367 | dev_err(q->adap->pdev_dev, | ||
| 368 | "unexpected CPL %#x on FW event queue\n", opcode); | ||
| 369 | return 0; | ||
| 370 | } | ||
| 371 | |||
| 372 | /** | ||
| 373 | * uldrx_handler - response queue handler for ULD queues | ||
| 374 | * @q: the response queue that received the packet | ||
| 375 | * @rsp: the response queue descriptor holding the offload message | ||
| 376 | * @gl: the gather list of packet fragments | ||
| 377 | * | ||
| 378 | * Deliver an ingress offload packet to a ULD. All processing is done by | ||
| 379 | * the ULD; we just maintain statistics. | ||
| 380 | */ | ||
| 381 | static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, | ||
| 382 | const struct pkt_gl *gl) | ||
| 383 | { | ||
| 384 | struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); | ||
| 385 | |||
| 386 | if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) { | ||
| 387 | rxq->stats.nomem++; | ||
| 388 | return -1; | ||
| 389 | } | ||
| 390 | if (gl == NULL) | ||
| 391 | rxq->stats.imm++; | ||
| 392 | else if (gl == CXGB4_MSG_AN) | ||
| 393 | rxq->stats.an++; | ||
| 394 | else | ||
| 395 | rxq->stats.pkts++; | ||
| 396 | return 0; | ||
| 397 | } | ||
| 398 | |||
| 399 | static void disable_msi(struct adapter *adapter) | ||
| 400 | { | ||
| 401 | if (adapter->flags & USING_MSIX) { | ||
| 402 | pci_disable_msix(adapter->pdev); | ||
| 403 | adapter->flags &= ~USING_MSIX; | ||
| 404 | } else if (adapter->flags & USING_MSI) { | ||
| 405 | pci_disable_msi(adapter->pdev); | ||
| 406 | adapter->flags &= ~USING_MSI; | ||
| 407 | } | ||
| 408 | } | ||
| 409 | |||
| 410 | /* | ||
| 411 | * Interrupt handler for non-data events used with MSI-X. | ||
| 412 | */ | ||
| 413 | static irqreturn_t t4_nondata_intr(int irq, void *cookie) | ||
| 414 | { | ||
| 415 | struct adapter *adap = cookie; | ||
| 416 | |||
| 417 | u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE)); | ||
| 418 | if (v & PFSW) { | ||
| 419 | adap->swintr = 1; | ||
| 420 | t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v); | ||
| 421 | } | ||
| 422 | t4_slow_intr_handler(adap); | ||
| 423 | return IRQ_HANDLED; | ||
| 424 | } | ||
| 425 | |||
| 426 | /* | ||
| 427 | * Name the MSI-X interrupts. | ||
| 428 | */ | ||
| 429 | static void name_msix_vecs(struct adapter *adap) | ||
| 430 | { | ||
| 431 | int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1; | ||
| 432 | |||
| 433 | /* non-data interrupts */ | ||
| 434 | snprintf(adap->msix_info[0].desc, n, "%s", adap->name); | ||
| 435 | adap->msix_info[0].desc[n] = 0; | ||
| 436 | |||
| 437 | /* FW events */ | ||
| 438 | snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name); | ||
| 439 | adap->msix_info[1].desc[n] = 0; | ||
| 440 | |||
| 441 | /* Ethernet queues */ | ||
| 442 | for_each_port(adap, j) { | ||
| 443 | struct net_device *d = adap->port[j]; | ||
| 444 | const struct port_info *pi = netdev_priv(d); | ||
| 445 | |||
| 446 | for (i = 0; i < pi->nqsets; i++, msi_idx++) { | ||
| 447 | snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d", | ||
| 448 | d->name, i); | ||
| 449 | adap->msix_info[msi_idx].desc[n] = 0; | ||
| 450 | } | ||
| 451 | } | ||
| 452 | |||
| 453 | /* offload queues */ | ||
| 454 | for_each_ofldrxq(&adap->sge, i) { | ||
| 455 | snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d", | ||
| 456 | adap->name, i); | ||
| 457 | adap->msix_info[msi_idx++].desc[n] = 0; | ||
| 458 | } | ||
| 459 | for_each_rdmarxq(&adap->sge, i) { | ||
| 460 | snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d", | ||
| 461 | adap->name, i); | ||
| 462 | adap->msix_info[msi_idx++].desc[n] = 0; | ||
| 463 | } | ||
| 464 | } | ||
| 465 | |||
| 466 | static int request_msix_queue_irqs(struct adapter *adap) | ||
| 467 | { | ||
| 468 | struct sge *s = &adap->sge; | ||
| 469 | int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2; | ||
| 470 | |||
| 471 | err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, | ||
| 472 | adap->msix_info[1].desc, &s->fw_evtq); | ||
| 473 | if (err) | ||
| 474 | return err; | ||
| 475 | |||
| 476 | for_each_ethrxq(s, ethqidx) { | ||
| 477 | err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, | ||
| 478 | adap->msix_info[msi].desc, | ||
| 479 | &s->ethrxq[ethqidx].rspq); | ||
| 480 | if (err) | ||
| 481 | goto unwind; | ||
| 482 | msi++; | ||
| 483 | } | ||
| 484 | for_each_ofldrxq(s, ofldqidx) { | ||
| 485 | err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, | ||
| 486 | adap->msix_info[msi].desc, | ||
| 487 | &s->ofldrxq[ofldqidx].rspq); | ||
| 488 | if (err) | ||
| 489 | goto unwind; | ||
| 490 | msi++; | ||
| 491 | } | ||
| 492 | for_each_rdmarxq(s, rdmaqidx) { | ||
| 493 | err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, | ||
| 494 | adap->msix_info[msi].desc, | ||
| 495 | &s->rdmarxq[rdmaqidx].rspq); | ||
| 496 | if (err) | ||
| 497 | goto unwind; | ||
| 498 | msi++; | ||
| 499 | } | ||
| 500 | return 0; | ||
| 501 | |||
| 502 | unwind: | ||
| 503 | while (--rdmaqidx >= 0) | ||
| 504 | free_irq(adap->msix_info[--msi].vec, | ||
| 505 | &s->rdmarxq[rdmaqidx].rspq); | ||
| 506 | while (--ofldqidx >= 0) | ||
| 507 | free_irq(adap->msix_info[--msi].vec, | ||
| 508 | &s->ofldrxq[ofldqidx].rspq); | ||
| 509 | while (--ethqidx >= 0) | ||
| 510 | free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq); | ||
| 511 | free_irq(adap->msix_info[1].vec, &s->fw_evtq); | ||
| 512 | return err; | ||
| 513 | } | ||
| 514 | |||
| 515 | static void free_msix_queue_irqs(struct adapter *adap) | ||
| 516 | { | ||
| 517 | int i, msi = 2; | ||
| 518 | struct sge *s = &adap->sge; | ||
| 519 | |||
| 520 | free_irq(adap->msix_info[1].vec, &s->fw_evtq); | ||
| 521 | for_each_ethrxq(s, i) | ||
| 522 | free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq); | ||
| 523 | for_each_ofldrxq(s, i) | ||
| 524 | free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq); | ||
| 525 | for_each_rdmarxq(s, i) | ||
| 526 | free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq); | ||
| 527 | } | ||
| 528 | |||
| 529 | /** | ||
| 530 | * setup_rss - configure RSS | ||
| 531 | * @adap: the adapter | ||
| 532 | * | ||
| 533 | * Sets up RSS to distribute packets to multiple receive queues. We | ||
| 534 | * configure the RSS CPU lookup table to distribute to the number of HW | ||
| 535 | * receive queues, and the response queue lookup table to narrow that | ||
| 536 | * down to the response queues actually configured for each port. | ||
| 537 | * We always configure the RSS mapping for all ports since the mapping | ||
| 538 | * table has plenty of entries. | ||
| 539 | */ | ||
| 540 | static int setup_rss(struct adapter *adap) | ||
| 541 | { | ||
| 542 | int i, j, err; | ||
| 543 | u16 rss[MAX_ETH_QSETS]; | ||
| 544 | |||
| 545 | for_each_port(adap, i) { | ||
| 546 | const struct port_info *pi = adap2pinfo(adap, i); | ||
| 547 | const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; | ||
| 548 | |||
| 549 | for (j = 0; j < pi->nqsets; j++) | ||
| 550 | rss[j] = q[j].rspq.abs_id; | ||
| 551 | |||
| 552 | err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size, | ||
| 553 | rss, pi->nqsets); | ||
| 554 | if (err) | ||
| 555 | return err; | ||
| 556 | } | ||
| 557 | return 0; | ||
| 558 | } | ||
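setup_rss() programs, for each port, an indirection table built from the absolute ids of that port's response queues (presumably t4_config_rss_range() repeats those entries to fill the whole rss_size range). Once RSS is active the effect is the usual two-step lookup: a hash over the packet headers selects a table slot, and the slot names the receive queue. A generic, non-Chelsio-specific standalone sketch of that lookup:

    #include <stdio.h>
    #include <stdint.h>

    #define RSS_TABLE_SIZE 128

    /* Generic RSS-style lookup: a hash over the packet headers selects a slot
     * in the indirection table, and the slot holds a receive queue id. */
    static uint16_t rss_pick_queue(const uint16_t *table, unsigned int tbl_size,
                                   uint32_t hash)
    {
        return table[hash % tbl_size];
    }

    int main(void)
    {
        uint16_t table[RSS_TABLE_SIZE];
        uint16_t qids[] = { 12, 13, 14, 15 };   /* hypothetical absolute queue ids */
        unsigned int i, nq = sizeof(qids) / sizeof(qids[0]);

        for (i = 0; i < RSS_TABLE_SIZE; i++)    /* cycle the port's queues */
            table[i] = qids[i % nq];

        printf("hash 0x1234 -> queue %u\n",
               (unsigned)rss_pick_queue(table, RSS_TABLE_SIZE, 0x1234));
        return 0;
    }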
| 559 | |||
| 560 | /* | ||
| 561 | * Wait until all NAPI handlers are descheduled. | ||
| 562 | */ | ||
| 563 | static void quiesce_rx(struct adapter *adap) | ||
| 564 | { | ||
| 565 | int i; | ||
| 566 | |||
| 567 | for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { | ||
| 568 | struct sge_rspq *q = adap->sge.ingr_map[i]; | ||
| 569 | |||
| 570 | if (q && q->handler) | ||
| 571 | napi_disable(&q->napi); | ||
| 572 | } | ||
| 573 | } | ||
| 574 | |||
| 575 | /* | ||
| 576 | * Enable NAPI scheduling and interrupt generation for all Rx queues. | ||
| 577 | */ | ||
| 578 | static void enable_rx(struct adapter *adap) | ||
| 579 | { | ||
| 580 | int i; | ||
| 581 | |||
| 582 | for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { | ||
| 583 | struct sge_rspq *q = adap->sge.ingr_map[i]; | ||
| 584 | |||
| 585 | if (!q) | ||
| 586 | continue; | ||
| 587 | if (q->handler) | ||
| 588 | napi_enable(&q->napi); | ||
| 589 | /* 0-increment GTS to start the timer and enable interrupts */ | ||
| 590 | t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), | ||
| 591 | SEINTARM(q->intr_params) | | ||
| 592 | INGRESSQID(q->cntxt_id)); | ||
| 593 | } | ||
| 594 | } | ||
| 595 | |||
| 596 | /** | ||
| 597 | * setup_sge_queues - configure SGE Tx/Rx/response queues | ||
| 598 | * @adap: the adapter | ||
| 599 | * | ||
| 600 | * Determines how many sets of SGE queues to use and initializes them. | ||
| 601 | * We support multiple queue sets per port if we have MSI-X, otherwise | ||
| 602 | * just one queue set per port. | ||
| 603 | */ | ||
| 604 | static int setup_sge_queues(struct adapter *adap) | ||
| 605 | { | ||
| 606 | int err, msi_idx, i, j; | ||
| 607 | struct sge *s = &adap->sge; | ||
| 608 | |||
| 609 | bitmap_zero(s->starving_fl, MAX_EGRQ); | ||
| 610 | bitmap_zero(s->txq_maperr, MAX_EGRQ); | ||
| 611 | |||
| 612 | if (adap->flags & USING_MSIX) | ||
| 613 | msi_idx = 1; /* vector 0 is for non-queue interrupts */ | ||
| 614 | else { | ||
| 615 | err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, | ||
| 616 | NULL, NULL); | ||
| 617 | if (err) | ||
| 618 | return err; | ||
| 619 | msi_idx = -((int)s->intrq.abs_id + 1); | ||
| 620 | } | ||
| 621 | |||
| 622 | err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], | ||
| 623 | msi_idx, NULL, fwevtq_handler); | ||
| 624 | if (err) { | ||
| 625 | freeout: t4_free_sge_resources(adap); | ||
| 626 | return err; | ||
| 627 | } | ||
| 628 | |||
| 629 | for_each_port(adap, i) { | ||
| 630 | struct net_device *dev = adap->port[i]; | ||
| 631 | struct port_info *pi = netdev_priv(dev); | ||
| 632 | struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset]; | ||
| 633 | struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; | ||
| 634 | |||
| 635 | for (j = 0; j < pi->nqsets; j++, q++) { | ||
| 636 | if (msi_idx > 0) | ||
| 637 | msi_idx++; | ||
| 638 | err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, | ||
| 639 | msi_idx, &q->fl, | ||
| 640 | t4_ethrx_handler); | ||
| 641 | if (err) | ||
| 642 | goto freeout; | ||
| 643 | q->rspq.idx = j; | ||
| 644 | memset(&q->stats, 0, sizeof(q->stats)); | ||
| 645 | } | ||
| 646 | for (j = 0; j < pi->nqsets; j++, t++) { | ||
| 647 | err = t4_sge_alloc_eth_txq(adap, t, dev, | ||
| 648 | netdev_get_tx_queue(dev, j), | ||
| 649 | s->fw_evtq.cntxt_id); | ||
| 650 | if (err) | ||
| 651 | goto freeout; | ||
| 652 | } | ||
| 653 | } | ||
| 654 | |||
| 655 | j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */ | ||
| 656 | for_each_ofldrxq(s, i) { | ||
| 657 | struct sge_ofld_rxq *q = &s->ofldrxq[i]; | ||
| 658 | struct net_device *dev = adap->port[i / j]; | ||
| 659 | |||
| 660 | if (msi_idx > 0) | ||
| 661 | msi_idx++; | ||
| 662 | err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx, | ||
| 663 | &q->fl, uldrx_handler); | ||
| 664 | if (err) | ||
| 665 | goto freeout; | ||
| 666 | memset(&q->stats, 0, sizeof(q->stats)); | ||
| 667 | s->ofld_rxq[i] = q->rspq.abs_id; | ||
| 668 | err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev, | ||
| 669 | s->fw_evtq.cntxt_id); | ||
| 670 | if (err) | ||
| 671 | goto freeout; | ||
| 672 | } | ||
| 673 | |||
| 674 | for_each_rdmarxq(s, i) { | ||
| 675 | struct sge_ofld_rxq *q = &s->rdmarxq[i]; | ||
| 676 | |||
| 677 | if (msi_idx > 0) | ||
| 678 | msi_idx++; | ||
| 679 | err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], | ||
| 680 | msi_idx, &q->fl, uldrx_handler); | ||
| 681 | if (err) | ||
| 682 | goto freeout; | ||
| 683 | memset(&q->stats, 0, sizeof(q->stats)); | ||
| 684 | s->rdma_rxq[i] = q->rspq.abs_id; | ||
| 685 | } | ||
| 686 | |||
| 687 | for_each_port(adap, i) { | ||
| 688 | /* | ||
| 689 | * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't | ||
| 690 | * have RDMA queues, and that's the right value. | ||
| 691 | */ | ||
| 692 | err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], | ||
| 693 | s->fw_evtq.cntxt_id, | ||
| 694 | s->rdmarxq[i].rspq.cntxt_id); | ||
| 695 | if (err) | ||
| 696 | goto freeout; | ||
| 697 | } | ||
| 698 | |||
| 699 | t4_write_reg(adap, MPS_TRC_RSS_CONTROL, | ||
| 700 | RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | | ||
| 701 | QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); | ||
| 702 | return 0; | ||
| 703 | } | ||
| 704 | |||
| 705 | /* | ||
| 706 | * Returns 0 if new FW was successfully loaded, a positive errno if a load was | ||
| 707 | * started but failed, and a negative errno if flash load couldn't start. | ||
| 708 | */ | ||
| 709 | static int upgrade_fw(struct adapter *adap) | ||
| 710 | { | ||
| 711 | int ret; | ||
| 712 | u32 vers; | ||
| 713 | const struct fw_hdr *hdr; | ||
| 714 | const struct firmware *fw; | ||
| 715 | struct device *dev = adap->pdev_dev; | ||
| 716 | |||
| 717 | ret = request_firmware(&fw, FW_FNAME, dev); | ||
| 718 | if (ret < 0) { | ||
| 719 | dev_err(dev, "unable to load firmware image " FW_FNAME | ||
| 720 | ", error %d\n", ret); | ||
| 721 | return ret; | ||
| 722 | } | ||
| 723 | |||
| 724 | hdr = (const struct fw_hdr *)fw->data; | ||
| 725 | vers = ntohl(hdr->fw_ver); | ||
| 726 | if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) { | ||
| 727 | ret = -EINVAL; /* wrong major version, won't do */ | ||
| 728 | goto out; | ||
| 729 | } | ||
| 730 | |||
| 731 | /* | ||
| 732 | * If the flash FW is unusable or we found something newer, load it. | ||
| 733 | */ | ||
| 734 | if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR || | ||
| 735 | vers > adap->params.fw_vers) { | ||
| 736 | ret = -t4_load_fw(adap, fw->data, fw->size); | ||
| 737 | if (!ret) | ||
| 738 | dev_info(dev, "firmware upgraded to version %pI4 from " | ||
| 739 | FW_FNAME "\n", &hdr->fw_ver); | ||
| 740 | } | ||
| 741 | out: release_firmware(fw); | ||
| 742 | return ret; | ||
| 743 | } | ||
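The comment above upgrade_fw() defines a three-way return convention: 0 when a new image was flashed, a positive errno when a flash attempt started but failed (note the ret = -t4_load_fw(...) negation), and a negative errno when no load was attempted (a request_firmware() failure or the -EINVAL wrong-major case). A minimal sketch of a caller distinguishing the three cases; upgrade_fw_stub() is a made-up stand-in, not the driver function:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    /* Made-up stand-in for upgrade_fw(); follows the documented convention. */
    static int upgrade_fw_stub(void)
    {
        return -ENOENT;         /* pretend the firmware file was missing */
    }

    int main(void)
    {
        int ret = upgrade_fw_stub();

        if (ret == 0)
            printf("new firmware flashed\n");
        else if (ret > 0)
            printf("flash started but failed: %s\n", strerror(ret));
        else
            printf("flash not attempted: %s\n", strerror(-ret));
        return 0;
    }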
| 744 | |||
| 745 | /* | ||
| 746 | * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. | ||
| 747 | * The allocated memory is cleared. | ||
| 748 | */ | ||
| 749 | void *t4_alloc_mem(size_t size) | ||
| 750 | { | ||
| 751 | void *p = kmalloc(size, GFP_KERNEL); | ||
| 752 | |||
| 753 | if (!p) | ||
| 754 | p = vmalloc(size); | ||
| 755 | if (p) | ||
| 756 | memset(p, 0, size); | ||
| 757 | return p; | ||
| 758 | } | ||
| 759 | |||
| 760 | /* | ||
| 761 | * Free memory allocated through alloc_mem(). | ||
| 762 | */ | ||
| 763 | void t4_free_mem(void *addr) | ||
| 764 | { | ||
| 765 | if (is_vmalloc_addr(addr)) | ||
| 766 | vfree(addr); | ||
| 767 | else | ||
| 768 | kfree(addr); | ||
| 769 | } | ||
| 770 | |||
| 771 | static inline int is_offload(const struct adapter *adap) | ||
| 772 | { | ||
| 773 | return adap->params.offload; | ||
| 774 | } | ||
| 775 | |||
| 776 | /* | ||
| 777 | * Implementation of ethtool operations. | ||
| 778 | */ | ||
| 779 | |||
| 780 | static u32 get_msglevel(struct net_device *dev) | ||
| 781 | { | ||
| 782 | return netdev2adap(dev)->msg_enable; | ||
| 783 | } | ||
| 784 | |||
| 785 | static void set_msglevel(struct net_device *dev, u32 val) | ||
| 786 | { | ||
| 787 | netdev2adap(dev)->msg_enable = val; | ||
| 788 | } | ||
| 789 | |||
| 790 | static char stats_strings[][ETH_GSTRING_LEN] = { | ||
| 791 | "TxOctetsOK ", | ||
| 792 | "TxFramesOK ", | ||
| 793 | "TxBroadcastFrames ", | ||
| 794 | "TxMulticastFrames ", | ||
| 795 | "TxUnicastFrames ", | ||
| 796 | "TxErrorFrames ", | ||
| 797 | |||
| 798 | "TxFrames64 ", | ||
| 799 | "TxFrames65To127 ", | ||
| 800 | "TxFrames128To255 ", | ||
| 801 | "TxFrames256To511 ", | ||
| 802 | "TxFrames512To1023 ", | ||
| 803 | "TxFrames1024To1518 ", | ||
| 804 | "TxFrames1519ToMax ", | ||
| 805 | |||
| 806 | "TxFramesDropped ", | ||
| 807 | "TxPauseFrames ", | ||
| 808 | "TxPPP0Frames ", | ||
| 809 | "TxPPP1Frames ", | ||
| 810 | "TxPPP2Frames ", | ||
| 811 | "TxPPP3Frames ", | ||
| 812 | "TxPPP4Frames ", | ||
| 813 | "TxPPP5Frames ", | ||
| 814 | "TxPPP6Frames ", | ||
| 815 | "TxPPP7Frames ", | ||
| 816 | |||
| 817 | "RxOctetsOK ", | ||
| 818 | "RxFramesOK ", | ||
| 819 | "RxBroadcastFrames ", | ||
| 820 | "RxMulticastFrames ", | ||
| 821 | "RxUnicastFrames ", | ||
| 822 | |||
| 823 | "RxFramesTooLong ", | ||
| 824 | "RxJabberErrors ", | ||
| 825 | "RxFCSErrors ", | ||
| 826 | "RxLengthErrors ", | ||
| 827 | "RxSymbolErrors ", | ||
| 828 | "RxRuntFrames ", | ||
| 829 | |||
| 830 | "RxFrames64 ", | ||
| 831 | "RxFrames65To127 ", | ||
| 832 | "RxFrames128To255 ", | ||
| 833 | "RxFrames256To511 ", | ||
| 834 | "RxFrames512To1023 ", | ||
| 835 | "RxFrames1024To1518 ", | ||
| 836 | "RxFrames1519ToMax ", | ||
| 837 | |||
| 838 | "RxPauseFrames ", | ||
| 839 | "RxPPP0Frames ", | ||
| 840 | "RxPPP1Frames ", | ||
| 841 | "RxPPP2Frames ", | ||
| 842 | "RxPPP3Frames ", | ||
| 843 | "RxPPP4Frames ", | ||
| 844 | "RxPPP5Frames ", | ||
| 845 | "RxPPP6Frames ", | ||
| 846 | "RxPPP7Frames ", | ||
| 847 | |||
| 848 | "RxBG0FramesDropped ", | ||
| 849 | "RxBG1FramesDropped ", | ||
| 850 | "RxBG2FramesDropped ", | ||
| 851 | "RxBG3FramesDropped ", | ||
| 852 | "RxBG0FramesTrunc ", | ||
| 853 | "RxBG1FramesTrunc ", | ||
| 854 | "RxBG2FramesTrunc ", | ||
| 855 | "RxBG3FramesTrunc ", | ||
| 856 | |||
| 857 | "TSO ", | ||
| 858 | "TxCsumOffload ", | ||
| 859 | "RxCsumGood ", | ||
| 860 | "VLANextractions ", | ||
| 861 | "VLANinsertions ", | ||
| 862 | }; | ||
| 863 | |||
| 864 | static int get_sset_count(struct net_device *dev, int sset) | ||
| 865 | { | ||
| 866 | switch (sset) { | ||
| 867 | case ETH_SS_STATS: | ||
| 868 | return ARRAY_SIZE(stats_strings); | ||
| 869 | default: | ||
| 870 | return -EOPNOTSUPP; | ||
| 871 | } | ||
| 872 | } | ||
| 873 | |||
| 874 | #define T4_REGMAP_SIZE (160 * 1024) | ||
| 875 | |||
| 876 | static int get_regs_len(struct net_device *dev) | ||
| 877 | { | ||
| 878 | return T4_REGMAP_SIZE; | ||
| 879 | } | ||
| 880 | |||
| 881 | static int get_eeprom_len(struct net_device *dev) | ||
| 882 | { | ||
| 883 | return EEPROMSIZE; | ||
| 884 | } | ||
| 885 | |||
| 886 | static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
| 887 | { | ||
| 888 | struct adapter *adapter = netdev2adap(dev); | ||
| 889 | |||
| 890 | strcpy(info->driver, KBUILD_MODNAME); | ||
| 891 | strcpy(info->version, DRV_VERSION); | ||
| 892 | strcpy(info->bus_info, pci_name(adapter->pdev)); | ||
| 893 | |||
| 894 | if (!adapter->params.fw_vers) | ||
| 895 | strcpy(info->fw_version, "N/A"); | ||
| 896 | else | ||
| 897 | snprintf(info->fw_version, sizeof(info->fw_version), | ||
| 898 | "%u.%u.%u.%u, TP %u.%u.%u.%u", | ||
| 899 | FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers), | ||
| 900 | FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers), | ||
| 901 | FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers), | ||
| 902 | FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers), | ||
| 903 | FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers), | ||
| 904 | FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers), | ||
| 905 | FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers), | ||
| 906 | FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers)); | ||
| 907 | } | ||
| 908 | |||
| 909 | static void get_strings(struct net_device *dev, u32 stringset, u8 *data) | ||
| 910 | { | ||
| 911 | if (stringset == ETH_SS_STATS) | ||
| 912 | memcpy(data, stats_strings, sizeof(stats_strings)); | ||
| 913 | } | ||
| 914 | |||
| 915 | /* | ||
| 916 | * port stats maintained per queue of the port. They should be in the same | ||
| 917 | * order as in stats_strings above. | ||
| 918 | */ | ||
| 919 | struct queue_port_stats { | ||
| 920 | u64 tso; | ||
| 921 | u64 tx_csum; | ||
| 922 | u64 rx_csum; | ||
| 923 | u64 vlan_ex; | ||
| 924 | u64 vlan_ins; | ||
| 925 | }; | ||
| 926 | |||
| 927 | static void collect_sge_port_stats(const struct adapter *adap, | ||
| 928 | const struct port_info *p, struct queue_port_stats *s) | ||
| 929 | { | ||
| 930 | int i; | ||
| 931 | const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset]; | ||
| 932 | const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset]; | ||
| 933 | |||
| 934 | memset(s, 0, sizeof(*s)); | ||
| 935 | for (i = 0; i < p->nqsets; i++, rx++, tx++) { | ||
| 936 | s->tso += tx->tso; | ||
| 937 | s->tx_csum += tx->tx_cso; | ||
| 938 | s->rx_csum += rx->stats.rx_cso; | ||
| 939 | s->vlan_ex += rx->stats.vlan_ex; | ||
| 940 | s->vlan_ins += tx->vlan_ins; | ||
| 941 | } | ||
| 942 | } | ||
| 943 | |||
| 944 | static void get_stats(struct net_device *dev, struct ethtool_stats *stats, | ||
| 945 | u64 *data) | ||
| 946 | { | ||
| 947 | struct port_info *pi = netdev_priv(dev); | ||
| 948 | struct adapter *adapter = pi->adapter; | ||
| 949 | |||
| 950 | t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data); | ||
| 951 | |||
| 952 | data += sizeof(struct port_stats) / sizeof(u64); | ||
| 953 | collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); | ||
| 954 | } | ||
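get_stats() lays the two blocks out back to back in the u64 array ethtool provides: the hardware port_stats first, then the software queue_port_stats, with the pointer advanced by sizeof(struct port_stats) / sizeof(u64) slots in between. That only lines up because both structures consist purely of 64-bit counters. A tiny standalone sketch of the same advance-past-the-first-struct idiom, using made-up stand-in structures:

    #include <stdio.h>
    #include <stdint.h>

    struct hw_block { uint64_t a, b, c; };  /* stands in for struct port_stats */
    struct sw_block { uint64_t x, y; };     /* stands in for queue_port_stats  */

    int main(void)
    {
        uint64_t data[5] = { 0 };
        uint64_t *p = data;

        ((struct hw_block *)p)->a = 1;                  /* fill the first block     */
        p += sizeof(struct hw_block) / sizeof(uint64_t);
        ((struct sw_block *)p)->x = 2;                  /* second block right after */

        printf("data[0]=%llu data[3]=%llu\n",
               (unsigned long long)data[0], (unsigned long long)data[3]);
        return 0;
    }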
| 955 | |||
| 956 | /* | ||
| 957 | * Return a version number to identify the type of adapter. The scheme is: | ||
| 958 | * - bits 0..9: chip version | ||
| 959 | * - bits 10..15: chip revision | ||
| 960 | */ | ||
| 961 | static inline unsigned int mk_adap_vers(const struct adapter *ap) | ||
| 962 | { | ||
| 963 | return 4 | (ap->params.rev << 10); | ||
| 964 | } | ||
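mk_adap_vers() packs the chip version into bits 0..9 and the chip revision into bits 10..15, so a T4 (chip version 4) at revision 1 reports 4 | (1 << 10) = 0x404. A small standalone encode/decode sketch of that layout:

    #include <stdio.h>

    /* Bits 0..9: chip version, bits 10..15: chip revision (per the comment). */
    static unsigned int mk_vers(unsigned int chip, unsigned int rev)
    {
        return (chip & 0x3ff) | ((rev & 0x3f) << 10);
    }

    int main(void)
    {
        unsigned int v = mk_vers(4, 1);                 /* chip 4, revision 1 */

        printf("packed   = %#x\n", v);                  /* 0x404 */
        printf("version  = %u\n", v & 0x3ff);           /* 4     */
        printf("revision = %u\n", (v >> 10) & 0x3f);    /* 1     */
        return 0;
    }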
| 965 | |||
| 966 | static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, | ||
| 967 | unsigned int end) | ||
| 968 | { | ||
| 969 | u32 *p = buf + start; | ||
| 970 | |||
| 971 | for ( ; start <= end; start += sizeof(u32)) | ||
| 972 | *p++ = t4_read_reg(ap, start); | ||
| 973 | } | ||
| 974 | |||
| 975 | static void get_regs(struct net_device *dev, struct ethtool_regs *regs, | ||
| 976 | void *buf) | ||
| 977 | { | ||
| 978 | static const unsigned int reg_ranges[] = { | ||
| 979 | 0x1008, 0x1108, | ||
| 980 | 0x1180, 0x11b4, | ||
| 981 | 0x11fc, 0x123c, | ||
| 982 | 0x1300, 0x173c, | ||
| 983 | 0x1800, 0x18fc, | ||
| 984 | 0x3000, 0x30d8, | ||
| 985 | 0x30e0, 0x5924, | ||
| 986 | 0x5960, 0x59d4, | ||
| 987 | 0x5a00, 0x5af8, | ||
| 988 | 0x6000, 0x6098, | ||
| 989 | 0x6100, 0x6150, | ||
| 990 | 0x6200, 0x6208, | ||
| 991 | 0x6240, 0x6248, | ||
| 992 | 0x6280, 0x6338, | ||
| 993 | 0x6370, 0x638c, | ||
| 994 | 0x6400, 0x643c, | ||
| 995 | 0x6500, 0x6524, | ||
| 996 | 0x6a00, 0x6a38, | ||
| 997 | 0x6a60, 0x6a78, | ||
| 998 | 0x6b00, 0x6b84, | ||
| 999 | 0x6bf0, 0x6c84, | ||
| 1000 | 0x6cf0, 0x6d84, | ||
| 1001 | 0x6df0, 0x6e84, | ||
| 1002 | 0x6ef0, 0x6f84, | ||
| 1003 | 0x6ff0, 0x7084, | ||
| 1004 | 0x70f0, 0x7184, | ||
| 1005 | 0x71f0, 0x7284, | ||
| 1006 | 0x72f0, 0x7384, | ||
| 1007 | 0x73f0, 0x7450, | ||
| 1008 | 0x7500, 0x7530, | ||
| 1009 | 0x7600, 0x761c, | ||
| 1010 | 0x7680, 0x76cc, | ||
| 1011 | 0x7700, 0x7798, | ||
| 1012 | 0x77c0, 0x77fc, | ||
| 1013 | 0x7900, 0x79fc, | ||
| 1014 | 0x7b00, 0x7c38, | ||
| 1015 | 0x7d00, 0x7efc, | ||
| 1016 | 0x8dc0, 0x8e1c, | ||
| 1017 | 0x8e30, 0x8e78, | ||
| 1018 | 0x8ea0, 0x8f6c, | ||
| 1019 | 0x8fc0, 0x9074, | ||
| 1020 | 0x90fc, 0x90fc, | ||
| 1021 | 0x9400, 0x9458, | ||
| 1022 | 0x9600, 0x96bc, | ||
| 1023 | 0x9800, 0x9808, | ||
| 1024 | 0x9820, 0x983c, | ||
| 1025 | 0x9850, 0x9864, | ||
| 1026 | 0x9c00, 0x9c6c, | ||
| 1027 | 0x9c80, 0x9cec, | ||
| 1028 | 0x9d00, 0x9d6c, | ||
| 1029 | 0x9d80, 0x9dec, | ||
| 1030 | 0x9e00, 0x9e6c, | ||
| 1031 | 0x9e80, 0x9eec, | ||
| 1032 | 0x9f00, 0x9f6c, | ||
| 1033 | 0x9f80, 0x9fec, | ||
| 1034 | 0xd004, 0xd03c, | ||
| 1035 | 0xdfc0, 0xdfe0, | ||
| 1036 | 0xe000, 0xea7c, | ||
| 1037 | 0xf000, 0x11190, | ||
| 1038 | 0x19040, 0x19124, | ||
| 1039 | 0x19150, 0x191b0, | ||
| 1040 | 0x191d0, 0x191e8, | ||
| 1041 | 0x19238, 0x1924c, | ||
| 1042 | 0x193f8, 0x19474, | ||
| 1043 | 0x19490, 0x194f8, | ||
| 1044 | 0x19800, 0x19f30, | ||
| 1045 | 0x1a000, 0x1a06c, | ||
| 1046 | 0x1a0b0, 0x1a120, | ||
| 1047 | 0x1a128, 0x1a138, | ||
| 1048 | 0x1a190, 0x1a1c4, | ||
| 1049 | 0x1a1fc, 0x1a1fc, | ||
| 1050 | 0x1e040, 0x1e04c, | ||
| 1051 | 0x1e240, 0x1e28c, | ||
| 1052 | 0x1e2c0, 0x1e2c0, | ||
| 1053 | 0x1e2e0, 0x1e2e0, | ||
| 1054 | 0x1e300, 0x1e384, | ||
| 1055 | 0x1e3c0, 0x1e3c8, | ||
| 1056 | 0x1e440, 0x1e44c, | ||
| 1057 | 0x1e640, 0x1e68c, | ||
| 1058 | 0x1e6c0, 0x1e6c0, | ||
| 1059 | 0x1e6e0, 0x1e6e0, | ||
| 1060 | 0x1e700, 0x1e784, | ||
| 1061 | 0x1e7c0, 0x1e7c8, | ||
| 1062 | 0x1e840, 0x1e84c, | ||
| 1063 | 0x1ea40, 0x1ea8c, | ||
| 1064 | 0x1eac0, 0x1eac0, | ||
| 1065 | 0x1eae0, 0x1eae0, | ||
| 1066 | 0x1eb00, 0x1eb84, | ||
| 1067 | 0x1ebc0, 0x1ebc8, | ||
| 1068 | 0x1ec40, 0x1ec4c, | ||
| 1069 | 0x1ee40, 0x1ee8c, | ||
| 1070 | 0x1eec0, 0x1eec0, | ||
| 1071 | 0x1eee0, 0x1eee0, | ||
| 1072 | 0x1ef00, 0x1ef84, | ||
| 1073 | 0x1efc0, 0x1efc8, | ||
| 1074 | 0x1f040, 0x1f04c, | ||
| 1075 | 0x1f240, 0x1f28c, | ||
| 1076 | 0x1f2c0, 0x1f2c0, | ||
| 1077 | 0x1f2e0, 0x1f2e0, | ||
| 1078 | 0x1f300, 0x1f384, | ||
| 1079 | 0x1f3c0, 0x1f3c8, | ||
| 1080 | 0x1f440, 0x1f44c, | ||
| 1081 | 0x1f640, 0x1f68c, | ||
| 1082 | 0x1f6c0, 0x1f6c0, | ||
| 1083 | 0x1f6e0, 0x1f6e0, | ||
| 1084 | 0x1f700, 0x1f784, | ||
| 1085 | 0x1f7c0, 0x1f7c8, | ||
| 1086 | 0x1f840, 0x1f84c, | ||
| 1087 | 0x1fa40, 0x1fa8c, | ||
| 1088 | 0x1fac0, 0x1fac0, | ||
| 1089 | 0x1fae0, 0x1fae0, | ||
| 1090 | 0x1fb00, 0x1fb84, | ||
| 1091 | 0x1fbc0, 0x1fbc8, | ||
| 1092 | 0x1fc40, 0x1fc4c, | ||
| 1093 | 0x1fe40, 0x1fe8c, | ||
| 1094 | 0x1fec0, 0x1fec0, | ||
| 1095 | 0x1fee0, 0x1fee0, | ||
| 1096 | 0x1ff00, 0x1ff84, | ||
| 1097 | 0x1ffc0, 0x1ffc8, | ||
| 1098 | 0x20000, 0x2002c, | ||
| 1099 | 0x20100, 0x2013c, | ||
| 1100 | 0x20190, 0x201c8, | ||
| 1101 | 0x20200, 0x20318, | ||
| 1102 | 0x20400, 0x20528, | ||
| 1103 | 0x20540, 0x20614, | ||
| 1104 | 0x21000, 0x21040, | ||
| 1105 | 0x2104c, 0x21060, | ||
| 1106 | 0x210c0, 0x210ec, | ||
| 1107 | 0x21200, 0x21268, | ||
| 1108 | 0x21270, 0x21284, | ||
| 1109 | 0x212fc, 0x21388, | ||
| 1110 | 0x21400, 0x21404, | ||
| 1111 | 0x21500, 0x21518, | ||
| 1112 | 0x2152c, 0x2153c, | ||
| 1113 | 0x21550, 0x21554, | ||
| 1114 | 0x21600, 0x21600, | ||
| 1115 | 0x21608, 0x21628, | ||
| 1116 | 0x21630, 0x2163c, | ||
| 1117 | 0x21700, 0x2171c, | ||
| 1118 | 0x21780, 0x2178c, | ||
| 1119 | 0x21800, 0x21c38, | ||
| 1120 | 0x21c80, 0x21d7c, | ||
| 1121 | 0x21e00, 0x21e04, | ||
| 1122 | 0x22000, 0x2202c, | ||
| 1123 | 0x22100, 0x2213c, | ||
| 1124 | 0x22190, 0x221c8, | ||
| 1125 | 0x22200, 0x22318, | ||
| 1126 | 0x22400, 0x22528, | ||
| 1127 | 0x22540, 0x22614, | ||
| 1128 | 0x23000, 0x23040, | ||
| 1129 | 0x2304c, 0x23060, | ||
| 1130 | 0x230c0, 0x230ec, | ||
| 1131 | 0x23200, 0x23268, | ||
| 1132 | 0x23270, 0x23284, | ||
| 1133 | 0x232fc, 0x23388, | ||
| 1134 | 0x23400, 0x23404, | ||
| 1135 | 0x23500, 0x23518, | ||
| 1136 | 0x2352c, 0x2353c, | ||
| 1137 | 0x23550, 0x23554, | ||
| 1138 | 0x23600, 0x23600, | ||
| 1139 | 0x23608, 0x23628, | ||
| 1140 | 0x23630, 0x2363c, | ||
| 1141 | 0x23700, 0x2371c, | ||
| 1142 | 0x23780, 0x2378c, | ||
| 1143 | 0x23800, 0x23c38, | ||
| 1144 | 0x23c80, 0x23d7c, | ||
| 1145 | 0x23e00, 0x23e04, | ||
| 1146 | 0x24000, 0x2402c, | ||
| 1147 | 0x24100, 0x2413c, | ||
| 1148 | 0x24190, 0x241c8, | ||
| 1149 | 0x24200, 0x24318, | ||
| 1150 | 0x24400, 0x24528, | ||
| 1151 | 0x24540, 0x24614, | ||
| 1152 | 0x25000, 0x25040, | ||
| 1153 | 0x2504c, 0x25060, | ||
| 1154 | 0x250c0, 0x250ec, | ||
| 1155 | 0x25200, 0x25268, | ||
| 1156 | 0x25270, 0x25284, | ||
| 1157 | 0x252fc, 0x25388, | ||
| 1158 | 0x25400, 0x25404, | ||
| 1159 | 0x25500, 0x25518, | ||
| 1160 | 0x2552c, 0x2553c, | ||
| 1161 | 0x25550, 0x25554, | ||
| 1162 | 0x25600, 0x25600, | ||
| 1163 | 0x25608, 0x25628, | ||
| 1164 | 0x25630, 0x2563c, | ||
| 1165 | 0x25700, 0x2571c, | ||
| 1166 | 0x25780, 0x2578c, | ||
| 1167 | 0x25800, 0x25c38, | ||
| 1168 | 0x25c80, 0x25d7c, | ||
| 1169 | 0x25e00, 0x25e04, | ||
| 1170 | 0x26000, 0x2602c, | ||
| 1171 | 0x26100, 0x2613c, | ||
| 1172 | 0x26190, 0x261c8, | ||
| 1173 | 0x26200, 0x26318, | ||
| 1174 | 0x26400, 0x26528, | ||
| 1175 | 0x26540, 0x26614, | ||
| 1176 | 0x27000, 0x27040, | ||
| 1177 | 0x2704c, 0x27060, | ||
| 1178 | 0x270c0, 0x270ec, | ||
| 1179 | 0x27200, 0x27268, | ||
| 1180 | 0x27270, 0x27284, | ||
| 1181 | 0x272fc, 0x27388, | ||
| 1182 | 0x27400, 0x27404, | ||
| 1183 | 0x27500, 0x27518, | ||
| 1184 | 0x2752c, 0x2753c, | ||
| 1185 | 0x27550, 0x27554, | ||
| 1186 | 0x27600, 0x27600, | ||
| 1187 | 0x27608, 0x27628, | ||
| 1188 | 0x27630, 0x2763c, | ||
| 1189 | 0x27700, 0x2771c, | ||
| 1190 | 0x27780, 0x2778c, | ||
| 1191 | 0x27800, 0x27c38, | ||
| 1192 | 0x27c80, 0x27d7c, | ||
| 1193 | 0x27e00, 0x27e04 | ||
| 1194 | }; | ||
| 1195 | |||
| 1196 | int i; | ||
| 1197 | struct adapter *ap = netdev2adap(dev); | ||
| 1198 | |||
| 1199 | regs->version = mk_adap_vers(ap); | ||
| 1200 | |||
| 1201 | memset(buf, 0, T4_REGMAP_SIZE); | ||
| 1202 | for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2) | ||
| 1203 | reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]); | ||
| 1204 | } | ||
| 1205 | |||
| 1206 | static int restart_autoneg(struct net_device *dev) | ||
| 1207 | { | ||
| 1208 | struct port_info *p = netdev_priv(dev); | ||
| 1209 | |||
| 1210 | if (!netif_running(dev)) | ||
| 1211 | return -EAGAIN; | ||
| 1212 | if (p->link_cfg.autoneg != AUTONEG_ENABLE) | ||
| 1213 | return -EINVAL; | ||
| 1214 | t4_restart_aneg(p->adapter, 0, p->tx_chan); | ||
| 1215 | return 0; | ||
| 1216 | } | ||
| 1217 | |||
| 1218 | static int identify_port(struct net_device *dev, u32 data) | ||
| 1219 | { | ||
| 1220 | if (data == 0) | ||
| 1221 | data = 2; /* default to 2 seconds */ | ||
| 1222 | |||
| 1223 | return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid, | ||
| 1224 | data * 5); | ||
| 1225 | } | ||
| 1226 | |||
| 1227 | static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps) | ||
| 1228 | { | ||
| 1229 | unsigned int v = 0; | ||
| 1230 | |||
| 1231 | if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) { | ||
| 1232 | v |= SUPPORTED_TP; | ||
| 1233 | if (caps & FW_PORT_CAP_SPEED_100M) | ||
| 1234 | v |= SUPPORTED_100baseT_Full; | ||
| 1235 | if (caps & FW_PORT_CAP_SPEED_1G) | ||
| 1236 | v |= SUPPORTED_1000baseT_Full; | ||
| 1237 | if (caps & FW_PORT_CAP_SPEED_10G) | ||
| 1238 | v |= SUPPORTED_10000baseT_Full; | ||
| 1239 | } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { | ||
| 1240 | v |= SUPPORTED_Backplane; | ||
| 1241 | if (caps & FW_PORT_CAP_SPEED_1G) | ||
| 1242 | v |= SUPPORTED_1000baseKX_Full; | ||
| 1243 | if (caps & FW_PORT_CAP_SPEED_10G) | ||
| 1244 | v |= SUPPORTED_10000baseKX4_Full; | ||
| 1245 | } else if (type == FW_PORT_TYPE_KR) | ||
| 1246 | v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; | ||
| 1247 | else if (type == FW_PORT_TYPE_FIBER) | ||
| 1248 | v |= SUPPORTED_FIBRE; | ||
| 1249 | |||
| 1250 | if (caps & FW_PORT_CAP_ANEG) | ||
| 1251 | v |= SUPPORTED_Autoneg; | ||
| 1252 | return v; | ||
| 1253 | } | ||
| 1254 | |||
| 1255 | static unsigned int to_fw_linkcaps(unsigned int caps) | ||
| 1256 | { | ||
| 1257 | unsigned int v = 0; | ||
| 1258 | |||
| 1259 | if (caps & ADVERTISED_100baseT_Full) | ||
| 1260 | v |= FW_PORT_CAP_SPEED_100M; | ||
| 1261 | if (caps & ADVERTISED_1000baseT_Full) | ||
| 1262 | v |= FW_PORT_CAP_SPEED_1G; | ||
| 1263 | if (caps & ADVERTISED_10000baseT_Full) | ||
| 1264 | v |= FW_PORT_CAP_SPEED_10G; | ||
| 1265 | return v; | ||
| 1266 | } | ||
| 1267 | |||
| 1268 | static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
| 1269 | { | ||
| 1270 | const struct port_info *p = netdev_priv(dev); | ||
| 1271 | |||
| 1272 | if (p->port_type == FW_PORT_TYPE_BT_SGMII || | ||
| 1273 | p->port_type == FW_PORT_TYPE_BT_XAUI) | ||
| 1274 | cmd->port = PORT_TP; | ||
| 1275 | else if (p->port_type == FW_PORT_TYPE_FIBER) | ||
| 1276 | cmd->port = PORT_FIBRE; | ||
| 1277 | else if (p->port_type == FW_PORT_TYPE_TWINAX) | ||
| 1278 | cmd->port = PORT_DA; | ||
| 1279 | else | ||
| 1280 | cmd->port = PORT_OTHER; | ||
| 1281 | |||
| 1282 | if (p->mdio_addr >= 0) { | ||
| 1283 | cmd->phy_address = p->mdio_addr; | ||
| 1284 | cmd->transceiver = XCVR_EXTERNAL; | ||
| 1285 | cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? | ||
| 1286 | MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; | ||
| 1287 | } else { | ||
| 1288 | cmd->phy_address = 0; /* not really, but no better option */ | ||
| 1289 | cmd->transceiver = XCVR_INTERNAL; | ||
| 1290 | cmd->mdio_support = 0; | ||
| 1291 | } | ||
| 1292 | |||
| 1293 | cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported); | ||
| 1294 | cmd->advertising = from_fw_linkcaps(p->port_type, | ||
| 1295 | p->link_cfg.advertising); | ||
| 1296 | cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0; | ||
| 1297 | cmd->duplex = DUPLEX_FULL; | ||
| 1298 | cmd->autoneg = p->link_cfg.autoneg; | ||
| 1299 | cmd->maxtxpkt = 0; | ||
| 1300 | cmd->maxrxpkt = 0; | ||
| 1301 | return 0; | ||
| 1302 | } | ||
| 1303 | |||
| 1304 | static unsigned int speed_to_caps(int speed) | ||
| 1305 | { | ||
| 1306 | if (speed == SPEED_100) | ||
| 1307 | return FW_PORT_CAP_SPEED_100M; | ||
| 1308 | if (speed == SPEED_1000) | ||
| 1309 | return FW_PORT_CAP_SPEED_1G; | ||
| 1310 | if (speed == SPEED_10000) | ||
| 1311 | return FW_PORT_CAP_SPEED_10G; | ||
| 1312 | return 0; | ||
| 1313 | } | ||
| 1314 | |||
| 1315 | static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
| 1316 | { | ||
| 1317 | unsigned int cap; | ||
| 1318 | struct port_info *p = netdev_priv(dev); | ||
| 1319 | struct link_config *lc = &p->link_cfg; | ||
| 1320 | |||
| 1321 | if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */ | ||
| 1322 | return -EINVAL; | ||
| 1323 | |||
| 1324 | if (!(lc->supported & FW_PORT_CAP_ANEG)) { | ||
| 1325 | /* | ||
| 1326 | * PHY offers a single speed. See if that's what's | ||
| 1327 | * being requested. | ||
| 1328 | */ | ||
| 1329 | if (cmd->autoneg == AUTONEG_DISABLE && | ||
| 1330 | (lc->supported & speed_to_caps(cmd->speed))) | ||
| 1331 | return 0; | ||
| 1332 | return -EINVAL; | ||
| 1333 | } | ||
| 1334 | |||
| 1335 | if (cmd->autoneg == AUTONEG_DISABLE) { | ||
| 1336 | cap = speed_to_caps(cmd->speed); | ||
| 1337 | |||
| 1338 | if (!(lc->supported & cap) || cmd->speed == SPEED_1000 || | ||
| 1339 | cmd->speed == SPEED_10000) | ||
| 1340 | return -EINVAL; | ||
| 1341 | lc->requested_speed = cap; | ||
| 1342 | lc->advertising = 0; | ||
| 1343 | } else { | ||
| 1344 | cap = to_fw_linkcaps(cmd->advertising); | ||
| 1345 | if (!(lc->supported & cap)) | ||
| 1346 | return -EINVAL; | ||
| 1347 | lc->requested_speed = 0; | ||
| 1348 | lc->advertising = cap | FW_PORT_CAP_ANEG; | ||
| 1349 | } | ||
| 1350 | lc->autoneg = cmd->autoneg; | ||
| 1351 | |||
| 1352 | if (netif_running(dev)) | ||
| 1353 | return t4_link_start(p->adapter, 0, p->tx_chan, lc); | ||
| 1354 | return 0; | ||
| 1355 | } | ||
| 1356 | |||
| 1357 | static void get_pauseparam(struct net_device *dev, | ||
| 1358 | struct ethtool_pauseparam *epause) | ||
| 1359 | { | ||
| 1360 | struct port_info *p = netdev_priv(dev); | ||
| 1361 | |||
| 1362 | epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; | ||
| 1363 | epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0; | ||
| 1364 | epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0; | ||
| 1365 | } | ||
| 1366 | |||
| 1367 | static int set_pauseparam(struct net_device *dev, | ||
| 1368 | struct ethtool_pauseparam *epause) | ||
| 1369 | { | ||
| 1370 | struct port_info *p = netdev_priv(dev); | ||
| 1371 | struct link_config *lc = &p->link_cfg; | ||
| 1372 | |||
| 1373 | if (epause->autoneg == AUTONEG_DISABLE) | ||
| 1374 | lc->requested_fc = 0; | ||
| 1375 | else if (lc->supported & FW_PORT_CAP_ANEG) | ||
| 1376 | lc->requested_fc = PAUSE_AUTONEG; | ||
| 1377 | else | ||
| 1378 | return -EINVAL; | ||
| 1379 | |||
| 1380 | if (epause->rx_pause) | ||
| 1381 | lc->requested_fc |= PAUSE_RX; | ||
| 1382 | if (epause->tx_pause) | ||
| 1383 | lc->requested_fc |= PAUSE_TX; | ||
| 1384 | if (netif_running(dev)) | ||
| 1385 | return t4_link_start(p->adapter, 0, p->tx_chan, lc); | ||
| 1386 | return 0; | ||
| 1387 | } | ||
| 1388 | |||
| 1389 | static u32 get_rx_csum(struct net_device *dev) | ||
| 1390 | { | ||
| 1391 | struct port_info *p = netdev_priv(dev); | ||
| 1392 | |||
| 1393 | return p->rx_offload & RX_CSO; | ||
| 1394 | } | ||
| 1395 | |||
| 1396 | static int set_rx_csum(struct net_device *dev, u32 data) | ||
| 1397 | { | ||
| 1398 | struct port_info *p = netdev_priv(dev); | ||
| 1399 | |||
| 1400 | if (data) | ||
| 1401 | p->rx_offload |= RX_CSO; | ||
| 1402 | else | ||
| 1403 | p->rx_offload &= ~RX_CSO; | ||
| 1404 | return 0; | ||
| 1405 | } | ||
| 1406 | |||
| 1407 | static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) | ||
| 1408 | { | ||
| 1409 | const struct port_info *pi = netdev_priv(dev); | ||
| 1410 | const struct sge *s = &pi->adapter->sge; | ||
| 1411 | |||
| 1412 | e->rx_max_pending = MAX_RX_BUFFERS; | ||
| 1413 | e->rx_mini_max_pending = MAX_RSPQ_ENTRIES; | ||
| 1414 | e->rx_jumbo_max_pending = 0; | ||
| 1415 | e->tx_max_pending = MAX_TXQ_ENTRIES; | ||
| 1416 | |||
| 1417 | e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8; | ||
| 1418 | e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; | ||
| 1419 | e->rx_jumbo_pending = 0; | ||
| 1420 | e->tx_pending = s->ethtxq[pi->first_qset].q.size; | ||
| 1421 | } | ||
| 1422 | |||
| 1423 | static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) | ||
| 1424 | { | ||
| 1425 | int i; | ||
| 1426 | const struct port_info *pi = netdev_priv(dev); | ||
| 1427 | struct adapter *adapter = pi->adapter; | ||
| 1428 | struct sge *s = &adapter->sge; | ||
| 1429 | |||
| 1430 | if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending || | ||
| 1431 | e->tx_pending > MAX_TXQ_ENTRIES || | ||
| 1432 | e->rx_mini_pending > MAX_RSPQ_ENTRIES || | ||
| 1433 | e->rx_mini_pending < MIN_RSPQ_ENTRIES || | ||
| 1434 | e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES) | ||
| 1435 | return -EINVAL; | ||
| 1436 | |||
| 1437 | if (adapter->flags & FULL_INIT_DONE) | ||
| 1438 | return -EBUSY; | ||
| 1439 | |||
| 1440 | for (i = 0; i < pi->nqsets; ++i) { | ||
| 1441 | s->ethtxq[pi->first_qset + i].q.size = e->tx_pending; | ||
| 1442 | s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8; | ||
| 1443 | s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending; | ||
| 1444 | } | ||
| 1445 | return 0; | ||
| 1446 | } | ||
| 1447 | |||
| 1448 | static int closest_timer(const struct sge *s, int time) | ||
| 1449 | { | ||
| 1450 | int i, delta, match = 0, min_delta = INT_MAX; | ||
| 1451 | |||
| 1452 | for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { | ||
| 1453 | delta = time - s->timer_val[i]; | ||
| 1454 | if (delta < 0) | ||
| 1455 | delta = -delta; | ||
| 1456 | if (delta < min_delta) { | ||
| 1457 | min_delta = delta; | ||
| 1458 | match = i; | ||
| 1459 | } | ||
| 1460 | } | ||
| 1461 | return match; | ||
| 1462 | } | ||
| 1463 | |||
| 1464 | static int closest_thres(const struct sge *s, int thres) | ||
| 1465 | { | ||
| 1466 | int i, delta, match = 0, min_delta = INT_MAX; | ||
| 1467 | |||
| 1468 | for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { | ||
| 1469 | delta = thres - s->counter_val[i]; | ||
| 1470 | if (delta < 0) | ||
| 1471 | delta = -delta; | ||
| 1472 | if (delta < min_delta) { | ||
| 1473 | min_delta = delta; | ||
| 1474 | match = i; | ||
| 1475 | } | ||
| 1476 | } | ||
| 1477 | return match; | ||
| 1478 | } | ||
| 1479 | |||
| 1480 | /* | ||
| 1481 | * Return a queue's interrupt hold-off time in us. 0 means no timer. | ||
| 1482 | */ | ||
| 1483 | static unsigned int qtimer_val(const struct adapter *adap, | ||
| 1484 | const struct sge_rspq *q) | ||
| 1485 | { | ||
| 1486 | unsigned int idx = q->intr_params >> 1; | ||
| 1487 | |||
| 1488 | return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0; | ||
| 1489 | } | ||
| 1490 | |||
| 1491 | /** | ||
| 1492 | * set_rxq_intr_params - set a queue's interrupt holdoff parameters | ||
| 1493 | * @adap: the adapter | ||
| 1494 | * @q: the Rx queue | ||
| 1495 | * @us: the hold-off time in us, or 0 to disable timer | ||
| 1496 | * @cnt: the hold-off packet count, or 0 to disable counter | ||
| 1497 | * | ||
| 1498 | * Sets an Rx queue's interrupt hold-off time and packet count. At least | ||
| 1499 | * one of the two needs to be enabled for the queue to generate interrupts. | ||
| 1500 | */ | ||
| 1501 | static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q, | ||
| 1502 | unsigned int us, unsigned int cnt) | ||
| 1503 | { | ||
| 1504 | if ((us | cnt) == 0) | ||
| 1505 | cnt = 1; | ||
| 1506 | |||
| 1507 | if (cnt) { | ||
| 1508 | int err; | ||
| 1509 | u32 v, new_idx; | ||
| 1510 | |||
| 1511 | new_idx = closest_thres(&adap->sge, cnt); | ||
| 1512 | if (q->desc && q->pktcnt_idx != new_idx) { | ||
| 1513 | /* the queue has already been created, update it */ | ||
| 1514 | v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | | ||
| 1515 | FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | | ||
| 1516 | FW_PARAMS_PARAM_YZ(q->cntxt_id); | ||
| 1517 | err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx); | ||
| 1518 | if (err) | ||
| 1519 | return err; | ||
| 1520 | } | ||
| 1521 | q->pktcnt_idx = new_idx; | ||
| 1522 | } | ||
| 1523 | |||
| 1524 | us = us == 0 ? 6 : closest_timer(&adap->sge, us); | ||
| 1525 | q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0); | ||
| 1526 | return 0; | ||
| 1527 | } | ||
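For illustration only, the nearest-match lookup that the holdoff code above leans on (closest_timer()/closest_thres()) can be sketched outside the driver; the table of holdoff times below is made up and is not the hardware's actual SGE timer table:

#include <stdio.h>
#include <limits.h>

/* Same nearest-value search as closest_timer()/closest_thres() above,
 * over an example table of holdoff times in microseconds. */
static int closest_idx(const int *tbl, int n, int want)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < n; i++) {
		delta = want - tbl[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

int main(void)
{
	static const int timer_us[] = { 1, 5, 10, 50, 100, 200 }; /* example only */

	/* asking for 70us picks index 3 (50us), the closest entry */
	printf("%d\n", closest_idx(timer_us, 6, 70));
	return 0;
}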
| 1528 | |||
| 1529 | static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) | ||
| 1530 | { | ||
| 1531 | const struct port_info *pi = netdev_priv(dev); | ||
| 1532 | struct adapter *adap = pi->adapter; | ||
| 1533 | |||
| 1534 | return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq, | ||
| 1535 | c->rx_coalesce_usecs, c->rx_max_coalesced_frames); | ||
| 1536 | } | ||
| 1537 | |||
| 1538 | static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) | ||
| 1539 | { | ||
| 1540 | const struct port_info *pi = netdev_priv(dev); | ||
| 1541 | const struct adapter *adap = pi->adapter; | ||
| 1542 | const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq; | ||
| 1543 | |||
| 1544 | c->rx_coalesce_usecs = qtimer_val(adap, rq); | ||
| 1545 | c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ? | ||
| 1546 | adap->sge.counter_val[rq->pktcnt_idx] : 0; | ||
| 1547 | return 0; | ||
| 1548 | } | ||
| 1549 | |||
| 1550 | /* | ||
| 1551 | * Translate a physical EEPROM address to virtual. The first 1K is accessed | ||
| 1552 | * through virtual addresses starting at 31K; the rest is accessed through | ||
| 1553 | * virtual addresses starting at 0. This mapping is correct only for PF0. | ||
| 1554 | */ | ||
| 1555 | static int eeprom_ptov(unsigned int phys_addr) | ||
| 1556 | { | ||
| 1557 | if (phys_addr < 1024) | ||
| 1558 | return phys_addr + (31 << 10); | ||
| 1559 | if (phys_addr < EEPROMSIZE) | ||
| 1560 | return phys_addr - 1024; | ||
| 1561 | return -EINVAL; | ||
| 1562 | } | ||
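A minimal standalone sketch of the physical-to-virtual mapping described in the comment above; EXAMPLE_EEPROMSIZE is a placeholder bound chosen for illustration, not the driver's real EEPROMSIZE:

#include <stdio.h>

#define EXAMPLE_EEPROMSIZE 16384   /* placeholder size, for illustration only */

/* Mirrors the mapping above: the first 1K of physical addresses is exposed
 * at virtual offset 31K, the remainder at virtual offset 0. */
static int example_ptov(unsigned int phys_addr)
{
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < EXAMPLE_EEPROMSIZE)
		return phys_addr - 1024;
	return -1;
}

int main(void)
{
	/* prints 31744 32767 0 */
	printf("%d %d %d\n", example_ptov(0), example_ptov(1023), example_ptov(1024));
	return 0;
}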
| 1563 | |||
| 1564 | /* | ||
| 1565 | * The next two routines implement eeprom read/write from physical addresses. | ||
| 1566 | * The physical->virtual translation is correct only for PF0. | ||
| 1567 | */ | ||
| 1568 | static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) | ||
| 1569 | { | ||
| 1570 | int vaddr = eeprom_ptov(phys_addr); | ||
| 1571 | |||
| 1572 | if (vaddr >= 0) | ||
| 1573 | vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); | ||
| 1574 | return vaddr < 0 ? vaddr : 0; | ||
| 1575 | } | ||
| 1576 | |||
| 1577 | static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) | ||
| 1578 | { | ||
| 1579 | int vaddr = eeprom_ptov(phys_addr); | ||
| 1580 | |||
| 1581 | if (vaddr >= 0) | ||
| 1582 | vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); | ||
| 1583 | return vaddr < 0 ? vaddr : 0; | ||
| 1584 | } | ||
| 1585 | |||
| 1586 | #define EEPROM_MAGIC 0x38E2F10C | ||
| 1587 | |||
| 1588 | static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, | ||
| 1589 | u8 *data) | ||
| 1590 | { | ||
| 1591 | int i, err = 0; | ||
| 1592 | struct adapter *adapter = netdev2adap(dev); | ||
| 1593 | |||
| 1594 | u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); | ||
| 1595 | if (!buf) | ||
| 1596 | return -ENOMEM; | ||
| 1597 | |||
| 1598 | e->magic = EEPROM_MAGIC; | ||
| 1599 | for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) | ||
| 1600 | err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]); | ||
| 1601 | |||
| 1602 | if (!err) | ||
| 1603 | memcpy(data, buf + e->offset, e->len); | ||
| 1604 | kfree(buf); | ||
| 1605 | return err; | ||
| 1606 | } | ||
| 1607 | |||
| 1608 | static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | ||
| 1609 | u8 *data) | ||
| 1610 | { | ||
| 1611 | u8 *buf; | ||
| 1612 | int err = 0; | ||
| 1613 | u32 aligned_offset, aligned_len, *p; | ||
| 1614 | struct adapter *adapter = netdev2adap(dev); | ||
| 1615 | |||
| 1616 | if (eeprom->magic != EEPROM_MAGIC) | ||
| 1617 | return -EINVAL; | ||
| 1618 | |||
| 1619 | aligned_offset = eeprom->offset & ~3; | ||
| 1620 | aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; | ||
| 1621 | |||
| 1622 | if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { | ||
| 1623 | /* | ||
| 1624 | * RMW possibly needed for first or last words. | ||
| 1625 | */ | ||
| 1626 | buf = kmalloc(aligned_len, GFP_KERNEL); | ||
| 1627 | if (!buf) | ||
| 1628 | return -ENOMEM; | ||
| 1629 | err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); | ||
| 1630 | if (!err && aligned_len > 4) | ||
| 1631 | err = eeprom_rd_phys(adapter, | ||
| 1632 | aligned_offset + aligned_len - 4, | ||
| 1633 | (u32 *)&buf[aligned_len - 4]); | ||
| 1634 | if (err) | ||
| 1635 | goto out; | ||
| 1636 | memcpy(buf + (eeprom->offset & 3), data, eeprom->len); | ||
| 1637 | } else | ||
| 1638 | buf = data; | ||
| 1639 | |||
| 1640 | err = t4_seeprom_wp(adapter, false); | ||
| 1641 | if (err) | ||
| 1642 | goto out; | ||
| 1643 | |||
| 1644 | for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { | ||
| 1645 | err = eeprom_wr_phys(adapter, aligned_offset, *p); | ||
| 1646 | aligned_offset += 4; | ||
| 1647 | } | ||
| 1648 | |||
| 1649 | if (!err) | ||
| 1650 | err = t4_seeprom_wp(adapter, true); | ||
| 1651 | out: | ||
| 1652 | if (buf != data) | ||
| 1653 | kfree(buf); | ||
| 1654 | return err; | ||
| 1655 | } | ||
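The read-modify-write path above rounds the requested window out to 32-bit word boundaries; a small standalone check of that alignment arithmetic, with an arbitrary example offset and length:

#include <stdio.h>

int main(void)
{
	unsigned int offset = 6, len = 5;   /* example unaligned request */

	unsigned int aligned_offset = offset & ~3;                 /* 4 */
	unsigned int aligned_len = (len + (offset & 3) + 3) & ~3;  /* 8 */

	/* the request spans two 32-bit words, so both ends need RMW */
	printf("aligned_offset=%u aligned_len=%u\n", aligned_offset, aligned_len);
	return 0;
}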
| 1656 | |||
| 1657 | static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) | ||
| 1658 | { | ||
| 1659 | int ret; | ||
| 1660 | const struct firmware *fw; | ||
| 1661 | struct adapter *adap = netdev2adap(netdev); | ||
| 1662 | |||
| 1663 | ef->data[sizeof(ef->data) - 1] = '\0'; | ||
| 1664 | ret = request_firmware(&fw, ef->data, adap->pdev_dev); | ||
| 1665 | if (ret < 0) | ||
| 1666 | return ret; | ||
| 1667 | |||
| 1668 | ret = t4_load_fw(adap, fw->data, fw->size); | ||
| 1669 | release_firmware(fw); | ||
| 1670 | if (!ret) | ||
| 1671 | dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data); | ||
| 1672 | return ret; | ||
| 1673 | } | ||
| 1674 | |||
| 1675 | #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC) | ||
| 1676 | #define BCAST_CRC 0xa0ccc1a6 | ||
| 1677 | |||
| 1678 | static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
| 1679 | { | ||
| 1680 | wol->supported = WAKE_BCAST | WAKE_MAGIC; | ||
| 1681 | wol->wolopts = netdev2adap(dev)->wol; | ||
| 1682 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | ||
| 1683 | } | ||
| 1684 | |||
| 1685 | static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
| 1686 | { | ||
| 1687 | int err = 0; | ||
| 1688 | struct port_info *pi = netdev_priv(dev); | ||
| 1689 | |||
| 1690 | if (wol->wolopts & ~WOL_SUPPORTED) | ||
| 1691 | return -EINVAL; | ||
| 1692 | t4_wol_magic_enable(pi->adapter, pi->tx_chan, | ||
| 1693 | (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL); | ||
| 1694 | if (wol->wolopts & WAKE_BCAST) { | ||
| 1695 | err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL, | ||
| 1696 | ~0ULL, 0, false); | ||
| 1697 | if (!err) | ||
| 1698 | err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1, | ||
| 1699 | ~6ULL, ~0ULL, BCAST_CRC, true); | ||
| 1700 | } else | ||
| 1701 | t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false); | ||
| 1702 | return err; | ||
| 1703 | } | ||
| 1704 | |||
| 1705 | static int set_tso(struct net_device *dev, u32 value) | ||
| 1706 | { | ||
| 1707 | if (value) | ||
| 1708 | dev->features |= NETIF_F_TSO | NETIF_F_TSO6; | ||
| 1709 | else | ||
| 1710 | dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); | ||
| 1711 | return 0; | ||
| 1712 | } | ||
| 1713 | |||
| 1714 | static struct ethtool_ops cxgb_ethtool_ops = { | ||
| 1715 | .get_settings = get_settings, | ||
| 1716 | .set_settings = set_settings, | ||
| 1717 | .get_drvinfo = get_drvinfo, | ||
| 1718 | .get_msglevel = get_msglevel, | ||
| 1719 | .set_msglevel = set_msglevel, | ||
| 1720 | .get_ringparam = get_sge_param, | ||
| 1721 | .set_ringparam = set_sge_param, | ||
| 1722 | .get_coalesce = get_coalesce, | ||
| 1723 | .set_coalesce = set_coalesce, | ||
| 1724 | .get_eeprom_len = get_eeprom_len, | ||
| 1725 | .get_eeprom = get_eeprom, | ||
| 1726 | .set_eeprom = set_eeprom, | ||
| 1727 | .get_pauseparam = get_pauseparam, | ||
| 1728 | .set_pauseparam = set_pauseparam, | ||
| 1729 | .get_rx_csum = get_rx_csum, | ||
| 1730 | .set_rx_csum = set_rx_csum, | ||
| 1731 | .set_tx_csum = ethtool_op_set_tx_ipv6_csum, | ||
| 1732 | .set_sg = ethtool_op_set_sg, | ||
| 1733 | .get_link = ethtool_op_get_link, | ||
| 1734 | .get_strings = get_strings, | ||
| 1735 | .phys_id = identify_port, | ||
| 1736 | .nway_reset = restart_autoneg, | ||
| 1737 | .get_sset_count = get_sset_count, | ||
| 1738 | .get_ethtool_stats = get_stats, | ||
| 1739 | .get_regs_len = get_regs_len, | ||
| 1740 | .get_regs = get_regs, | ||
| 1741 | .get_wol = get_wol, | ||
| 1742 | .set_wol = set_wol, | ||
| 1743 | .set_tso = set_tso, | ||
| 1744 | .flash_device = set_flash, | ||
| 1745 | }; | ||
| 1746 | |||
| 1747 | /* | ||
| 1748 | * debugfs support | ||
| 1749 | */ | ||
| 1750 | |||
| 1751 | static int mem_open(struct inode *inode, struct file *file) | ||
| 1752 | { | ||
| 1753 | file->private_data = inode->i_private; | ||
| 1754 | return 0; | ||
| 1755 | } | ||
| 1756 | |||
| 1757 | static ssize_t mem_read(struct file *file, char __user *buf, size_t count, | ||
| 1758 | loff_t *ppos) | ||
| 1759 | { | ||
| 1760 | loff_t pos = *ppos; | ||
| 1761 | loff_t avail = file->f_path.dentry->d_inode->i_size; | ||
| 1762 | unsigned int mem = (uintptr_t)file->private_data & 3; | ||
| 1763 | struct adapter *adap = file->private_data - mem; | ||
| 1764 | |||
| 1765 | if (pos < 0) | ||
| 1766 | return -EINVAL; | ||
| 1767 | if (pos >= avail) | ||
| 1768 | return 0; | ||
| 1769 | if (count > avail - pos) | ||
| 1770 | count = avail - pos; | ||
| 1771 | |||
| 1772 | while (count) { | ||
| 1773 | size_t len; | ||
| 1774 | int ret, ofst; | ||
| 1775 | __be32 data[16]; | ||
| 1776 | |||
| 1777 | if (mem == MEM_MC) | ||
| 1778 | ret = t4_mc_read(adap, pos, data, NULL); | ||
| 1779 | else | ||
| 1780 | ret = t4_edc_read(adap, mem, pos, data, NULL); | ||
| 1781 | if (ret) | ||
| 1782 | return ret; | ||
| 1783 | |||
| 1784 | ofst = pos % sizeof(data); | ||
| 1785 | len = min(count, sizeof(data) - ofst); | ||
| 1786 | if (copy_to_user(buf, (u8 *)data + ofst, len)) | ||
| 1787 | return -EFAULT; | ||
| 1788 | |||
| 1789 | buf += len; | ||
| 1790 | pos += len; | ||
| 1791 | count -= len; | ||
| 1792 | } | ||
| 1793 | count = pos - *ppos; | ||
| 1794 | *ppos = pos; | ||
| 1795 | return count; | ||
| 1796 | } | ||
| 1797 | |||
| 1798 | static const struct file_operations mem_debugfs_fops = { | ||
| 1799 | .owner = THIS_MODULE, | ||
| 1800 | .open = mem_open, | ||
| 1801 | .read = mem_read, | ||
| 1802 | }; | ||
| 1803 | |||
| 1804 | static void __devinit add_debugfs_mem(struct adapter *adap, const char *name, | ||
| 1805 | unsigned int idx, unsigned int size_mb) | ||
| 1806 | { | ||
| 1807 | struct dentry *de; | ||
| 1808 | |||
| 1809 | de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root, | ||
| 1810 | (void *)adap + idx, &mem_debugfs_fops); | ||
| 1811 | if (de && de->d_inode) | ||
| 1812 | de->d_inode->i_size = size_mb << 20; | ||
| 1813 | } | ||
| 1814 | |||
| 1815 | static int __devinit setup_debugfs(struct adapter *adap) | ||
| 1816 | { | ||
| 1817 | int i; | ||
| 1818 | |||
| 1819 | if (IS_ERR_OR_NULL(adap->debugfs_root)) | ||
| 1820 | return -1; | ||
| 1821 | |||
| 1822 | i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE); | ||
| 1823 | if (i & EDRAM0_ENABLE) | ||
| 1824 | add_debugfs_mem(adap, "edc0", MEM_EDC0, 5); | ||
| 1825 | if (i & EDRAM1_ENABLE) | ||
| 1826 | add_debugfs_mem(adap, "edc1", MEM_EDC1, 5); | ||
| 1827 | if (i & EXT_MEM_ENABLE) | ||
| 1828 | add_debugfs_mem(adap, "mc", MEM_MC, | ||
| 1829 | EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR))); | ||
| 1830 | if (adap->l2t) | ||
| 1831 | debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap, | ||
| 1832 | &t4_l2t_fops); | ||
| 1833 | return 0; | ||
| 1834 | } | ||
| 1835 | |||
| 1836 | /* | ||
| 1837 | * upper-layer driver support | ||
| 1838 | */ | ||
| 1839 | |||
| 1840 | /* | ||
| 1841 | * Allocate an active-open TID and set it to the supplied value. | ||
| 1842 | */ | ||
| 1843 | int cxgb4_alloc_atid(struct tid_info *t, void *data) | ||
| 1844 | { | ||
| 1845 | int atid = -1; | ||
| 1846 | |||
| 1847 | spin_lock_bh(&t->atid_lock); | ||
| 1848 | if (t->afree) { | ||
| 1849 | union aopen_entry *p = t->afree; | ||
| 1850 | |||
| 1851 | atid = p - t->atid_tab; | ||
| 1852 | t->afree = p->next; | ||
| 1853 | p->data = data; | ||
| 1854 | t->atids_in_use++; | ||
| 1855 | } | ||
| 1856 | spin_unlock_bh(&t->atid_lock); | ||
| 1857 | return atid; | ||
| 1858 | } | ||
| 1859 | EXPORT_SYMBOL(cxgb4_alloc_atid); | ||
| 1860 | |||
| 1861 | /* | ||
| 1862 | * Release an active-open TID. | ||
| 1863 | */ | ||
| 1864 | void cxgb4_free_atid(struct tid_info *t, unsigned int atid) | ||
| 1865 | { | ||
| 1866 | union aopen_entry *p = &t->atid_tab[atid]; | ||
| 1867 | |||
| 1868 | spin_lock_bh(&t->atid_lock); | ||
| 1869 | p->next = t->afree; | ||
| 1870 | t->afree = p; | ||
| 1871 | t->atids_in_use--; | ||
| 1872 | spin_unlock_bh(&t->atid_lock); | ||
| 1873 | } | ||
| 1874 | EXPORT_SYMBOL(cxgb4_free_atid); | ||
| 1875 | |||
| 1876 | /* | ||
| 1877 | * Allocate a server TID and set it to the supplied value. | ||
| 1878 | */ | ||
| 1879 | int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) | ||
| 1880 | { | ||
| 1881 | int stid; | ||
| 1882 | |||
| 1883 | spin_lock_bh(&t->stid_lock); | ||
| 1884 | if (family == PF_INET) { | ||
| 1885 | stid = find_first_zero_bit(t->stid_bmap, t->nstids); | ||
| 1886 | if (stid < t->nstids) | ||
| 1887 | __set_bit(stid, t->stid_bmap); | ||
| 1888 | else | ||
| 1889 | stid = -1; | ||
| 1890 | } else { | ||
| 1891 | stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2); | ||
| 1892 | if (stid < 0) | ||
| 1893 | stid = -1; | ||
| 1894 | } | ||
| 1895 | if (stid >= 0) { | ||
| 1896 | t->stid_tab[stid].data = data; | ||
| 1897 | stid += t->stid_base; | ||
| 1898 | t->stids_in_use++; | ||
| 1899 | } | ||
| 1900 | spin_unlock_bh(&t->stid_lock); | ||
| 1901 | return stid; | ||
| 1902 | } | ||
| 1903 | EXPORT_SYMBOL(cxgb4_alloc_stid); | ||
| 1904 | |||
| 1905 | /* | ||
| 1906 | * Release a server TID. | ||
| 1907 | */ | ||
| 1908 | void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) | ||
| 1909 | { | ||
| 1910 | stid -= t->stid_base; | ||
| 1911 | spin_lock_bh(&t->stid_lock); | ||
| 1912 | if (family == PF_INET) | ||
| 1913 | __clear_bit(stid, t->stid_bmap); | ||
| 1914 | else | ||
| 1915 | bitmap_release_region(t->stid_bmap, stid, 2); | ||
| 1916 | t->stid_tab[stid].data = NULL; | ||
| 1917 | t->stids_in_use--; | ||
| 1918 | spin_unlock_bh(&t->stid_lock); | ||
| 1919 | } | ||
| 1920 | EXPORT_SYMBOL(cxgb4_free_stid); | ||
| 1921 | |||
| 1922 | /* | ||
| 1923 | * Populate a TID_RELEASE WR. Caller must properly size the skb. | ||
| 1924 | */ | ||
| 1925 | static void mk_tid_release(struct sk_buff *skb, unsigned int chan, | ||
| 1926 | unsigned int tid) | ||
| 1927 | { | ||
| 1928 | struct cpl_tid_release *req; | ||
| 1929 | |||
| 1930 | set_wr_txq(skb, CPL_PRIORITY_SETUP, chan); | ||
| 1931 | req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); | ||
| 1932 | INIT_TP_WR(req, tid); | ||
| 1933 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); | ||
| 1934 | } | ||
| 1935 | |||
| 1936 | /* | ||
| 1937 | * Queue a TID release request and, if necessary, schedule a work queue to | ||
| 1938 | * process it. | ||
| 1939 | */ | ||
| 1940 | void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, | ||
| 1941 | unsigned int tid) | ||
| 1942 | { | ||
| 1943 | void **p = &t->tid_tab[tid]; | ||
| 1944 | struct adapter *adap = container_of(t, struct adapter, tids); | ||
| 1945 | |||
| 1946 | spin_lock_bh(&adap->tid_release_lock); | ||
| 1947 | *p = adap->tid_release_head; | ||
| 1948 | /* Low 2 bits encode the Tx channel number */ | ||
| 1949 | adap->tid_release_head = (void **)((uintptr_t)p | chan); | ||
| 1950 | if (!adap->tid_release_task_busy) { | ||
| 1951 | adap->tid_release_task_busy = true; | ||
| 1952 | schedule_work(&adap->tid_release_task); | ||
| 1953 | } | ||
| 1954 | spin_unlock_bh(&adap->tid_release_lock); | ||
| 1955 | } | ||
| 1956 | EXPORT_SYMBOL(cxgb4_queue_tid_release); | ||
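The "low 2 bits encode the Tx channel" trick above relies on the list entries being at least 4-byte aligned; a standalone sketch of the tag/untag arithmetic, using a local array as a stand-in for the driver's TID table:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	void *slots[4];                /* stand-in for tid_tab entries */
	unsigned int chan = 3;         /* example Tx channel, fits in 2 bits */

	/* tag: fold the channel into the low bits of an aligned pointer */
	void **tagged = (void **)((uintptr_t)&slots[2] | chan);

	/* untag: recover the channel and the original entry pointer */
	unsigned int got_chan = (uintptr_t)tagged & 3;
	void **entry = (void **)((uintptr_t)tagged - got_chan);

	printf("chan=%u index=%ld\n", got_chan, (long)(entry - slots));
	return 0;
}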
| 1957 | |||
| 1958 | /* | ||
| 1959 | * Process the list of pending TID release requests. | ||
| 1960 | */ | ||
| 1961 | static void process_tid_release_list(struct work_struct *work) | ||
| 1962 | { | ||
| 1963 | struct sk_buff *skb; | ||
| 1964 | struct adapter *adap; | ||
| 1965 | |||
| 1966 | adap = container_of(work, struct adapter, tid_release_task); | ||
| 1967 | |||
| 1968 | spin_lock_bh(&adap->tid_release_lock); | ||
| 1969 | while (adap->tid_release_head) { | ||
| 1970 | void **p = adap->tid_release_head; | ||
| 1971 | unsigned int chan = (uintptr_t)p & 3; | ||
| 1972 | p = (void *)p - chan; | ||
| 1973 | |||
| 1974 | adap->tid_release_head = *p; | ||
| 1975 | *p = NULL; | ||
| 1976 | spin_unlock_bh(&adap->tid_release_lock); | ||
| 1977 | |||
| 1978 | while (!(skb = alloc_skb(sizeof(struct cpl_tid_release), | ||
| 1979 | GFP_KERNEL))) | ||
| 1980 | schedule_timeout_uninterruptible(1); | ||
| 1981 | |||
| 1982 | mk_tid_release(skb, chan, p - adap->tids.tid_tab); | ||
| 1983 | t4_ofld_send(adap, skb); | ||
| 1984 | spin_lock_bh(&adap->tid_release_lock); | ||
| 1985 | } | ||
| 1986 | adap->tid_release_task_busy = false; | ||
| 1987 | spin_unlock_bh(&adap->tid_release_lock); | ||
| 1988 | } | ||
| 1989 | |||
| 1990 | /* | ||
| 1991 | * Release a TID and inform HW. If we are unable to allocate the release | ||
| 1992 | * message we defer to a work queue. | ||
| 1993 | */ | ||
| 1994 | void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid) | ||
| 1995 | { | ||
| 1996 | void *old; | ||
| 1997 | struct sk_buff *skb; | ||
| 1998 | struct adapter *adap = container_of(t, struct adapter, tids); | ||
| 1999 | |||
| 2000 | old = t->tid_tab[tid]; | ||
| 2001 | skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); | ||
| 2002 | if (likely(skb)) { | ||
| 2003 | t->tid_tab[tid] = NULL; | ||
| 2004 | mk_tid_release(skb, chan, tid); | ||
| 2005 | t4_ofld_send(adap, skb); | ||
| 2006 | } else | ||
| 2007 | cxgb4_queue_tid_release(t, chan, tid); | ||
| 2008 | if (old) | ||
| 2009 | atomic_dec(&t->tids_in_use); | ||
| 2010 | } | ||
| 2011 | EXPORT_SYMBOL(cxgb4_remove_tid); | ||
| 2012 | |||
| 2013 | /* | ||
| 2014 | * Allocate and initialize the TID tables. Returns 0 on success. | ||
| 2015 | */ | ||
| 2016 | static int tid_init(struct tid_info *t) | ||
| 2017 | { | ||
| 2018 | size_t size; | ||
| 2019 | unsigned int natids = t->natids; | ||
| 2020 | |||
| 2021 | size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) + | ||
| 2022 | t->nstids * sizeof(*t->stid_tab) + | ||
| 2023 | BITS_TO_LONGS(t->nstids) * sizeof(long); | ||
| 2024 | t->tid_tab = t4_alloc_mem(size); | ||
| 2025 | if (!t->tid_tab) | ||
| 2026 | return -ENOMEM; | ||
| 2027 | |||
| 2028 | t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; | ||
| 2029 | t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; | ||
| 2030 | t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids]; | ||
| 2031 | spin_lock_init(&t->stid_lock); | ||
| 2032 | spin_lock_init(&t->atid_lock); | ||
| 2033 | |||
| 2034 | t->stids_in_use = 0; | ||
| 2035 | t->afree = NULL; | ||
| 2036 | t->atids_in_use = 0; | ||
| 2037 | atomic_set(&t->tids_in_use, 0); | ||
| 2038 | |||
| 2039 | /* Set up the free list for atid_tab and clear the stid bitmap. */ | ||
| 2040 | if (natids) { | ||
| 2041 | while (--natids) | ||
| 2042 | t->atid_tab[natids - 1].next = &t->atid_tab[natids]; | ||
| 2043 | t->afree = t->atid_tab; | ||
| 2044 | } | ||
| 2045 | bitmap_zero(t->stid_bmap, t->nstids); | ||
| 2046 | return 0; | ||
| 2047 | } | ||
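Roughly, tid_init() makes one allocation and carves it into four back-to-back regions; the sketch below mimics that size calculation with void * as a stand-in for the driver's entry types and made-up counts, so the numbers are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define BITS_TO_LONGS(n) (((n) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))

int main(void)
{
	size_t ntids = 1024, natids = 128, nstids = 64;   /* example counts only */

	/* one allocation carved into four consecutive regions, as in tid_init() */
	size_t size = ntids * sizeof(void *) +                /* tid_tab   */
		      natids * sizeof(void *) +               /* atid_tab  */
		      nstids * sizeof(void *) +               /* stid_tab  */
		      BITS_TO_LONGS(nstids) * sizeof(long);   /* stid_bmap */
	char *buf = calloc(1, size);

	if (!buf)
		return 1;
	printf("total %zu bytes, stid bitmap starts at offset %zu\n",
	       size, (ntids + natids + nstids) * sizeof(void *));
	free(buf);
	return 0;
}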
| 2048 | |||
| 2049 | /** | ||
| 2050 | * cxgb4_create_server - create an IP server | ||
| 2051 | * @dev: the device | ||
| 2052 | * @stid: the server TID | ||
| 2053 | * @sip: local IP address to bind server to | ||
| 2054 | * @sport: the server's TCP port | ||
| 2055 | * @queue: queue to direct messages from this server to | ||
| 2056 | * | ||
| 2057 | * Create an IP server for the given port and address. | ||
| 2058 | * Returns <0 on error and one of the %NET_XMIT_* values on success. | ||
| 2059 | */ | ||
| 2060 | int cxgb4_create_server(const struct net_device *dev, unsigned int stid, | ||
| 2061 | __be32 sip, __be16 sport, unsigned int queue) | ||
| 2062 | { | ||
| 2063 | unsigned int chan; | ||
| 2064 | struct sk_buff *skb; | ||
| 2065 | struct adapter *adap; | ||
| 2066 | struct cpl_pass_open_req *req; | ||
| 2067 | |||
| 2068 | skb = alloc_skb(sizeof(*req), GFP_KERNEL); | ||
| 2069 | if (!skb) | ||
| 2070 | return -ENOMEM; | ||
| 2071 | |||
| 2072 | adap = netdev2adap(dev); | ||
| 2073 | req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req)); | ||
| 2074 | INIT_TP_WR(req, 0); | ||
| 2075 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid)); | ||
| 2076 | req->local_port = sport; | ||
| 2077 | req->peer_port = htons(0); | ||
| 2078 | req->local_ip = sip; | ||
| 2079 | req->peer_ip = htonl(0); | ||
| 2080 | chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan; | ||
| 2081 | req->opt0 = cpu_to_be64(TX_CHAN(chan)); | ||
| 2082 | req->opt1 = cpu_to_be64(CONN_POLICY_ASK | | ||
| 2083 | SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); | ||
| 2084 | return t4_mgmt_tx(adap, skb); | ||
| 2085 | } | ||
| 2086 | EXPORT_SYMBOL(cxgb4_create_server); | ||
| 2087 | |||
| 2088 | /** | ||
| 2089 | * cxgb4_create_server6 - create an IPv6 server | ||
| 2090 | * @dev: the device | ||
| 2091 | * @stid: the server TID | ||
| 2092 | * @sip: local IPv6 address to bind server to | ||
| 2093 | * @sport: the server's TCP port | ||
| 2094 | * @queue: queue to direct messages from this server to | ||
| 2095 | * | ||
| 2096 | * Create an IPv6 server for the given port and address. | ||
| 2097 | * Returns <0 on error and one of the %NET_XMIT_* values on success. | ||
| 2098 | */ | ||
| 2099 | int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, | ||
| 2100 | const struct in6_addr *sip, __be16 sport, | ||
| 2101 | unsigned int queue) | ||
| 2102 | { | ||
| 2103 | unsigned int chan; | ||
| 2104 | struct sk_buff *skb; | ||
| 2105 | struct adapter *adap; | ||
| 2106 | struct cpl_pass_open_req6 *req; | ||
| 2107 | |||
| 2108 | skb = alloc_skb(sizeof(*req), GFP_KERNEL); | ||
| 2109 | if (!skb) | ||
| 2110 | return -ENOMEM; | ||
| 2111 | |||
| 2112 | adap = netdev2adap(dev); | ||
| 2113 | req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req)); | ||
| 2114 | INIT_TP_WR(req, 0); | ||
| 2115 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid)); | ||
| 2116 | req->local_port = sport; | ||
| 2117 | req->peer_port = htons(0); | ||
| 2118 | req->local_ip_hi = *(__be64 *)(sip->s6_addr); | ||
| 2119 | req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8); | ||
| 2120 | req->peer_ip_hi = cpu_to_be64(0); | ||
| 2121 | req->peer_ip_lo = cpu_to_be64(0); | ||
| 2122 | chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan; | ||
| 2123 | req->opt0 = cpu_to_be64(TX_CHAN(chan)); | ||
| 2124 | req->opt1 = cpu_to_be64(CONN_POLICY_ASK | | ||
| 2125 | SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); | ||
| 2126 | return t4_mgmt_tx(adap, skb); | ||
| 2127 | } | ||
| 2128 | EXPORT_SYMBOL(cxgb4_create_server6); | ||
| 2129 | |||
| 2130 | /** | ||
| 2131 | * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU | ||
| 2132 | * @mtus: the HW MTU table | ||
| 2133 | * @mtu: the target MTU | ||
| 2134 | * @idx: index of selected entry in the MTU table | ||
| 2135 | * | ||
| 2136 | * Returns the index and the value in the HW MTU table that is closest to | ||
| 2137 | * but does not exceed @mtu, unless @mtu is smaller than any value in the | ||
| 2138 | * table, in which case that smallest available value is selected. | ||
| 2139 | */ | ||
| 2140 | unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, | ||
| 2141 | unsigned int *idx) | ||
| 2142 | { | ||
| 2143 | unsigned int i = 0; | ||
| 2144 | |||
| 2145 | while (i < NMTUS - 1 && mtus[i + 1] <= mtu) | ||
| 2146 | ++i; | ||
| 2147 | if (idx) | ||
| 2148 | *idx = i; | ||
| 2149 | return mtus[i]; | ||
| 2150 | } | ||
| 2151 | EXPORT_SYMBOL(cxgb4_best_mtu); | ||
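To make the selection rule in the kernel-doc above concrete, here is a standalone sketch with a made-up MTU table (the real table comes from the adapter parameters):

#include <stdio.h>

/* Same walk as cxgb4_best_mtu(): pick the largest table entry that does
 * not exceed the target, or the first entry if all are larger. */
static unsigned int best_mtu(const unsigned short *mtus, unsigned int n,
			     unsigned short mtu, unsigned int *idx)
{
	unsigned int i = 0;

	while (i < n - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}

int main(void)
{
	static const unsigned short mtus[] = { 576, 1492, 1500, 9000 }; /* example */
	unsigned int idx;

	/* a 1600-byte target selects 1500 at index 2 */
	printf("%u %u\n", best_mtu(mtus, 4, 1600, &idx), idx);
	return 0;
}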
| 2152 | |||
| 2153 | /** | ||
| 2154 | * cxgb4_port_chan - get the HW channel of a port | ||
| 2155 | * @dev: the net device for the port | ||
| 2156 | * | ||
| 2157 | * Return the HW Tx channel of the given port. | ||
| 2158 | */ | ||
| 2159 | unsigned int cxgb4_port_chan(const struct net_device *dev) | ||
| 2160 | { | ||
| 2161 | return netdev2pinfo(dev)->tx_chan; | ||
| 2162 | } | ||
| 2163 | EXPORT_SYMBOL(cxgb4_port_chan); | ||
| 2164 | |||
| 2165 | /** | ||
| 2166 | * cxgb4_port_viid - get the VI id of a port | ||
| 2167 | * @dev: the net device for the port | ||
| 2168 | * | ||
| 2169 | * Return the VI id of the given port. | ||
| 2170 | */ | ||
| 2171 | unsigned int cxgb4_port_viid(const struct net_device *dev) | ||
| 2172 | { | ||
| 2173 | return netdev2pinfo(dev)->viid; | ||
| 2174 | } | ||
| 2175 | EXPORT_SYMBOL(cxgb4_port_viid); | ||
| 2176 | |||
| 2177 | /** | ||
| 2178 | * cxgb4_port_idx - get the index of a port | ||
| 2179 | * @dev: the net device for the port | ||
| 2180 | * | ||
| 2181 | * Return the index of the given port. | ||
| 2182 | */ | ||
| 2183 | unsigned int cxgb4_port_idx(const struct net_device *dev) | ||
| 2184 | { | ||
| 2185 | return netdev2pinfo(dev)->port_id; | ||
| 2186 | } | ||
| 2187 | EXPORT_SYMBOL(cxgb4_port_idx); | ||
| 2188 | |||
| 2189 | /** | ||
| 2190 | * cxgb4_netdev_by_hwid - return the net device of a HW port | ||
| 2191 | * @pdev: identifies the adapter | ||
| 2192 | * @id: the HW port id | ||
| 2193 | * | ||
| 2194 | * Return the net device associated with the interface with the given HW | ||
| 2195 | * id. | ||
| 2196 | */ | ||
| 2197 | struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id) | ||
| 2198 | { | ||
| 2199 | const struct adapter *adap = pci_get_drvdata(pdev); | ||
| 2200 | |||
| 2201 | if (!adap || id >= NCHAN) | ||
| 2202 | return NULL; | ||
| 2203 | id = adap->chan_map[id]; | ||
| 2204 | return id < MAX_NPORTS ? adap->port[id] : NULL; | ||
| 2205 | } | ||
| 2206 | EXPORT_SYMBOL(cxgb4_netdev_by_hwid); | ||
| 2207 | |||
| 2208 | void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, | ||
| 2209 | struct tp_tcp_stats *v6) | ||
| 2210 | { | ||
| 2211 | struct adapter *adap = pci_get_drvdata(pdev); | ||
| 2212 | |||
| 2213 | spin_lock(&adap->stats_lock); | ||
| 2214 | t4_tp_get_tcp_stats(adap, v4, v6); | ||
| 2215 | spin_unlock(&adap->stats_lock); | ||
| 2216 | } | ||
| 2217 | EXPORT_SYMBOL(cxgb4_get_tcp_stats); | ||
| 2218 | |||
| 2219 | void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, | ||
| 2220 | const unsigned int *pgsz_order) | ||
| 2221 | { | ||
| 2222 | struct adapter *adap = netdev2adap(dev); | ||
| 2223 | |||
| 2224 | t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask); | ||
| 2225 | t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) | | ||
| 2226 | HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) | | ||
| 2227 | HPZ3(pgsz_order[3])); | ||
| 2228 | } | ||
| 2229 | EXPORT_SYMBOL(cxgb4_iscsi_init); | ||
| 2230 | |||
| 2231 | static struct pci_driver cxgb4_driver; | ||
| 2232 | |||
| 2233 | static void check_neigh_update(struct neighbour *neigh) | ||
| 2234 | { | ||
| 2235 | const struct device *parent; | ||
| 2236 | const struct net_device *netdev = neigh->dev; | ||
| 2237 | |||
| 2238 | if (netdev->priv_flags & IFF_802_1Q_VLAN) | ||
| 2239 | netdev = vlan_dev_real_dev(netdev); | ||
| 2240 | parent = netdev->dev.parent; | ||
| 2241 | if (parent && parent->driver == &cxgb4_driver.driver) | ||
| 2242 | t4_l2t_update(dev_get_drvdata(parent), neigh); | ||
| 2243 | } | ||
| 2244 | |||
| 2245 | static int netevent_cb(struct notifier_block *nb, unsigned long event, | ||
| 2246 | void *data) | ||
| 2247 | { | ||
| 2248 | switch (event) { | ||
| 2249 | case NETEVENT_NEIGH_UPDATE: | ||
| 2250 | check_neigh_update(data); | ||
| 2251 | break; | ||
| 2252 | case NETEVENT_PMTU_UPDATE: | ||
| 2253 | case NETEVENT_REDIRECT: | ||
| 2254 | default: | ||
| 2255 | break; | ||
| 2256 | } | ||
| 2257 | return 0; | ||
| 2258 | } | ||
| 2259 | |||
| 2260 | static bool netevent_registered; | ||
| 2261 | static struct notifier_block cxgb4_netevent_nb = { | ||
| 2262 | .notifier_call = netevent_cb | ||
| 2263 | }; | ||
| 2264 | |||
| 2265 | static void uld_attach(struct adapter *adap, unsigned int uld) | ||
| 2266 | { | ||
| 2267 | void *handle; | ||
| 2268 | struct cxgb4_lld_info lli; | ||
| 2269 | |||
| 2270 | lli.pdev = adap->pdev; | ||
| 2271 | lli.l2t = adap->l2t; | ||
| 2272 | lli.tids = &adap->tids; | ||
| 2273 | lli.ports = adap->port; | ||
| 2274 | lli.vr = &adap->vres; | ||
| 2275 | lli.mtus = adap->params.mtus; | ||
| 2276 | if (uld == CXGB4_ULD_RDMA) { | ||
| 2277 | lli.rxq_ids = adap->sge.rdma_rxq; | ||
| 2278 | lli.nrxq = adap->sge.rdmaqs; | ||
| 2279 | } else if (uld == CXGB4_ULD_ISCSI) { | ||
| 2280 | lli.rxq_ids = adap->sge.ofld_rxq; | ||
| 2281 | lli.nrxq = adap->sge.ofldqsets; | ||
| 2282 | } | ||
| 2283 | lli.ntxq = adap->sge.ofldqsets; | ||
| 2284 | lli.nchan = adap->params.nports; | ||
| 2285 | lli.nports = adap->params.nports; | ||
| 2286 | lli.wr_cred = adap->params.ofldq_wr_cred; | ||
| 2287 | lli.adapter_type = adap->params.rev; | ||
| 2288 | lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); | ||
| 2289 | lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( | ||
| 2290 | t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF)); | ||
| 2291 | lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( | ||
| 2292 | t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF)); | ||
| 2293 | lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); | ||
| 2294 | lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); | ||
| 2295 | lli.fw_vers = adap->params.fw_vers; | ||
| 2296 | |||
| 2297 | handle = ulds[uld].add(&lli); | ||
| 2298 | if (IS_ERR(handle)) { | ||
| 2299 | dev_warn(adap->pdev_dev, | ||
| 2300 | "could not attach to the %s driver, error %ld\n", | ||
| 2301 | uld_str[uld], PTR_ERR(handle)); | ||
| 2302 | return; | ||
| 2303 | } | ||
| 2304 | |||
| 2305 | adap->uld_handle[uld] = handle; | ||
| 2306 | |||
| 2307 | if (!netevent_registered) { | ||
| 2308 | register_netevent_notifier(&cxgb4_netevent_nb); | ||
| 2309 | netevent_registered = true; | ||
| 2310 | } | ||
| 2311 | } | ||
| 2312 | |||
| 2313 | static void attach_ulds(struct adapter *adap) | ||
| 2314 | { | ||
| 2315 | unsigned int i; | ||
| 2316 | |||
| 2317 | mutex_lock(&uld_mutex); | ||
| 2318 | list_add_tail(&adap->list_node, &adapter_list); | ||
| 2319 | for (i = 0; i < CXGB4_ULD_MAX; i++) | ||
| 2320 | if (ulds[i].add) | ||
| 2321 | uld_attach(adap, i); | ||
| 2322 | mutex_unlock(&uld_mutex); | ||
| 2323 | } | ||
| 2324 | |||
| 2325 | static void detach_ulds(struct adapter *adap) | ||
| 2326 | { | ||
| 2327 | unsigned int i; | ||
| 2328 | |||
| 2329 | mutex_lock(&uld_mutex); | ||
| 2330 | list_del(&adap->list_node); | ||
| 2331 | for (i = 0; i < CXGB4_ULD_MAX; i++) | ||
| 2332 | if (adap->uld_handle[i]) { | ||
| 2333 | ulds[i].state_change(adap->uld_handle[i], | ||
| 2334 | CXGB4_STATE_DETACH); | ||
| 2335 | adap->uld_handle[i] = NULL; | ||
| 2336 | } | ||
| 2337 | if (netevent_registered && list_empty(&adapter_list)) { | ||
| 2338 | unregister_netevent_notifier(&cxgb4_netevent_nb); | ||
| 2339 | netevent_registered = false; | ||
| 2340 | } | ||
| 2341 | mutex_unlock(&uld_mutex); | ||
| 2342 | } | ||
| 2343 | |||
| 2344 | static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) | ||
| 2345 | { | ||
| 2346 | unsigned int i; | ||
| 2347 | |||
| 2348 | mutex_lock(&uld_mutex); | ||
| 2349 | for (i = 0; i < CXGB4_ULD_MAX; i++) | ||
| 2350 | if (adap->uld_handle[i]) | ||
| 2351 | ulds[i].state_change(adap->uld_handle[i], new_state); | ||
| 2352 | mutex_unlock(&uld_mutex); | ||
| 2353 | } | ||
| 2354 | |||
| 2355 | /** | ||
| 2356 | * cxgb4_register_uld - register an upper-layer driver | ||
| 2357 | * @type: the ULD type | ||
| 2358 | * @p: the ULD methods | ||
| 2359 | * | ||
| 2360 | * Registers an upper-layer driver with this driver and notifies the ULD | ||
| 2361 | * about any presently available devices that support its type. Returns | ||
| 2362 | * %-EBUSY if a ULD of the same type is already registered. | ||
| 2363 | */ | ||
| 2364 | int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p) | ||
| 2365 | { | ||
| 2366 | int ret = 0; | ||
| 2367 | struct adapter *adap; | ||
| 2368 | |||
| 2369 | if (type >= CXGB4_ULD_MAX) | ||
| 2370 | return -EINVAL; | ||
| 2371 | mutex_lock(&uld_mutex); | ||
| 2372 | if (ulds[type].add) { | ||
| 2373 | ret = -EBUSY; | ||
| 2374 | goto out; | ||
| 2375 | } | ||
| 2376 | ulds[type] = *p; | ||
| 2377 | list_for_each_entry(adap, &adapter_list, list_node) | ||
| 2378 | uld_attach(adap, type); | ||
| 2379 | out: mutex_unlock(&uld_mutex); | ||
| 2380 | return ret; | ||
| 2381 | } | ||
| 2382 | EXPORT_SYMBOL(cxgb4_register_uld); | ||
| 2383 | |||
| 2384 | /** | ||
| 2385 | * cxgb4_unregister_uld - unregister an upper-layer driver | ||
| 2386 | * @type: the ULD type | ||
| 2387 | * | ||
| 2388 | * Unregisters an existing upper-layer driver. | ||
| 2389 | */ | ||
| 2390 | int cxgb4_unregister_uld(enum cxgb4_uld type) | ||
| 2391 | { | ||
| 2392 | struct adapter *adap; | ||
| 2393 | |||
| 2394 | if (type >= CXGB4_ULD_MAX) | ||
| 2395 | return -EINVAL; | ||
| 2396 | mutex_lock(&uld_mutex); | ||
| 2397 | list_for_each_entry(adap, &adapter_list, list_node) | ||
| 2398 | adap->uld_handle[type] = NULL; | ||
| 2399 | ulds[type].add = NULL; | ||
| 2400 | mutex_unlock(&uld_mutex); | ||
| 2401 | return 0; | ||
| 2402 | } | ||
| 2403 | EXPORT_SYMBOL(cxgb4_unregister_uld); | ||
| 2404 | |||
| 2405 | /** | ||
| 2406 | * cxgb_up - enable the adapter | ||
| 2407 | * @adap: adapter being enabled | ||
| 2408 | * | ||
| 2409 | * Called when the first port is enabled, this function performs the | ||
| 2410 | * actions necessary to make an adapter operational, such as completing | ||
| 2411 | * the initialization of HW modules, and enabling interrupts. | ||
| 2412 | * | ||
| 2413 | * Must be called with the rtnl lock held. | ||
| 2414 | */ | ||
| 2415 | static int cxgb_up(struct adapter *adap) | ||
| 2416 | { | ||
| 2417 | int err = 0; | ||
| 2418 | |||
| 2419 | if (!(adap->flags & FULL_INIT_DONE)) { | ||
| 2420 | err = setup_sge_queues(adap); | ||
| 2421 | if (err) | ||
| 2422 | goto out; | ||
| 2423 | err = setup_rss(adap); | ||
| 2424 | if (err) { | ||
| 2425 | t4_free_sge_resources(adap); | ||
| 2426 | goto out; | ||
| 2427 | } | ||
| 2428 | if (adap->flags & USING_MSIX) | ||
| 2429 | name_msix_vecs(adap); | ||
| 2430 | adap->flags |= FULL_INIT_DONE; | ||
| 2431 | } | ||
| 2432 | |||
| 2433 | if (adap->flags & USING_MSIX) { | ||
| 2434 | err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, | ||
| 2435 | adap->msix_info[0].desc, adap); | ||
| 2436 | if (err) | ||
| 2437 | goto irq_err; | ||
| 2438 | |||
| 2439 | err = request_msix_queue_irqs(adap); | ||
| 2440 | if (err) { | ||
| 2441 | free_irq(adap->msix_info[0].vec, adap); | ||
| 2442 | goto irq_err; | ||
| 2443 | } | ||
| 2444 | } else { | ||
| 2445 | err = request_irq(adap->pdev->irq, t4_intr_handler(adap), | ||
| 2446 | (adap->flags & USING_MSI) ? 0 : IRQF_SHARED, | ||
| 2447 | adap->name, adap); | ||
| 2448 | if (err) | ||
| 2449 | goto irq_err; | ||
| 2450 | } | ||
| 2451 | enable_rx(adap); | ||
| 2452 | t4_sge_start(adap); | ||
| 2453 | t4_intr_enable(adap); | ||
| 2454 | notify_ulds(adap, CXGB4_STATE_UP); | ||
| 2455 | out: | ||
| 2456 | return err; | ||
| 2457 | irq_err: | ||
| 2458 | dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); | ||
| 2459 | goto out; | ||
| 2460 | } | ||
| 2461 | |||
| 2462 | static void cxgb_down(struct adapter *adapter) | ||
| 2463 | { | ||
| 2464 | t4_intr_disable(adapter); | ||
| 2465 | cancel_work_sync(&adapter->tid_release_task); | ||
| 2466 | adapter->tid_release_task_busy = false; | ||
| 2467 | |||
| 2468 | if (adapter->flags & USING_MSIX) { | ||
| 2469 | free_msix_queue_irqs(adapter); | ||
| 2470 | free_irq(adapter->msix_info[0].vec, adapter); | ||
| 2471 | } else | ||
| 2472 | free_irq(adapter->pdev->irq, adapter); | ||
| 2473 | quiesce_rx(adapter); | ||
| 2474 | } | ||
| 2475 | |||
| 2476 | /* | ||
| 2477 | * net_device operations | ||
| 2478 | */ | ||
| 2479 | static int cxgb_open(struct net_device *dev) | ||
| 2480 | { | ||
| 2481 | int err; | ||
| 2482 | struct port_info *pi = netdev_priv(dev); | ||
| 2483 | struct adapter *adapter = pi->adapter; | ||
| 2484 | |||
| 2485 | if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) | ||
| 2486 | return err; | ||
| 2487 | |||
| 2488 | dev->real_num_tx_queues = pi->nqsets; | ||
| 2489 | set_bit(pi->tx_chan, &adapter->open_device_map); | ||
| 2490 | link_start(dev); | ||
| 2491 | netif_tx_start_all_queues(dev); | ||
| 2492 | return 0; | ||
| 2493 | } | ||
| 2494 | |||
| 2495 | static int cxgb_close(struct net_device *dev) | ||
| 2496 | { | ||
| 2497 | int ret; | ||
| 2498 | struct port_info *pi = netdev_priv(dev); | ||
| 2499 | struct adapter *adapter = pi->adapter; | ||
| 2500 | |||
| 2501 | netif_tx_stop_all_queues(dev); | ||
| 2502 | netif_carrier_off(dev); | ||
| 2503 | ret = t4_enable_vi(adapter, 0, pi->viid, false, false); | ||
| 2504 | |||
| 2505 | clear_bit(pi->tx_chan, &adapter->open_device_map); | ||
| 2506 | |||
| 2507 | if (!adapter->open_device_map) | ||
| 2508 | cxgb_down(adapter); | ||
| 2509 | return 0; | ||
| 2510 | } | ||
| 2511 | |||
| 2512 | static struct net_device_stats *cxgb_get_stats(struct net_device *dev) | ||
| 2513 | { | ||
| 2514 | struct port_stats stats; | ||
| 2515 | struct port_info *p = netdev_priv(dev); | ||
| 2516 | struct adapter *adapter = p->adapter; | ||
| 2517 | struct net_device_stats *ns = &dev->stats; | ||
| 2518 | |||
| 2519 | spin_lock(&adapter->stats_lock); | ||
| 2520 | t4_get_port_stats(adapter, p->tx_chan, &stats); | ||
| 2521 | spin_unlock(&adapter->stats_lock); | ||
| 2522 | |||
| 2523 | ns->tx_bytes = stats.tx_octets; | ||
| 2524 | ns->tx_packets = stats.tx_frames; | ||
| 2525 | ns->rx_bytes = stats.rx_octets; | ||
| 2526 | ns->rx_packets = stats.rx_frames; | ||
| 2527 | ns->multicast = stats.rx_mcast_frames; | ||
| 2528 | |||
| 2529 | /* detailed rx_errors */ | ||
| 2530 | ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long + | ||
| 2531 | stats.rx_runt; | ||
| 2532 | ns->rx_over_errors = 0; | ||
| 2533 | ns->rx_crc_errors = stats.rx_fcs_err; | ||
| 2534 | ns->rx_frame_errors = stats.rx_symbol_err; | ||
| 2535 | ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 + | ||
| 2536 | stats.rx_ovflow2 + stats.rx_ovflow3 + | ||
| 2537 | stats.rx_trunc0 + stats.rx_trunc1 + | ||
| 2538 | stats.rx_trunc2 + stats.rx_trunc3; | ||
| 2539 | ns->rx_missed_errors = 0; | ||
| 2540 | |||
| 2541 | /* detailed tx_errors */ | ||
| 2542 | ns->tx_aborted_errors = 0; | ||
| 2543 | ns->tx_carrier_errors = 0; | ||
| 2544 | ns->tx_fifo_errors = 0; | ||
| 2545 | ns->tx_heartbeat_errors = 0; | ||
| 2546 | ns->tx_window_errors = 0; | ||
| 2547 | |||
| 2548 | ns->tx_errors = stats.tx_error_frames; | ||
| 2549 | ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + | ||
| 2550 | ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; | ||
| 2551 | return ns; | ||
| 2552 | } | ||
| 2553 | |||
| 2554 | static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) | ||
| 2555 | { | ||
| 2556 | int ret = 0, prtad, devad; | ||
| 2557 | struct port_info *pi = netdev_priv(dev); | ||
| 2558 | struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; | ||
| 2559 | |||
| 2560 | switch (cmd) { | ||
| 2561 | case SIOCGMIIPHY: | ||
| 2562 | if (pi->mdio_addr < 0) | ||
| 2563 | return -EOPNOTSUPP; | ||
| 2564 | data->phy_id = pi->mdio_addr; | ||
| 2565 | break; | ||
| 2566 | case SIOCGMIIREG: | ||
| 2567 | case SIOCSMIIREG: | ||
| 2568 | if (mdio_phy_id_is_c45(data->phy_id)) { | ||
| 2569 | prtad = mdio_phy_id_prtad(data->phy_id); | ||
| 2570 | devad = mdio_phy_id_devad(data->phy_id); | ||
| 2571 | } else if (data->phy_id < 32) { | ||
| 2572 | prtad = data->phy_id; | ||
| 2573 | devad = 0; | ||
| 2574 | data->reg_num &= 0x1f; | ||
| 2575 | } else | ||
| 2576 | return -EINVAL; | ||
| 2577 | |||
| 2578 | if (cmd == SIOCGMIIREG) | ||
| 2579 | ret = t4_mdio_rd(pi->adapter, 0, prtad, devad, | ||
| 2580 | data->reg_num, &data->val_out); | ||
| 2581 | else | ||
| 2582 | ret = t4_mdio_wr(pi->adapter, 0, prtad, devad, | ||
| 2583 | data->reg_num, data->val_in); | ||
| 2584 | break; | ||
| 2585 | default: | ||
| 2586 | return -EOPNOTSUPP; | ||
| 2587 | } | ||
| 2588 | return ret; | ||
| 2589 | } | ||
| 2590 | |||
| 2591 | static void cxgb_set_rxmode(struct net_device *dev) | ||
| 2592 | { | ||
| 2593 | /* unfortunately we can't return errors to the stack */ | ||
| 2594 | set_rxmode(dev, -1, false); | ||
| 2595 | } | ||
| 2596 | |||
| 2597 | static int cxgb_change_mtu(struct net_device *dev, int new_mtu) | ||
| 2598 | { | ||
| 2599 | int ret; | ||
| 2600 | struct port_info *pi = netdev_priv(dev); | ||
| 2601 | |||
| 2602 | if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */ | ||
| 2603 | return -EINVAL; | ||
| 2604 | ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, | ||
| 2605 | true); | ||
| 2606 | if (!ret) | ||
| 2607 | dev->mtu = new_mtu; | ||
| 2608 | return ret; | ||
| 2609 | } | ||
| 2610 | |||
| 2611 | static int cxgb_set_mac_addr(struct net_device *dev, void *p) | ||
| 2612 | { | ||
| 2613 | int ret; | ||
| 2614 | struct sockaddr *addr = p; | ||
| 2615 | struct port_info *pi = netdev_priv(dev); | ||
| 2616 | |||
| 2617 | if (!is_valid_ether_addr(addr->sa_data)) | ||
| 2618 | return -EINVAL; | ||
| 2619 | |||
| 2620 | ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt, | ||
| 2621 | addr->sa_data, true, true); | ||
| 2622 | if (ret < 0) | ||
| 2623 | return ret; | ||
| 2624 | |||
| 2625 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
| 2626 | pi->xact_addr_filt = ret; | ||
| 2627 | return 0; | ||
| 2628 | } | ||
| 2629 | |||
| 2630 | static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | ||
| 2631 | { | ||
| 2632 | struct port_info *pi = netdev_priv(dev); | ||
| 2633 | |||
| 2634 | pi->vlan_grp = grp; | ||
| 2635 | t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL); | ||
| 2636 | } | ||
| 2637 | |||
| 2638 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2639 | static void cxgb_netpoll(struct net_device *dev) | ||
| 2640 | { | ||
| 2641 | struct port_info *pi = netdev_priv(dev); | ||
| 2642 | struct adapter *adap = pi->adapter; | ||
| 2643 | |||
| 2644 | if (adap->flags & USING_MSIX) { | ||
| 2645 | int i; | ||
| 2646 | struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; | ||
| 2647 | |||
| 2648 | for (i = pi->nqsets; i; i--, rx++) | ||
| 2649 | t4_sge_intr_msix(0, &rx->rspq); | ||
| 2650 | } else | ||
| 2651 | t4_intr_handler(adap)(0, adap); | ||
| 2652 | } | ||
| 2653 | #endif | ||
| 2654 | |||
| 2655 | static const struct net_device_ops cxgb4_netdev_ops = { | ||
| 2656 | .ndo_open = cxgb_open, | ||
| 2657 | .ndo_stop = cxgb_close, | ||
| 2658 | .ndo_start_xmit = t4_eth_xmit, | ||
| 2659 | .ndo_get_stats = cxgb_get_stats, | ||
| 2660 | .ndo_set_rx_mode = cxgb_set_rxmode, | ||
| 2661 | .ndo_set_mac_address = cxgb_set_mac_addr, | ||
| 2662 | .ndo_validate_addr = eth_validate_addr, | ||
| 2663 | .ndo_do_ioctl = cxgb_ioctl, | ||
| 2664 | .ndo_change_mtu = cxgb_change_mtu, | ||
| 2665 | .ndo_vlan_rx_register = vlan_rx_register, | ||
| 2666 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2667 | .ndo_poll_controller = cxgb_netpoll, | ||
| 2668 | #endif | ||
| 2669 | }; | ||
| 2670 | |||
| 2671 | void t4_fatal_err(struct adapter *adap) | ||
| 2672 | { | ||
| 2673 | t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0); | ||
| 2674 | t4_intr_disable(adap); | ||
| 2675 | dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); | ||
| 2676 | } | ||
| 2677 | |||
| 2678 | static void setup_memwin(struct adapter *adap) | ||
| 2679 | { | ||
| 2680 | u32 bar0; | ||
| 2681 | |||
| 2682 | bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ | ||
| 2683 | t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0), | ||
| 2684 | (bar0 + MEMWIN0_BASE) | BIR(0) | | ||
| 2685 | WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); | ||
| 2686 | t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1), | ||
| 2687 | (bar0 + MEMWIN1_BASE) | BIR(0) | | ||
| 2688 | WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); | ||
| 2689 | t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), | ||
| 2690 | (bar0 + MEMWIN2_BASE) | BIR(0) | | ||
| 2691 | WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); | ||
| 2692 | } | ||
| 2693 | |||
| 2694 | /* | ||
| 2695 | * Max # of ATIDs. The absolute HW max is 16K but we keep it lower. | ||
| 2696 | */ | ||
| 2697 | #define MAX_ATIDS 8192U | ||
| 2698 | |||
| 2699 | /* | ||
| 2700 | * Phase 0 of initialization: contact FW, obtain config, perform basic init. | ||
| 2701 | */ | ||
| 2702 | static int adap_init0(struct adapter *adap) | ||
| 2703 | { | ||
| 2704 | int ret; | ||
| 2705 | u32 v, port_vec; | ||
| 2706 | enum dev_state state; | ||
| 2707 | u32 params[7], val[7]; | ||
| 2708 | struct fw_caps_config_cmd c; | ||
| 2709 | |||
| 2710 | ret = t4_check_fw_version(adap); | ||
| 2711 | if (ret == -EINVAL || ret > 0) { | ||
| 2712 | if (upgrade_fw(adap) >= 0) /* recache FW version */ | ||
| 2713 | ret = t4_check_fw_version(adap); | ||
| 2714 | } | ||
| 2715 | if (ret < 0) | ||
| 2716 | return ret; | ||
| 2717 | |||
| 2718 | /* contact FW, request master */ | ||
| 2719 | ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state); | ||
| 2720 | if (ret < 0) { | ||
| 2721 | dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", | ||
| 2722 | ret); | ||
| 2723 | return ret; | ||
| 2724 | } | ||
| 2725 | |||
| 2726 | /* reset device */ | ||
| 2727 | ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST); | ||
| 2728 | if (ret < 0) | ||
| 2729 | goto bye; | ||
| 2730 | |||
| 2731 | /* get device capabilities */ | ||
| 2732 | memset(&c, 0, sizeof(c)); | ||
| 2733 | c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | | ||
| 2734 | FW_CMD_REQUEST | FW_CMD_READ); | ||
| 2735 | c.retval_len16 = htonl(FW_LEN16(c)); | ||
| 2736 | ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); | ||
| 2737 | if (ret < 0) | ||
| 2738 | goto bye; | ||
| 2739 | |||
| 2740 | /* select capabilities we'll be using */ | ||
| 2741 | if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { | ||
| 2742 | if (!vf_acls) | ||
| 2743 | c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); | ||
| 2744 | else | ||
| 2745 | c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM); | ||
| 2746 | } else if (vf_acls) { | ||
| 2747 | dev_err(adap->pdev_dev, "virtualization ACLs not supported"); | ||
| 2748 | goto bye; | ||
| 2749 | } | ||
| 2750 | c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | | ||
| 2751 | FW_CMD_REQUEST | FW_CMD_WRITE); | ||
| 2752 | ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL); | ||
| 2753 | if (ret < 0) | ||
| 2754 | goto bye; | ||
| 2755 | |||
| 2756 | ret = t4_config_glbl_rss(adap, 0, | ||
| 2757 | FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, | ||
| 2758 | FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | | ||
| 2759 | FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); | ||
| 2760 | if (ret < 0) | ||
| 2761 | goto bye; | ||
| 2762 | |||
| 2763 | ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16, | ||
| 2764 | FW_CMD_CAP_PF, FW_CMD_CAP_PF); | ||
| 2765 | if (ret < 0) | ||
| 2766 | goto bye; | ||
| 2767 | |||
| 2768 | for (v = 0; v < SGE_NTIMERS - 1; v++) | ||
| 2769 | adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL); | ||
| 2770 | adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; | ||
| 2771 | adap->sge.counter_val[0] = 1; | ||
| 2772 | for (v = 1; v < SGE_NCOUNTERS; v++) | ||
| 2773 | adap->sge.counter_val[v] = min(intr_cnt[v - 1], | ||
| 2774 | THRESHOLD_3_MASK); | ||
| 2775 | t4_sge_init(adap); | ||
| 2776 | |||
| 2777 | /* get basic stuff going */ | ||
| 2778 | ret = t4_early_init(adap, 0); | ||
| 2779 | if (ret < 0) | ||
| 2780 | goto bye; | ||
| 2781 | |||
| 2782 | #define FW_PARAM_DEV(param) \ | ||
| 2783 | (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ | ||
| 2784 | FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) | ||
| 2785 | |||
| 2786 | #define FW_PARAM_PFVF(param) \ | ||
| 2787 | (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ | ||
| 2788 | FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) | ||
| 2789 | |||
| 2790 | params[0] = FW_PARAM_DEV(PORTVEC); | ||
| 2791 | params[1] = FW_PARAM_PFVF(L2T_START); | ||
| 2792 | params[2] = FW_PARAM_PFVF(L2T_END); | ||
| 2793 | params[3] = FW_PARAM_PFVF(FILTER_START); | ||
| 2794 | params[4] = FW_PARAM_PFVF(FILTER_END); | ||
| 2795 | ret = t4_query_params(adap, 0, 0, 0, 5, params, val); | ||
| 2796 | if (ret < 0) | ||
| 2797 | goto bye; | ||
| 2798 | port_vec = val[0]; | ||
| 2799 | adap->tids.ftid_base = val[3]; | ||
| 2800 | adap->tids.nftids = val[4] - val[3] + 1; | ||
| 2801 | |||
| 2802 | if (c.ofldcaps) { | ||
| 2803 | /* query offload-related parameters */ | ||
| 2804 | params[0] = FW_PARAM_DEV(NTID); | ||
| 2805 | params[1] = FW_PARAM_PFVF(SERVER_START); | ||
| 2806 | params[2] = FW_PARAM_PFVF(SERVER_END); | ||
| 2807 | params[3] = FW_PARAM_PFVF(TDDP_START); | ||
| 2808 | params[4] = FW_PARAM_PFVF(TDDP_END); | ||
| 2809 | params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); | ||
| 2810 | ret = t4_query_params(adap, 0, 0, 0, 6, params, val); | ||
| 2811 | if (ret < 0) | ||
| 2812 | goto bye; | ||
| 2813 | adap->tids.ntids = val[0]; | ||
| 2814 | adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); | ||
| 2815 | adap->tids.stid_base = val[1]; | ||
| 2816 | adap->tids.nstids = val[2] - val[1] + 1; | ||
| 2817 | adap->vres.ddp.start = val[3]; | ||
| 2818 | adap->vres.ddp.size = val[4] - val[3] + 1; | ||
| 2819 | adap->params.ofldq_wr_cred = val[5]; | ||
| 2820 | adap->params.offload = 1; | ||
| 2821 | } | ||
| 2822 | if (c.rdmacaps) { | ||
| 2823 | params[0] = FW_PARAM_PFVF(STAG_START); | ||
| 2824 | params[1] = FW_PARAM_PFVF(STAG_END); | ||
| 2825 | params[2] = FW_PARAM_PFVF(RQ_START); | ||
| 2826 | params[3] = FW_PARAM_PFVF(RQ_END); | ||
| 2827 | params[4] = FW_PARAM_PFVF(PBL_START); | ||
| 2828 | params[5] = FW_PARAM_PFVF(PBL_END); | ||
| 2829 | ret = t4_query_params(adap, 0, 0, 0, 6, params, val); | ||
| 2830 | if (ret < 0) | ||
| 2831 | goto bye; | ||
| 2832 | adap->vres.stag.start = val[0]; | ||
| 2833 | adap->vres.stag.size = val[1] - val[0] + 1; | ||
| 2834 | adap->vres.rq.start = val[2]; | ||
| 2835 | adap->vres.rq.size = val[3] - val[2] + 1; | ||
| 2836 | adap->vres.pbl.start = val[4]; | ||
| 2837 | adap->vres.pbl.size = val[5] - val[4] + 1; | ||
| 2838 | } | ||
| 2839 | if (c.iscsicaps) { | ||
| 2840 | params[0] = FW_PARAM_PFVF(ISCSI_START); | ||
| 2841 | params[1] = FW_PARAM_PFVF(ISCSI_END); | ||
| 2842 | ret = t4_query_params(adap, 0, 0, 0, 2, params, val); | ||
| 2843 | if (ret < 0) | ||
| 2844 | goto bye; | ||
| 2845 | adap->vres.iscsi.start = val[0]; | ||
| 2846 | adap->vres.iscsi.size = val[1] - val[0] + 1; | ||
| 2847 | } | ||
| 2848 | #undef FW_PARAM_PFVF | ||
| 2849 | #undef FW_PARAM_DEV | ||
| 2850 | |||
| 2851 | adap->params.nports = hweight32(port_vec); | ||
| 2852 | adap->params.portvec = port_vec; | ||
| 2853 | adap->flags |= FW_OK; | ||
| 2854 | |||
| 2855 | /* These are finalized by FW initialization; load their values now */ | ||
| 2856 | v = t4_read_reg(adap, TP_TIMER_RESOLUTION); | ||
| 2857 | adap->params.tp.tre = TIMERRESOLUTION_GET(v); | ||
| 2858 | t4_read_mtu_tbl(adap, adap->params.mtus, NULL); | ||
| 2859 | t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, | ||
| 2860 | adap->params.b_wnd); | ||
| 2861 | |||
| 2862 | /* tweak some settings */ | ||
| 2863 | t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849); | ||
| 2864 | t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12)); | ||
| 2865 | t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG); | ||
| 2866 | v = t4_read_reg(adap, TP_PIO_DATA); | ||
| 2867 | t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); | ||
| 2868 | setup_memwin(adap); | ||
| 2869 | return 0; | ||
| 2870 | |||
| 2871 | /* | ||
| 2872 | * If a command timed out or failed with EIO, the FW is not operating | ||
| 2873 | * within its spec or something catastrophic happened to the HW/FW; | ||
| 2874 | * stop issuing commands. | ||
| 2875 | */ | ||
| 2876 | bye: if (ret != -ETIMEDOUT && ret != -EIO) | ||
| 2877 | t4_fw_bye(adap, 0); | ||
| 2878 | return ret; | ||
| 2879 | } | ||
| 2880 | |||
| 2881 | static inline bool is_10g_port(const struct link_config *lc) | ||
| 2882 | { | ||
| 2883 | return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; | ||
| 2884 | } | ||
| 2885 | |||
| 2886 | static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx, | ||
| 2887 | unsigned int size, unsigned int iqe_size) | ||
| 2888 | { | ||
| 2889 | q->intr_params = QINTR_TIMER_IDX(timer_idx) | | ||
| 2890 | (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0); | ||
| 2891 | q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0; | ||
| 2892 | q->iqe_len = iqe_size; | ||
| 2893 | q->size = size; | ||
| 2894 | } | ||
| 2895 | |||
| 2896 | /* | ||
| 2897 | * Perform default configuration of DMA queues depending on the number and type | ||
| 2898 | * of ports we found and the number of available CPUs. Most settings can be | ||
| 2899 | * modified by the admin prior to actual use. | ||
| 2900 | */ | ||
| 2901 | static void __devinit cfg_queues(struct adapter *adap) | ||
| 2902 | { | ||
| 2903 | struct sge *s = &adap->sge; | ||
| 2904 | int i, q10g = 0, n10g = 0, qidx = 0; | ||
| 2905 | |||
| 2906 | for_each_port(adap, i) | ||
| 2907 | n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg); | ||
| 2908 | |||
| 2909 | /* | ||
| 2910 | * We default to 1 queue per non-10G port and up to one queue per | ||
| 2911 | * online CPU for each 10G port. | ||
| 2912 | */ | ||
| 2913 | if (n10g) | ||
| 2914 | q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; | ||
| 2915 | if (q10g > num_online_cpus()) | ||
| 2916 | q10g = num_online_cpus(); | ||
| 2917 | |||
| 2918 | for_each_port(adap, i) { | ||
| 2919 | struct port_info *pi = adap2pinfo(adap, i); | ||
| 2920 | |||
| 2921 | pi->first_qset = qidx; | ||
| 2922 | pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1; | ||
| 2923 | qidx += pi->nqsets; | ||
| 2924 | } | ||
| 2925 | |||
| 2926 | s->ethqsets = qidx; | ||
| 2927 | s->max_ethqsets = qidx; /* MSI-X may lower it later */ | ||
| 2928 | |||
| 2929 | if (is_offload(adap)) { | ||
| 2930 | /* | ||
| 2931 | * For offload we use 1 queue/channel if all ports are up to 1G; | ||
| 2932 | * otherwise we divide the available queues amongst the channels, | ||
| 2933 | * capped by the number of online CPUs. | ||
| 2934 | */ | ||
| 2935 | if (n10g) { | ||
| 2936 | i = min_t(int, ARRAY_SIZE(s->ofldrxq), | ||
| 2937 | num_online_cpus()); | ||
| 2938 | s->ofldqsets = roundup(i, adap->params.nports); | ||
| 2939 | } else | ||
| 2940 | s->ofldqsets = adap->params.nports; | ||
| 2941 | /* For RDMA one Rx queue per channel suffices */ | ||
| 2942 | s->rdmaqs = adap->params.nports; | ||
| 2943 | } | ||
| 2944 | |||
| 2945 | for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { | ||
| 2946 | struct sge_eth_rxq *r = &s->ethrxq[i]; | ||
| 2947 | |||
| 2948 | init_rspq(&r->rspq, 0, 0, 1024, 64); | ||
| 2949 | r->fl.size = 72; | ||
| 2950 | } | ||
| 2951 | |||
| 2952 | for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) | ||
| 2953 | s->ethtxq[i].q.size = 1024; | ||
| 2954 | |||
| 2955 | for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) | ||
| 2956 | s->ctrlq[i].q.size = 512; | ||
| 2957 | |||
| 2958 | for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) | ||
| 2959 | s->ofldtxq[i].q.size = 1024; | ||
| 2960 | |||
| 2961 | for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) { | ||
| 2962 | struct sge_ofld_rxq *r = &s->ofldrxq[i]; | ||
| 2963 | |||
| 2964 | init_rspq(&r->rspq, 0, 0, 1024, 64); | ||
| 2965 | r->rspq.uld = CXGB4_ULD_ISCSI; | ||
| 2966 | r->fl.size = 72; | ||
| 2967 | } | ||
| 2968 | |||
| 2969 | for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) { | ||
| 2970 | struct sge_ofld_rxq *r = &s->rdmarxq[i]; | ||
| 2971 | |||
| 2972 | init_rspq(&r->rspq, 0, 0, 511, 64); | ||
| 2973 | r->rspq.uld = CXGB4_ULD_RDMA; | ||
| 2974 | r->fl.size = 72; | ||
| 2975 | } | ||
| 2976 | |||
| 2977 | init_rspq(&s->fw_evtq, 6, 0, 512, 64); | ||
| 2978 | init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64); | ||
| 2979 | } | ||
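The per-port split computed in cfg_queues() is easier to see with concrete numbers. The standalone sketch below mirrors only that arithmetic under assumed values (a 2 x 10G + 2 x 1G adapter, 8 online CPUs, and MAX_ETH_QSETS taken as 32 purely for the example); it is an illustration, not driver code.

#include <stdio.h>

int main(void)
{
	/* assumed example values, not the driver's actual constants */
	int max_eth_qsets = 32, nports = 4, n10g = 2, ncpus = 8;

	/* same formula as cfg_queues(): whatever is left after giving each
	 * non-10G port one queue set is split across the 10G ports ... */
	int q10g = (max_eth_qsets - (nports - n10g)) / n10g;	/* (32 - 2) / 2 = 15 */

	if (q10g > ncpus)			/* ... capped at the number of online CPUs */
		q10g = ncpus;			/* 8 */

	printf("each 10G port: %d qsets, each 1G port: 1, total: %d\n",
	       q10g, n10g * q10g + (nports - n10g));	/* total: 18 */
	return 0;
}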
| 2980 | |||
| 2981 | /* | ||
| 2982 | * Reduce the number of Ethernet queues across all ports to at most n. | ||
| 2983 | * n provides at least one queue per port. | ||
| 2984 | */ | ||
| 2985 | static void __devinit reduce_ethqs(struct adapter *adap, int n) | ||
| 2986 | { | ||
| 2987 | int i; | ||
| 2988 | struct port_info *pi; | ||
| 2989 | |||
| 2990 | while (n < adap->sge.ethqsets) | ||
| 2991 | for_each_port(adap, i) { | ||
| 2992 | pi = adap2pinfo(adap, i); | ||
| 2993 | if (pi->nqsets > 1) { | ||
| 2994 | pi->nqsets--; | ||
| 2995 | adap->sge.ethqsets--; | ||
| 2996 | if (adap->sge.ethqsets <= n) | ||
| 2997 | break; | ||
| 2998 | } | ||
| 2999 | } | ||
| 3000 | |||
| 3001 | n = 0; | ||
| 3002 | for_each_port(adap, i) { | ||
| 3003 | pi = adap2pinfo(adap, i); | ||
| 3004 | pi->first_qset = n; | ||
| 3005 | n += pi->nqsets; | ||
| 3006 | } | ||
| 3007 | } | ||
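reduce_ethqs() is invoked when MSI-X cannot supply enough vectors (see enable_msix() below). A small standalone simulation with invented numbers shows the effect: four ports starting at {8, 8, 1, 1} queue sets (18 total) trimmed to at most n = 12, after which the first_qset offsets are recomputed just as above.

#include <stdio.h>

int main(void)
{
	int nqsets[4] = { 8, 8, 1, 1 };		/* invented starting split */
	int first_qset[4];
	int i, n = 12, total = 18;

	/* strip one queue set per pass from each multi-queue port */
	while (n < total)
		for (i = 0; i < 4; i++)
			if (nqsets[i] > 1) {
				nqsets[i]--;
				total--;
				if (total <= n)
					break;
			}

	/* recompute the starting offsets */
	for (n = 0, i = 0; i < 4; i++) {
		first_qset[i] = n;
		n += nqsets[i];
	}

	for (i = 0; i < 4; i++)			/* prints 5,5,1,1 at 0,5,10,11 */
		printf("port %d: %d qsets starting at %d\n",
		       i, nqsets[i], first_qset[i]);
	return 0;
}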
| 3008 | |||
| 3009 | /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ | ||
| 3010 | #define EXTRA_VECS 2 | ||
| 3011 | |||
| 3012 | static int __devinit enable_msix(struct adapter *adap) | ||
| 3013 | { | ||
| 3014 | int ofld_need = 0; | ||
| 3015 | int i, err, want, need; | ||
| 3016 | struct sge *s = &adap->sge; | ||
| 3017 | unsigned int nchan = adap->params.nports; | ||
| 3018 | struct msix_entry entries[MAX_INGQ + 1]; | ||
| 3019 | |||
| 3020 | for (i = 0; i < ARRAY_SIZE(entries); ++i) | ||
| 3021 | entries[i].entry = i; | ||
| 3022 | |||
| 3023 | want = s->max_ethqsets + EXTRA_VECS; | ||
| 3024 | if (is_offload(adap)) { | ||
| 3025 | want += s->rdmaqs + s->ofldqsets; | ||
| 3026 | /* need nchan for each possible ULD */ | ||
| 3027 | ofld_need = 2 * nchan; | ||
| 3028 | } | ||
| 3029 | need = adap->params.nports + EXTRA_VECS + ofld_need; | ||
| 3030 | |||
| 3031 | while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need) | ||
| 3032 | want = err; | ||
| 3033 | |||
| 3034 | if (!err) { | ||
| 3035 | /* | ||
| 3036 | * Distribute available vectors to the various queue groups. | ||
| 3037 | * Every group gets its minimum requirement and NIC gets top | ||
| 3038 | * priority for leftovers. | ||
| 3039 | */ | ||
| 3040 | i = want - EXTRA_VECS - ofld_need; | ||
| 3041 | if (i < s->max_ethqsets) { | ||
| 3042 | s->max_ethqsets = i; | ||
| 3043 | if (i < s->ethqsets) | ||
| 3044 | reduce_ethqs(adap, i); | ||
| 3045 | } | ||
| 3046 | if (is_offload(adap)) { | ||
| 3047 | i = want - EXTRA_VECS - s->max_ethqsets; | ||
| 3048 | i -= ofld_need - nchan; | ||
| 3049 | s->ofldqsets = (i / nchan) * nchan; /* round down */ | ||
| 3050 | } | ||
| 3051 | for (i = 0; i < want; ++i) | ||
| 3052 | adap->msix_info[i].vec = entries[i].vector; | ||
| 3053 | } else if (err > 0) | ||
| 3054 | dev_info(adap->pdev_dev, | ||
| 3055 | "only %d MSI-X vectors left, not using MSI-X\n", err); | ||
| 3056 | return err; | ||
| 3057 | } | ||
| 3058 | |||
| 3059 | #undef EXTRA_VECS | ||
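The retry loop in enable_msix() relies on the convention, in this kernel generation, that pci_enable_msix() returns 0 on success, a positive count of vectors actually available when the request cannot be met in full, or a negative errno. The standalone mock below illustrates just that want/need negotiation; mock_enable_msix() and the vector counts are invented for the example.

#include <stdio.h>

/* mock of the old pci_enable_msix() return convention */
static int mock_enable_msix(int requested, int avail)
{
	return requested <= avail ? 0 : avail;
}

int main(void)
{
	int avail = 10;			/* hypothetical vectors the PCI core can grant */
	int want = 18, need = 6;	/* mirrors want/need in enable_msix() */
	int err;

	while ((err = mock_enable_msix(want, avail)) >= need)
		want = err;		/* retry with what is actually available */

	if (!err)
		printf("MSI-X enabled with %d vectors\n", want);	/* 10 here */
	else
		printf("only %d vectors left, not using MSI-X\n", err);
	return 0;
}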
| 3060 | |||
| 3061 | static void __devinit print_port_info(struct adapter *adap) | ||
| 3062 | { | ||
| 3063 | static const char *base[] = { | ||
| 3064 | "R", "KX4", "T", "KX", "T", "KR", "CX4" | ||
| 3065 | }; | ||
| 3066 | |||
| 3067 | int i; | ||
| 3068 | char buf[80]; | ||
| 3069 | |||
| 3070 | for_each_port(adap, i) { | ||
| 3071 | struct net_device *dev = adap->port[i]; | ||
| 3072 | const struct port_info *pi = netdev_priv(dev); | ||
| 3073 | char *bufp = buf; | ||
| 3074 | |||
| 3075 | if (!test_bit(i, &adap->registered_device_map)) | ||
| 3076 | continue; | ||
| 3077 | |||
| 3078 | if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) | ||
| 3079 | bufp += sprintf(bufp, "100/"); | ||
| 3080 | if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) | ||
| 3081 | bufp += sprintf(bufp, "1000/"); | ||
| 3082 | if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) | ||
| 3083 | bufp += sprintf(bufp, "10G/"); | ||
| 3084 | if (bufp != buf) | ||
| 3085 | --bufp; | ||
| 3086 | sprintf(bufp, "BASE-%s", base[pi->port_type]); | ||
| 3087 | |||
| 3088 | netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n", | ||
| 3089 | adap->params.vpd.id, adap->params.rev, | ||
| 3090 | buf, is_offload(adap) ? "R" : "", | ||
| 3091 | adap->params.pci.width, | ||
| 3092 | (adap->flags & USING_MSIX) ? " MSI-X" : | ||
| 3093 | (adap->flags & USING_MSI) ? " MSI" : ""); | ||
| 3094 | if (adap->name == dev->name) | ||
| 3095 | netdev_info(dev, "S/N: %s, E/C: %s\n", | ||
| 3096 | adap->params.vpd.sn, adap->params.vpd.ec); | ||
| 3097 | } | ||
| 3098 | } | ||
| 3099 | |||
| 3100 | #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\ | ||
| 3101 | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) | ||
| 3102 | |||
| 3103 | static int __devinit init_one(struct pci_dev *pdev, | ||
| 3104 | const struct pci_device_id *ent) | ||
| 3105 | { | ||
| 3106 | int func, i, err; | ||
| 3107 | struct port_info *pi; | ||
| 3108 | unsigned int highdma = 0; | ||
| 3109 | struct adapter *adapter = NULL; | ||
| 3110 | |||
| 3111 | printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); | ||
| 3112 | |||
| 3113 | err = pci_request_regions(pdev, KBUILD_MODNAME); | ||
| 3114 | if (err) { | ||
| 3115 | /* Just info, some other driver may have claimed the device. */ | ||
| 3116 | dev_info(&pdev->dev, "cannot obtain PCI resources\n"); | ||
| 3117 | return err; | ||
| 3118 | } | ||
| 3119 | |||
| 3120 | /* We control everything through PF 0 */ | ||
| 3121 | func = PCI_FUNC(pdev->devfn); | ||
| 3122 | if (func > 0) | ||
| 3123 | goto sriov; | ||
| 3124 | |||
| 3125 | err = pci_enable_device(pdev); | ||
| 3126 | if (err) { | ||
| 3127 | dev_err(&pdev->dev, "cannot enable PCI device\n"); | ||
| 3128 | goto out_release_regions; | ||
| 3129 | } | ||
| 3130 | |||
| 3131 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
| 3132 | highdma = NETIF_F_HIGHDMA; | ||
| 3133 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
| 3134 | if (err) { | ||
| 3135 | dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " | ||
| 3136 | "coherent allocations\n"); | ||
| 3137 | goto out_disable_device; | ||
| 3138 | } | ||
| 3139 | } else { | ||
| 3140 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
| 3141 | if (err) { | ||
| 3142 | dev_err(&pdev->dev, "no usable DMA configuration\n"); | ||
| 3143 | goto out_disable_device; | ||
| 3144 | } | ||
| 3145 | } | ||
| 3146 | |||
| 3147 | pci_enable_pcie_error_reporting(pdev); | ||
| 3148 | pci_set_master(pdev); | ||
| 3149 | pci_save_state(pdev); | ||
| 3150 | |||
| 3151 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); | ||
| 3152 | if (!adapter) { | ||
| 3153 | err = -ENOMEM; | ||
| 3154 | goto out_disable_device; | ||
| 3155 | } | ||
| 3156 | |||
| 3157 | adapter->regs = pci_ioremap_bar(pdev, 0); | ||
| 3158 | if (!adapter->regs) { | ||
| 3159 | dev_err(&pdev->dev, "cannot map device registers\n"); | ||
| 3160 | err = -ENOMEM; | ||
| 3161 | goto out_free_adapter; | ||
| 3162 | } | ||
| 3163 | |||
| 3164 | adapter->pdev = pdev; | ||
| 3165 | adapter->pdev_dev = &pdev->dev; | ||
| 3166 | adapter->name = pci_name(pdev); | ||
| 3167 | adapter->msg_enable = dflt_msg_enable; | ||
| 3168 | memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); | ||
| 3169 | |||
| 3170 | spin_lock_init(&adapter->stats_lock); | ||
| 3171 | spin_lock_init(&adapter->tid_release_lock); | ||
| 3172 | |||
| 3173 | INIT_WORK(&adapter->tid_release_task, process_tid_release_list); | ||
| 3174 | |||
| 3175 | err = t4_prep_adapter(adapter); | ||
| 3176 | if (err) | ||
| 3177 | goto out_unmap_bar; | ||
| 3178 | err = adap_init0(adapter); | ||
| 3179 | if (err) | ||
| 3180 | goto out_unmap_bar; | ||
| 3181 | |||
| 3182 | for_each_port(adapter, i) { | ||
| 3183 | struct net_device *netdev; | ||
| 3184 | |||
| 3185 | netdev = alloc_etherdev_mq(sizeof(struct port_info), | ||
| 3186 | MAX_ETH_QSETS); | ||
| 3187 | if (!netdev) { | ||
| 3188 | err = -ENOMEM; | ||
| 3189 | goto out_free_dev; | ||
| 3190 | } | ||
| 3191 | |||
| 3192 | SET_NETDEV_DEV(netdev, &pdev->dev); | ||
| 3193 | |||
| 3194 | adapter->port[i] = netdev; | ||
| 3195 | pi = netdev_priv(netdev); | ||
| 3196 | pi->adapter = adapter; | ||
| 3197 | pi->xact_addr_filt = -1; | ||
| 3198 | pi->rx_offload = RX_CSO; | ||
| 3199 | pi->port_id = i; | ||
| 3200 | netif_carrier_off(netdev); | ||
| 3201 | netif_tx_stop_all_queues(netdev); | ||
| 3202 | netdev->irq = pdev->irq; | ||
| 3203 | |||
| 3204 | netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; | ||
| 3205 | netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | ||
| 3206 | netdev->features |= NETIF_F_GRO | highdma; | ||
| 3207 | netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | ||
| 3208 | netdev->vlan_features = netdev->features & VLAN_FEAT; | ||
| 3209 | |||
| 3210 | netdev->netdev_ops = &cxgb4_netdev_ops; | ||
| 3211 | SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); | ||
| 3212 | } | ||
| 3213 | |||
| 3214 | pci_set_drvdata(pdev, adapter); | ||
| 3215 | |||
| 3216 | if (adapter->flags & FW_OK) { | ||
| 3217 | err = t4_port_init(adapter, 0, 0, 0); | ||
| 3218 | if (err) | ||
| 3219 | goto out_free_dev; | ||
| 3220 | } | ||
| 3221 | |||
| 3222 | /* | ||
| 3223 | * Configure queues and allocate tables now, they can be needed as | ||
| 3224 | * soon as the first register_netdev completes. | ||
| 3225 | */ | ||
| 3226 | cfg_queues(adapter); | ||
| 3227 | |||
| 3228 | adapter->l2t = t4_init_l2t(); | ||
| 3229 | if (!adapter->l2t) { | ||
| 3230 | /* We tolerate a lack of L2T, giving up some functionality */ | ||
| 3231 | dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); | ||
| 3232 | adapter->params.offload = 0; | ||
| 3233 | } | ||
| 3234 | |||
| 3235 | if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { | ||
| 3236 | dev_warn(&pdev->dev, "could not allocate TID table, " | ||
| 3237 | "continuing\n"); | ||
| 3238 | adapter->params.offload = 0; | ||
| 3239 | } | ||
| 3240 | |||
| 3241 | /* | ||
| 3242 | * The card is now ready to go. If any errors occur during device | ||
| 3243 | * registration we do not fail the whole card but rather proceed only | ||
| 3244 | * with the ports we manage to register successfully. However, we must | ||
| 3245 | * register at least one net device. | ||
| 3246 | */ | ||
| 3247 | for_each_port(adapter, i) { | ||
| 3248 | err = register_netdev(adapter->port[i]); | ||
| 3249 | if (err) | ||
| 3250 | dev_warn(&pdev->dev, | ||
| 3251 | "cannot register net device %s, skipping\n", | ||
| 3252 | adapter->port[i]->name); | ||
| 3253 | else { | ||
| 3254 | /* | ||
| 3255 | * Change the name we use for messages to the name of | ||
| 3256 | * the first successfully registered interface. | ||
| 3257 | */ | ||
| 3258 | if (!adapter->registered_device_map) | ||
| 3259 | adapter->name = adapter->port[i]->name; | ||
| 3260 | |||
| 3261 | __set_bit(i, &adapter->registered_device_map); | ||
| 3262 | adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i; | ||
| 3263 | } | ||
| 3264 | } | ||
| 3265 | if (!adapter->registered_device_map) { | ||
| 3266 | dev_err(&pdev->dev, "could not register any net devices\n"); | ||
| 3267 | goto out_free_dev; | ||
| 3268 | } | ||
| 3269 | |||
| 3270 | if (cxgb4_debugfs_root) { | ||
| 3271 | adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), | ||
| 3272 | cxgb4_debugfs_root); | ||
| 3273 | setup_debugfs(adapter); | ||
| 3274 | } | ||
| 3275 | |||
| 3276 | /* See what interrupts we'll be using */ | ||
| 3277 | if (msi > 1 && enable_msix(adapter) == 0) | ||
| 3278 | adapter->flags |= USING_MSIX; | ||
| 3279 | else if (msi > 0 && pci_enable_msi(pdev) == 0) | ||
| 3280 | adapter->flags |= USING_MSI; | ||
| 3281 | |||
| 3282 | if (is_offload(adapter)) | ||
| 3283 | attach_ulds(adapter); | ||
| 3284 | |||
| 3285 | print_port_info(adapter); | ||
| 3286 | |||
| 3287 | sriov: | ||
| 3288 | #ifdef CONFIG_PCI_IOV | ||
| 3289 | if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) | ||
| 3290 | if (pci_enable_sriov(pdev, num_vf[func]) == 0) | ||
| 3291 | dev_info(&pdev->dev, | ||
| 3292 | "instantiated %u virtual functions\n", | ||
| 3293 | num_vf[func]); | ||
| 3294 | #endif | ||
| 3295 | return 0; | ||
| 3296 | |||
| 3297 | out_free_dev: | ||
| 3298 | t4_free_mem(adapter->tids.tid_tab); | ||
| 3299 | t4_free_mem(adapter->l2t); | ||
| 3300 | for_each_port(adapter, i) | ||
| 3301 | if (adapter->port[i]) | ||
| 3302 | free_netdev(adapter->port[i]); | ||
| 3303 | if (adapter->flags & FW_OK) | ||
| 3304 | t4_fw_bye(adapter, 0); | ||
| 3305 | out_unmap_bar: | ||
| 3306 | iounmap(adapter->regs); | ||
| 3307 | out_free_adapter: | ||
| 3308 | kfree(adapter); | ||
| 3309 | out_disable_device: | ||
| 3310 | pci_disable_pcie_error_reporting(pdev); | ||
| 3311 | pci_disable_device(pdev); | ||
| 3312 | out_release_regions: | ||
| 3313 | pci_release_regions(pdev); | ||
| 3314 | pci_set_drvdata(pdev, NULL); | ||
| 3315 | return err; | ||
| 3316 | } | ||
| 3317 | |||
| 3318 | static void __devexit remove_one(struct pci_dev *pdev) | ||
| 3319 | { | ||
| 3320 | struct adapter *adapter = pci_get_drvdata(pdev); | ||
| 3321 | |||
| 3322 | pci_disable_sriov(pdev); | ||
| 3323 | |||
| 3324 | if (adapter) { | ||
| 3325 | int i; | ||
| 3326 | |||
| 3327 | if (is_offload(adapter)) | ||
| 3328 | detach_ulds(adapter); | ||
| 3329 | |||
| 3330 | for_each_port(adapter, i) | ||
| 3331 | if (test_bit(i, &adapter->registered_device_map)) | ||
| 3332 | unregister_netdev(adapter->port[i]); | ||
| 3333 | |||
| 3334 | if (adapter->debugfs_root) | ||
| 3335 | debugfs_remove_recursive(adapter->debugfs_root); | ||
| 3336 | |||
| 3337 | t4_sge_stop(adapter); | ||
| 3338 | t4_free_sge_resources(adapter); | ||
| 3339 | t4_free_mem(adapter->l2t); | ||
| 3340 | t4_free_mem(adapter->tids.tid_tab); | ||
| 3341 | disable_msi(adapter); | ||
| 3342 | |||
| 3343 | for_each_port(adapter, i) | ||
| 3344 | if (adapter->port[i]) | ||
| 3345 | free_netdev(adapter->port[i]); | ||
| 3346 | |||
| 3347 | if (adapter->flags & FW_OK) | ||
| 3348 | t4_fw_bye(adapter, 0); | ||
| 3349 | iounmap(adapter->regs); | ||
| 3350 | kfree(adapter); | ||
| 3351 | pci_disable_pcie_error_reporting(pdev); | ||
| 3352 | pci_disable_device(pdev); | ||
| 3353 | pci_release_regions(pdev); | ||
| 3354 | pci_set_drvdata(pdev, NULL); | ||
| 3355 | } else if (PCI_FUNC(pdev->devfn) > 0) | ||
| 3356 | pci_release_regions(pdev); | ||
| 3357 | } | ||
| 3358 | |||
| 3359 | static struct pci_driver cxgb4_driver = { | ||
| 3360 | .name = KBUILD_MODNAME, | ||
| 3361 | .id_table = cxgb4_pci_tbl, | ||
| 3362 | .probe = init_one, | ||
| 3363 | .remove = __devexit_p(remove_one), | ||
| 3364 | }; | ||
| 3365 | |||
| 3366 | static int __init cxgb4_init_module(void) | ||
| 3367 | { | ||
| 3368 | int ret; | ||
| 3369 | |||
| 3370 | /* Debugfs support is optional; just warn if this fails */ | ||
| 3371 | cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); | ||
| 3372 | if (!cxgb4_debugfs_root) | ||
| 3373 | pr_warning("could not create debugfs entry, continuing\n"); | ||
| 3374 | |||
| 3375 | ret = pci_register_driver(&cxgb4_driver); | ||
| 3376 | if (ret < 0) | ||
| 3377 | debugfs_remove(cxgb4_debugfs_root); | ||
| 3378 | return ret; | ||
| 3379 | } | ||
| 3380 | |||
| 3381 | static void __exit cxgb4_cleanup_module(void) | ||
| 3382 | { | ||
| 3383 | pci_unregister_driver(&cxgb4_driver); | ||
| 3384 | debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ | ||
| 3385 | } | ||
| 3386 | |||
| 3387 | module_init(cxgb4_init_module); | ||
| 3388 | module_exit(cxgb4_cleanup_module); | ||
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h new file mode 100644 index 000000000000..5b98546ac92d --- /dev/null +++ b/drivers/net/cxgb4/cxgb4_uld.h | |||
| @@ -0,0 +1,239 @@ | |||
| 1 | /* | ||
| 2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. | ||
| 5 | * | ||
| 6 | * This software is available to you under a choice of one of two | ||
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 8 | * General Public License (GPL) Version 2, available from the file | ||
| 9 | * COPYING in the main directory of this source tree, or the | ||
| 10 | * OpenIB.org BSD license below: | ||
| 11 | * | ||
| 12 | * Redistribution and use in source and binary forms, with or | ||
| 13 | * without modification, are permitted provided that the following | ||
| 14 | * conditions are met: | ||
| 15 | * | ||
| 16 | * - Redistributions of source code must retain the above | ||
| 17 | * copyright notice, this list of conditions and the following | ||
| 18 | * disclaimer. | ||
| 19 | * | ||
| 20 | * - Redistributions in binary form must reproduce the above | ||
| 21 | * copyright notice, this list of conditions and the following | ||
| 22 | * disclaimer in the documentation and/or other materials | ||
| 23 | * provided with the distribution. | ||
| 24 | * | ||
| 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 32 | * SOFTWARE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef __CXGB4_OFLD_H | ||
| 36 | #define __CXGB4_OFLD_H | ||
| 37 | |||
| 38 | #include <linux/cache.h> | ||
| 39 | #include <linux/spinlock.h> | ||
| 40 | #include <linux/skbuff.h> | ||
| 41 | #include <asm/atomic.h> | ||
| 42 | |||
| 43 | /* CPL message priority levels */ | ||
| 44 | enum { | ||
| 45 | CPL_PRIORITY_DATA = 0, /* data messages */ | ||
| 46 | CPL_PRIORITY_SETUP = 1, /* connection setup messages */ | ||
| 47 | CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */ | ||
| 48 | CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */ | ||
| 49 | CPL_PRIORITY_ACK = 1, /* RX ACK messages */ | ||
| 50 | CPL_PRIORITY_CONTROL = 1 /* control messages */ | ||
| 51 | }; | ||
| 52 | |||
| 53 | #define INIT_TP_WR(w, tid) do { \ | ||
| 54 | (w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \ | ||
| 55 | FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \ | ||
| 56 | (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \ | ||
| 57 | FW_WR_FLOWID(tid)); \ | ||
| 58 | (w)->wr.wr_lo = cpu_to_be64(0); \ | ||
| 59 | } while (0) | ||
| 60 | |||
| 61 | #define INIT_TP_WR_CPL(w, cpl, tid) do { \ | ||
| 62 | INIT_TP_WR(w, tid); \ | ||
| 63 | OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \ | ||
| 64 | } while (0) | ||
| 65 | |||
| 66 | #define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \ | ||
| 67 | (w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \ | ||
| 68 | (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \ | ||
| 69 | FW_WR_FLOWID(tid)); \ | ||
| 70 | (w)->wr.wr_lo = cpu_to_be64(0); \ | ||
| 71 | } while (0) | ||
| 72 | |||
| 73 | /* Special asynchronous notification message */ | ||
| 74 | #define CXGB4_MSG_AN ((void *)1) | ||
| 75 | |||
| 76 | struct serv_entry { | ||
| 77 | void *data; | ||
| 78 | }; | ||
| 79 | |||
| 80 | union aopen_entry { | ||
| 81 | void *data; | ||
| 82 | union aopen_entry *next; | ||
| 83 | }; | ||
| 84 | |||
| 85 | /* | ||
| 86 | * Holds the size, base address, free list start, etc. of the TID, server TID, | ||
| 87 | * and active-open TID tables. The tables themselves are allocated dynamically. | ||
| 88 | */ | ||
| 89 | struct tid_info { | ||
| 90 | void **tid_tab; | ||
| 91 | unsigned int ntids; | ||
| 92 | |||
| 93 | struct serv_entry *stid_tab; | ||
| 94 | unsigned long *stid_bmap; | ||
| 95 | unsigned int nstids; | ||
| 96 | unsigned int stid_base; | ||
| 97 | |||
| 98 | union aopen_entry *atid_tab; | ||
| 99 | unsigned int natids; | ||
| 100 | |||
| 101 | unsigned int nftids; | ||
| 102 | unsigned int ftid_base; | ||
| 103 | |||
| 104 | spinlock_t atid_lock ____cacheline_aligned_in_smp; | ||
| 105 | union aopen_entry *afree; | ||
| 106 | unsigned int atids_in_use; | ||
| 107 | |||
| 108 | spinlock_t stid_lock; | ||
| 109 | unsigned int stids_in_use; | ||
| 110 | |||
| 111 | atomic_t tids_in_use; | ||
| 112 | }; | ||
| 113 | |||
| 114 | static inline void *lookup_tid(const struct tid_info *t, unsigned int tid) | ||
| 115 | { | ||
| 116 | return tid < t->ntids ? t->tid_tab[tid] : NULL; | ||
| 117 | } | ||
| 118 | |||
| 119 | static inline void *lookup_atid(const struct tid_info *t, unsigned int atid) | ||
| 120 | { | ||
| 121 | return atid < t->natids ? t->atid_tab[atid].data : NULL; | ||
| 122 | } | ||
| 123 | |||
| 124 | static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) | ||
| 125 | { | ||
| 126 | stid -= t->stid_base; | ||
| 127 | return stid < t->nstids ? t->stid_tab[stid].data : NULL; | ||
| 128 | } | ||
| 129 | |||
| 130 | static inline void cxgb4_insert_tid(struct tid_info *t, void *data, | ||
| 131 | unsigned int tid) | ||
| 132 | { | ||
| 133 | t->tid_tab[tid] = data; | ||
| 134 | atomic_inc(&t->tids_in_use); | ||
| 135 | } | ||
| 136 | |||
| 137 | int cxgb4_alloc_atid(struct tid_info *t, void *data); | ||
| 138 | int cxgb4_alloc_stid(struct tid_info *t, int family, void *data); | ||
| 139 | void cxgb4_free_atid(struct tid_info *t, unsigned int atid); | ||
| 140 | void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family); | ||
| 141 | void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid); | ||
| 142 | void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, | ||
| 143 | unsigned int tid); | ||
| 144 | |||
| 145 | struct in6_addr; | ||
| 146 | |||
| 147 | int cxgb4_create_server(const struct net_device *dev, unsigned int stid, | ||
| 148 | __be32 sip, __be16 sport, unsigned int queue); | ||
| 149 | int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, | ||
| 150 | const struct in6_addr *sip, __be16 sport, | ||
| 151 | unsigned int queue); | ||
| 152 | |||
| 153 | static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) | ||
| 154 | { | ||
| 155 | skb_set_queue_mapping(skb, (queue << 1) | prio); | ||
| 156 | } | ||
| 157 | |||
| 158 | enum cxgb4_uld { | ||
| 159 | CXGB4_ULD_RDMA, | ||
| 160 | CXGB4_ULD_ISCSI, | ||
| 161 | CXGB4_ULD_MAX | ||
| 162 | }; | ||
| 163 | |||
| 164 | enum cxgb4_state { | ||
| 165 | CXGB4_STATE_UP, | ||
| 166 | CXGB4_STATE_START_RECOVERY, | ||
| 167 | CXGB4_STATE_DOWN, | ||
| 168 | CXGB4_STATE_DETACH | ||
| 169 | }; | ||
| 170 | |||
| 171 | struct pci_dev; | ||
| 172 | struct l2t_data; | ||
| 173 | struct net_device; | ||
| 174 | struct pkt_gl; | ||
| 175 | struct tp_tcp_stats; | ||
| 176 | |||
| 177 | struct cxgb4_range { | ||
| 178 | unsigned int start; | ||
| 179 | unsigned int size; | ||
| 180 | }; | ||
| 181 | |||
| 182 | struct cxgb4_virt_res { /* virtualized HW resources */ | ||
| 183 | struct cxgb4_range ddp; | ||
| 184 | struct cxgb4_range iscsi; | ||
| 185 | struct cxgb4_range stag; | ||
| 186 | struct cxgb4_range rq; | ||
| 187 | struct cxgb4_range pbl; | ||
| 188 | }; | ||
| 189 | |||
| 190 | /* | ||
| 191 | * Block of information the LLD provides to ULDs attaching to a device. | ||
| 192 | */ | ||
| 193 | struct cxgb4_lld_info { | ||
| 194 | struct pci_dev *pdev; /* associated PCI device */ | ||
| 195 | struct l2t_data *l2t; /* L2 table */ | ||
| 196 | struct tid_info *tids; /* TID table */ | ||
| 197 | struct net_device **ports; /* device ports */ | ||
| 198 | const struct cxgb4_virt_res *vr; /* assorted HW resources */ | ||
| 199 | const unsigned short *mtus; /* MTU table */ | ||
| 200 | const unsigned short *rxq_ids; /* the ULD's Rx queue ids */ | ||
| 201 | unsigned short nrxq; /* # of Rx queues */ | ||
| 202 | unsigned short ntxq; /* # of Tx queues */ | ||
| 203 | unsigned char nchan:4; /* # of channels */ | ||
| 204 | unsigned char nports:4; /* # of ports */ | ||
| 205 | unsigned char wr_cred; /* WR 16-byte credits */ | ||
| 206 | unsigned char adapter_type; /* type of adapter */ | ||
| 207 | unsigned char fw_api_ver; /* FW API version */ | ||
| 208 | unsigned int fw_vers; /* FW version */ | ||
| 209 | unsigned int iscsi_iolen; /* iSCSI max I/O length */ | ||
| 210 | unsigned short udb_density; /* # of user DB/page */ | ||
| 211 | unsigned short ucq_density; /* # of user CQs/page */ | ||
| 212 | void __iomem *gts_reg; /* address of GTS register */ | ||
| 213 | void __iomem *db_reg; /* address of kernel doorbell */ | ||
| 214 | }; | ||
| 215 | |||
| 216 | struct cxgb4_uld_info { | ||
| 217 | const char *name; | ||
| 218 | void *(*add)(const struct cxgb4_lld_info *p); | ||
| 219 | int (*rx_handler)(void *handle, const __be64 *rsp, | ||
| 220 | const struct pkt_gl *gl); | ||
| 221 | int (*state_change)(void *handle, enum cxgb4_state new_state); | ||
| 222 | }; | ||
| 223 | |||
| 224 | int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); | ||
| 225 | int cxgb4_unregister_uld(enum cxgb4_uld type); | ||
| 226 | int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); | ||
| 227 | unsigned int cxgb4_port_chan(const struct net_device *dev); | ||
| 228 | unsigned int cxgb4_port_viid(const struct net_device *dev); | ||
| 229 | unsigned int cxgb4_port_idx(const struct net_device *dev); | ||
| 230 | struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id); | ||
| 231 | unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, | ||
| 232 | unsigned int *idx); | ||
| 233 | void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, | ||
| 234 | struct tp_tcp_stats *v6); | ||
| 235 | void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, | ||
| 236 | const unsigned int *pgsz_order); | ||
| 237 | struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, | ||
| 238 | unsigned int skb_len, unsigned int pull_len); | ||
| 239 | #endif /* !__CXGB4_OFLD_H */ | ||
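For context, this header is the contract between the low-level driver and upper-layer drivers (ULDs) such as the iSCSI and RDMA modules. The fragment below is only a sketch of how a ULD might hook in: the my_uld_* names, the per-adapter struct, and the callback bodies are all invented, and a real ULD would keep meaningful state behind the handle returned from .add.

#include <linux/slab.h>
#include "cxgb4_uld.h"

struct my_uld_dev {			/* invented per-adapter ULD state */
	const struct cxgb4_lld_info *lldi;
};

static void *my_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct my_uld_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (d)
		d->lldi = lldi;		/* queues, TID table, MTU table, ... */
	return d;			/* handle passed back to the other callbacks */
}

static int my_uld_rx(void *handle, const __be64 *rsp, const struct pkt_gl *gl)
{
	return 0;			/* CPL messages for this ULD's Rx queues land here */
}

static int my_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	return 0;			/* react to CXGB4_STATE_UP / _DOWN / _DETACH */
}

static const struct cxgb4_uld_info my_uld_info = {
	.name         = "my_uld",
	.add          = my_uld_add,
	.rx_handler   = my_uld_rx,
	.state_change = my_uld_state_change,
};

static int my_uld_attach(void)		/* typically called from the ULD's module init */
{
	return cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
}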
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c new file mode 100644 index 000000000000..9f96724a133a --- /dev/null +++ b/drivers/net/cxgb4/l2t.c | |||
| @@ -0,0 +1,624 @@ | |||
| 1 | /* | ||
| 2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. | ||
| 5 | * | ||
| 6 | * This software is available to you under a choice of one of two | ||
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 8 | * General Public License (GPL) Version 2, available from the file | ||
| 9 | * COPYING in the main directory of this source tree, or the | ||
| 10 | * OpenIB.org BSD license below: | ||
| 11 | * | ||
| 12 | * Redistribution and use in source and binary forms, with or | ||
| 13 | * without modification, are permitted provided that the following | ||
| 14 | * conditions are met: | ||
| 15 | * | ||
| 16 | * - Redistributions of source code must retain the above | ||
| 17 | * copyright notice, this list of conditions and the following | ||
| 18 | * disclaimer. | ||
| 19 | * | ||
| 20 | * - Redistributions in binary form must reproduce the above | ||
| 21 | * copyright notice, this list of conditions and the following | ||
| 22 | * disclaimer in the documentation and/or other materials | ||
| 23 | * provided with the distribution. | ||
| 24 | * | ||
| 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 32 | * SOFTWARE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #include <linux/skbuff.h> | ||
| 36 | #include <linux/netdevice.h> | ||
| 37 | #include <linux/if.h> | ||
| 38 | #include <linux/if_vlan.h> | ||
| 39 | #include <linux/jhash.h> | ||
| 40 | #include <net/neighbour.h> | ||
| 41 | #include "cxgb4.h" | ||
| 42 | #include "l2t.h" | ||
| 43 | #include "t4_msg.h" | ||
| 44 | #include "t4fw_api.h" | ||
| 45 | |||
| 46 | #define VLAN_NONE 0xfff | ||
| 47 | |||
| 48 | /* identifies sync vs async L2T_WRITE_REQs */ | ||
| 49 | #define F_SYNC_WR (1 << 12) | ||
| 50 | |||
| 51 | enum { | ||
| 52 | L2T_STATE_VALID, /* entry is up to date */ | ||
| 53 | L2T_STATE_STALE, /* entry may be used but needs revalidation */ | ||
| 54 | L2T_STATE_RESOLVING, /* entry needs address resolution */ | ||
| 55 | L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */ | ||
| 56 | |||
| 57 | /* when state is one of the below the entry is not hashed */ | ||
| 58 | L2T_STATE_SWITCHING, /* entry is being used by a switching filter */ | ||
| 59 | L2T_STATE_UNUSED /* entry not in use */ | ||
| 60 | }; | ||
| 61 | |||
| 62 | struct l2t_data { | ||
| 63 | rwlock_t lock; | ||
| 64 | atomic_t nfree; /* number of free entries */ | ||
| 65 | struct l2t_entry *rover; /* starting point for next allocation */ | ||
| 66 | struct l2t_entry l2tab[L2T_SIZE]; | ||
| 67 | }; | ||
| 68 | |||
| 69 | static inline unsigned int vlan_prio(const struct l2t_entry *e) | ||
| 70 | { | ||
| 71 | return e->vlan >> 13; | ||
| 72 | } | ||
| 73 | |||
| 74 | static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) | ||
| 75 | { | ||
| 76 | if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ | ||
| 77 | atomic_dec(&d->nfree); | ||
| 78 | } | ||
| 79 | |||
| 80 | /* | ||
| 81 | * To avoid having to check address families we do not allow v4 and v6 | ||
| 82 | * neighbors to be on the same hash chain. We keep v4 entries in the first | ||
| 83 | * half of available hash buckets and v6 in the second. | ||
| 84 | */ | ||
| 85 | enum { | ||
| 86 | L2T_SZ_HALF = L2T_SIZE / 2, | ||
| 87 | L2T_HASH_MASK = L2T_SZ_HALF - 1 | ||
| 88 | }; | ||
| 89 | |||
| 90 | static inline unsigned int arp_hash(const u32 *key, int ifindex) | ||
| 91 | { | ||
| 92 | return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK; | ||
| 93 | } | ||
| 94 | |||
| 95 | static inline unsigned int ipv6_hash(const u32 *key, int ifindex) | ||
| 96 | { | ||
| 97 | u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3]; | ||
| 98 | |||
| 99 | return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK); | ||
| 100 | } | ||
| 101 | |||
| 102 | static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex) | ||
| 103 | { | ||
| 104 | return addr_len == 4 ? arp_hash(addr, ifindex) : | ||
| 105 | ipv6_hash(addr, ifindex); | ||
| 106 | } | ||
| 107 | |||
| 108 | /* | ||
| 109 | * Checks if an L2T entry is for the given IP/IPv6 address. It does not check | ||
| 110 | * whether the L2T entry and the address are of the same address family. | ||
| 111 | * Callers ensure an address is only checked against L2T entries of the same | ||
| 112 | * family, something made trivial by the separation of IP and IPv6 hash chains | ||
| 113 | * mentioned above. Returns 0 if there's a match, non-zero otherwise. | ||
| 114 | */ | ||
| 115 | static int addreq(const struct l2t_entry *e, const u32 *addr) | ||
| 116 | { | ||
| 117 | if (e->v6) | ||
| 118 | return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) | | ||
| 119 | (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]); | ||
| 120 | return e->addr[0] ^ addr[0]; | ||
| 121 | } | ||
| 122 | |||
| 123 | static void neigh_replace(struct l2t_entry *e, struct neighbour *n) | ||
| 124 | { | ||
| 125 | neigh_hold(n); | ||
| 126 | if (e->neigh) | ||
| 127 | neigh_release(e->neigh); | ||
| 128 | e->neigh = n; | ||
| 129 | } | ||
| 130 | |||
| 131 | /* | ||
| 132 | * Write an L2T entry. Must be called with the entry locked. | ||
| 133 | * The write may be synchronous or asynchronous. | ||
| 134 | */ | ||
| 135 | static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync) | ||
| 136 | { | ||
| 137 | struct sk_buff *skb; | ||
| 138 | struct cpl_l2t_write_req *req; | ||
| 139 | |||
| 140 | skb = alloc_skb(sizeof(*req), GFP_ATOMIC); | ||
| 141 | if (!skb) | ||
| 142 | return -ENOMEM; | ||
| 143 | |||
| 144 | req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); | ||
| 145 | INIT_TP_WR(req, 0); | ||
| 146 | |||
| 147 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, | ||
| 148 | e->idx | (sync ? F_SYNC_WR : 0) | | ||
| 149 | TID_QID(adap->sge.fw_evtq.abs_id))); | ||
| 150 | req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync)); | ||
| 151 | req->l2t_idx = htons(e->idx); | ||
| 152 | req->vlan = htons(e->vlan); | ||
| 153 | if (e->neigh) | ||
| 154 | memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); | ||
| 155 | memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); | ||
| 156 | |||
| 157 | set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); | ||
| 158 | t4_ofld_send(adap, skb); | ||
| 159 | |||
| 160 | if (sync && e->state != L2T_STATE_SWITCHING) | ||
| 161 | e->state = L2T_STATE_SYNC_WRITE; | ||
| 162 | return 0; | ||
| 163 | } | ||
| 164 | |||
| 165 | /* | ||
| 166 | * Send packets waiting in an L2T entry's ARP queue. Must be called with the | ||
| 167 | * entry locked. | ||
| 168 | */ | ||
| 169 | static void send_pending(struct adapter *adap, struct l2t_entry *e) | ||
| 170 | { | ||
| 171 | while (e->arpq_head) { | ||
| 172 | struct sk_buff *skb = e->arpq_head; | ||
| 173 | |||
| 174 | e->arpq_head = skb->next; | ||
| 175 | skb->next = NULL; | ||
| 176 | t4_ofld_send(adap, skb); | ||
| 177 | } | ||
| 178 | e->arpq_tail = NULL; | ||
| 179 | } | ||
| 180 | |||
| 181 | /* | ||
| 182 | * Process a CPL_L2T_WRITE_RPL. Wake up the ARP queue if it completes a | ||
| 183 | * synchronous L2T_WRITE. Note that the TID in the reply is really the L2T | ||
| 184 | * index it refers to. | ||
| 185 | */ | ||
| 186 | void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl) | ||
| 187 | { | ||
| 188 | unsigned int tid = GET_TID(rpl); | ||
| 189 | unsigned int idx = tid & (L2T_SIZE - 1); | ||
| 190 | |||
| 191 | if (unlikely(rpl->status != CPL_ERR_NONE)) { | ||
| 192 | dev_err(adap->pdev_dev, | ||
| 193 | "Unexpected L2T_WRITE_RPL status %u for entry %u\n", | ||
| 194 | rpl->status, idx); | ||
| 195 | return; | ||
| 196 | } | ||
| 197 | |||
| 198 | if (tid & F_SYNC_WR) { | ||
| 199 | struct l2t_entry *e = &adap->l2t->l2tab[idx]; | ||
| 200 | |||
| 201 | spin_lock(&e->lock); | ||
| 202 | if (e->state != L2T_STATE_SWITCHING) { | ||
| 203 | send_pending(adap, e); | ||
| 204 | e->state = (e->neigh->nud_state & NUD_STALE) ? | ||
| 205 | L2T_STATE_STALE : L2T_STATE_VALID; | ||
| 206 | } | ||
| 207 | spin_unlock(&e->lock); | ||
| 208 | } | ||
| 209 | } | ||
| 210 | |||
| 211 | /* | ||
| 212 | * Add a packet to an L2T entry's queue of packets awaiting resolution. | ||
| 213 | * Must be called with the entry's lock held. | ||
| 214 | */ | ||
| 215 | static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) | ||
| 216 | { | ||
| 217 | skb->next = NULL; | ||
| 218 | if (e->arpq_head) | ||
| 219 | e->arpq_tail->next = skb; | ||
| 220 | else | ||
| 221 | e->arpq_head = skb; | ||
| 222 | e->arpq_tail = skb; | ||
| 223 | } | ||
| 224 | |||
| 225 | int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, | ||
| 226 | struct l2t_entry *e) | ||
| 227 | { | ||
| 228 | struct adapter *adap = netdev2adap(dev); | ||
| 229 | |||
| 230 | again: | ||
| 231 | switch (e->state) { | ||
| 232 | case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ | ||
| 233 | neigh_event_send(e->neigh, NULL); | ||
| 234 | spin_lock_bh(&e->lock); | ||
| 235 | if (e->state == L2T_STATE_STALE) | ||
| 236 | e->state = L2T_STATE_VALID; | ||
| 237 | spin_unlock_bh(&e->lock); | ||
| 238 | case L2T_STATE_VALID: /* fast-path, send the packet on */ | ||
| 239 | return t4_ofld_send(adap, skb); | ||
| 240 | case L2T_STATE_RESOLVING: | ||
| 241 | case L2T_STATE_SYNC_WRITE: | ||
| 242 | spin_lock_bh(&e->lock); | ||
| 243 | if (e->state != L2T_STATE_SYNC_WRITE && | ||
| 244 | e->state != L2T_STATE_RESOLVING) { | ||
| 245 | spin_unlock_bh(&e->lock); | ||
| 246 | goto again; | ||
| 247 | } | ||
| 248 | arpq_enqueue(e, skb); | ||
| 249 | spin_unlock_bh(&e->lock); | ||
| 250 | |||
| 251 | if (e->state == L2T_STATE_RESOLVING && | ||
| 252 | !neigh_event_send(e->neigh, NULL)) { | ||
| 253 | spin_lock_bh(&e->lock); | ||
| 254 | if (e->state == L2T_STATE_RESOLVING && e->arpq_head) | ||
| 255 | write_l2e(adap, e, 1); | ||
| 256 | spin_unlock_bh(&e->lock); | ||
| 257 | } | ||
| 258 | } | ||
| 259 | return 0; | ||
| 260 | } | ||
| 261 | EXPORT_SYMBOL(cxgb4_l2t_send); | ||
| 262 | |||
| 263 | /* | ||
| 264 | * Allocate a free L2T entry. Must be called with l2t_data.lock held. | ||
| 265 | */ | ||
| 266 | static struct l2t_entry *alloc_l2e(struct l2t_data *d) | ||
| 267 | { | ||
| 268 | struct l2t_entry *end, *e, **p; | ||
| 269 | |||
| 270 | if (!atomic_read(&d->nfree)) | ||
| 271 | return NULL; | ||
| 272 | |||
| 273 | /* there's definitely a free entry */ | ||
| 274 | for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e) | ||
| 275 | if (atomic_read(&e->refcnt) == 0) | ||
| 276 | goto found; | ||
| 277 | |||
| 278 | for (e = d->l2tab; atomic_read(&e->refcnt); ++e) | ||
| 279 | ; | ||
| 280 | found: | ||
| 281 | d->rover = e + 1; | ||
| 282 | atomic_dec(&d->nfree); | ||
| 283 | |||
| 284 | /* | ||
| 285 | * The entry we found may be an inactive entry that is | ||
| 286 | * presently in the hash table. We need to remove it. | ||
| 287 | */ | ||
| 288 | if (e->state < L2T_STATE_SWITCHING) | ||
| 289 | for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) | ||
| 290 | if (*p == e) { | ||
| 291 | *p = e->next; | ||
| 292 | e->next = NULL; | ||
| 293 | break; | ||
| 294 | } | ||
| 295 | |||
| 296 | e->state = L2T_STATE_UNUSED; | ||
| 297 | return e; | ||
| 298 | } | ||
| 299 | |||
| 300 | /* | ||
| 301 | * Called when an L2T entry has no more users. | ||
| 302 | */ | ||
| 303 | static void t4_l2e_free(struct l2t_entry *e) | ||
| 304 | { | ||
| 305 | struct l2t_data *d; | ||
| 306 | |||
| 307 | spin_lock_bh(&e->lock); | ||
| 308 | if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ | ||
| 309 | if (e->neigh) { | ||
| 310 | neigh_release(e->neigh); | ||
| 311 | e->neigh = NULL; | ||
| 312 | } | ||
| 313 | } | ||
| 314 | spin_unlock_bh(&e->lock); | ||
| 315 | |||
| 316 | d = container_of(e, struct l2t_data, l2tab[e->idx]); | ||
| 317 | atomic_inc(&d->nfree); | ||
| 318 | } | ||
| 319 | |||
| 320 | void cxgb4_l2t_release(struct l2t_entry *e) | ||
| 321 | { | ||
| 322 | if (atomic_dec_and_test(&e->refcnt)) | ||
| 323 | t4_l2e_free(e); | ||
| 324 | } | ||
| 325 | EXPORT_SYMBOL(cxgb4_l2t_release); | ||
| 326 | |||
| 327 | /* | ||
| 328 | * Update an L2T entry that was previously used for the same next hop as neigh. | ||
| 329 | * Must be called with softirqs disabled. | ||
| 330 | */ | ||
| 331 | static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) | ||
| 332 | { | ||
| 333 | unsigned int nud_state; | ||
| 334 | |||
| 335 | spin_lock(&e->lock); /* avoid race with t4_l2t_free */ | ||
| 336 | if (neigh != e->neigh) | ||
| 337 | neigh_replace(e, neigh); | ||
| 338 | nud_state = neigh->nud_state; | ||
| 339 | if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) || | ||
| 340 | !(nud_state & NUD_VALID)) | ||
| 341 | e->state = L2T_STATE_RESOLVING; | ||
| 342 | else if (nud_state & NUD_CONNECTED) | ||
| 343 | e->state = L2T_STATE_VALID; | ||
| 344 | else | ||
| 345 | e->state = L2T_STATE_STALE; | ||
| 346 | spin_unlock(&e->lock); | ||
| 347 | } | ||
| 348 | |||
| 349 | struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, | ||
| 350 | const struct net_device *physdev, | ||
| 351 | unsigned int priority) | ||
| 352 | { | ||
| 353 | u8 lport; | ||
| 354 | u16 vlan; | ||
| 355 | struct l2t_entry *e; | ||
| 356 | int addr_len = neigh->tbl->key_len; | ||
| 357 | u32 *addr = (u32 *)neigh->primary_key; | ||
| 358 | int ifidx = neigh->dev->ifindex; | ||
| 359 | int hash = addr_hash(addr, addr_len, ifidx); | ||
| 360 | |||
| 361 | if (neigh->dev->flags & IFF_LOOPBACK) | ||
| 362 | lport = netdev2pinfo(physdev)->tx_chan + 4; | ||
| 363 | else | ||
| 364 | lport = netdev2pinfo(physdev)->lport; | ||
| 365 | |||
| 366 | if (neigh->dev->priv_flags & IFF_802_1Q_VLAN) | ||
| 367 | vlan = vlan_dev_vlan_id(neigh->dev); | ||
| 368 | else | ||
| 369 | vlan = VLAN_NONE; | ||
| 370 | |||
| 371 | write_lock_bh(&d->lock); | ||
| 372 | for (e = d->l2tab[hash].first; e; e = e->next) | ||
| 373 | if (!addreq(e, addr) && e->ifindex == ifidx && | ||
| 374 | e->vlan == vlan && e->lport == lport) { | ||
| 375 | l2t_hold(d, e); | ||
| 376 | if (atomic_read(&e->refcnt) == 1) | ||
| 377 | reuse_entry(e, neigh); | ||
| 378 | goto done; | ||
| 379 | } | ||
| 380 | |||
| 381 | /* Need to allocate a new entry */ | ||
| 382 | e = alloc_l2e(d); | ||
| 383 | if (e) { | ||
| 384 | spin_lock(&e->lock); /* avoid race with t4_l2t_free */ | ||
| 385 | e->state = L2T_STATE_RESOLVING; | ||
| 386 | memcpy(e->addr, addr, addr_len); | ||
| 387 | e->ifindex = ifidx; | ||
| 388 | e->hash = hash; | ||
| 389 | e->lport = lport; | ||
| 390 | e->v6 = addr_len == 16; | ||
| 391 | atomic_set(&e->refcnt, 1); | ||
| 392 | neigh_replace(e, neigh); | ||
| 393 | e->vlan = vlan; | ||
| 394 | e->next = d->l2tab[hash].first; | ||
| 395 | d->l2tab[hash].first = e; | ||
| 396 | spin_unlock(&e->lock); | ||
| 397 | } | ||
| 398 | done: | ||
| 399 | write_unlock_bh(&d->lock); | ||
| 400 | return e; | ||
| 401 | } | ||
| 402 | EXPORT_SYMBOL(cxgb4_l2t_get); | ||
| 403 | |||
| 404 | /* | ||
| 405 | * Called when address resolution fails for an L2T entry, to handle the | ||
| 406 | * packets waiting on its arpq. If a packet specifies a failure handler it | ||
| 407 | * is invoked; otherwise the packet is sent to the device. | ||
| 408 | */ | ||
| 409 | static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq) | ||
| 410 | { | ||
| 411 | while (arpq) { | ||
| 412 | struct sk_buff *skb = arpq; | ||
| 413 | const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); | ||
| 414 | |||
| 415 | arpq = skb->next; | ||
| 416 | skb->next = NULL; | ||
| 417 | if (cb->arp_err_handler) | ||
| 418 | cb->arp_err_handler(cb->handle, skb); | ||
| 419 | else | ||
| 420 | t4_ofld_send(adap, skb); | ||
| 421 | } | ||
| 422 | } | ||
| 423 | |||
| 424 | /* | ||
| 425 | * Called when the host's neighbor layer makes a change to some entry that is | ||
| 426 | * loaded into the HW L2 table. | ||
| 427 | */ | ||
| 428 | void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) | ||
| 429 | { | ||
| 430 | struct l2t_entry *e; | ||
| 431 | struct sk_buff *arpq = NULL; | ||
| 432 | struct l2t_data *d = adap->l2t; | ||
| 433 | int addr_len = neigh->tbl->key_len; | ||
| 434 | u32 *addr = (u32 *) neigh->primary_key; | ||
| 435 | int ifidx = neigh->dev->ifindex; | ||
| 436 | int hash = addr_hash(addr, addr_len, ifidx); | ||
| 437 | |||
| 438 | read_lock_bh(&d->lock); | ||
| 439 | for (e = d->l2tab[hash].first; e; e = e->next) | ||
| 440 | if (!addreq(e, addr) && e->ifindex == ifidx) { | ||
| 441 | spin_lock(&e->lock); | ||
| 442 | if (atomic_read(&e->refcnt)) | ||
| 443 | goto found; | ||
| 444 | spin_unlock(&e->lock); | ||
| 445 | break; | ||
| 446 | } | ||
| 447 | read_unlock_bh(&d->lock); | ||
| 448 | return; | ||
| 449 | |||
| 450 | found: | ||
| 451 | read_unlock(&d->lock); | ||
| 452 | |||
| 453 | if (neigh != e->neigh) | ||
| 454 | neigh_replace(e, neigh); | ||
| 455 | |||
| 456 | if (e->state == L2T_STATE_RESOLVING) { | ||
| 457 | if (neigh->nud_state & NUD_FAILED) { | ||
| 458 | arpq = e->arpq_head; | ||
| 459 | e->arpq_head = e->arpq_tail = NULL; | ||
| 460 | } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) && | ||
| 461 | e->arpq_head) { | ||
| 462 | write_l2e(adap, e, 1); | ||
| 463 | } | ||
| 464 | } else { | ||
| 465 | e->state = neigh->nud_state & NUD_CONNECTED ? | ||
| 466 | L2T_STATE_VALID : L2T_STATE_STALE; | ||
| 467 | if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac))) | ||
| 468 | write_l2e(adap, e, 0); | ||
| 469 | } | ||
| 470 | |||
| 471 | spin_unlock_bh(&e->lock); | ||
| 472 | |||
| 473 | if (arpq) | ||
| 474 | handle_failed_resolution(adap, arpq); | ||
| 475 | } | ||
| 476 | |||
| 477 | /* | ||
| 478 | * Allocate an L2T entry for use by a switching rule. Such entries need to be | ||
| 479 | * explicitly freed and while busy they are not on any hash chain, so normal | ||
| 480 | * address resolution updates do not see them. | ||
| 481 | */ | ||
| 482 | struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d) | ||
| 483 | { | ||
| 484 | struct l2t_entry *e; | ||
| 485 | |||
| 486 | write_lock_bh(&d->lock); | ||
| 487 | e = alloc_l2e(d); | ||
| 488 | if (e) { | ||
| 489 | spin_lock(&e->lock); /* avoid race with t4_l2t_free */ | ||
| 490 | e->state = L2T_STATE_SWITCHING; | ||
| 491 | atomic_set(&e->refcnt, 1); | ||
| 492 | spin_unlock(&e->lock); | ||
| 493 | } | ||
| 494 | write_unlock_bh(&d->lock); | ||
| 495 | return e; | ||
| 496 | } | ||
| 497 | |||
| 498 | /* | ||
| 499 | * Sets/updates the contents of a switching L2T entry that has been allocated | ||
| 500 | * with an earlier call to @t4_l2t_alloc_switching. | ||
| 501 | */ | ||
| 502 | int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, | ||
| 503 | u8 port, u8 *eth_addr) | ||
| 504 | { | ||
| 505 | e->vlan = vlan; | ||
| 506 | e->lport = port; | ||
| 507 | memcpy(e->dmac, eth_addr, ETH_ALEN); | ||
| 508 | return write_l2e(adap, e, 0); | ||
| 509 | } | ||
| 510 | |||
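As a usage illustration of the two switching-entry helpers above, here is a minimal sketch (hypothetical, not part of the driver) of how a caller might pair t4_l2t_alloc_switching() with t4_l2t_set_switching() and drop the entry again if the HW write fails; the function name and parameters are made up, and the sketch assumes the usual cxgb4 driver context (cxgb4.h, l2t.h):

	static struct l2t_entry *my_get_switch_entry(struct adapter *adap,
						     struct l2t_data *d,
						     u16 vlan, u8 port, u8 *mac)
	{
		/* Entry comes back off every hash chain with refcnt == 1. */
		struct l2t_entry *e = t4_l2t_alloc_switching(d);

		if (e && t4_l2t_set_switching(adap, e, vlan, port, mac) < 0) {
			/* HW write failed: drop our reference so the entry
			 * can be recycled by alloc_l2e(). */
			cxgb4_l2t_release(e);
			e = NULL;
		}
		return e;
	}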
| 511 | struct l2t_data *t4_init_l2t(void) | ||
| 512 | { | ||
| 513 | int i; | ||
| 514 | struct l2t_data *d; | ||
| 515 | |||
| 516 | d = t4_alloc_mem(sizeof(*d)); | ||
| 517 | if (!d) | ||
| 518 | return NULL; | ||
| 519 | |||
| 520 | d->rover = d->l2tab; | ||
| 521 | atomic_set(&d->nfree, L2T_SIZE); | ||
| 522 | rwlock_init(&d->lock); | ||
| 523 | |||
| 524 | for (i = 0; i < L2T_SIZE; ++i) { | ||
| 525 | d->l2tab[i].idx = i; | ||
| 526 | d->l2tab[i].state = L2T_STATE_UNUSED; | ||
| 527 | spin_lock_init(&d->l2tab[i].lock); | ||
| 528 | atomic_set(&d->l2tab[i].refcnt, 0); | ||
| 529 | } | ||
| 530 | return d; | ||
| 531 | } | ||
| 532 | |||
| 533 | #include <linux/module.h> | ||
| 534 | #include <linux/debugfs.h> | ||
| 535 | #include <linux/seq_file.h> | ||
| 536 | |||
| 537 | static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos) | ||
| 538 | { | ||
| 539 | struct l2t_entry *l2tab = seq->private; | ||
| 540 | |||
| 541 | return pos >= L2T_SIZE ? NULL : &l2tab[pos]; | ||
| 542 | } | ||
| 543 | |||
| 544 | static void *l2t_seq_start(struct seq_file *seq, loff_t *pos) | ||
| 545 | { | ||
| 546 | return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; | ||
| 547 | } | ||
| 548 | |||
| 549 | static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
| 550 | { | ||
| 551 | v = l2t_get_idx(seq, *pos); | ||
| 552 | if (v) | ||
| 553 | ++*pos; | ||
| 554 | return v; | ||
| 555 | } | ||
| 556 | |||
| 557 | static void l2t_seq_stop(struct seq_file *seq, void *v) | ||
| 558 | { | ||
| 559 | } | ||
| 560 | |||
| 561 | static char l2e_state(const struct l2t_entry *e) | ||
| 562 | { | ||
| 563 | switch (e->state) { | ||
| 564 | case L2T_STATE_VALID: return 'V'; | ||
| 565 | case L2T_STATE_STALE: return 'S'; | ||
| 566 | case L2T_STATE_SYNC_WRITE: return 'W'; | ||
| 567 | case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R'; | ||
| 568 | case L2T_STATE_SWITCHING: return 'X'; | ||
| 569 | default: | ||
| 570 | return 'U'; | ||
| 571 | } | ||
| 572 | } | ||
| 573 | |||
| 574 | static int l2t_seq_show(struct seq_file *seq, void *v) | ||
| 575 | { | ||
| 576 | if (v == SEQ_START_TOKEN) | ||
| 577 | seq_puts(seq, " Idx IP address " | ||
| 578 | "Ethernet address VLAN/P LP State Users Port\n"); | ||
| 579 | else { | ||
| 580 | char ip[60]; | ||
| 581 | struct l2t_entry *e = v; | ||
| 582 | |||
| 583 | spin_lock_bh(&e->lock); | ||
| 584 | if (e->state == L2T_STATE_SWITCHING) | ||
| 585 | ip[0] = '\0'; | ||
| 586 | else | ||
| 587 | sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr); | ||
| 588 | seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n", | ||
| 589 | e->idx, ip, e->dmac, | ||
| 590 | e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport, | ||
| 591 | l2e_state(e), atomic_read(&e->refcnt), | ||
| 592 | e->neigh ? e->neigh->dev->name : ""); | ||
| 593 | spin_unlock_bh(&e->lock); | ||
| 594 | } | ||
| 595 | return 0; | ||
| 596 | } | ||
| 597 | |||
| 598 | static const struct seq_operations l2t_seq_ops = { | ||
| 599 | .start = l2t_seq_start, | ||
| 600 | .next = l2t_seq_next, | ||
| 601 | .stop = l2t_seq_stop, | ||
| 602 | .show = l2t_seq_show | ||
| 603 | }; | ||
| 604 | |||
| 605 | static int l2t_seq_open(struct inode *inode, struct file *file) | ||
| 606 | { | ||
| 607 | int rc = seq_open(file, &l2t_seq_ops); | ||
| 608 | |||
| 609 | if (!rc) { | ||
| 610 | struct adapter *adap = inode->i_private; | ||
| 611 | struct seq_file *seq = file->private_data; | ||
| 612 | |||
| 613 | seq->private = adap->l2t->l2tab; | ||
| 614 | } | ||
| 615 | return rc; | ||
| 616 | } | ||
| 617 | |||
| 618 | const struct file_operations t4_l2t_fops = { | ||
| 619 | .owner = THIS_MODULE, | ||
| 620 | .open = l2t_seq_open, | ||
| 621 | .read = seq_read, | ||
| 622 | .llseek = seq_lseek, | ||
| 623 | .release = seq_release, | ||
| 624 | }; | ||
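For reference, a minimal sketch (hypothetical, not driver code) of how an offload user would typically pin an L2T entry for a connection with cxgb4_l2t_get() and drop it with cxgb4_l2t_release(); struct my_conn and the two helpers are illustrative names only, and the sketch assumes the usual cxgb4 headers:

	struct my_conn {
		struct l2t_entry *l2t;
		/* ... other per-connection state ... */
	};

	static int my_setup_conn(struct my_conn *c, struct l2t_data *d,
				 struct neighbour *neigh,
				 const struct net_device *physdev)
	{
		/* Takes a reference on a matching or newly allocated entry. */
		c->l2t = cxgb4_l2t_get(d, neigh, physdev, 0);
		return c->l2t ? 0 : -ENOMEM;
	}

	static void my_teardown_conn(struct my_conn *c)
	{
		/* Drops the reference; the entry becomes reusable once its
		 * refcnt reaches zero. */
		cxgb4_l2t_release(c->l2t);
		c->l2t = NULL;
	}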
diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h new file mode 100644 index 000000000000..643f27ed3cf4 --- /dev/null +++ b/drivers/net/cxgb4/l2t.h | |||
| @@ -0,0 +1,110 @@ | |||
| 1 | /* | ||
| 2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. | ||
| 5 | * | ||
| 6 | * This software is available to you under a choice of one of two | ||
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 8 | * General Public License (GPL) Version 2, available from the file | ||
| 9 | * COPYING in the main directory of this source tree, or the | ||
| 10 | * OpenIB.org BSD license below: | ||
| 11 | * | ||
| 12 | * Redistribution and use in source and binary forms, with or | ||
| 13 | * without modification, are permitted provided that the following | ||
| 14 | * conditions are met: | ||
| 15 | * | ||
| 16 | * - Redistributions of source code must retain the above | ||
| 17 | * copyright notice, this list of conditions and the following | ||
| 18 | * disclaimer. | ||
| 19 | * | ||
| 20 | * - Redistributions in binary form must reproduce the above | ||
| 21 | * copyright notice, this list of conditions and the following | ||
| 22 | * disclaimer in the documentation and/or other materials | ||
| 23 | * provided with the distribution. | ||
| 24 | * | ||
| 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 32 | * SOFTWARE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef __CXGB4_L2T_H | ||
| 36 | #define __CXGB4_L2T_H | ||
| 37 | |||
| 38 | #include <linux/spinlock.h> | ||
| 39 | #include <linux/if_ether.h> | ||
| 40 | #include <asm/atomic.h> | ||
| 41 | |||
| 42 | struct adapter; | ||
| 43 | struct l2t_data; | ||
| 44 | struct neighbour; | ||
| 45 | struct net_device; | ||
| 46 | struct file_operations; | ||
| 47 | struct cpl_l2t_write_rpl; | ||
| 48 | |||
| 49 | /* | ||
| 50 | * Each L2T entry plays multiple roles. First of all, it keeps state for the | ||
| 51 | * corresponding entry of the HW L2 table and maintains a queue of offload | ||
| 52 | * packets awaiting address resolution. Second, it is a node of a hash table | ||
| 53 | * chain, where the nodes of the chain are linked together through their next | ||
| 54 | * pointer. Finally, each node is a bucket of a hash table, pointing to the | ||
| 55 | * first element in its chain through its first pointer. | ||
| 56 | */ | ||
| 57 | struct l2t_entry { | ||
| 58 | u16 state; /* entry state */ | ||
| 59 | u16 idx; /* entry index */ | ||
| 60 | u32 addr[4]; /* next hop IP or IPv6 address */ | ||
| 61 | int ifindex; /* neighbor's net_device's ifindex */ | ||
| 62 | struct neighbour *neigh; /* associated neighbour */ | ||
| 63 | struct l2t_entry *first; /* start of hash chain */ | ||
| 64 | struct l2t_entry *next; /* next l2t_entry on chain */ | ||
| 65 | struct sk_buff *arpq_head; /* queue of packets awaiting resolution */ | ||
| 66 | struct sk_buff *arpq_tail; | ||
| 67 | spinlock_t lock; | ||
| 68 | atomic_t refcnt; /* entry reference count */ | ||
| 69 | u16 hash; /* hash bucket the entry is on */ | ||
| 70 | u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */ | ||
| 71 | u8 v6; /* whether entry is for IPv6 */ | ||
| 72 | u8 lport; /* associated offload logical interface */ | ||
| 73 | u8 dmac[ETH_ALEN]; /* neighbour's MAC address */ | ||
| 74 | }; | ||
| 75 | |||
| 76 | typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb); | ||
| 77 | |||
| 78 | /* | ||
| 79 | * Callback stored in an skb to handle address resolution failure. | ||
| 80 | */ | ||
| 81 | struct l2t_skb_cb { | ||
| 82 | void *handle; | ||
| 83 | arp_err_handler_t arp_err_handler; | ||
| 84 | }; | ||
| 85 | |||
| 86 | #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb) | ||
| 87 | |||
| 88 | static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle, | ||
| 89 | arp_err_handler_t handler) | ||
| 90 | { | ||
| 91 | L2T_SKB_CB(skb)->handle = handle; | ||
| 92 | L2T_SKB_CB(skb)->arp_err_handler = handler; | ||
| 93 | } | ||
| 94 | |||
| 95 | void cxgb4_l2t_release(struct l2t_entry *e); | ||
| 96 | int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, | ||
| 97 | struct l2t_entry *e); | ||
| 98 | struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, | ||
| 99 | const struct net_device *physdev, | ||
| 100 | unsigned int priority); | ||
| 101 | |||
| 102 | void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); | ||
| 103 | struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); | ||
| 104 | int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, | ||
| 105 | u8 port, u8 *eth_addr); | ||
| 106 | struct l2t_data *t4_init_l2t(void); | ||
| 107 | void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl); | ||
| 108 | |||
| 109 | extern const struct file_operations t4_l2t_fops; | ||
| 110 | #endif /* __CXGB4_L2T_H */ | ||
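To illustrate the l2t_skb_cb mechanism declared above, a small hypothetical sender is sketched below; my_arp_failure(), my_send_ofld_pkt() and my_ctx are made-up names, while t4_set_arp_err_handler() and cxgb4_l2t_send() come from this header:

	#include <linux/skbuff.h>
	#include "l2t.h"

	/* Invoked by the L2T code if the neighbour never resolves;
	 * the callback owns the skb at that point. */
	static void my_arp_failure(void *handle, struct sk_buff *skb)
	{
		kfree_skb(skb);
	}

	static int my_send_ofld_pkt(struct net_device *dev, struct sk_buff *skb,
				    struct l2t_entry *e, void *my_ctx)
	{
		/* Stash the callback in skb->cb so the failed-resolution
		 * path can find it later. */
		t4_set_arp_err_handler(skb, my_ctx, my_arp_failure);
		return cxgb4_l2t_send(dev, skb, e);
	}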
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c new file mode 100644 index 000000000000..14adc58e71c3 --- /dev/null +++ b/drivers/net/cxgb4/sge.c | |||
| @@ -0,0 +1,2431 @@ | |||
| 1 | /* | ||
| 2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. | ||
| 5 | * | ||
| 6 | * This software is available to you under a choice of one of two | ||
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 8 | * General Public License (GPL) Version 2, available from the file | ||
| 9 | * COPYING in the main directory of this source tree, or the | ||
| 10 | * OpenIB.org BSD license below: | ||
| 11 | * | ||
| 12 | * Redistribution and use in source and binary forms, with or | ||
| 13 | * without modification, are permitted provided that the following | ||
| 14 | * conditions are met: | ||
| 15 | * | ||
| 16 | * - Redistributions of source code must retain the above | ||
| 17 | * copyright notice, this list of conditions and the following | ||
| 18 | * disclaimer. | ||
| 19 | * | ||
| 20 | * - Redistributions in binary form must reproduce the above | ||
| 21 | * copyright notice, this list of conditions and the following | ||
| 22 | * disclaimer in the documentation and/or other materials | ||
| 23 | * provided with the distribution. | ||
| 24 | * | ||
| 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 32 | * SOFTWARE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #include <linux/skbuff.h> | ||
| 36 | #include <linux/netdevice.h> | ||
| 37 | #include <linux/etherdevice.h> | ||
| 38 | #include <linux/if_vlan.h> | ||
| 39 | #include <linux/ip.h> | ||
| 40 | #include <linux/dma-mapping.h> | ||
| 41 | #include <linux/jiffies.h> | ||
| 42 | #include <net/ipv6.h> | ||
| 43 | #include <net/tcp.h> | ||
| 44 | #include "cxgb4.h" | ||
| 45 | #include "t4_regs.h" | ||
| 46 | #include "t4_msg.h" | ||
| 47 | #include "t4fw_api.h" | ||
| 48 | |||
| 49 | /* | ||
| 50 | * Rx buffer size. We use largish buffers if possible but settle for single | ||
| 51 | * pages under memory shortage. | ||
| 52 | */ | ||
| 53 | #if PAGE_SHIFT >= 16 | ||
| 54 | # define FL_PG_ORDER 0 | ||
| 55 | #else | ||
| 56 | # define FL_PG_ORDER (16 - PAGE_SHIFT) | ||
| 57 | #endif | ||
| 58 | |||
| 59 | /* RX_PULL_LEN should be <= RX_COPY_THRES */ | ||
| 60 | #define RX_COPY_THRES 256 | ||
| 61 | #define RX_PULL_LEN 128 | ||
| 62 | |||
| 63 | /* | ||
| 64 | * Main body length for sk_buffs used for Rx Ethernet packets with fragments. | ||
| 65 | * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room. | ||
| 66 | */ | ||
| 67 | #define RX_PKT_SKB_LEN 512 | ||
| 68 | |||
| 69 | /* Ethernet header padding prepended to RX_PKTs */ | ||
| 70 | #define RX_PKT_PAD 2 | ||
| 71 | |||
| 72 | /* | ||
| 73 | * Max number of Tx descriptors we clean up at a time. Should be modest as | ||
| 74 | * freeing skbs isn't cheap and it happens while holding locks. As long as we | ||
| 75 | * free packets faster than they arrive, we eventually catch up and keep | ||
| 76 | * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. | ||
| 77 | */ | ||
| 78 | #define MAX_TX_RECLAIM 16 | ||
| 79 | |||
| 80 | /* | ||
| 81 | * Max number of Rx buffers we replenish at a time. Again keep this modest, | ||
| 82 | * allocating buffers isn't cheap either. | ||
| 83 | */ | ||
| 84 | #define MAX_RX_REFILL 16U | ||
| 85 | |||
| 86 | /* | ||
| 87 | * Period of the Rx queue check timer. This timer runs infrequently, as it | ||
| 88 | * has work to do only when the system experiences a severe memory shortage. | ||
| 89 | */ | ||
| 90 | #define RX_QCHECK_PERIOD (HZ / 2) | ||
| 91 | |||
| 92 | /* | ||
| 93 | * Period of the Tx queue check timer. | ||
| 94 | */ | ||
| 95 | #define TX_QCHECK_PERIOD (HZ / 2) | ||
| 96 | |||
| 97 | /* | ||
| 98 | * Max number of Tx descriptors to be reclaimed by the Tx timer. | ||
| 99 | */ | ||
| 100 | #define MAX_TIMER_TX_RECLAIM 100 | ||
| 101 | |||
| 102 | /* | ||
| 103 | * Timer index used when backing off due to memory shortage. | ||
| 104 | */ | ||
| 105 | #define NOMEM_TMR_IDX (SGE_NTIMERS - 1) | ||
| 106 | |||
| 107 | /* | ||
| 108 | * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will | ||
| 109 | * attempt to refill it. | ||
| 110 | */ | ||
| 111 | #define FL_STARVE_THRES 4 | ||
| 112 | |||
| 113 | /* | ||
| 114 | * Suspend an Ethernet Tx queue with fewer available descriptors than this. | ||
| 115 | * This is the same as calc_tx_descs() for a TSO packet with | ||
| 116 | * nr_frags == MAX_SKB_FRAGS. | ||
| 117 | */ | ||
| 118 | #define ETHTXQ_STOP_THRES \ | ||
| 119 | (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8)) | ||
| 120 | |||
| 121 | /* | ||
| 122 | * Suspension threshold for non-Ethernet Tx queues. We require enough room | ||
| 123 | * for a full sized WR. | ||
| 124 | */ | ||
| 125 | #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc)) | ||
| 126 | |||
| 127 | /* | ||
| 128 | * Max Tx descriptor space we allow for an Ethernet packet to be inlined | ||
| 129 | * into a WR. | ||
| 130 | */ | ||
| 131 | #define MAX_IMM_TX_PKT_LEN 128 | ||
| 132 | |||
| 133 | /* | ||
| 134 | * Max size of a WR sent through a control Tx queue. | ||
| 135 | */ | ||
| 136 | #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN | ||
| 137 | |||
| 138 | enum { | ||
| 139 | /* packet alignment in FL buffers */ | ||
| 140 | FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES, | ||
| 141 | /* egress status entry size */ | ||
| 142 | STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64 | ||
| 143 | }; | ||
| 144 | |||
| 145 | struct tx_sw_desc { /* SW state per Tx descriptor */ | ||
| 146 | struct sk_buff *skb; | ||
| 147 | struct ulptx_sgl *sgl; | ||
| 148 | }; | ||
| 149 | |||
| 150 | struct rx_sw_desc { /* SW state per Rx descriptor */ | ||
| 151 | struct page *page; | ||
| 152 | dma_addr_t dma_addr; | ||
| 153 | }; | ||
| 154 | |||
| 155 | /* | ||
| 156 | * The low bits of rx_sw_desc.dma_addr have special meaning. | ||
| 157 | */ | ||
| 158 | enum { | ||
| 159 | RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */ | ||
| 160 | RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ | ||
| 161 | }; | ||
| 162 | |||
| 163 | static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d) | ||
| 164 | { | ||
| 165 | return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); | ||
| 166 | } | ||
| 167 | |||
| 168 | static inline bool is_buf_mapped(const struct rx_sw_desc *d) | ||
| 169 | { | ||
| 170 | return !(d->dma_addr & RX_UNMAPPED_BUF); | ||
| 171 | } | ||
| 172 | |||
| 173 | /** | ||
| 174 | * txq_avail - return the number of available slots in a Tx queue | ||
| 175 | * @q: the Tx queue | ||
| 176 | * | ||
| 177 | * Returns the number of descriptors in a Tx queue available to write new | ||
| 178 | * packets. | ||
| 179 | */ | ||
| 180 | static inline unsigned int txq_avail(const struct sge_txq *q) | ||
| 181 | { | ||
| 182 | return q->size - 1 - q->in_use; | ||
| 183 | } | ||
| 184 | |||
| 185 | /** | ||
| 186 | * fl_cap - return the capacity of a free-buffer list | ||
| 187 | * @fl: the FL | ||
| 188 | * | ||
| 189 | * Returns the capacity of a free-buffer list. The capacity is less than | ||
| 190 | * the size because one descriptor needs to be left unpopulated, otherwise | ||
| 191 | * HW will think the FL is empty. | ||
| 192 | */ | ||
| 193 | static inline unsigned int fl_cap(const struct sge_fl *fl) | ||
| 194 | { | ||
| 195 | return fl->size - 8; /* 1 descriptor = 8 buffers */ | ||
| 196 | } | ||
| 197 | |||
| 198 | static inline bool fl_starving(const struct sge_fl *fl) | ||
| 199 | { | ||
| 200 | return fl->avail - fl->pend_cred <= FL_STARVE_THRES; | ||
| 201 | } | ||
| 202 | |||
| 203 | static int map_skb(struct device *dev, const struct sk_buff *skb, | ||
| 204 | dma_addr_t *addr) | ||
| 205 | { | ||
| 206 | const skb_frag_t *fp, *end; | ||
| 207 | const struct skb_shared_info *si; | ||
| 208 | |||
| 209 | *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); | ||
| 210 | if (dma_mapping_error(dev, *addr)) | ||
| 211 | goto out_err; | ||
| 212 | |||
| 213 | si = skb_shinfo(skb); | ||
| 214 | end = &si->frags[si->nr_frags]; | ||
| 215 | |||
| 216 | for (fp = si->frags; fp < end; fp++) { | ||
| 217 | *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size, | ||
| 218 | DMA_TO_DEVICE); | ||
| 219 | if (dma_mapping_error(dev, *addr)) | ||
| 220 | goto unwind; | ||
| 221 | } | ||
| 222 | return 0; | ||
| 223 | |||
| 224 | unwind: | ||
| 225 | while (fp-- > si->frags) | ||
| 226 | dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE); | ||
| 227 | |||
| 228 | dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); | ||
| 229 | out_err: | ||
| 230 | return -ENOMEM; | ||
| 231 | } | ||
| 232 | |||
| 233 | #ifdef CONFIG_NEED_DMA_MAP_STATE | ||
| 234 | static void unmap_skb(struct device *dev, const struct sk_buff *skb, | ||
| 235 | const dma_addr_t *addr) | ||
| 236 | { | ||
| 237 | const skb_frag_t *fp, *end; | ||
| 238 | const struct skb_shared_info *si; | ||
| 239 | |||
| 240 | dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE); | ||
| 241 | |||
| 242 | si = skb_shinfo(skb); | ||
| 243 | end = &si->frags[si->nr_frags]; | ||
| 244 | for (fp = si->frags; fp < end; fp++) | ||
| 245 | dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE); | ||
| 246 | } | ||
| 247 | |||
| 248 | /** | ||
| 249 | * deferred_unmap_destructor - unmap a packet when it is freed | ||
| 250 | * @skb: the packet | ||
| 251 | * | ||
| 252 | * This is the packet destructor used for Tx packets that need to remain | ||
| 253 | * mapped until they are freed rather than until their Tx descriptors are | ||
| 254 | * freed. | ||
| 255 | */ | ||
| 256 | static void deferred_unmap_destructor(struct sk_buff *skb) | ||
| 257 | { | ||
| 258 | unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head); | ||
| 259 | } | ||
| 260 | #endif | ||
| 261 | |||
| 262 | static void unmap_sgl(struct device *dev, const struct sk_buff *skb, | ||
| 263 | const struct ulptx_sgl *sgl, const struct sge_txq *q) | ||
| 264 | { | ||
| 265 | const struct ulptx_sge_pair *p; | ||
| 266 | unsigned int nfrags = skb_shinfo(skb)->nr_frags; | ||
| 267 | |||
| 268 | if (likely(skb_headlen(skb))) | ||
| 269 | dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), | ||
| 270 | DMA_TO_DEVICE); | ||
| 271 | else { | ||
| 272 | dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), | ||
| 273 | DMA_TO_DEVICE); | ||
| 274 | nfrags--; | ||
| 275 | } | ||
| 276 | |||
| 277 | /* | ||
| 278 | * the complexity below is because of the possibility of a wrap-around | ||
| 279 | * in the middle of an SGL | ||
| 280 | */ | ||
| 281 | for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { | ||
| 282 | if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) { | ||
| 283 | unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]), | ||
| 284 | ntohl(p->len[0]), DMA_TO_DEVICE); | ||
| 285 | dma_unmap_page(dev, be64_to_cpu(p->addr[1]), | ||
| 286 | ntohl(p->len[1]), DMA_TO_DEVICE); | ||
| 287 | p++; | ||
| 288 | } else if ((u8 *)p == (u8 *)q->stat) { | ||
| 289 | p = (const struct ulptx_sge_pair *)q->desc; | ||
| 290 | goto unmap; | ||
| 291 | } else if ((u8 *)p + 8 == (u8 *)q->stat) { | ||
| 292 | const __be64 *addr = (const __be64 *)q->desc; | ||
| 293 | |||
| 294 | dma_unmap_page(dev, be64_to_cpu(addr[0]), | ||
| 295 | ntohl(p->len[0]), DMA_TO_DEVICE); | ||
| 296 | dma_unmap_page(dev, be64_to_cpu(addr[1]), | ||
| 297 | ntohl(p->len[1]), DMA_TO_DEVICE); | ||
| 298 | p = (const struct ulptx_sge_pair *)&addr[2]; | ||
| 299 | } else { | ||
| 300 | const __be64 *addr = (const __be64 *)q->desc; | ||
| 301 | |||
| 302 | dma_unmap_page(dev, be64_to_cpu(p->addr[0]), | ||
| 303 | ntohl(p->len[0]), DMA_TO_DEVICE); | ||
| 304 | dma_unmap_page(dev, be64_to_cpu(addr[0]), | ||
| 305 | ntohl(p->len[1]), DMA_TO_DEVICE); | ||
| 306 | p = (const struct ulptx_sge_pair *)&addr[1]; | ||
| 307 | } | ||
| 308 | } | ||
| 309 | if (nfrags) { | ||
| 310 | __be64 addr; | ||
| 311 | |||
| 312 | if ((u8 *)p == (u8 *)q->stat) | ||
| 313 | p = (const struct ulptx_sge_pair *)q->desc; | ||
| 314 | addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] : | ||
| 315 | *(const __be64 *)q->desc; | ||
| 316 | dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]), | ||
| 317 | DMA_TO_DEVICE); | ||
| 318 | } | ||
| 319 | } | ||
| 320 | |||
| 321 | /** | ||
| 322 | * free_tx_desc - reclaims Tx descriptors and their buffers | ||
| 323 | * @adap: the adapter | ||
| 324 | * @q: the Tx queue to reclaim descriptors from | ||
| 325 | * @n: the number of descriptors to reclaim | ||
| 326 | * @unmap: whether the buffers should be unmapped for DMA | ||
| 327 | * | ||
| 328 | * Reclaims Tx descriptors from an SGE Tx queue and frees the associated | ||
| 329 | * Tx buffers. Called with the Tx queue lock held. | ||
| 330 | */ | ||
| 331 | static void free_tx_desc(struct adapter *adap, struct sge_txq *q, | ||
| 332 | unsigned int n, bool unmap) | ||
| 333 | { | ||
| 334 | struct tx_sw_desc *d; | ||
| 335 | unsigned int cidx = q->cidx; | ||
| 336 | struct device *dev = adap->pdev_dev; | ||
| 337 | |||
| 338 | d = &q->sdesc[cidx]; | ||
| 339 | while (n--) { | ||
| 340 | if (d->skb) { /* an SGL is present */ | ||
| 341 | if (unmap) | ||
| 342 | unmap_sgl(dev, d->skb, d->sgl, q); | ||
| 343 | kfree_skb(d->skb); | ||
| 344 | d->skb = NULL; | ||
| 345 | } | ||
| 346 | ++d; | ||
| 347 | if (++cidx == q->size) { | ||
| 348 | cidx = 0; | ||
| 349 | d = q->sdesc; | ||
| 350 | } | ||
| 351 | } | ||
| 352 | q->cidx = cidx; | ||
| 353 | } | ||
| 354 | |||
| 355 | /* | ||
| 356 | * Return the number of reclaimable descriptors in a Tx queue. | ||
| 357 | */ | ||
| 358 | static inline int reclaimable(const struct sge_txq *q) | ||
| 359 | { | ||
| 360 | int hw_cidx = ntohs(q->stat->cidx); | ||
| 361 | hw_cidx -= q->cidx; | ||
| 362 | return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; | ||
| 363 | } | ||
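A quick worked example of the wrap-around arithmetic above (illustrative numbers only): with q->size = 1024, a SW cidx of 1000 and a HW cidx of 8, the subtraction gives 8 - 1000 = -992; adding the ring size yields -992 + 1024 = 32, i.e. 32 descriptors have been completed by the hardware and are reclaimable.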
| 364 | |||
| 365 | /** | ||
| 366 | * reclaim_completed_tx - reclaims completed Tx descriptors | ||
| 367 | * @adap: the adapter | ||
| 368 | * @q: the Tx queue to reclaim completed descriptors from | ||
| 369 | * @unmap: whether the buffers should be unmapped for DMA | ||
| 370 | * | ||
| 371 | * Reclaims Tx descriptors that the SGE has indicated it has processed, | ||
| 372 | * and frees the associated buffers if possible. Called with the Tx | ||
| 373 | * queue locked. | ||
| 374 | */ | ||
| 375 | static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, | ||
| 376 | bool unmap) | ||
| 377 | { | ||
| 378 | int avail = reclaimable(q); | ||
| 379 | |||
| 380 | if (avail) { | ||
| 381 | /* | ||
| 382 | * Limit the amount of clean up work we do at a time to keep | ||
| 383 | * the Tx lock hold time O(1). | ||
| 384 | */ | ||
| 385 | if (avail > MAX_TX_RECLAIM) | ||
| 386 | avail = MAX_TX_RECLAIM; | ||
| 387 | |||
| 388 | free_tx_desc(adap, q, avail, unmap); | ||
| 389 | q->in_use -= avail; | ||
| 390 | } | ||
| 391 | } | ||
| 392 | |||
| 393 | static inline int get_buf_size(const struct rx_sw_desc *d) | ||
| 394 | { | ||
| 395 | #if FL_PG_ORDER > 0 | ||
| 396 | return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) : | ||
| 397 | PAGE_SIZE; | ||
| 398 | #else | ||
| 399 | return PAGE_SIZE; | ||
| 400 | #endif | ||
| 401 | } | ||
| 402 | |||
| 403 | /** | ||
| 404 | * free_rx_bufs - free the Rx buffers on an SGE free list | ||
| 405 | * @adap: the adapter | ||
| 406 | * @q: the SGE free list to free buffers from | ||
| 407 | * @n: how many buffers to free | ||
| 408 | * | ||
| 409 | * Release the next @n buffers on an SGE free-buffer Rx queue. The | ||
| 410 | * buffers must be made inaccessible to HW before calling this function. | ||
| 411 | */ | ||
| 412 | static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) | ||
| 413 | { | ||
| 414 | while (n--) { | ||
| 415 | struct rx_sw_desc *d = &q->sdesc[q->cidx]; | ||
| 416 | |||
| 417 | if (is_buf_mapped(d)) | ||
| 418 | dma_unmap_page(adap->pdev_dev, get_buf_addr(d), | ||
| 419 | get_buf_size(d), PCI_DMA_FROMDEVICE); | ||
| 420 | put_page(d->page); | ||
| 421 | d->page = NULL; | ||
| 422 | if (++q->cidx == q->size) | ||
| 423 | q->cidx = 0; | ||
| 424 | q->avail--; | ||
| 425 | } | ||
| 426 | } | ||
| 427 | |||
| 428 | /** | ||
| 429 | * unmap_rx_buf - unmap the current Rx buffer on an SGE free list | ||
| 430 | * @adap: the adapter | ||
| 431 | * @q: the SGE free list | ||
| 432 | * | ||
| 433 | * Unmap the current buffer on an SGE free-buffer Rx queue. The | ||
| 434 | * buffer must be made inaccessible to HW before calling this function. | ||
| 435 | * | ||
| 436 | * This is similar to @free_rx_bufs above but does not free the buffer. | ||
| 437 | * Do note that the FL still loses any further access to the buffer. | ||
| 438 | */ | ||
| 439 | static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) | ||
| 440 | { | ||
| 441 | struct rx_sw_desc *d = &q->sdesc[q->cidx]; | ||
| 442 | |||
| 443 | if (is_buf_mapped(d)) | ||
| 444 | dma_unmap_page(adap->pdev_dev, get_buf_addr(d), | ||
| 445 | get_buf_size(d), PCI_DMA_FROMDEVICE); | ||
| 446 | d->page = NULL; | ||
| 447 | if (++q->cidx == q->size) | ||
| 448 | q->cidx = 0; | ||
| 449 | q->avail--; | ||
| 450 | } | ||
| 451 | |||
| 452 | static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) | ||
| 453 | { | ||
| 454 | if (q->pend_cred >= 8) { | ||
| 455 | wmb(); | ||
| 456 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO | | ||
| 457 | QID(q->cntxt_id) | PIDX(q->pend_cred / 8)); | ||
| 458 | q->pend_cred &= 7; | ||
| 459 | } | ||
| 460 | } | ||
| 461 | |||
| 462 | static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg, | ||
| 463 | dma_addr_t mapping) | ||
| 464 | { | ||
| 465 | sd->page = pg; | ||
| 466 | sd->dma_addr = mapping; /* includes size low bits */ | ||
| 467 | } | ||
| 468 | |||
| 469 | /** | ||
| 470 | * refill_fl - refill an SGE Rx buffer ring | ||
| 471 | * @adap: the adapter | ||
| 472 | * @q: the ring to refill | ||
| 473 | * @n: the number of new buffers to allocate | ||
| 474 | * @gfp: the gfp flags for the allocations | ||
| 475 | * | ||
| 476 | * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, | ||
| 477 | * allocated with the supplied gfp flags. The caller must ensure that | ||
| 478 | * @n does not exceed the queue's capacity. If afterwards the queue is | ||
| 479 | * found critically low mark it as starving in the bitmap of starving FLs. | ||
| 480 | * | ||
| 481 | * Returns the number of buffers allocated. | ||
| 482 | */ | ||
| 483 | static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, | ||
| 484 | gfp_t gfp) | ||
| 485 | { | ||
| 486 | struct page *pg; | ||
| 487 | dma_addr_t mapping; | ||
| 488 | unsigned int cred = q->avail; | ||
| 489 | __be64 *d = &q->desc[q->pidx]; | ||
| 490 | struct rx_sw_desc *sd = &q->sdesc[q->pidx]; | ||
| 491 | |||
| 492 | gfp |= __GFP_NOWARN; /* failures are expected */ | ||
| 493 | |||
| 494 | #if FL_PG_ORDER > 0 | ||
| 495 | /* | ||
| 496 | * Prefer large buffers | ||
| 497 | */ | ||
| 498 | while (n) { | ||
| 499 | pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER); | ||
| 500 | if (unlikely(!pg)) { | ||
| 501 | q->large_alloc_failed++; | ||
| 502 | break; /* fall back to single pages */ | ||
| 503 | } | ||
| 504 | |||
| 505 | mapping = dma_map_page(adap->pdev_dev, pg, 0, | ||
| 506 | PAGE_SIZE << FL_PG_ORDER, | ||
| 507 | PCI_DMA_FROMDEVICE); | ||
| 508 | if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { | ||
| 509 | __free_pages(pg, FL_PG_ORDER); | ||
| 510 | goto out; /* do not try small pages for this error */ | ||
| 511 | } | ||
| 512 | mapping |= RX_LARGE_BUF; | ||
| 513 | *d++ = cpu_to_be64(mapping); | ||
| 514 | |||
| 515 | set_rx_sw_desc(sd, pg, mapping); | ||
| 516 | sd++; | ||
| 517 | |||
| 518 | q->avail++; | ||
| 519 | if (++q->pidx == q->size) { | ||
| 520 | q->pidx = 0; | ||
| 521 | sd = q->sdesc; | ||
| 522 | d = q->desc; | ||
| 523 | } | ||
| 524 | n--; | ||
| 525 | } | ||
| 526 | #endif | ||
| 527 | |||
| 528 | while (n--) { | ||
| 529 | pg = __netdev_alloc_page(adap->port[0], gfp); | ||
| 530 | if (unlikely(!pg)) { | ||
| 531 | q->alloc_failed++; | ||
| 532 | break; | ||
| 533 | } | ||
| 534 | |||
| 535 | mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, | ||
| 536 | PCI_DMA_FROMDEVICE); | ||
| 537 | if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { | ||
| 538 | netdev_free_page(adap->port[0], pg); | ||
| 539 | goto out; | ||
| 540 | } | ||
| 541 | *d++ = cpu_to_be64(mapping); | ||
| 542 | |||
| 543 | set_rx_sw_desc(sd, pg, mapping); | ||
| 544 | sd++; | ||
| 545 | |||
| 546 | q->avail++; | ||
| 547 | if (++q->pidx == q->size) { | ||
| 548 | q->pidx = 0; | ||
| 549 | sd = q->sdesc; | ||
| 550 | d = q->desc; | ||
| 551 | } | ||
| 552 | } | ||
| 553 | |||
| 554 | out: cred = q->avail - cred; | ||
| 555 | q->pend_cred += cred; | ||
| 556 | ring_fl_db(adap, q); | ||
| 557 | |||
| 558 | if (unlikely(fl_starving(q))) { | ||
| 559 | smp_wmb(); | ||
| 560 | set_bit(q->cntxt_id, adap->sge.starving_fl); | ||
| 561 | } | ||
| 562 | |||
| 563 | return cred; | ||
| 564 | } | ||
| 565 | |||
| 566 | static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) | ||
| 567 | { | ||
| 568 | refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), | ||
| 569 | GFP_ATOMIC); | ||
| 570 | } | ||
| 571 | |||
| 572 | /** | ||
| 573 | * alloc_ring - allocate resources for an SGE descriptor ring | ||
| 574 | * @dev: the PCI device's core device | ||
| 575 | * @nelem: the number of descriptors | ||
| 576 | * @elem_size: the size of each descriptor | ||
| 577 | * @sw_size: the size of the SW state associated with each ring element | ||
| 578 | * @phys: the physical address of the allocated ring | ||
| 579 | * @metadata: address of the array holding the SW state for the ring | ||
| 580 | * @stat_size: extra space in HW ring for status information | ||
| 581 | * | ||
| 582 | * Allocates resources for an SGE descriptor ring, such as Tx queues, | ||
| 583 | * free buffer lists, or response queues. Each SGE ring requires | ||
| 584 | * space for its HW descriptors plus, optionally, space for the SW state | ||
| 585 | * associated with each HW entry (the metadata). The function returns | ||
| 586 | * three values: the virtual address for the HW ring (the return value | ||
| 587 | * of the function), the bus address of the HW ring, and the address | ||
| 588 | * of the SW ring. | ||
| 589 | */ | ||
| 590 | static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, | ||
| 591 | size_t sw_size, dma_addr_t *phys, void *metadata, | ||
| 592 | size_t stat_size) | ||
| 593 | { | ||
| 594 | size_t len = nelem * elem_size + stat_size; | ||
| 595 | void *s = NULL; | ||
| 596 | void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); | ||
| 597 | |||
| 598 | if (!p) | ||
| 599 | return NULL; | ||
| 600 | if (sw_size) { | ||
| 601 | s = kcalloc(nelem, sw_size, GFP_KERNEL); | ||
| 602 | |||
| 603 | if (!s) { | ||
| 604 | dma_free_coherent(dev, len, p, *phys); | ||
| 605 | return NULL; | ||
| 606 | } | ||
| 607 | } | ||
| 608 | if (metadata) | ||
| 609 | *(void **)metadata = s; | ||
| 610 | memset(p, 0, len); | ||
| 611 | return p; | ||
| 612 | } | ||
| 613 | |||
| 614 | /** | ||
| 615 | * sgl_len - calculates the size of an SGL of the given capacity | ||
| 616 | * @n: the number of SGL entries | ||
| 617 | * | ||
| 618 | * Calculates the number of flits needed for a scatter/gather list that | ||
| 619 | * can hold the given number of entries. | ||
| 620 | */ | ||
| 621 | static inline unsigned int sgl_len(unsigned int n) | ||
| 622 | { | ||
| 623 | n--; | ||
| 624 | return (3 * n) / 2 + (n & 1) + 2; | ||
| 625 | } | ||
| 626 | |||
| 627 | /** | ||
| 628 | * flits_to_desc - returns the num of Tx descriptors for the given flits | ||
| 629 | * @n: the number of flits | ||
| 630 | * | ||
| 631 | * Returns the number of Tx descriptors needed for the supplied number | ||
| 632 | * of flits. | ||
| 633 | */ | ||
| 634 | static inline unsigned int flits_to_desc(unsigned int n) | ||
| 635 | { | ||
| 636 | BUG_ON(n > SGE_MAX_WR_LEN / 8); | ||
| 637 | return DIV_ROUND_UP(n, 8); | ||
| 638 | } | ||
| 639 | |||
| 640 | /** | ||
| 641 | * is_eth_imm - can an Ethernet packet be sent as immediate data? | ||
| 642 | * @skb: the packet | ||
| 643 | * | ||
| 644 | * Returns whether an Ethernet packet is small enough to fit as | ||
| 645 | * immediate data. | ||
| 646 | */ | ||
| 647 | static inline int is_eth_imm(const struct sk_buff *skb) | ||
| 648 | { | ||
| 649 | return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt); | ||
| 650 | } | ||
| 651 | |||
| 652 | /** | ||
| 653 | * calc_tx_flits - calculate the number of flits for a packet Tx WR | ||
| 654 | * @skb: the packet | ||
| 655 | * | ||
| 656 | * Returns the number of flits needed for a Tx WR for the given Ethernet | ||
| 657 | * packet, including the needed WR and CPL headers. | ||
| 658 | */ | ||
| 659 | static inline unsigned int calc_tx_flits(const struct sk_buff *skb) | ||
| 660 | { | ||
| 661 | unsigned int flits; | ||
| 662 | |||
| 663 | if (is_eth_imm(skb)) | ||
| 664 | return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8); | ||
| 665 | |||
| 666 | flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4; | ||
| 667 | if (skb_shinfo(skb)->gso_size) | ||
| 668 | flits += 2; | ||
| 669 | return flits; | ||
| 670 | } | ||
| 671 | |||
| 672 | /** | ||
| 673 | * calc_tx_descs - calculate the number of Tx descriptors for a packet | ||
| 674 | * @skb: the packet | ||
| 675 | * | ||
| 676 | * Returns the number of Tx descriptors needed for the given Ethernet | ||
| 677 | * packet, including the needed WR and CPL headers. | ||
| 678 | */ | ||
| 679 | static inline unsigned int calc_tx_descs(const struct sk_buff *skb) | ||
| 680 | { | ||
| 681 | return flits_to_desc(calc_tx_flits(skb)); | ||
| 682 | } | ||
| 683 | |||
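Tying the helpers above back to ETHTXQ_STOP_THRES, here is a worked example under the assumption of 4 KB pages (so MAX_SKB_FRAGS == 18): a worst-case TSO packet with a linear head plus 18 fragments needs 19 SGL entries, so

	sgl_len(19)       = (3 * 18) / 2 + (18 & 1) + 2 = 29 flits
	calc_tx_flits()   = 29 + 4 (WR + CPL) + 2 (LSO) = 35 flits
	flits_to_desc(35) = DIV_ROUND_UP(35, 8)         = 5 descriptors

which matches ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(27, 8) = 5, i.e. the queue is stopped while there is still room for one such worst-case packet.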
| 684 | /** | ||
| 685 | * write_sgl - populate a scatter/gather list for a packet | ||
| 686 | * @skb: the packet | ||
| 687 | * @q: the Tx queue we are writing into | ||
| 688 | * @sgl: starting location for writing the SGL | ||
| 689 | * @end: points right after the end of the SGL | ||
| 690 | * @start: start offset into skb main-body data to include in the SGL | ||
| 691 | * @addr: the list of bus addresses for the SGL elements | ||
| 692 | * | ||
| 693 | * Generates a gather list for the buffers that make up a packet. | ||
| 694 | * The caller must provide adequate space for the SGL that will be written. | ||
| 695 | * The SGL includes all of the packet's page fragments and the data in its | ||
| 696 | * main body except for the first @start bytes. @sgl must be 16-byte | ||
| 697 | * aligned and within a Tx descriptor with available space. @end points | ||
| 698 | * right after the end of the SGL but does not account for any potential | ||
| 699 | * wrap around, i.e., @end > @sgl. | ||
| 700 | */ | ||
| 701 | static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, | ||
| 702 | struct ulptx_sgl *sgl, u64 *end, unsigned int start, | ||
| 703 | const dma_addr_t *addr) | ||
| 704 | { | ||
| 705 | unsigned int i, len; | ||
| 706 | struct ulptx_sge_pair *to; | ||
| 707 | const struct skb_shared_info *si = skb_shinfo(skb); | ||
| 708 | unsigned int nfrags = si->nr_frags; | ||
| 709 | struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; | ||
| 710 | |||
| 711 | len = skb_headlen(skb) - start; | ||
| 712 | if (likely(len)) { | ||
| 713 | sgl->len0 = htonl(len); | ||
| 714 | sgl->addr0 = cpu_to_be64(addr[0] + start); | ||
| 715 | nfrags++; | ||
| 716 | } else { | ||
| 717 | sgl->len0 = htonl(si->frags[0].size); | ||
| 718 | sgl->addr0 = cpu_to_be64(addr[1]); | ||
| 719 | } | ||
| 720 | |||
| 721 | sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); | ||
| 722 | if (likely(--nfrags == 0)) | ||
| 723 | return; | ||
| 724 | /* | ||
| 725 | * Most of the complexity below deals with the possibility we hit the | ||
| 726 | * end of the queue in the middle of writing the SGL. For this case | ||
| 727 | * only we create the SGL in a temporary buffer and then copy it. | ||
| 728 | */ | ||
| 729 | to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; | ||
| 730 | |||
| 731 | for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { | ||
| 732 | to->len[0] = cpu_to_be32(si->frags[i].size); | ||
| 733 | to->len[1] = cpu_to_be32(si->frags[++i].size); | ||
| 734 | to->addr[0] = cpu_to_be64(addr[i]); | ||
| 735 | to->addr[1] = cpu_to_be64(addr[++i]); | ||
| 736 | } | ||
| 737 | if (nfrags) { | ||
| 738 | to->len[0] = cpu_to_be32(si->frags[i].size); | ||
| 739 | to->len[1] = cpu_to_be32(0); | ||
| 740 | to->addr[0] = cpu_to_be64(addr[i + 1]); | ||
| 741 | } | ||
| 742 | if (unlikely((u8 *)end > (u8 *)q->stat)) { | ||
| 743 | unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; | ||
| 744 | |||
| 745 | if (likely(part0)) | ||
| 746 | memcpy(sgl->sge, buf, part0); | ||
| 747 | part1 = (u8 *)end - (u8 *)q->stat; | ||
| 748 | memcpy(q->desc, (u8 *)buf + part0, part1); | ||
| 749 | end = (void *)q->desc + part1; | ||
| 750 | } | ||
| 751 | if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ | ||
| 752 | *(u64 *)end = 0; | ||
| 753 | } | ||
| 754 | |||
| 755 | /** | ||
| 756 | * ring_tx_db - check and potentially ring a Tx queue's doorbell | ||
| 757 | * @adap: the adapter | ||
| 758 | * @q: the Tx queue | ||
| 759 | * @n: number of new descriptors to give to HW | ||
| 760 | * | ||
| 761 | * Ring the doorbell for a Tx queue. | ||
| 762 | */ | ||
| 763 | static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) | ||
| 764 | { | ||
| 765 | wmb(); /* write descriptors before telling HW */ | ||
| 766 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), | ||
| 767 | QID(q->cntxt_id) | PIDX(n)); | ||
| 768 | } | ||
| 769 | |||
| 770 | /** | ||
| 771 | * inline_tx_skb - inline a packet's data into Tx descriptors | ||
| 772 | * @skb: the packet | ||
| 773 | * @q: the Tx queue where the packet will be inlined | ||
| 774 | * @pos: starting position in the Tx queue where to inline the packet | ||
| 775 | * | ||
| 776 | * Inline a packet's contents directly into Tx descriptors, starting at | ||
| 777 | * the given position within the Tx DMA ring. | ||
| 778 | * Most of the complexity of this operation is dealing with wrap arounds | ||
| 779 | * in the middle of the packet we want to inline. | ||
| 780 | */ | ||
| 781 | static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, | ||
| 782 | void *pos) | ||
| 783 | { | ||
| 784 | u64 *p; | ||
| 785 | int left = (void *)q->stat - pos; | ||
| 786 | |||
| 787 | if (likely(skb->len <= left)) { | ||
| 788 | if (likely(!skb->data_len)) | ||
| 789 | skb_copy_from_linear_data(skb, pos, skb->len); | ||
| 790 | else | ||
| 791 | skb_copy_bits(skb, 0, pos, skb->len); | ||
| 792 | pos += skb->len; | ||
| 793 | } else { | ||
| 794 | skb_copy_bits(skb, 0, pos, left); | ||
| 795 | skb_copy_bits(skb, left, q->desc, skb->len - left); | ||
| 796 | pos = (void *)q->desc + (skb->len - left); | ||
| 797 | } | ||
| 798 | |||
| 799 | /* 0-pad to multiple of 16 */ | ||
| 800 | p = PTR_ALIGN(pos, 8); | ||
| 801 | if ((uintptr_t)p & 8) | ||
| 802 | *p = 0; | ||
| 803 | } | ||
| 804 | |||
| 805 | /* | ||
| 806 | * Figure out what HW csum a packet wants and return the appropriate control | ||
| 807 | * bits. | ||
| 808 | */ | ||
| 809 | static u64 hwcsum(const struct sk_buff *skb) | ||
| 810 | { | ||
| 811 | int csum_type; | ||
| 812 | const struct iphdr *iph = ip_hdr(skb); | ||
| 813 | |||
| 814 | if (iph->version == 4) { | ||
| 815 | if (iph->protocol == IPPROTO_TCP) | ||
| 816 | csum_type = TX_CSUM_TCPIP; | ||
| 817 | else if (iph->protocol == IPPROTO_UDP) | ||
| 818 | csum_type = TX_CSUM_UDPIP; | ||
| 819 | else { | ||
| 820 | nocsum: /* | ||
| 821 | * unknown protocol, disable HW csum | ||
| 822 | * and hope a bad packet is detected | ||
| 823 | */ | ||
| 824 | return TXPKT_L4CSUM_DIS; | ||
| 825 | } | ||
| 826 | } else { | ||
| 827 | /* | ||
| 828 | * this doesn't work with extension headers | ||
| 829 | */ | ||
| 830 | const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; | ||
| 831 | |||
| 832 | if (ip6h->nexthdr == IPPROTO_TCP) | ||
| 833 | csum_type = TX_CSUM_TCPIP6; | ||
| 834 | else if (ip6h->nexthdr == IPPROTO_UDP) | ||
| 835 | csum_type = TX_CSUM_UDPIP6; | ||
| 836 | else | ||
| 837 | goto nocsum; | ||
| 838 | } | ||
| 839 | |||
| 840 | if (likely(csum_type >= TX_CSUM_TCPIP)) | ||
| 841 | return TXPKT_CSUM_TYPE(csum_type) | | ||
| 842 | TXPKT_IPHDR_LEN(skb_network_header_len(skb)) | | ||
| 843 | TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN); | ||
| 844 | else { | ||
| 845 | int start = skb_transport_offset(skb); | ||
| 846 | |||
| 847 | return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) | | ||
| 848 | TXPKT_CSUM_LOC(start + skb->csum_offset); | ||
| 849 | } | ||
| 850 | } | ||
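As an illustrative example of the control bits hwcsum() produces (values assumed for the sketch, using only the macros referenced above): for an untagged IPv4/TCP frame with a 20-byte IP header, csum_type is TX_CSUM_TCPIP, skb_network_header_len() is 20 and skb_network_offset() - ETH_HLEN is 0, so the function returns TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) | TXPKT_IPHDR_LEN(20) | TXPKT_ETHHDR_LEN(0).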
| 851 | |||
| 852 | static void eth_txq_stop(struct sge_eth_txq *q) | ||
| 853 | { | ||
| 854 | netif_tx_stop_queue(q->txq); | ||
| 855 | q->q.stops++; | ||
| 856 | } | ||
| 857 | |||
| 858 | static inline void txq_advance(struct sge_txq *q, unsigned int n) | ||
| 859 | { | ||
| 860 | q->in_use += n; | ||
| 861 | q->pidx += n; | ||
| 862 | if (q->pidx >= q->size) | ||
| 863 | q->pidx -= q->size; | ||
| 864 | } | ||
| 865 | |||
| 866 | /** | ||
| 867 | * t4_eth_xmit - add a packet to an Ethernet Tx queue | ||
| 868 | * @skb: the packet | ||
| 869 | * @dev: the egress net device | ||
| 870 | * | ||
| 871 | * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. | ||
| 872 | */ | ||
| 873 | netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) | ||
| 874 | { | ||
| 875 | u32 wr_mid; | ||
| 876 | u64 cntrl, *end; | ||
| 877 | int qidx, credits; | ||
| 878 | unsigned int flits, ndesc; | ||
| 879 | struct adapter *adap; | ||
| 880 | struct sge_eth_txq *q; | ||
| 881 | const struct port_info *pi; | ||
| 882 | struct fw_eth_tx_pkt_wr *wr; | ||
| 883 | struct cpl_tx_pkt_core *cpl; | ||
| 884 | const struct skb_shared_info *ssi; | ||
| 885 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; | ||
| 886 | |||
| 887 | /* | ||
| 888 | * The chip's minimum packet length is 10 octets, but play it safe and reject | ||
| 889 | * anything shorter than an Ethernet header. | ||
| 890 | */ | ||
| 891 | if (unlikely(skb->len < ETH_HLEN)) { | ||
| 892 | out_free: dev_kfree_skb(skb); | ||
| 893 | return NETDEV_TX_OK; | ||
| 894 | } | ||
| 895 | |||
| 896 | pi = netdev_priv(dev); | ||
| 897 | adap = pi->adapter; | ||
| 898 | qidx = skb_get_queue_mapping(skb); | ||
| 899 | q = &adap->sge.ethtxq[qidx + pi->first_qset]; | ||
| 900 | |||
| 901 | reclaim_completed_tx(adap, &q->q, true); | ||
| 902 | |||
| 903 | flits = calc_tx_flits(skb); | ||
| 904 | ndesc = flits_to_desc(flits); | ||
| 905 | credits = txq_avail(&q->q) - ndesc; | ||
| 906 | |||
| 907 | if (unlikely(credits < 0)) { | ||
| 908 | eth_txq_stop(q); | ||
| 909 | dev_err(adap->pdev_dev, | ||
| 910 | "%s: Tx ring %u full while queue awake!\n", | ||
| 911 | dev->name, qidx); | ||
| 912 | return NETDEV_TX_BUSY; | ||
| 913 | } | ||
| 914 | |||
| 915 | if (!is_eth_imm(skb) && | ||
| 916 | unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) { | ||
| 917 | q->mapping_err++; | ||
| 918 | goto out_free; | ||
| 919 | } | ||
| 920 | |||
| 921 | wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); | ||
| 922 | if (unlikely(credits < ETHTXQ_STOP_THRES)) { | ||
| 923 | eth_txq_stop(q); | ||
| 924 | wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; | ||
| 925 | } | ||
| 926 | |||
| 927 | wr = (void *)&q->q.desc[q->q.pidx]; | ||
| 928 | wr->equiq_to_len16 = htonl(wr_mid); | ||
| 929 | wr->r3 = cpu_to_be64(0); | ||
| 930 | end = (u64 *)wr + flits; | ||
| 931 | |||
| 932 | ssi = skb_shinfo(skb); | ||
| 933 | if (ssi->gso_size) { | ||
| 934 | struct cpl_tx_pkt_lso *lso = (void *)wr; | ||
| 935 | bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; | ||
| 936 | int l3hdr_len = skb_network_header_len(skb); | ||
| 937 | int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; | ||
| 938 | |||
| 939 | wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | | ||
| 940 | FW_WR_IMMDLEN(sizeof(*lso))); | ||
| 941 | lso->lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | | ||
| 942 | LSO_FIRST_SLICE | LSO_LAST_SLICE | | ||
| 943 | LSO_IPV6(v6) | | ||
| 944 | LSO_ETHHDR_LEN(eth_xtra_len / 4) | | ||
| 945 | LSO_IPHDR_LEN(l3hdr_len / 4) | | ||
| 946 | LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); | ||
| 947 | lso->ipid_ofst = htons(0); | ||
| 948 | lso->mss = htons(ssi->gso_size); | ||
| 949 | lso->seqno_offset = htonl(0); | ||
| 950 | lso->len = htonl(skb->len); | ||
| 951 | cpl = (void *)(lso + 1); | ||
| 952 | cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | | ||
| 953 | TXPKT_IPHDR_LEN(l3hdr_len) | | ||
| 954 | TXPKT_ETHHDR_LEN(eth_xtra_len); | ||
| 955 | q->tso++; | ||
| 956 | q->tx_cso += ssi->gso_segs; | ||
| 957 | } else { | ||
| 958 | int len; | ||
| 959 | |||
| 960 | len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl); | ||
| 961 | wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | | ||
| 962 | FW_WR_IMMDLEN(len)); | ||
| 963 | cpl = (void *)(wr + 1); | ||
| 964 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
| 965 | cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; | ||
| 966 | q->tx_cso++; | ||
| 967 | } else | ||
| 968 | cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; | ||
| 969 | } | ||
| 970 | |||
| 971 | if (vlan_tx_tag_present(skb)) { | ||
| 972 | q->vlan_ins++; | ||
| 973 | cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); | ||
| 974 | } | ||
| 975 | |||
| 976 | cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | | ||
| 977 | TXPKT_INTF(pi->tx_chan) | TXPKT_PF(0)); | ||
| 978 | cpl->pack = htons(0); | ||
| 979 | cpl->len = htons(skb->len); | ||
| 980 | cpl->ctrl1 = cpu_to_be64(cntrl); | ||
| 981 | |||
| 982 | if (is_eth_imm(skb)) { | ||
| 983 | inline_tx_skb(skb, &q->q, cpl + 1); | ||
| 984 | dev_kfree_skb(skb); | ||
| 985 | } else { | ||
| 986 | int last_desc; | ||
| 987 | |||
| 988 | write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0, | ||
| 989 | addr); | ||
| 990 | skb_orphan(skb); | ||
| 991 | |||
| 992 | last_desc = q->q.pidx + ndesc - 1; | ||
| 993 | if (last_desc >= q->q.size) | ||
| 994 | last_desc -= q->q.size; | ||
| 995 | q->q.sdesc[last_desc].skb = skb; | ||
| 996 | q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); | ||
| 997 | } | ||
| 998 | |||
| 999 | txq_advance(&q->q, ndesc); | ||
| 1000 | |||
| 1001 | ring_tx_db(adap, &q->q, ndesc); | ||
| 1002 | return NETDEV_TX_OK; | ||
| 1003 | } | ||
| 1004 | |||
| 1005 | /** | ||
| 1006 | * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs | ||
| 1007 | * @q: the SGE control Tx queue | ||
| 1008 | * | ||
| 1009 | * This is a variant of reclaim_completed_tx() that is used for Tx queues | ||
| 1010 | * that send only immediate data (presently just the control queues) and | ||
| 1011 | * thus do not have any sk_buffs to release. | ||
| 1012 | */ | ||
| 1013 | static inline void reclaim_completed_tx_imm(struct sge_txq *q) | ||
| 1014 | { | ||
| 1015 | int hw_cidx = ntohs(q->stat->cidx); | ||
| 1016 | int reclaim = hw_cidx - q->cidx; | ||
| 1017 | |||
| 1018 | if (reclaim < 0) | ||
| 1019 | reclaim += q->size; | ||
| 1020 | |||
| 1021 | q->in_use -= reclaim; | ||
| 1022 | q->cidx = hw_cidx; | ||
| 1023 | } | ||
| 1024 | |||
| 1025 | /** | ||
| 1026 | * is_imm - check whether a packet can be sent as immediate data | ||
| 1027 | * @skb: the packet | ||
| 1028 | * | ||
| 1029 | * Returns true if a packet can be sent as a WR with immediate data. | ||
| 1030 | */ | ||
| 1031 | static inline int is_imm(const struct sk_buff *skb) | ||
| 1032 | { | ||
| 1033 | return skb->len <= MAX_CTRL_WR_LEN; | ||
| 1034 | } | ||
| 1035 | |||
| 1036 | /** | ||
| 1037 | * ctrlq_check_stop - check if a control queue is full and should stop | ||
| 1038 | * @q: the queue | ||
| 1039 | * @wr: most recent WR written to the queue | ||
| 1040 | * | ||
| 1041 | * Check if a control queue has become full and should be stopped. | ||
| 1042 | * We clean up control queue descriptors very lazily, only when we run out of room. | ||
| 1043 | * If the queue is still full after reclaiming any completed descriptors | ||
| 1044 | * we suspend it and have the last WR wake it up. | ||
| 1045 | */ | ||
| 1046 | static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) | ||
| 1047 | { | ||
| 1048 | reclaim_completed_tx_imm(&q->q); | ||
| 1049 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { | ||
| 1050 | wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); | ||
| 1051 | q->q.stops++; | ||
| 1052 | q->full = 1; | ||
| 1053 | } | ||
| 1054 | } | ||
| 1055 | |||
| 1056 | /** | ||
| 1057 | * ctrl_xmit - send a packet through an SGE control Tx queue | ||
| 1058 | * @q: the control queue | ||
| 1059 | * @skb: the packet | ||
| 1060 | * | ||
| 1061 | * Send a packet through an SGE control Tx queue. Packets sent through | ||
| 1062 | * a control queue must fit entirely as immediate data. | ||
| 1063 | */ | ||
| 1064 | static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) | ||
| 1065 | { | ||
| 1066 | unsigned int ndesc; | ||
| 1067 | struct fw_wr_hdr *wr; | ||
| 1068 | |||
| 1069 | if (unlikely(!is_imm(skb))) { | ||
| 1070 | WARN_ON(1); | ||
| 1071 | dev_kfree_skb(skb); | ||
| 1072 | return NET_XMIT_DROP; | ||
| 1073 | } | ||
| 1074 | |||
| 1075 | ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); | ||
| 1076 | spin_lock(&q->sendq.lock); | ||
| 1077 | |||
| 1078 | if (unlikely(q->full)) { | ||
| 1079 | skb->priority = ndesc; /* save for restart */ | ||
| 1080 | __skb_queue_tail(&q->sendq, skb); | ||
| 1081 | spin_unlock(&q->sendq.lock); | ||
| 1082 | return NET_XMIT_CN; | ||
| 1083 | } | ||
| 1084 | |||
| 1085 | wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; | ||
| 1086 | inline_tx_skb(skb, &q->q, wr); | ||
| 1087 | |||
| 1088 | txq_advance(&q->q, ndesc); | ||
| 1089 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) | ||
| 1090 | ctrlq_check_stop(q, wr); | ||
| 1091 | |||
| 1092 | ring_tx_db(q->adap, &q->q, ndesc); | ||
| 1093 | spin_unlock(&q->sendq.lock); | ||
| 1094 | |||
| 1095 | kfree_skb(skb); | ||
| 1096 | return NET_XMIT_SUCCESS; | ||
| 1097 | } | ||
| 1098 | |||
| 1099 | /** | ||
| 1100 | * restart_ctrlq - restart a suspended control queue | ||
| 1101 | * @data: the control queue to restart | ||
| 1102 | * | ||
| 1103 | * Resumes transmission on a suspended Tx control queue. | ||
| 1104 | */ | ||
| 1105 | static void restart_ctrlq(unsigned long data) | ||
| 1106 | { | ||
| 1107 | struct sk_buff *skb; | ||
| 1108 | unsigned int written = 0; | ||
| 1109 | struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data; | ||
| 1110 | |||
| 1111 | spin_lock(&q->sendq.lock); | ||
| 1112 | reclaim_completed_tx_imm(&q->q); | ||
| 1113 | BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ | ||
| 1114 | |||
| 1115 | while ((skb = __skb_dequeue(&q->sendq)) != NULL) { | ||
| 1116 | struct fw_wr_hdr *wr; | ||
| 1117 | unsigned int ndesc = skb->priority; /* previously saved */ | ||
| 1118 | |||
| 1119 | /* | ||
| 1120 | * Write descriptors and free skbs outside the lock to limit | ||
| 1121 | * wait times. q->full is still set so new skbs will be queued. | ||
| 1122 | */ | ||
| 1123 | spin_unlock(&q->sendq.lock); | ||
| 1124 | |||
| 1125 | wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; | ||
| 1126 | inline_tx_skb(skb, &q->q, wr); | ||
| 1127 | kfree_skb(skb); | ||
| 1128 | |||
| 1129 | written += ndesc; | ||
| 1130 | txq_advance(&q->q, ndesc); | ||
| 1131 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { | ||
| 1132 | unsigned long old = q->q.stops; | ||
| 1133 | |||
| 1134 | ctrlq_check_stop(q, wr); | ||
| 1135 | if (q->q.stops != old) { /* suspended anew */ | ||
| 1136 | spin_lock(&q->sendq.lock); | ||
| 1137 | goto ringdb; | ||
| 1138 | } | ||
| 1139 | } | ||
| 1140 | if (written > 16) { | ||
| 1141 | ring_tx_db(q->adap, &q->q, written); | ||
| 1142 | written = 0; | ||
| 1143 | } | ||
| 1144 | spin_lock(&q->sendq.lock); | ||
| 1145 | } | ||
| 1146 | q->full = 0; | ||
| 1147 | ringdb: if (written) | ||
| 1148 | ring_tx_db(q->adap, &q->q, written); | ||
| 1149 | spin_unlock(&q->sendq.lock); | ||
| 1150 | } | ||
| 1151 | |||
| 1152 | /** | ||
| 1153 | * t4_mgmt_tx - send a management message | ||
| 1154 | * @adap: the adapter | ||
| 1155 | * @skb: the packet containing the management message | ||
| 1156 | * | ||
| 1157 | * Send a management message through control queue 0. | ||
| 1158 | */ | ||
| 1159 | int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) | ||
| 1160 | { | ||
| 1161 | int ret; | ||
| 1162 | |||
| 1163 | local_bh_disable(); | ||
| 1164 | ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); | ||
| 1165 | local_bh_enable(); | ||
| 1166 | return ret; | ||
| 1167 | } | ||
| 1168 | |||
| 1169 | /** | ||
| 1170 | * is_ofld_imm - check whether a packet can be sent as immediate data | ||
| 1171 | * @skb: the packet | ||
| 1172 | * | ||
| 1173 | * Returns true if a packet can be sent as an offload WR with immediate | ||
| 1174 | * data. We currently use the same limit as for Ethernet packets. | ||
| 1175 | */ | ||
| 1176 | static inline int is_ofld_imm(const struct sk_buff *skb) | ||
| 1177 | { | ||
| 1178 | return skb->len <= MAX_IMM_TX_PKT_LEN; | ||
| 1179 | } | ||
| 1180 | |||
| 1181 | /** | ||
| 1182 | * calc_tx_flits_ofld - calculate # of flits for an offload packet | ||
| 1183 | * @skb: the packet | ||
| 1184 | * | ||
| 1185 | * Returns the number of flits needed for the given offload packet. | ||
| 1186 | * These packets are already fully constructed and no additional headers | ||
| 1187 | * will be added. | ||
| 1188 | */ | ||
| 1189 | static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) | ||
| 1190 | { | ||
| 1191 | unsigned int flits, cnt; | ||
| 1192 | |||
| 1193 | if (is_ofld_imm(skb)) | ||
| 1194 | return DIV_ROUND_UP(skb->len, 8); | ||
| 1195 | |||
| 1196 | flits = skb_transport_offset(skb) / 8U; /* headers */ | ||
| 1197 | cnt = skb_shinfo(skb)->nr_frags; | ||
| 1198 | if (skb->tail != skb->transport_header) | ||
| 1199 | cnt++; | ||
| 1200 | return flits + sgl_len(cnt); | ||
| 1201 | } | ||
| 1202 | |||
| 1203 | /** | ||
| 1204 | * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion | ||
| 1206 | * @q: the queue to stop | ||
| 1207 | * | ||
| 1208 | * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting | ||
| 1209 | * inability to map packets. A periodic timer attempts to restart | ||
| 1210 | * queues so marked. | ||
| 1211 | */ | ||
| 1212 | static void txq_stop_maperr(struct sge_ofld_txq *q) | ||
| 1213 | { | ||
| 1214 | q->mapping_err++; | ||
| 1215 | q->q.stops++; | ||
| 1216 | set_bit(q->q.cntxt_id, q->adap->sge.txq_maperr); | ||
| 1217 | } | ||
| 1218 | |||
| 1219 | /** | ||
| 1220 | * ofldtxq_stop - stop an offload Tx queue that has become full | ||
| 1221 | * @q: the queue to stop | ||
| 1222 | * @skb: the packet causing the queue to become full | ||
| 1223 | * | ||
| 1224 | * Stops an offload Tx queue that has become full and modifies the packet | ||
| 1225 | * being written to request a wakeup. | ||
| 1226 | */ | ||
| 1227 | static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) | ||
| 1228 | { | ||
| 1229 | struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data; | ||
| 1230 | |||
| 1231 | wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); | ||
| 1232 | q->q.stops++; | ||
| 1233 | q->full = 1; | ||
| 1234 | } | ||
| 1235 | |||
| 1236 | /** | ||
| 1237 | * service_ofldq - service an offload Tx queue | ||
| 1238 | * @q: the offload queue | ||
| 1239 | * | ||
| 1240 | * Services an offload Tx queue by moving packets from its packet queue | ||
| 1241 | * to the HW Tx ring. The function starts and ends with the queue locked. | ||
| 1242 | */ | ||
| 1243 | static void service_ofldq(struct sge_ofld_txq *q) | ||
| 1244 | { | ||
| 1245 | u64 *pos; | ||
| 1246 | int credits; | ||
| 1247 | struct sk_buff *skb; | ||
| 1248 | unsigned int written = 0; | ||
| 1249 | unsigned int flits, ndesc; | ||
| 1250 | |||
| 1251 | while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { | ||
| 1252 | /* | ||
| 1253 | * We drop the lock but leave skb on sendq, thus retaining | ||
| 1254 | * exclusive access to the state of the queue. | ||
| 1255 | */ | ||
| 1256 | spin_unlock(&q->sendq.lock); | ||
| 1257 | |||
| 1258 | reclaim_completed_tx(q->adap, &q->q, false); | ||
| 1259 | |||
| 1260 | flits = skb->priority; /* previously saved */ | ||
| 1261 | ndesc = flits_to_desc(flits); | ||
| 1262 | credits = txq_avail(&q->q) - ndesc; | ||
| 1263 | BUG_ON(credits < 0); | ||
| 1264 | if (unlikely(credits < TXQ_STOP_THRES)) | ||
| 1265 | ofldtxq_stop(q, skb); | ||
| 1266 | |||
| 1267 | pos = (u64 *)&q->q.desc[q->q.pidx]; | ||
| 1268 | if (is_ofld_imm(skb)) | ||
| 1269 | inline_tx_skb(skb, &q->q, pos); | ||
| 1270 | else if (map_skb(q->adap->pdev_dev, skb, | ||
| 1271 | (dma_addr_t *)skb->head)) { | ||
| 1272 | txq_stop_maperr(q); | ||
| 1273 | spin_lock(&q->sendq.lock); | ||
| 1274 | break; | ||
| 1275 | } else { | ||
| 1276 | int last_desc, hdr_len = skb_transport_offset(skb); | ||
| 1277 | |||
| 1278 | memcpy(pos, skb->data, hdr_len); | ||
| 1279 | write_sgl(skb, &q->q, (void *)pos + hdr_len, | ||
| 1280 | pos + flits, hdr_len, | ||
| 1281 | (dma_addr_t *)skb->head); | ||
| 1282 | #ifdef CONFIG_NEED_DMA_MAP_STATE | ||
| 1283 | skb->dev = q->adap->port[0]; | ||
| 1284 | skb->destructor = deferred_unmap_destructor; | ||
| 1285 | #endif | ||
| 1286 | last_desc = q->q.pidx + ndesc - 1; | ||
| 1287 | if (last_desc >= q->q.size) | ||
| 1288 | last_desc -= q->q.size; | ||
| 1289 | q->q.sdesc[last_desc].skb = skb; | ||
| 1290 | } | ||
| 1291 | |||
| 1292 | txq_advance(&q->q, ndesc); | ||
| 1293 | written += ndesc; | ||
| 1294 | if (unlikely(written > 32)) { | ||
| 1295 | ring_tx_db(q->adap, &q->q, written); | ||
| 1296 | written = 0; | ||
| 1297 | } | ||
| 1298 | |||
| 1299 | spin_lock(&q->sendq.lock); | ||
| 1300 | __skb_unlink(skb, &q->sendq); | ||
| 1301 | if (is_ofld_imm(skb)) | ||
| 1302 | kfree_skb(skb); | ||
| 1303 | } | ||
| 1304 | if (likely(written)) | ||
| 1305 | ring_tx_db(q->adap, &q->q, written); | ||
| 1306 | } | ||
| 1307 | |||
| 1308 | /** | ||
| 1309 | * ofld_xmit - send a packet through an offload queue | ||
| 1310 | * @q: the Tx offload queue | ||
| 1311 | * @skb: the packet | ||
| 1312 | * | ||
| 1313 | * Send an offload packet through an SGE offload queue. | ||
| 1314 | */ | ||
| 1315 | static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb) | ||
| 1316 | { | ||
| 1317 | skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ | ||
| 1318 | spin_lock(&q->sendq.lock); | ||
| 1319 | __skb_queue_tail(&q->sendq, skb); | ||
| 1320 | if (q->sendq.qlen == 1) | ||
| 1321 | service_ofldq(q); | ||
| 1322 | spin_unlock(&q->sendq.lock); | ||
| 1323 | return NET_XMIT_SUCCESS; | ||
| 1324 | } | ||
| 1325 | |||
| 1326 | /** | ||
| 1327 | * restart_ofldq - restart a suspended offload queue | ||
| 1328 | * @data: the offload queue to restart | ||
| 1329 | * | ||
| 1330 | * Resumes transmission on a suspended Tx offload queue. | ||
| 1331 | */ | ||
| 1332 | static void restart_ofldq(unsigned long data) | ||
| 1333 | { | ||
| 1334 | struct sge_ofld_txq *q = (struct sge_ofld_txq *)data; | ||
| 1335 | |||
| 1336 | spin_lock(&q->sendq.lock); | ||
| 1337 | q->full = 0; /* the queue actually is completely empty now */ | ||
| 1338 | service_ofldq(q); | ||
| 1339 | spin_unlock(&q->sendq.lock); | ||
| 1340 | } | ||
| 1341 | |||
| 1342 | /** | ||
| 1343 | * skb_txq - return the Tx queue an offload packet should use | ||
| 1344 | * @skb: the packet | ||
| 1345 | * | ||
| 1346 | * Returns the Tx queue an offload packet should use as indicated by bits | ||
| 1347 | * 1-15 in the packet's queue_mapping. | ||
| 1348 | */ | ||
| 1349 | static inline unsigned int skb_txq(const struct sk_buff *skb) | ||
| 1350 | { | ||
| 1351 | return skb->queue_mapping >> 1; | ||
| 1352 | } | ||
| 1353 | |||
| 1354 | /** | ||
| 1355 | * is_ctrl_pkt - return whether an offload packet is a control packet | ||
| 1356 | * @skb: the packet | ||
| 1357 | * | ||
| 1358 | * Returns whether an offload packet should use an OFLD or a CTRL | ||
| 1359 | * Tx queue as indicated by bit 0 in the packet's queue_mapping. | ||
| 1360 | */ | ||
| 1361 | static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb) | ||
| 1362 | { | ||
| 1363 | return skb->queue_mapping & 1; | ||
| 1364 | } | ||
| 1365 | |||
| 1366 | static inline int ofld_send(struct adapter *adap, struct sk_buff *skb) | ||
| 1367 | { | ||
| 1368 | unsigned int idx = skb_txq(skb); | ||
| 1369 | |||
| 1370 | if (unlikely(is_ctrl_pkt(skb))) | ||
| 1371 | return ctrl_xmit(&adap->sge.ctrlq[idx], skb); | ||
| 1372 | return ofld_xmit(&adap->sge.ofldtxq[idx], skb); | ||
| 1373 | } | ||
| 1374 | |||
| 1375 | /** | ||
| 1376 | * t4_ofld_send - send an offload packet | ||
| 1377 | * @adap: the adapter | ||
| 1378 | * @skb: the packet | ||
| 1379 | * | ||
| 1380 | * Sends an offload packet. We use the packet queue_mapping to select the | ||
| 1381 | * appropriate Tx queue as follows: bit 0 indicates whether the packet | ||
| 1382 | * should be sent as regular or control, bits 1-15 select the queue. | ||
| 1383 | */ | ||
| 1384 | int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) | ||
| 1385 | { | ||
| 1386 | int ret; | ||
| 1387 | |||
| 1388 | local_bh_disable(); | ||
| 1389 | ret = ofld_send(adap, skb); | ||
| 1390 | local_bh_enable(); | ||
| 1391 | return ret; | ||
| 1392 | } | ||
| 1393 | |||
| 1394 | /** | ||
| 1395 | * cxgb4_ofld_send - send an offload packet | ||
| 1396 | * @dev: the net device | ||
| 1397 | * @skb: the packet | ||
| 1398 | * | ||
| 1399 | * Sends an offload packet. This is an exported version of @t4_ofld_send, | ||
| 1400 | * intended for ULDs. | ||
| 1401 | */ | ||
| 1402 | int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb) | ||
| 1403 | { | ||
| 1404 | return t4_ofld_send(netdev2adap(dev), skb); | ||
| 1405 | } | ||
| 1406 | EXPORT_SYMBOL(cxgb4_ofld_send); | ||
| 1407 | |||
| 1408 | static inline void copy_frags(struct skb_shared_info *ssi, | ||
| 1409 | const struct pkt_gl *gl, unsigned int offset) | ||
| 1410 | { | ||
| 1411 | unsigned int n; | ||
| 1412 | |||
| 1413 | /* usually there's just one frag */ | ||
| 1414 | ssi->frags[0].page = gl->frags[0].page; | ||
| 1415 | ssi->frags[0].page_offset = gl->frags[0].page_offset + offset; | ||
| 1416 | ssi->frags[0].size = gl->frags[0].size - offset; | ||
| 1417 | ssi->nr_frags = gl->nfrags; | ||
| 1418 | n = gl->nfrags - 1; | ||
| 1419 | if (n) | ||
| 1420 | memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t)); | ||
| 1421 | |||
| 1422 | /* get a reference to the last page; we don't own it */ | ||
| 1423 | get_page(gl->frags[n].page); | ||
| 1424 | } | ||
| 1425 | |||
| 1426 | /** | ||
| 1427 | * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list | ||
| 1428 | * @gl: the gather list | ||
| 1429 | * @skb_len: size of sk_buff main body if it carries fragments | ||
| 1430 | * @pull_len: amount of data to move to the sk_buff's main body | ||
| 1431 | * | ||
| 1432 | * Builds an sk_buff from the given packet gather list. Returns the | ||
| 1433 | * sk_buff or %NULL if sk_buff allocation failed. | ||
| 1434 | */ | ||
| 1435 | struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, | ||
| 1436 | unsigned int skb_len, unsigned int pull_len) | ||
| 1437 | { | ||
| 1438 | struct sk_buff *skb; | ||
| 1439 | |||
| 1440 | /* | ||
| 1441 | * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer | ||
| 1442 | * size, which is expected since buffers are at least PAGE_SIZEd. | ||
| 1443 | * In this case packets up to RX_COPY_THRES have only one fragment. | ||
| 1444 | */ | ||
| 1445 | if (gl->tot_len <= RX_COPY_THRES) { | ||
| 1446 | skb = dev_alloc_skb(gl->tot_len); | ||
| 1447 | if (unlikely(!skb)) | ||
| 1448 | goto out; | ||
| 1449 | __skb_put(skb, gl->tot_len); | ||
| 1450 | skb_copy_to_linear_data(skb, gl->va, gl->tot_len); | ||
| 1451 | } else { | ||
| 1452 | skb = dev_alloc_skb(skb_len); | ||
| 1453 | if (unlikely(!skb)) | ||
| 1454 | goto out; | ||
| 1455 | __skb_put(skb, pull_len); | ||
| 1456 | skb_copy_to_linear_data(skb, gl->va, pull_len); | ||
| 1457 | |||
| 1458 | copy_frags(skb_shinfo(skb), gl, pull_len); | ||
| 1459 | skb->len = gl->tot_len; | ||
| 1460 | skb->data_len = skb->len - pull_len; | ||
| 1461 | skb->truesize += skb->data_len; | ||
| 1462 | } | ||
| 1463 | out: return skb; | ||
| 1464 | } | ||
| 1465 | EXPORT_SYMBOL(cxgb4_pktgl_to_skb); | ||
| 1466 | |||
| 1467 | /** | ||
| 1468 | * t4_pktgl_free - free a packet gather list | ||
| 1469 | * @gl: the gather list | ||
| 1470 | * | ||
| 1471 | * Releases the pages of a packet gather list. We do not own the last | ||
| 1472 | * page on the list and do not free it. | ||
| 1473 | */ | ||
| 1474 | void t4_pktgl_free(const struct pkt_gl *gl) | ||
| 1475 | { | ||
| 1476 | int n; | ||
| 1477 | const skb_frag_t *p; | ||
| 1478 | |||
| 1479 | for (p = gl->frags, n = gl->nfrags - 1; n--; p++) | ||
| 1480 | put_page(p->page); | ||
| 1481 | } | ||
| 1482 | |||
| 1483 | /* | ||
| 1484 | * Process an MPS trace packet. Give it an unused protocol number so it won't | ||
| 1485 | * be delivered to anyone and send it to the stack for capture. | ||
| 1486 | */ | ||
| 1487 | static noinline int handle_trace_pkt(struct adapter *adap, | ||
| 1488 | const struct pkt_gl *gl) | ||
| 1489 | { | ||
| 1490 | struct sk_buff *skb; | ||
| 1491 | struct cpl_trace_pkt *p; | ||
| 1492 | |||
| 1493 | skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); | ||
| 1494 | if (unlikely(!skb)) { | ||
| 1495 | t4_pktgl_free(gl); | ||
| 1496 | return 0; | ||
| 1497 | } | ||
| 1498 | |||
| 1499 | p = (struct cpl_trace_pkt *)skb->data; | ||
| 1500 | __skb_pull(skb, sizeof(*p)); | ||
| 1501 | skb_reset_mac_header(skb); | ||
| 1502 | skb->protocol = htons(0xffff); | ||
| 1503 | skb->dev = adap->port[0]; | ||
| 1504 | netif_receive_skb(skb); | ||
| 1505 | return 0; | ||
| 1506 | } | ||
| 1507 | |||
| 1508 | static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, | ||
| 1509 | const struct cpl_rx_pkt *pkt) | ||
| 1510 | { | ||
| 1511 | int ret; | ||
| 1512 | struct sk_buff *skb; | ||
| 1513 | |||
| 1514 | skb = napi_get_frags(&rxq->rspq.napi); | ||
| 1515 | if (unlikely(!skb)) { | ||
| 1516 | t4_pktgl_free(gl); | ||
| 1517 | rxq->stats.rx_drops++; | ||
| 1518 | return; | ||
| 1519 | } | ||
| 1520 | |||
| 1521 | copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD); | ||
| 1522 | skb->len = gl->tot_len - RX_PKT_PAD; | ||
| 1523 | skb->data_len = skb->len; | ||
| 1524 | skb->truesize += skb->data_len; | ||
| 1525 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
| 1526 | skb_record_rx_queue(skb, rxq->rspq.idx); | ||
| 1527 | |||
| 1528 | if (unlikely(pkt->vlan_ex)) { | ||
| 1529 | struct port_info *pi = netdev_priv(rxq->rspq.netdev); | ||
| 1530 | struct vlan_group *grp = pi->vlan_grp; | ||
| 1531 | |||
| 1532 | rxq->stats.vlan_ex++; | ||
| 1533 | if (likely(grp)) { | ||
| 1534 | ret = vlan_gro_frags(&rxq->rspq.napi, grp, | ||
| 1535 | ntohs(pkt->vlan)); | ||
| 1536 | goto stats; | ||
| 1537 | } | ||
| 1538 | } | ||
| 1539 | ret = napi_gro_frags(&rxq->rspq.napi); | ||
| 1540 | stats: if (ret == GRO_HELD) | ||
| 1541 | rxq->stats.lro_pkts++; | ||
| 1542 | else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) | ||
| 1543 | rxq->stats.lro_merged++; | ||
| 1544 | rxq->stats.pkts++; | ||
| 1545 | rxq->stats.rx_cso++; | ||
| 1546 | } | ||
| 1547 | |||
| 1548 | /** | ||
| 1549 | * t4_ethrx_handler - process an ingress ethernet packet | ||
| 1550 | * @q: the response queue that received the packet | ||
| 1551 | * @rsp: the response queue descriptor holding the RX_PKT message | ||
| 1552 | * @si: the gather list of packet fragments | ||
| 1553 | * | ||
| 1554 | * Process an ingress ethernet packet and deliver it to the stack. | ||
| 1555 | */ | ||
| 1556 | int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, | ||
| 1557 | const struct pkt_gl *si) | ||
| 1558 | { | ||
| 1559 | bool csum_ok; | ||
| 1560 | struct sk_buff *skb; | ||
| 1561 | struct port_info *pi; | ||
| 1562 | const struct cpl_rx_pkt *pkt; | ||
| 1563 | struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); | ||
| 1564 | |||
| 1565 | if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT)) | ||
| 1566 | return handle_trace_pkt(q->adap, si); | ||
| 1567 | |||
| 1568 | pkt = (void *)&rsp[1]; | ||
| 1569 | csum_ok = pkt->csum_calc && !pkt->err_vec; | ||
| 1570 | if ((pkt->l2info & htonl(RXF_TCP)) && | ||
| 1571 | (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { | ||
| 1572 | do_gro(rxq, si, pkt); | ||
| 1573 | return 0; | ||
| 1574 | } | ||
| 1575 | |||
| 1576 | skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN); | ||
| 1577 | if (unlikely(!skb)) { | ||
| 1578 | t4_pktgl_free(si); | ||
| 1579 | rxq->stats.rx_drops++; | ||
| 1580 | return 0; | ||
| 1581 | } | ||
| 1582 | |||
| 1583 | __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */ | ||
| 1584 | skb->protocol = eth_type_trans(skb, q->netdev); | ||
| 1585 | skb_record_rx_queue(skb, q->idx); | ||
| 1586 | pi = netdev_priv(skb->dev); | ||
| 1587 | rxq->stats.pkts++; | ||
| 1588 | |||
| 1589 | if (csum_ok && (pi->rx_offload & RX_CSO) && | ||
| 1590 | (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) { | ||
| 1591 | if (!pkt->ip_frag) | ||
| 1592 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
| 1593 | else { | ||
| 1594 | __sum16 c = (__force __sum16)pkt->csum; | ||
| 1595 | skb->csum = csum_unfold(c); | ||
| 1596 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
| 1597 | } | ||
| 1598 | rxq->stats.rx_cso++; | ||
| 1599 | } else | ||
| 1600 | skb->ip_summed = CHECKSUM_NONE; | ||
| 1601 | |||
| 1602 | if (unlikely(pkt->vlan_ex)) { | ||
| 1603 | struct vlan_group *grp = pi->vlan_grp; | ||
| 1604 | |||
| 1605 | rxq->stats.vlan_ex++; | ||
| 1606 | if (likely(grp)) | ||
| 1607 | vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan)); | ||
| 1608 | else | ||
| 1609 | dev_kfree_skb_any(skb); | ||
| 1610 | } else | ||
| 1611 | netif_receive_skb(skb); | ||
| 1612 | |||
| 1613 | return 0; | ||
| 1614 | } | ||
| 1615 | |||
| 1616 | /** | ||
| 1617 | * restore_rx_bufs - put back a packet's Rx buffers | ||
| 1618 | * @si: the packet gather list | ||
| 1619 | * @q: the SGE free list | ||
| 1620 | * @frags: number of FL buffers to restore | ||
| 1621 | * | ||
| 1622 | * Puts back on an FL the Rx buffers associated with @si. The buffers | ||
| 1623 | * have already been unmapped and are left unmapped; we mark them so to | ||
| 1624 | * prevent further unmapping attempts. | ||
| 1625 | * | ||
| 1626 | * This function undoes a series of @unmap_rx_buf calls when we find out | ||
| 1627 | * that the current packet can't be processed right away after all and we | ||
| 1628 | * need to come back to it later. This is a very rare event and there's | ||
| 1629 | * no effort to make this particularly efficient. | ||
| 1630 | */ | ||
| 1631 | static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q, | ||
| 1632 | int frags) | ||
| 1633 | { | ||
| 1634 | struct rx_sw_desc *d; | ||
| 1635 | |||
| 1636 | while (frags--) { | ||
| 1637 | if (q->cidx == 0) | ||
| 1638 | q->cidx = q->size - 1; | ||
| 1639 | else | ||
| 1640 | q->cidx--; | ||
| 1641 | d = &q->sdesc[q->cidx]; | ||
| 1642 | d->page = si->frags[frags].page; | ||
| 1643 | d->dma_addr |= RX_UNMAPPED_BUF; | ||
| 1644 | q->avail++; | ||
| 1645 | } | ||
| 1646 | } | ||
| 1647 | |||
| 1648 | /** | ||
| 1649 | * is_new_response - check if a response is newly written | ||
| 1650 | * @r: the response descriptor | ||
| 1651 | * @q: the response queue | ||
| 1652 | * | ||
| 1653 | * Returns true if a response descriptor contains a yet unprocessed | ||
| 1654 | * response. | ||
| 1655 | */ | ||
| 1656 | static inline bool is_new_response(const struct rsp_ctrl *r, | ||
| 1657 | const struct sge_rspq *q) | ||
| 1658 | { | ||
| 1659 | return RSPD_GEN(r->type_gen) == q->gen; | ||
| 1660 | } | ||
| 1661 | |||
| 1662 | /** | ||
| 1663 | * rspq_next - advance to the next entry in a response queue | ||
| 1664 | * @q: the queue | ||
| 1665 | * | ||
| 1666 | * Updates the state of a response queue to advance it to the next entry. | ||
| 1667 | */ | ||
| 1668 | static inline void rspq_next(struct sge_rspq *q) | ||
| 1669 | { | ||
| 1670 | q->cur_desc = (void *)q->cur_desc + q->iqe_len; | ||
| 1671 | if (unlikely(++q->cidx == q->size)) { | ||
| 1672 | q->cidx = 0; | ||
| 1673 | q->gen ^= 1; | ||
| 1674 | q->cur_desc = q->desc; | ||
| 1675 | } | ||
| 1676 | } | ||
| 1677 | |||
| 1678 | /** | ||
| 1679 | * process_responses - process responses from an SGE response queue | ||
| 1680 | * @q: the ingress queue to process | ||
| 1681 | * @budget: how many responses can be processed in this round | ||
| 1682 | * | ||
| 1683 | * Process responses from an SGE response queue up to the supplied budget. | ||
| 1684 | * Responses include received packets as well as control messages from FW | ||
| 1685 | * or HW. | ||
| 1686 | * | ||
| 1687 | * Additionally choose the interrupt holdoff time for the next interrupt | ||
| 1688 | * on this queue. If the system is under memory shortage use a fairly | ||
| 1689 | * long delay to help recovery. | ||
| 1690 | */ | ||
| 1691 | static int process_responses(struct sge_rspq *q, int budget) | ||
| 1692 | { | ||
| 1693 | int ret, rsp_type; | ||
| 1694 | int budget_left = budget; | ||
| 1695 | const struct rsp_ctrl *rc; | ||
| 1696 | struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); | ||
| 1697 | |||
| 1698 | while (likely(budget_left)) { | ||
| 1699 | rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); | ||
| 1700 | if (!is_new_response(rc, q)) | ||
| 1701 | break; | ||
| 1702 | |||
| 1703 | rmb(); | ||
| 1704 | rsp_type = RSPD_TYPE(rc->type_gen); | ||
| 1705 | if (likely(rsp_type == RSP_TYPE_FLBUF)) { | ||
| 1706 | skb_frag_t *fp; | ||
| 1707 | struct pkt_gl si; | ||
| 1708 | const struct rx_sw_desc *rsd; | ||
| 1709 | u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; | ||
| 1710 | |||
| 1711 | if (len & RSPD_NEWBUF) { | ||
| 1712 | if (likely(q->offset > 0)) { | ||
| 1713 | free_rx_bufs(q->adap, &rxq->fl, 1); | ||
| 1714 | q->offset = 0; | ||
| 1715 | } | ||
| 1716 | len &= RSPD_LEN; | ||
| 1717 | } | ||
| 1718 | si.tot_len = len; | ||
| 1719 | |||
| 1720 | /* gather packet fragments */ | ||
| 1721 | for (frags = 0, fp = si.frags; ; frags++, fp++) { | ||
| 1722 | rsd = &rxq->fl.sdesc[rxq->fl.cidx]; | ||
| 1723 | bufsz = get_buf_size(rsd); | ||
| 1724 | fp->page = rsd->page; | ||
| 1725 | fp->page_offset = q->offset; | ||
| 1726 | fp->size = min(bufsz, len); | ||
| 1727 | len -= fp->size; | ||
| 1728 | if (!len) | ||
| 1729 | break; | ||
| 1730 | unmap_rx_buf(q->adap, &rxq->fl); | ||
| 1731 | } | ||
| 1732 | |||
| 1733 | /* | ||
| 1734 | * Last buffer remains mapped so explicitly make it | ||
| 1735 | * coherent for CPU access. | ||
| 1736 | */ | ||
| 1737 | dma_sync_single_for_cpu(q->adap->pdev_dev, | ||
| 1738 | get_buf_addr(rsd), | ||
| 1739 | fp->size, DMA_FROM_DEVICE); | ||
| 1740 | |||
| 1741 | si.va = page_address(si.frags[0].page) + | ||
| 1742 | si.frags[0].page_offset; | ||
| 1743 | prefetch(si.va); | ||
| 1744 | |||
| 1745 | si.nfrags = frags + 1; | ||
| 1746 | ret = q->handler(q, q->cur_desc, &si); | ||
| 1747 | if (likely(ret == 0)) | ||
| 1748 | q->offset += ALIGN(fp->size, FL_ALIGN); | ||
| 1749 | else | ||
| 1750 | restore_rx_bufs(&si, &rxq->fl, frags); | ||
| 1751 | } else if (likely(rsp_type == RSP_TYPE_CPL)) { | ||
| 1752 | ret = q->handler(q, q->cur_desc, NULL); | ||
| 1753 | } else { | ||
| 1754 | ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); | ||
| 1755 | } | ||
| 1756 | |||
| 1757 | if (unlikely(ret)) { | ||
| 1758 | /* couldn't process descriptor, back off for recovery */ | ||
| 1759 | q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX); | ||
| 1760 | break; | ||
| 1761 | } | ||
| 1762 | |||
| 1763 | rspq_next(q); | ||
| 1764 | budget_left--; | ||
| 1765 | } | ||
| 1766 | |||
| 1767 | if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16) | ||
| 1768 | __refill_fl(q->adap, &rxq->fl); | ||
| 1769 | return budget - budget_left; | ||
| 1770 | } | ||
| 1771 | |||
| 1772 | /** | ||
| 1773 | * napi_rx_handler - the NAPI handler for Rx processing | ||
| 1774 | * @napi: the napi instance | ||
| 1775 | * @budget: how many packets we can process in this round | ||
| 1776 | * | ||
| 1777 | * Handler for new data events when using NAPI. This does not need any | ||
| 1778 | * locking or protection from interrupts as data interrupts are off at | ||
| 1779 | * this point and other adapter interrupts do not interfere (the latter | ||
| 1780 | * is not a concern at all with MSI-X as non-data interrupts then have | ||
| 1781 | * a separate handler). | ||
| 1782 | */ | ||
| 1783 | static int napi_rx_handler(struct napi_struct *napi, int budget) | ||
| 1784 | { | ||
| 1785 | unsigned int params; | ||
| 1786 | struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); | ||
| 1787 | int work_done = process_responses(q, budget); | ||
| 1788 | |||
| 1789 | if (likely(work_done < budget)) { | ||
| 1790 | napi_complete(napi); | ||
| 1791 | params = q->next_intr_params; | ||
| 1792 | q->next_intr_params = q->intr_params; | ||
| 1793 | } else | ||
| 1794 | params = QINTR_TIMER_IDX(7); | ||
| 1795 | |||
| 1796 | t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) | | ||
| 1797 | INGRESSQID((u32)q->cntxt_id) | SEINTARM(params)); | ||
| 1798 | return work_done; | ||
| 1799 | } | ||
| 1800 | |||
| 1801 | /* | ||
| 1802 | * The MSI-X interrupt handler for an SGE response queue. | ||
| 1803 | */ | ||
| 1804 | irqreturn_t t4_sge_intr_msix(int irq, void *cookie) | ||
| 1805 | { | ||
| 1806 | struct sge_rspq *q = cookie; | ||
| 1807 | |||
| 1808 | napi_schedule(&q->napi); | ||
| 1809 | return IRQ_HANDLED; | ||
| 1810 | } | ||
| 1811 | |||
| 1812 | /* | ||
| 1813 | * Process the indirect interrupt entries in the interrupt queue and kick off | ||
| 1814 | * NAPI for each queue that has generated an entry. | ||
| 1815 | */ | ||
| 1816 | static unsigned int process_intrq(struct adapter *adap) | ||
| 1817 | { | ||
| 1818 | unsigned int credits; | ||
| 1819 | const struct rsp_ctrl *rc; | ||
| 1820 | struct sge_rspq *q = &adap->sge.intrq; | ||
| 1821 | |||
| 1822 | spin_lock(&adap->sge.intrq_lock); | ||
| 1823 | for (credits = 0; ; credits++) { | ||
| 1824 | rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); | ||
| 1825 | if (!is_new_response(rc, q)) | ||
| 1826 | break; | ||
| 1827 | |||
| 1828 | rmb(); | ||
| 1829 | if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) { | ||
| 1830 | unsigned int qid = ntohl(rc->pldbuflen_qid); | ||
| 1831 | |||
| 1832 | napi_schedule(&adap->sge.ingr_map[qid]->napi); | ||
| 1833 | } | ||
| 1834 | |||
| 1835 | rspq_next(q); | ||
| 1836 | } | ||
| 1837 | |||
| 1838 | t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) | | ||
| 1839 | INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params)); | ||
| 1840 | spin_unlock(&adap->sge.intrq_lock); | ||
| 1841 | return credits; | ||
| 1842 | } | ||
| 1843 | |||
| 1844 | /* | ||
| 1845 | * The MSI interrupt handler handles data events from SGE response queues as | ||
| 1846 | * well as error and other async events, since they all use the same MSI vector. | ||
| 1847 | */ | ||
| 1848 | static irqreturn_t t4_intr_msi(int irq, void *cookie) | ||
| 1849 | { | ||
| 1850 | struct adapter *adap = cookie; | ||
| 1851 | |||
| 1852 | t4_slow_intr_handler(adap); | ||
| 1853 | process_intrq(adap); | ||
| 1854 | return IRQ_HANDLED; | ||
| 1855 | } | ||
| 1856 | |||
| 1857 | /* | ||
| 1858 | * Interrupt handler for legacy INTx interrupts. | ||
| 1859 | * Handles data events from SGE response queues as well as error and other | ||
| 1860 | * async events as they all use the same interrupt line. | ||
| 1861 | */ | ||
| 1862 | static irqreturn_t t4_intr_intx(int irq, void *cookie) | ||
| 1863 | { | ||
| 1864 | struct adapter *adap = cookie; | ||
| 1865 | |||
| 1866 | t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0); | ||
| 1867 | if (t4_slow_intr_handler(adap) | process_intrq(adap)) | ||
| 1868 | return IRQ_HANDLED; | ||
| 1869 | return IRQ_NONE; /* probably shared interrupt */ | ||
| 1870 | } | ||
| 1871 | |||
| 1872 | /** | ||
| 1873 | * t4_intr_handler - select the top-level interrupt handler | ||
| 1874 | * @adap: the adapter | ||
| 1875 | * | ||
| 1876 | * Selects the top-level interrupt handler based on the type of interrupts | ||
| 1877 | * (MSI-X, MSI, or INTx). | ||
| 1878 | */ | ||
| 1879 | irq_handler_t t4_intr_handler(struct adapter *adap) | ||
| 1880 | { | ||
| 1881 | if (adap->flags & USING_MSIX) | ||
| 1882 | return t4_sge_intr_msix; | ||
| 1883 | if (adap->flags & USING_MSI) | ||
| 1884 | return t4_intr_msi; | ||
| 1885 | return t4_intr_intx; | ||
| 1886 | } | ||
| 1887 | |||
| 1888 | static void sge_rx_timer_cb(unsigned long data) | ||
| 1889 | { | ||
| 1890 | unsigned long m; | ||
| 1891 | unsigned int i, cnt[2]; | ||
| 1892 | struct adapter *adap = (struct adapter *)data; | ||
| 1893 | struct sge *s = &adap->sge; | ||
| 1894 | |||
| 1895 | for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) | ||
| 1896 | for (m = s->starving_fl[i]; m; m &= m - 1) { | ||
| 1897 | struct sge_eth_rxq *rxq; | ||
| 1898 | unsigned int id = __ffs(m) + i * BITS_PER_LONG; | ||
| 1899 | struct sge_fl *fl = s->egr_map[id]; | ||
| 1900 | |||
| 1901 | clear_bit(id, s->starving_fl); | ||
| 1902 | smp_mb__after_clear_bit(); | ||
| 1903 | |||
| 1904 | if (fl_starving(fl)) { | ||
| 1905 | rxq = container_of(fl, struct sge_eth_rxq, fl); | ||
| 1906 | if (napi_reschedule(&rxq->rspq.napi)) | ||
| 1907 | fl->starving++; | ||
| 1908 | else | ||
| 1909 | set_bit(id, s->starving_fl); | ||
| 1910 | } | ||
| 1911 | } | ||
| 1912 | |||
| 1913 | t4_write_reg(adap, SGE_DEBUG_INDEX, 13); | ||
| 1914 | cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH); | ||
| 1915 | cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); | ||
| 1916 | |||
| 1917 | for (i = 0; i < 2; i++) | ||
| 1918 | if (cnt[i] >= s->starve_thres) { | ||
| 1919 | if (s->idma_state[i] || cnt[i] == 0xffffffff) | ||
| 1920 | continue; | ||
| 1921 | s->idma_state[i] = 1; | ||
| 1922 | t4_write_reg(adap, SGE_DEBUG_INDEX, 11); | ||
| 1923 | m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16); | ||
| 1924 | dev_warn(adap->pdev_dev, | ||
| 1925 | "SGE idma%u starvation detected for " | ||
| 1926 | "queue %lu\n", i, m & 0xffff); | ||
| 1927 | } else if (s->idma_state[i]) | ||
| 1928 | s->idma_state[i] = 0; | ||
| 1929 | |||
| 1930 | mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); | ||
| 1931 | } | ||
| 1932 | |||
| 1933 | static void sge_tx_timer_cb(unsigned long data) | ||
| 1934 | { | ||
| 1935 | unsigned long m; | ||
| 1936 | unsigned int i, budget; | ||
| 1937 | struct adapter *adap = (struct adapter *)data; | ||
| 1938 | struct sge *s = &adap->sge; | ||
| 1939 | |||
| 1940 | for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++) | ||
| 1941 | for (m = s->txq_maperr[i]; m; m &= m - 1) { | ||
| 1942 | unsigned long id = __ffs(m) + i * BITS_PER_LONG; | ||
| 1943 | struct sge_ofld_txq *txq = s->egr_map[id]; | ||
| 1944 | |||
| 1945 | clear_bit(id, s->txq_maperr); | ||
| 1946 | tasklet_schedule(&txq->qresume_tsk); | ||
| 1947 | } | ||
| 1948 | |||
| 1949 | budget = MAX_TIMER_TX_RECLAIM; | ||
| 1950 | i = s->ethtxq_rover; | ||
| 1951 | do { | ||
| 1952 | struct sge_eth_txq *q = &s->ethtxq[i]; | ||
| 1953 | |||
| 1954 | if (q->q.in_use && | ||
| 1955 | time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && | ||
| 1956 | __netif_tx_trylock(q->txq)) { | ||
| 1957 | int avail = reclaimable(&q->q); | ||
| 1958 | |||
| 1959 | if (avail) { | ||
| 1960 | if (avail > budget) | ||
| 1961 | avail = budget; | ||
| 1962 | |||
| 1963 | free_tx_desc(adap, &q->q, avail, true); | ||
| 1964 | q->q.in_use -= avail; | ||
| 1965 | budget -= avail; | ||
| 1966 | } | ||
| 1967 | __netif_tx_unlock(q->txq); | ||
| 1968 | } | ||
| 1969 | |||
| 1970 | if (++i >= s->ethqsets) | ||
| 1971 | i = 0; | ||
| 1972 | } while (budget && i != s->ethtxq_rover); | ||
| 1973 | s->ethtxq_rover = i; | ||
| 1974 | mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); | ||
| 1975 | } | ||
| 1976 | |||
| 1977 | int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, | ||
| 1978 | struct net_device *dev, int intr_idx, | ||
| 1979 | struct sge_fl *fl, rspq_handler_t hnd) | ||
| 1980 | { | ||
| 1981 | int ret, flsz = 0; | ||
| 1982 | struct fw_iq_cmd c; | ||
| 1983 | struct port_info *pi = netdev_priv(dev); | ||
| 1984 | |||
| 1985 | /* Size needs to be multiple of 16, including status entry. */ | ||
| 1986 | iq->size = roundup(iq->size, 16); | ||
| 1987 | |||
| 1988 | iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, | ||
| 1989 | &iq->phys_addr, NULL, 0); | ||
| 1990 | if (!iq->desc) | ||
| 1991 | return -ENOMEM; | ||
| 1992 | |||
| 1993 | memset(&c, 0, sizeof(c)); | ||
| 1994 | c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | | ||
| 1995 | FW_CMD_WRITE | FW_CMD_EXEC | | ||
| 1996 | FW_IQ_CMD_PFN(0) | FW_IQ_CMD_VFN(0)); | ||
| 1997 | c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) | | ||
| 1998 | FW_LEN16(c)); | ||
| 1999 | c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | | ||
| 2000 | FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) | | ||
| 2001 | FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) | | ||
| 2002 | FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx : | ||
| 2003 | -intr_idx - 1)); | ||
| 2004 | c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) | | ||
| 2005 | FW_IQ_CMD_IQGTSMODE | | ||
| 2006 | FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) | | ||
| 2007 | FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4)); | ||
| 2008 | c.iqsize = htons(iq->size); | ||
| 2009 | c.iqaddr = cpu_to_be64(iq->phys_addr); | ||
| 2010 | |||
| 2011 | if (fl) { | ||
| 2012 | fl->size = roundup(fl->size, 8); | ||
| 2013 | fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), | ||
| 2014 | sizeof(struct rx_sw_desc), &fl->addr, | ||
| 2015 | &fl->sdesc, STAT_LEN); | ||
| 2016 | if (!fl->desc) | ||
| 2017 | goto fl_nomem; | ||
| 2018 | |||
| 2019 | flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc); | ||
| 2020 | c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN | | ||
| 2021 | FW_IQ_CMD_FL0PADEN); | ||
| 2022 | c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) | | ||
| 2023 | FW_IQ_CMD_FL0FBMAX(3)); | ||
| 2024 | c.fl0size = htons(flsz); | ||
| 2025 | c.fl0addr = cpu_to_be64(fl->addr); | ||
| 2026 | } | ||
| 2027 | |||
| 2028 | ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); | ||
| 2029 | if (ret) | ||
| 2030 | goto err; | ||
| 2031 | |||
| 2032 | netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); | ||
| 2033 | iq->cur_desc = iq->desc; | ||
| 2034 | iq->cidx = 0; | ||
| 2035 | iq->gen = 1; | ||
| 2036 | iq->next_intr_params = iq->intr_params; | ||
| 2037 | iq->cntxt_id = ntohs(c.iqid); | ||
| 2038 | iq->abs_id = ntohs(c.physiqid); | ||
| 2039 | iq->size--; /* subtract status entry */ | ||
| 2040 | iq->adap = adap; | ||
| 2041 | iq->netdev = dev; | ||
| 2042 | iq->handler = hnd; | ||
| 2043 | |||
| 2044 | /* set offset to -1 to distinguish ingress queues without FL */ | ||
| 2045 | iq->offset = fl ? 0 : -1; | ||
| 2046 | |||
| 2047 | adap->sge.ingr_map[iq->cntxt_id] = iq; | ||
| 2048 | |||
| 2049 | if (fl) { | ||
| 2050 | fl->cntxt_id = ntohs(c.fl0id); | ||
| 2051 | fl->avail = fl->pend_cred = 0; | ||
| 2052 | fl->pidx = fl->cidx = 0; | ||
| 2053 | fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; | ||
| 2054 | adap->sge.egr_map[fl->cntxt_id] = fl; | ||
| 2055 | refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); | ||
| 2056 | } | ||
| 2057 | return 0; | ||
| 2058 | |||
| 2059 | fl_nomem: | ||
| 2060 | ret = -ENOMEM; | ||
| 2061 | err: | ||
| 2062 | if (iq->desc) { | ||
| 2063 | dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, | ||
| 2064 | iq->desc, iq->phys_addr); | ||
| 2065 | iq->desc = NULL; | ||
| 2066 | } | ||
| 2067 | if (fl && fl->desc) { | ||
| 2068 | kfree(fl->sdesc); | ||
| 2069 | fl->sdesc = NULL; | ||
| 2070 | dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), | ||
| 2071 | fl->desc, fl->addr); | ||
| 2072 | fl->desc = NULL; | ||
| 2073 | } | ||
| 2074 | return ret; | ||
| 2075 | } | ||
| 2076 | |||
| 2077 | static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) | ||
| 2078 | { | ||
| 2079 | q->in_use = 0; | ||
| 2080 | q->cidx = q->pidx = 0; | ||
| 2081 | q->stops = q->restarts = 0; | ||
| 2082 | q->stat = (void *)&q->desc[q->size]; | ||
| 2083 | q->cntxt_id = id; | ||
| 2084 | adap->sge.egr_map[id] = q; | ||
| 2085 | } | ||
| 2086 | |||
| 2087 | int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, | ||
| 2088 | struct net_device *dev, struct netdev_queue *netdevq, | ||
| 2089 | unsigned int iqid) | ||
| 2090 | { | ||
| 2091 | int ret, nentries; | ||
| 2092 | struct fw_eq_eth_cmd c; | ||
| 2093 | struct port_info *pi = netdev_priv(dev); | ||
| 2094 | |||
| 2095 | /* Add status entries */ | ||
| 2096 | nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); | ||
| 2097 | |||
| 2098 | txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, | ||
| 2099 | sizeof(struct tx_desc), sizeof(struct tx_sw_desc), | ||
| 2100 | &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); | ||
| 2101 | if (!txq->q.desc) | ||
| 2102 | return -ENOMEM; | ||
| 2103 | |||
| 2104 | memset(&c, 0, sizeof(c)); | ||
| 2105 | c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | | ||
| 2106 | FW_CMD_WRITE | FW_CMD_EXEC | | ||
| 2107 | FW_EQ_ETH_CMD_PFN(0) | FW_EQ_ETH_CMD_VFN(0)); | ||
| 2108 | c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | | ||
| 2109 | FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); | ||
| 2110 | c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid)); | ||
| 2111 | c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) | | ||
| 2112 | FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | | ||
| 2113 | FW_EQ_ETH_CMD_IQID(iqid)); | ||
| 2114 | c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) | | ||
| 2115 | FW_EQ_ETH_CMD_FBMAX(3) | | ||
| 2116 | FW_EQ_ETH_CMD_CIDXFTHRESH(5) | | ||
| 2117 | FW_EQ_ETH_CMD_EQSIZE(nentries)); | ||
| 2118 | c.eqaddr = cpu_to_be64(txq->q.phys_addr); | ||
| 2119 | |||
| 2120 | ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); | ||
| 2121 | if (ret) { | ||
| 2122 | kfree(txq->q.sdesc); | ||
| 2123 | txq->q.sdesc = NULL; | ||
| 2124 | dma_free_coherent(adap->pdev_dev, | ||
| 2125 | nentries * sizeof(struct tx_desc), | ||
| 2126 | txq->q.desc, txq->q.phys_addr); | ||
| 2127 | txq->q.desc = NULL; | ||
| 2128 | return ret; | ||
| 2129 | } | ||
| 2130 | |||
| 2131 | init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd))); | ||
| 2132 | txq->txq = netdevq; | ||
| 2133 | txq->tso = txq->tx_cso = txq->vlan_ins = 0; | ||
| 2134 | txq->mapping_err = 0; | ||
| 2135 | return 0; | ||
| 2136 | } | ||
| 2137 | |||
| 2138 | int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, | ||
| 2139 | struct net_device *dev, unsigned int iqid, | ||
| 2140 | unsigned int cmplqid) | ||
| 2141 | { | ||
| 2142 | int ret, nentries; | ||
| 2143 | struct fw_eq_ctrl_cmd c; | ||
| 2144 | struct port_info *pi = netdev_priv(dev); | ||
| 2145 | |||
| 2146 | /* Add status entries */ | ||
| 2147 | nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); | ||
| 2148 | |||
| 2149 | txq->q.desc = alloc_ring(adap->pdev_dev, nentries, | ||
| 2150 | sizeof(struct tx_desc), 0, &txq->q.phys_addr, | ||
| 2151 | NULL, 0); | ||
| 2152 | if (!txq->q.desc) | ||
| 2153 | return -ENOMEM; | ||
| 2154 | |||
| 2155 | c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | | ||
| 2156 | FW_CMD_WRITE | FW_CMD_EXEC | | ||
| 2157 | FW_EQ_CTRL_CMD_PFN(0) | FW_EQ_CTRL_CMD_VFN(0)); | ||
| 2158 | c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC | | ||
| 2159 | FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); | ||
| 2160 | c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid)); | ||
| 2161 | c.physeqid_pkd = htonl(0); | ||
| 2162 | c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) | | ||
| 2163 | FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) | | ||
| 2164 | FW_EQ_CTRL_CMD_IQID(iqid)); | ||
| 2165 | c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) | | ||
| 2166 | FW_EQ_CTRL_CMD_FBMAX(3) | | ||
| 2167 | FW_EQ_CTRL_CMD_CIDXFTHRESH(5) | | ||
| 2168 | FW_EQ_CTRL_CMD_EQSIZE(nentries)); | ||
| 2169 | c.eqaddr = cpu_to_be64(txq->q.phys_addr); | ||
| 2170 | |||
| 2171 | ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); | ||
| 2172 | if (ret) { | ||
| 2173 | dma_free_coherent(adap->pdev_dev, | ||
| 2174 | nentries * sizeof(struct tx_desc), | ||
| 2175 | txq->q.desc, txq->q.phys_addr); | ||
| 2176 | txq->q.desc = NULL; | ||
| 2177 | return ret; | ||
| 2178 | } | ||
| 2179 | |||
| 2180 | init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid))); | ||
| 2181 | txq->adap = adap; | ||
| 2182 | skb_queue_head_init(&txq->sendq); | ||
| 2183 | tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq); | ||
| 2184 | txq->full = 0; | ||
| 2185 | return 0; | ||
| 2186 | } | ||
| 2187 | |||
| 2188 | int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, | ||
| 2189 | struct net_device *dev, unsigned int iqid) | ||
| 2190 | { | ||
| 2191 | int ret, nentries; | ||
| 2192 | struct fw_eq_ofld_cmd c; | ||
| 2193 | struct port_info *pi = netdev_priv(dev); | ||
| 2194 | |||
| 2195 | /* Add status entries */ | ||
| 2196 | nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); | ||
| 2197 | |||
| 2198 | txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, | ||
| 2199 | sizeof(struct tx_desc), sizeof(struct tx_sw_desc), | ||
| 2200 | &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); | ||
| 2201 | if (!txq->q.desc) | ||
| 2202 | return -ENOMEM; | ||
| 2203 | |||
| 2204 | memset(&c, 0, sizeof(c)); | ||
| 2205 | c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | | ||
| 2206 | FW_CMD_WRITE | FW_CMD_EXEC | | ||
| 2207 | FW_EQ_OFLD_CMD_PFN(0) | FW_EQ_OFLD_CMD_VFN(0)); | ||
| 2208 | c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC | | ||
| 2209 | FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); | ||
| 2210 | c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) | | ||
| 2211 | FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) | | ||
| 2212 | FW_EQ_OFLD_CMD_IQID(iqid)); | ||
| 2213 | c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) | | ||
| 2214 | FW_EQ_OFLD_CMD_FBMAX(3) | | ||
| 2215 | FW_EQ_OFLD_CMD_CIDXFTHRESH(5) | | ||
| 2216 | FW_EQ_OFLD_CMD_EQSIZE(nentries)); | ||
| 2217 | c.eqaddr = cpu_to_be64(txq->q.phys_addr); | ||
| 2218 | |||
| 2219 | ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); | ||
| 2220 | if (ret) { | ||
| 2221 | kfree(txq->q.sdesc); | ||
| 2222 | txq->q.sdesc = NULL; | ||
| 2223 | dma_free_coherent(adap->pdev_dev, | ||
| 2224 | nentries * sizeof(struct tx_desc), | ||
| 2225 | txq->q.desc, txq->q.phys_addr); | ||
| 2226 | txq->q.desc = NULL; | ||
| 2227 | return ret; | ||
| 2228 | } | ||
| 2229 | |||
| 2230 | init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd))); | ||
| 2231 | txq->adap = adap; | ||
| 2232 | skb_queue_head_init(&txq->sendq); | ||
| 2233 | tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq); | ||
| 2234 | txq->full = 0; | ||
| 2235 | txq->mapping_err = 0; | ||
| 2236 | return 0; | ||
| 2237 | } | ||
| 2238 | |||
| 2239 | static void free_txq(struct adapter *adap, struct sge_txq *q) | ||
| 2240 | { | ||
| 2241 | dma_free_coherent(adap->pdev_dev, | ||
| 2242 | q->size * sizeof(struct tx_desc) + STAT_LEN, | ||
| 2243 | q->desc, q->phys_addr); | ||
| 2244 | q->cntxt_id = 0; | ||
| 2245 | q->sdesc = NULL; | ||
| 2246 | q->desc = NULL; | ||
| 2247 | } | ||
| 2248 | |||
| 2249 | static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, | ||
| 2250 | struct sge_fl *fl) | ||
| 2251 | { | ||
| 2252 | unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; | ||
| 2253 | |||
| 2254 | adap->sge.ingr_map[rq->cntxt_id] = NULL; | ||
| 2255 | t4_iq_free(adap, 0, 0, 0, FW_IQ_TYPE_FL_INT_CAP, rq->cntxt_id, fl_id, | ||
| 2256 | 0xffff); | ||
| 2257 | dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, | ||
| 2258 | rq->desc, rq->phys_addr); | ||
| 2259 | netif_napi_del(&rq->napi); | ||
| 2260 | rq->netdev = NULL; | ||
| 2261 | rq->cntxt_id = rq->abs_id = 0; | ||
| 2262 | rq->desc = NULL; | ||
| 2263 | |||
| 2264 | if (fl) { | ||
| 2265 | free_rx_bufs(adap, fl, fl->avail); | ||
| 2266 | dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN, | ||
| 2267 | fl->desc, fl->addr); | ||
| 2268 | kfree(fl->sdesc); | ||
| 2269 | fl->sdesc = NULL; | ||
| 2270 | fl->cntxt_id = 0; | ||
| 2271 | fl->desc = NULL; | ||
| 2272 | } | ||
| 2273 | } | ||
| 2274 | |||
| 2275 | /** | ||
| 2276 | * t4_free_sge_resources - free SGE resources | ||
| 2277 | * @adap: the adapter | ||
| 2278 | * | ||
| 2279 | * Frees resources used by the SGE queue sets. | ||
| 2280 | */ | ||
| 2281 | void t4_free_sge_resources(struct adapter *adap) | ||
| 2282 | { | ||
| 2283 | int i; | ||
| 2284 | struct sge_eth_rxq *eq = adap->sge.ethrxq; | ||
| 2285 | struct sge_eth_txq *etq = adap->sge.ethtxq; | ||
| 2286 | struct sge_ofld_rxq *oq = adap->sge.ofldrxq; | ||
| 2287 | |||
| 2288 | /* clean up Ethernet Tx/Rx queues */ | ||
| 2289 | for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) { | ||
| 2290 | if (eq->rspq.desc) | ||
| 2291 | free_rspq_fl(adap, &eq->rspq, &eq->fl); | ||
| 2292 | if (etq->q.desc) { | ||
| 2293 | t4_eth_eq_free(adap, 0, 0, 0, etq->q.cntxt_id); | ||
| 2294 | free_tx_desc(adap, &etq->q, etq->q.in_use, true); | ||
| 2295 | kfree(etq->q.sdesc); | ||
| 2296 | free_txq(adap, &etq->q); | ||
| 2297 | } | ||
| 2298 | } | ||
| 2299 | |||
| 2300 | /* clean up RDMA and iSCSI Rx queues */ | ||
| 2301 | for (i = 0; i < adap->sge.ofldqsets; i++, oq++) { | ||
| 2302 | if (oq->rspq.desc) | ||
| 2303 | free_rspq_fl(adap, &oq->rspq, &oq->fl); | ||
| 2304 | } | ||
| 2305 | for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) { | ||
| 2306 | if (oq->rspq.desc) | ||
| 2307 | free_rspq_fl(adap, &oq->rspq, &oq->fl); | ||
| 2308 | } | ||
| 2309 | |||
| 2310 | /* clean up offload Tx queues */ | ||
| 2311 | for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) { | ||
| 2312 | struct sge_ofld_txq *q = &adap->sge.ofldtxq[i]; | ||
| 2313 | |||
| 2314 | if (q->q.desc) { | ||
| 2315 | tasklet_kill(&q->qresume_tsk); | ||
| 2316 | t4_ofld_eq_free(adap, 0, 0, 0, q->q.cntxt_id); | ||
| 2317 | free_tx_desc(adap, &q->q, q->q.in_use, false); | ||
| 2318 | kfree(q->q.sdesc); | ||
| 2319 | __skb_queue_purge(&q->sendq); | ||
| 2320 | free_txq(adap, &q->q); | ||
| 2321 | } | ||
| 2322 | } | ||
| 2323 | |||
| 2324 | /* clean up control Tx queues */ | ||
| 2325 | for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { | ||
| 2326 | struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; | ||
| 2327 | |||
| 2328 | if (cq->q.desc) { | ||
| 2329 | tasklet_kill(&cq->qresume_tsk); | ||
| 2330 | t4_ctrl_eq_free(adap, 0, 0, 0, cq->q.cntxt_id); | ||
| 2331 | __skb_queue_purge(&cq->sendq); | ||
| 2332 | free_txq(adap, &cq->q); | ||
| 2333 | } | ||
| 2334 | } | ||
| 2335 | |||
| 2336 | if (adap->sge.fw_evtq.desc) | ||
| 2337 | free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); | ||
| 2338 | |||
| 2339 | if (adap->sge.intrq.desc) | ||
| 2340 | free_rspq_fl(adap, &adap->sge.intrq, NULL); | ||
| 2341 | |||
| 2342 | /* clear the reverse egress queue map */ | ||
| 2343 | memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map)); | ||
| 2344 | } | ||
| 2345 | |||
| 2346 | void t4_sge_start(struct adapter *adap) | ||
| 2347 | { | ||
| 2348 | adap->sge.ethtxq_rover = 0; | ||
| 2349 | mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); | ||
| 2350 | mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); | ||
| 2351 | } | ||
| 2352 | |||
| 2353 | /** | ||
| 2354 | * t4_sge_stop - disable SGE operation | ||
| 2355 | * @adap: the adapter | ||
| 2356 | * | ||
| 2357 | * Stop tasklets and timers associated with the DMA engine. Note that | ||
| 2358 | * this is effective only if measures have been taken to disable any HW | ||
| 2359 | * events that may restart them. | ||
| 2360 | */ | ||
| 2361 | void t4_sge_stop(struct adapter *adap) | ||
| 2362 | { | ||
| 2363 | int i; | ||
| 2364 | struct sge *s = &adap->sge; | ||
| 2365 | |||
| 2366 | if (in_interrupt()) /* actions below require waiting */ | ||
| 2367 | return; | ||
| 2368 | |||
| 2369 | if (s->rx_timer.function) | ||
| 2370 | del_timer_sync(&s->rx_timer); | ||
| 2371 | if (s->tx_timer.function) | ||
| 2372 | del_timer_sync(&s->tx_timer); | ||
| 2373 | |||
| 2374 | for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) { | ||
| 2375 | struct sge_ofld_txq *q = &s->ofldtxq[i]; | ||
| 2376 | |||
| 2377 | if (q->q.desc) | ||
| 2378 | tasklet_kill(&q->qresume_tsk); | ||
| 2379 | } | ||
| 2380 | for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { | ||
| 2381 | struct sge_ctrl_txq *cq = &s->ctrlq[i]; | ||
| 2382 | |||
| 2383 | if (cq->q.desc) | ||
| 2384 | tasklet_kill(&cq->qresume_tsk); | ||
| 2385 | } | ||
| 2386 | } | ||
| 2387 | |||
| 2388 | /** | ||
| 2389 | * t4_sge_init - initialize SGE | ||
| 2390 | * @adap: the adapter | ||
| 2391 | * | ||
| 2392 | * Performs SGE initialization needed every time after a chip reset. | ||
| 2393 | * We do not initialize any of the queues here, instead the driver | ||
| 2394 | * top-level must request them individually. | ||
| 2395 | */ | ||
| 2396 | void t4_sge_init(struct adapter *adap) | ||
| 2397 | { | ||
| 2398 | struct sge *s = &adap->sge; | ||
| 2399 | unsigned int fl_align_log = ilog2(FL_ALIGN); | ||
| 2400 | |||
| 2401 | t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK | | ||
| 2402 | INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE, | ||
| 2403 | INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) | | ||
| 2404 | RXPKTCPLMODE | | ||
| 2405 | (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0)); | ||
| 2406 | t4_set_reg_field(adap, SGE_HOST_PAGE_SIZE, HOSTPAGESIZEPF0_MASK, | ||
| 2407 | HOSTPAGESIZEPF0(PAGE_SHIFT - 10)); | ||
| 2408 | t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE); | ||
| 2409 | #if FL_PG_ORDER > 0 | ||
| 2410 | t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER); | ||
| 2411 | #endif | ||
| 2412 | t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD, | ||
| 2413 | THRESHOLD_0(s->counter_val[0]) | | ||
| 2414 | THRESHOLD_1(s->counter_val[1]) | | ||
| 2415 | THRESHOLD_2(s->counter_val[2]) | | ||
| 2416 | THRESHOLD_3(s->counter_val[3])); | ||
| 2417 | t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1, | ||
| 2418 | TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) | | ||
| 2419 | TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1]))); | ||
| 2420 | t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3, | ||
| 2421 | TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) | | ||
| 2422 | TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3]))); | ||
| 2423 | t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5, | ||
| 2424 | TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) | | ||
| 2425 | TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5]))); | ||
| 2426 | setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); | ||
| 2427 | setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); | ||
| 2428 | s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */ | ||
| 2429 | s->idma_state[0] = s->idma_state[1] = 0; | ||
| 2430 | spin_lock_init(&s->intrq_lock); | ||
| 2431 | } | ||
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c new file mode 100644 index 000000000000..a814a3afe123 --- /dev/null +++ b/drivers/net/cxgb4/t4_hw.c | |||
| @@ -0,0 +1,3131 @@ | |||
| 1 | /* | ||
| 2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. | ||
| 5 | * | ||
| 6 | * This software is available to you under a choice of one of two | ||
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 8 | * General Public License (GPL) Version 2, available from the file | ||
| 9 | * COPYING in the main directory of this source tree, or the | ||
| 10 | * OpenIB.org BSD license below: | ||
| 11 | * | ||
| 12 | * Redistribution and use in source and binary forms, with or | ||
| 13 | * without modification, are permitted provided that the following | ||
| 14 | * conditions are met: | ||
| 15 | * | ||
| 16 | * - Redistributions of source code must retain the above | ||
| 17 | * copyright notice, this list of conditions and the following | ||
| 18 | * disclaimer. | ||
| 19 | * | ||
| 20 | * - Redistributions in binary form must reproduce the above | ||
| 21 | * copyright notice, this list of conditions and the following | ||
| 22 | * disclaimer in the documentation and/or other materials | ||
| 23 | * provided with the distribution. | ||
| 24 | * | ||
| 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 32 | * SOFTWARE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #include <linux/init.h> | ||
| 36 | #include <linux/delay.h> | ||
| 37 | #include "cxgb4.h" | ||
| 38 | #include "t4_regs.h" | ||
| 39 | #include "t4fw_api.h" | ||
| 40 | |||
| 41 | /** | ||
| 42 | * t4_wait_op_done_val - wait until an operation is completed | ||
| 43 | * @adapter: the adapter performing the operation | ||
| 44 | * @reg: the register to check for completion | ||
| 45 | * @mask: a single-bit field within @reg that indicates completion | ||
| 46 | * @polarity: the value of the field when the operation is completed | ||
| 47 | * @attempts: number of check iterations | ||
| 48 | * @delay: delay in usecs between iterations | ||
| 49 | * @valp: where to store the value of the register at completion time | ||
| 50 | * | ||
| 51 | * Wait until an operation is completed by checking a bit in a register | ||
| 52 | * up to @attempts times. If @valp is not NULL the value of the register | ||
| 53 | * at the time it indicated completion is stored there. Returns 0 if the | ||
| 54 | * operation completes and -EAGAIN otherwise. | ||
| 55 | */ | ||
| 56 | int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, | ||
| 57 | int polarity, int attempts, int delay, u32 *valp) | ||
| 58 | { | ||
| 59 | while (1) { | ||
| 60 | u32 val = t4_read_reg(adapter, reg); | ||
| 61 | |||
| 62 | if (!!(val & mask) == polarity) { | ||
| 63 | if (valp) | ||
| 64 | *valp = val; | ||
| 65 | return 0; | ||
| 66 | } | ||
| 67 | if (--attempts == 0) | ||
| 68 | return -EAGAIN; | ||
| 69 | if (delay) | ||
| 70 | udelay(delay); | ||
| 71 | } | ||
| 72 | } | ||
| 73 | |||
| 74 | static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, | ||
| 75 | int polarity, int attempts, int delay) | ||
| 76 | { | ||
| 77 | return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, | ||
| 78 | delay, NULL); | ||
| 79 | } | ||
| 80 | |||
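These two helpers capture the polling idiom used throughout the rest of the file: read @reg until the bit selected by @mask matches @polarity, giving up after @attempts iterations. A hedged illustration only, mirroring what sf1_read() further down does with the serial-flash registers (SF_OP and BUSY come from t4_regs.h, SF_ATTEMPTS from the flash constants defined later in this file):

/* Sketch: wait up to SF_ATTEMPTS polls, 5 us apart, for the serial
 * flash to go idle, i.e. for the BUSY bit of SF_OP to read back as 0. */
static int example_wait_sf_idle(struct adapter *adap)
{
	return t4_wait_op_done(adap, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
}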
| 81 | /** | ||
| 82 | * t4_set_reg_field - set a register field to a value | ||
| 83 | * @adapter: the adapter to program | ||
| 84 | * @addr: the register address | ||
| 85 | * @mask: specifies the portion of the register to modify | ||
| 86 | * @val: the new value for the register field | ||
| 87 | * | ||
| 88 | * Sets a register field specified by the supplied mask to the | ||
| 89 | * given value. | ||
| 90 | */ | ||
| 91 | void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, | ||
| 92 | u32 val) | ||
| 93 | { | ||
| 94 | u32 v = t4_read_reg(adapter, addr) & ~mask; | ||
| 95 | |||
| 96 | t4_write_reg(adapter, addr, v | val); | ||
| 97 | (void) t4_read_reg(adapter, addr); /* flush */ | ||
| 98 | } | ||
| 99 | |||
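t4_set_reg_field() is the usual read-modify-write helper: only the bits covered by @mask change, and the trailing read flushes the write. A minimal sketch, with EXAMPLE_REG as a hypothetical stand-in for a real register address from t4_regs.h:

/* Sketch: program the low byte of the hypothetical EXAMPLE_REG to 0x5
 * while leaving all other bits of the register untouched. */
static void example_set_low_byte(struct adapter *adap)
{
	t4_set_reg_field(adap, EXAMPLE_REG, 0xffU, 0x5);
}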
| 100 | /** | ||
| 101 | * t4_read_indirect - read indirectly addressed registers | ||
| 102 | * @adap: the adapter | ||
| 103 | * @addr_reg: register holding the indirect address | ||
| 104 | * @data_reg: register holding the value of the indirect register | ||
| 105 | * @vals: where the read register values are stored | ||
| 106 | * @nregs: how many indirect registers to read | ||
| 107 | * @start_idx: index of first indirect register to read | ||
| 108 | * | ||
| 109 | * Reads registers that are accessed indirectly through an address/data | ||
| 110 | * register pair. | ||
| 111 | */ | ||
| 112 | void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, | ||
| 113 | unsigned int data_reg, u32 *vals, unsigned int nregs, | ||
| 114 | unsigned int start_idx) | ||
| 115 | { | ||
| 116 | while (nregs--) { | ||
| 117 | t4_write_reg(adap, addr_reg, start_idx); | ||
| 118 | *vals++ = t4_read_reg(adap, data_reg); | ||
| 119 | start_idx++; | ||
| 120 | } | ||
| 121 | } | ||
| 122 | |||
| 123 | /** | ||
| 124 | * t4_write_indirect - write indirectly addressed registers | ||
| 125 | * @adap: the adapter | ||
| 126 | * @addr_reg: register holding the indirect addresses | ||
| 127 | * @data_reg: register holding the value for the indirect registers | ||
| 128 | * @vals: values to write | ||
| 129 | * @nregs: how many indirect registers to write | ||
| 130 | * @start_idx: address of first indirect register to write | ||
| 131 | * | ||
| 132 | * Writes a sequential block of registers that are accessed indirectly | ||
| 133 | * through an address/data register pair. | ||
| 134 | */ | ||
| 135 | void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, | ||
| 136 | unsigned int data_reg, const u32 *vals, | ||
| 137 | unsigned int nregs, unsigned int start_idx) | ||
| 138 | { | ||
| 139 | while (nregs--) { | ||
| 140 | t4_write_reg(adap, addr_reg, start_idx++); | ||
| 141 | t4_write_reg(adap, data_reg, *vals++); | ||
| 142 | } | ||
| 143 | } | ||
| 144 | |||
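Both indirect helpers implement the same address/data pair protocol: write the index into @addr_reg, then read or write the value through @data_reg, one register at a time. A hedged usage sketch; EXAMPLE_ADDR_REG and EXAMPLE_DATA_REG are hypothetical placeholders for a real pair from t4_regs.h:

/* Sketch: dump eight consecutive indirect registers, starting at
 * index 0, into 'buf' (which must hold at least eight u32 values). */
static void example_dump_indirect(struct adapter *adap, u32 *buf)
{
	t4_read_indirect(adap, EXAMPLE_ADDR_REG, EXAMPLE_DATA_REG,
			 buf, 8, 0);
}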
| 145 | /* | ||
| 146 | * Get the reply to a mailbox command and store it in @rpl in big-endian order. | ||
| 147 | */ | ||
| 148 | static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, | ||
| 149 | u32 mbox_addr) | ||
| 150 | { | ||
| 151 | for ( ; nflit; nflit--, mbox_addr += 8) | ||
| 152 | *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); | ||
| 153 | } | ||
| 154 | |||
| 155 | /* | ||
| 156 | * Handle a FW assertion reported in a mailbox. | ||
| 157 | */ | ||
| 158 | static void fw_asrt(struct adapter *adap, u32 mbox_addr) | ||
| 159 | { | ||
| 160 | struct fw_debug_cmd asrt; | ||
| 161 | |||
| 162 | get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr); | ||
| 163 | dev_alert(adap->pdev_dev, | ||
| 164 | "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", | ||
| 165 | asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line), | ||
| 166 | ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y)); | ||
| 167 | } | ||
| 168 | |||
| 169 | static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg) | ||
| 170 | { | ||
| 171 | dev_err(adap->pdev_dev, | ||
| 172 | "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox, | ||
| 173 | (unsigned long long)t4_read_reg64(adap, data_reg), | ||
| 174 | (unsigned long long)t4_read_reg64(adap, data_reg + 8), | ||
| 175 | (unsigned long long)t4_read_reg64(adap, data_reg + 16), | ||
| 176 | (unsigned long long)t4_read_reg64(adap, data_reg + 24), | ||
| 177 | (unsigned long long)t4_read_reg64(adap, data_reg + 32), | ||
| 178 | (unsigned long long)t4_read_reg64(adap, data_reg + 40), | ||
| 179 | (unsigned long long)t4_read_reg64(adap, data_reg + 48), | ||
| 180 | (unsigned long long)t4_read_reg64(adap, data_reg + 56)); | ||
| 181 | } | ||
| 182 | |||
| 183 | /** | ||
| 184 | * t4_wr_mbox_meat - send a command to FW through the given mailbox | ||
| 185 | * @adap: the adapter | ||
| 186 | * @mbox: index of the mailbox to use | ||
| 187 | * @cmd: the command to write | ||
| 188 | * @size: command length in bytes | ||
| 189 | * @rpl: where to optionally store the reply | ||
| 190 | * @sleep_ok: if true we may sleep while awaiting command completion | ||
| 191 | * | ||
| 192 | * Sends the given command to FW through the selected mailbox and waits | ||
| 193 | * for the FW to execute the command. If @rpl is not %NULL it is used to | ||
| 194 | * store the FW's reply to the command. The command and its optional | ||
| 195 | * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms | ||
| 196 | * to respond. @sleep_ok determines whether we may sleep while awaiting | ||
| 197 | * the response. If sleeping is allowed we use progressive backoff; | ||
| 198 | * otherwise we spin. | ||
| 199 | * | ||
| 200 | * The return value is 0 on success or a negative errno on failure. A | ||
| 201 | * failure can happen either because we are not able to execute the | ||
| 202 | * command or FW executes it but signals an error. In the latter case | ||
| 203 | * the return value is the error code indicated by FW (negated). | ||
| 204 | */ | ||
| 205 | int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, | ||
| 206 | void *rpl, bool sleep_ok) | ||
| 207 | { | ||
| 208 | static int delay[] = { | ||
| 209 | 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 | ||
| 210 | }; | ||
| 211 | |||
| 212 | u32 v; | ||
| 213 | u64 res; | ||
| 214 | int i, ms, delay_idx; | ||
| 215 | const __be64 *p = cmd; | ||
| 216 | u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA); | ||
| 217 | u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL); | ||
| 218 | |||
| 219 | if ((size & 15) || size > MBOX_LEN) | ||
| 220 | return -EINVAL; | ||
| 221 | |||
| 222 | v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); | ||
| 223 | for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) | ||
| 224 | v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); | ||
| 225 | |||
| 226 | if (v != MBOX_OWNER_DRV) | ||
| 227 | return v ? -EBUSY : -ETIMEDOUT; | ||
| 228 | |||
| 229 | for (i = 0; i < size; i += 8) | ||
| 230 | t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); | ||
| 231 | |||
| 232 | t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); | ||
| 233 | t4_read_reg(adap, ctl_reg); /* flush write */ | ||
| 234 | |||
| 235 | delay_idx = 0; | ||
| 236 | ms = delay[0]; | ||
| 237 | |||
| 238 | for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { | ||
| 239 | if (sleep_ok) { | ||
| 240 | ms = delay[delay_idx]; /* last element may repeat */ | ||
| 241 | if (delay_idx < ARRAY_SIZE(delay) - 1) | ||
| 242 | delay_idx++; | ||
| 243 | msleep(ms); | ||
| 244 | } else | ||
| 245 | mdelay(ms); | ||
| 246 | |||
| 247 | v = t4_read_reg(adap, ctl_reg); | ||
| 248 | if (MBOWNER_GET(v) == MBOX_OWNER_DRV) { | ||
| 249 | if (!(v & MBMSGVALID)) { | ||
| 250 | t4_write_reg(adap, ctl_reg, 0); | ||
| 251 | continue; | ||
| 252 | } | ||
| 253 | |||
| 254 | res = t4_read_reg64(adap, data_reg); | ||
| 255 | if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) { | ||
| 256 | fw_asrt(adap, data_reg); | ||
| 257 | res = FW_CMD_RETVAL(EIO); | ||
| 258 | } else if (rpl) | ||
| 259 | get_mbox_rpl(adap, rpl, size / 8, data_reg); | ||
| 260 | |||
| 261 | if (FW_CMD_RETVAL_GET((int)res)) | ||
| 262 | dump_mbox(adap, mbox, data_reg); | ||
| 263 | t4_write_reg(adap, ctl_reg, 0); | ||
| 264 | return -FW_CMD_RETVAL_GET((int)res); | ||
| 265 | } | ||
| 266 | } | ||
| 267 | |||
| 268 | dump_mbox(adap, mbox, data_reg); | ||
| 269 | dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", | ||
| 270 | *(const u8 *)cmd, mbox); | ||
| 271 | return -ETIMEDOUT; | ||
| 272 | } | ||
| 273 | |||
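Most callers in this file reach t4_wr_mbox_meat() through the t4_wr_mbox() wrapper (see t4_restart_aneg() further down); the part worth remembering is the return convention spelled out above. A hedged caller sketch, reusing the fw_port_cmd structure that appears later in this file:

/* Sketch: send a firmware command that has already been marshalled
 * into 'c' and reuse the same buffer for the reply. 0 means the FW
 * executed it; a negative value is either a host errno (-EBUSY,
 * -ETIMEDOUT, ...) or the FW's own error code, negated. */
static int example_issue_cmd(struct adapter *adap, int mbox,
			     struct fw_port_cmd *c)
{
	int ret = t4_wr_mbox_meat(adap, mbox, c, sizeof(*c), c, true);

	if (ret)
		dev_err(adap->pdev_dev, "FW command failed: %d\n", ret);
	return ret;
}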
| 274 | /** | ||
| 275 | * t4_mc_read - read from MC through backdoor accesses | ||
| 276 | * @adap: the adapter | ||
| 277 | * @addr: address of first byte requested | ||
| 278 | * @data: 64 bytes of data containing the requested address | ||
| 279 | * @ecc: where to store the corresponding 64-bit ECC word | ||
| 280 | * | ||
| 281 | * Read 64 bytes of data from MC starting at a 64-byte-aligned address | ||
| 282 | * that covers the requested address @addr. If @ecc is not %NULL it | ||
| 283 | * is assigned the 64-bit ECC word for the read data. | ||
| 284 | */ | ||
| 285 | int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc) | ||
| 286 | { | ||
| 287 | int i; | ||
| 288 | |||
| 289 | if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST) | ||
| 290 | return -EBUSY; | ||
| 291 | t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU); | ||
| 292 | t4_write_reg(adap, MC_BIST_CMD_LEN, 64); | ||
| 293 | t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc); | ||
| 294 | t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST | | ||
| 295 | BIST_CMD_GAP(1)); | ||
| 296 | i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1); | ||
| 297 | if (i) | ||
| 298 | return i; | ||
| 299 | |||
| 300 | #define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i) | ||
| 301 | |||
| 302 | for (i = 15; i >= 0; i--) | ||
| 303 | *data++ = htonl(t4_read_reg(adap, MC_DATA(i))); | ||
| 304 | if (ecc) | ||
| 305 | *ecc = t4_read_reg64(adap, MC_DATA(16)); | ||
| 306 | #undef MC_DATA | ||
| 307 | return 0; | ||
| 308 | } | ||
| 309 | |||
| 310 | /** | ||
| 311 | * t4_edc_read - read from EDC through backdoor accesses | ||
| 312 | * @adap: the adapter | ||
| 313 | * @idx: which EDC to access | ||
| 314 | * @addr: address of first byte requested | ||
| 315 | * @data: 64 bytes of data containing the requested address | ||
| 316 | * @ecc: where to store the corresponding 64-bit ECC word | ||
| 317 | * | ||
| 318 | * Read 64 bytes of data from EDC starting at a 64-byte-aligned address | ||
| 319 | * that covers the requested address @addr. If @ecc is not %NULL it | ||
| 320 | * is assigned the 64-bit ECC word for the read data. | ||
| 321 | */ | ||
| 322 | int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) | ||
| 323 | { | ||
| 324 | int i; | ||
| 325 | |||
| 326 | idx *= EDC_STRIDE; | ||
| 327 | if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST) | ||
| 328 | return -EBUSY; | ||
| 329 | t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU); | ||
| 330 | t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64); | ||
| 331 | t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc); | ||
| 332 | t4_write_reg(adap, EDC_BIST_CMD + idx, | ||
| 333 | BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST); | ||
| 334 | i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1); | ||
| 335 | if (i) | ||
| 336 | return i; | ||
| 337 | |||
| 338 | #define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) | ||
| 339 | |||
| 340 | for (i = 15; i >= 0; i--) | ||
| 341 | *data++ = htonl(t4_read_reg(adap, EDC_DATA(i))); | ||
| 342 | if (ecc) | ||
| 343 | *ecc = t4_read_reg64(adap, EDC_DATA(16)); | ||
| 344 | #undef EDC_DATA | ||
| 345 | return 0; | ||
| 346 | } | ||
| 347 | |||
| 348 | #define VPD_ENTRY(name, len) \ | ||
| 349 | u8 name##_kword[2]; u8 name##_len; u8 name##_data[len] | ||
| 350 | |||
| 351 | /* | ||
| 352 | * Partial EEPROM Vital Product Data structure. Includes only the ID and | ||
| 353 | * VPD-R sections. | ||
| 354 | */ | ||
| 355 | struct t4_vpd { | ||
| 356 | u8 id_tag; | ||
| 357 | u8 id_len[2]; | ||
| 358 | u8 id_data[ID_LEN]; | ||
| 359 | u8 vpdr_tag; | ||
| 360 | u8 vpdr_len[2]; | ||
| 361 | VPD_ENTRY(pn, 16); /* part number */ | ||
| 362 | VPD_ENTRY(ec, EC_LEN); /* EC level */ | ||
| 363 | VPD_ENTRY(sn, SERNUM_LEN); /* serial number */ | ||
| 364 | VPD_ENTRY(na, 12); /* MAC address base */ | ||
| 365 | VPD_ENTRY(port_type, 8); /* port types */ | ||
| 366 | VPD_ENTRY(gpio, 14); /* GPIO usage */ | ||
| 367 | VPD_ENTRY(cclk, 6); /* core clock */ | ||
| 368 | VPD_ENTRY(port_addr, 8); /* port MDIO addresses */ | ||
| 369 | VPD_ENTRY(rv, 1); /* csum */ | ||
| 370 | u32 pad; /* for multiple-of-4 sizing and alignment */ | ||
| 371 | }; | ||
| 372 | |||
| 373 | #define EEPROM_STAT_ADDR 0x7bfc | ||
| 374 | #define VPD_BASE 0 | ||
| 375 | |||
| 376 | /** | ||
| 377 | * t4_seeprom_wp - enable/disable EEPROM write protection | ||
| 378 | * @adapter: the adapter | ||
| 379 | * @enable: whether to enable or disable write protection | ||
| 380 | * | ||
| 381 | * Enables or disables write protection on the serial EEPROM. | ||
| 382 | */ | ||
| 383 | int t4_seeprom_wp(struct adapter *adapter, bool enable) | ||
| 384 | { | ||
| 385 | unsigned int v = enable ? 0xc : 0; | ||
| 386 | int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v); | ||
| 387 | return ret < 0 ? ret : 0; | ||
| 388 | } | ||
| 389 | |||
| 390 | /** | ||
| 391 | * get_vpd_params - read VPD parameters from VPD EEPROM | ||
| 392 | * @adapter: adapter to read | ||
| 393 | * @p: where to store the parameters | ||
| 394 | * | ||
| 395 | * Reads card parameters stored in VPD EEPROM. | ||
| 396 | */ | ||
| 397 | static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) | ||
| 398 | { | ||
| 399 | int ret; | ||
| 400 | struct t4_vpd vpd; | ||
| 401 | u8 *q = (u8 *)&vpd, csum; | ||
| 402 | |||
| 403 | ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd); | ||
| 404 | if (ret < 0) | ||
| 405 | return ret; | ||
| 406 | |||
| 407 | for (csum = 0; q <= vpd.rv_data; q++) | ||
| 408 | csum += *q; | ||
| 409 | |||
| 410 | if (csum) { | ||
| 411 | dev_err(adapter->pdev_dev, | ||
| 412 | "corrupted VPD EEPROM, actual csum %u\n", csum); | ||
| 413 | return -EINVAL; | ||
| 414 | } | ||
| 415 | |||
| 416 | p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10); | ||
| 417 | memcpy(p->id, vpd.id_data, sizeof(vpd.id_data)); | ||
| 418 | strim(p->id); | ||
| 419 | memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data)); | ||
| 420 | strim(p->ec); | ||
| 421 | memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data)); | ||
| 422 | strim(p->sn); | ||
| 423 | return 0; | ||
| 424 | } | ||
| 425 | |||
| 426 | /* serial flash and firmware constants */ | ||
| 427 | enum { | ||
| 428 | SF_ATTEMPTS = 10, /* max retries for SF operations */ | ||
| 429 | |||
| 430 | /* flash command opcodes */ | ||
| 431 | SF_PROG_PAGE = 2, /* program page */ | ||
| 432 | SF_WR_DISABLE = 4, /* disable writes */ | ||
| 433 | SF_RD_STATUS = 5, /* read status register */ | ||
| 434 | SF_WR_ENABLE = 6, /* enable writes */ | ||
| 435 | SF_RD_DATA_FAST = 0xb, /* read flash */ | ||
| 436 | SF_ERASE_SECTOR = 0xd8, /* erase sector */ | ||
| 437 | |||
| 438 | FW_START_SEC = 8, /* first flash sector for FW */ | ||
| 439 | FW_END_SEC = 15, /* last flash sector for FW */ | ||
| 440 | FW_IMG_START = FW_START_SEC * SF_SEC_SIZE, | ||
| 441 | FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE, | ||
| 442 | }; | ||
| 443 | |||
| 444 | /** | ||
| 445 | * sf1_read - read data from the serial flash | ||
| 446 | * @adapter: the adapter | ||
| 447 | * @byte_cnt: number of bytes to read | ||
| 448 | * @cont: whether another operation will be chained | ||
| 449 | * @lock: whether to lock SF for PL access only | ||
| 450 | * @valp: where to store the read data | ||
| 451 | * | ||
| 452 | * Reads up to 4 bytes of data from the serial flash. The location of | ||
| 453 | * the read needs to be specified prior to calling this by issuing the | ||
| 454 | * appropriate commands to the serial flash. | ||
| 455 | */ | ||
| 456 | static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, | ||
| 457 | int lock, u32 *valp) | ||
| 458 | { | ||
| 459 | int ret; | ||
| 460 | |||
| 461 | if (!byte_cnt || byte_cnt > 4) | ||
| 462 | return -EINVAL; | ||
| 463 | if (t4_read_reg(adapter, SF_OP) & BUSY) | ||
| 464 | return -EBUSY; | ||
| 465 | cont = cont ? SF_CONT : 0; | ||
| 466 | lock = lock ? SF_LOCK : 0; | ||
| 467 | t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1)); | ||
| 468 | ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5); | ||
| 469 | if (!ret) | ||
| 470 | *valp = t4_read_reg(adapter, SF_DATA); | ||
| 471 | return ret; | ||
| 472 | } | ||
| 473 | |||
| 474 | /** | ||
| 475 | * sf1_write - write data to the serial flash | ||
| 476 | * @adapter: the adapter | ||
| 477 | * @byte_cnt: number of bytes to write | ||
| 478 | * @cont: whether another operation will be chained | ||
| 479 | * @lock: whether to lock SF for PL access only | ||
| 480 | * @val: value to write | ||
| 481 | * | ||
| 482 | * Writes up to 4 bytes of data to the serial flash. The location of | ||
| 483 | * the write needs to be specified prior to calling this by issuing the | ||
| 484 | * appropriate commands to the serial flash. | ||
| 485 | */ | ||
| 486 | static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, | ||
| 487 | int lock, u32 val) | ||
| 488 | { | ||
| 489 | if (!byte_cnt || byte_cnt > 4) | ||
| 490 | return -EINVAL; | ||
| 491 | if (t4_read_reg(adapter, SF_OP) & BUSY) | ||
| 492 | return -EBUSY; | ||
| 493 | cont = cont ? SF_CONT : 0; | ||
| 494 | lock = lock ? SF_LOCK : 0; | ||
| 495 | t4_write_reg(adapter, SF_DATA, val); | ||
| 496 | t4_write_reg(adapter, SF_OP, lock | | ||
| 497 | cont | BYTECNT(byte_cnt - 1) | OP_WR); | ||
| 498 | return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5); | ||
| 499 | } | ||
| 500 | |||
| 501 | /** | ||
| 502 | * flash_wait_op - wait for a flash operation to complete | ||
| 503 | * @adapter: the adapter | ||
| 504 | * @attempts: max number of polls of the status register | ||
| 505 | * @delay: delay between polls in ms | ||
| 506 | * | ||
| 507 | * Wait for a flash operation to complete by polling the status register. | ||
| 508 | */ | ||
| 509 | static int flash_wait_op(struct adapter *adapter, int attempts, int delay) | ||
| 510 | { | ||
| 511 | int ret; | ||
| 512 | u32 status; | ||
| 513 | |||
| 514 | while (1) { | ||
| 515 | if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 || | ||
| 516 | (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0) | ||
| 517 | return ret; | ||
| 518 | if (!(status & 1)) | ||
| 519 | return 0; | ||
| 520 | if (--attempts == 0) | ||
| 521 | return -EAGAIN; | ||
| 522 | if (delay) | ||
| 523 | msleep(delay); | ||
| 524 | } | ||
| 525 | } | ||
| 526 | |||
| 527 | /** | ||
| 528 | * t4_read_flash - read words from serial flash | ||
| 529 | * @adapter: the adapter | ||
| 530 | * @addr: the start address for the read | ||
| 531 | * @nwords: how many 32-bit words to read | ||
| 532 | * @data: where to store the read data | ||
| 533 | * @byte_oriented: whether to store data as bytes or as words | ||
| 534 | * | ||
| 535 | * Read the specified number of 32-bit words from the serial flash. | ||
| 536 | * If @byte_oriented is set the read data is stored as a byte array | ||
| 537 | * (i.e., big-endian), otherwise as 32-bit words in the platform's | ||
| 538 | * natural endianness. | ||
| 539 | */ | ||
| 540 | int t4_read_flash(struct adapter *adapter, unsigned int addr, | ||
| 541 | unsigned int nwords, u32 *data, int byte_oriented) | ||
| 542 | { | ||
| 543 | int ret; | ||
| 544 | |||
| 545 | if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3)) | ||
| 546 | return -EINVAL; | ||
| 547 | |||
| 548 | addr = swab32(addr) | SF_RD_DATA_FAST; | ||
| 549 | |||
| 550 | if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 || | ||
| 551 | (ret = sf1_read(adapter, 1, 1, 0, data)) != 0) | ||
| 552 | return ret; | ||
| 553 | |||
| 554 | for ( ; nwords; nwords--, data++) { | ||
| 555 | ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); | ||
| 556 | if (nwords == 1) | ||
| 557 | t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ | ||
| 558 | if (ret) | ||
| 559 | return ret; | ||
| 560 | if (byte_oriented) | ||
| 561 | *data = htonl(*data); | ||
| 562 | } | ||
| 563 | return 0; | ||
| 564 | } | ||
| 565 | |||
| 566 | /** | ||
| 567 | * t4_write_flash - write up to a page of data to the serial flash | ||
| 568 | * @adapter: the adapter | ||
| 569 | * @addr: the start address to write | ||
| 570 | * @n: length of data to write in bytes | ||
| 571 | * @data: the data to write | ||
| 572 | * | ||
| 573 | * Writes up to a page of data (256 bytes) to the serial flash starting | ||
| 574 | * at the given address. All the data must be written to the same page. | ||
| 575 | */ | ||
| 576 | static int t4_write_flash(struct adapter *adapter, unsigned int addr, | ||
| 577 | unsigned int n, const u8 *data) | ||
| 578 | { | ||
| 579 | int ret; | ||
| 580 | u32 buf[64]; | ||
| 581 | unsigned int i, c, left, val, offset = addr & 0xff; | ||
| 582 | |||
| 583 | if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE) | ||
| 584 | return -EINVAL; | ||
| 585 | |||
| 586 | val = swab32(addr) | SF_PROG_PAGE; | ||
| 587 | |||
| 588 | if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || | ||
| 589 | (ret = sf1_write(adapter, 4, 1, 1, val)) != 0) | ||
| 590 | goto unlock; | ||
| 591 | |||
| 592 | for (left = n; left; left -= c) { | ||
| 593 | c = min(left, 4U); | ||
| 594 | for (val = 0, i = 0; i < c; ++i) | ||
| 595 | val = (val << 8) + *data++; | ||
| 596 | |||
| 597 | ret = sf1_write(adapter, c, c != left, 1, val); | ||
| 598 | if (ret) | ||
| 599 | goto unlock; | ||
| 600 | } | ||
| 601 | ret = flash_wait_op(adapter, 5, 1); | ||
| 602 | if (ret) | ||
| 603 | goto unlock; | ||
| 604 | |||
| 605 | t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ | ||
| 606 | |||
| 607 | /* Read the page to verify the write succeeded */ | ||
| 608 | ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); | ||
| 609 | if (ret) | ||
| 610 | return ret; | ||
| 611 | |||
| 612 | if (memcmp(data - n, (u8 *)buf + offset, n)) { | ||
| 613 | dev_err(adapter->pdev_dev, | ||
| 614 | "failed to correctly write the flash page at %#x\n", | ||
| 615 | addr); | ||
| 616 | return -EIO; | ||
| 617 | } | ||
| 618 | return 0; | ||
| 619 | |||
| 620 | unlock: | ||
| 621 | t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ | ||
| 622 | return ret; | ||
| 623 | } | ||
| 624 | |||
| 625 | /** | ||
| 626 | * get_fw_version - read the firmware version | ||
| 627 | * @adapter: the adapter | ||
| 628 | * @vers: where to place the version | ||
| 629 | * | ||
| 630 | * Reads the FW version from flash. | ||
| 631 | */ | ||
| 632 | static int get_fw_version(struct adapter *adapter, u32 *vers) | ||
| 633 | { | ||
| 634 | return t4_read_flash(adapter, | ||
| 635 | FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1, | ||
| 636 | vers, 0); | ||
| 637 | } | ||
| 638 | |||
| 639 | /** | ||
| 640 | * get_tp_version - read the TP microcode version | ||
| 641 | * @adapter: the adapter | ||
| 642 | * @vers: where to place the version | ||
| 643 | * | ||
| 644 | * Reads the TP microcode version from flash. | ||
| 645 | */ | ||
| 646 | static int get_tp_version(struct adapter *adapter, u32 *vers) | ||
| 647 | { | ||
| 648 | return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr, | ||
| 649 | tp_microcode_ver), | ||
| 650 | 1, vers, 0); | ||
| 651 | } | ||
| 652 | |||
| 653 | /** | ||
| 654 | * t4_check_fw_version - check if the FW is compatible with this driver | ||
| 655 | * @adapter: the adapter | ||
| 656 | * | ||
| 657 | * Checks if an adapter's FW is compatible with the driver. Returns 0 | ||
| 658 | * if there's an exact match, a negative error if the version could not be | ||
| 659 | * read or there's a major version mismatch, and a positive value if the | ||
| 660 | * expected major version is found but there's a minor version mismatch. | ||
| 661 | */ | ||
| 662 | int t4_check_fw_version(struct adapter *adapter) | ||
| 663 | { | ||
| 664 | u32 api_vers[2]; | ||
| 665 | int ret, major, minor, micro; | ||
| 666 | |||
| 667 | ret = get_fw_version(adapter, &adapter->params.fw_vers); | ||
| 668 | if (!ret) | ||
| 669 | ret = get_tp_version(adapter, &adapter->params.tp_vers); | ||
| 670 | if (!ret) | ||
| 671 | ret = t4_read_flash(adapter, | ||
| 672 | FW_IMG_START + offsetof(struct fw_hdr, intfver_nic), | ||
| 673 | 2, api_vers, 1); | ||
| 674 | if (ret) | ||
| 675 | return ret; | ||
| 676 | |||
| 677 | major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); | ||
| 678 | minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); | ||
| 679 | micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); | ||
| 680 | memcpy(adapter->params.api_vers, api_vers, | ||
| 681 | sizeof(adapter->params.api_vers)); | ||
| 682 | |||
| 683 | if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ | ||
| 684 | dev_err(adapter->pdev_dev, | ||
| 685 | "card FW has major version %u, driver wants %u\n", | ||
| 686 | major, FW_VERSION_MAJOR); | ||
| 687 | return -EINVAL; | ||
| 688 | } | ||
| 689 | |||
| 690 | if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) | ||
| 691 | return 0; /* perfect match */ | ||
| 692 | |||
| 693 | /* Minor/micro version mismatch. Report it but often it's OK. */ | ||
| 694 | return 1; | ||
| 695 | } | ||
| 696 | |||
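A hedged sketch of how the three-way return value documented above is meant to be consumed by an attach path:

/* Sketch: negative means unusable (flash unreadable or major version
 * mismatch), 0 is an exact match, positive means only the minor/micro
 * version differs, which is reported but normally tolerated. */
static int example_check_fw(struct adapter *adap)
{
	int ret = t4_check_fw_version(adap);

	if (ret < 0)
		return ret;		/* refuse to use this FW */
	if (ret > 0)
		dev_warn(adap->pdev_dev,
			 "FW minor version differs from the driver\n");
	return 0;
}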
| 697 | /** | ||
| 698 | * t4_flash_erase_sectors - erase a range of flash sectors | ||
| 699 | * @adapter: the adapter | ||
| 700 | * @start: the first sector to erase | ||
| 701 | * @end: the last sector to erase | ||
| 702 | * | ||
| 703 | * Erases the sectors in the given inclusive range. | ||
| 704 | */ | ||
| 705 | static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) | ||
| 706 | { | ||
| 707 | int ret = 0; | ||
| 708 | |||
| 709 | while (start <= end) { | ||
| 710 | if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || | ||
| 711 | (ret = sf1_write(adapter, 4, 0, 1, | ||
| 712 | SF_ERASE_SECTOR | (start << 8))) != 0 || | ||
| 713 | (ret = flash_wait_op(adapter, 5, 500)) != 0) { | ||
| 714 | dev_err(adapter->pdev_dev, | ||
| 715 | "erase of flash sector %d failed, error %d\n", | ||
| 716 | start, ret); | ||
| 717 | break; | ||
| 718 | } | ||
| 719 | start++; | ||
| 720 | } | ||
| 721 | t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ | ||
| 722 | return ret; | ||
| 723 | } | ||
| 724 | |||
| 725 | /** | ||
| 726 | * t4_load_fw - download firmware | ||
| 727 | * @adap: the adapter | ||
| 728 | * @fw_data: the firmware image to write | ||
| 729 | * @size: image size | ||
| 730 | * | ||
| 731 | * Write the supplied firmware image to the card's serial flash. | ||
| 732 | */ | ||
| 733 | int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) | ||
| 734 | { | ||
| 735 | u32 csum; | ||
| 736 | int ret, addr; | ||
| 737 | unsigned int i; | ||
| 738 | u8 first_page[SF_PAGE_SIZE]; | ||
| 739 | const u32 *p = (const u32 *)fw_data; | ||
| 740 | const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; | ||
| 741 | |||
| 742 | if (!size) { | ||
| 743 | dev_err(adap->pdev_dev, "FW image has no data\n"); | ||
| 744 | return -EINVAL; | ||
| 745 | } | ||
| 746 | if (size & 511) { | ||
| 747 | dev_err(adap->pdev_dev, | ||
| 748 | "FW image size not multiple of 512 bytes\n"); | ||
| 749 | return -EINVAL; | ||
| 750 | } | ||
| 751 | if (ntohs(hdr->len512) * 512 != size) { | ||
| 752 | dev_err(adap->pdev_dev, | ||
| 753 | "FW image size differs from size in FW header\n"); | ||
| 754 | return -EINVAL; | ||
| 755 | } | ||
| 756 | if (size > FW_MAX_SIZE) { | ||
| 757 | dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n", | ||
| 758 | FW_MAX_SIZE); | ||
| 759 | return -EFBIG; | ||
| 760 | } | ||
| 761 | |||
| 762 | for (csum = 0, i = 0; i < size / sizeof(csum); i++) | ||
| 763 | csum += ntohl(p[i]); | ||
| 764 | |||
| 765 | if (csum != 0xffffffff) { | ||
| 766 | dev_err(adap->pdev_dev, | ||
| 767 | "corrupted firmware image, checksum %#x\n", csum); | ||
| 768 | return -EINVAL; | ||
| 769 | } | ||
| 770 | |||
| 771 | i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */ | ||
| 772 | ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1); | ||
| 773 | if (ret) | ||
| 774 | goto out; | ||
| 775 | |||
| 776 | /* | ||
| 777 | * We write the correct version at the end so the driver can see a bad | ||
| 778 | * version if the FW write fails. Start by writing a copy of the | ||
| 779 | * first page with a bad version. | ||
| 780 | */ | ||
| 781 | memcpy(first_page, fw_data, SF_PAGE_SIZE); | ||
| 782 | ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); | ||
| 783 | ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page); | ||
| 784 | if (ret) | ||
| 785 | goto out; | ||
| 786 | |||
| 787 | addr = FW_IMG_START; | ||
| 788 | for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { | ||
| 789 | addr += SF_PAGE_SIZE; | ||
| 790 | fw_data += SF_PAGE_SIZE; | ||
| 791 | ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data); | ||
| 792 | if (ret) | ||
| 793 | goto out; | ||
| 794 | } | ||
| 795 | |||
| 796 | ret = t4_write_flash(adap, | ||
| 797 | FW_IMG_START + offsetof(struct fw_hdr, fw_ver), | ||
| 798 | sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver); | ||
| 799 | out: | ||
| 800 | if (ret) | ||
| 801 | dev_err(adap->pdev_dev, "firmware download failed, error %d\n", | ||
| 802 | ret); | ||
| 803 | return ret; | ||
| 804 | } | ||
| 805 | |||
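The integrity rule t4_load_fw() enforces before touching the flash is that the image's 32-bit big-endian words sum to 0xffffffff. The same check, pulled out as a standalone hedged sketch (hypothetical helper, not part of the driver):

/* Sketch: returns true if the FW image checksum is valid. 'p' points
 * to the image and 'size' is its length in bytes; the driver further
 * requires 'size' to be a multiple of 512. */
static bool example_fw_image_ok(const u32 *p, unsigned int size)
{
	u32 csum = 0;
	unsigned int i;

	for (i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	return csum == 0xffffffff;
}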
| 806 | #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ | ||
| 807 | FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG) | ||
| 808 | |||
| 809 | /** | ||
| 810 | * t4_link_start - apply link configuration to MAC/PHY | ||
| 811 | * @adap: the adapter | ||
| 812 | * @mbox: mbox to use for the FW command; @port: the port id | ||
| 813 | * @lc: the requested link configuration | ||
| 814 | * | ||
| 815 | * Set up a port's MAC and PHY according to a desired link configuration. | ||
| 816 | * - If the PHY can auto-negotiate, first decide what to advertise, then | ||
| 817 | * enable/disable auto-negotiation as desired, and reset. | ||
| 818 | * - If the PHY does not auto-negotiate just reset it. | ||
| 819 | * - If auto-negotiation is off, set the MAC to the proper speed/duplex/FC, | ||
| 820 | * otherwise do it later based on the outcome of auto-negotiation. | ||
| 821 | */ | ||
| 822 | int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, | ||
| 823 | struct link_config *lc) | ||
| 824 | { | ||
| 825 | struct fw_port_cmd c; | ||
| 826 | unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO); | ||
| 827 | |||
| 828 | lc->link_ok = 0; | ||
| 829 | if (lc->requested_fc & PAUSE_RX) | ||
| 830 | fc |= FW_PORT_CAP_FC_RX; | ||
| 831 | if (lc->requested_fc & PAUSE_TX) | ||
| 832 | fc |= FW_PORT_CAP_FC_TX; | ||
| 833 | |||
| 834 | memset(&c, 0, sizeof(c)); | ||
| 835 | c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | | ||
| 836 | FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); | ||
| 837 | c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | | ||
| 838 | FW_LEN16(c)); | ||
| 839 | |||
| 840 | if (!(lc->supported & FW_PORT_CAP_ANEG)) { | ||
| 841 | c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc); | ||
| 842 | lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); | ||
| 843 | } else if (lc->autoneg == AUTONEG_DISABLE) { | ||
| 844 | c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi); | ||
| 845 | lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); | ||
| 846 | } else | ||
| 847 | c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi); | ||
| 848 | |||
| 849 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
| 850 | } | ||
| 851 | |||
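A hedged usage sketch: every field touched below (requested_fc, autoneg, requested_speed) is one that t4_link_start() itself consults, and the pause/speed constants are the ones used elsewhere in this file:

/* Sketch: ask for symmetric pause with autonegotiation disabled at a
 * fixed 10G speed, then push the configuration to port 0 through the
 * given FW mailbox. */
static int example_fixed_10g(struct adapter *adap, unsigned int mbox,
			     struct link_config *lc)
{
	lc->requested_fc = PAUSE_RX | PAUSE_TX;
	lc->autoneg = AUTONEG_DISABLE;
	lc->requested_speed = FW_PORT_CAP_SPEED_10G;
	return t4_link_start(adap, mbox, 0, lc);
}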
| 852 | /** | ||
| 853 | * t4_restart_aneg - restart autonegotiation | ||
| 854 | * @adap: the adapter | ||
| 855 | * @mbox: mbox to use for the FW command | ||
| 856 | * @port: the port id | ||
| 857 | * | ||
| 858 | * Restarts autonegotiation for the selected port. | ||
| 859 | */ | ||
| 860 | int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) | ||
| 861 | { | ||
| 862 | struct fw_port_cmd c; | ||
| 863 | |||
| 864 | memset(&c, 0, sizeof(c)); | ||
| 865 | c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | | ||
| 866 | FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); | ||
| 867 | c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | | ||
| 868 | FW_LEN16(c)); | ||
| 869 | c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG); | ||
| 870 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
| 871 | } | ||
| 872 | |||
| 873 | /** | ||
| 874 | * t4_set_vlan_accel - configure HW VLAN extraction | ||
| 875 | * @adap: the adapter | ||
| 876 | * @ports: bitmap of adapter ports to operate on | ||
| 877 | * @on: enable (1) or disable (0) HW VLAN extraction | ||
| 878 | * | ||
| 879 | * Enables or disables HW extraction of VLAN tags for the ports specified | ||
| 880 | * by @ports. @ports is a bitmap with the ith bit designating the port | ||
| 881 | * associated with the ith adapter channel. | ||
| 882 | */ | ||
| 883 | void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on) | ||
| 884 | { | ||
| 885 | ports <<= VLANEXTENABLE_SHIFT; | ||
| 886 | t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0); | ||
| 887 | } | ||
| 888 | |||
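For example (sketch only), enabling HW VLAN extraction on a single adapter channel just sets that channel's bit in the @ports bitmap:

/* Sketch: turn on HW VLAN tag extraction for adapter channel 'port'. */
static void example_enable_vlan(struct adapter *adap, int port)
{
	t4_set_vlan_accel(adap, 1 << port, 1);
}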
| 889 | struct intr_info { | ||
| 890 | unsigned int mask; /* bits to check in interrupt status */ | ||
| 891 | const char *msg; /* message to print or NULL */ | ||
| 892 | short stat_idx; /* stat counter to increment or -1 */ | ||
| 893 | unsigned short fatal; /* whether the condition reported is fatal */ | ||
| 894 | }; | ||
| 895 | |||
| 896 | /** | ||
| 897 | * t4_handle_intr_status - table driven interrupt handler | ||
| 898 | * @adapter: the adapter that generated the interrupt | ||
| 899 | * @reg: the interrupt status register to process | ||
| 900 | * @acts: table of interrupt actions | ||
| 901 | * | ||
| 902 | * A table driven interrupt handler that applies a set of masks to an | ||
| 903 | * interrupt status word and performs the corresponding actions if the | ||
| 904 | * interrupts described by the mask have occurred. The actions include | ||
| 905 | * optionally emitting a warning or alert message. The table is terminated | ||
| 906 | * by an entry specifying mask 0. Returns the number of fatal interrupt | ||
| 907 | * conditions. | ||
| 908 | */ | ||
| 909 | static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg, | ||
| 910 | const struct intr_info *acts) | ||
| 911 | { | ||
| 912 | int fatal = 0; | ||
| 913 | unsigned int mask = 0; | ||
| 914 | unsigned int status = t4_read_reg(adapter, reg); | ||
| 915 | |||
| 916 | for ( ; acts->mask; ++acts) { | ||
| 917 | if (!(status & acts->mask)) | ||
| 918 | continue; | ||
| 919 | if (acts->fatal) { | ||
| 920 | fatal++; | ||
| 921 | dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, | ||
| 922 | status & acts->mask); | ||
| 923 | } else if (acts->msg && printk_ratelimit()) | ||
| 924 | dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, | ||
| 925 | status & acts->mask); | ||
| 926 | mask |= acts->mask; | ||
| 927 | } | ||
| 928 | status &= mask; | ||
| 929 | if (status) /* clear processed interrupts */ | ||
| 930 | t4_write_reg(adapter, reg, status); | ||
| 931 | return fatal; | ||
| 932 | } | ||
| 933 | |||
| 934 | /* | ||
| 935 | * Interrupt handler for the PCIE module. | ||
| 936 | */ | ||
| 937 | static void pcie_intr_handler(struct adapter *adapter) | ||
| 938 | { | ||
| 939 | static struct intr_info sysbus_intr_info[] = { | ||
| 940 | { RNPP, "RXNP array parity error", -1, 1 }, | ||
| 941 | { RPCP, "RXPC array parity error", -1, 1 }, | ||
| 942 | { RCIP, "RXCIF array parity error", -1, 1 }, | ||
| 943 | { RCCP, "Rx completions control array parity error", -1, 1 }, | ||
| 944 | { RFTP, "RXFT array parity error", -1, 1 }, | ||
| 945 | { 0 } | ||
| 946 | }; | ||
| 947 | static struct intr_info pcie_port_intr_info[] = { | ||
| 948 | { TPCP, "TXPC array parity error", -1, 1 }, | ||
| 949 | { TNPP, "TXNP array parity error", -1, 1 }, | ||
| 950 | { TFTP, "TXFT array parity error", -1, 1 }, | ||
| 951 | { TCAP, "TXCA array parity error", -1, 1 }, | ||
| 952 | { TCIP, "TXCIF array parity error", -1, 1 }, | ||
| 953 | { RCAP, "RXCA array parity error", -1, 1 }, | ||
| 954 | { OTDD, "outbound request TLP discarded", -1, 1 }, | ||
| 955 | { RDPE, "Rx data parity error", -1, 1 }, | ||
| 956 | { TDUE, "Tx uncorrectable data error", -1, 1 }, | ||
| 957 | { 0 } | ||
| 958 | }; | ||
| 959 | static struct intr_info pcie_intr_info[] = { | ||
| 960 | { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, | ||
| 961 | { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, | ||
| 962 | { MSIDATAPERR, "MSI data parity error", -1, 1 }, | ||
| 963 | { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, | ||
| 964 | { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, | ||
| 965 | { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, | ||
| 966 | { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, | ||
| 967 | { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, | ||
| 968 | { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, | ||
| 969 | { TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 }, | ||
| 970 | { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, | ||
| 971 | { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, | ||
| 972 | { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, | ||
| 973 | { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, | ||
| 974 | { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, | ||
| 975 | { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, | ||
| 976 | { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, | ||
| 977 | { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, | ||
| 978 | { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, | ||
| 979 | { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, | ||
| 980 | { FIDPERR, "PCI FID parity error", -1, 1 }, | ||
| 981 | { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, | ||
| 982 | { MATAGPERR, "PCI MA tag parity error", -1, 1 }, | ||
| 983 | { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, | ||
| 984 | { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, | ||
| 985 | { RXWRPERR, "PCI Rx write parity error", -1, 1 }, | ||
| 986 | { RPLPERR, "PCI replay buffer parity error", -1, 1 }, | ||
| 987 | { PCIESINT, "PCI core secondary fault", -1, 1 }, | ||
| 988 | { PCIEPINT, "PCI core primary fault", -1, 1 }, | ||
| 989 | { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 }, | ||
| 990 | { 0 } | ||
| 991 | }; | ||
| 992 | |||
| 993 | int fat; | ||
| 994 | |||
| 995 | fat = t4_handle_intr_status(adapter, | ||
| 996 | PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, | ||
| 997 | sysbus_intr_info) + | ||
| 998 | t4_handle_intr_status(adapter, | ||
| 999 | PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, | ||
| 1000 | pcie_port_intr_info) + | ||
| 1001 | t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info); | ||
| 1002 | if (fat) | ||
| 1003 | t4_fatal_err(adapter); | ||
| 1004 | } | ||
| 1005 | |||
| 1006 | /* | ||
| 1007 | * TP interrupt handler. | ||
| 1008 | */ | ||
| 1009 | static void tp_intr_handler(struct adapter *adapter) | ||
| 1010 | { | ||
| 1011 | static struct intr_info tp_intr_info[] = { | ||
| 1012 | { 0x3fffffff, "TP parity error", -1, 1 }, | ||
| 1013 | { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, | ||
| 1014 | { 0 } | ||
| 1015 | }; | ||
| 1016 | |||
| 1017 | if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info)) | ||
| 1018 | t4_fatal_err(adapter); | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | /* | ||
| 1022 | * SGE interrupt handler. | ||
| 1023 | */ | ||
| 1024 | static void sge_intr_handler(struct adapter *adapter) | ||
| 1025 | { | ||
| 1026 | u64 v; | ||
| 1027 | |||
| 1028 | static struct intr_info sge_intr_info[] = { | ||
| 1029 | { ERR_CPL_EXCEED_IQE_SIZE, | ||
| 1030 | "SGE received CPL exceeding IQE size", -1, 1 }, | ||
| 1031 | { ERR_INVALID_CIDX_INC, | ||
| 1032 | "SGE GTS CIDX increment too large", -1, 0 }, | ||
| 1033 | { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, | ||
| 1034 | { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, | ||
| 1035 | { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, | ||
| 1036 | "SGE IQID > 1023 received CPL for FL", -1, 0 }, | ||
| 1037 | { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, | ||
| 1038 | 0 }, | ||
| 1039 | { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, | ||
| 1040 | 0 }, | ||
| 1041 | { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, | ||
| 1042 | 0 }, | ||
| 1043 | { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, | ||
| 1044 | 0 }, | ||
| 1045 | { ERR_ING_CTXT_PRIO, | ||
| 1046 | "SGE too many priority ingress contexts", -1, 0 }, | ||
| 1047 | { ERR_EGR_CTXT_PRIO, | ||
| 1048 | "SGE too many priority egress contexts", -1, 0 }, | ||
| 1049 | { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, | ||
| 1050 | { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, | ||
| 1051 | { 0 } | ||
| 1052 | }; | ||
| 1053 | |||
| 1054 | v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) | | ||
| 1055 | ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32); | ||
| 1056 | if (v) { | ||
| 1057 | dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n", | ||
| 1058 | (unsigned long long)v); | ||
| 1059 | t4_write_reg(adapter, SGE_INT_CAUSE1, v); | ||
| 1060 | t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32); | ||
| 1061 | } | ||
| 1062 | |||
| 1063 | if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) || | ||
| 1064 | v != 0) | ||
| 1065 | t4_fatal_err(adapter); | ||
| 1066 | } | ||
| 1067 | |||
| 1068 | /* | ||
| 1069 | * CIM interrupt handler. | ||
| 1070 | */ | ||
| 1071 | static void cim_intr_handler(struct adapter *adapter) | ||
| 1072 | { | ||
| 1073 | static struct intr_info cim_intr_info[] = { | ||
| 1074 | { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, | ||
| 1075 | { OBQPARERR, "CIM OBQ parity error", -1, 1 }, | ||
| 1076 | { IBQPARERR, "CIM IBQ parity error", -1, 1 }, | ||
| 1077 | { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, | ||
| 1078 | { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, | ||
| 1079 | { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, | ||
| 1080 | { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, | ||
| 1081 | { 0 } | ||
| 1082 | }; | ||
| 1083 | static struct intr_info cim_upintr_info[] = { | ||
| 1084 | { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, | ||
| 1085 | { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, | ||
| 1086 | { ILLWRINT, "CIM illegal write", -1, 1 }, | ||
| 1087 | { ILLRDINT, "CIM illegal read", -1, 1 }, | ||
| 1088 | { ILLRDBEINT, "CIM illegal read BE", -1, 1 }, | ||
| 1089 | { ILLWRBEINT, "CIM illegal write BE", -1, 1 }, | ||
| 1090 | { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, | ||
| 1091 | { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, | ||
| 1092 | { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, | ||
| 1093 | { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, | ||
| 1094 | { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, | ||
| 1095 | { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, | ||
| 1096 | { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, | ||
| 1097 | { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, | ||
| 1098 | { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, | ||
| 1099 | { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, | ||
| 1100 | { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, | ||
| 1101 | { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, | ||
| 1102 | { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, | ||
| 1103 | { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, | ||
| 1104 | { SGLRDPLINT , "CIM single read from PL space", -1, 1 }, | ||
| 1105 | { SGLWRPLINT , "CIM single write to PL space", -1, 1 }, | ||
| 1106 | { BLKRDPLINT , "CIM block read from PL space", -1, 1 }, | ||
| 1107 | { BLKWRPLINT , "CIM block write to PL space", -1, 1 }, | ||
| 1108 | { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, | ||
| 1109 | { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 }, | ||
| 1110 | { TIMEOUTINT , "CIM PIF timeout", -1, 1 }, | ||
| 1111 | { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, | ||
| 1112 | { 0 } | ||
| 1113 | }; | ||
| 1114 | |||
| 1115 | int fat; | ||
| 1116 | |||
| 1117 | fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, | ||
| 1118 | cim_intr_info) + | ||
| 1119 | t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, | ||
| 1120 | cim_upintr_info); | ||
| 1121 | if (fat) | ||
| 1122 | t4_fatal_err(adapter); | ||
| 1123 | } | ||
| 1124 | |||
| 1125 | /* | ||
| 1126 | * ULP RX interrupt handler. | ||
| 1127 | */ | ||
| 1128 | static void ulprx_intr_handler(struct adapter *adapter) | ||
| 1129 | { | ||
| 1130 | static struct intr_info ulprx_intr_info[] = { | ||
| 1131 | { 0x7fffff, "ULPRX parity error", -1, 1 }, | ||
| 1132 | { 0 } | ||
| 1133 | }; | ||
| 1134 | |||
| 1135 | if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info)) | ||
| 1136 | t4_fatal_err(adapter); | ||
| 1137 | } | ||
| 1138 | |||
| 1139 | /* | ||
| 1140 | * ULP TX interrupt handler. | ||
| 1141 | */ | ||
| 1142 | static void ulptx_intr_handler(struct adapter *adapter) | ||
| 1143 | { | ||
| 1144 | static struct intr_info ulptx_intr_info[] = { | ||
| 1145 | { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, | ||
| 1146 | 0 }, | ||
| 1147 | { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, | ||
| 1148 | 0 }, | ||
| 1149 | { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, | ||
| 1150 | 0 }, | ||
| 1151 | { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, | ||
| 1152 | 0 }, | ||
| 1153 | { 0xfffffff, "ULPTX parity error", -1, 1 }, | ||
| 1154 | { 0 } | ||
| 1155 | }; | ||
| 1156 | |||
| 1157 | if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info)) | ||
| 1158 | t4_fatal_err(adapter); | ||
| 1159 | } | ||
| 1160 | |||
| 1161 | /* | ||
| 1162 | * PM TX interrupt handler. | ||
| 1163 | */ | ||
| 1164 | static void pmtx_intr_handler(struct adapter *adapter) | ||
| 1165 | { | ||
| 1166 | static struct intr_info pmtx_intr_info[] = { | ||
| 1167 | { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, | ||
| 1168 | { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, | ||
| 1169 | { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, | ||
| 1170 | { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, | ||
| 1171 | { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 }, | ||
| 1172 | { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, | ||
| 1173 | { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 }, | ||
| 1174 | { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, | ||
| 1175 | { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, | ||
| 1176 | { 0 } | ||
| 1177 | }; | ||
| 1178 | |||
| 1179 | if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info)) | ||
| 1180 | t4_fatal_err(adapter); | ||
| 1181 | } | ||
| 1182 | |||
| 1183 | /* | ||
| 1184 | * PM RX interrupt handler. | ||
| 1185 | */ | ||
| 1186 | static void pmrx_intr_handler(struct adapter *adapter) | ||
| 1187 | { | ||
| 1188 | static struct intr_info pmrx_intr_info[] = { | ||
| 1189 | { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, | ||
| 1190 | { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 }, | ||
| 1191 | { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, | ||
| 1192 | { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 }, | ||
| 1193 | { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, | ||
| 1194 | { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, | ||
| 1195 | { 0 } | ||
| 1196 | }; | ||
| 1197 | |||
| 1198 | if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info)) | ||
| 1199 | t4_fatal_err(adapter); | ||
| 1200 | } | ||
| 1201 | |||
| 1202 | /* | ||
| 1203 | * CPL switch interrupt handler. | ||
| 1204 | */ | ||
| 1205 | static void cplsw_intr_handler(struct adapter *adapter) | ||
| 1206 | { | ||
| 1207 | static struct intr_info cplsw_intr_info[] = { | ||
| 1208 | { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, | ||
| 1209 | { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, | ||
| 1210 | { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, | ||
| 1211 | { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, | ||
| 1212 | { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, | ||
| 1213 | { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, | ||
| 1214 | { 0 } | ||
| 1215 | }; | ||
| 1216 | |||
| 1217 | if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info)) | ||
| 1218 | t4_fatal_err(adapter); | ||
| 1219 | } | ||
| 1220 | |||
| 1221 | /* | ||
| 1222 | * LE interrupt handler. | ||
| 1223 | */ | ||
| 1224 | static void le_intr_handler(struct adapter *adap) | ||
| 1225 | { | ||
| 1226 | static struct intr_info le_intr_info[] = { | ||
| 1227 | { LIPMISS, "LE LIP miss", -1, 0 }, | ||
| 1228 | { LIP0, "LE 0 LIP error", -1, 0 }, | ||
| 1229 | { PARITYERR, "LE parity error", -1, 1 }, | ||
| 1230 | { UNKNOWNCMD, "LE unknown command", -1, 1 }, | ||
| 1231 | { REQQPARERR, "LE request queue parity error", -1, 1 }, | ||
| 1232 | { 0 } | ||
| 1233 | }; | ||
| 1234 | |||
| 1235 | if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info)) | ||
| 1236 | t4_fatal_err(adap); | ||
| 1237 | } | ||
| 1238 | |||
| 1239 | /* | ||
| 1240 | * MPS interrupt handler. | ||
| 1241 | */ | ||
| 1242 | static void mps_intr_handler(struct adapter *adapter) | ||
| 1243 | { | ||
| 1244 | static struct intr_info mps_rx_intr_info[] = { | ||
| 1245 | { 0xffffff, "MPS Rx parity error", -1, 1 }, | ||
| 1246 | { 0 } | ||
| 1247 | }; | ||
| 1248 | static struct intr_info mps_tx_intr_info[] = { | ||
| 1249 | { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, | ||
| 1250 | { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, | ||
| 1251 | { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, | ||
| 1252 | { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, | ||
| 1253 | { BUBBLE, "MPS Tx underflow", -1, 1 }, | ||
| 1254 | { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, | ||
| 1255 | { FRMERR, "MPS Tx framing error", -1, 1 }, | ||
| 1256 | { 0 } | ||
| 1257 | }; | ||
| 1258 | static struct intr_info mps_trc_intr_info[] = { | ||
| 1259 | { FILTMEM, "MPS TRC filter parity error", -1, 1 }, | ||
| 1260 | { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, | ||
| 1261 | { MISCPERR, "MPS TRC misc parity error", -1, 1 }, | ||
| 1262 | { 0 } | ||
| 1263 | }; | ||
| 1264 | static struct intr_info mps_stat_sram_intr_info[] = { | ||
| 1265 | { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, | ||
| 1266 | { 0 } | ||
| 1267 | }; | ||
| 1268 | static struct intr_info mps_stat_tx_intr_info[] = { | ||
| 1269 | { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, | ||
| 1270 | { 0 } | ||
| 1271 | }; | ||
| 1272 | static struct intr_info mps_stat_rx_intr_info[] = { | ||
| 1273 | { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, | ||
| 1274 | { 0 } | ||
| 1275 | }; | ||
| 1276 | static struct intr_info mps_cls_intr_info[] = { | ||
| 1277 | { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, | ||
| 1278 | { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, | ||
| 1279 | { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, | ||
| 1280 | { 0 } | ||
| 1281 | }; | ||
| 1282 | |||
| 1283 | int fat; | ||
| 1284 | |||
| 1285 | fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE, | ||
| 1286 | mps_rx_intr_info) + | ||
| 1287 | t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE, | ||
| 1288 | mps_tx_intr_info) + | ||
| 1289 | t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE, | ||
| 1290 | mps_trc_intr_info) + | ||
| 1291 | t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM, | ||
| 1292 | mps_stat_sram_intr_info) + | ||
| 1293 | t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, | ||
| 1294 | mps_stat_tx_intr_info) + | ||
| 1295 | t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, | ||
| 1296 | mps_stat_rx_intr_info) + | ||
| 1297 | t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE, | ||
| 1298 | mps_cls_intr_info); | ||
| 1299 | |||
| 1300 | t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT | | ||
| 1301 | RXINT | TXINT | STATINT); | ||
| 1302 | t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */ | ||
| 1303 | if (fat) | ||
| 1304 | t4_fatal_err(adapter); | ||
| 1305 | } | ||
| 1306 | |||
| 1307 | #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) | ||
| 1308 | |||
| 1309 | /* | ||
| 1310 | * EDC/MC interrupt handler. | ||
| 1311 | */ | ||
| 1312 | static void mem_intr_handler(struct adapter *adapter, int idx) | ||
| 1313 | { | ||
| 1314 | static const char name[3][5] = { "EDC0", "EDC1", "MC" }; | ||
| 1315 | |||
| 1316 | unsigned int addr, cnt_addr, v; | ||
| 1317 | |||
| 1318 | if (idx <= MEM_EDC1) { | ||
| 1319 | addr = EDC_REG(EDC_INT_CAUSE, idx); | ||
| 1320 | cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); | ||
| 1321 | } else { | ||
| 1322 | addr = MC_INT_CAUSE; | ||
| 1323 | cnt_addr = MC_ECC_STATUS; | ||
| 1324 | } | ||
| 1325 | |||
| 1326 | v = t4_read_reg(adapter, addr) & MEM_INT_MASK; | ||
| 1327 | if (v & PERR_INT_CAUSE) | ||
| 1328 | dev_alert(adapter->pdev_dev, "%s FIFO parity error\n", | ||
| 1329 | name[idx]); | ||
| 1330 | if (v & ECC_CE_INT_CAUSE) { | ||
| 1331 | u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr)); | ||
| 1332 | |||
| 1333 | t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK); | ||
| 1334 | if (printk_ratelimit()) | ||
| 1335 | dev_warn(adapter->pdev_dev, | ||
| 1336 | "%u %s correctable ECC data error%s\n", | ||
| 1337 | cnt, name[idx], cnt > 1 ? "s" : ""); | ||
| 1338 | } | ||
| 1339 | if (v & ECC_UE_INT_CAUSE) | ||
| 1340 | dev_alert(adapter->pdev_dev, | ||
| 1341 | "%s uncorrectable ECC data error\n", name[idx]); | ||
| 1342 | |||
| 1343 | t4_write_reg(adapter, addr, v); | ||
| 1344 | if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) | ||
| 1345 | t4_fatal_err(adapter); | ||
| 1346 | } | ||
| 1347 | |||
| 1348 | /* | ||
| 1349 | * MA interrupt handler. | ||
| 1350 | */ | ||
| 1351 | static void ma_intr_handler(struct adapter *adap) | ||
| 1352 | { | ||
| 1353 | u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); | ||
| 1354 | |||
| 1355 | if (status & MEM_PERR_INT_CAUSE) | ||
| 1356 | dev_alert(adap->pdev_dev, | ||
| 1357 | "MA parity error, parity status %#x\n", | ||
| 1358 | t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); | ||
| 1359 | if (status & MEM_WRAP_INT_CAUSE) { | ||
| 1360 | v = t4_read_reg(adap, MA_INT_WRAP_STATUS); | ||
| 1361 | dev_alert(adap->pdev_dev, "MA address wrap-around error by " | ||
| 1362 | "client %u to address %#x\n", | ||
| 1363 | MEM_WRAP_CLIENT_NUM_GET(v), | ||
| 1364 | MEM_WRAP_ADDRESS_GET(v) << 4); | ||
| 1365 | } | ||
| 1366 | t4_write_reg(adap, MA_INT_CAUSE, status); | ||
| 1367 | t4_fatal_err(adap); | ||
| 1368 | } | ||
| 1369 | |||
| 1370 | /* | ||
| 1371 | * SMB interrupt handler. | ||
| 1372 | */ | ||
| 1373 | static void smb_intr_handler(struct adapter *adap) | ||
| 1374 | { | ||
| 1375 | static struct intr_info smb_intr_info[] = { | ||
| 1376 | { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, | ||
| 1377 | { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, | ||
| 1378 | { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, | ||
| 1379 | { 0 } | ||
| 1380 | }; | ||
| 1381 | |||
| 1382 | if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info)) | ||
| 1383 | t4_fatal_err(adap); | ||
| 1384 | } | ||
| 1385 | |||
| 1386 | /* | ||
| 1387 | * NC-SI interrupt handler. | ||
| 1388 | */ | ||
| 1389 | static void ncsi_intr_handler(struct adapter *adap) | ||
| 1390 | { | ||
| 1391 | static struct intr_info ncsi_intr_info[] = { | ||
| 1392 | { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, | ||
| 1393 | { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, | ||
| 1394 | { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, | ||
| 1395 | { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, | ||
| 1396 | { 0 } | ||
| 1397 | }; | ||
| 1398 | |||
| 1399 | if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info)) | ||
| 1400 | t4_fatal_err(adap); | ||
| 1401 | } | ||
| 1402 | |||
| 1403 | /* | ||
| 1404 | * XGMAC interrupt handler. | ||
| 1405 | */ | ||
| 1406 | static void xgmac_intr_handler(struct adapter *adap, int port) | ||
| 1407 | { | ||
| 1408 | u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); | ||
| 1409 | |||
| 1410 | v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; | ||
| 1411 | if (!v) | ||
| 1412 | return; | ||
| 1413 | |||
| 1414 | if (v & TXFIFO_PRTY_ERR) | ||
| 1415 | dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n", | ||
| 1416 | port); | ||
| 1417 | if (v & RXFIFO_PRTY_ERR) | ||
| 1418 | dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n", | ||
| 1419 | port); | ||
| 1420 | t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v); | ||
| 1421 | t4_fatal_err(adap); | ||
| 1422 | } | ||
| 1423 | |||
| 1424 | /* | ||
| 1425 | * PL interrupt handler. | ||
| 1426 | */ | ||
| 1427 | static void pl_intr_handler(struct adapter *adap) | ||
| 1428 | { | ||
| 1429 | static struct intr_info pl_intr_info[] = { | ||
| 1430 | { FATALPERR, "T4 fatal parity error", -1, 1 }, | ||
| 1431 | { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, | ||
| 1432 | { 0 } | ||
| 1433 | }; | ||
| 1434 | |||
| 1435 | if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info)) | ||
| 1436 | t4_fatal_err(adap); | ||
| 1437 | } | ||
| 1438 | |||
| 1439 | #define PF_INTR_MASK (PFSW | PFCIM) | ||
| 1440 | #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ | ||
| 1441 | EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \ | ||
| 1442 | CPL_SWITCH | SGE | ULP_TX) | ||
| 1443 | |||
| 1444 | /** | ||
| 1445 | * t4_slow_intr_handler - control path interrupt handler | ||
| 1446 | * @adapter: the adapter | ||
| 1447 | * | ||
| 1448 | * T4 interrupt handler for non-data global interrupt events, e.g., errors. | ||
| 1449 | * The designation 'slow' is because it involves register reads, while | ||
| 1450 | * data interrupts typically don't involve any MMIOs. | ||
| 1451 | */ | ||
| 1452 | int t4_slow_intr_handler(struct adapter *adapter) | ||
| 1453 | { | ||
| 1454 | u32 cause = t4_read_reg(adapter, PL_INT_CAUSE); | ||
| 1455 | |||
| 1456 | if (!(cause & GLBL_INTR_MASK)) | ||
| 1457 | return 0; | ||
| 1458 | if (cause & CIM) | ||
| 1459 | cim_intr_handler(adapter); | ||
| 1460 | if (cause & MPS) | ||
| 1461 | mps_intr_handler(adapter); | ||
| 1462 | if (cause & NCSI) | ||
| 1463 | ncsi_intr_handler(adapter); | ||
| 1464 | if (cause & PL) | ||
| 1465 | pl_intr_handler(adapter); | ||
| 1466 | if (cause & SMB) | ||
| 1467 | smb_intr_handler(adapter); | ||
| 1468 | if (cause & XGMAC0) | ||
| 1469 | xgmac_intr_handler(adapter, 0); | ||
| 1470 | if (cause & XGMAC1) | ||
| 1471 | xgmac_intr_handler(adapter, 1); | ||
| 1472 | if (cause & XGMAC_KR0) | ||
| 1473 | xgmac_intr_handler(adapter, 2); | ||
| 1474 | if (cause & XGMAC_KR1) | ||
| 1475 | xgmac_intr_handler(adapter, 3); | ||
| 1476 | if (cause & PCIE) | ||
| 1477 | pcie_intr_handler(adapter); | ||
| 1478 | if (cause & MC) | ||
| 1479 | mem_intr_handler(adapter, MEM_MC); | ||
| 1480 | if (cause & EDC0) | ||
| 1481 | mem_intr_handler(adapter, MEM_EDC0); | ||
| 1482 | if (cause & EDC1) | ||
| 1483 | mem_intr_handler(adapter, MEM_EDC1); | ||
| 1484 | if (cause & LE) | ||
| 1485 | le_intr_handler(adapter); | ||
| 1486 | if (cause & TP) | ||
| 1487 | tp_intr_handler(adapter); | ||
| 1488 | if (cause & MA) | ||
| 1489 | ma_intr_handler(adapter); | ||
| 1490 | if (cause & PM_TX) | ||
| 1491 | pmtx_intr_handler(adapter); | ||
| 1492 | if (cause & PM_RX) | ||
| 1493 | pmrx_intr_handler(adapter); | ||
| 1494 | if (cause & ULP_RX) | ||
| 1495 | ulprx_intr_handler(adapter); | ||
| 1496 | if (cause & CPL_SWITCH) | ||
| 1497 | cplsw_intr_handler(adapter); | ||
| 1498 | if (cause & SGE) | ||
| 1499 | sge_intr_handler(adapter); | ||
| 1500 | if (cause & ULP_TX) | ||
| 1501 | ulptx_intr_handler(adapter); | ||
| 1502 | |||
| 1503 | /* Clear the interrupts just processed for which we are the master. */ | ||
| 1504 | t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK); | ||
| 1505 | (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ | ||
| 1506 | return 1; | ||
| 1507 | } | ||
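
/*
 * Editor's illustrative sketch (not part of the driver, no hardware needed):
 * the dispatch pattern t4_slow_intr_handler() uses above -- read a cause
 * word, call one handler per set bit, then write the handled bits back to
 * acknowledge them -- reduced to a standalone example with made-up causes.
 */
#include <stdio.h>

#define EX_CAUSE_A (1u << 0)
#define EX_CAUSE_B (1u << 1)

static unsigned int ex_cause_reg = EX_CAUSE_A | EX_CAUSE_B; /* stand-in for PL_INT_CAUSE */

static void handle_a(void) { printf("handled A\n"); }
static void handle_b(void) { printf("handled B\n"); }

static int ex_slow_intr_handler(void)
{
	unsigned int cause = ex_cause_reg;	/* "read" the cause register */

	if (!cause)
		return 0;			/* not our interrupt */
	if (cause & EX_CAUSE_A)
		handle_a();
	if (cause & EX_CAUSE_B)
		handle_b();
	ex_cause_reg &= ~cause;			/* "write back" to clear what was handled */
	return 1;
}

int main(void)
{
	return !ex_slow_intr_handler();		/* exits 0 once both causes are serviced */
}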
| 1508 | |||
| 1509 | /** | ||
| 1510 | * t4_intr_enable - enable interrupts | ||
| 1511 | * @adapter: the adapter whose interrupts should be enabled | ||
| 1512 | * | ||
| 1513 | * Enable PF-specific interrupts for the calling function and the top-level | ||
| 1514 | * interrupt concentrator for global interrupts. Interrupts are already | ||
| 1515 | * enabled at each module, here we just enable the roots of the interrupt | ||
| 1516 | * enabled at each module; here we just enable the roots of the interrupt | ||
| 1517 | * | ||
| 1518 | * Note: this function should be called only when the driver manages | ||
| 1519 | * non-PF-specific interrupts from the various HW modules. Only one PCI | ||
| 1520 | * function at a time should be doing this. | ||
| 1521 | */ | ||
| 1522 | void t4_intr_enable(struct adapter *adapter) | ||
| 1523 | { | ||
| 1524 | u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); | ||
| 1525 | |||
| 1526 | t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE | | ||
| 1527 | ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 | | ||
| 1528 | ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 | | ||
| 1529 | ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | | ||
| 1530 | ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | | ||
| 1531 | ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | | ||
| 1532 | ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | | ||
| 1533 | EGRESS_SIZE_ERR); | ||
| 1534 | t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); | ||
| 1535 | t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); | ||
| 1536 | } | ||
| 1537 | |||
| 1538 | /** | ||
| 1539 | * t4_intr_disable - disable interrupts | ||
| 1540 | * @adapter: the adapter whose interrupts should be disabled | ||
| 1541 | * | ||
| 1542 | * Disable interrupts. We only disable the top-level interrupt | ||
| 1543 | * concentrators. The caller must be a PCI function managing global | ||
| 1544 | * interrupts. | ||
| 1545 | */ | ||
| 1546 | void t4_intr_disable(struct adapter *adapter) | ||
| 1547 | { | ||
| 1548 | u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); | ||
| 1549 | |||
| 1550 | t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0); | ||
| 1551 | t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0); | ||
| 1552 | } | ||
| 1553 | |||
| 1554 | /** | ||
| 1555 | * t4_intr_clear - clear all interrupts | ||
| 1556 | * @adapter: the adapter whose interrupts should be cleared | ||
| 1557 | * | ||
| 1558 | * Clears all interrupts. The caller must be a PCI function managing | ||
| 1559 | * global interrupts. | ||
| 1560 | */ | ||
| 1561 | void t4_intr_clear(struct adapter *adapter) | ||
| 1562 | { | ||
| 1563 | static const unsigned int cause_reg[] = { | ||
| 1564 | SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3, | ||
| 1565 | PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, | ||
| 1566 | PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, | ||
| 1567 | PCIE_NONFAT_ERR, PCIE_INT_CAUSE, | ||
| 1568 | MC_INT_CAUSE, | ||
| 1569 | MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE, | ||
| 1570 | EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1), | ||
| 1571 | CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE, | ||
| 1572 | MYPF_REG(CIM_PF_HOST_INT_CAUSE), | ||
| 1573 | TP_INT_CAUSE, | ||
| 1574 | ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE, | ||
| 1575 | PM_RX_INT_CAUSE, PM_TX_INT_CAUSE, | ||
| 1576 | MPS_RX_PERR_INT_CAUSE, | ||
| 1577 | CPL_INTR_CAUSE, | ||
| 1578 | MYPF_REG(PL_PF_INT_CAUSE), | ||
| 1579 | PL_PL_INT_CAUSE, | ||
| 1580 | LE_DB_INT_CAUSE, | ||
| 1581 | }; | ||
| 1582 | |||
| 1583 | unsigned int i; | ||
| 1584 | |||
| 1585 | for (i = 0; i < ARRAY_SIZE(cause_reg); ++i) | ||
| 1586 | t4_write_reg(adapter, cause_reg[i], 0xffffffff); | ||
| 1587 | |||
| 1588 | t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK); | ||
| 1589 | (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ | ||
| 1590 | } | ||
| 1591 | |||
| 1592 | /** | ||
| 1593 | * hash_mac_addr - return the hash value of a MAC address | ||
| 1594 | * @addr: the 48-bit Ethernet MAC address | ||
| 1595 | * | ||
| 1596 | * Hashes a MAC address according to the hash function used by HW inexact | ||
| 1597 | * (hash) address matching. | ||
| 1598 | */ | ||
| 1599 | static int hash_mac_addr(const u8 *addr) | ||
| 1600 | { | ||
| 1601 | u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; | ||
| 1602 | u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; | ||
| 1603 | a ^= b; | ||
| 1604 | a ^= (a >> 12); | ||
| 1605 | a ^= (a >> 6); | ||
| 1606 | return a & 0x3f; | ||
| 1607 | } | ||
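
/*
 * Editor's illustrative sketch (standalone copy, not driver code): the same
 * 6-bit fold as hash_mac_addr() above, runnable in user space to see which
 * bucket of the 64-bit inexact-match vector a sample (made-up) MAC lands in.
 */
#include <stdio.h>
#include <stdint.h>

static int example_hash_mac_addr(const uint8_t *addr)
{
	uint32_t a = ((uint32_t)addr[0] << 16) | ((uint32_t)addr[1] << 8) | addr[2];
	uint32_t b = ((uint32_t)addr[3] << 16) | ((uint32_t)addr[4] << 8) | addr[5];

	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;		/* 64 buckets: one bit of a u64 hash vector */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };

	printf("bucket = %d\n", example_hash_mac_addr(mac));
	return 0;
}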
| 1608 | |||
| 1609 | /** | ||
| 1610 | * t4_config_rss_range - configure a portion of the RSS mapping table | ||
| 1611 | * @adapter: the adapter | ||
| 1612 | * @mbox: mbox to use for the FW command | ||
| 1613 | * @viid: virtual interface whose RSS subtable is to be written | ||
| 1614 | * @start: start entry in the table to write | ||
| 1615 | * @n: how many table entries to write | ||
| 1616 | * @rspq: values for the response queue lookup table | ||
| 1617 | * @nrspq: number of values in @rspq | ||
| 1618 | * | ||
| 1619 | * Programs the selected part of the VI's RSS mapping table with the | ||
| 1620 | * provided values. If @nrspq < @n the supplied values are used repeatedly | ||
| 1621 | * until the full table range is populated. | ||
| 1622 | * | ||
| 1623 | * The caller must ensure the values in @rspq are in the range allowed for | ||
| 1624 | * @viid. | ||
| 1625 | */ | ||
| 1626 | int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, | ||
| 1627 | int start, int n, const u16 *rspq, unsigned int nrspq) | ||
| 1628 | { | ||
| 1629 | int ret; | ||
| 1630 | const u16 *rsp = rspq; | ||
| 1631 | const u16 *rsp_end = rspq + nrspq; | ||
| 1632 | struct fw_rss_ind_tbl_cmd cmd; | ||
| 1633 | |||
| 1634 | memset(&cmd, 0, sizeof(cmd)); | ||
| 1635 | cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | | ||
| 1636 | FW_CMD_REQUEST | FW_CMD_WRITE | | ||
| 1637 | FW_RSS_IND_TBL_CMD_VIID(viid)); | ||
| 1638 | cmd.retval_len16 = htonl(FW_LEN16(cmd)); | ||
| 1639 | |||
| 1640 | /* each fw_rss_ind_tbl_cmd takes up to 32 entries */ | ||
| 1641 | while (n > 0) { | ||
| 1642 | int nq = min(n, 32); | ||
| 1643 | __be32 *qp = &cmd.iq0_to_iq2; | ||
| 1644 | |||
| 1645 | cmd.niqid = htons(nq); | ||
| 1646 | cmd.startidx = htons(start); | ||
| 1647 | |||
| 1648 | start += nq; | ||
| 1649 | n -= nq; | ||
| 1650 | |||
| 1651 | while (nq > 0) { | ||
| 1652 | unsigned int v; | ||
| 1653 | |||
| 1654 | v = FW_RSS_IND_TBL_CMD_IQ0(*rsp); | ||
| 1655 | if (++rsp >= rsp_end) | ||
| 1656 | rsp = rspq; | ||
| 1657 | v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp); | ||
| 1658 | if (++rsp >= rsp_end) | ||
| 1659 | rsp = rspq; | ||
| 1660 | v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp); | ||
| 1661 | if (++rsp >= rsp_end) | ||
| 1662 | rsp = rspq; | ||
| 1663 | |||
| 1664 | *qp++ = htonl(v); | ||
| 1665 | nq -= 3; | ||
| 1666 | } | ||
| 1667 | |||
| 1668 | ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); | ||
| 1669 | if (ret) | ||
| 1670 | return ret; | ||
| 1671 | } | ||
| 1672 | return 0; | ||
| 1673 | } | ||
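
/*
 * Editor's illustrative sketch (assumption, host-side only): the logical
 * effect of the wrap-around in t4_config_rss_range() above -- a short @rspq
 * array is replayed until all @n indirection-table slots are filled.  The
 * real command additionally packs three queue IDs per 32-bit word; this
 * sketch only shows the resulting table contents.
 */
#include <stdio.h>

static void ex_fill_rss_table(unsigned short *tbl, int n,
			      const unsigned short *rspq, unsigned int nrspq)
{
	unsigned int j = 0;
	int i;

	for (i = 0; i < n; i++) {
		tbl[i] = rspq[j];
		if (++j >= nrspq)	/* wrap and reuse the supplied values */
			j = 0;
	}
}

int main(void)
{
	unsigned short tbl[8];
	const unsigned short rspq[3] = { 10, 11, 12 };
	int i;

	ex_fill_rss_table(tbl, 8, rspq, 3);
	for (i = 0; i < 8; i++)		/* prints 10 11 12 10 11 12 10 11 */
		printf("%u ", tbl[i]);
	printf("\n");
	return 0;
}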
| 1674 | |||
| 1675 | /** | ||
| 1676 | * t4_config_glbl_rss - configure the global RSS mode | ||
| 1677 | * @adapter: the adapter | ||
| 1678 | * @mbox: mbox to use for the FW command | ||
| 1679 | * @mode: global RSS mode | ||
| 1680 | * @flags: mode-specific flags | ||
| 1681 | * | ||
| 1682 | * Sets the global RSS mode. | ||
| 1683 | */ | ||
| 1684 | int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, | ||
| 1685 | unsigned int flags) | ||
| 1686 | { | ||
| 1687 | struct fw_rss_glb_config_cmd c; | ||
| 1688 | |||
| 1689 | memset(&c, 0, sizeof(c)); | ||
| 1690 | c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | | ||
| 1691 | FW_CMD_REQUEST | FW_CMD_WRITE); | ||
| 1692 | c.retval_len16 = htonl(FW_LEN16(c)); | ||
| 1693 | if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { | ||
| 1694 | c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); | ||
| 1695 | } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { | ||
| 1696 | c.u.basicvirtual.mode_pkd = | ||
| 1697 | htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); | ||
| 1698 | c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); | ||
| 1699 | } else | ||
| 1700 | return -EINVAL; | ||
| 1701 | return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); | ||
| 1702 | } | ||
| 1703 | |||
| 1704 | /* Read an RSS table row */ | ||
| 1705 | static int rd_rss_row(struct adapter *adap, int row, u32 *val) | ||
| 1706 | { | ||
| 1707 | t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row); | ||
| 1708 | return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1, | ||
| 1709 | 5, 0, val); | ||
| 1710 | } | ||
| 1711 | |||
| 1712 | /** | ||
| 1713 | * t4_read_rss - read the contents of the RSS mapping table | ||
| 1714 | * @adapter: the adapter | ||
| 1715 | * @map: holds the contents of the RSS mapping table | ||
| 1716 | * | ||
| 1717 | * Reads the contents of the RSS hash->queue mapping table. | ||
| 1718 | */ | ||
| 1719 | int t4_read_rss(struct adapter *adapter, u16 *map) | ||
| 1720 | { | ||
| 1721 | u32 val; | ||
| 1722 | int i, ret; | ||
| 1723 | |||
| 1724 | for (i = 0; i < RSS_NENTRIES / 2; ++i) { | ||
| 1725 | ret = rd_rss_row(adapter, i, &val); | ||
| 1726 | if (ret) | ||
| 1727 | return ret; | ||
| 1728 | *map++ = LKPTBLQUEUE0_GET(val); | ||
| 1729 | *map++ = LKPTBLQUEUE1_GET(val); | ||
| 1730 | } | ||
| 1731 | return 0; | ||
| 1732 | } | ||
| 1733 | |||
| 1734 | /** | ||
| 1735 | * t4_tp_get_tcp_stats - read TP's TCP MIB counters | ||
| 1736 | * @adap: the adapter | ||
| 1737 | * @v4: holds the TCP/IP counter values | ||
| 1738 | * @v6: holds the TCP/IPv6 counter values | ||
| 1739 | * | ||
| 1740 | * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. | ||
| 1741 | * Either @v4 or @v6 may be %NULL to skip the corresponding stats. | ||
| 1742 | */ | ||
| 1743 | void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, | ||
| 1744 | struct tp_tcp_stats *v6) | ||
| 1745 | { | ||
| 1746 | u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1]; | ||
| 1747 | |||
| 1748 | #define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST) | ||
| 1749 | #define STAT(x) val[STAT_IDX(x)] | ||
| 1750 | #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) | ||
| 1751 | |||
| 1752 | if (v4) { | ||
| 1753 | t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, | ||
| 1754 | ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST); | ||
| 1755 | v4->tcpOutRsts = STAT(OUT_RST); | ||
| 1756 | v4->tcpInSegs = STAT64(IN_SEG); | ||
| 1757 | v4->tcpOutSegs = STAT64(OUT_SEG); | ||
| 1758 | v4->tcpRetransSegs = STAT64(RXT_SEG); | ||
| 1759 | } | ||
| 1760 | if (v6) { | ||
| 1761 | t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, | ||
| 1762 | ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST); | ||
| 1763 | v6->tcpOutRsts = STAT(OUT_RST); | ||
| 1764 | v6->tcpInSegs = STAT64(IN_SEG); | ||
| 1765 | v6->tcpOutSegs = STAT64(OUT_SEG); | ||
| 1766 | v6->tcpRetransSegs = STAT64(RXT_SEG); | ||
| 1767 | } | ||
| 1768 | #undef STAT64 | ||
| 1769 | #undef STAT | ||
| 1770 | #undef STAT_IDX | ||
| 1771 | } | ||
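
/*
 * Editor's illustrative sketch (assumption): the STAT64() macro above splices
 * a 64-bit MIB counter back together from the two 32-bit words returned by
 * the indirect reads; this is the same bit manipulation on made-up halves.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t ex_combine_hi_lo(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;	/* same form as STAT64() */
}

int main(void)
{
	uint32_t hi = 0x00000002, lo = 0x80000000;	/* hypothetical HI/LO words */

	printf("counter = %llu\n",
	       (unsigned long long)ex_combine_hi_lo(hi, lo));	/* 10737418240 */
	return 0;
}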
| 1772 | |||
| 1773 | /** | ||
| 1774 | * t4_tp_get_err_stats - read TP's error MIB counters | ||
| 1775 | * @adap: the adapter | ||
| 1776 | * @st: holds the counter values | ||
| 1777 | * | ||
| 1778 | * Returns the values of TP's error counters. | ||
| 1779 | */ | ||
| 1780 | void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) | ||
| 1781 | { | ||
| 1782 | t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs, | ||
| 1783 | 12, TP_MIB_MAC_IN_ERR_0); | ||
| 1784 | t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops, | ||
| 1785 | 8, TP_MIB_TNL_CNG_DROP_0); | ||
| 1786 | t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops, | ||
| 1787 | 4, TP_MIB_TNL_DROP_0); | ||
| 1788 | t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops, | ||
| 1789 | 4, TP_MIB_OFD_VLN_DROP_0); | ||
| 1790 | t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs, | ||
| 1791 | 4, TP_MIB_TCP_V6IN_ERR_0); | ||
| 1792 | t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh, | ||
| 1793 | 2, TP_MIB_OFD_ARP_DROP); | ||
| 1794 | } | ||
| 1795 | |||
| 1796 | /** | ||
| 1797 | * t4_read_mtu_tbl - returns the values in the HW path MTU table | ||
| 1798 | * @adap: the adapter | ||
| 1799 | * @mtus: where to store the MTU values | ||
| 1800 | * @mtu_log: where to store the MTU base-2 log (may be %NULL) | ||
| 1801 | * | ||
| 1802 | * Reads the HW path MTU table. | ||
| 1803 | */ | ||
| 1804 | void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) | ||
| 1805 | { | ||
| 1806 | u32 v; | ||
| 1807 | int i; | ||
| 1808 | |||
| 1809 | for (i = 0; i < NMTUS; ++i) { | ||
| 1810 | t4_write_reg(adap, TP_MTU_TABLE, | ||
| 1811 | MTUINDEX(0xff) | MTUVALUE(i)); | ||
| 1812 | v = t4_read_reg(adap, TP_MTU_TABLE); | ||
| 1813 | mtus[i] = MTUVALUE_GET(v); | ||
| 1814 | if (mtu_log) | ||
| 1815 | mtu_log[i] = MTUWIDTH_GET(v); | ||
| 1816 | } | ||
| 1817 | } | ||
| 1818 | |||
| 1819 | /** | ||
| 1820 | * init_cong_ctrl - initialize congestion control parameters | ||
| 1821 | * @a: the alpha values for congestion control | ||
| 1822 | * @b: the beta values for congestion control | ||
| 1823 | * | ||
| 1824 | * Initialize the congestion control parameters. | ||
| 1825 | */ | ||
| 1826 | static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) | ||
| 1827 | { | ||
| 1828 | a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; | ||
| 1829 | a[9] = 2; | ||
| 1830 | a[10] = 3; | ||
| 1831 | a[11] = 4; | ||
| 1832 | a[12] = 5; | ||
| 1833 | a[13] = 6; | ||
| 1834 | a[14] = 7; | ||
| 1835 | a[15] = 8; | ||
| 1836 | a[16] = 9; | ||
| 1837 | a[17] = 10; | ||
| 1838 | a[18] = 14; | ||
| 1839 | a[19] = 17; | ||
| 1840 | a[20] = 21; | ||
| 1841 | a[21] = 25; | ||
| 1842 | a[22] = 30; | ||
| 1843 | a[23] = 35; | ||
| 1844 | a[24] = 45; | ||
| 1845 | a[25] = 60; | ||
| 1846 | a[26] = 80; | ||
| 1847 | a[27] = 100; | ||
| 1848 | a[28] = 200; | ||
| 1849 | a[29] = 300; | ||
| 1850 | a[30] = 400; | ||
| 1851 | a[31] = 500; | ||
| 1852 | |||
| 1853 | b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; | ||
| 1854 | b[9] = b[10] = 1; | ||
| 1855 | b[11] = b[12] = 2; | ||
| 1856 | b[13] = b[14] = b[15] = b[16] = 3; | ||
| 1857 | b[17] = b[18] = b[19] = b[20] = b[21] = 4; | ||
| 1858 | b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; | ||
| 1859 | b[28] = b[29] = 6; | ||
| 1860 | b[30] = b[31] = 7; | ||
| 1861 | } | ||
| 1862 | |||
| 1863 | /* The minimum additive increment value for the congestion control table */ | ||
| 1864 | #define CC_MIN_INCR 2U | ||
| 1865 | |||
| 1866 | /** | ||
| 1867 | * t4_load_mtus - write the MTU and congestion control HW tables | ||
| 1868 | * @adap: the adapter | ||
| 1869 | * @mtus: the values for the MTU table | ||
| 1870 | * @alpha: the values for the congestion control alpha parameter | ||
| 1871 | * @beta: the values for the congestion control beta parameter | ||
| 1872 | * | ||
| 1873 | * Write the HW MTU table with the supplied MTUs and the high-speed | ||
| 1874 | * congestion control table with the supplied alpha, beta, and MTUs. | ||
| 1875 | * We write the two tables together because the additive increments | ||
| 1876 | * depend on the MTUs. | ||
| 1877 | */ | ||
| 1878 | void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, | ||
| 1879 | const unsigned short *alpha, const unsigned short *beta) | ||
| 1880 | { | ||
| 1881 | static const unsigned int avg_pkts[NCCTRL_WIN] = { | ||
| 1882 | 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, | ||
| 1883 | 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, | ||
| 1884 | 28672, 40960, 57344, 81920, 114688, 163840, 229376 | ||
| 1885 | }; | ||
| 1886 | |||
| 1887 | unsigned int i, w; | ||
| 1888 | |||
| 1889 | for (i = 0; i < NMTUS; ++i) { | ||
| 1890 | unsigned int mtu = mtus[i]; | ||
| 1891 | unsigned int log2 = fls(mtu); | ||
| 1892 | |||
| 1893 | if (!(mtu & ((1 << log2) >> 2))) /* round */ | ||
| 1894 | log2--; | ||
| 1895 | t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) | | ||
| 1896 | MTUWIDTH(log2) | MTUVALUE(mtu)); | ||
| 1897 | |||
| 1898 | for (w = 0; w < NCCTRL_WIN; ++w) { | ||
| 1899 | unsigned int inc; | ||
| 1900 | |||
| 1901 | inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], | ||
| 1902 | CC_MIN_INCR); | ||
| 1903 | |||
| 1904 | t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) | | ||
| 1905 | (w << 16) | (beta[w] << 13) | inc); | ||
| 1906 | } | ||
| 1907 | } | ||
| 1908 | } | ||
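
/*
 * Editor's illustrative sketch (assumption, no registers touched): the two
 * pieces of arithmetic used by t4_load_mtus() above -- the rounded base-2
 * MTU width and the congestion-control additive increment
 * max(((mtu - 40) * alpha[w]) / avg_pkts[w], CC_MIN_INCR) -- worked for one
 * sample MTU and one window of the default tables (alpha = 9, avg_pkts = 1281).
 */
#include <stdio.h>

#define EX_CC_MIN_INCR 2U

/* same semantics as the kernel's fls(): 1-based index of the highest set bit */
static unsigned int ex_fls(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int mtu = 1500, alpha = 9, avg_pkts = 1281;
	unsigned int log2 = ex_fls(mtu);
	unsigned int inc;

	if (!(mtu & ((1 << log2) >> 2)))	/* round, as in t4_load_mtus() */
		log2--;

	inc = ((mtu - 40) * alpha) / avg_pkts;
	if (inc < EX_CC_MIN_INCR)
		inc = EX_CC_MIN_INCR;

	/* For 1500 the 512 bit is clear, so the width rounds down to 10;
	 * inc = 13140 / 1281 = 10. */
	printf("mtu=%u width=%u inc=%u\n", mtu, log2, inc);
	return 0;
}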
| 1909 | |||
| 1910 | /** | ||
| 1911 | * t4_set_trace_filter - configure one of the tracing filters | ||
| 1912 | * @adap: the adapter | ||
| 1913 | * @tp: the desired trace filter parameters | ||
| 1914 | * @idx: which filter to configure | ||
| 1915 | * @enable: whether to enable or disable the filter | ||
| 1916 | * | ||
| 1917 | * Configures one of the tracing filters available in HW. If @enable is | ||
| 1918 | * %0 @tp is not examined and may be %NULL. | ||
| 1919 | */ | ||
| 1920 | int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, | ||
| 1921 | int idx, int enable) | ||
| 1922 | { | ||
| 1923 | int i, ofst = idx * 4; | ||
| 1924 | u32 data_reg, mask_reg, cfg; | ||
| 1925 | u32 multitrc = TRCMULTIFILTER; | ||
| 1926 | |||
| 1927 | if (!enable) { | ||
| 1928 | t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0); | ||
| 1929 | goto out; | ||
| 1930 | } | ||
| 1931 | |||
| 1932 | if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f || | ||
| 1933 | tp->skip_ofst > 0x1f || tp->min_len > 0x1ff || | ||
| 1934 | tp->snap_len > 9600 || (idx && tp->snap_len > 256)) | ||
| 1935 | return -EINVAL; | ||
| 1936 | |||
| 1937 | if (tp->snap_len > 256) { /* must be tracer 0 */ | ||
| 1938 | if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) | | ||
| 1939 | t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) | | ||
| 1940 | t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN) | ||
| 1941 | return -EINVAL; /* other tracers are enabled */ | ||
| 1942 | multitrc = 0; | ||
| 1943 | } else if (idx) { | ||
| 1944 | i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B); | ||
| 1945 | if (TFCAPTUREMAX_GET(i) > 256 && | ||
| 1946 | (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN)) | ||
| 1947 | return -EINVAL; | ||
| 1948 | } | ||
| 1949 | |||
| 1950 | /* stop the tracer we'll be changing */ | ||
| 1951 | t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0); | ||
| 1952 | |||
| 1953 | /* disable tracing globally if running in the wrong single/multi mode */ | ||
| 1954 | cfg = t4_read_reg(adap, MPS_TRC_CFG); | ||
| 1955 | if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) { | ||
| 1956 | t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN); | ||
| 1957 | t4_read_reg(adap, MPS_TRC_CFG); /* flush */ | ||
| 1958 | msleep(1); | ||
| 1959 | if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY)) | ||
| 1960 | return -ETIMEDOUT; | ||
| 1961 | } | ||
| 1962 | /* | ||
| 1963 | * At this point either the tracing is enabled and in the right mode or | ||
| 1964 | * disabled. | ||
| 1965 | */ | ||
| 1966 | |||
| 1967 | idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH); | ||
| 1968 | data_reg = MPS_TRC_FILTER0_MATCH + idx; | ||
| 1969 | mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx; | ||
| 1970 | |||
| 1971 | for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { | ||
| 1972 | t4_write_reg(adap, data_reg, tp->data[i]); | ||
| 1973 | t4_write_reg(adap, mask_reg, ~tp->mask[i]); | ||
| 1974 | } | ||
| 1975 | t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst, | ||
| 1976 | TFCAPTUREMAX(tp->snap_len) | | ||
| 1977 | TFMINPKTSIZE(tp->min_len)); | ||
| 1978 | t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, | ||
| 1979 | TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) | | ||
| 1980 | TFPORT(tp->port) | TFEN | | ||
| 1981 | (tp->invert ? TFINVERTMATCH : 0)); | ||
| 1982 | |||
| 1983 | cfg &= ~TRCMULTIFILTER; | ||
| 1984 | t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc); | ||
| 1985 | out: t4_read_reg(adap, MPS_TRC_CFG); /* flush */ | ||
| 1986 | return 0; | ||
| 1987 | } | ||
| 1988 | |||
| 1989 | /** | ||
| 1990 | * t4_get_trace_filter - query one of the tracing filters | ||
| 1991 | * @adap: the adapter | ||
| 1992 | * @tp: the current trace filter parameters | ||
| 1993 | * @idx: which trace filter to query | ||
| 1994 | * @enabled: non-zero if the filter is enabled | ||
| 1995 | * | ||
| 1996 | * Returns the current settings of one of the HW tracing filters. | ||
| 1997 | */ | ||
| 1998 | void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx, | ||
| 1999 | int *enabled) | ||
| 2000 | { | ||
| 2001 | u32 ctla, ctlb; | ||
| 2002 | int i, ofst = idx * 4; | ||
| 2003 | u32 data_reg, mask_reg; | ||
| 2004 | |||
| 2005 | ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst); | ||
| 2006 | ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst); | ||
| 2007 | |||
| 2008 | *enabled = !!(ctla & TFEN); | ||
| 2009 | tp->snap_len = TFCAPTUREMAX_GET(ctlb); | ||
| 2010 | tp->min_len = TFMINPKTSIZE_GET(ctlb); | ||
| 2011 | tp->skip_ofst = TFOFFSET_GET(ctla); | ||
| 2012 | tp->skip_len = TFLENGTH_GET(ctla); | ||
| 2013 | tp->invert = !!(ctla & TFINVERTMATCH); | ||
| 2014 | tp->port = TFPORT_GET(ctla); | ||
| 2015 | |||
| 2016 | ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx; | ||
| 2017 | data_reg = MPS_TRC_FILTER0_MATCH + ofst; | ||
| 2018 | mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst; | ||
| 2019 | |||
| 2020 | for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { | ||
| 2021 | tp->mask[i] = ~t4_read_reg(adap, mask_reg); | ||
| 2022 | tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i]; | ||
| 2023 | } | ||
| 2024 | } | ||
| 2025 | |||
| 2026 | /** | ||
| 2027 | * get_mps_bg_map - return the buffer groups associated with a port | ||
| 2028 | * @adap: the adapter | ||
| 2029 | * @idx: the port index | ||
| 2030 | * | ||
| 2031 | * Returns a bitmap indicating which MPS buffer groups are associated | ||
| 2032 | * with the given port. Bit i is set if buffer group i is used by the | ||
| 2033 | * port. | ||
| 2034 | */ | ||
| 2035 | static unsigned int get_mps_bg_map(struct adapter *adap, int idx) | ||
| 2036 | { | ||
| 2037 | u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL)); | ||
| 2038 | |||
| 2039 | if (n == 0) | ||
| 2040 | return idx == 0 ? 0xf : 0; | ||
| 2041 | if (n == 1) | ||
| 2042 | return idx < 2 ? (3 << (2 * idx)) : 0; | ||
| 2043 | return 1 << idx; | ||
| 2044 | } | ||
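
/*
 * Editor's illustrative sketch (assumption): the same port-to-buffer-group
 * mapping as get_mps_bg_map() above, but driven by a plain "number of ports"
 * field value instead of reading MPS_CMN_CTL, so the three cases can be
 * printed side by side.
 */
#include <stdio.h>

static unsigned int ex_bg_map(unsigned int nports_field, int idx)
{
	if (nports_field == 0)			/* single-port card: port 0 owns all 4 groups */
		return idx == 0 ? 0xf : 0;
	if (nports_field == 1)			/* dual-port card: 2 groups per port */
		return idx < 2 ? (3 << (2 * idx)) : 0;
	return 1 << idx;			/* otherwise: 1 group per port */
}

int main(void)
{
	int idx;

	for (idx = 0; idx < 4; idx++)		/* prints 0x3 0xc 0 0 */
		printf("dual-port card, port %d -> bg map %#x\n",
		       idx, ex_bg_map(1, idx));
	return 0;
}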
| 2045 | |||
| 2046 | /** | ||
| 2047 | * t4_get_port_stats - collect port statistics | ||
| 2048 | * @adap: the adapter | ||
| 2049 | * @idx: the port index | ||
| 2050 | * @p: the stats structure to fill | ||
| 2051 | * | ||
| 2052 | * Collect statistics related to the given port from HW. | ||
| 2053 | */ | ||
| 2054 | void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) | ||
| 2055 | { | ||
| 2056 | u32 bgmap = get_mps_bg_map(adap, idx); | ||
| 2057 | |||
| 2058 | #define GET_STAT(name) \ | ||
| 2059 | t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L)) | ||
| 2060 | #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) | ||
| 2061 | |||
| 2062 | p->tx_octets = GET_STAT(TX_PORT_BYTES); | ||
| 2063 | p->tx_frames = GET_STAT(TX_PORT_FRAMES); | ||
| 2064 | p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); | ||
| 2065 | p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); | ||
| 2066 | p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); | ||
| 2067 | p->tx_error_frames = GET_STAT(TX_PORT_ERROR); | ||
| 2068 | p->tx_frames_64 = GET_STAT(TX_PORT_64B); | ||
| 2069 | p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); | ||
| 2070 | p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); | ||
| 2071 | p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); | ||
| 2072 | p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); | ||
| 2073 | p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); | ||
| 2074 | p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); | ||
| 2075 | p->tx_drop = GET_STAT(TX_PORT_DROP); | ||
| 2076 | p->tx_pause = GET_STAT(TX_PORT_PAUSE); | ||
| 2077 | p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); | ||
| 2078 | p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); | ||
| 2079 | p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); | ||
| 2080 | p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); | ||
| 2081 | p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); | ||
| 2082 | p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); | ||
| 2083 | p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); | ||
| 2084 | p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); | ||
| 2085 | |||
| 2086 | p->rx_octets = GET_STAT(RX_PORT_BYTES); | ||
| 2087 | p->rx_frames = GET_STAT(RX_PORT_FRAMES); | ||
| 2088 | p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); | ||
| 2089 | p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); | ||
| 2090 | p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); | ||
| 2091 | p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); | ||
| 2092 | p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); | ||
| 2093 | p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); | ||
| 2094 | p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); | ||
| 2095 | p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); | ||
| 2096 | p->rx_runt = GET_STAT(RX_PORT_LESS_64B); | ||
| 2097 | p->rx_frames_64 = GET_STAT(RX_PORT_64B); | ||
| 2098 | p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); | ||
| 2099 | p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); | ||
| 2100 | p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); | ||
| 2101 | p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); | ||
| 2102 | p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); | ||
| 2103 | p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); | ||
| 2104 | p->rx_pause = GET_STAT(RX_PORT_PAUSE); | ||
| 2105 | p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); | ||
| 2106 | p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); | ||
| 2107 | p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); | ||
| 2108 | p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); | ||
| 2109 | p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); | ||
| 2110 | p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); | ||
| 2111 | p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); | ||
| 2112 | p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); | ||
| 2113 | |||
| 2114 | p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; | ||
| 2115 | p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; | ||
| 2116 | p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; | ||
| 2117 | p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; | ||
| 2118 | p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; | ||
| 2119 | p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; | ||
| 2120 | p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; | ||
| 2121 | p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; | ||
| 2122 | |||
| 2123 | #undef GET_STAT | ||
| 2124 | #undef GET_STAT_COM | ||
| 2125 | } | ||
| 2126 | |||
| 2127 | /** | ||
| 2128 | * t4_get_lb_stats - collect loopback port statistics | ||
| 2129 | * @adap: the adapter | ||
| 2130 | * @idx: the loopback port index | ||
| 2131 | * @p: the stats structure to fill | ||
| 2132 | * | ||
| 2133 | * Return HW statistics for the given loopback port. | ||
| 2134 | */ | ||
| 2135 | void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p) | ||
| 2136 | { | ||
| 2137 | u32 bgmap = get_mps_bg_map(adap, idx); | ||
| 2138 | |||
| 2139 | #define GET_STAT(name) \ | ||
| 2140 | t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)) | ||
| 2141 | #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) | ||
| 2142 | |||
| 2143 | p->octets = GET_STAT(BYTES); | ||
| 2144 | p->frames = GET_STAT(FRAMES); | ||
| 2145 | p->bcast_frames = GET_STAT(BCAST); | ||
| 2146 | p->mcast_frames = GET_STAT(MCAST); | ||
| 2147 | p->ucast_frames = GET_STAT(UCAST); | ||
| 2148 | p->error_frames = GET_STAT(ERROR); | ||
| 2149 | |||
| 2150 | p->frames_64 = GET_STAT(64B); | ||
| 2151 | p->frames_65_127 = GET_STAT(65B_127B); | ||
| 2152 | p->frames_128_255 = GET_STAT(128B_255B); | ||
| 2153 | p->frames_256_511 = GET_STAT(256B_511B); | ||
| 2154 | p->frames_512_1023 = GET_STAT(512B_1023B); | ||
| 2155 | p->frames_1024_1518 = GET_STAT(1024B_1518B); | ||
| 2156 | p->frames_1519_max = GET_STAT(1519B_MAX); | ||
| 2157 | p->drop = t4_read_reg(adap, PORT_REG(idx, | ||
| 2158 | MPS_PORT_STAT_LB_PORT_DROP_FRAMES)); | ||
| 2159 | |||
| 2160 | p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0; | ||
| 2161 | p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0; | ||
| 2162 | p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0; | ||
| 2163 | p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0; | ||
| 2164 | p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0; | ||
| 2165 | p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0; | ||
| 2166 | p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0; | ||
| 2167 | p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0; | ||
| 2168 | |||
| 2169 | #undef GET_STAT | ||
| 2170 | #undef GET_STAT_COM | ||
| 2171 | } | ||
| 2172 | |||
| 2173 | /** | ||
| 2174 | * t4_wol_magic_enable - enable/disable magic packet WoL | ||
| 2175 | * @adap: the adapter | ||
| 2176 | * @port: the physical port index | ||
| 2177 | * @addr: MAC address expected in magic packets, %NULL to disable | ||
| 2178 | * | ||
| 2179 | * Enables/disables magic packet wake-on-LAN for the selected port. | ||
| 2180 | */ | ||
| 2181 | void t4_wol_magic_enable(struct adapter *adap, unsigned int port, | ||
| 2182 | const u8 *addr) | ||
| 2183 | { | ||
| 2184 | if (addr) { | ||
| 2185 | t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO), | ||
| 2186 | (addr[2] << 24) | (addr[3] << 16) | | ||
| 2187 | (addr[4] << 8) | addr[5]); | ||
| 2188 | t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI), | ||
| 2189 | (addr[0] << 8) | addr[1]); | ||
| 2190 | } | ||
| 2191 | t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN, | ||
| 2192 | addr ? MAGICEN : 0); | ||
| 2193 | } | ||
| 2194 | |||
| 2195 | /** | ||
| 2196 | * t4_wol_pat_enable - enable/disable pattern-based WoL | ||
| 2197 | * @adap: the adapter | ||
| 2198 | * @port: the physical port index | ||
| 2199 | * @map: bitmap of which HW pattern filters to set | ||
| 2200 | * @mask0: byte mask for bytes 0-63 of a packet | ||
| 2201 | * @mask1: byte mask for bytes 64-127 of a packet | ||
| 2202 | * @crc: Ethernet CRC for selected bytes | ||
| 2203 | * @enable: enable/disable switch | ||
| 2204 | * | ||
| 2205 | * Sets the pattern filters indicated in @map to mask out the bytes | ||
| 2206 | * specified in @mask0/@mask1 in received packets and compare the CRC of | ||
| 2207 | * the resulting packet against @crc. If @enable is %true pattern-based | ||
| 2208 | * WoL is enabled, otherwise disabled. | ||
| 2209 | */ | ||
| 2210 | int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, | ||
| 2211 | u64 mask0, u64 mask1, unsigned int crc, bool enable) | ||
| 2212 | { | ||
| 2213 | int i; | ||
| 2214 | |||
| 2215 | if (!enable) { | ||
| 2216 | t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), | ||
| 2217 | PATEN, 0); | ||
| 2218 | return 0; | ||
| 2219 | } | ||
| 2220 | if (map > 0xff) | ||
| 2221 | return -EINVAL; | ||
| 2222 | |||
| 2223 | #define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name) | ||
| 2224 | |||
| 2225 | t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); | ||
| 2226 | t4_write_reg(adap, EPIO_REG(DATA2), mask1); | ||
| 2227 | t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); | ||
| 2228 | |||
| 2229 | for (i = 0; i < NWOL_PAT; i++, map >>= 1) { | ||
| 2230 | if (!(map & 1)) | ||
| 2231 | continue; | ||
| 2232 | |||
| 2233 | /* write byte masks */ | ||
| 2234 | t4_write_reg(adap, EPIO_REG(DATA0), mask0); | ||
| 2235 | t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR); | ||
| 2236 | t4_read_reg(adap, EPIO_REG(OP)); /* flush */ | ||
| 2237 | if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY) | ||
| 2238 | return -ETIMEDOUT; | ||
| 2239 | |||
| 2240 | /* write CRC */ | ||
| 2241 | t4_write_reg(adap, EPIO_REG(DATA0), crc); | ||
| 2242 | t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR); | ||
| 2243 | t4_read_reg(adap, EPIO_REG(OP)); /* flush */ | ||
| 2244 | if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY) | ||
| 2245 | return -ETIMEDOUT; | ||
| 2246 | } | ||
| 2247 | #undef EPIO_REG | ||
| 2248 | |||
| 2249 | t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN); | ||
| 2250 | return 0; | ||
| 2251 | } | ||
| 2252 | |||
| 2253 | #define INIT_CMD(var, cmd, rd_wr) do { \ | ||
| 2254 | (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \ | ||
| 2255 | FW_CMD_REQUEST | FW_CMD_##rd_wr); \ | ||
| 2256 | (var).retval_len16 = htonl(FW_LEN16(var)); \ | ||
| 2257 | } while (0) | ||
| 2258 | |||
| 2259 | /** | ||
| 2260 | * t4_mdio_rd - read a PHY register through MDIO | ||
| 2261 | * @adap: the adapter | ||
| 2262 | * @mbox: mailbox to use for the FW command | ||
| 2263 | * @phy_addr: the PHY address | ||
| 2264 | * @mmd: the PHY MMD to access (0 for clause 22 PHYs) | ||
| 2265 | * @reg: the register to read | ||
| 2266 | * @valp: where to store the value | ||
| 2267 | * | ||
| 2268 | * Issues a FW command through the given mailbox to read a PHY register. | ||
| 2269 | */ | ||
| 2270 | int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, | ||
| 2271 | unsigned int mmd, unsigned int reg, u16 *valp) | ||
| 2272 | { | ||
| 2273 | int ret; | ||
| 2274 | struct fw_ldst_cmd c; | ||
| 2275 | |||
| 2276 | memset(&c, 0, sizeof(c)); | ||
| 2277 | c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | | ||
| 2278 | FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); | ||
| 2279 | c.cycles_to_len16 = htonl(FW_LEN16(c)); | ||
| 2280 | c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | | ||
| 2281 | FW_LDST_CMD_MMD(mmd)); | ||
| 2282 | c.u.mdio.raddr = htons(reg); | ||
| 2283 | |||
| 2284 | ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); | ||
| 2285 | if (ret == 0) | ||
| 2286 | *valp = ntohs(c.u.mdio.rval); | ||
| 2287 | return ret; | ||
| 2288 | } | ||
| 2289 | |||
| 2290 | /** | ||
| 2291 | * t4_mdio_wr - write a PHY register through MDIO | ||
| 2292 | * @adap: the adapter | ||
| 2293 | * @mbox: mailbox to use for the FW command | ||
| 2294 | * @phy_addr: the PHY address | ||
| 2295 | * @mmd: the PHY MMD to access (0 for clause 22 PHYs) | ||
| 2296 | * @reg: the register to write | ||
| 2297 | * @valp: value to write | ||
| 2298 | * | ||
| 2299 | * Issues a FW command through the given mailbox to write a PHY register. | ||
| 2300 | */ | ||
| 2301 | int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, | ||
| 2302 | unsigned int mmd, unsigned int reg, u16 val) | ||
| 2303 | { | ||
| 2304 | struct fw_ldst_cmd c; | ||
| 2305 | |||
| 2306 | memset(&c, 0, sizeof(c)); | ||
| 2307 | c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | | ||
| 2308 | FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); | ||
| 2309 | c.cycles_to_len16 = htonl(FW_LEN16(c)); | ||
| 2310 | c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | | ||
| 2311 | FW_LDST_CMD_MMD(mmd)); | ||
| 2312 | c.u.mdio.raddr = htons(reg); | ||
| 2313 | c.u.mdio.rval = htons(val); | ||
| 2314 | |||
| 2315 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
| 2316 | } | ||
| 2317 | |||
| 2318 | /** | ||
| 2319 | * t4_fw_hello - establish communication with FW | ||
| 2320 | * @adap: the adapter | ||
| 2321 | * @mbox: mailbox to use for the FW command | ||
| 2322 | * @evt_mbox: mailbox to receive async FW events | ||
| 2323 | * @master: specifies the caller's willingness to be the device master | ||
| 2324 | * @state: returns the current device state | ||
| 2325 | * | ||
| 2326 | * Issues a command to establish communication with FW. | ||
| 2327 | */ | ||
| 2328 | int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, | ||
| 2329 | enum dev_master master, enum dev_state *state) | ||
| 2330 | { | ||
| 2331 | int ret; | ||
| 2332 | struct fw_hello_cmd c; | ||
| 2333 | |||
| 2334 | INIT_CMD(c, HELLO, WRITE); | ||
| 2335 | c.err_to_mbasyncnot = htonl( | ||
| 2336 | FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | | ||
| 2337 | FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | | ||
| 2338 | FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) | | ||
| 2339 | FW_HELLO_CMD_MBASYNCNOT(evt_mbox)); | ||
| 2340 | |||
| 2341 | ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); | ||
| 2342 | if (ret == 0 && state) { | ||
| 2343 | u32 v = ntohl(c.err_to_mbasyncnot); | ||
| 2344 | if (v & FW_HELLO_CMD_INIT) | ||
| 2345 | *state = DEV_STATE_INIT; | ||
| 2346 | else if (v & FW_HELLO_CMD_ERR) | ||
| 2347 | *state = DEV_STATE_ERR; | ||
| 2348 | else | ||
| 2349 | *state = DEV_STATE_UNINIT; | ||
| 2350 | } | ||
| 2351 | return ret; | ||
| 2352 | } | ||
| 2353 | |||
| 2354 | /** | ||
| 2355 | * t4_fw_bye - end communication with FW | ||
| 2356 | * @adap: the adapter | ||
| 2357 | * @mbox: mailbox to use for the FW command | ||
| 2358 | * | ||
| 2359 | * Issues a command to terminate communication with FW. | ||
| 2360 | */ | ||
| 2361 | int t4_fw_bye(struct adapter *adap, unsigned int mbox) | ||
| 2362 | { | ||
| 2363 | struct fw_bye_cmd c; | ||
| 2364 | |||
| 2365 | INIT_CMD(c, BYE, WRITE); | ||
| 2366 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
| 2367 | } | ||
| 2368 | |||
| 2369 | /** | ||
| 2370 | * t4_init_cmd - ask FW to initialize the device | ||
| 2371 | * @adap: the adapter | ||
| 2372 | * @mbox: mailbox to use for the FW command | ||
| 2373 | * | ||
| 2374 | * Issues a command to FW to partially initialize the device. This | ||
| 2375 | * performs initialization that generally doesn't depend on user input. | ||
| 2376 | */ | ||
| 2377 | int t4_early_init(struct adapter *adap, unsigned int mbox) | ||
| 2378 | { | ||
| 2379 | struct fw_initialize_cmd c; | ||
| 2380 | |||
| 2381 | INIT_CMD(c, INITIALIZE, WRITE); | ||
| 2382 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
| 2383 | } | ||
| 2384 | |||
| 2385 | /** | ||
| 2386 | * t4_fw_reset - issue a reset to FW | ||
| 2387 | * @adap: the adapter | ||
| 2388 | * @mbox: mailbox to use for the FW command | ||
| 2389 | * @reset: specifies the type of reset to perform | ||
| 2390 | * | ||
| 2391 | * Issues a reset command of the specified type to FW. | ||
| 2392 | */ | ||
| 2393 | int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) | ||
| 2394 | { | ||
| 2395 | struct fw_reset_cmd c; | ||
| 2396 | |||
| 2397 | INIT_CMD(c, RESET, WRITE); | ||
| 2398 | c.val = htonl(reset); | ||
| 2399 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
| 2400 | } | ||
| 2401 | |||
| 2402 | /** | ||
| 2403 | * t4_query_params - query FW or device parameters | ||
| 2404 | * @adap: the adapter | ||
| 2405 | * @mbox: mailbox to use for the FW command | ||
| 2406 | * @pf: the PF | ||
| 2407 | * @vf: the VF | ||
| 2408 | * @nparams: the number of parameters | ||
| 2409 | * @params: the parameter names | ||
| 2410 | * @val: the parameter values | ||
| 2411 | * | ||
| 2412 | * Reads the value of FW or device parameters. Up to 7 parameters can be | ||
| 2413 | * queried at once. | ||
| 2414 | */ | ||
| 2415 | int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
| 2416 | unsigned int vf, unsigned int nparams, const u32 *params, | ||
| 2417 | u32 *val) | ||
| 2418 | { | ||
| 2419 | int i, ret; | ||
| 2420 | struct fw_params_cmd c; | ||
| 2421 | __be32 *p = &c.param[0].mnem; | ||
| 2422 | |||
| 2423 | if (nparams > 7) | ||
| 2424 | return -EINVAL; | ||
| 2425 | |||
| 2426 | memset(&c, 0, sizeof(c)); | ||
| 2427 | c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | | ||
| 2428 | FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) | | ||
| 2429 | FW_PARAMS_CMD_VFN(vf)); | ||
| 2430 | c.retval_len16 = htonl(FW_LEN16(c)); | ||
| 2431 | for (i = 0; i < nparams; i++, p += 2) | ||
| 2432 | *p = htonl(*params++); | ||
| 2433 | |||
| 2434 | ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); | ||
| 2435 | if (ret == 0) | ||
| 2436 | for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) | ||
| 2437 | *val++ = ntohl(*p); | ||
| 2438 | return ret; | ||
| 2439 | } | ||
| 2440 | |||
| 2441 | /** | ||
| 2442 | * t4_set_params - sets FW or device parameters | ||
| 2443 | * @adap: the adapter | ||
| 2444 | * @mbox: mailbox to use for the FW command | ||
| 2445 | * @pf: the PF | ||
| 2446 | * @vf: the VF | ||
| 2447 | * @nparams: the number of parameters | ||
| 2448 | * @params: the parameter names | ||
| 2449 | * @val: the parameter values | ||
| 2450 | * | ||
| 2451 | * Sets the value of FW or device parameters. Up to 7 parameters can be | ||
| 2452 | * specified at once. | ||
| 2453 | */ | ||
| 2454 | int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
| 2455 | unsigned int vf, unsigned int nparams, const u32 *params, | ||
| 2456 | const u32 *val) | ||
| 2457 | { | ||
| 2458 | struct fw_params_cmd c; | ||
| 2459 | __be32 *p = &c.param[0].mnem; | ||
| 2460 | |||
| 2461 | if (nparams > 7) | ||
| 2462 | return -EINVAL; | ||
| 2463 | |||
| 2464 | memset(&c, 0, sizeof(c)); | ||
| 2465 | c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | | ||
| 2466 | FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) | | ||
| 2467 | FW_PARAMS_CMD_VFN(vf)); | ||
| 2468 | c.retval_len16 = htonl(FW_LEN16(c)); | ||
| 2469 | while (nparams--) { | ||
| 2470 | *p++ = htonl(*params++); | ||
| 2471 | *p++ = htonl(*val++); | ||
| 2472 | } | ||
| 2473 | |||
| 2474 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
| 2475 | } | ||
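
/*
 * Editor's illustrative sketch (assumption, host-side only): the payload
 * layout t4_set_params() builds above -- up to 7 (mnemonic, value) pairs
 * interleaved as big-endian 32-bit words after the command header -- using a
 * flat array in place of the real struct fw_params_cmd and made-up mnemonics.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>		/* htonl()/ntohl() */

int main(void)
{
	const uint32_t params[2] = { 0x1001, 0x1002 };	/* hypothetical parameter names */
	const uint32_t val[2]    = { 64, 128 };
	uint32_t payload[14];				/* room for 7 pairs */
	unsigned int i, n = 2;

	for (i = 0; i < n; i++) {
		payload[2 * i]     = htonl(params[i]);	/* parameter mnemonic */
		payload[2 * i + 1] = htonl(val[i]);	/* parameter value    */
	}

	for (i = 0; i < 2 * n; i++)
		printf("word %u = %#010x\n", i, ntohl(payload[i]));
	return 0;
}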
| 2476 | |||
| 2477 | /** | ||
| 2478 | * t4_cfg_pfvf - configure PF/VF resource limits | ||
| 2479 | * @adap: the adapter | ||
| 2480 | * @mbox: mailbox to use for the FW command | ||
| 2481 | * @pf: the PF being configured | ||
| 2482 | * @vf: the VF being configured | ||
| 2483 | * @txq: the max number of egress queues | ||
| 2484 | * @txq_eth_ctrl: the max number of egress Ethernet or control queues | ||
| 2485 | * @rxqi: the max number of interrupt-capable ingress queues | ||
| 2486 | * @rxq: the max number of interruptless ingress queues | ||
| 2487 | * @tc: the PCI traffic class | ||
| 2488 | * @vi: the max number of virtual interfaces | ||
| 2489 | * @cmask: the channel access rights mask for the PF/VF | ||
| 2490 | * @pmask: the port access rights mask for the PF/VF | ||
| 2491 | * @nexact: the maximum number of exact MPS filters | ||
| 2492 | * @rcaps: read capabilities | ||
| 2493 | * @wxcaps: write/execute capabilities | ||
| 2494 | * | ||
| 2495 | * Configures resource limits and capabilities for a physical or virtual | ||
| 2496 | * function. | ||
| 2497 | */ | ||
| 2498 | int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
| 2499 | unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, | ||
| 2500 | unsigned int rxqi, unsigned int rxq, unsigned int tc, | ||
| 2501 | unsigned int vi, unsigned int cmask, unsigned int pmask, | ||
| 2502 | unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) | ||
| 2503 | { | ||
| 2504 | struct fw_pfvf_cmd c; | ||
| 2505 | |||
| 2506 | memset(&c, 0, sizeof(c)); | ||
| 2507 | c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST | | ||
| 2508 | FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) | | ||
| 2509 | FW_PFVF_CMD_VFN(vf)); | ||
| 2510 | c.retval_len16 = htonl(FW_LEN16(c)); | ||
| 2511 | c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) | | ||
| 2512 | FW_PFVF_CMD_NIQ(rxq)); | ||
| 2513 | c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) | | ||
| 2514 | FW_PFVF_CMD_PMASK(pmask) | | ||
| 2515 | FW_PFVF_CMD_NEQ(txq)); | ||
| 2516 | c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) | | ||
| 2517 | FW_PFVF_CMD_NEXACTF(nexact)); | ||
| 2518 | c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) | | ||
| 2519 | FW_PFVF_CMD_WX_CAPS(wxcaps) | | ||
| 2520 | FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); | ||
| 2521 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
| 2522 | } | ||
| 2523 | |||
| 2524 | /** | ||
| 2525 | * t4_alloc_vi - allocate a virtual interface | ||
| 2526 | * @adap: the adapter | ||
| 2527 | * @mbox: mailbox to use for the FW command | ||
| 2528 | * @port: physical port associated with the VI | ||
| 2529 | * @pf: the PF owning the VI | ||
| 2530 | * @vf: the VF owning the VI | ||
| 2531 | * @nmac: number of MAC addresses needed (1 to 5) | ||
| 2532 | * @mac: the MAC addresses of the VI | ||
| 2533 | * @rss_size: size of RSS table slice associated with this VI | ||
| 2534 | * | ||
| 2535 | * Allocates a virtual interface for the given physical port. If @mac is | ||
| 2536 | * not %NULL it contains the MAC addresses of the VI as assigned by FW. | ||
| 2537 | * @mac should be large enough to hold @nmac Ethernet addresses; they are | ||
| 2538 | * stored consecutively, so the space needed is @nmac * 6 bytes. | ||
| 2539 | * Returns a negative error number or the non-negative VI id. | ||
| 2540 | */ | ||
| 2541 | int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, | ||
| 2542 | unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, | ||
| 2543 | unsigned int *rss_size) | ||
| 2544 | { | ||
| 2545 | int ret; | ||
| 2546 | struct fw_vi_cmd c; | ||
| 2547 | |||
| 2548 | memset(&c, 0, sizeof(c)); | ||
| 2549 | c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST | | ||
| 2550 | FW_CMD_WRITE | FW_CMD_EXEC | | ||
| 2551 | FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf)); | ||
| 2552 | c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c)); | ||
| 2553 | c.portid_pkd = FW_VI_CMD_PORTID(port); | ||
| 2554 | c.nmac = nmac - 1; | ||
| 2555 | |||
| 2556 | ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); | ||
| 2557 | if (ret) | ||
| 2558 | return ret; | ||
| 2559 | |||
| 2560 | if (mac) { | ||
| 2561 | memcpy(mac, c.mac, sizeof(c.mac)); | ||
| 2562 | switch (nmac) { | ||
| 2563 | case 5: | ||
| 2564 | memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); | ||
| 2565 | case 4: | ||
| 2566 | memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); | ||
| 2567 | case 3: | ||
| 2568 | memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); | ||
| 2569 | case 2: | ||
| 2570 | memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); | ||
| 2571 | } | ||
| 2572 | } | ||
| 2573 | if (rss_size) | ||
| 2574 | *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd)); | ||
| 2575 | return ntohs(c.viid_pkd); | ||
| 2576 | } | ||
| 2577 | |||
| 2578 | /** | ||
| 2579 | * t4_free_vi - free a virtual interface | ||
| 2580 | * @adap: the adapter | ||
| 2581 | * @mbox: mailbox to use for the FW command | ||
| 2582 | * @pf: the PF owning the VI | ||
| 2583 | * @vf: the VF owning the VI | ||
| 2584 | * @viid: virtual interface identifier | ||
| 2585 | * | ||
| 2586 | * Free a previously allocated virtual interface. | ||
| 2587 | */ | ||
| 2588 | int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
| 2589 | unsigned int vf, unsigned int viid) | ||
| 2590 | { | ||
| 2591 | struct fw_vi_cmd c; | ||
| 2592 | |||
| 2593 | memset(&c, 0, sizeof(c)); | ||
| 2594 | c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST | | ||
| 2595 | FW_CMD_EXEC | FW_VI_CMD_PFN(pf) | | ||
| 2596 | FW_VI_CMD_VFN(vf)); | ||
| 2597 | c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c)); | ||
| 2598 | c.viid_pkd = htons(FW_VI_CMD_VIID(viid)); | ||
| 2599 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); | ||
| 2600 | } | ||
| 2601 | |||
| 2602 | /** | ||
| 2603 | * t4_set_rxmode - set Rx properties of a virtual interface | ||
| 2604 | * @adap: the adapter | ||
| 2605 | * @mbox: mailbox to use for the FW command | ||
| 2606 | * @viid: the VI id | ||
| 2607 | * @mtu: the new MTU or -1 | ||
| 2608 | * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change | ||
| 2609 | * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change | ||
| 2610 | * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change | ||
| 2611 | * @sleep_ok: if true we may sleep while awaiting command completion | ||
| 2612 | * | ||
| 2613 | * Sets Rx properties of a virtual interface. | ||
| 2614 | */ | ||
| 2615 | int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, | ||
| 2616 | int mtu, int promisc, int all_multi, int bcast, bool sleep_ok) | ||
| 2617 | { | ||
| 2618 | struct fw_vi_rxmode_cmd c; | ||
| 2619 | |||
| 2620 | /* convert to FW values */ | ||
| 2621 | if (mtu < 0) | ||
| 2622 | mtu = FW_RXMODE_MTU_NO_CHG; | ||
| 2623 | if (promisc < 0) | ||
| 2624 | promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; | ||
| 2625 | if (all_multi < 0) | ||
| 2626 | all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; | ||
| 2627 | if (bcast < 0) | ||
| 2628 | bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; | ||
| 2629 | |||
| 2630 | memset(&c, 0, sizeof(c)); | ||
| 2631 | c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST | | ||
| 2632 | FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid)); | ||
| 2633 | c.retval_len16 = htonl(FW_LEN16(c)); | ||
| 2634 | c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) | | ||
| 2635 | FW_VI_RXMODE_CMD_PROMISCEN(promisc) | | ||
| 2636 | FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | | ||
| 2637 | FW_VI_RXMODE_CMD_BROADCASTEN(bcast)); | ||
| 2638 | return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); | ||
| 2639 | } | ||
| 2640 | |||
| 2641 | /** | ||
| 2642 | * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses | ||
| 2643 | * @adap: the adapter | ||
| 2644 | * @mbox: mailbox to use for the FW command | ||
| 2645 | * @viid: the VI id | ||
| 2646 | * @free: if true any existing filters for this VI id are first removed | ||
| 2647 | * @naddr: the number of MAC addresses to allocate filters for (up to 7) | ||
| 2648 | * @addr: the MAC address(es) | ||
| 2649 | * @idx: where to store the index of each allocated filter | ||
| 2650 | * @hash: pointer to hash address filter bitmap | ||
| 2651 | * @sleep_ok: call is allowed to sleep | ||
| 2652 | * | ||
| 2653 | * Allocates an exact-match filter for each of the supplied addresses and | ||
| 2654 | * sets it to the corresponding address. If @idx is not %NULL it should | ||
| 2655 | * have at least @naddr entries, each of which will be set to the index of | ||
| 2656 | * the filter allocated for the corresponding MAC address. If a filter | ||
| 2657 | * could not be allocated for an address its index is set to 0xffff. | ||
| 2658 | * If @hash is not %NULL, addresses that fail to allocate an exact filter | ||
| 2659 | * are hashed and used to update the hash filter bitmap pointed at by @hash. | ||
| 2660 | * | ||
| 2661 | * Returns a negative error number or the number of filters allocated. | ||
| 2662 | */ | ||
| 2663 | int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, | ||
| 2664 | unsigned int viid, bool free, unsigned int naddr, | ||
| 2665 | const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) | ||
| 2666 | { | ||
| 2667 | int i, ret; | ||
| 2668 | struct fw_vi_mac_cmd c; | ||
| 2669 | struct fw_vi_mac_exact *p; | ||
| 2670 | |||
| 2671 | if (naddr > 7) | ||
| 2672 | return -EINVAL; | ||
| 2673 | |||
| 2674 | memset(&c, 0, sizeof(c)); | ||
| 2675 | c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | | ||
| 2676 | FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) | | ||
| 2677 | FW_VI_MAC_CMD_VIID(viid)); | ||
| 2678 | c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) | | ||
| 2679 | FW_CMD_LEN16((naddr + 2) / 2)); | ||
| 2680 | |||
| 2681 | for (i = 0, p = c.u.exact; i < naddr; i++, p++) { | ||
| 2682 | p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | | ||
| 2683 | FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); | ||
| 2684 | memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); | ||
| 2685 | } | ||
| 2686 | |||
| 2687 | ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); | ||
| 2688 | if (ret) | ||
| 2689 | return ret; | ||
| 2690 | |||
| 2691 | for (i = 0, p = c.u.exact; i < naddr; i++, p++) { | ||
| 2692 | u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); | ||
| 2693 | |||
| 2694 | if (idx) | ||
| 2695 | idx[i] = index >= NEXACT_MAC ? 0xffff : index; | ||
| 2696 | if (index < NEXACT_MAC) | ||
| 2697 | ret++; | ||
| 2698 | else if (hash) | ||
| 2699 | *hash |= (1ULL << hash_mac_addr(addr[i])); | ||
| 2700 | } | ||
| 2701 | return ret; | ||
| 2702 | } | ||
| 2703 | |||
| 2704 | /** | ||
| 2705 | * t4_change_mac - modifies the exact-match filter for a MAC address | ||
| 2706 | * @adap: the adapter | ||
| 2707 | * @mbox: mailbox to use for the FW command | ||
| 2708 | * @viid: the VI id | ||
| 2709 | * @idx: index of existing filter for old value of MAC address, or -1 | ||
| 2710 | * @addr: the new MAC address value | ||
| 2711 | * @persist: whether a new MAC allocation should be persistent | ||
| 2712 | * @add_smt: if true also add the address to the HW SMT | ||
| 2713 | * | ||
| 2714 | * Modifies an exact-match filter and sets it to the new MAC address. | ||
| 2715 | * Note that in general it is not possible to modify the value of a given | ||
| 2716 | * filter, so the generic way to modify an address filter is to free the one | ||
| 2717 | * being used by the old address value and allocate a new filter for the | ||
| 2718 | * new address value. @idx can be -1 if the address is a new addition. | ||
| 2719 | * | ||
| 2720 | * Returns a negative error number or the index of the filter with the new | ||
| 2721 | * MAC value. | ||
| 2722 | */ | ||
| 2723 | int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, | ||
| 2724 | int idx, const u8 *addr, bool persist, bool add_smt) | ||
| 2725 | { | ||
| 2726 | int ret, mode; | ||
| 2727 | struct fw_vi_mac_cmd c; | ||
| 2728 | struct fw_vi_mac_exact *p = c.u.exact; | ||
| 2729 | |||
| 2730 | if (idx < 0) /* new allocation */ | ||
| 2731 | idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; | ||
| 2732 | mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; | ||
| 2733 | |||
| 2734 | memset(&c, 0, sizeof(c)); | ||
| 2735 | c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | | ||
| 2736 | FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid)); | ||
| 2737 | c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1)); | ||
| 2738 | p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | | ||
| 2739 | FW_VI_MAC_CMD_SMAC_RESULT(mode) | | ||
| 2740 | FW_VI_MAC_CMD_IDX(idx)); | ||
| 2741 | memcpy(p->macaddr, addr, sizeof(p->macaddr)); | ||
| 2742 | |||
| 2743 | ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); | ||
| 2744 | if (ret == 0) { | ||
| 2745 | ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); | ||
| 2746 | if (ret >= NEXACT_MAC) | ||
| 2747 | ret = -ENOMEM; | ||
| 2748 | } | ||
| 2749 | return ret; | ||
| 2750 | } | ||
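
A short caller sketch for t4_change_mac(), assuming the caller keeps track of the index of the exact-match filter currently holding the address; the helper and its filt_idx parameter are hypothetical.

static int example_update_vi_mac(struct adapter *adap, unsigned int mbox,
				 unsigned int viid, int *filt_idx,
				 const u8 *new_mac)
{
	/* Passing the current filter index lets FW replace that slot;
	 * -1 would request a brand-new allocation instead.
	 */
	int ret = t4_change_mac(adap, mbox, viid, *filt_idx, new_mac,
				true, true);

	if (ret >= 0)
		*filt_idx = ret;	/* remember the slot for the next change */
	return ret < 0 ? ret : 0;
}
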
| 2751 | |||
| 2752 | /** | ||
| 2753 | * t4_set_addr_hash - program the MAC inexact-match hash filter | ||
| 2754 | * @adap: the adapter | ||
| 2755 | * @mbox: mailbox to use for the FW command | ||
| 2756 | * @viid: the VI id | ||
| 2757 | * @ucast: whether the hash filter should also match unicast addresses | ||
| 2758 | * @vec: the value to be written to the hash filter | ||
| 2759 | * @sleep_ok: call is allowed to sleep | ||
| 2760 | * | ||
| 2761 | * Sets the 64-bit inexact-match hash filter for a virtual interface. | ||
| 2762 | */ | ||
| 2763 | int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, | ||
| 2764 | bool ucast, u64 vec, bool sleep_ok) | ||
| 2765 | { | ||
| 2766 | struct fw_vi_mac_cmd c; | ||
| 2767 | |||
| 2768 | memset(&c, 0, sizeof(c)); | ||
| 2769 | c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | | ||
| 2770 | FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid)); | ||
| 2771 | c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN | | ||
| 2772 | FW_VI_MAC_CMD_HASHUNIEN(ucast) | | ||
| 2773 | FW_CMD_LEN16(1)); | ||
| 2774 | c.u.hash.hashvec = cpu_to_be64(vec); | ||
| 2775 | return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); | ||
| 2776 | } | ||
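
The sketch below shows one plausible way to build the 64-bit vector that t4_set_addr_hash() programs, reusing the hash_mac_addr() bucketing applied by t4_alloc_mac_filt() above; the helper name and the address-list layout are assumptions.

static int example_program_mcast_hash(struct adapter *adap, unsigned int mbox,
				      unsigned int viid,
				      const u8 (*mc)[6], int nmc)
{
	u64 vec = 0;
	int i;

	for (i = 0; i < nmc; i++)
		vec |= 1ULL << hash_mac_addr(mc[i]);	/* 6-bit bucket per address */

	/* ucast=false: unicast traffic keeps relying on exact-match filters */
	return t4_set_addr_hash(adap, mbox, viid, false, vec, true);
}
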
| 2777 | |||
| 2778 | /** | ||
| 2779 | * t4_enable_vi - enable/disable a virtual interface | ||
| 2780 | * @adap: the adapter | ||
| 2781 | * @mbox: mailbox to use for the FW command | ||
| 2782 | * @viid: the VI id | ||
| 2783 | * @rx_en: 1=enable Rx, 0=disable Rx | ||
| 2784 | * @tx_en: 1=enable Tx, 0=disable Tx | ||
| 2785 | * | ||
| 2786 | * Enables/disables a virtual interface. | ||
| 2787 | */ | ||
| 2788 | int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, | ||
| 2789 | bool rx_en, bool tx_en) | ||
| 2790 | { | ||
| 2791 | struct fw_vi_enable_cmd c; | ||
| 2792 | |||
| 2793 | memset(&c, 0, sizeof(c)); | ||
| 2794 | c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | | ||
| 2795 | FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); | ||
| 2796 | c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) | | ||
| 2797 | FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c)); | ||
| 2798 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
| 2799 | } | ||
| 2800 | |||
| 2801 | /** | ||
| 2802 | * t4_identify_port - identify a VI's port by blinking its LED | ||
| 2803 | * @adap: the adapter | ||
| 2804 | * @mbox: mailbox to use for the FW command | ||
| 2805 | * @viid: the VI id | ||
| 2806 | * @nblinks: how many times to blink LED at 2.5 Hz | ||
| 2807 | * | ||
| 2808 | * Identifies a VI's port by blinking its LED. | ||
| 2809 | */ | ||
| 2810 | int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, | ||
| 2811 | unsigned int nblinks) | ||
| 2812 | { | ||
| 2813 | struct fw_vi_enable_cmd c; | ||
| 2814 | memset(&c, 0, sizeof(c)); | ||
| 2815 | c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | | ||
| 2816 | FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); | ||
| 2817 | c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); | ||
| 2818 | c.blinkdur = htons(nblinks); | ||
| 2819 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
| 2820 | } | ||
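
For illustration, an ethtool-style "identify" helper could be layered on t4_identify_port() as below; the seconds-to-blink-count conversion and the helper itself are assumptions, not part of this patch.

static int example_phys_id(struct adapter *adap, unsigned int mbox,
			   unsigned int viid, unsigned int secs)
{
	/* 2.5 Hz => roughly 2.5 blinks per second of identification time */
	return t4_identify_port(adap, mbox, viid, secs ? secs * 5 / 2 : 1);
}
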
| 2821 | |||
| 2822 | /** | ||
| 2823 | * t4_iq_start_stop - enable/disable an ingress queue and its FLs | ||
| 2824 | * @adap: the adapter | ||
| 2825 | * @mbox: mailbox to use for the FW command | ||
| 2826 | * @start: %true to enable the queues, %false to disable them | ||
| 2827 | * @pf: the PF owning the queues | ||
| 2828 | * @vf: the VF owning the queues | ||
| 2829 | * @iqid: ingress queue id | ||
| 2830 | * @fl0id: FL0 queue id or 0xffff if no attached FL0 | ||
| 2831 | * @fl1id: FL1 queue id or 0xffff if no attached FL1 | ||
| 2832 | * | ||
| 2833 | * Starts or stops an ingress queue and its associated FLs, if any. | ||
| 2834 | */ | ||
| 2835 | int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, | ||
| 2836 | unsigned int pf, unsigned int vf, unsigned int iqid, | ||
| 2837 | unsigned int fl0id, unsigned int fl1id) | ||
| 2838 | { | ||
| 2839 | struct fw_iq_cmd c; | ||
| 2840 | |||
| 2841 | memset(&c, 0, sizeof(c)); | ||
| 2842 | c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | | ||
| 2843 | FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) | | ||
| 2844 | FW_IQ_CMD_VFN(vf)); | ||
| 2845 | c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) | | ||
| 2846 | FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c)); | ||
| 2847 | c.iqid = htons(iqid); | ||
| 2848 | c.fl0id = htons(fl0id); | ||
| 2849 | c.fl1id = htons(fl1id); | ||
| 2850 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
| 2851 | } | ||
| 2852 | |||
| 2853 | /** | ||
| 2854 | * t4_iq_free - free an ingress queue and its FLs | ||
| 2855 | * @adap: the adapter | ||
| 2856 | * @mbox: mailbox to use for the FW command | ||
| 2857 | * @pf: the PF owning the queues | ||
| 2858 | * @vf: the VF owning the queues | ||
| 2859 | * @iqtype: the ingress queue type | ||
| 2860 | * @iqid: ingress queue id | ||
| 2861 | * @fl0id: FL0 queue id or 0xffff if no attached FL0 | ||
| 2862 | * @fl1id: FL1 queue id or 0xffff if no attached FL1 | ||
| 2863 | * | ||
| 2864 | * Frees an ingress queue and its associated FLs, if any. | ||
| 2865 | */ | ||
| 2866 | int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
| 2867 | unsigned int vf, unsigned int iqtype, unsigned int iqid, | ||
| 2868 | unsigned int fl0id, unsigned int fl1id) | ||
| 2869 | { | ||
| 2870 | struct fw_iq_cmd c; | ||
| 2871 | |||
| 2872 | memset(&c, 0, sizeof(c)); | ||
| 2873 | c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | | ||
| 2874 | FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) | | ||
| 2875 | FW_IQ_CMD_VFN(vf)); | ||
| 2876 | c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c)); | ||
| 2877 | c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype)); | ||
| 2878 | c.iqid = htons(iqid); | ||
| 2879 | c.fl0id = htons(fl0id); | ||
| 2880 | c.fl1id = htons(fl1id); | ||
| 2881 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
| 2882 | } | ||
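
A teardown sketch for an RX queue with a single free list attached; the FW_IQ_TYPE_FL_INT_CAP ingress-queue type is assumed to come from the firmware API header and the wrapper is hypothetical. The unused FL1 slot is passed as 0xffff per the convention documented above.

static int example_free_rxq(struct adapter *adap, unsigned int mbox,
			    unsigned int pf, unsigned int vf,
			    unsigned int iqid, unsigned int flid)
{
	return t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP,
			  iqid, flid, 0xffff);	/* no FL1 attached */
}
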
| 2883 | |||
| 2884 | /** | ||
| 2885 | * t4_eth_eq_free - free an Ethernet egress queue | ||
| 2886 | * @adap: the adapter | ||
| 2887 | * @mbox: mailbox to use for the FW command | ||
| 2888 | * @pf: the PF owning the queue | ||
| 2889 | * @vf: the VF owning the queue | ||
| 2890 | * @eqid: egress queue id | ||
| 2891 | * | ||
| 2892 | * Frees an Ethernet egress queue. | ||
| 2893 | */ | ||
| 2894 | int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
| 2895 | unsigned int vf, unsigned int eqid) | ||
| 2896 | { | ||
| 2897 | struct fw_eq_eth_cmd c; | ||
| 2898 | |||
| 2899 | memset(&c, 0, sizeof(c)); | ||
| 2900 | c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | | ||
| 2901 | FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) | | ||
| 2902 | FW_EQ_ETH_CMD_VFN(vf)); | ||
| 2903 | c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); | ||
| 2904 | c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid)); | ||
| 2905 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
| 2906 | } | ||
| 2907 | |||
| 2908 | /** | ||
| 2909 | * t4_ctrl_eq_free - free a control egress queue | ||
| 2910 | * @adap: the adapter | ||
| 2911 | * @mbox: mailbox to use for the FW command | ||
| 2912 | * @pf: the PF owning the queue | ||
| 2913 | * @vf: the VF owning the queue | ||
| 2914 | * @eqid: egress queue id | ||
| 2915 | * | ||
| 2916 | * Frees a control egress queue. | ||
| 2917 | */ | ||
| 2918 | int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
| 2919 | unsigned int vf, unsigned int eqid) | ||
| 2920 | { | ||
| 2921 | struct fw_eq_ctrl_cmd c; | ||
| 2922 | |||
| 2923 | memset(&c, 0, sizeof(c)); | ||
| 2924 | c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | | ||
| 2925 | FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) | | ||
| 2926 | FW_EQ_CTRL_CMD_VFN(vf)); | ||
| 2927 | c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); | ||
| 2928 | c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid)); | ||
| 2929 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
| 2930 | } | ||
| 2931 | |||
| 2932 | /** | ||
| 2933 | * t4_ofld_eq_free - free an offload egress queue | ||
| 2934 | * @adap: the adapter | ||
| 2935 | * @mbox: mailbox to use for the FW command | ||
| 2936 | * @pf: the PF owning the queue | ||
| 2937 | * @vf: the VF owning the queue | ||
| 2938 | * @eqid: egress queue id | ||
| 2939 | * | ||
| 2940 | * Frees an offload egress queue. | ||
| 2941 | */ | ||
| 2942 | int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | ||
| 2943 | unsigned int vf, unsigned int eqid) | ||
| 2944 | { | ||
| 2945 | struct fw_eq_ofld_cmd c; | ||
| 2946 | |||
| 2947 | memset(&c, 0, sizeof(c)); | ||
| 2948 | c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | | ||
| 2949 | FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) | | ||
| 2950 | FW_EQ_OFLD_CMD_VFN(vf)); | ||
| 2951 | c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c)); | ||
| 2952 | c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid)); | ||
| 2953 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
| 2954 | } | ||
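
Since the three egress-queue free routines issue different FW commands and are not interchangeable, a caller typically dispatches on the queue kind; the enum and wrapper below are purely illustrative.

enum example_eq_kind { EQ_ETH, EQ_CTRL, EQ_OFLD };	/* hypothetical */

static int example_eq_free(struct adapter *adap, unsigned int mbox,
			   unsigned int pf, unsigned int vf,
			   enum example_eq_kind kind, unsigned int eqid)
{
	switch (kind) {
	case EQ_ETH:
		return t4_eth_eq_free(adap, mbox, pf, vf, eqid);
	case EQ_CTRL:
		return t4_ctrl_eq_free(adap, mbox, pf, vf, eqid);
	case EQ_OFLD:
		return t4_ofld_eq_free(adap, mbox, pf, vf, eqid);
	}
	return -EINVAL;
}
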
| 2955 | |||
| 2956 | /** | ||
| 2957 | * t4_handle_fw_rpl - process a FW reply message | ||
| 2958 | * @adap: the adapter | ||
| 2959 | * @rpl: start of the FW message | ||
| 2960 | * | ||
| 2961 | * Processes a FW message, such as link state change messages. | ||
| 2962 | */ | ||
| 2963 | int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) | ||
| 2964 | { | ||
| 2965 | u8 opcode = *(const u8 *)rpl; | ||
| 2966 | |||
| 2967 | if (opcode == FW_PORT_CMD) { /* link/module state change message */ | ||
| 2968 | int speed = 0, fc = 0; | ||
| 2969 | const struct fw_port_cmd *p = (void *)rpl; | ||
| 2970 | int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid)); | ||
| 2971 | int port = adap->chan_map[chan]; | ||
| 2972 | struct port_info *pi = adap2pinfo(adap, port); | ||
| 2973 | struct link_config *lc = &pi->link_cfg; | ||
| 2974 | u32 stat = ntohl(p->u.info.lstatus_to_modtype); | ||
| 2975 | int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0; | ||
| 2976 | u32 mod = FW_PORT_CMD_MODTYPE_GET(stat); | ||
| 2977 | |||
| 2978 | if (stat & FW_PORT_CMD_RXPAUSE) | ||
| 2979 | fc |= PAUSE_RX; | ||
| 2980 | if (stat & FW_PORT_CMD_TXPAUSE) | ||
| 2981 | fc |= PAUSE_TX; | ||
| 2982 | if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) | ||
| 2983 | speed = SPEED_100; | ||
| 2984 | else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) | ||
| 2985 | speed = SPEED_1000; | ||
| 2986 | else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) | ||
| 2987 | speed = SPEED_10000; | ||
| 2988 | |||
| 2989 | if (link_ok != lc->link_ok || speed != lc->speed || | ||
| 2990 | fc != lc->fc) { /* something changed */ | ||
| 2991 | lc->link_ok = link_ok; | ||
| 2992 | lc->speed = speed; | ||
| 2993 | lc->fc = fc; | ||
| 2994 | t4_os_link_changed(adap, port, link_ok); | ||
| 2995 | } | ||
| 2996 | if (mod != pi->mod_type) { | ||
| 2997 | pi->mod_type = mod; | ||
| 2998 | t4_os_portmod_changed(adap, port); | ||
| 2999 | } | ||
| 3000 | } | ||
| 3001 | return 0; | ||
| 3002 | } | ||
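
A sketch of how a firmware-event-queue handler might hand replies to t4_handle_fw_rpl(); the assumptions that the RSS header precedes the CPL body and that message type 0 carries an encapsulated FW command reply mirror typical usage but are not taken from this patch.

static int example_fwevt_handler(struct adapter *adap, const __be64 *rsp)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	if (opcode == CPL_FW6_MSG) {
		/* the CPL body follows the 8-byte RSS header */
		const struct cpl_fw6_msg *msg = (const void *)(rsp + 1);

		if (msg->type == 0)	/* encapsulated FW command reply */
			return t4_handle_fw_rpl(adap, msg->data);
	}
	return 0;
}
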
| 3003 | |||
| 3004 | static void __devinit get_pci_mode(struct adapter *adapter, | ||
| 3005 | struct pci_params *p) | ||
| 3006 | { | ||
| 3007 | u16 val; | ||
| 3008 | u32 pcie_cap = pci_pcie_cap(adapter->pdev); | ||
| 3009 | |||
| 3010 | if (pcie_cap) { | ||
| 3011 | pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA, | ||
| 3012 | &val); | ||
| 3013 | p->speed = val & PCI_EXP_LNKSTA_CLS; | ||
| 3014 | p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; | ||
| 3015 | } | ||
| 3016 | } | ||
| 3017 | |||
| 3018 | /** | ||
| 3019 | * init_link_config - initialize a link's SW state | ||
| 3020 | * @lc: structure holding the link state | ||
| 3021 | * @caps: link capabilities | ||
| 3022 | * | ||
| 3023 | * Initializes the SW state maintained for each link, including the link's | ||
| 3024 | * capabilities and default speed/flow-control/autonegotiation settings. | ||
| 3025 | */ | ||
| 3026 | static void __devinit init_link_config(struct link_config *lc, | ||
| 3027 | unsigned int caps) | ||
| 3028 | { | ||
| 3029 | lc->supported = caps; | ||
| 3030 | lc->requested_speed = 0; | ||
| 3031 | lc->speed = 0; | ||
| 3032 | lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; | ||
| 3033 | if (lc->supported & FW_PORT_CAP_ANEG) { | ||
| 3034 | lc->advertising = lc->supported & ADVERT_MASK; | ||
| 3035 | lc->autoneg = AUTONEG_ENABLE; | ||
| 3036 | lc->requested_fc |= PAUSE_AUTONEG; | ||
| 3037 | } else { | ||
| 3038 | lc->advertising = 0; | ||
| 3039 | lc->autoneg = AUTONEG_DISABLE; | ||
| 3040 | } | ||
| 3041 | } | ||
| 3042 | |||
| 3043 | static int __devinit wait_dev_ready(struct adapter *adap) | ||
| 3044 | { | ||
| 3045 | if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff) | ||
| 3046 | return 0; | ||
| 3047 | msleep(500); | ||
| 3048 | return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO; | ||
| 3049 | } | ||
| 3050 | |||
| 3051 | /** | ||
| 3052 | * t4_prep_adapter - prepare SW and HW for operation | ||
| 3053 | * @adapter: the adapter | ||
| 3055 | * | ||
| 3056 | * Initialize adapter SW state for the various HW modules: read the | ||
| 3057 | * PCIe link parameters, chip revision and VPD, and set initial values | ||
| 3058 | * for some adapter tunables. | ||
| 3059 | */ | ||
| 3060 | int __devinit t4_prep_adapter(struct adapter *adapter) | ||
| 3061 | { | ||
| 3062 | int ret; | ||
| 3063 | |||
| 3064 | ret = wait_dev_ready(adapter); | ||
| 3065 | if (ret < 0) | ||
| 3066 | return ret; | ||
| 3067 | |||
| 3068 | get_pci_mode(adapter, &adapter->params.pci); | ||
| 3069 | adapter->params.rev = t4_read_reg(adapter, PL_REV); | ||
| 3070 | |||
| 3071 | ret = get_vpd_params(adapter, &adapter->params.vpd); | ||
| 3072 | if (ret < 0) | ||
| 3073 | return ret; | ||
| 3074 | |||
| 3075 | init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); | ||
| 3076 | |||
| 3077 | /* | ||
| 3078 | * Default port for debugging in case we can't reach FW. | ||
| 3079 | */ | ||
| 3080 | adapter->params.nports = 1; | ||
| 3081 | adapter->params.portvec = 1; | ||
| 3082 | return 0; | ||
| 3083 | } | ||
| 3084 | |||
| 3085 | int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf) | ||
| 3086 | { | ||
| 3087 | u8 addr[6]; | ||
| 3088 | int ret, i, j = 0; | ||
| 3089 | struct fw_port_cmd c; | ||
| 3090 | |||
| 3091 | memset(&c, 0, sizeof(c)); | ||
| 3092 | |||
| 3093 | for_each_port(adap, i) { | ||
| 3094 | unsigned int rss_size; | ||
| 3095 | struct port_info *p = adap2pinfo(adap, i); | ||
| 3096 | |||
| 3097 | while ((adap->params.portvec & (1 << j)) == 0) | ||
| 3098 | j++; | ||
| 3099 | |||
| 3100 | c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | | ||
| 3101 | FW_CMD_REQUEST | FW_CMD_READ | | ||
| 3102 | FW_PORT_CMD_PORTID(j)); | ||
| 3103 | c.action_to_len16 = htonl( | ||
| 3104 | FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | | ||
| 3105 | FW_LEN16(c)); | ||
| 3106 | ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); | ||
| 3107 | if (ret) | ||
| 3108 | return ret; | ||
| 3109 | |||
| 3110 | ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); | ||
| 3111 | if (ret < 0) | ||
| 3112 | return ret; | ||
| 3113 | |||
| 3114 | p->viid = ret; | ||
| 3115 | p->tx_chan = j; | ||
| 3116 | p->lport = j; | ||
| 3117 | p->rss_size = rss_size; | ||
| 3118 | memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); | ||
| 3119 | memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN); | ||
| 3120 | |||
| 3121 | ret = ntohl(c.u.info.lstatus_to_modtype); | ||
| 3122 | p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? | ||
| 3123 | FW_PORT_CMD_MDIOADDR_GET(ret) : -1; | ||
| 3124 | p->port_type = FW_PORT_CMD_PTYPE_GET(ret); | ||
| 3125 | p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret); | ||
| 3126 | |||
| 3127 | init_link_config(&p->link_cfg, ntohs(c.u.info.pcap)); | ||
| 3128 | j++; | ||
| 3129 | } | ||
| 3130 | return 0; | ||
| 3131 | } | ||
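
Probe-time ordering sketch: t4_prep_adapter() must run (device ready, PCIe mode, revision, VPD) before t4_port_init() queries the ports and allocates their VIs. Error handling is trimmed and the wrapper is hypothetical.

static int example_hw_init(struct adapter *adap, int mbox, int pf, int vf)
{
	int ret;

	ret = t4_prep_adapter(adap);		 /* basic SW state + VPD */
	if (ret)
		return ret;
	return t4_port_init(adap, mbox, pf, vf); /* per-port VI, MAC, link caps */
}
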
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h new file mode 100644 index 000000000000..025623285c93 --- /dev/null +++ b/drivers/net/cxgb4/t4_hw.h | |||
| @@ -0,0 +1,100 @@ | |||
| 1 | /* | ||
| 2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. | ||
| 5 | * | ||
| 6 | * This software is available to you under a choice of one of two | ||
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 8 | * General Public License (GPL) Version 2, available from the file | ||
| 9 | * COPYING in the main directory of this source tree, or the | ||
| 10 | * OpenIB.org BSD license below: | ||
| 11 | * | ||
| 12 | * Redistribution and use in source and binary forms, with or | ||
| 13 | * without modification, are permitted provided that the following | ||
| 14 | * conditions are met: | ||
| 15 | * | ||
| 16 | * - Redistributions of source code must retain the above | ||
| 17 | * copyright notice, this list of conditions and the following | ||
| 18 | * disclaimer. | ||
| 19 | * | ||
| 20 | * - Redistributions in binary form must reproduce the above | ||
| 21 | * copyright notice, this list of conditions and the following | ||
| 22 | * disclaimer in the documentation and/or other materials | ||
| 23 | * provided with the distribution. | ||
| 24 | * | ||
| 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 32 | * SOFTWARE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef __T4_HW_H | ||
| 36 | #define __T4_HW_H | ||
| 37 | |||
| 38 | #include <linux/types.h> | ||
| 39 | |||
| 40 | enum { | ||
| 41 | NCHAN = 4, /* # of HW channels */ | ||
| 42 | MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */ | ||
| 43 | EEPROMSIZE = 17408, /* Serial EEPROM physical size */ | ||
| 44 | EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */ | ||
| 45 | RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */ | ||
| 46 | TCB_SIZE = 128, /* TCB size */ | ||
| 47 | NMTUS = 16, /* size of MTU table */ | ||
| 48 | NCCTRL_WIN = 32, /* # of congestion control windows */ | ||
| 49 | NEXACT_MAC = 336, /* # of exact MAC address filters */ | ||
| 50 | L2T_SIZE = 4096, /* # of L2T entries */ | ||
| 51 | MBOX_LEN = 64, /* mailbox size in bytes */ | ||
| 52 | TRACE_LEN = 112, /* length of trace data and mask */ | ||
| 53 | FILTER_OPT_LEN = 36, /* filter tuple width for optional components */ | ||
| 54 | NWOL_PAT = 8, /* # of WoL patterns */ | ||
| 55 | WOL_PAT_LEN = 128, /* length of WoL patterns */ | ||
| 56 | }; | ||
| 57 | |||
| 58 | enum { | ||
| 59 | SF_PAGE_SIZE = 256, /* serial flash page size */ | ||
| 60 | SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ | ||
| 61 | SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */ | ||
| 62 | }; | ||
| 63 | |||
| 64 | enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */ | ||
| 65 | |||
| 66 | enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */ | ||
| 67 | |||
| 68 | enum { | ||
| 69 | SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ | ||
| 70 | SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ | ||
| 71 | SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ | ||
| 72 | }; | ||
| 73 | |||
| 74 | struct sge_qstat { /* data written to SGE queue status entries */ | ||
| 75 | __be32 qid; | ||
| 76 | __be16 cidx; | ||
| 77 | __be16 pidx; | ||
| 78 | }; | ||
| 79 | |||
| 80 | /* | ||
| 81 | * Structure for last 128 bits of response descriptors | ||
| 82 | */ | ||
| 83 | struct rsp_ctrl { | ||
| 84 | __be32 hdrbuflen_pidx; | ||
| 85 | __be32 pldbuflen_qid; | ||
| 86 | union { | ||
| 87 | u8 type_gen; | ||
| 88 | __be64 last_flit; | ||
| 89 | }; | ||
| 90 | }; | ||
| 91 | |||
| 92 | #define RSPD_NEWBUF 0x80000000U | ||
| 93 | #define RSPD_LEN 0x7fffffffU | ||
| 94 | |||
| 95 | #define RSPD_GEN(x) ((x) >> 7) | ||
| 96 | #define RSPD_TYPE(x) (((x) >> 4) & 3) | ||
| 97 | |||
| 98 | #define QINTR_CNT_EN 0x1 | ||
| 99 | #define QINTR_TIMER_IDX(x) ((x) << 1) | ||
| 100 | #endif /* __T4_HW_H */ | ||
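
As a quick illustration of the rsp_ctrl layout and the RSPD_* macros just defined, a response-queue poll might test the generation bit and extract the entry type roughly as follows. The queue bookkeeping shown (entry size, current generation) is an assumption, and a read barrier would also be needed before consuming the rest of the entry.

static inline bool example_rsp_ready(const void *iq_entry, size_t iqe_len,
				     unsigned int cur_gen, unsigned int *type)
{
	/* rsp_ctrl occupies the last 16 bytes of each ingress-queue entry */
	const struct rsp_ctrl *rc = iq_entry + iqe_len - sizeof(*rc);

	if (RSPD_GEN(rc->type_gen) != cur_gen)
		return false;			/* HW has not written this entry yet */
	*type = RSPD_TYPE(rc->type_gen);	/* RSP_TYPE_FLBUF/CPL/INTR */
	return true;
}
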
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h new file mode 100644 index 000000000000..fdb117443144 --- /dev/null +++ b/drivers/net/cxgb4/t4_msg.h | |||
| @@ -0,0 +1,664 @@ | |||
| 1 | /* | ||
| 2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. | ||
| 5 | * | ||
| 6 | * This software is available to you under a choice of one of two | ||
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 8 | * General Public License (GPL) Version 2, available from the file | ||
| 9 | * COPYING in the main directory of this source tree, or the | ||
| 10 | * OpenIB.org BSD license below: | ||
| 11 | * | ||
| 12 | * Redistribution and use in source and binary forms, with or | ||
| 13 | * without modification, are permitted provided that the following | ||
| 14 | * conditions are met: | ||
| 15 | * | ||
| 16 | * - Redistributions of source code must retain the above | ||
| 17 | * copyright notice, this list of conditions and the following | ||
| 18 | * disclaimer. | ||
| 19 | * | ||
| 20 | * - Redistributions in binary form must reproduce the above | ||
| 21 | * copyright notice, this list of conditions and the following | ||
| 22 | * disclaimer in the documentation and/or other materials | ||
| 23 | * provided with the distribution. | ||
| 24 | * | ||
| 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 32 | * SOFTWARE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef __T4_MSG_H | ||
| 36 | #define __T4_MSG_H | ||
| 37 | |||
| 38 | #include <linux/types.h> | ||
| 39 | |||
| 40 | enum { | ||
| 41 | CPL_PASS_OPEN_REQ = 0x1, | ||
| 42 | CPL_PASS_ACCEPT_RPL = 0x2, | ||
| 43 | CPL_ACT_OPEN_REQ = 0x3, | ||
| 44 | CPL_SET_TCB_FIELD = 0x5, | ||
| 45 | CPL_GET_TCB = 0x6, | ||
| 46 | CPL_CLOSE_CON_REQ = 0x8, | ||
| 47 | CPL_CLOSE_LISTSRV_REQ = 0x9, | ||
| 48 | CPL_ABORT_REQ = 0xA, | ||
| 49 | CPL_ABORT_RPL = 0xB, | ||
| 50 | CPL_RX_DATA_ACK = 0xD, | ||
| 51 | CPL_TX_PKT = 0xE, | ||
| 52 | CPL_L2T_WRITE_REQ = 0x12, | ||
| 53 | CPL_TID_RELEASE = 0x1A, | ||
| 54 | |||
| 55 | CPL_CLOSE_LISTSRV_RPL = 0x20, | ||
| 56 | CPL_L2T_WRITE_RPL = 0x23, | ||
| 57 | CPL_PASS_OPEN_RPL = 0x24, | ||
| 58 | CPL_ACT_OPEN_RPL = 0x25, | ||
| 59 | CPL_PEER_CLOSE = 0x26, | ||
| 60 | CPL_ABORT_REQ_RSS = 0x2B, | ||
| 61 | CPL_ABORT_RPL_RSS = 0x2D, | ||
| 62 | |||
| 63 | CPL_CLOSE_CON_RPL = 0x32, | ||
| 64 | CPL_ISCSI_HDR = 0x33, | ||
| 65 | CPL_RDMA_CQE = 0x35, | ||
| 66 | CPL_RDMA_CQE_READ_RSP = 0x36, | ||
| 67 | CPL_RDMA_CQE_ERR = 0x37, | ||
| 68 | CPL_RX_DATA = 0x39, | ||
| 69 | CPL_SET_TCB_RPL = 0x3A, | ||
| 70 | CPL_RX_PKT = 0x3B, | ||
| 71 | CPL_RX_DDP_COMPLETE = 0x3F, | ||
| 72 | |||
| 73 | CPL_ACT_ESTABLISH = 0x40, | ||
| 74 | CPL_PASS_ESTABLISH = 0x41, | ||
| 75 | CPL_RX_DATA_DDP = 0x42, | ||
| 76 | CPL_PASS_ACCEPT_REQ = 0x44, | ||
| 77 | |||
| 78 | CPL_RDMA_READ_REQ = 0x60, | ||
| 79 | |||
| 80 | CPL_PASS_OPEN_REQ6 = 0x81, | ||
| 81 | CPL_ACT_OPEN_REQ6 = 0x83, | ||
| 82 | |||
| 83 | CPL_RDMA_TERMINATE = 0xA2, | ||
| 84 | CPL_RDMA_WRITE = 0xA4, | ||
| 85 | CPL_SGE_EGR_UPDATE = 0xA5, | ||
| 86 | |||
| 87 | CPL_TRACE_PKT = 0xB0, | ||
| 88 | |||
| 89 | CPL_FW4_MSG = 0xC0, | ||
| 90 | CPL_FW4_PLD = 0xC1, | ||
| 91 | CPL_FW4_ACK = 0xC3, | ||
| 92 | |||
| 93 | CPL_FW6_MSG = 0xE0, | ||
| 94 | CPL_FW6_PLD = 0xE1, | ||
| 95 | CPL_TX_PKT_LSO = 0xED, | ||
| 96 | CPL_TX_PKT_XT = 0xEE, | ||
| 97 | |||
| 98 | NUM_CPL_CMDS | ||
| 99 | }; | ||
| 100 | |||
| 101 | enum CPL_error { | ||
| 102 | CPL_ERR_NONE = 0, | ||
| 103 | CPL_ERR_TCAM_FULL = 3, | ||
| 104 | CPL_ERR_BAD_LENGTH = 15, | ||
| 105 | CPL_ERR_BAD_ROUTE = 18, | ||
| 106 | CPL_ERR_CONN_RESET = 20, | ||
| 107 | CPL_ERR_CONN_EXIST_SYNRECV = 21, | ||
| 108 | CPL_ERR_CONN_EXIST = 22, | ||
| 109 | CPL_ERR_ARP_MISS = 23, | ||
| 110 | CPL_ERR_BAD_SYN = 24, | ||
| 111 | CPL_ERR_CONN_TIMEDOUT = 30, | ||
| 112 | CPL_ERR_XMIT_TIMEDOUT = 31, | ||
| 113 | CPL_ERR_PERSIST_TIMEDOUT = 32, | ||
| 114 | CPL_ERR_FINWAIT2_TIMEDOUT = 33, | ||
| 115 | CPL_ERR_KEEPALIVE_TIMEDOUT = 34, | ||
| 116 | CPL_ERR_RTX_NEG_ADVICE = 35, | ||
| 117 | CPL_ERR_PERSIST_NEG_ADVICE = 36, | ||
| 118 | CPL_ERR_ABORT_FAILED = 42, | ||
| 119 | CPL_ERR_IWARP_FLM = 50, | ||
| 120 | }; | ||
| 121 | |||
| 122 | enum { | ||
| 123 | ULP_MODE_NONE = 0, | ||
| 124 | ULP_MODE_ISCSI = 2, | ||
| 125 | ULP_MODE_RDMA = 4, | ||
| 126 | ULP_MODE_FCOE = 6, | ||
| 127 | }; | ||
| 128 | |||
| 129 | enum { | ||
| 130 | ULP_CRC_HEADER = 1 << 0, | ||
| 131 | ULP_CRC_DATA = 1 << 1 | ||
| 132 | }; | ||
| 133 | |||
| 134 | enum { | ||
| 135 | CPL_ABORT_SEND_RST = 0, | ||
| 136 | CPL_ABORT_NO_RST, | ||
| 137 | }; | ||
| 138 | |||
| 139 | enum { /* TX_PKT_XT checksum types */ | ||
| 140 | TX_CSUM_TCP = 0, | ||
| 141 | TX_CSUM_UDP = 1, | ||
| 142 | TX_CSUM_CRC16 = 4, | ||
| 143 | TX_CSUM_CRC32 = 5, | ||
| 144 | TX_CSUM_CRC32C = 6, | ||
| 145 | TX_CSUM_FCOE = 7, | ||
| 146 | TX_CSUM_TCPIP = 8, | ||
| 147 | TX_CSUM_UDPIP = 9, | ||
| 148 | TX_CSUM_TCPIP6 = 10, | ||
| 149 | TX_CSUM_UDPIP6 = 11, | ||
| 150 | TX_CSUM_IP = 12, | ||
| 151 | }; | ||
| 152 | |||
| 153 | union opcode_tid { | ||
| 154 | __be32 opcode_tid; | ||
| 155 | u8 opcode; | ||
| 156 | }; | ||
| 157 | |||
| 158 | #define CPL_OPCODE(x) ((x) << 24) | ||
| 159 | #define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid)) | ||
| 160 | #define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) | ||
| 161 | #define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF) | ||
| 162 | |||
| 163 | /* partitioning of TID fields that also carry a queue id */ | ||
| 164 | #define GET_TID_TID(x) ((x) & 0x3fff) | ||
| 165 | #define GET_TID_QID(x) (((x) >> 14) & 0x3ff) | ||
| 166 | #define TID_QID(x) ((x) << 14) | ||
| 167 | |||
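
For illustration, MK_OPCODE_TID()/OPCODE_TID() are typically used to stamp the opcode+TID word of an outgoing CPL, e.g. a TID release (struct cpl_tid_release is defined further down in this header); the preceding work-request header setup is left to the caller in this sketch.

static inline void example_mk_tid_release(struct cpl_tid_release *req,
					  unsigned int tid)
{
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
	req->rsvd = 0;
}
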
| 168 | struct rss_header { | ||
| 169 | u8 opcode; | ||
| 170 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
| 171 | u8 channel:2; | ||
| 172 | u8 filter_hit:1; | ||
| 173 | u8 filter_tid:1; | ||
| 174 | u8 hash_type:2; | ||
| 175 | u8 ipv6:1; | ||
| 176 | u8 send2fw:1; | ||
| 177 | #else | ||
| 178 | u8 send2fw:1; | ||
| 179 | u8 ipv6:1; | ||
| 180 | u8 hash_type:2; | ||
| 181 | u8 filter_tid:1; | ||
| 182 | u8 filter_hit:1; | ||
| 183 | u8 channel:2; | ||
| 184 | #endif | ||
| 185 | __be16 qid; | ||
| 186 | __be32 hash_val; | ||
| 187 | }; | ||
| 188 | |||
| 189 | struct work_request_hdr { | ||
| 190 | __be32 wr_hi; | ||
| 191 | __be32 wr_mid; | ||
| 192 | __be64 wr_lo; | ||
| 193 | }; | ||
| 194 | |||
| 195 | #define WR_HDR struct work_request_hdr wr | ||
| 196 | |||
| 197 | struct cpl_pass_open_req { | ||
| 198 | WR_HDR; | ||
| 199 | union opcode_tid ot; | ||
| 200 | __be16 local_port; | ||
| 201 | __be16 peer_port; | ||
| 202 | __be32 local_ip; | ||
| 203 | __be32 peer_ip; | ||
| 204 | __be64 opt0; | ||
| 205 | #define TX_CHAN(x) ((x) << 2) | ||
| 206 | #define DELACK(x) ((x) << 5) | ||
| 207 | #define ULP_MODE(x) ((x) << 8) | ||
| 208 | #define RCV_BUFSIZ(x) ((x) << 12) | ||
| 209 | #define DSCP(x) ((x) << 22) | ||
| 210 | #define SMAC_SEL(x) ((u64)(x) << 28) | ||
| 211 | #define L2T_IDX(x) ((u64)(x) << 36) | ||
| 212 | #define NAGLE(x) ((u64)(x) << 49) | ||
| 213 | #define WND_SCALE(x) ((u64)(x) << 50) | ||
| 214 | #define KEEP_ALIVE(x) ((u64)(x) << 54) | ||
| 215 | #define MSS_IDX(x) ((u64)(x) << 60) | ||
| 216 | __be64 opt1; | ||
| 217 | #define SYN_RSS_ENABLE (1 << 0) | ||
| 218 | #define SYN_RSS_QUEUE(x) ((x) << 2) | ||
| 219 | #define CONN_POLICY_ASK (1 << 22) | ||
| 220 | }; | ||
| 221 | |||
| 222 | struct cpl_pass_open_req6 { | ||
| 223 | WR_HDR; | ||
| 224 | union opcode_tid ot; | ||
| 225 | __be16 local_port; | ||
| 226 | __be16 peer_port; | ||
| 227 | __be64 local_ip_hi; | ||
| 228 | __be64 local_ip_lo; | ||
| 229 | __be64 peer_ip_hi; | ||
| 230 | __be64 peer_ip_lo; | ||
| 231 | __be64 opt0; | ||
| 232 | __be64 opt1; | ||
| 233 | }; | ||
| 234 | |||
| 235 | struct cpl_pass_open_rpl { | ||
| 236 | union opcode_tid ot; | ||
| 237 | u8 rsvd[3]; | ||
| 238 | u8 status; | ||
| 239 | }; | ||
| 240 | |||
| 241 | struct cpl_pass_accept_rpl { | ||
| 242 | WR_HDR; | ||
| 243 | union opcode_tid ot; | ||
| 244 | __be32 opt2; | ||
| 245 | #define RSS_QUEUE(x) ((x) << 0) | ||
| 246 | #define RSS_QUEUE_VALID (1 << 10) | ||
| 247 | #define RX_COALESCE_VALID(x) ((x) << 11) | ||
| 248 | #define RX_COALESCE(x) ((x) << 12) | ||
| 249 | #define TX_QUEUE(x) ((x) << 23) | ||
| 250 | #define RX_CHANNEL(x) ((x) << 26) | ||
| 251 | #define WND_SCALE_EN(x) ((x) << 28) | ||
| 252 | #define TSTAMPS_EN(x) ((x) << 29) | ||
| 253 | #define SACK_EN(x) ((x) << 30) | ||
| 254 | __be64 opt0; | ||
| 255 | }; | ||
| 256 | |||
| 257 | struct cpl_act_open_req { | ||
| 258 | WR_HDR; | ||
| 259 | union opcode_tid ot; | ||
| 260 | __be16 local_port; | ||
| 261 | __be16 peer_port; | ||
| 262 | __be32 local_ip; | ||
| 263 | __be32 peer_ip; | ||
| 264 | __be64 opt0; | ||
| 265 | __be32 params; | ||
| 266 | __be32 opt2; | ||
| 267 | }; | ||
| 268 | |||
| 269 | struct cpl_act_open_req6 { | ||
| 270 | WR_HDR; | ||
| 271 | union opcode_tid ot; | ||
| 272 | __be16 local_port; | ||
| 273 | __be16 peer_port; | ||
| 274 | __be64 local_ip_hi; | ||
| 275 | __be64 local_ip_lo; | ||
| 276 | __be64 peer_ip_hi; | ||
| 277 | __be64 peer_ip_lo; | ||
| 278 | __be64 opt0; | ||
| 279 | __be32 params; | ||
| 280 | __be32 opt2; | ||
| 281 | }; | ||
| 282 | |||
| 283 | struct cpl_act_open_rpl { | ||
| 284 | union opcode_tid ot; | ||
| 285 | __be32 atid_status; | ||
| 286 | #define GET_AOPEN_STATUS(x) ((x) & 0xff) | ||
| 287 | #define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff) | ||
| 288 | }; | ||
| 289 | |||
| 290 | struct cpl_pass_establish { | ||
| 291 | union opcode_tid ot; | ||
| 292 | __be32 rsvd; | ||
| 293 | __be32 tos_stid; | ||
| 294 | #define GET_POPEN_TID(x) ((x) & 0xffffff) | ||
| 295 | #define GET_POPEN_TOS(x) (((x) >> 24) & 0xff) | ||
| 296 | __be16 mac_idx; | ||
| 297 | __be16 tcp_opt; | ||
| 298 | #define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1) | ||
| 299 | #define GET_TCPOPT_SACK(x) (((x) >> 6) & 1) | ||
| 300 | #define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1) | ||
| 301 | #define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf) | ||
| 302 | #define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf) | ||
| 303 | __be32 snd_isn; | ||
| 304 | __be32 rcv_isn; | ||
| 305 | }; | ||
| 306 | |||
| 307 | struct cpl_act_establish { | ||
| 308 | union opcode_tid ot; | ||
| 309 | __be32 rsvd; | ||
| 310 | __be32 tos_atid; | ||
| 311 | __be16 mac_idx; | ||
| 312 | __be16 tcp_opt; | ||
| 313 | __be32 snd_isn; | ||
| 314 | __be32 rcv_isn; | ||
| 315 | }; | ||
| 316 | |||
| 317 | struct cpl_get_tcb { | ||
| 318 | WR_HDR; | ||
| 319 | union opcode_tid ot; | ||
| 320 | __be16 reply_ctrl; | ||
| 321 | #define QUEUENO(x) ((x) << 0) | ||
| 322 | #define REPLY_CHAN(x) ((x) << 14) | ||
| 323 | #define NO_REPLY(x) ((x) << 15) | ||
| 324 | __be16 cookie; | ||
| 325 | }; | ||
| 326 | |||
| 327 | struct cpl_set_tcb_field { | ||
| 328 | WR_HDR; | ||
| 329 | union opcode_tid ot; | ||
| 330 | __be16 reply_ctrl; | ||
| 331 | __be16 word_cookie; | ||
| 332 | #define TCB_WORD(x) ((x) << 0) | ||
| 333 | #define TCB_COOKIE(x) ((x) << 5) | ||
| 334 | __be64 mask; | ||
| 335 | __be64 val; | ||
| 336 | }; | ||
| 337 | |||
| 338 | struct cpl_set_tcb_rpl { | ||
| 339 | union opcode_tid ot; | ||
| 340 | __be16 rsvd; | ||
| 341 | u8 cookie; | ||
| 342 | u8 status; | ||
| 343 | __be64 oldval; | ||
| 344 | }; | ||
| 345 | |||
| 346 | struct cpl_close_con_req { | ||
| 347 | WR_HDR; | ||
| 348 | union opcode_tid ot; | ||
| 349 | __be32 rsvd; | ||
| 350 | }; | ||
| 351 | |||
| 352 | struct cpl_close_con_rpl { | ||
| 353 | union opcode_tid ot; | ||
| 354 | u8 rsvd[3]; | ||
| 355 | u8 status; | ||
| 356 | __be32 snd_nxt; | ||
| 357 | __be32 rcv_nxt; | ||
| 358 | }; | ||
| 359 | |||
| 360 | struct cpl_close_listsvr_req { | ||
| 361 | WR_HDR; | ||
| 362 | union opcode_tid ot; | ||
| 363 | __be16 reply_ctrl; | ||
| 364 | #define LISTSVR_IPV6 (1 << 14) | ||
| 365 | __be16 rsvd; | ||
| 366 | }; | ||
| 367 | |||
| 368 | struct cpl_close_listsvr_rpl { | ||
| 369 | union opcode_tid ot; | ||
| 370 | u8 rsvd[3]; | ||
| 371 | u8 status; | ||
| 372 | }; | ||
| 373 | |||
| 374 | struct cpl_abort_req_rss { | ||
| 375 | union opcode_tid ot; | ||
| 376 | u8 rsvd[3]; | ||
| 377 | u8 status; | ||
| 378 | }; | ||
| 379 | |||
| 380 | struct cpl_abort_req { | ||
| 381 | WR_HDR; | ||
| 382 | union opcode_tid ot; | ||
| 383 | __be32 rsvd0; | ||
| 384 | u8 rsvd1; | ||
| 385 | u8 cmd; | ||
| 386 | u8 rsvd2[6]; | ||
| 387 | }; | ||
| 388 | |||
| 389 | struct cpl_abort_rpl_rss { | ||
| 390 | union opcode_tid ot; | ||
| 391 | u8 rsvd[3]; | ||
| 392 | u8 status; | ||
| 393 | }; | ||
| 394 | |||
| 395 | struct cpl_abort_rpl { | ||
| 396 | WR_HDR; | ||
| 397 | union opcode_tid ot; | ||
| 398 | __be32 rsvd0; | ||
| 399 | u8 rsvd1; | ||
| 400 | u8 cmd; | ||
| 401 | u8 rsvd2[6]; | ||
| 402 | }; | ||
| 403 | |||
| 404 | struct cpl_peer_close { | ||
| 405 | union opcode_tid ot; | ||
| 406 | __be32 rcv_nxt; | ||
| 407 | }; | ||
| 408 | |||
| 409 | struct cpl_tid_release { | ||
| 410 | WR_HDR; | ||
| 411 | union opcode_tid ot; | ||
| 412 | __be32 rsvd; | ||
| 413 | }; | ||
| 414 | |||
| 415 | struct cpl_tx_pkt_core { | ||
| 416 | __be32 ctrl0; | ||
| 417 | #define TXPKT_VF(x) ((x) << 0) | ||
| 418 | #define TXPKT_PF(x) ((x) << 8) | ||
| 419 | #define TXPKT_VF_VLD (1 << 11) | ||
| 420 | #define TXPKT_OVLAN_IDX(x) ((x) << 12) | ||
| 421 | #define TXPKT_INTF(x) ((x) << 16) | ||
| 422 | #define TXPKT_INS_OVLAN (1 << 21) | ||
| 423 | #define TXPKT_OPCODE(x) ((x) << 24) | ||
| 424 | __be16 pack; | ||
| 425 | __be16 len; | ||
| 426 | __be64 ctrl1; | ||
| 427 | #define TXPKT_CSUM_END(x) ((x) << 12) | ||
| 428 | #define TXPKT_CSUM_START(x) ((x) << 20) | ||
| 429 | #define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20) | ||
| 430 | #define TXPKT_CSUM_LOC(x) ((u64)(x) << 30) | ||
| 431 | #define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34) | ||
| 432 | #define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40) | ||
| 433 | #define TXPKT_VLAN(x) ((u64)(x) << 44) | ||
| 434 | #define TXPKT_VLAN_VLD (1ULL << 60) | ||
| 435 | #define TXPKT_IPCSUM_DIS (1ULL << 62) | ||
| 436 | #define TXPKT_L4CSUM_DIS (1ULL << 63) | ||
| 437 | }; | ||
| 438 | |||
| 439 | struct cpl_tx_pkt { | ||
| 440 | WR_HDR; | ||
| 441 | struct cpl_tx_pkt_core c; | ||
| 442 | }; | ||
| 443 | |||
| 444 | #define cpl_tx_pkt_xt cpl_tx_pkt | ||
| 445 | |||
| 446 | struct cpl_tx_pkt_lso { | ||
| 447 | WR_HDR; | ||
| 448 | __be32 lso_ctrl; | ||
| 449 | #define LSO_TCPHDR_LEN(x) ((x) << 0) | ||
| 450 | #define LSO_IPHDR_LEN(x) ((x) << 4) | ||
| 451 | #define LSO_ETHHDR_LEN(x) ((x) << 16) | ||
| 452 | #define LSO_IPV6(x) ((x) << 20) | ||
| 453 | #define LSO_LAST_SLICE (1 << 22) | ||
| 454 | #define LSO_FIRST_SLICE (1 << 23) | ||
| 455 | #define LSO_OPCODE(x) ((x) << 24) | ||
| 456 | __be16 ipid_ofst; | ||
| 457 | __be16 mss; | ||
| 458 | __be32 seqno_offset; | ||
| 459 | __be32 len; | ||
| 460 | /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ | ||
| 461 | }; | ||
| 462 | |||
| 463 | struct cpl_iscsi_hdr { | ||
| 464 | union opcode_tid ot; | ||
| 465 | __be16 pdu_len_ddp; | ||
| 466 | #define ISCSI_PDU_LEN(x) ((x) & 0x7FFF) | ||
| 467 | #define ISCSI_DDP (1 << 15) | ||
| 468 | __be16 len; | ||
| 469 | __be32 seq; | ||
| 470 | __be16 urg; | ||
| 471 | u8 rsvd; | ||
| 472 | u8 status; | ||
| 473 | }; | ||
| 474 | |||
| 475 | struct cpl_rx_data { | ||
| 476 | union opcode_tid ot; | ||
| 477 | __be16 rsvd; | ||
| 478 | __be16 len; | ||
| 479 | __be32 seq; | ||
| 480 | __be16 urg; | ||
| 481 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
| 482 | u8 dack_mode:2; | ||
| 483 | u8 psh:1; | ||
| 484 | u8 heartbeat:1; | ||
| 485 | u8 ddp_off:1; | ||
| 486 | u8 :3; | ||
| 487 | #else | ||
| 488 | u8 :3; | ||
| 489 | u8 ddp_off:1; | ||
| 490 | u8 heartbeat:1; | ||
| 491 | u8 psh:1; | ||
| 492 | u8 dack_mode:2; | ||
| 493 | #endif | ||
| 494 | u8 status; | ||
| 495 | }; | ||
| 496 | |||
| 497 | struct cpl_rx_data_ack { | ||
| 498 | WR_HDR; | ||
| 499 | union opcode_tid ot; | ||
| 500 | __be32 credit_dack; | ||
| 501 | #define RX_CREDITS(x) ((x) << 0) | ||
| 502 | #define RX_FORCE_ACK(x) ((x) << 28) | ||
| 503 | }; | ||
| 504 | |||
| 505 | struct cpl_rx_pkt { | ||
| 506 | u8 opcode; | ||
| 507 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
| 508 | u8 iff:4; | ||
| 509 | u8 csum_calc:1; | ||
| 510 | u8 ipmi_pkt:1; | ||
| 511 | u8 vlan_ex:1; | ||
| 512 | u8 ip_frag:1; | ||
| 513 | #else | ||
| 514 | u8 ip_frag:1; | ||
| 515 | u8 vlan_ex:1; | ||
| 516 | u8 ipmi_pkt:1; | ||
| 517 | u8 csum_calc:1; | ||
| 518 | u8 iff:4; | ||
| 519 | #endif | ||
| 520 | __be16 csum; | ||
| 521 | __be16 vlan; | ||
| 522 | __be16 len; | ||
| 523 | __be32 l2info; | ||
| 524 | #define RXF_UDP (1 << 22) | ||
| 525 | #define RXF_TCP (1 << 23) | ||
| 526 | __be16 hdr_len; | ||
| 527 | __be16 err_vec; | ||
| 528 | }; | ||
| 529 | |||
| 530 | struct cpl_trace_pkt { | ||
| 531 | u8 opcode; | ||
| 532 | u8 intf; | ||
| 533 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
| 534 | u8 runt:4; | ||
| 535 | u8 filter_hit:4; | ||
| 536 | u8 :6; | ||
| 537 | u8 err:1; | ||
| 538 | u8 trunc:1; | ||
| 539 | #else | ||
| 540 | u8 filter_hit:4; | ||
| 541 | u8 runt:4; | ||
| 542 | u8 trunc:1; | ||
| 543 | u8 err:1; | ||
| 544 | u8 :6; | ||
| 545 | #endif | ||
| 546 | __be16 rsvd; | ||
| 547 | __be16 len; | ||
| 548 | __be64 tstamp; | ||
| 549 | }; | ||
| 550 | |||
| 551 | struct cpl_l2t_write_req { | ||
| 552 | WR_HDR; | ||
| 553 | union opcode_tid ot; | ||
| 554 | __be16 params; | ||
| 555 | #define L2T_W_INFO(x) ((x) << 2) | ||
| 556 | #define L2T_W_PORT(x) ((x) << 8) | ||
| 557 | #define L2T_W_NOREPLY(x) ((x) << 15) | ||
| 558 | __be16 l2t_idx; | ||
| 559 | __be16 vlan; | ||
| 560 | u8 dst_mac[6]; | ||
| 561 | }; | ||
| 562 | |||
| 563 | struct cpl_l2t_write_rpl { | ||
| 564 | union opcode_tid ot; | ||
| 565 | u8 status; | ||
| 566 | u8 rsvd[3]; | ||
| 567 | }; | ||
| 568 | |||
| 569 | struct cpl_rdma_terminate { | ||
| 570 | union opcode_tid ot; | ||
| 571 | __be16 rsvd; | ||
| 572 | __be16 len; | ||
| 573 | }; | ||
| 574 | |||
| 575 | struct cpl_sge_egr_update { | ||
| 576 | __be32 opcode_qid; | ||
| 577 | #define EGR_QID(x) ((x) & 0x1FFFF) | ||
| 578 | __be16 cidx; | ||
| 579 | __be16 pidx; | ||
| 580 | }; | ||
| 581 | |||
| 582 | struct cpl_fw4_pld { | ||
| 583 | u8 opcode; | ||
| 584 | u8 rsvd0[3]; | ||
| 585 | u8 type; | ||
| 586 | u8 rsvd1; | ||
| 587 | __be16 len; | ||
| 588 | __be64 data; | ||
| 589 | __be64 rsvd2; | ||
| 590 | }; | ||
| 591 | |||
| 592 | struct cpl_fw6_pld { | ||
| 593 | u8 opcode; | ||
| 594 | u8 rsvd[5]; | ||
| 595 | __be16 len; | ||
| 596 | __be64 data[4]; | ||
| 597 | }; | ||
| 598 | |||
| 599 | struct cpl_fw4_msg { | ||
| 600 | u8 opcode; | ||
| 601 | u8 type; | ||
| 602 | __be16 rsvd0; | ||
| 603 | __be32 rsvd1; | ||
| 604 | __be64 data[2]; | ||
| 605 | }; | ||
| 606 | |||
| 607 | struct cpl_fw4_ack { | ||
| 608 | union opcode_tid ot; | ||
| 609 | u8 credits; | ||
| 610 | u8 rsvd0[2]; | ||
| 611 | u8 seq_vld; | ||
| 612 | __be32 snd_nxt; | ||
| 613 | __be32 snd_una; | ||
| 614 | __be64 rsvd1; | ||
| 615 | }; | ||
| 616 | |||
| 617 | struct cpl_fw6_msg { | ||
| 618 | u8 opcode; | ||
| 619 | u8 type; | ||
| 620 | __be16 rsvd0; | ||
| 621 | __be32 rsvd1; | ||
| 622 | __be64 data[4]; | ||
| 623 | }; | ||
| 624 | |||
| 625 | enum { | ||
| 626 | ULP_TX_MEM_READ = 2, | ||
| 627 | ULP_TX_MEM_WRITE = 3, | ||
| 628 | ULP_TX_PKT = 4 | ||
| 629 | }; | ||
| 630 | |||
| 631 | enum { | ||
| 632 | ULP_TX_SC_NOOP = 0x80, | ||
| 633 | ULP_TX_SC_IMM = 0x81, | ||
| 634 | ULP_TX_SC_DSGL = 0x82, | ||
| 635 | ULP_TX_SC_ISGL = 0x83 | ||
| 636 | }; | ||
| 637 | |||
| 638 | struct ulptx_sge_pair { | ||
| 639 | __be32 len[2]; | ||
| 640 | __be64 addr[2]; | ||
| 641 | }; | ||
| 642 | |||
| 643 | struct ulptx_sgl { | ||
| 644 | __be32 cmd_nsge; | ||
| 645 | #define ULPTX_CMD(x) ((x) << 24) | ||
| 646 | #define ULPTX_NSGE(x) ((x) << 0) | ||
| 647 | __be32 len0; | ||
| 648 | __be64 addr0; | ||
| 649 | struct ulptx_sge_pair sge[0]; | ||
| 650 | }; | ||
| 651 | |||
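
The DSGL command word in ulptx_sgl is composed with ULPTX_CMD()/ULPTX_NSGE(); the sketch below fills only the leading entry of a gather list (the remaining address/length pairs would go into sge[]), with the DMA address and length supplied by the caller.

static inline void example_init_sgl(struct ulptx_sgl *sgl, unsigned int nfrags,
				    dma_addr_t addr0, unsigned int len0)
{
	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
	sgl->len0 = htonl(len0);		/* length of the first buffer */
	sgl->addr0 = cpu_to_be64(addr0);	/* bus address of the first buffer */
}
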
| 652 | struct ulp_mem_io { | ||
| 653 | WR_HDR; | ||
| 654 | __be32 cmd; | ||
| 655 | #define ULP_MEMIO_ORDER(x) ((x) << 23) | ||
| 656 | __be32 len16; /* command length */ | ||
| 657 | __be32 dlen; /* data length in 32-byte units */ | ||
| 658 | #define ULP_MEMIO_DATA_LEN(x) ((x) << 0) | ||
| 659 | __be32 lock_addr; | ||
| 660 | #define ULP_MEMIO_ADDR(x) ((x) << 0) | ||
| 661 | #define ULP_MEMIO_LOCK(x) ((x) << 31) | ||
| 662 | }; | ||
| 663 | |||
| 664 | #endif /* __T4_MSG_H */ | ||
diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h new file mode 100644 index 000000000000..5ed56483cbc2 --- /dev/null +++ b/drivers/net/cxgb4/t4_regs.h | |||
| @@ -0,0 +1,878 @@ | |||
| 1 | /* | ||
| 2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2010 Chelsio Communications, Inc. All rights reserved. | ||
| 5 | * | ||
| 6 | * This software is available to you under a choice of one of two | ||
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 8 | * General Public License (GPL) Version 2, available from the file | ||
| 9 | * COPYING in the main directory of this source tree, or the | ||
| 10 | * OpenIB.org BSD license below: | ||
| 11 | * | ||
| 12 | * Redistribution and use in source and binary forms, with or | ||
| 13 | * without modification, are permitted provided that the following | ||
| 14 | * conditions are met: | ||
| 15 | * | ||
| 16 | * - Redistributions of source code must retain the above | ||
| 17 | * copyright notice, this list of conditions and the following | ||
| 18 | * disclaimer. | ||
| 19 | * | ||
| 20 | * - Redistributions in binary form must reproduce the above | ||
| 21 | * copyright notice, this list of conditions and the following | ||
| 22 | * disclaimer in the documentation and/or other materials | ||
| 23 | * provided with the distribution. | ||
| 24 | * | ||
| 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 32 | * SOFTWARE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef __T4_REGS_H | ||
| 36 | #define __T4_REGS_H | ||
| 37 | |||
| 38 | #define MYPF_BASE 0x1b000 | ||
| 39 | #define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr)) | ||
| 40 | |||
| 41 | #define PF0_BASE 0x1e000 | ||
| 42 | #define PF0_REG(reg_addr) (PF0_BASE + (reg_addr)) | ||
| 43 | |||
| 44 | #define PF_STRIDE 0x400 | ||
| 45 | #define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE) | ||
| 46 | #define PF_REG(idx, reg) (PF_BASE(idx) + (reg)) | ||
| 47 | |||
| 48 | #define MYPORT_BASE 0x1c000 | ||
| 49 | #define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr)) | ||
| 50 | |||
| 51 | #define PORT0_BASE 0x20000 | ||
| 52 | #define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr)) | ||
| 53 | |||
| 54 | #define PORT_STRIDE 0x2000 | ||
| 55 | #define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE) | ||
| 56 | #define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg)) | ||
| 57 | |||
| 58 | #define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR) | ||
| 59 | #define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx) | ||
| 60 | |||
| 61 | #define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) | ||
| 62 | #define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) | ||
| 63 | #define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) | ||
| 64 | #define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) | ||
| 65 | |||
| 66 | #define SGE_PF_KDOORBELL 0x0 | ||
| 67 | #define QID_MASK 0xffff8000U | ||
| 68 | #define QID_SHIFT 15 | ||
| 69 | #define QID(x) ((x) << QID_SHIFT) | ||
| 70 | #define DBPRIO 0x00004000U | ||
| 71 | #define PIDX_MASK 0x00003fffU | ||
| 72 | #define PIDX_SHIFT 0 | ||
| 73 | #define PIDX(x) ((x) << PIDX_SHIFT) | ||
| 74 | |||
| 75 | #define SGE_PF_GTS 0x4 | ||
| 76 | #define INGRESSQID_MASK 0xffff0000U | ||
| 77 | #define INGRESSQID_SHIFT 16 | ||
| 78 | #define INGRESSQID(x) ((x) << INGRESSQID_SHIFT) | ||
| 79 | #define TIMERREG_MASK 0x0000e000U | ||
| 80 | #define TIMERREG_SHIFT 13 | ||
| 81 | #define TIMERREG(x) ((x) << TIMERREG_SHIFT) | ||
| 82 | #define SEINTARM_MASK 0x00001000U | ||
| 83 | #define SEINTARM_SHIFT 12 | ||
| 84 | #define SEINTARM(x) ((x) << SEINTARM_SHIFT) | ||
| 85 | #define CIDXINC_MASK 0x00000fffU | ||
| 86 | #define CIDXINC_SHIFT 0 | ||
| 87 | #define CIDXINC(x) ((x) << CIDXINC_SHIFT) | ||
| 88 | |||
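
The SGE_PF_GTS fields above are normally OR-ed into a single doorbell write when a response queue is serviced; this sketch assumes the driver's t4_write_reg() accessor and that 'params' encodes the next interrupt holdoff selection.

static inline void example_ack_rspq(struct adapter *adap, unsigned int iqid,
				    unsigned int credits, unsigned int params)
{
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
		     CIDXINC(credits) |		/* entries consumed */
		     INGRESSQID(iqid) |		/* which ingress queue */
		     SEINTARM(params));		/* interrupt re-arm selection */
}
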
| 89 | #define SGE_CONTROL 0x1008 | ||
| 90 | #define DCASYSTYPE 0x00080000U | ||
| 91 | #define RXPKTCPLMODE 0x00040000U | ||
| 92 | #define EGRSTATUSPAGESIZE 0x00020000U | ||
| 93 | #define PKTSHIFT_MASK 0x00001c00U | ||
| 94 | #define PKTSHIFT_SHIFT 10 | ||
| 95 | #define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) | ||
| 96 | #define INGPCIEBOUNDARY_MASK 0x00000380U | ||
| 97 | #define INGPCIEBOUNDARY_SHIFT 7 | ||
| 98 | #define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) | ||
| 99 | #define INGPADBOUNDARY_MASK 0x00000070U | ||
| 100 | #define INGPADBOUNDARY_SHIFT 4 | ||
| 101 | #define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) | ||
| 102 | #define EGRPCIEBOUNDARY_MASK 0x0000000eU | ||
| 103 | #define EGRPCIEBOUNDARY_SHIFT 1 | ||
| 104 | #define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT) | ||
| 105 | #define GLOBALENABLE 0x00000001U | ||
| 106 | |||
| 107 | #define SGE_HOST_PAGE_SIZE 0x100c | ||
| 108 | #define HOSTPAGESIZEPF0_MASK 0x0000000fU | ||
| 109 | #define HOSTPAGESIZEPF0_SHIFT 0 | ||
| 110 | #define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT) | ||
| 111 | |||
| 112 | #define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010 | ||
| 113 | #define QUEUESPERPAGEPF0_MASK 0x0000000fU | ||
| 114 | #define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) | ||
| 115 | |||
| 116 | #define SGE_INT_CAUSE1 0x1024 | ||
| 117 | #define SGE_INT_CAUSE2 0x1030 | ||
| 118 | #define SGE_INT_CAUSE3 0x103c | ||
| 119 | #define ERR_FLM_DBP 0x80000000U | ||
| 120 | #define ERR_FLM_IDMA1 0x40000000U | ||
| 121 | #define ERR_FLM_IDMA0 0x20000000U | ||
| 122 | #define ERR_FLM_HINT 0x10000000U | ||
| 123 | #define ERR_PCIE_ERROR3 0x08000000U | ||
| 124 | #define ERR_PCIE_ERROR2 0x04000000U | ||
| 125 | #define ERR_PCIE_ERROR1 0x02000000U | ||
| 126 | #define ERR_PCIE_ERROR0 0x01000000U | ||
| 127 | #define ERR_TIMER_ABOVE_MAX_QID 0x00800000U | ||
| 128 | #define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U | ||
| 129 | #define ERR_INVALID_CIDX_INC 0x00200000U | ||
| 130 | #define ERR_ITP_TIME_PAUSED 0x00100000U | ||
| 131 | #define ERR_CPL_OPCODE_0 0x00080000U | ||
| 132 | #define ERR_DROPPED_DB 0x00040000U | ||
| 133 | #define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U | ||
| 134 | #define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U | ||
| 135 | #define ERR_BAD_DB_PIDX3 0x00008000U | ||
| 136 | #define ERR_BAD_DB_PIDX2 0x00004000U | ||
| 137 | #define ERR_BAD_DB_PIDX1 0x00002000U | ||
| 138 | #define ERR_BAD_DB_PIDX0 0x00001000U | ||
| 139 | #define ERR_ING_PCIE_CHAN 0x00000800U | ||
| 140 | #define ERR_ING_CTXT_PRIO 0x00000400U | ||
| 141 | #define ERR_EGR_CTXT_PRIO 0x00000200U | ||
| 142 | #define DBFIFO_HP_INT 0x00000100U | ||
| 143 | #define DBFIFO_LP_INT 0x00000080U | ||
| 144 | #define REG_ADDRESS_ERR 0x00000040U | ||
| 145 | #define INGRESS_SIZE_ERR 0x00000020U | ||
| 146 | #define EGRESS_SIZE_ERR 0x00000010U | ||
| 147 | #define ERR_INV_CTXT3 0x00000008U | ||
| 148 | #define ERR_INV_CTXT2 0x00000004U | ||
| 149 | #define ERR_INV_CTXT1 0x00000002U | ||
| 150 | #define ERR_INV_CTXT0 0x00000001U | ||
| 151 | |||
| 152 | #define SGE_INT_ENABLE3 0x1040 | ||
| 153 | #define SGE_FL_BUFFER_SIZE0 0x1044 | ||
| 154 | #define SGE_FL_BUFFER_SIZE1 0x1048 | ||
| 155 | #define SGE_INGRESS_RX_THRESHOLD 0x10a0 | ||
| 156 | #define THRESHOLD_0_MASK 0x3f000000U | ||
| 157 | #define THRESHOLD_0_SHIFT 24 | ||
| 158 | #define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT) | ||
| 159 | #define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT) | ||
| 160 | #define THRESHOLD_1_MASK 0x003f0000U | ||
| 161 | #define THRESHOLD_1_SHIFT 16 | ||
| 162 | #define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT) | ||
| 163 | #define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT) | ||
| 164 | #define THRESHOLD_2_MASK 0x00003f00U | ||
| 165 | #define THRESHOLD_2_SHIFT 8 | ||
| 166 | #define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT) | ||
| 167 | #define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT) | ||
| 168 | #define THRESHOLD_3_MASK 0x0000003fU | ||
| 169 | #define THRESHOLD_3_SHIFT 0 | ||
| 170 | #define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT) | ||
| 171 | #define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT) | ||
| 172 | |||
| 173 | #define SGE_TIMER_VALUE_0_AND_1 0x10b8 | ||
| 174 | #define TIMERVALUE0_MASK 0xffff0000U | ||
| 175 | #define TIMERVALUE0_SHIFT 16 | ||
| 176 | #define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT) | ||
| 177 | #define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT) | ||
| 178 | #define TIMERVALUE1_MASK 0x0000ffffU | ||
| 179 | #define TIMERVALUE1_SHIFT 0 | ||
| 180 | #define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT) | ||
| 181 | #define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT) | ||
| 182 | |||
| 183 | #define SGE_TIMER_VALUE_2_AND_3 0x10bc | ||
| 184 | #define SGE_TIMER_VALUE_4_AND_5 0x10c0 | ||
| 185 | #define SGE_DEBUG_INDEX 0x10cc | ||
| 186 | #define SGE_DEBUG_DATA_HIGH 0x10d0 | ||
| 187 | #define SGE_DEBUG_DATA_LOW 0x10d4 | ||
| 188 | #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 | ||
| 189 | |||
| 190 | #define PCIE_PF_CLI 0x44 | ||
| 191 | #define PCIE_INT_CAUSE 0x3004 | ||
| 192 | #define UNXSPLCPLERR 0x20000000U | ||
| 193 | #define PCIEPINT 0x10000000U | ||
| 194 | #define PCIESINT 0x08000000U | ||
| 195 | #define RPLPERR 0x04000000U | ||
| 196 | #define RXWRPERR 0x02000000U | ||
| 197 | #define RXCPLPERR 0x01000000U | ||
| 198 | #define PIOTAGPERR 0x00800000U | ||
| 199 | #define MATAGPERR 0x00400000U | ||
| 200 | #define INTXCLRPERR 0x00200000U | ||
| 201 | #define FIDPERR 0x00100000U | ||
| 202 | #define CFGSNPPERR 0x00080000U | ||
| 203 | #define HRSPPERR 0x00040000U | ||
| 204 | #define HREQPERR 0x00020000U | ||
| 205 | #define HCNTPERR 0x00010000U | ||
| 206 | #define DRSPPERR 0x00008000U | ||
| 207 | #define DREQPERR 0x00004000U | ||
| 208 | #define DCNTPERR 0x00002000U | ||
| 209 | #define CRSPPERR 0x00001000U | ||
| 210 | #define CREQPERR 0x00000800U | ||
| 211 | #define CCNTPERR 0x00000400U | ||
| 212 | #define TARTAGPERR 0x00000200U | ||
| 213 | #define PIOREQPERR 0x00000100U | ||
| 214 | #define PIOCPLPERR 0x00000080U | ||
| 215 | #define MSIXDIPERR 0x00000040U | ||
| 216 | #define MSIXDATAPERR 0x00000020U | ||
| 217 | #define MSIXADDRHPERR 0x00000010U | ||
| 218 | #define MSIXADDRLPERR 0x00000008U | ||
| 219 | #define MSIDATAPERR 0x00000004U | ||
| 220 | #define MSIADDRHPERR 0x00000002U | ||
| 221 | #define MSIADDRLPERR 0x00000001U | ||
| 222 | |||
| 223 | #define PCIE_NONFAT_ERR 0x3010 | ||
| 224 | #define PCIE_MEM_ACCESS_BASE_WIN 0x3068 | ||
| 225 | #define PCIEOFST_MASK 0xfffffc00U | ||
| 226 | #define BIR_MASK 0x00000300U | ||
| 227 | #define BIR_SHIFT 8 | ||
| 228 | #define BIR(x) ((x) << BIR_SHIFT) | ||
| 229 | #define WINDOW_MASK 0x000000ffU | ||
| 230 | #define WINDOW_SHIFT 0 | ||
| 231 | #define WINDOW(x) ((x) << WINDOW_SHIFT) | ||
| 232 | |||
| 233 | #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908 | ||
| 234 | #define RNPP 0x80000000U | ||
| 235 | #define RPCP 0x20000000U | ||
| 236 | #define RCIP 0x08000000U | ||
| 237 | #define RCCP 0x04000000U | ||
| 238 | #define RFTP 0x00800000U | ||
| 239 | #define PTRP 0x00100000U | ||
| 240 | |||
| 241 | #define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4 | ||
| 242 | #define TPCP 0x40000000U | ||
| 243 | #define TNPP 0x20000000U | ||
| 244 | #define TFTP 0x10000000U | ||
| 245 | #define TCAP 0x08000000U | ||
| 246 | #define TCIP 0x04000000U | ||
| 247 | #define RCAP 0x02000000U | ||
| 248 | #define PLUP 0x00800000U | ||
| 249 | #define PLDN 0x00400000U | ||
| 250 | #define OTDD 0x00200000U | ||
| 251 | #define GTRP 0x00100000U | ||
| 252 | #define RDPE 0x00040000U | ||
| 253 | #define TDCE 0x00020000U | ||
| 254 | #define TDUE 0x00010000U | ||
| 255 | |||
| 256 | #define MC_INT_CAUSE 0x7518 | ||
| 257 | #define ECC_UE_INT_CAUSE 0x00000004U | ||
| 258 | #define ECC_CE_INT_CAUSE 0x00000002U | ||
| 259 | #define PERR_INT_CAUSE 0x00000001U | ||
| 260 | |||
| 261 | #define MC_ECC_STATUS 0x751c | ||
| 262 | #define ECC_CECNT_MASK 0xffff0000U | ||
| 263 | #define ECC_CECNT_SHIFT 16 | ||
| 264 | #define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT) | ||
| 265 | #define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT) | ||
| 266 | #define ECC_UECNT_MASK 0x0000ffffU | ||
| 267 | #define ECC_UECNT_SHIFT 0 | ||
| 268 | #define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT) | ||
| 269 | #define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT) | ||
| 270 | |||
| 271 | #define MC_BIST_CMD 0x7600 | ||
| 272 | #define START_BIST 0x80000000U | ||
| 273 | #define BIST_CMD_GAP_MASK 0x0000ff00U | ||
| 274 | #define BIST_CMD_GAP_SHIFT 8 | ||
| 275 | #define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT) | ||
| 276 | #define BIST_OPCODE_MASK 0x00000003U | ||
| 277 | #define BIST_OPCODE_SHIFT 0 | ||
| 278 | #define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT) | ||
| 279 | |||
| 280 | #define MC_BIST_CMD_ADDR 0x7604 | ||
| 281 | #define MC_BIST_CMD_LEN 0x7608 | ||
| 282 | #define MC_BIST_DATA_PATTERN 0x760c | ||
| 283 | #define BIST_DATA_TYPE_MASK 0x0000000fU | ||
| 284 | #define BIST_DATA_TYPE_SHIFT 0 | ||
| 285 | #define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT) | ||
| 286 | |||
| 287 | #define MC_BIST_STATUS_RDATA 0x7688 | ||
| 288 | |||
| 289 | #define MA_EXT_MEMORY_BAR 0x77c8 | ||
| 290 | #define EXT_MEM_SIZE_MASK 0x00000fffU | ||
| 291 | #define EXT_MEM_SIZE_SHIFT 0 | ||
| 292 | #define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT) | ||
| 293 | |||
| 294 | #define MA_TARGET_MEM_ENABLE 0x77d8 | ||
| 295 | #define EXT_MEM_ENABLE 0x00000004U | ||
| 296 | #define EDRAM1_ENABLE 0x00000002U | ||
| 297 | #define EDRAM0_ENABLE 0x00000001U | ||
| 298 | |||
| 299 | #define MA_INT_CAUSE 0x77e0 | ||
| 300 | #define MEM_PERR_INT_CAUSE 0x00000002U | ||
| 301 | #define MEM_WRAP_INT_CAUSE 0x00000001U | ||
| 302 | |||
| 303 | #define MA_INT_WRAP_STATUS 0x77e4 | ||
| 304 | #define MEM_WRAP_ADDRESS_MASK 0xfffffff0U | ||
| 305 | #define MEM_WRAP_ADDRESS_SHIFT 4 | ||
| 306 | #define MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT) | ||
| 307 | #define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU | ||
| 308 | #define MEM_WRAP_CLIENT_NUM_SHIFT 0 | ||
| 309 | #define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) | ||
| 310 | |||
| 311 | #define MA_PARITY_ERROR_STATUS 0x77f4 | ||
| 312 | |||
| 313 | #define EDC_0_BASE_ADDR 0x7900 | ||
| 314 | |||
| 315 | #define EDC_BIST_CMD 0x7904 | ||
| 316 | #define EDC_BIST_CMD_ADDR 0x7908 | ||
| 317 | #define EDC_BIST_CMD_LEN 0x790c | ||
| 318 | #define EDC_BIST_DATA_PATTERN 0x7910 | ||
| 319 | #define EDC_BIST_STATUS_RDATA 0x7928 | ||
| 320 | #define EDC_INT_CAUSE 0x7978 | ||
| 321 | #define ECC_UE_PAR 0x00000020U | ||
| 322 | #define ECC_CE_PAR 0x00000010U | ||
| 323 | #define PERR_PAR_CAUSE 0x00000008U | ||
| 324 | |||
| 325 | #define EDC_ECC_STATUS 0x797c | ||
| 326 | |||
| 327 | #define EDC_1_BASE_ADDR 0x7980 | ||
| 328 | |||
| 329 | #define CIM_PF_MAILBOX_DATA 0x240 | ||
| 330 | #define CIM_PF_MAILBOX_CTRL 0x280 | ||
| 331 | #define MBMSGVALID 0x00000008U | ||
| 332 | #define MBINTREQ 0x00000004U | ||
| 333 | #define MBOWNER_MASK 0x00000003U | ||
| 334 | #define MBOWNER_SHIFT 0 | ||
| 335 | #define MBOWNER(x) ((x) << MBOWNER_SHIFT) | ||
| 336 | #define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT) | ||
| 337 | |||
| 338 | #define CIM_PF_HOST_INT_CAUSE 0x28c | ||
| 339 | #define MBMSGRDYINT 0x00080000U | ||
| 340 | |||
| 341 | #define CIM_HOST_INT_CAUSE 0x7b2c | ||
| 342 | #define TIEQOUTPARERRINT 0x00100000U | ||
| 343 | #define TIEQINPARERRINT 0x00080000U | ||
| 344 | #define MBHOSTPARERR 0x00040000U | ||
| 345 | #define MBUPPARERR 0x00020000U | ||
| 346 | #define IBQPARERR 0x0001f800U | ||
| 347 | #define IBQTP0PARERR 0x00010000U | ||
| 348 | #define IBQTP1PARERR 0x00008000U | ||
| 349 | #define IBQULPPARERR 0x00004000U | ||
| 350 | #define IBQSGELOPARERR 0x00002000U | ||
| 351 | #define IBQSGEHIPARERR 0x00001000U | ||
| 352 | #define IBQNCSIPARERR 0x00000800U | ||
| 353 | #define OBQPARERR 0x000007e0U | ||
| 354 | #define OBQULP0PARERR 0x00000400U | ||
| 355 | #define OBQULP1PARERR 0x00000200U | ||
| 356 | #define OBQULP2PARERR 0x00000100U | ||
| 357 | #define OBQULP3PARERR 0x00000080U | ||
| 358 | #define OBQSGEPARERR 0x00000040U | ||
| 359 | #define OBQNCSIPARERR 0x00000020U | ||
| 360 | #define PREFDROPINT 0x00000002U | ||
| 361 | #define UPACCNONZERO 0x00000001U | ||
| 362 | |||
| 363 | #define CIM_HOST_UPACC_INT_CAUSE 0x7b34 | ||
| 364 | #define EEPROMWRINT 0x40000000U | ||
| 365 | #define TIMEOUTMAINT 0x20000000U | ||
| 366 | #define TIMEOUTINT 0x10000000U | ||
| 367 | #define RSPOVRLOOKUPINT 0x08000000U | ||
| 368 | #define REQOVRLOOKUPINT 0x04000000U | ||
| 369 | #define BLKWRPLINT 0x02000000U | ||
| 370 | #define BLKRDPLINT 0x01000000U | ||
| 371 | #define SGLWRPLINT 0x00800000U | ||
| 372 | #define SGLRDPLINT 0x00400000U | ||
| 373 | #define BLKWRCTLINT 0x00200000U | ||
| 374 | #define BLKRDCTLINT 0x00100000U | ||
| 375 | #define SGLWRCTLINT 0x00080000U | ||
| 376 | #define SGLRDCTLINT 0x00040000U | ||
| 377 | #define BLKWREEPROMINT 0x00020000U | ||
| 378 | #define BLKRDEEPROMINT 0x00010000U | ||
| 379 | #define SGLWREEPROMINT 0x00008000U | ||
| 380 | #define SGLRDEEPROMINT 0x00004000U | ||
| 381 | #define BLKWRFLASHINT 0x00002000U | ||
| 382 | #define BLKRDFLASHINT 0x00001000U | ||
| 383 | #define SGLWRFLASHINT 0x00000800U | ||
| 384 | #define SGLRDFLASHINT 0x00000400U | ||
| 385 | #define BLKWRBOOTINT 0x00000200U | ||
| 386 | #define BLKRDBOOTINT 0x00000100U | ||
| 387 | #define SGLWRBOOTINT 0x00000080U | ||
| 388 | #define SGLRDBOOTINT 0x00000040U | ||
| 389 | #define ILLWRBEINT 0x00000020U | ||
| 390 | #define ILLRDBEINT 0x00000010U | ||
| 391 | #define ILLRDINT 0x00000008U | ||
| 392 | #define ILLWRINT 0x00000004U | ||
| 393 | #define ILLTRANSINT 0x00000002U | ||
| 394 | #define RSVDSPACEINT 0x00000001U | ||
| 395 | |||
| 396 | #define TP_OUT_CONFIG 0x7d04 | ||
| 397 | #define VLANEXTENABLE_MASK 0x0000f000U | ||
| 398 | #define VLANEXTENABLE_SHIFT 12 | ||
| 399 | |||
| 400 | #define TP_PARA_REG2 0x7d68 | ||
| 401 | #define MAXRXDATA_MASK 0xffff0000U | ||
| 402 | #define MAXRXDATA_SHIFT 16 | ||
| 403 | #define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT) | ||
| 404 | |||
| 405 | #define TP_TIMER_RESOLUTION 0x7d90 | ||
| 406 | #define TIMERRESOLUTION_MASK 0x00ff0000U | ||
| 407 | #define TIMERRESOLUTION_SHIFT 16 | ||
| 408 | #define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT) | ||
| 409 | |||
| 410 | #define TP_SHIFT_CNT 0x7dc0 | ||
| 411 | |||
| 412 | #define TP_CCTRL_TABLE 0x7ddc | ||
| 413 | #define TP_MTU_TABLE 0x7de4 | ||
| 414 | #define MTUINDEX_MASK 0xff000000U | ||
| 415 | #define MTUINDEX_SHIFT 24 | ||
| 416 | #define MTUINDEX(x) ((x) << MTUINDEX_SHIFT) | ||
| 417 | #define MTUWIDTH_MASK 0x000f0000U | ||
| 418 | #define MTUWIDTH_SHIFT 16 | ||
| 419 | #define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT) | ||
| 420 | #define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT) | ||
| 421 | #define MTUVALUE_MASK 0x00003fffU | ||
| 422 | #define MTUVALUE_SHIFT 0 | ||
| 423 | #define MTUVALUE(x) ((x) << MTUVALUE_SHIFT) | ||
| 424 | #define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT) | ||
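For write-side registers the shift macros build the value instead of decoding it. A sketch of packing one MTU-table entry; the caller is assumed to write the result to TP_MTU_TABLE with the driver's usual register accessor, which is not part of this header:

        /* Sketch: pack one TP_MTU_TABLE entry; mtu must fit in the 14-bit field. */
        static u32 tp_mtu_entry(unsigned int idx, unsigned int width, unsigned int mtu)
        {
                return MTUINDEX(idx) | MTUWIDTH(width) | MTUVALUE(mtu);
        }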
| 425 | |||
| 426 | #define TP_RSS_LKP_TABLE 0x7dec | ||
| 427 | #define LKPTBLROWVLD 0x80000000U | ||
| 428 | #define LKPTBLQUEUE1_MASK 0x000ffc00U | ||
| 429 | #define LKPTBLQUEUE1_SHIFT 10 | ||
| 430 | #define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT) | ||
| 431 | #define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT) | ||
| 432 | #define LKPTBLQUEUE0_MASK 0x000003ffU | ||
| 433 | #define LKPTBLQUEUE0_SHIFT 0 | ||
| 434 | #define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT) | ||
| 435 | #define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT) | ||
| 436 | |||
| 437 | #define TP_PIO_ADDR 0x7e40 | ||
| 438 | #define TP_PIO_DATA 0x7e44 | ||
| 439 | #define TP_MIB_INDEX 0x7e50 | ||
| 440 | #define TP_MIB_DATA 0x7e54 | ||
| 441 | #define TP_INT_CAUSE 0x7e74 | ||
| 442 | #define FLMTXFLSTEMPTY 0x40000000U | ||
| 443 | |||
| 444 | #define TP_INGRESS_CONFIG 0x141 | ||
| 445 | #define VNIC 0x00000800U | ||
| 446 | #define CSUM_HAS_PSEUDO_HDR 0x00000400U | ||
| 447 | #define RM_OVLAN 0x00000200U | ||
| 448 | #define LOOKUPEVERYPKT 0x00000100U | ||
| 449 | |||
| 450 | #define TP_MIB_MAC_IN_ERR_0 0x0 | ||
| 451 | #define TP_MIB_TCP_OUT_RST 0xc | ||
| 452 | #define TP_MIB_TCP_IN_SEG_HI 0x10 | ||
| 453 | #define TP_MIB_TCP_IN_SEG_LO 0x11 | ||
| 454 | #define TP_MIB_TCP_OUT_SEG_HI 0x12 | ||
| 455 | #define TP_MIB_TCP_OUT_SEG_LO 0x13 | ||
| 456 | #define TP_MIB_TCP_RXT_SEG_HI 0x14 | ||
| 457 | #define TP_MIB_TCP_RXT_SEG_LO 0x15 | ||
| 458 | #define TP_MIB_TNL_CNG_DROP_0 0x18 | ||
| 459 | #define TP_MIB_TCP_V6IN_ERR_0 0x28 | ||
| 460 | #define TP_MIB_TCP_V6OUT_RST 0x2c | ||
| 461 | #define TP_MIB_OFD_ARP_DROP 0x36 | ||
| 462 | #define TP_MIB_TNL_DROP_0 0x44 | ||
| 463 | #define TP_MIB_OFD_VLN_DROP_0 0x58 | ||
| 464 | |||
| 465 | #define ULP_TX_INT_CAUSE 0x8dcc | ||
| 466 | #define PBL_BOUND_ERR_CH3 0x80000000U | ||
| 467 | #define PBL_BOUND_ERR_CH2 0x40000000U | ||
| 468 | #define PBL_BOUND_ERR_CH1 0x20000000U | ||
| 469 | #define PBL_BOUND_ERR_CH0 0x10000000U | ||
| 470 | |||
| 471 | #define PM_RX_INT_CAUSE 0x8fdc | ||
| 472 | #define ZERO_E_CMD_ERROR 0x00400000U | ||
| 473 | #define PMRX_FRAMING_ERROR 0x003ffff0U | ||
| 474 | #define OCSPI_PAR_ERROR 0x00000008U | ||
| 475 | #define DB_OPTIONS_PAR_ERROR 0x00000004U | ||
| 476 | #define IESPI_PAR_ERROR 0x00000002U | ||
| 477 | #define E_PCMD_PAR_ERROR 0x00000001U | ||
| 478 | |||
| 479 | #define PM_TX_INT_CAUSE 0x8ffc | ||
| 480 | #define PCMD_LEN_OVFL0 0x80000000U | ||
| 481 | #define PCMD_LEN_OVFL1 0x40000000U | ||
| 482 | #define PCMD_LEN_OVFL2 0x20000000U | ||
| 483 | #define ZERO_C_CMD_ERROR 0x10000000U | ||
| 484 | #define PMTX_FRAMING_ERROR 0x0ffffff0U | ||
| 485 | #define OESPI_PAR_ERROR 0x00000008U | ||
| 486 | #define ICSPI_PAR_ERROR 0x00000002U | ||
| 487 | #define C_PCMD_PAR_ERROR 0x00000001U | ||
| 488 | |||
| 489 | #define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400 | ||
| 490 | #define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404 | ||
| 491 | #define MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408 | ||
| 492 | #define MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c | ||
| 493 | #define MPS_PORT_STAT_TX_PORT_BCAST_L 0x410 | ||
| 494 | #define MPS_PORT_STAT_TX_PORT_BCAST_H 0x414 | ||
| 495 | #define MPS_PORT_STAT_TX_PORT_MCAST_L 0x418 | ||
| 496 | #define MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c | ||
| 497 | #define MPS_PORT_STAT_TX_PORT_UCAST_L 0x420 | ||
| 498 | #define MPS_PORT_STAT_TX_PORT_UCAST_H 0x424 | ||
| 499 | #define MPS_PORT_STAT_TX_PORT_ERROR_L 0x428 | ||
| 500 | #define MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c | ||
| 501 | #define MPS_PORT_STAT_TX_PORT_64B_L 0x430 | ||
| 502 | #define MPS_PORT_STAT_TX_PORT_64B_H 0x434 | ||
| 503 | #define MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438 | ||
| 504 | #define MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c | ||
| 505 | #define MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440 | ||
| 506 | #define MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444 | ||
| 507 | #define MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448 | ||
| 508 | #define MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c | ||
| 509 | #define MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450 | ||
| 510 | #define MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454 | ||
| 511 | #define MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458 | ||
| 512 | #define MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c | ||
| 513 | #define MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460 | ||
| 514 | #define MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464 | ||
| 515 | #define MPS_PORT_STAT_TX_PORT_DROP_L 0x468 | ||
| 516 | #define MPS_PORT_STAT_TX_PORT_DROP_H 0x46c | ||
| 517 | #define MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470 | ||
| 518 | #define MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474 | ||
| 519 | #define MPS_PORT_STAT_TX_PORT_PPP0_L 0x478 | ||
| 520 | #define MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c | ||
| 521 | #define MPS_PORT_STAT_TX_PORT_PPP1_L 0x480 | ||
| 522 | #define MPS_PORT_STAT_TX_PORT_PPP1_H 0x484 | ||
| 523 | #define MPS_PORT_STAT_TX_PORT_PPP2_L 0x488 | ||
| 524 | #define MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c | ||
| 525 | #define MPS_PORT_STAT_TX_PORT_PPP3_L 0x490 | ||
| 526 | #define MPS_PORT_STAT_TX_PORT_PPP3_H 0x494 | ||
| 527 | #define MPS_PORT_STAT_TX_PORT_PPP4_L 0x498 | ||
| 528 | #define MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c | ||
| 529 | #define MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0 | ||
| 530 | #define MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4 | ||
| 531 | #define MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8 | ||
| 532 | #define MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac | ||
| 533 | #define MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0 | ||
| 534 | #define MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4 | ||
| 535 | #define MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0 | ||
| 536 | #define MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4 | ||
| 537 | #define MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8 | ||
| 538 | #define MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc | ||
| 539 | #define MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0 | ||
| 540 | #define MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4 | ||
| 541 | #define MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8 | ||
| 542 | #define MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc | ||
| 543 | #define MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0 | ||
| 544 | #define MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4 | ||
| 545 | #define MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8 | ||
| 546 | #define MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec | ||
| 547 | #define MPS_PORT_STAT_LB_PORT_64B_L 0x4f0 | ||
| 548 | #define MPS_PORT_STAT_LB_PORT_64B_H 0x4f4 | ||
| 549 | #define MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8 | ||
| 550 | #define MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc | ||
| 551 | #define MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500 | ||
| 552 | #define MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504 | ||
| 553 | #define MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508 | ||
| 554 | #define MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c | ||
| 555 | #define MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510 | ||
| 556 | #define MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514 | ||
| 557 | #define MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518 | ||
| 558 | #define MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c | ||
| 559 | #define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520 | ||
| 560 | #define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524 | ||
| 561 | #define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528 | ||
| 562 | #define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540 | ||
| 563 | #define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544 | ||
| 564 | #define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548 | ||
| 565 | #define MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c | ||
| 566 | #define MPS_PORT_STAT_RX_PORT_BCAST_L 0x550 | ||
| 567 | #define MPS_PORT_STAT_RX_PORT_BCAST_H 0x554 | ||
| 568 | #define MPS_PORT_STAT_RX_PORT_MCAST_L 0x558 | ||
| 569 | #define MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c | ||
| 570 | #define MPS_PORT_STAT_RX_PORT_UCAST_L 0x560 | ||
| 571 | #define MPS_PORT_STAT_RX_PORT_UCAST_H 0x564 | ||
| 572 | #define MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568 | ||
| 573 | #define MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c | ||
| 574 | #define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570 | ||
| 575 | #define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574 | ||
| 576 | #define MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578 | ||
| 577 | #define MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c | ||
| 578 | #define MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580 | ||
| 579 | #define MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584 | ||
| 580 | #define MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588 | ||
| 581 | #define MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c | ||
| 582 | #define MPS_PORT_STAT_RX_PORT_64B_L 0x590 | ||
| 583 | #define MPS_PORT_STAT_RX_PORT_64B_H 0x594 | ||
| 584 | #define MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598 | ||
| 585 | #define MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c | ||
| 586 | #define MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0 | ||
| 587 | #define MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4 | ||
| 588 | #define MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8 | ||
| 589 | #define MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac | ||
| 590 | #define MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0 | ||
| 591 | #define MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4 | ||
| 592 | #define MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8 | ||
| 593 | #define MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc | ||
| 594 | #define MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0 | ||
| 595 | #define MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4 | ||
| 596 | #define MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8 | ||
| 597 | #define MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc | ||
| 598 | #define MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0 | ||
| 599 | #define MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4 | ||
| 600 | #define MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8 | ||
| 601 | #define MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc | ||
| 602 | #define MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0 | ||
| 603 | #define MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4 | ||
| 604 | #define MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8 | ||
| 605 | #define MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec | ||
| 606 | #define MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0 | ||
| 607 | #define MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4 | ||
| 608 | #define MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8 | ||
| 609 | #define MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc | ||
| 610 | #define MPS_PORT_STAT_RX_PORT_PPP6_L 0x600 | ||
| 611 | #define MPS_PORT_STAT_RX_PORT_PPP6_H 0x604 | ||
| 612 | #define MPS_PORT_STAT_RX_PORT_PPP7_L 0x608 | ||
| 613 | #define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c | ||
| 614 | #define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610 | ||
| 615 | #define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 | ||
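Every statistic in this block is a 64-bit counter exposed as a low/high register pair 4 bytes apart. A sketch of reassembling one counter; read32() here is only a placeholder for whatever MMIO read helper the surrounding driver provides, and no claim is made about how the hardware latches the pair:

        /* Sketch: rebuild a 64-bit port statistic from its _L/_H pair. */
        static u64 port_stat64(void __iomem *regs, u32 addr_lo)
        {
                u32 lo = read32(regs, addr_lo);         /* e.g. MPS_PORT_STAT_TX_PORT_BYTES_L */
                u32 hi = read32(regs, addr_lo + 4);     /* the matching ..._H register */

                return ((u64)hi << 32) | lo;
        }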
| 616 | #define MPS_CMN_CTL 0x9000 | ||
| 617 | #define NUMPORTS_MASK 0x00000003U | ||
| 618 | #define NUMPORTS_SHIFT 0 | ||
| 619 | #define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT) | ||
| 620 | |||
| 621 | #define MPS_INT_CAUSE 0x9008 | ||
| 622 | #define STATINT 0x00000020U | ||
| 623 | #define TXINT 0x00000010U | ||
| 624 | #define RXINT 0x00000008U | ||
| 625 | #define TRCINT 0x00000004U | ||
| 626 | #define CLSINT 0x00000002U | ||
| 627 | #define PLINT 0x00000001U | ||
| 628 | |||
| 629 | #define MPS_TX_INT_CAUSE 0x9408 | ||
| 630 | #define PORTERR 0x00010000U | ||
| 631 | #define FRMERR 0x00008000U | ||
| 632 | #define SECNTERR 0x00004000U | ||
| 633 | #define BUBBLE 0x00002000U | ||
| 634 | #define TXDESCFIFO 0x00001e00U | ||
| 635 | #define TXDATAFIFO 0x000001e0U | ||
| 636 | #define NCSIFIFO 0x00000010U | ||
| 637 | #define TPFIFO 0x0000000fU | ||
| 638 | |||
| 639 | #define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614 | ||
| 640 | #define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620 | ||
| 641 | #define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c | ||
| 642 | |||
| 643 | #define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640 | ||
| 644 | #define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644 | ||
| 645 | #define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648 | ||
| 646 | #define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c | ||
| 647 | #define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650 | ||
| 648 | #define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654 | ||
| 649 | #define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658 | ||
| 650 | #define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c | ||
| 651 | #define MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660 | ||
| 652 | #define MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664 | ||
| 653 | #define MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668 | ||
| 654 | #define MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c | ||
| 655 | #define MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670 | ||
| 656 | #define MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674 | ||
| 657 | #define MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678 | ||
| 658 | #define MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c | ||
| 659 | #define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680 | ||
| 660 | #define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684 | ||
| 661 | #define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688 | ||
| 662 | #define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c | ||
| 663 | #define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690 | ||
| 664 | #define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694 | ||
| 665 | #define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698 | ||
| 666 | #define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c | ||
| 667 | #define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0 | ||
| 668 | #define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4 | ||
| 669 | #define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8 | ||
| 670 | #define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac | ||
| 671 | #define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0 | ||
| 672 | #define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4 | ||
| 673 | #define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8 | ||
| 674 | #define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc | ||
| 675 | #define MPS_TRC_CFG 0x9800 | ||
| 676 | #define TRCFIFOEMPTY 0x00000010U | ||
| 677 | #define TRCIGNOREDROPINPUT 0x00000008U | ||
| 678 | #define TRCKEEPDUPLICATES 0x00000004U | ||
| 679 | #define TRCEN 0x00000002U | ||
| 680 | #define TRCMULTIFILTER 0x00000001U | ||
| 681 | |||
| 682 | #define MPS_TRC_RSS_CONTROL 0x9808 | ||
| 683 | #define RSSCONTROL_MASK 0x00ff0000U | ||
| 684 | #define RSSCONTROL_SHIFT 16 | ||
| 685 | #define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT) | ||
| 686 | #define QUEUENUMBER_MASK 0x0000ffffU | ||
| 687 | #define QUEUENUMBER_SHIFT 0 | ||
| 688 | #define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT) | ||
| 689 | |||
| 690 | #define MPS_TRC_FILTER_MATCH_CTL_A 0x9810 | ||
| 691 | #define TFINVERTMATCH 0x01000000U | ||
| 692 | #define TFPKTTOOLARGE 0x00800000U | ||
| 693 | #define TFEN 0x00400000U | ||
| 694 | #define TFPORT_MASK 0x003c0000U | ||
| 695 | #define TFPORT_SHIFT 18 | ||
| 696 | #define TFPORT(x) ((x) << TFPORT_SHIFT) | ||
| 697 | #define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT) | ||
| 698 | #define TFDROP 0x00020000U | ||
| 699 | #define TFSOPEOPERR 0x00010000U | ||
| 700 | #define TFLENGTH_MASK 0x00001f00U | ||
| 701 | #define TFLENGTH_SHIFT 8 | ||
| 702 | #define TFLENGTH(x) ((x) << TFLENGTH_SHIFT) | ||
| 703 | #define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT) | ||
| 704 | #define TFOFFSET_MASK 0x0000001fU | ||
| 705 | #define TFOFFSET_SHIFT 0 | ||
| 706 | #define TFOFFSET(x) ((x) << TFOFFSET_SHIFT) | ||
| 707 | #define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT) | ||
| 708 | |||
| 709 | #define MPS_TRC_FILTER_MATCH_CTL_B 0x9820 | ||
| 710 | #define TFMINPKTSIZE_MASK 0x01ff0000U | ||
| 711 | #define TFMINPKTSIZE_SHIFT 16 | ||
| 712 | #define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT) | ||
| 713 | #define TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT) | ||
| 714 | #define TFCAPTUREMAX_MASK 0x00003fffU | ||
| 715 | #define TFCAPTUREMAX_SHIFT 0 | ||
| 716 | #define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT) | ||
| 717 | #define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT) | ||
| 718 | |||
| 719 | #define MPS_TRC_INT_CAUSE 0x985c | ||
| 720 | #define MISCPERR 0x00000100U | ||
| 721 | #define PKTFIFO 0x000000f0U | ||
| 722 | #define FILTMEM 0x0000000fU | ||
| 723 | |||
| 724 | #define MPS_TRC_FILTER0_MATCH 0x9c00 | ||
| 725 | #define MPS_TRC_FILTER0_DONT_CARE 0x9c80 | ||
| 726 | #define MPS_TRC_FILTER1_MATCH 0x9d00 | ||
| 727 | #define MPS_CLS_INT_CAUSE 0xd028 | ||
| 728 | #define PLERRENB 0x00000008U | ||
| 729 | #define HASHSRAM 0x00000004U | ||
| 730 | #define MATCHTCAM 0x00000002U | ||
| 731 | #define MATCHSRAM 0x00000001U | ||
| 732 | |||
| 733 | #define MPS_RX_PERR_INT_CAUSE 0x11074 | ||
| 734 | |||
| 735 | #define CPL_INTR_CAUSE 0x19054 | ||
| 736 | #define CIM_OP_MAP_PERR 0x00000020U | ||
| 737 | #define CIM_OVFL_ERROR 0x00000010U | ||
| 738 | #define TP_FRAMING_ERROR 0x00000008U | ||
| 739 | #define SGE_FRAMING_ERROR 0x00000004U | ||
| 740 | #define CIM_FRAMING_ERROR 0x00000002U | ||
| 741 | #define ZERO_SWITCH_ERROR 0x00000001U | ||
| 742 | |||
| 743 | #define SMB_INT_CAUSE 0x19090 | ||
| 744 | #define MSTTXFIFOPARINT 0x00200000U | ||
| 745 | #define MSTRXFIFOPARINT 0x00100000U | ||
| 746 | #define SLVFIFOPARINT 0x00080000U | ||
| 747 | |||
| 748 | #define ULP_RX_INT_CAUSE 0x19158 | ||
| 749 | #define ULP_RX_ISCSI_TAGMASK 0x19164 | ||
| 750 | #define ULP_RX_ISCSI_PSZ 0x19168 | ||
| 751 | #define HPZ3_MASK 0x0f000000U | ||
| 752 | #define HPZ3_SHIFT 24 | ||
| 753 | #define HPZ3(x) ((x) << HPZ3_SHIFT) | ||
| 754 | #define HPZ2_MASK 0x000f0000U | ||
| 755 | #define HPZ2_SHIFT 16 | ||
| 756 | #define HPZ2(x) ((x) << HPZ2_SHIFT) | ||
| 757 | #define HPZ1_MASK 0x00000f00U | ||
| 758 | #define HPZ1_SHIFT 8 | ||
| 759 | #define HPZ1(x) ((x) << HPZ1_SHIFT) | ||
| 760 | #define HPZ0_MASK 0x0000000fU | ||
| 761 | #define HPZ0_SHIFT 0 | ||
| 762 | #define HPZ0(x) ((x) << HPZ0_SHIFT) | ||
| 763 | |||
| 764 | #define ULP_RX_TDDP_PSZ 0x19178 | ||
| 765 | |||
| 766 | #define SF_DATA 0x193f8 | ||
| 767 | #define SF_OP 0x193fc | ||
| 768 | #define BUSY 0x80000000U | ||
| 769 | #define SF_LOCK 0x00000010U | ||
| 770 | #define SF_CONT 0x00000008U | ||
| 771 | #define BYTECNT_MASK 0x00000006U | ||
| 772 | #define BYTECNT_SHIFT 1 | ||
| 773 | #define BYTECNT(x) ((x) << BYTECNT_SHIFT) | ||
| 774 | #define OP_WR 0x00000001U | ||
| 775 | |||
| 776 | #define PL_PF_INT_CAUSE 0x3c0 | ||
| 777 | #define PFSW 0x00000008U | ||
| 778 | #define PFSGE 0x00000004U | ||
| 779 | #define PFCIM 0x00000002U | ||
| 780 | #define PFMPS 0x00000001U | ||
| 781 | |||
| 782 | #define PL_PF_INT_ENABLE 0x3c4 | ||
| 783 | #define PL_PF_CTL 0x3c8 | ||
| 784 | #define SWINT 0x00000001U | ||
| 785 | |||
| 786 | #define PL_WHOAMI 0x19400 | ||
| 787 | #define SOURCEPF_MASK 0x00000700U | ||
| 788 | #define SOURCEPF_SHIFT 8 | ||
| 789 | #define SOURCEPF(x) ((x) << SOURCEPF_SHIFT) | ||
| 790 | #define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT) | ||
| 791 | #define ISVF 0x00000080U | ||
| 792 | #define VFID_MASK 0x0000007fU | ||
| 793 | #define VFID_SHIFT 0 | ||
| 794 | #define VFID(x) ((x) << VFID_SHIFT) | ||
| 795 | #define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT) | ||
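A short sketch of how the PL_WHOAMI fields above identify the issuing function, assuming the raw register word has already been read and the kernel type headers are in scope:

        /* Sketch: decode PL_WHOAMI into "which function am I?" information. */
        static void decode_whoami(u32 whoami, unsigned int *pf, bool *is_vf,
                                  unsigned int *vf)
        {
                *pf    = SOURCEPF_GET(whoami);  /* issuing physical function */
                *is_vf = (whoami & ISVF) != 0;  /* set when a VF issued the read */
                *vf    = VFID_GET(whoami);      /* VF number, meaningful only if ISVF */
        }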
| 796 | |||
| 797 | #define PL_INT_CAUSE 0x1940c | ||
| 798 | #define ULP_TX 0x08000000U | ||
| 799 | #define SGE 0x04000000U | ||
| 800 | #define HMA 0x02000000U | ||
| 801 | #define CPL_SWITCH 0x01000000U | ||
| 802 | #define ULP_RX 0x00800000U | ||
| 803 | #define PM_RX 0x00400000U | ||
| 804 | #define PM_TX 0x00200000U | ||
| 805 | #define MA 0x00100000U | ||
| 806 | #define TP 0x00080000U | ||
| 807 | #define LE 0x00040000U | ||
| 808 | #define EDC1 0x00020000U | ||
| 809 | #define EDC0 0x00010000U | ||
| 810 | #define MC 0x00008000U | ||
| 811 | #define PCIE 0x00004000U | ||
| 812 | #define PMU 0x00002000U | ||
| 813 | #define XGMAC_KR1 0x00001000U | ||
| 814 | #define XGMAC_KR0 0x00000800U | ||
| 815 | #define XGMAC1 0x00000400U | ||
| 816 | #define XGMAC0 0x00000200U | ||
| 817 | #define SMB 0x00000100U | ||
| 818 | #define SF 0x00000080U | ||
| 819 | #define PL 0x00000040U | ||
| 820 | #define NCSI 0x00000020U | ||
| 821 | #define MPS 0x00000010U | ||
| 822 | #define MI 0x00000008U | ||
| 823 | #define DBG 0x00000004U | ||
| 824 | #define I2CM 0x00000002U | ||
| 825 | #define CIM 0x00000001U | ||
| 826 | |||
| 827 | #define PL_INT_MAP0 0x19414 | ||
| 828 | #define PL_RST 0x19428 | ||
| 829 | #define PIORST 0x00000002U | ||
| 830 | #define PIORSTMODE 0x00000001U | ||
| 831 | |||
| 832 | #define PL_PL_INT_CAUSE 0x19430 | ||
| 833 | #define FATALPERR 0x00000010U | ||
| 834 | #define PERRVFID 0x00000001U | ||
| 835 | |||
| 836 | #define PL_REV 0x1943c | ||
| 837 | |||
| 838 | #define LE_DB_CONFIG 0x19c04 | ||
| 839 | #define HASHEN 0x00100000U | ||
| 840 | |||
| 841 | #define LE_DB_SERVER_INDEX 0x19c18 | ||
| 842 | #define LE_DB_ACT_CNT_IPV4 0x19c20 | ||
| 843 | #define LE_DB_ACT_CNT_IPV6 0x19c24 | ||
| 844 | |||
| 845 | #define LE_DB_INT_CAUSE 0x19c3c | ||
| 846 | #define REQQPARERR 0x00010000U | ||
| 847 | #define UNKNOWNCMD 0x00008000U | ||
| 848 | #define PARITYERR 0x00000040U | ||
| 849 | #define LIPMISS 0x00000020U | ||
| 850 | #define LIP0 0x00000010U | ||
| 851 | |||
| 852 | #define LE_DB_TID_HASHBASE 0x19df8 | ||
| 853 | |||
| 854 | #define NCSI_INT_CAUSE 0x1a0d8 | ||
| 855 | #define CIM_DM_PRTY_ERR 0x00000100U | ||
| 856 | #define MPS_DM_PRTY_ERR 0x00000080U | ||
| 857 | #define TXFIFO_PRTY_ERR 0x00000002U | ||
| 858 | #define RXFIFO_PRTY_ERR 0x00000001U | ||
| 859 | |||
| 860 | #define XGMAC_PORT_CFG2 0x1018 | ||
| 861 | #define PATEN 0x00040000U | ||
| 862 | #define MAGICEN 0x00020000U | ||
| 863 | |||
| 864 | #define XGMAC_PORT_MAGIC_MACID_LO 0x1024 | ||
| 865 | #define XGMAC_PORT_MAGIC_MACID_HI 0x1028 | ||
| 866 | |||
| 867 | #define XGMAC_PORT_EPIO_DATA0 0x10c0 | ||
| 868 | #define XGMAC_PORT_EPIO_DATA1 0x10c4 | ||
| 869 | #define XGMAC_PORT_EPIO_DATA2 0x10c8 | ||
| 870 | #define XGMAC_PORT_EPIO_DATA3 0x10cc | ||
| 871 | #define XGMAC_PORT_EPIO_OP 0x10d0 | ||
| 872 | #define EPIOWR 0x00000100U | ||
| 873 | #define ADDRESS_MASK 0x000000ffU | ||
| 874 | #define ADDRESS_SHIFT 0 | ||
| 875 | #define ADDRESS(x) ((x) << ADDRESS_SHIFT) | ||
| 876 | |||
| 877 | #define XGMAC_PORT_INT_CAUSE 0x10dc | ||
| 878 | #endif /* __T4_REGS_H */ | ||
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h new file mode 100644 index 000000000000..3393d05a388a --- /dev/null +++ b/drivers/net/cxgb4/t4fw_api.h | |||
| @@ -0,0 +1,1580 @@ | |||
| 1 | /* | ||
| 2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. | ||
| 5 | * | ||
| 6 | * This software is available to you under a choice of one of two | ||
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 8 | * General Public License (GPL) Version 2, available from the file | ||
| 9 | * COPYING in the main directory of this source tree, or the | ||
| 10 | * OpenIB.org BSD license below: | ||
| 11 | * | ||
| 12 | * Redistribution and use in source and binary forms, with or | ||
| 13 | * without modification, are permitted provided that the following | ||
| 14 | * conditions are met: | ||
| 15 | * | ||
| 16 | * - Redistributions of source code must retain the above | ||
| 17 | * copyright notice, this list of conditions and the following | ||
| 18 | * disclaimer. | ||
| 19 | * | ||
| 20 | * - Redistributions in binary form must reproduce the above | ||
| 21 | * copyright notice, this list of conditions and the following | ||
| 22 | * disclaimer in the documentation and/or other materials | ||
| 23 | * provided with the distribution. | ||
| 24 | * | ||
| 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 32 | * SOFTWARE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef _T4FW_INTERFACE_H_ | ||
| 36 | #define _T4FW_INTERFACE_H_ | ||
| 37 | |||
| 38 | #define FW_T4VF_SGE_BASE_ADDR 0x0000 | ||
| 39 | #define FW_T4VF_MPS_BASE_ADDR 0x0100 | ||
| 40 | #define FW_T4VF_PL_BASE_ADDR 0x0200 | ||
| 41 | #define FW_T4VF_MBDATA_BASE_ADDR 0x0240 | ||
| 42 | #define FW_T4VF_CIM_BASE_ADDR 0x0300 | ||
| 43 | |||
| 44 | enum fw_wr_opcodes { | ||
| 45 | FW_FILTER_WR = 0x02, | ||
| 46 | FW_ULPTX_WR = 0x04, | ||
| 47 | FW_TP_WR = 0x05, | ||
| 48 | FW_ETH_TX_PKT_WR = 0x08, | ||
| 49 | FW_FLOWC_WR = 0x0a, | ||
| 50 | FW_OFLD_TX_DATA_WR = 0x0b, | ||
| 51 | FW_CMD_WR = 0x10, | ||
| 52 | FW_ETH_TX_PKT_VM_WR = 0x11, | ||
| 53 | FW_RI_RES_WR = 0x0c, | ||
| 54 | FW_RI_INIT_WR = 0x0d, | ||
| 55 | FW_RI_RDMA_WRITE_WR = 0x14, | ||
| 56 | FW_RI_SEND_WR = 0x15, | ||
| 57 | FW_RI_RDMA_READ_WR = 0x16, | ||
| 58 | FW_RI_RECV_WR = 0x17, | ||
| 59 | FW_RI_BIND_MW_WR = 0x18, | ||
| 60 | FW_RI_FR_NSMR_WR = 0x19, | ||
| 61 | FW_RI_INV_LSTAG_WR = 0x1a, | ||
| 62 | FW_LASTC2E_WR = 0x40 | ||
| 63 | }; | ||
| 64 | |||
| 65 | struct fw_wr_hdr { | ||
| 66 | __be32 hi; | ||
| 67 | __be32 lo; | ||
| 68 | }; | ||
| 69 | |||
| 70 | #define FW_WR_OP(x) ((x) << 24) | ||
| 71 | #define FW_WR_ATOMIC(x) ((x) << 23) | ||
| 72 | #define FW_WR_FLUSH(x) ((x) << 22) | ||
| 73 | #define FW_WR_COMPL(x) ((x) << 21) | ||
| 74 | #define FW_WR_IMMDLEN(x) ((x) << 0) | ||
| 75 | |||
| 76 | #define FW_WR_EQUIQ (1U << 31) | ||
| 77 | #define FW_WR_EQUEQ (1U << 30) | ||
| 78 | #define FW_WR_FLOWID(x) ((x) << 8) | ||
| 79 | #define FW_WR_LEN16(x) ((x) << 0) | ||
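These macros build the two 32-bit words of the common work-request header defined above. A minimal sketch, assuming cpu_to_be32() from the kernel byte-order helpers and that the opcode, immediate length, flow id and 16-byte length were computed by the caller:

        /* Sketch: fill a generic fw_wr_hdr from precomputed fields. */
        static void init_wr_hdr(struct fw_wr_hdr *hdr, unsigned int op,
                                unsigned int immdlen, unsigned int flowid,
                                unsigned int len16)
        {
                hdr->hi = cpu_to_be32(FW_WR_OP(op) | FW_WR_IMMDLEN(immdlen));
                hdr->lo = cpu_to_be32(FW_WR_FLOWID(flowid) | FW_WR_LEN16(len16));
        }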
| 80 | |||
| 81 | struct fw_ulptx_wr { | ||
| 82 | __be32 op_to_compl; | ||
| 83 | __be32 flowid_len16; | ||
| 84 | u64 cookie; | ||
| 85 | }; | ||
| 86 | |||
| 87 | struct fw_tp_wr { | ||
| 88 | __be32 op_to_immdlen; | ||
| 89 | __be32 flowid_len16; | ||
| 90 | u64 cookie; | ||
| 91 | }; | ||
| 92 | |||
| 93 | struct fw_eth_tx_pkt_wr { | ||
| 94 | __be32 op_immdlen; | ||
| 95 | __be32 equiq_to_len16; | ||
| 96 | __be64 r3; | ||
| 97 | }; | ||
| 98 | |||
| 99 | enum fw_flowc_mnem { | ||
| 100 | FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */ | ||
| 101 | FW_FLOWC_MNEM_CH, | ||
| 102 | FW_FLOWC_MNEM_PORT, | ||
| 103 | FW_FLOWC_MNEM_IQID, | ||
| 104 | FW_FLOWC_MNEM_SNDNXT, | ||
| 105 | FW_FLOWC_MNEM_RCVNXT, | ||
| 106 | FW_FLOWC_MNEM_SNDBUF, | ||
| 107 | FW_FLOWC_MNEM_MSS, | ||
| 108 | }; | ||
| 109 | |||
| 110 | struct fw_flowc_mnemval { | ||
| 111 | u8 mnemonic; | ||
| 112 | u8 r4[3]; | ||
| 113 | __be32 val; | ||
| 114 | }; | ||
| 115 | |||
| 116 | struct fw_flowc_wr { | ||
| 117 | __be32 op_to_nparams; | ||
| 118 | #define FW_FLOWC_WR_NPARAMS(x) ((x) << 0) | ||
| 119 | __be32 flowid_len16; | ||
| 120 | struct fw_flowc_mnemval mnemval[0]; | ||
| 121 | }; | ||
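mnemval[] is a zero-length array that trails the fixed FLOWC header, so the LEN16 field must cover both parts. A sketch of a one-parameter FLOWC request under that assumption; DIV_ROUND_UP() and cpu_to_be32() come from the usual kernel headers, and the wrapper struct is purely illustrative:

        /* Sketch: a FLOWC work request carrying a single PORT parameter. */
        struct one_param_flowc {
                struct fw_flowc_wr wr;
                struct fw_flowc_mnemval param[1];
        };

        static void init_flowc_port(struct one_param_flowc *f, unsigned int flowid,
                                    u8 port)
        {
                f->wr.op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
                                                  FW_FLOWC_WR_NPARAMS(1));
                f->wr.flowid_len16 = cpu_to_be32(FW_WR_FLOWID(flowid) |
                                                 FW_WR_LEN16(DIV_ROUND_UP(sizeof(*f), 16)));
                f->param[0].mnemonic = FW_FLOWC_MNEM_PORT;
                f->param[0].val = cpu_to_be32(port);
        }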
| 122 | |||
| 123 | struct fw_ofld_tx_data_wr { | ||
| 124 | __be32 op_to_immdlen; | ||
| 125 | __be32 flowid_len16; | ||
| 126 | __be32 plen; | ||
| 127 | __be32 tunnel_to_proxy; | ||
| 128 | #define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19) | ||
| 129 | #define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18) | ||
| 130 | #define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17) | ||
| 131 | #define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16) | ||
| 132 | #define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15) | ||
| 133 | #define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14) | ||
| 134 | #define FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10) | ||
| 135 | #define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6) | ||
| 136 | }; | ||
| 137 | |||
| 138 | struct fw_cmd_wr { | ||
| 139 | __be32 op_dma; | ||
| 140 | #define FW_CMD_WR_DMA (1U << 17) | ||
| 141 | __be32 len16_pkd; | ||
| 142 | __be64 cookie_daddr; | ||
| 143 | }; | ||
| 144 | |||
| 145 | struct fw_eth_tx_pkt_vm_wr { | ||
| 146 | __be32 op_immdlen; | ||
| 147 | __be32 equiq_to_len16; | ||
| 148 | __be32 r3[2]; | ||
| 149 | u8 ethmacdst[6]; | ||
| 150 | u8 ethmacsrc[6]; | ||
| 151 | __be16 ethtype; | ||
| 152 | __be16 vlantci; | ||
| 153 | }; | ||
| 154 | |||
| 155 | #define FW_CMD_MAX_TIMEOUT 3000 | ||
| 156 | |||
| 157 | enum fw_cmd_opcodes { | ||
| 158 | FW_LDST_CMD = 0x01, | ||
| 159 | FW_RESET_CMD = 0x03, | ||
| 160 | FW_HELLO_CMD = 0x04, | ||
| 161 | FW_BYE_CMD = 0x05, | ||
| 162 | FW_INITIALIZE_CMD = 0x06, | ||
| 163 | FW_CAPS_CONFIG_CMD = 0x07, | ||
| 164 | FW_PARAMS_CMD = 0x08, | ||
| 165 | FW_PFVF_CMD = 0x09, | ||
| 166 | FW_IQ_CMD = 0x10, | ||
| 167 | FW_EQ_MNGT_CMD = 0x11, | ||
| 168 | FW_EQ_ETH_CMD = 0x12, | ||
| 169 | FW_EQ_CTRL_CMD = 0x13, | ||
| 170 | FW_EQ_OFLD_CMD = 0x21, | ||
| 171 | FW_VI_CMD = 0x14, | ||
| 172 | FW_VI_MAC_CMD = 0x15, | ||
| 173 | FW_VI_RXMODE_CMD = 0x16, | ||
| 174 | FW_VI_ENABLE_CMD = 0x17, | ||
| 175 | FW_ACL_MAC_CMD = 0x18, | ||
| 176 | FW_ACL_VLAN_CMD = 0x19, | ||
| 177 | FW_VI_STATS_CMD = 0x1a, | ||
| 178 | FW_PORT_CMD = 0x1b, | ||
| 179 | FW_PORT_STATS_CMD = 0x1c, | ||
| 180 | FW_PORT_LB_STATS_CMD = 0x1d, | ||
| 181 | FW_PORT_TRACE_CMD = 0x1e, | ||
| 182 | FW_PORT_TRACE_MMAP_CMD = 0x1f, | ||
| 183 | FW_RSS_IND_TBL_CMD = 0x20, | ||
| 184 | FW_RSS_GLB_CONFIG_CMD = 0x22, | ||
| 185 | FW_RSS_VI_CONFIG_CMD = 0x23, | ||
| 186 | FW_LASTC2E_CMD = 0x40, | ||
| 187 | FW_ERROR_CMD = 0x80, | ||
| 188 | FW_DEBUG_CMD = 0x81, | ||
| 189 | }; | ||
| 190 | |||
| 191 | enum fw_cmd_cap { | ||
| 192 | FW_CMD_CAP_PF = 0x01, | ||
| 193 | FW_CMD_CAP_DMAQ = 0x02, | ||
| 194 | FW_CMD_CAP_PORT = 0x04, | ||
| 195 | FW_CMD_CAP_PORTPROMISC = 0x08, | ||
| 196 | FW_CMD_CAP_PORTSTATS = 0x10, | ||
| 197 | FW_CMD_CAP_VF = 0x80, | ||
| 198 | }; | ||
| 199 | |||
| 200 | /* | ||
| 201 | * Generic command header flit0 | ||
| 202 | */ | ||
| 203 | struct fw_cmd_hdr { | ||
| 204 | __be32 hi; | ||
| 205 | __be32 lo; | ||
| 206 | }; | ||
| 207 | |||
| 208 | #define FW_CMD_OP(x) ((x) << 24) | ||
| 209 | #define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff) | ||
| 210 | #define FW_CMD_REQUEST (1U << 23) | ||
| 211 | #define FW_CMD_READ (1U << 22) | ||
| 212 | #define FW_CMD_WRITE (1U << 21) | ||
| 213 | #define FW_CMD_EXEC (1U << 20) | ||
| 214 | #define FW_CMD_RAMASK(x) ((x) << 20) | ||
| 215 | #define FW_CMD_RETVAL(x) ((x) << 8) | ||
| 216 | #define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff) | ||
| 217 | #define FW_CMD_LEN16(x) ((x) << 0) | ||
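Every firmware command below begins with these two words. A minimal sketch of composing the header for a read request, assuming cpu_to_be32() is available and the opcode and 16-byte length are supplied by the caller:

        /* Sketch: first flit of a firmware read command. */
        static void init_cmd_hdr(struct fw_cmd_hdr *hdr, unsigned int op,
                                 unsigned int len16)
        {
                hdr->hi = cpu_to_be32(FW_CMD_OP(op) | FW_CMD_REQUEST | FW_CMD_READ);
                hdr->lo = cpu_to_be32(FW_CMD_LEN16(len16));
        }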
| 218 | |||
| 219 | enum fw_ldst_addrspc { | ||
| 220 | FW_LDST_ADDRSPC_FIRMWARE = 0x0001, | ||
| 221 | FW_LDST_ADDRSPC_SGE_EGRC = 0x0008, | ||
| 222 | FW_LDST_ADDRSPC_SGE_INGC = 0x0009, | ||
| 223 | FW_LDST_ADDRSPC_SGE_FLMC = 0x000a, | ||
| 224 | FW_LDST_ADDRSPC_SGE_CONMC = 0x000b, | ||
| 225 | FW_LDST_ADDRSPC_TP_PIO = 0x0010, | ||
| 226 | FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011, | ||
| 227 | FW_LDST_ADDRSPC_TP_MIB = 0x0012, | ||
| 228 | FW_LDST_ADDRSPC_MDIO = 0x0018, | ||
| 229 | FW_LDST_ADDRSPC_MPS = 0x0020, | ||
| 230 | FW_LDST_ADDRSPC_FUNC = 0x0028 | ||
| 231 | }; | ||
| 232 | |||
| 233 | enum fw_ldst_mps_fid { | ||
| 234 | FW_LDST_MPS_ATRB, | ||
| 235 | FW_LDST_MPS_RPLC | ||
| 236 | }; | ||
| 237 | |||
| 238 | enum fw_ldst_func_access_ctl { | ||
| 239 | FW_LDST_FUNC_ACC_CTL_VIID, | ||
| 240 | FW_LDST_FUNC_ACC_CTL_FID | ||
| 241 | }; | ||
| 242 | |||
| 243 | enum fw_ldst_func_mod_index { | ||
| 244 | FW_LDST_FUNC_MPS | ||
| 245 | }; | ||
| 246 | |||
| 247 | struct fw_ldst_cmd { | ||
| 248 | __be32 op_to_addrspace; | ||
| 249 | #define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0) | ||
| 250 | __be32 cycles_to_len16; | ||
| 251 | union fw_ldst { | ||
| 252 | struct fw_ldst_addrval { | ||
| 253 | __be32 addr; | ||
| 254 | __be32 val; | ||
| 255 | } addrval; | ||
| 256 | struct fw_ldst_idctxt { | ||
| 257 | __be32 physid; | ||
| 258 | __be32 msg_pkd; | ||
| 259 | __be32 ctxt_data7; | ||
| 260 | __be32 ctxt_data6; | ||
| 261 | __be32 ctxt_data5; | ||
| 262 | __be32 ctxt_data4; | ||
| 263 | __be32 ctxt_data3; | ||
| 264 | __be32 ctxt_data2; | ||
| 265 | __be32 ctxt_data1; | ||
| 266 | __be32 ctxt_data0; | ||
| 267 | } idctxt; | ||
| 268 | struct fw_ldst_mdio { | ||
| 269 | __be16 paddr_mmd; | ||
| 270 | __be16 raddr; | ||
| 271 | __be16 vctl; | ||
| 272 | __be16 rval; | ||
| 273 | } mdio; | ||
| 274 | struct fw_ldst_mps { | ||
| 275 | __be16 fid_ctl; | ||
| 276 | __be16 rplcpf_pkd; | ||
| 277 | __be32 rplc127_96; | ||
| 278 | __be32 rplc95_64; | ||
| 279 | __be32 rplc63_32; | ||
| 280 | __be32 rplc31_0; | ||
| 281 | __be32 atrb; | ||
| 282 | __be16 vlan[16]; | ||
| 283 | } mps; | ||
| 284 | struct fw_ldst_func { | ||
| 285 | u8 access_ctl; | ||
| 286 | u8 mod_index; | ||
| 287 | __be16 ctl_id; | ||
| 288 | __be32 offset; | ||
| 289 | __be64 data0; | ||
| 290 | __be64 data1; | ||
| 291 | } func; | ||
| 292 | } u; | ||
| 293 | }; | ||
| 294 | |||
| 295 | #define FW_LDST_CMD_MSG(x) ((x) << 31) | ||
| 296 | #define FW_LDST_CMD_PADDR(x) ((x) << 8) | ||
| 297 | #define FW_LDST_CMD_MMD(x) ((x) << 0) | ||
| 298 | #define FW_LDST_CMD_FID(x) ((x) << 15) | ||
| 299 | #define FW_LDST_CMD_CTL(x) ((x) << 0) | ||
| 300 | #define FW_LDST_CMD_RPLCPF(x) ((x) << 0) | ||
| 301 | |||
| 302 | struct fw_reset_cmd { | ||
| 303 | __be32 op_to_write; | ||
| 304 | __be32 retval_len16; | ||
| 305 | __be32 val; | ||
| 306 | __be32 r3; | ||
| 307 | }; | ||
| 308 | |||
| 309 | struct fw_hello_cmd { | ||
| 310 | __be32 op_to_write; | ||
| 311 | __be32 retval_len16; | ||
| 312 | __be32 err_to_mbasyncnot; | ||
| 313 | #define FW_HELLO_CMD_ERR (1U << 31) | ||
| 314 | #define FW_HELLO_CMD_INIT (1U << 30) | ||
| 315 | #define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29) | ||
| 316 | #define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28) | ||
| 317 | #define FW_HELLO_CMD_MBMASTER(x) ((x) << 24) | ||
| 318 | #define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20) | ||
| 319 | __be32 fwrev; | ||
| 320 | }; | ||
| 321 | |||
| 322 | struct fw_bye_cmd { | ||
| 323 | __be32 op_to_write; | ||
| 324 | __be32 retval_len16; | ||
| 325 | __be64 r3; | ||
| 326 | }; | ||
| 327 | |||
| 328 | struct fw_initialize_cmd { | ||
| 329 | __be32 op_to_write; | ||
| 330 | __be32 retval_len16; | ||
| 331 | __be64 r3; | ||
| 332 | }; | ||
| 333 | |||
| 334 | enum fw_caps_config_hm { | ||
| 335 | FW_CAPS_CONFIG_HM_PCIE = 0x00000001, | ||
| 336 | FW_CAPS_CONFIG_HM_PL = 0x00000002, | ||
| 337 | FW_CAPS_CONFIG_HM_SGE = 0x00000004, | ||
| 338 | FW_CAPS_CONFIG_HM_CIM = 0x00000008, | ||
| 339 | FW_CAPS_CONFIG_HM_ULPTX = 0x00000010, | ||
| 340 | FW_CAPS_CONFIG_HM_TP = 0x00000020, | ||
| 341 | FW_CAPS_CONFIG_HM_ULPRX = 0x00000040, | ||
| 342 | FW_CAPS_CONFIG_HM_PMRX = 0x00000080, | ||
| 343 | FW_CAPS_CONFIG_HM_PMTX = 0x00000100, | ||
| 344 | FW_CAPS_CONFIG_HM_MC = 0x00000200, | ||
| 345 | FW_CAPS_CONFIG_HM_LE = 0x00000400, | ||
| 346 | FW_CAPS_CONFIG_HM_MPS = 0x00000800, | ||
| 347 | FW_CAPS_CONFIG_HM_XGMAC = 0x00001000, | ||
| 348 | FW_CAPS_CONFIG_HM_CPLSWITCH = 0x00002000, | ||
| 349 | FW_CAPS_CONFIG_HM_T4DBG = 0x00004000, | ||
| 350 | FW_CAPS_CONFIG_HM_MI = 0x00008000, | ||
| 351 | FW_CAPS_CONFIG_HM_I2CM = 0x00010000, | ||
| 352 | FW_CAPS_CONFIG_HM_NCSI = 0x00020000, | ||
| 353 | FW_CAPS_CONFIG_HM_SMB = 0x00040000, | ||
| 354 | FW_CAPS_CONFIG_HM_MA = 0x00080000, | ||
| 355 | FW_CAPS_CONFIG_HM_EDRAM = 0x00100000, | ||
| 356 | FW_CAPS_CONFIG_HM_PMU = 0x00200000, | ||
| 357 | FW_CAPS_CONFIG_HM_UART = 0x00400000, | ||
| 358 | FW_CAPS_CONFIG_HM_SF = 0x00800000, | ||
| 359 | }; | ||
| 360 | |||
| 361 | enum fw_caps_config_nbm { | ||
| 362 | FW_CAPS_CONFIG_NBM_IPMI = 0x00000001, | ||
| 363 | FW_CAPS_CONFIG_NBM_NCSI = 0x00000002, | ||
| 364 | }; | ||
| 365 | |||
| 366 | enum fw_caps_config_link { | ||
| 367 | FW_CAPS_CONFIG_LINK_PPP = 0x00000001, | ||
| 368 | FW_CAPS_CONFIG_LINK_QFC = 0x00000002, | ||
| 369 | FW_CAPS_CONFIG_LINK_DCBX = 0x00000004, | ||
| 370 | }; | ||
| 371 | |||
| 372 | enum fw_caps_config_switch { | ||
| 373 | FW_CAPS_CONFIG_SWITCH_INGRESS = 0x00000001, | ||
| 374 | FW_CAPS_CONFIG_SWITCH_EGRESS = 0x00000002, | ||
| 375 | }; | ||
| 376 | |||
| 377 | enum fw_caps_config_nic { | ||
| 378 | FW_CAPS_CONFIG_NIC = 0x00000001, | ||
| 379 | FW_CAPS_CONFIG_NIC_VM = 0x00000002, | ||
| 380 | }; | ||
| 381 | |||
| 382 | enum fw_caps_config_ofld { | ||
| 383 | FW_CAPS_CONFIG_OFLD = 0x00000001, | ||
| 384 | }; | ||
| 385 | |||
| 386 | enum fw_caps_config_rdma { | ||
| 387 | FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001, | ||
| 388 | FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002, | ||
| 389 | }; | ||
| 390 | |||
| 391 | enum fw_caps_config_iscsi { | ||
| 392 | FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001, | ||
| 393 | FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002, | ||
| 394 | FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004, | ||
| 395 | FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008, | ||
| 396 | }; | ||
| 397 | |||
| 398 | enum fw_caps_config_fcoe { | ||
| 399 | FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001, | ||
| 400 | FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002, | ||
| 401 | }; | ||
| 402 | |||
| 403 | struct fw_caps_config_cmd { | ||
| 404 | __be32 op_to_write; | ||
| 405 | __be32 retval_len16; | ||
| 406 | __be32 r2; | ||
| 407 | __be32 hwmbitmap; | ||
| 408 | __be16 nbmcaps; | ||
| 409 | __be16 linkcaps; | ||
| 410 | __be16 switchcaps; | ||
| 411 | __be16 r3; | ||
| 412 | __be16 niccaps; | ||
| 413 | __be16 ofldcaps; | ||
| 414 | __be16 rdmacaps; | ||
| 415 | __be16 r4; | ||
| 416 | __be16 iscsicaps; | ||
| 417 | __be16 fcoecaps; | ||
| 418 | __be32 r5; | ||
| 419 | __be64 r6; | ||
| 420 | }; | ||
| 421 | |||
| 422 | /* | ||
| 423 | * params command mnemonics | ||
| 424 | */ | ||
| 425 | enum fw_params_mnem { | ||
| 426 | FW_PARAMS_MNEM_DEV = 1, /* device params */ | ||
| 427 | FW_PARAMS_MNEM_PFVF = 2, /* function params */ | ||
| 428 | FW_PARAMS_MNEM_REG = 3, /* limited register access */ | ||
| 429 | FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ | ||
| 430 | FW_PARAMS_MNEM_LAST | ||
| 431 | }; | ||
| 432 | |||
| 433 | /* | ||
| 434 | * device parameters | ||
| 435 | */ | ||
| 436 | enum fw_params_param_dev { | ||
| 437 | FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */ | ||
| 438 | FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */ | ||
| 439 | FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs | ||
| 440 | * allocated by the device's | ||
| 441 | * Lookup Engine | ||
| 442 | */ | ||
| 443 | FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03, | ||
| 444 | FW_PARAMS_PARAM_DEV_INTVER_NIC = 0x04, | ||
| 445 | FW_PARAMS_PARAM_DEV_INTVER_VNIC = 0x05, | ||
| 446 | FW_PARAMS_PARAM_DEV_INTVER_OFLD = 0x06, | ||
| 447 | FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07, | ||
| 448 | FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08, | ||
| 449 | FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09, | ||
| 450 | FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A | ||
| 451 | }; | ||
| 452 | |||
| 453 | /* | ||
| 454 | * physical and virtual function parameters | ||
| 455 | */ | ||
| 456 | enum fw_params_param_pfvf { | ||
| 457 | FW_PARAMS_PARAM_PFVF_RWXCAPS = 0x00, | ||
| 458 | FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01, | ||
| 459 | FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02, | ||
| 460 | FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03, | ||
| 461 | FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04, | ||
| 462 | FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05, | ||
| 463 | FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06, | ||
| 464 | FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07, | ||
| 465 | FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08, | ||
| 466 | FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09, | ||
| 467 | FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A, | ||
| 468 | FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B, | ||
| 469 | FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C, | ||
| 470 | FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D, | ||
| 471 | FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E, | ||
| 472 | FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F, | ||
| 473 | FW_PARAMS_PARAM_PFVF_RQ_END = 0x10, | ||
| 474 | FW_PARAMS_PARAM_PFVF_PBL_START = 0x11, | ||
| 475 | FW_PARAMS_PARAM_PFVF_PBL_END = 0x12, | ||
| 476 | FW_PARAMS_PARAM_PFVF_L2T_START = 0x13, | ||
| 477 | FW_PARAMS_PARAM_PFVF_L2T_END = 0x14, | ||
| 478 | FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20, | ||
| 479 | }; | ||
| 480 | |||
| 481 | /* | ||
| 482 | * dma queue parameters | ||
| 483 | */ | ||
| 484 | enum fw_params_param_dmaq { | ||
| 485 | FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00, | ||
| 486 | FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01, | ||
| 487 | FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10, | ||
| 488 | FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11, | ||
| 489 | FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12, | ||
| 490 | }; | ||
| 491 | |||
| 492 | #define FW_PARAMS_MNEM(x) ((x) << 24) | ||
| 493 | #define FW_PARAMS_PARAM_X(x) ((x) << 16) | ||
| 494 | #define FW_PARAMS_PARAM_Y(x) ((x) << 8) | ||
| 495 | #define FW_PARAMS_PARAM_Z(x) ((x) << 0) | ||
| 496 | #define FW_PARAMS_PARAM_XYZ(x) ((x) << 0) | ||
| 497 | #define FW_PARAMS_PARAM_YZ(x) ((x) << 0) | ||
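The mnemonic word of each fw_params_cmd parameter slot is built from these composition macros. A sketch of encoding a query for the device core-clock parameter, using only definitions that appear above:

        /* Sketch: mnemonic word asking the firmware for the core clock (kHz). */
        static __be32 cclk_param_mnem(void)
        {
                return cpu_to_be32(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
                                   FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
        }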
| 498 | |||
| 499 | struct fw_params_cmd { | ||
| 500 | __be32 op_to_vfn; | ||
| 501 | __be32 retval_len16; | ||
| 502 | struct fw_params_param { | ||
| 503 | __be32 mnem; | ||
| 504 | __be32 val; | ||
| 505 | } param[7]; | ||
| 506 | }; | ||
| 507 | |||
| 508 | #define FW_PARAMS_CMD_PFN(x) ((x) << 8) | ||
| 509 | #define FW_PARAMS_CMD_VFN(x) ((x) << 0) | ||
| 510 | |||
| 511 | struct fw_pfvf_cmd { | ||
| 512 | __be32 op_to_vfn; | ||
| 513 | __be32 retval_len16; | ||
| 514 | __be32 niqflint_niq; | ||
| 515 | __be32 cmask_to_neq; | ||
| 516 | __be32 tc_to_nexactf; | ||
| 517 | __be32 r_caps_to_nethctrl; | ||
| 518 | __be16 nricq; | ||
| 519 | __be16 nriqp; | ||
| 520 | __be32 r4; | ||
| 521 | }; | ||
| 522 | |||
| 523 | #define FW_PFVF_CMD_PFN(x) ((x) << 8) | ||
| 524 | #define FW_PFVF_CMD_VFN(x) ((x) << 0) | ||
| 525 | |||
| 526 | #define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20) | ||
| 527 | #define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff) | ||
| 528 | |||
| 529 | #define FW_PFVF_CMD_NIQ(x) ((x) << 0) | ||
| 530 | #define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff) | ||
| 531 | |||
| 532 | #define FW_PFVF_CMD_CMASK(x) ((x) << 24) | ||
| 533 | #define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & 0xf) | ||
| 534 | |||
| 535 | #define FW_PFVF_CMD_PMASK(x) ((x) << 20) | ||
| 536 | #define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & 0xf) | ||
| 537 | |||
| 538 | #define FW_PFVF_CMD_NEQ(x) ((x) << 0) | ||
| 539 | #define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff) | ||
| 540 | |||
| 541 | #define FW_PFVF_CMD_TC(x) ((x) << 24) | ||
| 542 | #define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff) | ||
| 543 | |||
| 544 | #define FW_PFVF_CMD_NVI(x) ((x) << 16) | ||
| 545 | #define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff) | ||
| 546 | |||
| 547 | #define FW_PFVF_CMD_NEXACTF(x) ((x) << 0) | ||
| 548 | #define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff) | ||
| 549 | |||
| 550 | #define FW_PFVF_CMD_R_CAPS(x) ((x) << 24) | ||
| 551 | #define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff) | ||
| 552 | |||
| 553 | #define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16) | ||
| 554 | #define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff) | ||
| 555 | |||
| 556 | #define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0) | ||
| 557 | #define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff) | ||
| 558 | |||
| 559 | enum fw_iq_type { | ||
| 560 | FW_IQ_TYPE_FL_INT_CAP, | ||
| 561 | FW_IQ_TYPE_NO_FL_INT_CAP | ||
| 562 | }; | ||
| 563 | |||
| 564 | struct fw_iq_cmd { | ||
| 565 | __be32 op_to_vfn; | ||
| 566 | __be32 alloc_to_len16; | ||
| 567 | __be16 physiqid; | ||
| 568 | __be16 iqid; | ||
| 569 | __be16 fl0id; | ||
| 570 | __be16 fl1id; | ||
| 571 | __be32 type_to_iqandstindex; | ||
| 572 | __be16 iqdroprss_to_iqesize; | ||
| 573 | __be16 iqsize; | ||
| 574 | __be64 iqaddr; | ||
| 575 | __be32 iqns_to_fl0congen; | ||
| 576 | __be16 fl0dcaen_to_fl0cidxfthresh; | ||
| 577 | __be16 fl0size; | ||
| 578 | __be64 fl0addr; | ||
| 579 | __be32 fl1cngchmap_to_fl1congen; | ||
| 580 | __be16 fl1dcaen_to_fl1cidxfthresh; | ||
| 581 | __be16 fl1size; | ||
| 582 | __be64 fl1addr; | ||
| 583 | }; | ||
| 584 | |||
| 585 | #define FW_IQ_CMD_PFN(x) ((x) << 8) | ||
| 586 | #define FW_IQ_CMD_VFN(x) ((x) << 0) | ||
| 587 | |||
| 588 | #define FW_IQ_CMD_ALLOC (1U << 31) | ||
| 589 | #define FW_IQ_CMD_FREE (1U << 30) | ||
| 590 | #define FW_IQ_CMD_MODIFY (1U << 29) | ||
| 591 | #define FW_IQ_CMD_IQSTART(x) ((x) << 28) | ||
| 592 | #define FW_IQ_CMD_IQSTOP(x) ((x) << 27) | ||
| 593 | |||
| 594 | #define FW_IQ_CMD_TYPE(x) ((x) << 29) | ||
| 595 | #define FW_IQ_CMD_IQASYNCH(x) ((x) << 28) | ||
| 596 | #define FW_IQ_CMD_VIID(x) ((x) << 16) | ||
| 597 | #define FW_IQ_CMD_IQANDST(x) ((x) << 15) | ||
| 598 | #define FW_IQ_CMD_IQANUS(x) ((x) << 14) | ||
| 599 | #define FW_IQ_CMD_IQANUD(x) ((x) << 12) | ||
| 600 | #define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0) | ||
| 601 | |||
| 602 | #define FW_IQ_CMD_IQDROPRSS (1U << 15) | ||
| 603 | #define FW_IQ_CMD_IQGTSMODE (1U << 14) | ||
| 604 | #define FW_IQ_CMD_IQPCIECH(x) ((x) << 12) | ||
| 605 | #define FW_IQ_CMD_IQDCAEN(x) ((x) << 11) | ||
| 606 | #define FW_IQ_CMD_IQDCACPU(x) ((x) << 6) | ||
| 607 | #define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4) | ||
| 608 | #define FW_IQ_CMD_IQO (1U << 3) | ||
| 609 | #define FW_IQ_CMD_IQCPRIO(x) ((x) << 2) | ||
| 610 | #define FW_IQ_CMD_IQESIZE(x) ((x) << 0) | ||
| 611 | |||
| 612 | #define FW_IQ_CMD_IQNS(x) ((x) << 31) | ||
| 613 | #define FW_IQ_CMD_IQRO(x) ((x) << 30) | ||
| 614 | #define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28) | ||
| 615 | #define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27) | ||
| 616 | #define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26) | ||
| 617 | #define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20) | ||
| 618 | #define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15) | ||
| 619 | #define FW_IQ_CMD_FL0DBP(x) ((x) << 14) | ||
| 620 | #define FW_IQ_CMD_FL0DATANS(x) ((x) << 13) | ||
| 621 | #define FW_IQ_CMD_FL0DATARO(x) ((x) << 12) | ||
| 622 | #define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11) | ||
| 623 | #define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10) | ||
| 624 | #define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9) | ||
| 625 | #define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8) | ||
| 626 | #define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7) | ||
| 627 | #define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6) | ||
| 628 | #define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4) | ||
| 629 | #define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3) | ||
| 630 | #define FW_IQ_CMD_FL0PADEN (1U << 2) | ||
| 631 | #define FW_IQ_CMD_FL0PACKEN (1U << 1) | ||
| 632 | #define FW_IQ_CMD_FL0CONGEN (1U << 0) | ||
| 633 | |||
| 634 | #define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15) | ||
| 635 | #define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10) | ||
| 636 | #define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7) | ||
| 637 | #define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4) | ||
| 638 | #define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3) | ||
| 639 | #define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0) | ||
| 640 | |||
| 641 | #define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20) | ||
| 642 | #define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15) | ||
| 643 | #define FW_IQ_CMD_FL1DBP(x) ((x) << 14) | ||
| 644 | #define FW_IQ_CMD_FL1DATANS(x) ((x) << 13) | ||
| 645 | #define FW_IQ_CMD_FL1DATARO(x) ((x) << 12) | ||
| 646 | #define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11) | ||
| 647 | #define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10) | ||
| 648 | #define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9) | ||
| 649 | #define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8) | ||
| 650 | #define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7) | ||
| 651 | #define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6) | ||
| 652 | #define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4) | ||
| 653 | #define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3) | ||
| 654 | #define FW_IQ_CMD_FL1PADEN (1U << 2) | ||
| 655 | #define FW_IQ_CMD_FL1PACKEN (1U << 1) | ||
| 656 | #define FW_IQ_CMD_FL1CONGEN (1U << 0) | ||
| 657 | |||
| 658 | #define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15) | ||
| 659 | #define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10) | ||
| 660 | #define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7) | ||
| 661 | #define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4) | ||
| 662 | #define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3) | ||
| 663 | #define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0) | ||
| 664 | |||
| 665 | struct fw_eq_eth_cmd { | ||
| 666 | __be32 op_to_vfn; | ||
| 667 | __be32 alloc_to_len16; | ||
| 668 | __be32 eqid_pkd; | ||
| 669 | __be32 physeqid_pkd; | ||
| 670 | __be32 fetchszm_to_iqid; | ||
| 671 | __be32 dcaen_to_eqsize; | ||
| 672 | __be64 eqaddr; | ||
| 673 | __be32 viid_pkd; | ||
| 674 | __be32 r8_lo; | ||
| 675 | __be64 r9; | ||
| 676 | }; | ||
| 677 | |||
| 678 | #define FW_EQ_ETH_CMD_PFN(x) ((x) << 8) | ||
| 679 | #define FW_EQ_ETH_CMD_VFN(x) ((x) << 0) | ||
| 680 | #define FW_EQ_ETH_CMD_ALLOC (1U << 31) | ||
| 681 | #define FW_EQ_ETH_CMD_FREE (1U << 30) | ||
| 682 | #define FW_EQ_ETH_CMD_MODIFY (1U << 29) | ||
| 683 | #define FW_EQ_ETH_CMD_EQSTART (1U << 28) | ||
| 684 | #define FW_EQ_ETH_CMD_EQSTOP (1U << 27) | ||
| 685 | |||
| 686 | #define FW_EQ_ETH_CMD_EQID(x) ((x) << 0) | ||
| 687 | #define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) | ||
| 688 | #define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0) | ||
| 689 | |||
| 690 | #define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26) | ||
| 691 | #define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25) | ||
| 692 | #define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24) | ||
| 693 | #define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23) | ||
| 694 | #define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22) | ||
| 695 | #define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20) | ||
| 696 | #define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19) | ||
| 697 | #define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18) | ||
| 698 | #define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16) | ||
| 699 | #define FW_EQ_ETH_CMD_IQID(x) ((x) << 0) | ||
| 700 | |||
| 701 | #define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31) | ||
| 702 | #define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26) | ||
| 703 | #define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23) | ||
| 704 | #define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20) | ||
| 705 | #define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19) | ||
| 706 | #define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16) | ||
| 707 | #define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0) | ||
| 708 | |||
| 709 | #define FW_EQ_ETH_CMD_VIID(x) ((x) << 16) | ||
| 710 | |||
| 711 | struct fw_eq_ctrl_cmd { | ||
| 712 | __be32 op_to_vfn; | ||
| 713 | __be32 alloc_to_len16; | ||
| 714 | __be32 cmpliqid_eqid; | ||
| 715 | __be32 physeqid_pkd; | ||
| 716 | __be32 fetchszm_to_iqid; | ||
| 717 | __be32 dcaen_to_eqsize; | ||
| 718 | __be64 eqaddr; | ||
| 719 | }; | ||
| 720 | |||
| 721 | #define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8) | ||
| 722 | #define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0) | ||
| 723 | |||
| 724 | #define FW_EQ_CTRL_CMD_ALLOC (1U << 31) | ||
| 725 | #define FW_EQ_CTRL_CMD_FREE (1U << 30) | ||
| 726 | #define FW_EQ_CTRL_CMD_MODIFY (1U << 29) | ||
| 727 | #define FW_EQ_CTRL_CMD_EQSTART (1U << 28) | ||
| 728 | #define FW_EQ_CTRL_CMD_EQSTOP (1U << 27) | ||
| 729 | |||
| 730 | #define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20) | ||
| 731 | #define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0) | ||
| 732 | #define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) | ||
| 733 | #define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) | ||
| 734 | |||
| 735 | #define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26) | ||
| 736 | #define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25) | ||
| 737 | #define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24) | ||
| 738 | #define FW_EQ_CTRL_CMD_FETCHNS (1U << 23) | ||
| 739 | #define FW_EQ_CTRL_CMD_FETCHRO (1U << 22) | ||
| 740 | #define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20) | ||
| 741 | #define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19) | ||
| 742 | #define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18) | ||
| 743 | #define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16) | ||
| 744 | #define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0) | ||
| 745 | |||
| 746 | #define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31) | ||
| 747 | #define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26) | ||
| 748 | #define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23) | ||
| 749 | #define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20) | ||
| 750 | #define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19) | ||
| 751 | #define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16) | ||
| 752 | #define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0) | ||
| 753 | |||
| 754 | struct fw_eq_ofld_cmd { | ||
| 755 | __be32 op_to_vfn; | ||
| 756 | __be32 alloc_to_len16; | ||
| 757 | __be32 eqid_pkd; | ||
| 758 | __be32 physeqid_pkd; | ||
| 759 | __be32 fetchszm_to_iqid; | ||
| 760 | __be32 dcaen_to_eqsize; | ||
| 761 | __be64 eqaddr; | ||
| 762 | }; | ||
| 763 | |||
| 764 | #define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8) | ||
| 765 | #define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0) | ||
| 766 | |||
| 767 | #define FW_EQ_OFLD_CMD_ALLOC (1U << 31) | ||
| 768 | #define FW_EQ_OFLD_CMD_FREE (1U << 30) | ||
| 769 | #define FW_EQ_OFLD_CMD_MODIFY (1U << 29) | ||
| 770 | #define FW_EQ_OFLD_CMD_EQSTART (1U << 28) | ||
| 771 | #define FW_EQ_OFLD_CMD_EQSTOP (1U << 27) | ||
| 772 | |||
| 773 | #define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0) | ||
| 774 | #define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) | ||
| 775 | #define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) | ||
| 776 | |||
| 777 | #define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26) | ||
| 778 | #define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25) | ||
| 779 | #define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24) | ||
| 780 | #define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23) | ||
| 781 | #define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22) | ||
| 782 | #define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20) | ||
| 783 | #define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19) | ||
| 784 | #define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18) | ||
| 785 | #define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16) | ||
| 786 | #define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0) | ||
| 787 | |||
| 788 | #define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31) | ||
| 789 | #define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26) | ||
| 790 | #define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23) | ||
| 791 | #define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20) | ||
| 792 | #define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19) | ||
| 793 | #define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16) | ||
| 794 | #define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0) | ||
| 795 | |||
| 796 | /* | ||
| 797 | * Macros for VIID parsing: | ||
| 798 | * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number | ||
| 799 | */ | ||
| 800 | #define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7) | ||
| 801 | #define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1) | ||
| 802 | #define FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F) | ||
| 803 | |||
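To make the bit layout in the comment above concrete, here is a small self-contained C sketch that pulls the three fields out of a sample VIID. The accessor macros are copied from above and the VIID value itself is invented.

#include <stdio.h>
#include <stdint.h>

/* Copies of the accessors above: [10:8] PFN, [7] VI Valid, [6:0] VI number. */
#define FW_VIID_PFN_GET(x)   (((x) >> 8) & 0x7)
#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1)
#define FW_VIID_VIN_GET(x)   (((x) >> 0) & 0x7F)

int main(void)
{
        uint16_t viid = 0x02a5;  /* invented example value */

        printf("PFN=%u valid=%u VIN=%u\n",
               (unsigned)FW_VIID_PFN_GET(viid),
               (unsigned)FW_VIID_VIVLD_GET(viid),
               (unsigned)FW_VIID_VIN_GET(viid));
        return 0;
}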
| 804 | struct fw_vi_cmd { | ||
| 805 | __be32 op_to_vfn; | ||
| 806 | __be32 alloc_to_len16; | ||
| 807 | __be16 viid_pkd; | ||
| 808 | u8 mac[6]; | ||
| 809 | u8 portid_pkd; | ||
| 810 | u8 nmac; | ||
| 811 | u8 nmac0[6]; | ||
| 812 | __be16 rsssize_pkd; | ||
| 813 | u8 nmac1[6]; | ||
| 814 | __be16 r7; | ||
| 815 | u8 nmac2[6]; | ||
| 816 | __be16 r8; | ||
| 817 | u8 nmac3[6]; | ||
| 818 | __be64 r9; | ||
| 819 | __be64 r10; | ||
| 820 | }; | ||
| 821 | |||
| 822 | #define FW_VI_CMD_PFN(x) ((x) << 8) | ||
| 823 | #define FW_VI_CMD_VFN(x) ((x) << 0) | ||
| 824 | #define FW_VI_CMD_ALLOC (1U << 31) | ||
| 825 | #define FW_VI_CMD_FREE (1U << 30) | ||
| 826 | #define FW_VI_CMD_VIID(x) ((x) << 0) | ||
| 827 | #define FW_VI_CMD_PORTID(x) ((x) << 4) | ||
| 828 | #define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff) | ||
| 829 | |||
| 830 | /* Special VI_MAC command index ids */ | ||
| 831 | #define FW_VI_MAC_ADD_MAC 0x3FF | ||
| 832 | #define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE | ||
| 833 | #define FW_VI_MAC_MAC_BASED_FREE 0x3FD | ||
| 834 | |||
| 835 | enum fw_vi_mac_smac { | ||
| 836 | FW_VI_MAC_MPS_TCAM_ENTRY, | ||
| 837 | FW_VI_MAC_MPS_TCAM_ONLY, | ||
| 838 | FW_VI_MAC_SMT_ONLY, | ||
| 839 | FW_VI_MAC_SMT_AND_MPSTCAM | ||
| 840 | }; | ||
| 841 | |||
| 842 | enum fw_vi_mac_result { | ||
| 843 | FW_VI_MAC_R_SUCCESS, | ||
| 844 | FW_VI_MAC_R_F_NONEXISTENT_NOMEM, | ||
| 845 | FW_VI_MAC_R_SMAC_FAIL, | ||
| 846 | FW_VI_MAC_R_F_ACL_CHECK | ||
| 847 | }; | ||
| 848 | |||
| 849 | struct fw_vi_mac_cmd { | ||
| 850 | __be32 op_to_viid; | ||
| 851 | __be32 freemacs_to_len16; | ||
| 852 | union fw_vi_mac { | ||
| 853 | struct fw_vi_mac_exact { | ||
| 854 | __be16 valid_to_idx; | ||
| 855 | u8 macaddr[6]; | ||
| 856 | } exact[7]; | ||
| 857 | struct fw_vi_mac_hash { | ||
| 858 | __be64 hashvec; | ||
| 859 | } hash; | ||
| 860 | } u; | ||
| 861 | }; | ||
| 862 | |||
| 863 | #define FW_VI_MAC_CMD_VIID(x) ((x) << 0) | ||
| 864 | #define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31) | ||
| 865 | #define FW_VI_MAC_CMD_HASHVECEN (1U << 23) | ||
| 866 | #define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22) | ||
| 867 | #define FW_VI_MAC_CMD_VALID (1U << 15) | ||
| 868 | #define FW_VI_MAC_CMD_PRIO(x) ((x) << 12) | ||
| 869 | #define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10) | ||
| 870 | #define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3) | ||
| 871 | #define FW_VI_MAC_CMD_IDX(x) ((x) << 0) | ||
| 872 | #define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff) | ||
| 873 | |||
| 874 | #define FW_RXMODE_MTU_NO_CHG 65535 | ||
| 875 | |||
| 876 | struct fw_vi_rxmode_cmd { | ||
| 877 | __be32 op_to_viid; | ||
| 878 | __be32 retval_len16; | ||
| 879 | __be32 mtu_to_broadcasten; | ||
| 880 | __be32 r4_lo; | ||
| 881 | }; | ||
| 882 | |||
| 883 | #define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0) | ||
| 884 | #define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16) | ||
| 885 | #define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3 | ||
| 886 | #define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14) | ||
| 887 | #define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3 | ||
| 888 | #define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12) | ||
| 889 | #define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3 | ||
| 890 | #define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10) | ||
| 891 | |||
| 892 | struct fw_vi_enable_cmd { | ||
| 893 | __be32 op_to_viid; | ||
| 894 | __be32 ien_to_len16; | ||
| 895 | __be16 blinkdur; | ||
| 896 | __be16 r3; | ||
| 897 | __be32 r4; | ||
| 898 | }; | ||
| 899 | |||
| 900 | #define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0) | ||
| 901 | #define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31) | ||
| 902 | #define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30) | ||
| 903 | #define FW_VI_ENABLE_CMD_LED (1U << 29) | ||
| 904 | |||
| 905 | /* VI VF stats offset definitions */ | ||
| 906 | #define VI_VF_NUM_STATS 16 | ||
| 907 | enum fw_vi_stats_vf_index { | ||
| 908 | FW_VI_VF_STAT_TX_BCAST_BYTES_IX, | ||
| 909 | FW_VI_VF_STAT_TX_BCAST_FRAMES_IX, | ||
| 910 | FW_VI_VF_STAT_TX_MCAST_BYTES_IX, | ||
| 911 | FW_VI_VF_STAT_TX_MCAST_FRAMES_IX, | ||
| 912 | FW_VI_VF_STAT_TX_UCAST_BYTES_IX, | ||
| 913 | FW_VI_VF_STAT_TX_UCAST_FRAMES_IX, | ||
| 914 | FW_VI_VF_STAT_TX_DROP_FRAMES_IX, | ||
| 915 | FW_VI_VF_STAT_TX_OFLD_BYTES_IX, | ||
| 916 | FW_VI_VF_STAT_TX_OFLD_FRAMES_IX, | ||
| 917 | FW_VI_VF_STAT_RX_BCAST_BYTES_IX, | ||
| 918 | FW_VI_VF_STAT_RX_BCAST_FRAMES_IX, | ||
| 919 | FW_VI_VF_STAT_RX_MCAST_BYTES_IX, | ||
| 920 | FW_VI_VF_STAT_RX_MCAST_FRAMES_IX, | ||
| 921 | FW_VI_VF_STAT_RX_UCAST_BYTES_IX, | ||
| 922 | FW_VI_VF_STAT_RX_UCAST_FRAMES_IX, | ||
| 923 | FW_VI_VF_STAT_RX_ERR_FRAMES_IX | ||
| 924 | }; | ||
| 925 | |||
| 926 | /* VI PF stats offset definitions */ | ||
| 927 | #define VI_PF_NUM_STATS 17 | ||
| 928 | enum fw_vi_stats_pf_index { | ||
| 929 | FW_VI_PF_STAT_TX_BCAST_BYTES_IX, | ||
| 930 | FW_VI_PF_STAT_TX_BCAST_FRAMES_IX, | ||
| 931 | FW_VI_PF_STAT_TX_MCAST_BYTES_IX, | ||
| 932 | FW_VI_PF_STAT_TX_MCAST_FRAMES_IX, | ||
| 933 | FW_VI_PF_STAT_TX_UCAST_BYTES_IX, | ||
| 934 | FW_VI_PF_STAT_TX_UCAST_FRAMES_IX, | ||
| 935 | FW_VI_PF_STAT_TX_OFLD_BYTES_IX, | ||
| 936 | FW_VI_PF_STAT_TX_OFLD_FRAMES_IX, | ||
| 937 | FW_VI_PF_STAT_RX_BYTES_IX, | ||
| 938 | FW_VI_PF_STAT_RX_FRAMES_IX, | ||
| 939 | FW_VI_PF_STAT_RX_BCAST_BYTES_IX, | ||
| 940 | FW_VI_PF_STAT_RX_BCAST_FRAMES_IX, | ||
| 941 | FW_VI_PF_STAT_RX_MCAST_BYTES_IX, | ||
| 942 | FW_VI_PF_STAT_RX_MCAST_FRAMES_IX, | ||
| 943 | FW_VI_PF_STAT_RX_UCAST_BYTES_IX, | ||
| 944 | FW_VI_PF_STAT_RX_UCAST_FRAMES_IX, | ||
| 945 | FW_VI_PF_STAT_RX_ERR_FRAMES_IX | ||
| 946 | }; | ||
| 947 | |||
| 948 | struct fw_vi_stats_cmd { | ||
| 949 | __be32 op_to_viid; | ||
| 950 | __be32 retval_len16; | ||
| 951 | union fw_vi_stats { | ||
| 952 | struct fw_vi_stats_ctl { | ||
| 953 | __be16 nstats_ix; | ||
| 954 | __be16 r6; | ||
| 955 | __be32 r7; | ||
| 956 | __be64 stat0; | ||
| 957 | __be64 stat1; | ||
| 958 | __be64 stat2; | ||
| 959 | __be64 stat3; | ||
| 960 | __be64 stat4; | ||
| 961 | __be64 stat5; | ||
| 962 | } ctl; | ||
| 963 | struct fw_vi_stats_pf { | ||
| 964 | __be64 tx_bcast_bytes; | ||
| 965 | __be64 tx_bcast_frames; | ||
| 966 | __be64 tx_mcast_bytes; | ||
| 967 | __be64 tx_mcast_frames; | ||
| 968 | __be64 tx_ucast_bytes; | ||
| 969 | __be64 tx_ucast_frames; | ||
| 970 | __be64 tx_offload_bytes; | ||
| 971 | __be64 tx_offload_frames; | ||
| 972 | __be64 rx_pf_bytes; | ||
| 973 | __be64 rx_pf_frames; | ||
| 974 | __be64 rx_bcast_bytes; | ||
| 975 | __be64 rx_bcast_frames; | ||
| 976 | __be64 rx_mcast_bytes; | ||
| 977 | __be64 rx_mcast_frames; | ||
| 978 | __be64 rx_ucast_bytes; | ||
| 979 | __be64 rx_ucast_frames; | ||
| 980 | __be64 rx_err_frames; | ||
| 981 | } pf; | ||
| 982 | struct fw_vi_stats_vf { | ||
| 983 | __be64 tx_bcast_bytes; | ||
| 984 | __be64 tx_bcast_frames; | ||
| 985 | __be64 tx_mcast_bytes; | ||
| 986 | __be64 tx_mcast_frames; | ||
| 987 | __be64 tx_ucast_bytes; | ||
| 988 | __be64 tx_ucast_frames; | ||
| 989 | __be64 tx_drop_frames; | ||
| 990 | __be64 tx_offload_bytes; | ||
| 991 | __be64 tx_offload_frames; | ||
| 992 | __be64 rx_bcast_bytes; | ||
| 993 | __be64 rx_bcast_frames; | ||
| 994 | __be64 rx_mcast_bytes; | ||
| 995 | __be64 rx_mcast_frames; | ||
| 996 | __be64 rx_ucast_bytes; | ||
| 997 | __be64 rx_ucast_frames; | ||
| 998 | __be64 rx_err_frames; | ||
| 999 | } vf; | ||
| 1000 | } u; | ||
| 1001 | }; | ||
| 1002 | |||
| 1003 | #define FW_VI_STATS_CMD_VIID(x) ((x) << 0) | ||
| 1004 | #define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12) | ||
| 1005 | #define FW_VI_STATS_CMD_IX(x) ((x) << 0) | ||
| 1006 | |||
| 1007 | struct fw_acl_mac_cmd { | ||
| 1008 | __be32 op_to_vfn; | ||
| 1009 | __be32 en_to_len16; | ||
| 1010 | u8 nmac; | ||
| 1011 | u8 r3[7]; | ||
| 1012 | __be16 r4; | ||
| 1013 | u8 macaddr0[6]; | ||
| 1014 | __be16 r5; | ||
| 1015 | u8 macaddr1[6]; | ||
| 1016 | __be16 r6; | ||
| 1017 | u8 macaddr2[6]; | ||
| 1018 | __be16 r7; | ||
| 1019 | u8 macaddr3[6]; | ||
| 1020 | }; | ||
| 1021 | |||
| 1022 | #define FW_ACL_MAC_CMD_PFN(x) ((x) << 8) | ||
| 1023 | #define FW_ACL_MAC_CMD_VFN(x) ((x) << 0) | ||
| 1024 | #define FW_ACL_MAC_CMD_EN(x) ((x) << 31) | ||
| 1025 | |||
| 1026 | struct fw_acl_vlan_cmd { | ||
| 1027 | __be32 op_to_vfn; | ||
| 1028 | __be32 en_to_len16; | ||
| 1029 | u8 nvlan; | ||
| 1030 | u8 dropnovlan_fm; | ||
| 1031 | u8 r3_lo[6]; | ||
| 1032 | __be16 vlanid[16]; | ||
| 1033 | }; | ||
| 1034 | |||
| 1035 | #define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8) | ||
| 1036 | #define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0) | ||
| 1037 | #define FW_ACL_VLAN_CMD_EN(x) ((x) << 31) | ||
| 1038 | #define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7) | ||
| 1039 | #define FW_ACL_VLAN_CMD_FM(x) ((x) << 6) | ||
| 1040 | |||
| 1041 | enum fw_port_cap { | ||
| 1042 | FW_PORT_CAP_SPEED_100M = 0x0001, | ||
| 1043 | FW_PORT_CAP_SPEED_1G = 0x0002, | ||
| 1044 | FW_PORT_CAP_SPEED_2_5G = 0x0004, | ||
| 1045 | FW_PORT_CAP_SPEED_10G = 0x0008, | ||
| 1046 | FW_PORT_CAP_SPEED_40G = 0x0010, | ||
| 1047 | FW_PORT_CAP_SPEED_100G = 0x0020, | ||
| 1048 | FW_PORT_CAP_FC_RX = 0x0040, | ||
| 1049 | FW_PORT_CAP_FC_TX = 0x0080, | ||
| 1050 | FW_PORT_CAP_ANEG = 0x0100, | ||
| 1051 | FW_PORT_CAP_MDI_0 = 0x0200, | ||
| 1052 | FW_PORT_CAP_MDI_1 = 0x0400, | ||
| 1053 | FW_PORT_CAP_BEAN = 0x0800, | ||
| 1054 | FW_PORT_CAP_PMA_LPBK = 0x1000, | ||
| 1055 | FW_PORT_CAP_PCS_LPBK = 0x2000, | ||
| 1056 | FW_PORT_CAP_PHYXS_LPBK = 0x4000, | ||
| 1057 | FW_PORT_CAP_FAR_END_LPBK = 0x8000, | ||
| 1058 | }; | ||
| 1059 | |||
| 1060 | enum fw_port_mdi { | ||
| 1061 | FW_PORT_MDI_UNCHANGED, | ||
| 1062 | FW_PORT_MDI_AUTO, | ||
| 1063 | FW_PORT_MDI_F_STRAIGHT, | ||
| 1064 | FW_PORT_MDI_F_CROSSOVER | ||
| 1065 | }; | ||
| 1066 | |||
| 1067 | #define FW_PORT_MDI(x) ((x) << 9) | ||
| 1068 | |||
| 1069 | enum fw_port_action { | ||
| 1070 | FW_PORT_ACTION_L1_CFG = 0x0001, | ||
| 1071 | FW_PORT_ACTION_L2_CFG = 0x0002, | ||
| 1072 | FW_PORT_ACTION_GET_PORT_INFO = 0x0003, | ||
| 1073 | FW_PORT_ACTION_L2_PPP_CFG = 0x0004, | ||
| 1074 | FW_PORT_ACTION_L2_DCB_CFG = 0x0005, | ||
| 1075 | FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010, | ||
| 1076 | FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011, | ||
| 1077 | FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012, | ||
| 1078 | FW_PORT_ACTION_LPBK_TO_NORMAL = 0x0020, | ||
| 1079 | FW_PORT_ACTION_L1_LPBK = 0x0021, | ||
| 1080 | FW_PORT_ACTION_L1_PMA_LPBK = 0x0022, | ||
| 1081 | FW_PORT_ACTION_L1_PCS_LPBK = 0x0023, | ||
| 1082 | FW_PORT_ACTION_L1_PHYXS_CSIDE_LPBK = 0x0024, | ||
| 1083 | FW_PORT_ACTION_L1_PHYXS_ESIDE_LPBK = 0x0025, | ||
| 1084 | FW_PORT_ACTION_PHY_RESET = 0x0040, | ||
| 1085 | FW_PORT_ACTION_PMA_RESET = 0x0041, | ||
| 1086 | FW_PORT_ACTION_PCS_RESET = 0x0042, | ||
| 1087 | FW_PORT_ACTION_PHYXS_RESET = 0x0043, | ||
| 1088 | FW_PORT_ACTION_DTEXS_REEST = 0x0044, | ||
| 1089 | FW_PORT_ACTION_AN_RESET = 0x0045 | ||
| 1090 | }; | ||
| 1091 | |||
| 1092 | enum fw_port_l2cfg_ctlbf { | ||
| 1093 | FW_PORT_L2_CTLBF_OVLAN0 = 0x01, | ||
| 1094 | FW_PORT_L2_CTLBF_OVLAN1 = 0x02, | ||
| 1095 | FW_PORT_L2_CTLBF_OVLAN2 = 0x04, | ||
| 1096 | FW_PORT_L2_CTLBF_OVLAN3 = 0x08, | ||
| 1097 | FW_PORT_L2_CTLBF_IVLAN = 0x10, | ||
| 1098 | FW_PORT_L2_CTLBF_TXIPG = 0x20 | ||
| 1099 | }; | ||
| 1100 | |||
| 1101 | enum fw_port_dcb_cfg { | ||
| 1102 | FW_PORT_DCB_CFG_PG = 0x01, | ||
| 1103 | FW_PORT_DCB_CFG_PFC = 0x02, | ||
| 1104 | FW_PORT_DCB_CFG_APPL = 0x04 | ||
| 1105 | }; | ||
| 1106 | |||
| 1107 | enum fw_port_dcb_cfg_rc { | ||
| 1108 | FW_PORT_DCB_CFG_SUCCESS = 0x0, | ||
| 1109 | FW_PORT_DCB_CFG_ERROR = 0x1 | ||
| 1110 | }; | ||
| 1111 | |||
| 1112 | struct fw_port_cmd { | ||
| 1113 | __be32 op_to_portid; | ||
| 1114 | __be32 action_to_len16; | ||
| 1115 | union fw_port { | ||
| 1116 | struct fw_port_l1cfg { | ||
| 1117 | __be32 rcap; | ||
| 1118 | __be32 r; | ||
| 1119 | } l1cfg; | ||
| 1120 | struct fw_port_l2cfg { | ||
| 1121 | __be16 ctlbf_to_ivlan0; | ||
| 1122 | __be16 ivlantype; | ||
| 1123 | __be32 txipg_pkd; | ||
| 1124 | __be16 ovlan0mask; | ||
| 1125 | __be16 ovlan0type; | ||
| 1126 | __be16 ovlan1mask; | ||
| 1127 | __be16 ovlan1type; | ||
| 1128 | __be16 ovlan2mask; | ||
| 1129 | __be16 ovlan2type; | ||
| 1130 | __be16 ovlan3mask; | ||
| 1131 | __be16 ovlan3type; | ||
| 1132 | } l2cfg; | ||
| 1133 | struct fw_port_info { | ||
| 1134 | __be32 lstatus_to_modtype; | ||
| 1135 | __be16 pcap; | ||
| 1136 | __be16 acap; | ||
| 1137 | } info; | ||
| 1138 | struct fw_port_ppp { | ||
| 1139 | __be32 pppen_to_ncsich; | ||
| 1140 | __be32 r11; | ||
| 1141 | } ppp; | ||
| 1142 | struct fw_port_dcb { | ||
| 1143 | __be16 cfg; | ||
| 1144 | u8 up_map; | ||
| 1145 | u8 sf_cfgrc; | ||
| 1146 | __be16 prot_ix; | ||
| 1147 | u8 pe7_to_pe0; | ||
| 1148 | u8 numTCPFCs; | ||
| 1149 | __be32 pgid0_to_pgid7; | ||
| 1150 | __be32 numTCs_oui; | ||
| 1151 | u8 pgpc[8]; | ||
| 1152 | } dcb; | ||
| 1153 | } u; | ||
| 1154 | }; | ||
| 1155 | |||
| 1156 | #define FW_PORT_CMD_READ (1U << 22) | ||
| 1157 | |||
| 1158 | #define FW_PORT_CMD_PORTID(x) ((x) << 0) | ||
| 1159 | #define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf) | ||
| 1160 | |||
| 1161 | #define FW_PORT_CMD_ACTION(x) ((x) << 16) | ||
| 1162 | |||
| 1163 | #define FW_PORT_CMD_CTLBF(x) ((x) << 10) | ||
| 1164 | #define FW_PORT_CMD_OVLAN3(x) ((x) << 7) | ||
| 1165 | #define FW_PORT_CMD_OVLAN2(x) ((x) << 6) | ||
| 1166 | #define FW_PORT_CMD_OVLAN1(x) ((x) << 5) | ||
| 1167 | #define FW_PORT_CMD_OVLAN0(x) ((x) << 4) | ||
| 1168 | #define FW_PORT_CMD_IVLAN0(x) ((x) << 3) | ||
| 1169 | |||
| 1170 | #define FW_PORT_CMD_TXIPG(x) ((x) << 19) | ||
| 1171 | |||
| 1172 | #define FW_PORT_CMD_LSTATUS (1U << 31) | ||
| 1173 | #define FW_PORT_CMD_LSPEED(x) ((x) << 24) | ||
| 1174 | #define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f) | ||
| 1175 | #define FW_PORT_CMD_TXPAUSE (1U << 23) | ||
| 1176 | #define FW_PORT_CMD_RXPAUSE (1U << 22) | ||
| 1177 | #define FW_PORT_CMD_MDIOCAP (1U << 21) | ||
| 1178 | #define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f) | ||
| 1179 | #define FW_PORT_CMD_LPTXPAUSE (1U << 15) | ||
| 1180 | #define FW_PORT_CMD_LPRXPAUSE (1U << 14) | ||
| 1181 | #define FW_PORT_CMD_PTYPE_MASK 0x1f | ||
| 1182 | #define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK) | ||
| 1183 | #define FW_PORT_CMD_MODTYPE_MASK 0x1f | ||
| 1184 | #define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK) | ||
| 1185 | |||
| 1186 | #define FW_PORT_CMD_PPPEN(x) ((x) << 31) | ||
| 1187 | #define FW_PORT_CMD_TPSRC(x) ((x) << 28) | ||
| 1188 | #define FW_PORT_CMD_NCSISRC(x) ((x) << 24) | ||
| 1189 | |||
| 1190 | #define FW_PORT_CMD_CH0(x) ((x) << 20) | ||
| 1191 | #define FW_PORT_CMD_CH1(x) ((x) << 16) | ||
| 1192 | #define FW_PORT_CMD_CH2(x) ((x) << 12) | ||
| 1193 | #define FW_PORT_CMD_CH3(x) ((x) << 8) | ||
| 1194 | #define FW_PORT_CMD_NCSICH(x) ((x) << 4) | ||
| 1195 | |||
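The port-information accessors above all operate on the single lstatus_to_modtype word of a GET_PORT_INFO reply. Below is a minimal userspace sketch of the decode, with the relevant macros copied locally and an invented sample word; the real driver first converts the big-endian field with be32_to_cpu().

#include <stdio.h>
#include <stdint.h>

#define FW_PORT_CMD_LSTATUS        (1U << 31)
#define FW_PORT_CMD_LSPEED_GET(x)  (((x) >> 24) & 0x3f)
#define FW_PORT_CMD_PTYPE_GET(x)   (((x) >> 8) & 0x1f)
#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & 0x1f)

int main(void)
{
        uint32_t lstatus = 0x88000702u;  /* invented: link up, 10G speed bit */

        printf("link %s, speed bits %#x, port type %u, module type %u\n",
               (lstatus & FW_PORT_CMD_LSTATUS) ? "up" : "down",
               (unsigned)FW_PORT_CMD_LSPEED_GET(lstatus),
               (unsigned)FW_PORT_CMD_PTYPE_GET(lstatus),
               (unsigned)FW_PORT_CMD_MODTYPE_GET(lstatus));
        return 0;
}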
| 1196 | enum fw_port_type { | ||
| 1197 | FW_PORT_TYPE_FIBER, | ||
| 1198 | FW_PORT_TYPE_KX4, | ||
| 1199 | FW_PORT_TYPE_BT_SGMII, | ||
| 1200 | FW_PORT_TYPE_KX, | ||
| 1201 | FW_PORT_TYPE_BT_XAUI, | ||
| 1202 | FW_PORT_TYPE_KR, | ||
| 1203 | FW_PORT_TYPE_CX4, | ||
| 1204 | FW_PORT_TYPE_TWINAX, | ||
| 1205 | |||
| 1206 | FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK | ||
| 1207 | }; | ||
| 1208 | |||
| 1209 | enum fw_port_module_type { | ||
| 1210 | FW_PORT_MOD_TYPE_NA, | ||
| 1211 | FW_PORT_MOD_TYPE_LR, | ||
| 1212 | FW_PORT_MOD_TYPE_SR, | ||
| 1213 | FW_PORT_MOD_TYPE_ER, | ||
| 1214 | |||
| 1215 | FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK | ||
| 1216 | }; | ||
| 1217 | |||
| 1218 | /* port stats */ | ||
| 1219 | #define FW_NUM_PORT_STATS 50 | ||
| 1220 | #define FW_NUM_PORT_TX_STATS 23 | ||
| 1221 | #define FW_NUM_PORT_RX_STATS 27 | ||
| 1222 | |||
| 1223 | enum fw_port_stats_tx_index { | ||
| 1224 | FW_STAT_TX_PORT_BYTES_IX, | ||
| 1225 | FW_STAT_TX_PORT_FRAMES_IX, | ||
| 1226 | FW_STAT_TX_PORT_BCAST_IX, | ||
| 1227 | FW_STAT_TX_PORT_MCAST_IX, | ||
| 1228 | FW_STAT_TX_PORT_UCAST_IX, | ||
| 1229 | FW_STAT_TX_PORT_ERROR_IX, | ||
| 1230 | FW_STAT_TX_PORT_64B_IX, | ||
| 1231 | FW_STAT_TX_PORT_65B_127B_IX, | ||
| 1232 | FW_STAT_TX_PORT_128B_255B_IX, | ||
| 1233 | FW_STAT_TX_PORT_256B_511B_IX, | ||
| 1234 | FW_STAT_TX_PORT_512B_1023B_IX, | ||
| 1235 | FW_STAT_TX_PORT_1024B_1518B_IX, | ||
| 1236 | FW_STAT_TX_PORT_1519B_MAX_IX, | ||
| 1237 | FW_STAT_TX_PORT_DROP_IX, | ||
| 1238 | FW_STAT_TX_PORT_PAUSE_IX, | ||
| 1239 | FW_STAT_TX_PORT_PPP0_IX, | ||
| 1240 | FW_STAT_TX_PORT_PPP1_IX, | ||
| 1241 | FW_STAT_TX_PORT_PPP2_IX, | ||
| 1242 | FW_STAT_TX_PORT_PPP3_IX, | ||
| 1243 | FW_STAT_TX_PORT_PPP4_IX, | ||
| 1244 | FW_STAT_TX_PORT_PPP5_IX, | ||
| 1245 | FW_STAT_TX_PORT_PPP6_IX, | ||
| 1246 | FW_STAT_TX_PORT_PPP7_IX | ||
| 1247 | }; | ||
| 1248 | |||
| 1249 | enum fw_port_stat_rx_index { | ||
| 1250 | FW_STAT_RX_PORT_BYTES_IX, | ||
| 1251 | FW_STAT_RX_PORT_FRAMES_IX, | ||
| 1252 | FW_STAT_RX_PORT_BCAST_IX, | ||
| 1253 | FW_STAT_RX_PORT_MCAST_IX, | ||
| 1254 | FW_STAT_RX_PORT_UCAST_IX, | ||
| 1255 | FW_STAT_RX_PORT_MTU_ERROR_IX, | ||
| 1256 | FW_STAT_RX_PORT_MTU_CRC_ERROR_IX, | ||
| 1257 | FW_STAT_RX_PORT_CRC_ERROR_IX, | ||
| 1258 | FW_STAT_RX_PORT_LEN_ERROR_IX, | ||
| 1259 | FW_STAT_RX_PORT_SYM_ERROR_IX, | ||
| 1260 | FW_STAT_RX_PORT_64B_IX, | ||
| 1261 | FW_STAT_RX_PORT_65B_127B_IX, | ||
| 1262 | FW_STAT_RX_PORT_128B_255B_IX, | ||
| 1263 | FW_STAT_RX_PORT_256B_511B_IX, | ||
| 1264 | FW_STAT_RX_PORT_512B_1023B_IX, | ||
| 1265 | FW_STAT_RX_PORT_1024B_1518B_IX, | ||
| 1266 | FW_STAT_RX_PORT_1519B_MAX_IX, | ||
| 1267 | FW_STAT_RX_PORT_PAUSE_IX, | ||
| 1268 | FW_STAT_RX_PORT_PPP0_IX, | ||
| 1269 | FW_STAT_RX_PORT_PPP1_IX, | ||
| 1270 | FW_STAT_RX_PORT_PPP2_IX, | ||
| 1271 | FW_STAT_RX_PORT_PPP3_IX, | ||
| 1272 | FW_STAT_RX_PORT_PPP4_IX, | ||
| 1273 | FW_STAT_RX_PORT_PPP5_IX, | ||
| 1274 | FW_STAT_RX_PORT_PPP6_IX, | ||
| 1275 | FW_STAT_RX_PORT_PPP7_IX, | ||
| 1276 | FW_STAT_RX_PORT_LESS_64B_IX | ||
| 1277 | }; | ||
| 1278 | |||
| 1279 | struct fw_port_stats_cmd { | ||
| 1280 | __be32 op_to_portid; | ||
| 1281 | __be32 retval_len16; | ||
| 1282 | union fw_port_stats { | ||
| 1283 | struct fw_port_stats_ctl { | ||
| 1284 | u8 nstats_bg_bm; | ||
| 1285 | u8 tx_ix; | ||
| 1286 | __be16 r6; | ||
| 1287 | __be32 r7; | ||
| 1288 | __be64 stat0; | ||
| 1289 | __be64 stat1; | ||
| 1290 | __be64 stat2; | ||
| 1291 | __be64 stat3; | ||
| 1292 | __be64 stat4; | ||
| 1293 | __be64 stat5; | ||
| 1294 | } ctl; | ||
| 1295 | struct fw_port_stats_all { | ||
| 1296 | __be64 tx_bytes; | ||
| 1297 | __be64 tx_frames; | ||
| 1298 | __be64 tx_bcast; | ||
| 1299 | __be64 tx_mcast; | ||
| 1300 | __be64 tx_ucast; | ||
| 1301 | __be64 tx_error; | ||
| 1302 | __be64 tx_64b; | ||
| 1303 | __be64 tx_65b_127b; | ||
| 1304 | __be64 tx_128b_255b; | ||
| 1305 | __be64 tx_256b_511b; | ||
| 1306 | __be64 tx_512b_1023b; | ||
| 1307 | __be64 tx_1024b_1518b; | ||
| 1308 | __be64 tx_1519b_max; | ||
| 1309 | __be64 tx_drop; | ||
| 1310 | __be64 tx_pause; | ||
| 1311 | __be64 tx_ppp0; | ||
| 1312 | __be64 tx_ppp1; | ||
| 1313 | __be64 tx_ppp2; | ||
| 1314 | __be64 tx_ppp3; | ||
| 1315 | __be64 tx_ppp4; | ||
| 1316 | __be64 tx_ppp5; | ||
| 1317 | __be64 tx_ppp6; | ||
| 1318 | __be64 tx_ppp7; | ||
| 1319 | __be64 rx_bytes; | ||
| 1320 | __be64 rx_frames; | ||
| 1321 | __be64 rx_bcast; | ||
| 1322 | __be64 rx_mcast; | ||
| 1323 | __be64 rx_ucast; | ||
| 1324 | __be64 rx_mtu_error; | ||
| 1325 | __be64 rx_mtu_crc_error; | ||
| 1326 | __be64 rx_crc_error; | ||
| 1327 | __be64 rx_len_error; | ||
| 1328 | __be64 rx_sym_error; | ||
| 1329 | __be64 rx_64b; | ||
| 1330 | __be64 rx_65b_127b; | ||
| 1331 | __be64 rx_128b_255b; | ||
| 1332 | __be64 rx_256b_511b; | ||
| 1333 | __be64 rx_512b_1023b; | ||
| 1334 | __be64 rx_1024b_1518b; | ||
| 1335 | __be64 rx_1519b_max; | ||
| 1336 | __be64 rx_pause; | ||
| 1337 | __be64 rx_ppp0; | ||
| 1338 | __be64 rx_ppp1; | ||
| 1339 | __be64 rx_ppp2; | ||
| 1340 | __be64 rx_ppp3; | ||
| 1341 | __be64 rx_ppp4; | ||
| 1342 | __be64 rx_ppp5; | ||
| 1343 | __be64 rx_ppp6; | ||
| 1344 | __be64 rx_ppp7; | ||
| 1345 | __be64 rx_less_64b; | ||
| 1346 | __be64 rx_bg_drop; | ||
| 1347 | __be64 rx_bg_trunc; | ||
| 1348 | } all; | ||
| 1349 | } u; | ||
| 1350 | }; | ||
| 1351 | |||
| 1352 | #define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4) | ||
| 1353 | #define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0) | ||
| 1354 | #define FW_PORT_STATS_CMD_TX(x) ((x) << 7) | ||
| 1355 | #define FW_PORT_STATS_CMD_IX(x) ((x) << 0) | ||
| 1356 | |||
| 1357 | /* port loopback stats */ | ||
| 1358 | #define FW_NUM_LB_STATS 16 | ||
| 1359 | enum fw_port_lb_stats_index { | ||
| 1360 | FW_STAT_LB_PORT_BYTES_IX, | ||
| 1361 | FW_STAT_LB_PORT_FRAMES_IX, | ||
| 1362 | FW_STAT_LB_PORT_BCAST_IX, | ||
| 1363 | FW_STAT_LB_PORT_MCAST_IX, | ||
| 1364 | FW_STAT_LB_PORT_UCAST_IX, | ||
| 1365 | FW_STAT_LB_PORT_ERROR_IX, | ||
| 1366 | FW_STAT_LB_PORT_64B_IX, | ||
| 1367 | FW_STAT_LB_PORT_65B_127B_IX, | ||
| 1368 | FW_STAT_LB_PORT_128B_255B_IX, | ||
| 1369 | FW_STAT_LB_PORT_256B_511B_IX, | ||
| 1370 | FW_STAT_LB_PORT_512B_1023B_IX, | ||
| 1371 | FW_STAT_LB_PORT_1024B_1518B_IX, | ||
| 1372 | FW_STAT_LB_PORT_1519B_MAX_IX, | ||
| 1373 | FW_STAT_LB_PORT_DROP_FRAMES_IX | ||
| 1374 | }; | ||
| 1375 | |||
| 1376 | struct fw_port_lb_stats_cmd { | ||
| 1377 | __be32 op_to_lbport; | ||
| 1378 | __be32 retval_len16; | ||
| 1379 | union fw_port_lb_stats { | ||
| 1380 | struct fw_port_lb_stats_ctl { | ||
| 1381 | u8 nstats_bg_bm; | ||
| 1382 | u8 ix_pkd; | ||
| 1383 | __be16 r6; | ||
| 1384 | __be32 r7; | ||
| 1385 | __be64 stat0; | ||
| 1386 | __be64 stat1; | ||
| 1387 | __be64 stat2; | ||
| 1388 | __be64 stat3; | ||
| 1389 | __be64 stat4; | ||
| 1390 | __be64 stat5; | ||
| 1391 | } ctl; | ||
| 1392 | struct fw_port_lb_stats_all { | ||
| 1393 | __be64 tx_bytes; | ||
| 1394 | __be64 tx_frames; | ||
| 1395 | __be64 tx_bcast; | ||
| 1396 | __be64 tx_mcast; | ||
| 1397 | __be64 tx_ucast; | ||
| 1398 | __be64 tx_error; | ||
| 1399 | __be64 tx_64b; | ||
| 1400 | __be64 tx_65b_127b; | ||
| 1401 | __be64 tx_128b_255b; | ||
| 1402 | __be64 tx_256b_511b; | ||
| 1403 | __be64 tx_512b_1023b; | ||
| 1404 | __be64 tx_1024b_1518b; | ||
| 1405 | __be64 tx_1519b_max; | ||
| 1406 | __be64 rx_lb_drop; | ||
| 1407 | __be64 rx_lb_trunc; | ||
| 1408 | } all; | ||
| 1409 | } u; | ||
| 1410 | }; | ||
| 1411 | |||
| 1412 | #define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0) | ||
| 1413 | #define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4) | ||
| 1414 | #define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0) | ||
| 1415 | #define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0) | ||
| 1416 | |||
| 1417 | struct fw_rss_ind_tbl_cmd { | ||
| 1418 | __be32 op_to_viid; | ||
| 1419 | #define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0) | ||
| 1420 | __be32 retval_len16; | ||
| 1421 | __be16 niqid; | ||
| 1422 | __be16 startidx; | ||
| 1423 | __be32 r3; | ||
| 1424 | __be32 iq0_to_iq2; | ||
| 1425 | #define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20) | ||
| 1426 | #define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10) | ||
| 1427 | #define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0) | ||
| 1428 | __be32 iq3_to_iq5; | ||
| 1429 | __be32 iq6_to_iq8; | ||
| 1430 | __be32 iq9_to_iq11; | ||
| 1431 | __be32 iq12_to_iq14; | ||
| 1432 | __be32 iq15_to_iq17; | ||
| 1433 | __be32 iq18_to_iq20; | ||
| 1434 | __be32 iq21_to_iq23; | ||
| 1435 | __be32 iq24_to_iq26; | ||
| 1436 | __be32 iq27_to_iq29; | ||
| 1437 | __be32 iq30_iq31; | ||
| 1438 | __be32 r15_lo; | ||
| 1439 | }; | ||
| 1440 | |||
| 1441 | struct fw_rss_glb_config_cmd { | ||
| 1442 | __be32 op_to_write; | ||
| 1443 | __be32 retval_len16; | ||
| 1444 | union fw_rss_glb_config { | ||
| 1445 | struct fw_rss_glb_config_manual { | ||
| 1446 | __be32 mode_pkd; | ||
| 1447 | __be32 r3; | ||
| 1448 | __be64 r4; | ||
| 1449 | __be64 r5; | ||
| 1450 | } manual; | ||
| 1451 | struct fw_rss_glb_config_basicvirtual { | ||
| 1452 | __be32 mode_pkd; | ||
| 1453 | __be32 synmapen_to_hashtoeplitz; | ||
| 1454 | #define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8) | ||
| 1455 | #define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7) | ||
| 1456 | #define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6) | ||
| 1457 | #define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5) | ||
| 1458 | #define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4) | ||
| 1459 | #define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3) | ||
| 1460 | #define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2) | ||
| 1461 | #define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1) | ||
| 1462 | #define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0) | ||
| 1463 | __be64 r8; | ||
| 1464 | __be64 r9; | ||
| 1465 | } basicvirtual; | ||
| 1466 | } u; | ||
| 1467 | }; | ||
| 1468 | |||
| 1469 | #define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28) | ||
| 1470 | |||
| 1471 | #define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0 | ||
| 1472 | #define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 | ||
| 1473 | |||
| 1474 | struct fw_rss_vi_config_cmd { | ||
| 1475 | __be32 op_to_viid; | ||
| 1476 | #define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0) | ||
| 1477 | __be32 retval_len16; | ||
| 1478 | union fw_rss_vi_config { | ||
| 1479 | struct fw_rss_vi_config_manual { | ||
| 1480 | __be64 r3; | ||
| 1481 | __be64 r4; | ||
| 1482 | __be64 r5; | ||
| 1483 | } manual; | ||
| 1484 | struct fw_rss_vi_config_basicvirtual { | ||
| 1485 | __be32 r6; | ||
| 1486 | __be32 defaultq_to_ip4udpen; | ||
| 1487 | #define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16) | ||
| 1488 | #define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4) | ||
| 1489 | #define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3) | ||
| 1490 | #define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2) | ||
| 1491 | #define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1) | ||
| 1492 | #define FW_RSS_VI_CONFIG_CMD_IP4UDPEN (1U << 0) | ||
| 1493 | __be64 r9; | ||
| 1494 | __be64 r10; | ||
| 1495 | } basicvirtual; | ||
| 1496 | } u; | ||
| 1497 | }; | ||
| 1498 | |||
| 1499 | enum fw_error_type { | ||
| 1500 | FW_ERROR_TYPE_EXCEPTION = 0x0, | ||
| 1501 | FW_ERROR_TYPE_HWMODULE = 0x1, | ||
| 1502 | FW_ERROR_TYPE_WR = 0x2, | ||
| 1503 | FW_ERROR_TYPE_ACL = 0x3, | ||
| 1504 | }; | ||
| 1505 | |||
| 1506 | struct fw_error_cmd { | ||
| 1507 | __be32 op_to_type; | ||
| 1508 | __be32 len16_pkd; | ||
| 1509 | union fw_error { | ||
| 1510 | struct fw_error_exception { | ||
| 1511 | __be32 info[6]; | ||
| 1512 | } exception; | ||
| 1513 | struct fw_error_hwmodule { | ||
| 1514 | __be32 regaddr; | ||
| 1515 | __be32 regval; | ||
| 1516 | } hwmodule; | ||
| 1517 | struct fw_error_wr { | ||
| 1518 | __be16 cidx; | ||
| 1519 | __be16 pfn_vfn; | ||
| 1520 | __be32 eqid; | ||
| 1521 | u8 wrhdr[16]; | ||
| 1522 | } wr; | ||
| 1523 | struct fw_error_acl { | ||
| 1524 | __be16 cidx; | ||
| 1525 | __be16 pfn_vfn; | ||
| 1526 | __be32 eqid; | ||
| 1527 | __be16 mv_pkd; | ||
| 1528 | u8 val[6]; | ||
| 1529 | __be64 r4; | ||
| 1530 | } acl; | ||
| 1531 | } u; | ||
| 1532 | }; | ||
| 1533 | |||
| 1534 | struct fw_debug_cmd { | ||
| 1535 | __be32 op_type; | ||
| 1536 | #define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff) | ||
| 1537 | __be32 len16_pkd; | ||
| 1538 | union fw_debug { | ||
| 1539 | struct fw_debug_assert { | ||
| 1540 | __be32 fcid; | ||
| 1541 | __be32 line; | ||
| 1542 | __be32 x; | ||
| 1543 | __be32 y; | ||
| 1544 | u8 filename_0_7[8]; | ||
| 1545 | u8 filename_8_15[8]; | ||
| 1546 | __be64 r3; | ||
| 1547 | } assert; | ||
| 1548 | struct fw_debug_prt { | ||
| 1549 | __be16 dprtstridx; | ||
| 1550 | __be16 r3[3]; | ||
| 1551 | __be32 dprtstrparam0; | ||
| 1552 | __be32 dprtstrparam1; | ||
| 1553 | __be32 dprtstrparam2; | ||
| 1554 | __be32 dprtstrparam3; | ||
| 1555 | } prt; | ||
| 1556 | } u; | ||
| 1557 | }; | ||
| 1558 | |||
| 1559 | struct fw_hdr { | ||
| 1560 | u8 ver; | ||
| 1561 | u8 reserved1; | ||
| 1562 | __be16 len512; /* bin length in units of 512-bytes */ | ||
| 1563 | __be32 fw_ver; /* firmware version */ | ||
| 1564 | __be32 tp_microcode_ver; | ||
| 1565 | u8 intfver_nic; | ||
| 1566 | u8 intfver_vnic; | ||
| 1567 | u8 intfver_ofld; | ||
| 1568 | u8 intfver_ri; | ||
| 1569 | u8 intfver_iscsipdu; | ||
| 1570 | u8 intfver_iscsi; | ||
| 1571 | u8 intfver_fcoe; | ||
| 1572 | u8 reserved2; | ||
| 1573 | __be32 reserved3[27]; | ||
| 1574 | }; | ||
| 1575 | |||
| 1576 | #define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) | ||
| 1577 | #define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) | ||
| 1578 | #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) | ||
| 1579 | #define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) | ||
| 1580 | #endif /* _T4FW_INTERFACE_H_ */ | ||
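The fw_ver word in struct fw_hdr packs major, minor, micro and build numbers into one 32-bit value, which the FW_HDR_FW_VER_*_GET macros unpack. A small standalone sketch with the macros copied locally and a made-up version word already converted to host byte order:

#include <stdio.h>
#include <stdint.h>

#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)

int main(void)
{
        uint32_t fw_ver = 0x01020304u;  /* made-up version word */

        printf("firmware %u.%u.%u.%u\n",
               (unsigned)FW_HDR_FW_VER_MAJOR_GET(fw_ver),
               (unsigned)FW_HDR_FW_VER_MINOR_GET(fw_ver),
               (unsigned)FW_HDR_FW_VER_MICRO_GET(fw_ver),
               (unsigned)FW_HDR_FW_VER_BUILD_GET(fw_ver));
        return 0;
}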
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index cfd09cea7214..73d43c53015a 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
| @@ -661,6 +661,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | |||
| 661 | i = 0; | 661 | i = 0; |
| 662 | } | 662 | } |
| 663 | 663 | ||
| 664 | if (i == tx_ring->next_to_use) | ||
| 665 | break; | ||
| 664 | eop = tx_ring->buffer_info[i].next_to_watch; | 666 | eop = tx_ring->buffer_info[i].next_to_watch; |
| 665 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | 667 | eop_desc = E1000_TX_DESC(*tx_ring, eop); |
| 666 | } | 668 | } |
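For context on the descriptor-cleanup loop touched here: a TX clean routine walks the ring from the consumer index and must stop once it reaches the producer index (next_to_use), otherwise it would reclaim slots the driver has not yet posted. The following is a simplified standalone C sketch of that wrap-around walk; the ring size and names are illustrative, not the driver's real structures.

#include <stdio.h>

#define RING_SIZE 8  /* illustrative ring size */

/* Reclaim entries from next_to_clean up to (but not including) next_to_use. */
static int clean_ring(unsigned int next_to_clean, unsigned int next_to_use)
{
        unsigned int i = next_to_clean;
        int cleaned = 0;

        while (i != next_to_use) {
                /* ...unmap and free the buffer for slot i here... */
                cleaned++;
                if (++i == RING_SIZE)
                        i = 0;
        }
        return cleaned;
}

int main(void)
{
        /* Consumer at 6, producer at 2: cleans slots 6, 7, 0, 1. */
        printf("cleaned %d descriptors\n", clean_ring(6, 2));
        return 0;
}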
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 73b260c3c654..5c98f7c22425 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
| @@ -5899,7 +5899,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
| 5899 | /* Limit the number of tx's outstanding for hw bug */ | 5899 | /* Limit the number of tx's outstanding for hw bug */ |
| 5900 | if (id->driver_data & DEV_NEED_TX_LIMIT) { | 5900 | if (id->driver_data & DEV_NEED_TX_LIMIT) { |
| 5901 | np->tx_limit = 1; | 5901 | np->tx_limit = 1; |
| 5902 | if ((id->driver_data & DEV_NEED_TX_LIMIT2) && | 5902 | if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) && |
| 5903 | pci_dev->revision >= 0xA2) | 5903 | pci_dev->revision >= 0xA2) |
| 5904 | np->tx_limit = 0; | 5904 | np->tx_limit = 0; |
| 5905 | } | 5905 | } |
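A side note on the masked comparison used here: when one device flag is a composite bitmask that includes a simpler flag's bit, testing it with a plain bitwise AND also matches devices that carry only the simpler flag, so the full-mask equality test is needed. A tiny standalone sketch with hypothetical flag values chosen to show the difference (they are not forcedeth's real constants):

#include <stdio.h>

/* Hypothetical values: the composite flag contains the simpler flag's bit. */
#define DEV_NEED_TX_LIMIT  0x1
#define DEV_NEED_TX_LIMIT2 (DEV_NEED_TX_LIMIT | 0x2)

int main(void)
{
        unsigned long driver_data = DEV_NEED_TX_LIMIT;  /* only the simple flag */

        printf("plain AND matches:      %d\n",
               (driver_data & DEV_NEED_TX_LIMIT2) != 0);
        printf("full-mask test matches: %d\n",
               (driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2);
        return 0;
}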
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 669de028d44f..080d1cea5b26 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
| @@ -676,7 +676,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) | |||
| 676 | priv->rx_queue[i] = NULL; | 676 | priv->rx_queue[i] = NULL; |
| 677 | 677 | ||
| 678 | for (i = 0; i < priv->num_tx_queues; i++) { | 678 | for (i = 0; i < priv->num_tx_queues; i++) { |
| 679 | priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc( | 679 | priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc( |
| 680 | sizeof (struct gfar_priv_tx_q), GFP_KERNEL); | 680 | sizeof (struct gfar_priv_tx_q), GFP_KERNEL); |
| 681 | if (!priv->tx_queue[i]) { | 681 | if (!priv->tx_queue[i]) { |
| 682 | err = -ENOMEM; | 682 | err = -ENOMEM; |
| @@ -689,7 +689,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) | |||
| 689 | } | 689 | } |
| 690 | 690 | ||
| 691 | for (i = 0; i < priv->num_rx_queues; i++) { | 691 | for (i = 0; i < priv->num_rx_queues; i++) { |
| 692 | priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc( | 692 | priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc( |
| 693 | sizeof (struct gfar_priv_rx_q), GFP_KERNEL); | 693 | sizeof (struct gfar_priv_rx_q), GFP_KERNEL); |
| 694 | if (!priv->rx_queue[i]) { | 694 | if (!priv->rx_queue[i]) { |
| 695 | err = -ENOMEM; | 695 | err = -ENOMEM; |
| @@ -1120,10 +1120,10 @@ static int gfar_probe(struct of_device *ofdev, | |||
| 1120 | /* provided which set of benchmarks. */ | 1120 | /* provided which set of benchmarks. */ |
| 1121 | printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); | 1121 | printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); |
| 1122 | for (i = 0; i < priv->num_rx_queues; i++) | 1122 | for (i = 0; i < priv->num_rx_queues; i++) |
| 1123 | printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n", | 1123 | printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n", |
| 1124 | dev->name, i, priv->rx_queue[i]->rx_ring_size); | 1124 | dev->name, i, priv->rx_queue[i]->rx_ring_size); |
| 1125 | for(i = 0; i < priv->num_tx_queues; i++) | 1125 | for(i = 0; i < priv->num_tx_queues; i++) |
| 1126 | printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n", | 1126 | printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n", |
| 1127 | dev->name, i, priv->tx_queue[i]->tx_ring_size); | 1127 | dev->name, i, priv->tx_queue[i]->tx_ring_size); |
| 1128 | 1128 | ||
| 1129 | return 0; | 1129 | return 0; |
| @@ -1638,13 +1638,13 @@ static void free_skb_resources(struct gfar_private *priv) | |||
| 1638 | /* Go through all the buffer descriptors and free their data buffers */ | 1638 | /* Go through all the buffer descriptors and free their data buffers */ |
| 1639 | for (i = 0; i < priv->num_tx_queues; i++) { | 1639 | for (i = 0; i < priv->num_tx_queues; i++) { |
| 1640 | tx_queue = priv->tx_queue[i]; | 1640 | tx_queue = priv->tx_queue[i]; |
| 1641 | if(!tx_queue->tx_skbuff) | 1641 | if(tx_queue->tx_skbuff) |
| 1642 | free_skb_tx_queue(tx_queue); | 1642 | free_skb_tx_queue(tx_queue); |
| 1643 | } | 1643 | } |
| 1644 | 1644 | ||
| 1645 | for (i = 0; i < priv->num_rx_queues; i++) { | 1645 | for (i = 0; i < priv->num_rx_queues; i++) { |
| 1646 | rx_queue = priv->rx_queue[i]; | 1646 | rx_queue = priv->rx_queue[i]; |
| 1647 | if(!rx_queue->rx_skbuff) | 1647 | if(rx_queue->rx_skbuff) |
| 1648 | free_skb_rx_queue(rx_queue); | 1648 | free_skb_rx_queue(rx_queue); |
| 1649 | } | 1649 | } |
| 1650 | 1650 | ||
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index d313fae992da..743038490104 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c | |||
| @@ -1814,6 +1814,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter, | |||
| 1814 | retval = 0; | 1814 | retval = 0; |
| 1815 | break; | 1815 | break; |
| 1816 | case E1000_DEV_ID_82576_QUAD_COPPER: | 1816 | case E1000_DEV_ID_82576_QUAD_COPPER: |
| 1817 | case E1000_DEV_ID_82576_QUAD_COPPER_ET2: | ||
| 1817 | /* quad port adapters only support WoL on port A */ | 1818 | /* quad port adapters only support WoL on port A */ |
| 1818 | if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { | 1819 | if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { |
| 1819 | wol->supported = 0; | 1820 | wol->supported = 0; |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 9b3c51ab1758..c9baa2aa98cd 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
| @@ -1612,6 +1612,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
| 1612 | adapter->eeprom_wol = 0; | 1612 | adapter->eeprom_wol = 0; |
| 1613 | break; | 1613 | break; |
| 1614 | case E1000_DEV_ID_82576_QUAD_COPPER: | 1614 | case E1000_DEV_ID_82576_QUAD_COPPER: |
| 1615 | case E1000_DEV_ID_82576_QUAD_COPPER_ET2: | ||
| 1615 | /* if quad port adapter, disable WoL on all but port A */ | 1616 | /* if quad port adapter, disable WoL on all but port A */ |
| 1616 | if (global_quad_port_a != 0) | 1617 | if (global_quad_port_a != 0) |
| 1617 | adapter->eeprom_wol = 0; | 1618 | adapter->eeprom_wol = 0; |
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 471887742b02..ecde0876a785 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
| @@ -1690,7 +1690,7 @@ myri10ge_set_pauseparam(struct net_device *netdev, | |||
| 1690 | if (pause->tx_pause != mgp->pause) | 1690 | if (pause->tx_pause != mgp->pause) |
| 1691 | return myri10ge_change_pause(mgp, pause->tx_pause); | 1691 | return myri10ge_change_pause(mgp, pause->tx_pause); |
| 1692 | if (pause->rx_pause != mgp->pause) | 1692 | if (pause->rx_pause != mgp->pause) |
| 1693 | return myri10ge_change_pause(mgp, pause->tx_pause); | 1693 | return myri10ge_change_pause(mgp, pause->rx_pause); |
| 1694 | if (pause->autoneg != 0) | 1694 | if (pause->autoneg != 0) |
| 1695 | return -EINVAL; | 1695 | return -EINVAL; |
| 1696 | return 0; | 1696 | return 0; |
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 5adc662c4bfb..fd9d6e34fda4 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c | |||
| @@ -493,13 +493,14 @@ static int pcmcia_get_versmac(struct pcmcia_device *p_dev, | |||
| 493 | { | 493 | { |
| 494 | struct net_device *dev = priv; | 494 | struct net_device *dev = priv; |
| 495 | cisparse_t parse; | 495 | cisparse_t parse; |
| 496 | u8 *buf; | ||
| 496 | 497 | ||
| 497 | if (pcmcia_parse_tuple(tuple, &parse)) | 498 | if (pcmcia_parse_tuple(tuple, &parse)) |
| 498 | return -EINVAL; | 499 | return -EINVAL; |
| 499 | 500 | ||
| 500 | if ((parse.version_1.ns > 3) && | 501 | buf = parse.version_1.str + parse.version_1.ofs[3]; |
| 501 | (cvt_ascii_address(dev, | 502 | |
| 502 | (parse.version_1.str + parse.version_1.ofs[3])))) | 503 | if ((parse.version_1.ns > 3) && (cvt_ascii_address(dev, buf) == 0)) |
| 503 | return 0; | 504 | return 0; |
| 504 | 505 | ||
| 505 | return -EINVAL; | 506 | return -EINVAL; |
| @@ -528,7 +529,7 @@ static int mhz_setup(struct pcmcia_device *link) | |||
| 528 | len = pcmcia_get_tuple(link, 0x81, &buf); | 529 | len = pcmcia_get_tuple(link, 0x81, &buf); |
| 529 | if (buf && len >= 13) { | 530 | if (buf && len >= 13) { |
| 530 | buf[12] = '\0'; | 531 | buf[12] = '\0'; |
| 531 | if (cvt_ascii_address(dev, buf)) | 532 | if (cvt_ascii_address(dev, buf) == 0) |
| 532 | rc = 0; | 533 | rc = 0; |
| 533 | } | 534 | } |
| 534 | kfree(buf); | 535 | kfree(buf); |
| @@ -910,7 +911,7 @@ static int smc91c92_config(struct pcmcia_device *link) | |||
| 910 | 911 | ||
| 911 | if (i != 0) { | 912 | if (i != 0) { |
| 912 | printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n"); | 913 | printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n"); |
| 913 | goto config_undo; | 914 | goto config_failed; |
| 914 | } | 915 | } |
| 915 | 916 | ||
| 916 | smc->duplex = 0; | 917 | smc->duplex = 0; |
| @@ -998,6 +999,7 @@ config_undo: | |||
| 998 | unregister_netdev(dev); | 999 | unregister_netdev(dev); |
| 999 | config_failed: | 1000 | config_failed: |
| 1000 | smc91c92_release(link); | 1001 | smc91c92_release(link); |
| 1002 | free_netdev(dev); | ||
| 1001 | return -ENODEV; | 1003 | return -ENODEV; |
| 1002 | } /* smc91c92_config */ | 1004 | } /* smc91c92_config */ |
| 1003 | 1005 | ||
| @@ -1606,9 +1608,12 @@ static void set_rx_mode(struct net_device *dev) | |||
| 1606 | { | 1608 | { |
| 1607 | unsigned int ioaddr = dev->base_addr; | 1609 | unsigned int ioaddr = dev->base_addr; |
| 1608 | struct smc_private *smc = netdev_priv(dev); | 1610 | struct smc_private *smc = netdev_priv(dev); |
| 1609 | u_int multicast_table[ 2 ] = { 0, }; | 1611 | unsigned char multicast_table[8]; |
| 1610 | unsigned long flags; | 1612 | unsigned long flags; |
| 1611 | u_short rx_cfg_setting; | 1613 | u_short rx_cfg_setting; |
| 1614 | int i; | ||
| 1615 | |||
| 1616 | memset(multicast_table, 0, sizeof(multicast_table)); | ||
| 1612 | 1617 | ||
| 1613 | if (dev->flags & IFF_PROMISC) { | 1618 | if (dev->flags & IFF_PROMISC) { |
| 1614 | rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti; | 1619 | rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti; |
| @@ -1620,10 +1625,6 @@ static void set_rx_mode(struct net_device *dev) | |||
| 1620 | 1625 | ||
| 1621 | netdev_for_each_mc_addr(mc_addr, dev) { | 1626 | netdev_for_each_mc_addr(mc_addr, dev) { |
| 1622 | u_int position = ether_crc(6, mc_addr->dmi_addr); | 1627 | u_int position = ether_crc(6, mc_addr->dmi_addr); |
| 1623 | #ifndef final_version /* Verify multicast address. */ | ||
| 1624 | if ((mc_addr->dmi_addr[0] & 1) == 0) | ||
| 1625 | continue; | ||
| 1626 | #endif | ||
| 1627 | multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); | 1628 | multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); |
| 1628 | } | 1629 | } |
| 1629 | } | 1630 | } |
| @@ -1633,8 +1634,8 @@ static void set_rx_mode(struct net_device *dev) | |||
| 1633 | /* Load MC table and Rx setting into the chip without interrupts. */ | 1634 | /* Load MC table and Rx setting into the chip without interrupts. */ |
| 1634 | spin_lock_irqsave(&smc->lock, flags); | 1635 | spin_lock_irqsave(&smc->lock, flags); |
| 1635 | SMC_SELECT_BANK(3); | 1636 | SMC_SELECT_BANK(3); |
| 1636 | outl(multicast_table[0], ioaddr + MULTICAST0); | 1637 | for (i = 0; i < 8; i++) |
| 1637 | outl(multicast_table[1], ioaddr + MULTICAST4); | 1638 | outb(multicast_table[i], ioaddr + MULTICAST0 + i); |
| 1638 | SMC_SELECT_BANK(0); | 1639 | SMC_SELECT_BANK(0); |
| 1639 | outw(rx_cfg_setting, ioaddr + RCR); | 1640 | outw(rx_cfg_setting, ioaddr + RCR); |
| 1640 | SMC_SELECT_BANK(2); | 1641 | SMC_SELECT_BANK(2); |
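The multicast filter written in this function is a 64-bit hash table: the top three bits of the Ethernet CRC of each address select a byte and the next three bits select a bit within it. Here is a small standalone sketch of that indexing arithmetic, with an invented CRC value standing in for the kernel's ether_crc() result:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned char multicast_table[8] = { 0 };
        uint32_t position = 0xd2345678u;  /* stand-in for ether_crc(6, addr) */

        /* Top 3 bits pick the byte, the next 3 bits pick the bit within it. */
        multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);

        for (int i = 0; i < 8; i++)
                printf("%02x ", multicast_table[i]);
        printf("\n");
        return 0;
}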
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c index a6ef266a2fe2..e73ba455aa20 100644 --- a/drivers/net/qlcnic/qlcnic_hw.c +++ b/drivers/net/qlcnic/qlcnic_hw.c | |||
| @@ -431,6 +431,9 @@ void qlcnic_set_multi(struct net_device *netdev) | |||
| 431 | u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; | 431 | u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; |
| 432 | u32 mode = VPORT_MISS_MODE_DROP; | 432 | u32 mode = VPORT_MISS_MODE_DROP; |
| 433 | 433 | ||
| 434 | if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) | ||
| 435 | return; | ||
| 436 | |||
| 434 | qlcnic_nic_add_mac(adapter, adapter->mac_addr); | 437 | qlcnic_nic_add_mac(adapter, adapter->mac_addr); |
| 435 | qlcnic_nic_add_mac(adapter, bcast_addr); | 438 | qlcnic_nic_add_mac(adapter, bcast_addr); |
| 436 | 439 | ||
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 43afdb6b25e6..0298d8c1dcb6 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c | |||
| @@ -134,7 +134,7 @@ | |||
| 134 | #define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor)) | 134 | #define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor)) |
| 135 | #define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor)) | 135 | #define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor)) |
| 136 | #define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ | 136 | #define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ |
| 137 | #define MCAST_MAX 4 /* Max number multicast addresses to filter */ | 137 | #define MCAST_MAX 3 /* Max number multicast addresses to filter */ |
| 138 | 138 | ||
| 139 | /* Descriptor status */ | 139 | /* Descriptor status */ |
| 140 | #define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ | 140 | #define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ |
| @@ -982,9 +982,6 @@ static void r6040_multicast_list(struct net_device *dev) | |||
| 982 | crc >>= 26; | 982 | crc >>= 26; |
| 983 | hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); | 983 | hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); |
| 984 | } | 984 | } |
| 985 | /* Write the index of the hash table */ | ||
| 986 | for (i = 0; i < 4; i++) | ||
| 987 | iowrite16(hash_table[i] << 14, ioaddr + MCR1); | ||
| 988 | /* Fill the MAC hash tables with their values */ | 985 | /* Fill the MAC hash tables with their values */ |
| 989 | iowrite16(hash_table[0], ioaddr + MAR0); | 986 | iowrite16(hash_table[0], ioaddr + MAR0); |
| 990 | iowrite16(hash_table[1], ioaddr + MAR1); | 987 | iowrite16(hash_table[1], ioaddr + MAR1); |
| @@ -1000,9 +997,9 @@ static void r6040_multicast_list(struct net_device *dev) | |||
| 1000 | iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); | 997 | iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); |
| 1001 | iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); | 998 | iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); |
| 1002 | } else { | 999 | } else { |
| 1003 | iowrite16(0xffff, ioaddr + MID_0L + 8 * i); | 1000 | iowrite16(0xffff, ioaddr + MID_1L + 8 * i); |
| 1004 | iowrite16(0xffff, ioaddr + MID_0M + 8 * i); | 1001 | iowrite16(0xffff, ioaddr + MID_1M + 8 * i); |
| 1005 | iowrite16(0xffff, ioaddr + MID_0H + 8 * i); | 1002 | iowrite16(0xffff, ioaddr + MID_1H + 8 * i); |
| 1006 | } | 1003 | } |
| 1007 | i++; | 1004 | i++; |
| 1008 | } | 1005 | } |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 96740051cdcc..dbb1f5a1824c 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
| @@ -3227,8 +3227,8 @@ static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, | |||
| 3227 | unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; | 3227 | unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; |
| 3228 | 3228 | ||
| 3229 | if (max_frame != 16383) | 3229 | if (max_frame != 16383) |
| 3230 | printk(KERN_WARNING "WARNING! Changing of MTU on this NIC" | 3230 | printk(KERN_WARNING PFX "WARNING! Changing of MTU on this " |
| 3231 | "May lead to frame reception errors!\n"); | 3231 | "NIC may lead to frame reception errors!\n"); |
| 3232 | 3232 | ||
| 3233 | tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE; | 3233 | tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE; |
| 3234 | } | 3234 | } |
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index beb537dbe9a9..c8fc896fc460 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c | |||
| @@ -593,8 +593,10 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 593 | /* Setup... */ | 593 | /* Setup... */ |
| 594 | len = skb->len; | 594 | len = skb->len; |
| 595 | if (len < ETH_ZLEN) { | 595 | if (len < ETH_ZLEN) { |
| 596 | if (skb_padto(skb, ETH_ZLEN)) | 596 | if (skb_padto(skb, ETH_ZLEN)) { |
| 597 | spin_unlock_irqrestore(&sp->tx_lock, flags); | ||
| 597 | return NETDEV_TX_OK; | 598 | return NETDEV_TX_OK; |
| 599 | } | ||
| 598 | len = ETH_ZLEN; | 600 | len = ETH_ZLEN; |
| 599 | } | 601 | } |
| 600 | 602 | ||
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig index fb287649a305..eb63d44748a7 100644 --- a/drivers/net/stmmac/Kconfig +++ b/drivers/net/stmmac/Kconfig | |||
| @@ -2,6 +2,7 @@ config STMMAC_ETH | |||
| 2 | tristate "STMicroelectronics 10/100/1000 Ethernet driver" | 2 | tristate "STMicroelectronics 10/100/1000 Ethernet driver" |
| 3 | select MII | 3 | select MII |
| 4 | select PHYLIB | 4 | select PHYLIB |
| 5 | select CRC32 | ||
| 5 | depends on NETDEVICES && CPU_SUBTYPE_ST40 | 6 | depends on NETDEVICES && CPU_SUBTYPE_ST40 |
| 6 | help | 7 | help |
| 7 | This is the driver for the Ethernet IPs are built around a | 8 | This is the driver for the Ethernet IPs are built around a |
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index a214a1627e8b..4111a85ec80e 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c | |||
| @@ -1686,7 +1686,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev) | |||
| 1686 | } | 1686 | } |
| 1687 | pr_info("done!\n"); | 1687 | pr_info("done!\n"); |
| 1688 | 1688 | ||
| 1689 | if (!request_mem_region(res->start, (res->end - res->start), | 1689 | if (!request_mem_region(res->start, resource_size(res), |
| 1690 | pdev->name)) { | 1690 | pdev->name)) { |
| 1691 | pr_err("%s: ERROR: memory allocation failed" | 1691 | pr_err("%s: ERROR: memory allocation failed" |
| 1692 | "cannot get the I/O addr 0x%x\n", | 1692 | "cannot get the I/O addr 0x%x\n", |
| @@ -1695,9 +1695,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev) | |||
| 1695 | goto out; | 1695 | goto out; |
| 1696 | } | 1696 | } |
| 1697 | 1697 | ||
| 1698 | addr = ioremap(res->start, (res->end - res->start)); | 1698 | addr = ioremap(res->start, resource_size(res)); |
| 1699 | if (!addr) { | 1699 | if (!addr) { |
| 1700 | pr_err("%s: ERROR: memory mapping failed \n", __func__); | 1700 | pr_err("%s: ERROR: memory mapping failed\n", __func__); |
| 1701 | ret = -ENOMEM; | 1701 | ret = -ENOMEM; |
| 1702 | goto out; | 1702 | goto out; |
| 1703 | } | 1703 | } |
| @@ -1775,7 +1775,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev) | |||
| 1775 | out: | 1775 | out: |
| 1776 | if (ret < 0) { | 1776 | if (ret < 0) { |
| 1777 | platform_set_drvdata(pdev, NULL); | 1777 | platform_set_drvdata(pdev, NULL); |
| 1778 | release_mem_region(res->start, (res->end - res->start)); | 1778 | release_mem_region(res->start, resource_size(res)); |
| 1779 | if (addr != NULL) | 1779 | if (addr != NULL) |
| 1780 | iounmap(addr); | 1780 | iounmap(addr); |
| 1781 | } | 1781 | } |
| @@ -1813,7 +1813,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev) | |||
| 1813 | 1813 | ||
| 1814 | iounmap((void *)ndev->base_addr); | 1814 | iounmap((void *)ndev->base_addr); |
| 1815 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1815 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 1816 | release_mem_region(res->start, (res->end - res->start)); | 1816 | release_mem_region(res->start, resource_size(res)); |
| 1817 | 1817 | ||
| 1818 | free_netdev(ndev); | 1818 | free_netdev(ndev); |
| 1819 | 1819 | ||
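For reference, resource_size() treats a struct resource as an inclusive [start, end] range and therefore returns end - start + 1, one more than the bare difference. A minimal userspace sketch with a local stand-in for the helper and a made-up range:

#include <stdio.h>

struct resource { unsigned long start, end; };

/* Local stand-in mirroring resource_size() from include/linux/ioport.h. */
static unsigned long resource_size(const struct resource *res)
{
        return res->end - res->start + 1;
}

int main(void)
{
        struct resource res = { .start = 0x1000, .end = 0x1fff };  /* example */

        printf("resource_size = %#lx, end - start = %#lx\n",
               resource_size(&res), res.end - res.start);
        return 0;
}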
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 96c39bddc78c..43265207d463 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -387,6 +387,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 387 | } | 387 | } |
| 388 | } | 388 | } |
| 389 | 389 | ||
| 390 | /* Orphan the skb - required as we might hang on to it | ||
| 391 | * for indefinite time. */ | ||
| 392 | skb_orphan(skb); | ||
| 393 | |||
| 390 | /* Enqueue packet */ | 394 | /* Enqueue packet */ |
| 391 | skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); | 395 | skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); |
| 392 | dev->trans_start = jiffies; | 396 | dev->trans_start = jiffies; |
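The skb_orphan() call made here releases the packet's reference back to its originating socket, running the destructor that undoes the sender's send-buffer accounting, so the buffer can sit on a queue indefinitely without pinning that socket. Below is a heavily simplified, standalone C sketch of the general orphaning pattern; the struct and names are invented and this is not the sk_buff implementation.

#include <stdio.h>
#include <stddef.h>

/* Invented example: a buffer that may hold a reference to its producer. */
struct buf {
        void (*destructor)(struct buf *b);
        void *owner;
};

static void release_owner_accounting(struct buf *b)
{
        (void)b;
        printf("owner accounting released\n");
}

/* Drop the back-reference before parking the buffer for unbounded time. */
static void buf_orphan(struct buf *b)
{
        if (b->destructor)
                b->destructor(b);
        b->destructor = NULL;
        b->owner = NULL;
}

int main(void)
{
        struct buf b = { .destructor = release_owner_accounting, .owner = &b };

        buf_orphan(&b);  /* now safe to queue b indefinitely */
        return 0;
}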
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 6fb783ce20b9..b0577dd1a42d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -327,6 +327,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp) | |||
| 327 | struct scatterlist sg[2]; | 327 | struct scatterlist sg[2]; |
| 328 | int err; | 328 | int err; |
| 329 | 329 | ||
| 330 | sg_init_table(sg, 2); | ||
| 330 | skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); | 331 | skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); |
| 331 | if (unlikely(!skb)) | 332 | if (unlikely(!skb)) |
| 332 | return -ENOMEM; | 333 | return -ENOMEM; |
| @@ -352,6 +353,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp) | |||
| 352 | char *p; | 353 | char *p; |
| 353 | int i, err, offset; | 354 | int i, err, offset; |
| 354 | 355 | ||
| 356 | sg_init_table(sg, MAX_SKB_FRAGS + 2); | ||
| 355 | /* page in sg[MAX_SKB_FRAGS + 1] is list tail */ | 357 | /* page in sg[MAX_SKB_FRAGS + 1] is list tail */ |
| 356 | for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { | 358 | for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { |
| 357 | first = get_a_page(vi, gfp); | 359 | first = get_a_page(vi, gfp); |
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index b9b9d6b01c0b..941f053e650e 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c | |||
| @@ -628,9 +628,15 @@ static void ppp_stop(struct net_device *dev) | |||
| 628 | ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL); | 628 | ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL); |
| 629 | } | 629 | } |
| 630 | 630 | ||
| 631 | static void ppp_close(struct net_device *dev) | ||
| 632 | { | ||
| 633 | ppp_tx_flush(); | ||
| 634 | } | ||
| 635 | |||
| 631 | static struct hdlc_proto proto = { | 636 | static struct hdlc_proto proto = { |
| 632 | .start = ppp_start, | 637 | .start = ppp_start, |
| 633 | .stop = ppp_stop, | 638 | .stop = ppp_stop, |
| 639 | .close = ppp_close, | ||
| 634 | .type_trans = ppp_type_trans, | 640 | .type_trans = ppp_type_trans, |
| 635 | .ioctl = ppp_ioctl, | 641 | .ioctl = ppp_ioctl, |
| 636 | .netif_rx = ppp_rx, | 642 | .netif_rx = ppp_rx, |
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c index 0b0d2dc2f38c..99a6da464bd3 100644 --- a/drivers/net/wireless/ath/ar9170/usb.c +++ b/drivers/net/wireless/ath/ar9170/usb.c | |||
| @@ -95,6 +95,8 @@ static struct usb_device_id ar9170_usb_ids[] = { | |||
| 95 | { USB_DEVICE(0x04bb, 0x093f) }, | 95 | { USB_DEVICE(0x04bb, 0x093f) }, |
| 96 | /* AVM FRITZ!WLAN USB Stick N */ | 96 | /* AVM FRITZ!WLAN USB Stick N */ |
| 97 | { USB_DEVICE(0x057C, 0x8401) }, | 97 | { USB_DEVICE(0x057C, 0x8401) }, |
| 98 | /* NEC WL300NU-G */ | ||
| 99 | { USB_DEVICE(0x0409, 0x0249) }, | ||
| 98 | /* AVM FRITZ!WLAN USB Stick N 2.4 */ | 100 | /* AVM FRITZ!WLAN USB Stick N 2.4 */ |
| 99 | { USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY }, | 101 | { USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY }, |
| 100 | 102 | ||
| @@ -417,7 +419,7 @@ static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd, | |||
| 417 | spin_unlock_irqrestore(&aru->common.cmdlock, flags); | 419 | spin_unlock_irqrestore(&aru->common.cmdlock, flags); |
| 418 | 420 | ||
| 419 | usb_fill_int_urb(urb, aru->udev, | 421 | usb_fill_int_urb(urb, aru->udev, |
| 420 | usb_sndbulkpipe(aru->udev, AR9170_EP_CMD), | 422 | usb_sndintpipe(aru->udev, AR9170_EP_CMD), |
| 421 | aru->common.cmdbuf, plen + 4, | 423 | aru->common.cmdbuf, plen + 4, |
| 422 | ar9170_usb_tx_urb_complete, NULL, 1); | 424 | ar9170_usb_tx_urb_complete, NULL, 1); |
| 423 | 425 | ||
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 67ca4e5a6017..115e1aeedb59 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
| @@ -1532,8 +1532,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) | |||
| 1532 | all_wiphys_idle = ath9k_all_wiphys_idle(sc); | 1532 | all_wiphys_idle = ath9k_all_wiphys_idle(sc); |
| 1533 | ath9k_set_wiphy_idle(aphy, idle); | 1533 | ath9k_set_wiphy_idle(aphy, idle); |
| 1534 | 1534 | ||
| 1535 | if (!idle && all_wiphys_idle) | 1535 | enable_radio = (!idle && all_wiphys_idle); |
| 1536 | enable_radio = true; | ||
| 1537 | 1536 | ||
| 1538 | /* | 1537 | /* |
| 1539 | * After we unlock here its possible another wiphy | 1538 | * After we unlock here its possible another wiphy |
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index 1bd2cd836026..8972166386cb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c | |||
| @@ -2015,7 +2015,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | |||
| 2015 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " | 2015 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " |
| 2016 | "%d index %d\n", scd_ssn , index); | 2016 | "%d index %d\n", scd_ssn , index); |
| 2017 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | 2017 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); |
| 2018 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); | 2018 | if (qc) |
| 2019 | iwl_free_tfds_in_queue(priv, sta_id, | ||
| 2020 | tid, freed); | ||
| 2019 | 2021 | ||
| 2020 | if (priv->mac80211_registered && | 2022 | if (priv->mac80211_registered && |
| 2021 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && | 2023 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && |
| @@ -2042,13 +2044,14 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | |||
| 2042 | 2044 | ||
| 2043 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | 2045 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); |
| 2044 | if (qc && likely(sta_id != IWL_INVALID_STATION)) | 2046 | if (qc && likely(sta_id != IWL_INVALID_STATION)) |
| 2045 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; | 2047 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); |
| 2048 | else if (sta_id == IWL_INVALID_STATION) | ||
| 2049 | IWL_DEBUG_TX_REPLY(priv, "Station not known\n"); | ||
| 2046 | 2050 | ||
| 2047 | if (priv->mac80211_registered && | 2051 | if (priv->mac80211_registered && |
| 2048 | (iwl_queue_space(&txq->q) > txq->q.low_mark)) | 2052 | (iwl_queue_space(&txq->q) > txq->q.low_mark)) |
| 2049 | iwl_wake_queue(priv, txq_id); | 2053 | iwl_wake_queue(priv, txq_id); |
| 2050 | } | 2054 | } |
| 2051 | |||
| 2052 | if (qc && likely(sta_id != IWL_INVALID_STATION)) | 2055 | if (qc && likely(sta_id != IWL_INVALID_STATION)) |
| 2053 | iwl_txq_check_empty(priv, sta_id, tid, txq_id); | 2056 | iwl_txq_check_empty(priv, sta_id, tid, txq_id); |
| 2054 | 2057 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index 35f819ac87a3..1460116d329f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c | |||
| @@ -346,6 +346,17 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags) | |||
| 346 | !!(rate_n_flags & RATE_MCS_ANT_C_MSK); | 346 | !!(rate_n_flags & RATE_MCS_ANT_C_MSK); |
| 347 | } | 347 | } |
| 348 | 348 | ||
| 349 | /* | ||
| 350 | * Static function to get the expected throughput from an iwl_scale_tbl_info | ||
| 351 | * that wraps a NULL pointer check | ||
| 352 | */ | ||
| 353 | static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) | ||
| 354 | { | ||
| 355 | if (tbl->expected_tpt) | ||
| 356 | return tbl->expected_tpt[rs_index]; | ||
| 357 | return 0; | ||
| 358 | } | ||
| 359 | |||
| 349 | /** | 360 | /** |
| 350 | * rs_collect_tx_data - Update the success/failure sliding window | 361 | * rs_collect_tx_data - Update the success/failure sliding window |
| 351 | * | 362 | * |
| @@ -353,19 +364,21 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags) | |||
| 353 | * at this rate. window->data contains the bitmask of successful | 364 | * at this rate. window->data contains the bitmask of successful |
| 354 | * packets. | 365 | * packets. |
| 355 | */ | 366 | */ |
| 356 | static int rs_collect_tx_data(struct iwl_rate_scale_data *windows, | 367 | static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, |
| 357 | int scale_index, s32 tpt, int attempts, | 368 | int scale_index, int attempts, int successes) |
| 358 | int successes) | ||
| 359 | { | 369 | { |
| 360 | struct iwl_rate_scale_data *window = NULL; | 370 | struct iwl_rate_scale_data *window = NULL; |
| 361 | static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); | 371 | static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); |
| 362 | s32 fail_count; | 372 | s32 fail_count, tpt; |
| 363 | 373 | ||
| 364 | if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) | 374 | if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) |
| 365 | return -EINVAL; | 375 | return -EINVAL; |
| 366 | 376 | ||
| 367 | /* Select window for current tx bit rate */ | 377 | /* Select window for current tx bit rate */ |
| 368 | window = &(windows[scale_index]); | 378 | window = &(tbl->win[scale_index]); |
| 379 | |||
| 380 | /* Get expected throughput */ | ||
| 381 | tpt = get_expected_tpt(tbl, scale_index); | ||
| 369 | 382 | ||
| 370 | /* | 383 | /* |
| 371 | * Keep track of only the latest 62 tx frame attempts in this rate's | 384 | * Keep track of only the latest 62 tx frame attempts in this rate's |
| @@ -739,16 +752,6 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a, | |||
| 739 | return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && | 752 | return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && |
| 740 | (a->is_SGI == b->is_SGI); | 753 | (a->is_SGI == b->is_SGI); |
| 741 | } | 754 | } |
| 742 | /* | ||
| 743 | * Static function to get the expected throughput from an iwl_scale_tbl_info | ||
| 744 | * that wraps a NULL pointer check | ||
| 745 | */ | ||
| 746 | static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) | ||
| 747 | { | ||
| 748 | if (tbl->expected_tpt) | ||
| 749 | return tbl->expected_tpt[rs_index]; | ||
| 750 | return 0; | ||
| 751 | } | ||
| 752 | 755 | ||
| 753 | /* | 756 | /* |
| 754 | * mac80211 sends us Tx status | 757 | * mac80211 sends us Tx status |
| @@ -765,12 +768,10 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, | |||
| 765 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 768 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
| 766 | struct iwl_priv *priv = (struct iwl_priv *)priv_r; | 769 | struct iwl_priv *priv = (struct iwl_priv *)priv_r; |
| 767 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 770 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
| 768 | struct iwl_rate_scale_data *window = NULL; | ||
| 769 | enum mac80211_rate_control_flags mac_flags; | 771 | enum mac80211_rate_control_flags mac_flags; |
| 770 | u32 tx_rate; | 772 | u32 tx_rate; |
| 771 | struct iwl_scale_tbl_info tbl_type; | 773 | struct iwl_scale_tbl_info tbl_type; |
| 772 | struct iwl_scale_tbl_info *curr_tbl, *other_tbl; | 774 | struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; |
| 773 | s32 tpt = 0; | ||
| 774 | 775 | ||
| 775 | IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); | 776 | IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); |
| 776 | 777 | ||
| @@ -853,7 +854,6 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, | |||
| 853 | IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n"); | 854 | IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n"); |
| 854 | return; | 855 | return; |
| 855 | } | 856 | } |
| 856 | window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]); | ||
| 857 | 857 | ||
| 858 | /* | 858 | /* |
| 859 | * Updating the frame history depends on whether packets were | 859 | * Updating the frame history depends on whether packets were |
| @@ -866,8 +866,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, | |||
| 866 | tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); | 866 | tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); |
| 867 | rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, | 867 | rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, |
| 868 | &rs_index); | 868 | &rs_index); |
| 869 | tpt = get_expected_tpt(curr_tbl, rs_index); | 869 | rs_collect_tx_data(curr_tbl, rs_index, |
| 870 | rs_collect_tx_data(window, rs_index, tpt, | ||
| 871 | info->status.ampdu_ack_len, | 870 | info->status.ampdu_ack_len, |
| 872 | info->status.ampdu_ack_map); | 871 | info->status.ampdu_ack_map); |
| 873 | 872 | ||
| @@ -897,19 +896,13 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, | |||
| 897 | * table as active/search. | 896 | * table as active/search. |
| 898 | */ | 897 | */ |
| 899 | if (table_type_matches(&tbl_type, curr_tbl)) | 898 | if (table_type_matches(&tbl_type, curr_tbl)) |
| 900 | tpt = get_expected_tpt(curr_tbl, rs_index); | 899 | tmp_tbl = curr_tbl; |
| 901 | else if (table_type_matches(&tbl_type, other_tbl)) | 900 | else if (table_type_matches(&tbl_type, other_tbl)) |
| 902 | tpt = get_expected_tpt(other_tbl, rs_index); | 901 | tmp_tbl = other_tbl; |
| 903 | else | 902 | else |
| 904 | continue; | 903 | continue; |
| 905 | 904 | rs_collect_tx_data(tmp_tbl, rs_index, 1, | |
| 906 | /* Constants mean 1 transmission, 0 successes */ | 905 | i < retries ? 0 : legacy_success); |
| 907 | if (i < retries) | ||
| 908 | rs_collect_tx_data(window, rs_index, tpt, 1, | ||
| 909 | 0); | ||
| 910 | else | ||
| 911 | rs_collect_tx_data(window, rs_index, tpt, 1, | ||
| 912 | legacy_success); | ||
| 913 | } | 906 | } |
| 914 | 907 | ||
| 915 | /* Update success/fail counts if not searching for new mode */ | 908 | /* Update success/fail counts if not searching for new mode */ |
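The rate-scaling refactor passes the whole table into rs_collect_tx_data() so the expected-throughput lookup, including its NULL guard, happens in one place instead of at every call site. A standalone sketch of that guarded lookup, with made-up table contents:

    #include <stdio.h>

    /* Simplified stand-in for iwl_scale_tbl_info: the expected-throughput
     * array may legitimately be NULL before the table is fully set up. */
    struct toy_tbl { const int *expected_tpt; };

    static int get_expected_tpt(const struct toy_tbl *tbl, int rs_index)
    {
        if (tbl->expected_tpt)
            return tbl->expected_tpt[rs_index];
        return 0;  /* report unknown throughput rather than dereferencing NULL */
    }

    int main(void)
    {
        static const int tpt_table[] = { 0, 8, 15, 26, 44 };
        struct toy_tbl ready   = { .expected_tpt = tpt_table };
        struct toy_tbl not_yet = { .expected_tpt = NULL };

        printf("ready: %d\n", get_expected_tpt(&ready, 3));   /* 26 */
        printf("early: %d\n", get_expected_tpt(&not_yet, 3)); /* 0  */
        return 0;
    }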
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 5e0c6bf3fbb1..8b8e3e1cbb44 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
| @@ -1259,7 +1259,15 @@ static void iwl_irq_tasklet(struct iwl_priv *priv) | |||
| 1259 | /* Ack/clear/reset pending uCode interrupts. | 1259 | /* Ack/clear/reset pending uCode interrupts. |
| 1260 | * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, | 1260 | * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, |
| 1261 | */ | 1261 | */ |
| 1262 | iwl_write32(priv, CSR_INT, priv->inta); | 1262 | /* There is a hardware bug in the interrupt mask function that some |
| 1263 | * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if | ||
| 1264 | * they are disabled in the CSR_INT_MASK register. Furthermore the | ||
| 1265 | * ICT interrupt handling mechanism has another bug that might cause | ||
| 1266 | * these unmasked interrupts to go undetected. We work around the | ||
| 1267 | * hardware bugs here by ACKing all the possible interrupts so that | ||
| 1268 | * interrupt coalescing can still be achieved. | ||
| 1269 | */ | ||
| 1270 | iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask); | ||
| 1263 | 1271 | ||
| 1264 | inta = priv->inta; | 1272 | inta = priv->inta; |
| 1265 | 1273 | ||
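The workaround widens the acknowledge write from just the latched bits to the latched bits plus everything outside the interrupt mask, i.e. CSR_INT is written with inta | ~inta_mask. A small standalone demonstration of that bit arithmetic, using made-up register values:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    int main(void)
    {
        uint32_t inta      = 0x00000008;  /* interrupts actually latched (example value) */
        uint32_t inta_mask = 0x0000000f;  /* interrupts we intended to enable (example value) */

        uint32_t old_ack = inta;               /* previous behaviour: ack only what we saw */
        uint32_t new_ack = inta | ~inta_mask;  /* also ack every masked-out source */

        printf("old ack write: 0x%08" PRIx32 "\n", old_ack);
        printf("new ack write: 0x%08" PRIx32 "\n", new_ack);  /* 0xfffffff8 here */
        return 0;
    }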
| @@ -2645,7 +2653,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv) | |||
| 2645 | BIT(NL80211_IFTYPE_STATION) | | 2653 | BIT(NL80211_IFTYPE_STATION) | |
| 2646 | BIT(NL80211_IFTYPE_ADHOC); | 2654 | BIT(NL80211_IFTYPE_ADHOC); |
| 2647 | 2655 | ||
| 2648 | hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY | | 2656 | hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | |
| 2649 | WIPHY_FLAG_DISABLE_BEACON_HINTS; | 2657 | WIPHY_FLAG_DISABLE_BEACON_HINTS; |
| 2650 | 2658 | ||
| 2651 | /* | 2659 | /* |
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c index de3b3f403d1f..8b516c5ff0bb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-calib.c +++ b/drivers/net/wireless/iwlwifi/iwl-calib.c | |||
| @@ -808,6 +808,18 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, | |||
| 808 | } | 808 | } |
| 809 | } | 809 | } |
| 810 | 810 | ||
| 811 | /* | ||
| 812 | * The above algorithm sometimes fails when the ucode | ||
| 813 | * reports 0 for all chains. It's not clear why that | ||
| 814 | * happens to start with, but it is then causing trouble | ||
| 815 | * because this can make us enable more chains than the | ||
| 816 | * hardware really has. | ||
| 817 | * | ||
| 818 | * To be safe, simply mask out any chains that we know | ||
| 819 | * are not on the device. | ||
| 820 | */ | ||
| 821 | active_chains &= priv->hw_params.valid_rx_ant; | ||
| 822 | |||
| 811 | num_tx_chains = 0; | 823 | num_tx_chains = 0; |
| 812 | for (i = 0; i < NUM_RX_CHAINS; i++) { | 824 | for (i = 0; i < NUM_RX_CHAINS; i++) { |
| 813 | /* loops on all the bits of | 825 | /* loops on all the bits of |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index db050b811232..3352f7086632 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c | |||
| @@ -308,10 +308,13 @@ int iwl_hw_nic_init(struct iwl_priv *priv) | |||
| 308 | 308 | ||
| 309 | spin_unlock_irqrestore(&priv->lock, flags); | 309 | spin_unlock_irqrestore(&priv->lock, flags); |
| 310 | 310 | ||
| 311 | /* Allocate and init all Tx and Command queues */ | 311 | /* Allocate or reset and init all Tx and Command queues */ |
| 312 | ret = iwl_txq_ctx_reset(priv); | 312 | if (!priv->txq) { |
| 313 | if (ret) | 313 | ret = iwl_txq_ctx_alloc(priv); |
| 314 | return ret; | 314 | if (ret) |
| 315 | return ret; | ||
| 316 | } else | ||
| 317 | iwl_txq_ctx_reset(priv); | ||
| 315 | 318 | ||
| 316 | set_bit(STATUS_INIT, &priv->status); | 319 | set_bit(STATUS_INIT, &priv->status); |
| 317 | 320 | ||
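iwl_hw_nic_init() now distinguishes first-time allocation from later re-initialization: the queue array is allocated only while priv->txq is still NULL, and otherwise just reset in place, so restarts no longer tear down and reallocate the DMA structures. A standalone sketch of that allocate-once/reset-thereafter pattern, with toy state and hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct toy_priv { int *txq; size_t nqueues; };

    static int toy_nic_init(struct toy_priv *priv)
    {
        if (!priv->txq) {                /* first bring-up: allocate */
            priv->txq = calloc(priv->nqueues, sizeof(*priv->txq));
            if (!priv->txq)
                return -1;
            printf("allocated %zu queues\n", priv->nqueues);
        } else {                         /* later restarts: just reset */
            memset(priv->txq, 0, priv->nqueues * sizeof(*priv->txq));
            printf("reset existing queues\n");
        }
        return 0;
    }

    int main(void)
    {
        struct toy_priv priv = { .txq = NULL, .nqueues = 10 };

        toy_nic_init(&priv);  /* allocates */
        toy_nic_init(&priv);  /* resets    */
        free(priv.txq);
        return 0;
    }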
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 4ef7739f9e8e..732590f5fe30 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h | |||
| @@ -442,7 +442,8 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); | |||
| 442 | /***************************************************** | 442 | /***************************************************** |
| 443 | * TX | 443 | * TX |
| 444 | ******************************************************/ | 444 | ******************************************************/ |
| 445 | int iwl_txq_ctx_reset(struct iwl_priv *priv); | 445 | int iwl_txq_ctx_alloc(struct iwl_priv *priv); |
| 446 | void iwl_txq_ctx_reset(struct iwl_priv *priv); | ||
| 446 | void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); | 447 | void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); |
| 447 | int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, | 448 | int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, |
| 448 | struct iwl_tx_queue *txq, | 449 | struct iwl_tx_queue *txq, |
| @@ -456,6 +457,8 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv, | |||
| 456 | void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); | 457 | void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); |
| 457 | int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, | 458 | int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, |
| 458 | int slots_num, u32 txq_id); | 459 | int slots_num, u32 txq_id); |
| 460 | void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, | ||
| 461 | int slots_num, u32 txq_id); | ||
| 459 | void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); | 462 | void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); |
| 460 | int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); | 463 | int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); |
| 461 | int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid); | 464 | int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index f0b7e6cfbe4f..8dd0c036d547 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c | |||
| @@ -194,10 +194,34 @@ void iwl_cmd_queue_free(struct iwl_priv *priv) | |||
| 194 | struct iwl_queue *q = &txq->q; | 194 | struct iwl_queue *q = &txq->q; |
| 195 | struct device *dev = &priv->pci_dev->dev; | 195 | struct device *dev = &priv->pci_dev->dev; |
| 196 | int i; | 196 | int i; |
| 197 | bool huge = false; | ||
| 197 | 198 | ||
| 198 | if (q->n_bd == 0) | 199 | if (q->n_bd == 0) |
| 199 | return; | 200 | return; |
| 200 | 201 | ||
| 202 | for (; q->read_ptr != q->write_ptr; | ||
| 203 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
| 204 | /* we have no way to tell if it is a huge cmd ATM */ | ||
| 205 | i = get_cmd_index(q, q->read_ptr, 0); | ||
| 206 | |||
| 207 | if (txq->meta[i].flags & CMD_SIZE_HUGE) { | ||
| 208 | huge = true; | ||
| 209 | continue; | ||
| 210 | } | ||
| 211 | |||
| 212 | pci_unmap_single(priv->pci_dev, | ||
| 213 | pci_unmap_addr(&txq->meta[i], mapping), | ||
| 214 | pci_unmap_len(&txq->meta[i], len), | ||
| 215 | PCI_DMA_BIDIRECTIONAL); | ||
| 216 | } | ||
| 217 | if (huge) { | ||
| 218 | i = q->n_window; | ||
| 219 | pci_unmap_single(priv->pci_dev, | ||
| 220 | pci_unmap_addr(&txq->meta[i], mapping), | ||
| 221 | pci_unmap_len(&txq->meta[i], len), | ||
| 222 | PCI_DMA_BIDIRECTIONAL); | ||
| 223 | } | ||
| 224 | |||
| 201 | /* De-alloc array of command/tx buffers */ | 225 | /* De-alloc array of command/tx buffers */ |
| 202 | for (i = 0; i <= TFD_CMD_SLOTS; i++) | 226 | for (i = 0; i <= TFD_CMD_SLOTS; i++) |
| 203 | kfree(txq->cmd[i]); | 227 | kfree(txq->cmd[i]); |
| @@ -410,6 +434,26 @@ out_free_arrays: | |||
| 410 | } | 434 | } |
| 411 | EXPORT_SYMBOL(iwl_tx_queue_init); | 435 | EXPORT_SYMBOL(iwl_tx_queue_init); |
| 412 | 436 | ||
| 437 | void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, | ||
| 438 | int slots_num, u32 txq_id) | ||
| 439 | { | ||
| 440 | int actual_slots = slots_num; | ||
| 441 | |||
| 442 | if (txq_id == IWL_CMD_QUEUE_NUM) | ||
| 443 | actual_slots++; | ||
| 444 | |||
| 445 | memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots); | ||
| 446 | |||
| 447 | txq->need_update = 0; | ||
| 448 | |||
| 449 | /* Initialize queue's high/low-water marks, and head/tail indexes */ | ||
| 450 | iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); | ||
| 451 | |||
| 452 | /* Tell device where to find queue */ | ||
| 453 | priv->cfg->ops->lib->txq_init(priv, txq); | ||
| 454 | } | ||
| 455 | EXPORT_SYMBOL(iwl_tx_queue_reset); | ||
| 456 | |||
| 413 | /** | 457 | /** |
| 414 | * iwl_hw_txq_ctx_free - Free TXQ Context | 458 | * iwl_hw_txq_ctx_free - Free TXQ Context |
| 415 | * | 459 | * |
| @@ -421,8 +465,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv) | |||
| 421 | 465 | ||
| 422 | /* Tx queues */ | 466 | /* Tx queues */ |
| 423 | if (priv->txq) { | 467 | if (priv->txq) { |
| 424 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; | 468 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) |
| 425 | txq_id++) | ||
| 426 | if (txq_id == IWL_CMD_QUEUE_NUM) | 469 | if (txq_id == IWL_CMD_QUEUE_NUM) |
| 427 | iwl_cmd_queue_free(priv); | 470 | iwl_cmd_queue_free(priv); |
| 428 | else | 471 | else |
| @@ -438,15 +481,15 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv) | |||
| 438 | EXPORT_SYMBOL(iwl_hw_txq_ctx_free); | 481 | EXPORT_SYMBOL(iwl_hw_txq_ctx_free); |
| 439 | 482 | ||
| 440 | /** | 483 | /** |
| 441 | * iwl_txq_ctx_reset - Reset TX queue context | 484 | * iwl_txq_ctx_alloc - allocate TX queue context |
| 442 | * Destroys all DMA structures and initialize them again | 485 | * Allocate all Tx DMA structures and initialize them |
| 443 | * | 486 | * |
| 444 | * @param priv | 487 | * @param priv |
| 445 | * @return error code | 488 | * @return error code |
| 446 | */ | 489 | */ |
| 447 | int iwl_txq_ctx_reset(struct iwl_priv *priv) | 490 | int iwl_txq_ctx_alloc(struct iwl_priv *priv) |
| 448 | { | 491 | { |
| 449 | int ret = 0; | 492 | int ret; |
| 450 | int txq_id, slots_num; | 493 | int txq_id, slots_num; |
| 451 | unsigned long flags; | 494 | unsigned long flags; |
| 452 | 495 | ||
| @@ -504,8 +547,31 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv) | |||
| 504 | return ret; | 547 | return ret; |
| 505 | } | 548 | } |
| 506 | 549 | ||
| 550 | void iwl_txq_ctx_reset(struct iwl_priv *priv) | ||
| 551 | { | ||
| 552 | int txq_id, slots_num; | ||
| 553 | unsigned long flags; | ||
| 554 | |||
| 555 | spin_lock_irqsave(&priv->lock, flags); | ||
| 556 | |||
| 557 | /* Turn off all Tx DMA fifos */ | ||
| 558 | priv->cfg->ops->lib->txq_set_sched(priv, 0); | ||
| 559 | |||
| 560 | /* Tell NIC where to find the "keep warm" buffer */ | ||
| 561 | iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); | ||
| 562 | |||
| 563 | spin_unlock_irqrestore(&priv->lock, flags); | ||
| 564 | |||
| 565 | /* Alloc and init all Tx queues, including the command queue (#4) */ | ||
| 566 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { | ||
| 567 | slots_num = txq_id == IWL_CMD_QUEUE_NUM ? | ||
| 568 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
| 569 | iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id); | ||
| 570 | } | ||
| 571 | } | ||
| 572 | |||
| 507 | /** | 573 | /** |
| 508 | * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory | 574 | * iwl_txq_ctx_stop - Stop all Tx DMA channels |
| 509 | */ | 575 | */ |
| 510 | void iwl_txq_ctx_stop(struct iwl_priv *priv) | 576 | void iwl_txq_ctx_stop(struct iwl_priv *priv) |
| 511 | { | 577 | { |
| @@ -525,9 +591,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv) | |||
| 525 | 1000); | 591 | 1000); |
| 526 | } | 592 | } |
| 527 | spin_unlock_irqrestore(&priv->lock, flags); | 593 | spin_unlock_irqrestore(&priv->lock, flags); |
| 528 | |||
| 529 | /* Deallocate memory for all Tx queues */ | ||
| 530 | iwl_hw_txq_ctx_free(priv); | ||
| 531 | } | 594 | } |
| 532 | EXPORT_SYMBOL(iwl_txq_ctx_stop); | 595 | EXPORT_SYMBOL(iwl_txq_ctx_stop); |
| 533 | 596 | ||
| @@ -1050,6 +1113,14 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | |||
| 1050 | 1113 | ||
| 1051 | spin_lock_irqsave(&priv->hcmd_lock, flags); | 1114 | spin_lock_irqsave(&priv->hcmd_lock, flags); |
| 1052 | 1115 | ||
| 1116 | /* If this is a huge cmd, mark the huge flag also on the meta.flags | ||
| 1117 | * of the _original_ cmd. This is used for DMA mapping clean up. | ||
| 1118 | */ | ||
| 1119 | if (cmd->flags & CMD_SIZE_HUGE) { | ||
| 1120 | idx = get_cmd_index(q, q->write_ptr, 0); | ||
| 1121 | txq->meta[idx].flags = CMD_SIZE_HUGE; | ||
| 1122 | } | ||
| 1123 | |||
| 1053 | idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); | 1124 | idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); |
| 1054 | out_cmd = txq->cmd[idx]; | 1125 | out_cmd = txq->cmd[idx]; |
| 1055 | out_meta = &txq->meta[idx]; | 1126 | out_meta = &txq->meta[idx]; |
| @@ -1227,6 +1298,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
| 1227 | bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); | 1298 | bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); |
| 1228 | struct iwl_device_cmd *cmd; | 1299 | struct iwl_device_cmd *cmd; |
| 1229 | struct iwl_cmd_meta *meta; | 1300 | struct iwl_cmd_meta *meta; |
| 1301 | struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; | ||
| 1230 | 1302 | ||
| 1231 | /* If a Tx command is being handled and it isn't in the actual | 1303 | /* If a Tx command is being handled and it isn't in the actual |
| 1232 | * command queue then there a command routing bug has been introduced | 1304 | * command queue then there a command routing bug has been introduced |
| @@ -1240,9 +1312,17 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
| 1240 | return; | 1312 | return; |
| 1241 | } | 1313 | } |
| 1242 | 1314 | ||
| 1243 | cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); | 1315 | /* If this is a huge cmd, clear the huge flag on the meta.flags |
| 1244 | cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; | 1316 | * of the _original_ cmd. So that iwl_cmd_queue_free won't unmap |
| 1245 | meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index]; | 1317 | * the DMA buffer for the scan (huge) command. |
| 1318 | */ | ||
| 1319 | if (huge) { | ||
| 1320 | cmd_index = get_cmd_index(&txq->q, index, 0); | ||
| 1321 | txq->meta[cmd_index].flags = 0; | ||
| 1322 | } | ||
| 1323 | cmd_index = get_cmd_index(&txq->q, index, huge); | ||
| 1324 | cmd = txq->cmd[cmd_index]; | ||
| 1325 | meta = &txq->meta[cmd_index]; | ||
| 1246 | 1326 | ||
| 1247 | pci_unmap_single(priv->pci_dev, | 1327 | pci_unmap_single(priv->pci_dev, |
| 1248 | pci_unmap_addr(meta, mapping), | 1328 | pci_unmap_addr(meta, mapping), |
| @@ -1264,6 +1344,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
| 1264 | get_cmd_string(cmd->hdr.cmd)); | 1344 | get_cmd_string(cmd->hdr.cmd)); |
| 1265 | wake_up_interruptible(&priv->wait_command_queue); | 1345 | wake_up_interruptible(&priv->wait_command_queue); |
| 1266 | } | 1346 | } |
| 1347 | meta->flags = 0; | ||
| 1267 | } | 1348 | } |
| 1268 | EXPORT_SYMBOL(iwl_tx_cmd_complete); | 1349 | EXPORT_SYMBOL(iwl_tx_cmd_complete); |
| 1269 | 1350 | ||
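The huge-command handling above relies on the driver's indexing convention for the command queue: regular commands occupy slots 0..n_window-1 and the single "huge" (scan) buffer sits one slot past the window, which is why iwl_cmd_queue_free() unmaps index q->n_window when the huge flag was seen. A standalone sketch of that convention, assuming the window size is a power of two (toy helper, not the driver's get_cmd_index()):

    #include <stdio.h>

    /* Toy version of the command-queue index mapping described above. */
    static int toy_cmd_index(unsigned int write_ptr, unsigned int n_window, int is_huge)
    {
        if (is_huge)
            return n_window;               /* one extra slot past the window */
        return write_ptr & (n_window - 1); /* n_window assumed to be a power of two */
    }

    int main(void)
    {
        unsigned int n_window = 32;

        printf("normal cmd at ptr 37 -> slot %d\n", toy_cmd_index(37, n_window, 0)); /* 5  */
        printf("huge   cmd at ptr 37 -> slot %d\n", toy_cmd_index(37, n_window, 1)); /* 32 */
        return 0;
    }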
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 1eaa0052c11b..b55e4f39a9e1 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
| @@ -1956,7 +1956,7 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv, | |||
| 1956 | { | 1956 | { |
| 1957 | int i; | 1957 | int i; |
| 1958 | 1958 | ||
| 1959 | for (i = 0; i < IWL_RATE_COUNT; i++) { | 1959 | for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { |
| 1960 | rates[i].bitrate = iwl3945_rates[i].ieee * 5; | 1960 | rates[i].bitrate = iwl3945_rates[i].ieee * 5; |
| 1961 | rates[i].hw_value = i; /* Rate scaling will work on indexes */ | 1961 | rates[i].hw_value = i; /* Rate scaling will work on indexes */ |
| 1962 | rates[i].hw_value_short = i; | 1962 | rates[i].hw_value_short = i; |
| @@ -3922,7 +3922,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv) | |||
| 3922 | BIT(NL80211_IFTYPE_STATION) | | 3922 | BIT(NL80211_IFTYPE_STATION) | |
| 3923 | BIT(NL80211_IFTYPE_ADHOC); | 3923 | BIT(NL80211_IFTYPE_ADHOC); |
| 3924 | 3924 | ||
| 3925 | hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY | | 3925 | hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | |
| 3926 | WIPHY_FLAG_DISABLE_BEACON_HINTS; | 3926 | WIPHY_FLAG_DISABLE_BEACON_HINTS; |
| 3927 | 3927 | ||
| 3928 | hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; | 3928 | hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; |
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index e196b84914db..ce7bec402a33 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c | |||
| @@ -173,6 +173,8 @@ int lbs_cfg_register(struct lbs_private *priv) | |||
| 173 | if (ret < 0) | 173 | if (ret < 0) |
| 174 | lbs_pr_err("cannot register wiphy device\n"); | 174 | lbs_pr_err("cannot register wiphy device\n"); |
| 175 | 175 | ||
| 176 | priv->wiphy_registered = true; | ||
| 177 | |||
| 176 | ret = register_netdev(priv->dev); | 178 | ret = register_netdev(priv->dev); |
| 177 | if (ret) | 179 | if (ret) |
| 178 | lbs_pr_err("cannot register network device\n"); | 180 | lbs_pr_err("cannot register network device\n"); |
| @@ -191,9 +193,11 @@ void lbs_cfg_free(struct lbs_private *priv) | |||
| 191 | if (!wdev) | 193 | if (!wdev) |
| 192 | return; | 194 | return; |
| 193 | 195 | ||
| 194 | if (wdev->wiphy) { | 196 | if (priv->wiphy_registered) |
| 195 | wiphy_unregister(wdev->wiphy); | 197 | wiphy_unregister(wdev->wiphy); |
| 198 | |||
| 199 | if (wdev->wiphy) | ||
| 196 | wiphy_free(wdev->wiphy); | 200 | wiphy_free(wdev->wiphy); |
| 197 | } | 201 | |
| 198 | kfree(wdev); | 202 | kfree(wdev); |
| 199 | } | 203 | } |
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h index 6977ee820214..6875e1498bd5 100644 --- a/drivers/net/wireless/libertas/dev.h +++ b/drivers/net/wireless/libertas/dev.h | |||
| @@ -36,6 +36,7 @@ struct lbs_private { | |||
| 36 | 36 | ||
| 37 | /* CFG80211 */ | 37 | /* CFG80211 */ |
| 38 | struct wireless_dev *wdev; | 38 | struct wireless_dev *wdev; |
| 39 | bool wiphy_registered; | ||
| 39 | 40 | ||
| 40 | /* Mesh */ | 41 | /* Mesh */ |
| 41 | struct net_device *mesh_dev; /* Virtual device */ | 42 | struct net_device *mesh_dev; /* Virtual device */ |
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 89354c29f088..12fdcb25fd38 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c | |||
| @@ -3852,6 +3852,7 @@ MODULE_FIRMWARE("mwl8k/helper_8366.fw"); | |||
| 3852 | MODULE_FIRMWARE("mwl8k/fmimage_8366.fw"); | 3852 | MODULE_FIRMWARE("mwl8k/fmimage_8366.fw"); |
| 3853 | 3853 | ||
| 3854 | static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = { | 3854 | static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = { |
| 3855 | { PCI_VDEVICE(MARVELL, 0x2a0a), .driver_data = MWL8363, }, | ||
| 3855 | { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, }, | 3856 | { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, }, |
| 3856 | { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, }, | 3857 | { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, }, |
| 3857 | { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, }, | 3858 | { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, }, |
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 762952d688e2..743a6c68b29d 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c | |||
| @@ -36,6 +36,7 @@ MODULE_FIRMWARE("isl3887usb"); | |||
| 36 | static struct usb_device_id p54u_table[] __devinitdata = { | 36 | static struct usb_device_id p54u_table[] __devinitdata = { |
| 37 | /* Version 1 devices (pci chip + net2280) */ | 37 | /* Version 1 devices (pci chip + net2280) */ |
| 38 | {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ | 38 | {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ |
| 39 | {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */ | ||
| 39 | {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ | 40 | {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ |
| 40 | {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */ | 41 | {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */ |
| 41 | {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */ | 42 | {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */ |
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index d2cc4458477f..8ebb705fe106 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c | |||
| @@ -1644,6 +1644,11 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) | |||
| 1644 | unsigned int i; | 1644 | unsigned int i; |
| 1645 | 1645 | ||
| 1646 | /* | 1646 | /* |
| 1647 | * Disable powersaving as default. | ||
| 1648 | */ | ||
| 1649 | rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; | ||
| 1650 | |||
| 1651 | /* | ||
| 1647 | * Initialize all hw fields. | 1652 | * Initialize all hw fields. |
| 1648 | */ | 1653 | */ |
| 1649 | rt2x00dev->hw->flags = | 1654 | rt2x00dev->hw->flags = |
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 58c7f218019d..c015ce9fdd09 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c | |||
| @@ -813,9 +813,9 @@ static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev, | |||
| 813 | rt2800_rfcsr_write(rt2x00dev, 24, | 813 | rt2800_rfcsr_write(rt2x00dev, 24, |
| 814 | rt2x00dev->calibration[conf_is_ht40(conf)]); | 814 | rt2x00dev->calibration[conf_is_ht40(conf)]); |
| 815 | 815 | ||
| 816 | rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr); | 816 | rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr); |
| 817 | rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1); | 817 | rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1); |
| 818 | rt2800_rfcsr_write(rt2x00dev, 23, rfcsr); | 818 | rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); |
| 819 | } | 819 | } |
| 820 | 820 | ||
| 821 | static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, | 821 | static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, |
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 2e47991eccf6..559069a80a3b 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c | |||
| @@ -596,19 +596,17 @@ struct pcmcia_align_data { | |||
| 596 | struct resource_map *map; | 596 | struct resource_map *map; |
| 597 | }; | 597 | }; |
| 598 | 598 | ||
| 599 | static resource_size_t | 599 | static resource_size_t pcmcia_common_align(struct pcmcia_align_data *align_data, |
| 600 | pcmcia_common_align(void *align_data, const struct resource *res, | 600 | resource_size_t start) |
| 601 | resource_size_t size, resource_size_t align) | ||
| 602 | { | 601 | { |
| 603 | struct pcmcia_align_data *data = align_data; | 602 | resource_size_t ret; |
| 604 | resource_size_t start; | ||
| 605 | /* | 603 | /* |
| 606 | * Ensure that we have the correct start address | 604 | * Ensure that we have the correct start address |
| 607 | */ | 605 | */ |
| 608 | start = (res->start & ~data->mask) + data->offset; | 606 | ret = (start & ~align_data->mask) + align_data->offset; |
| 609 | if (start < res->start) | 607 | if (ret < start) |
| 610 | start += data->mask + 1; | 608 | ret += align_data->mask + 1; |
| 611 | return start; | 609 | return ret; |
| 612 | } | 610 | } |
| 613 | 611 | ||
| 614 | static resource_size_t | 612 | static resource_size_t |
| @@ -619,29 +617,28 @@ pcmcia_align(void *align_data, const struct resource *res, | |||
| 619 | struct resource_map *m; | 617 | struct resource_map *m; |
| 620 | resource_size_t start; | 618 | resource_size_t start; |
| 621 | 619 | ||
| 622 | start = pcmcia_common_align(data, res, size, align); | 620 | start = pcmcia_common_align(data, res->start); |
| 623 | 621 | ||
| 624 | for (m = data->map->next; m != data->map; m = m->next) { | 622 | for (m = data->map->next; m != data->map; m = m->next) { |
| 625 | unsigned long start = m->base; | 623 | unsigned long map_start = m->base; |
| 626 | unsigned long end = m->base + m->num - 1; | 624 | unsigned long map_end = m->base + m->num - 1; |
| 627 | 625 | ||
| 628 | /* | 626 | /* |
| 629 | * If the lower resources are not available, try aligning | 627 | * If the lower resources are not available, try aligning |
| 630 | * to this entry of the resource database to see if it'll | 628 | * to this entry of the resource database to see if it'll |
| 631 | * fit here. | 629 | * fit here. |
| 632 | */ | 630 | */ |
| 633 | if (res->start < start) { | 631 | if (start < map_start) |
| 634 | start = pcmcia_common_align(data, res, size, align); | 632 | start = pcmcia_common_align(data, map_start); |
| 635 | } | ||
| 636 | 633 | ||
| 637 | /* | 634 | /* |
| 638 | * If we're above the area which was passed in, there's | 635 | * If we're above the area which was passed in, there's |
| 639 | * no point proceeding. | 636 | * no point proceeding. |
| 640 | */ | 637 | */ |
| 641 | if (res->start >= res->end) | 638 | if (start >= res->end) |
| 642 | break; | 639 | break; |
| 643 | 640 | ||
| 644 | if ((res->start + size - 1) <= end) | 641 | if ((start + size - 1) <= map_end) |
| 645 | break; | 642 | break; |
| 646 | } | 643 | } |
| 647 | 644 | ||
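pcmcia_common_align() now takes just the candidate start address: it rounds the address down to the alignment boundary given by mask, applies the socket's offset, and bumps the result up by one alignment unit if that landed below the requested start. A self-contained demonstration of the same arithmetic with example numbers:

    #include <stdio.h>
    #include <stdint.h>

    /* Same rounding rule as pcmcia_common_align(), on plain integers. */
    static uint64_t toy_align(uint64_t start, uint64_t mask, uint64_t offset)
    {
        uint64_t ret = (start & ~mask) + offset;

        if (ret < start)
            ret += mask + 1;  /* never hand back an address below the request */
        return ret;
    }

    int main(void)
    {
        uint64_t mask = 0xfff;  /* 4 KiB alignment (example) */

        printf("0x%llx\n", (unsigned long long)toy_align(0x10000, mask, 0x100)); /* 0x10100 */
        printf("0x%llx\n", (unsigned long long)toy_align(0x10200, mask, 0x100)); /* 0x11100 */
        return 0;
    }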
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c index 1190bad4297f..2f795ce2b939 100644 --- a/drivers/platform/x86/intel_menlow.c +++ b/drivers/platform/x86/intel_menlow.c | |||
| @@ -397,6 +397,7 @@ static int intel_menlow_add_one_attribute(char *name, int mode, void *show, | |||
| 397 | if (!attr) | 397 | if (!attr) |
| 398 | return -ENOMEM; | 398 | return -ENOMEM; |
| 399 | 399 | ||
| 400 | sysfs_attr_init(&attr->attr.attr); /* That is consistent naming :D */ | ||
| 400 | attr->attr.attr.name = name; | 401 | attr->attr.attr.name = name; |
| 401 | attr->attr.attr.mode = mode; | 402 | attr->attr.attr.mode = mode; |
| 402 | attr->attr.show = show; | 403 | attr->attr.show = show; |
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index c6c552f681b7..35bb44af49b3 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c | |||
| @@ -274,12 +274,33 @@ static void pnpacpi_parse_allocated_busresource(struct pnp_dev *dev, | |||
| 274 | pnp_add_bus_resource(dev, start, end); | 274 | pnp_add_bus_resource(dev, start, end); |
| 275 | } | 275 | } |
| 276 | 276 | ||
| 277 | static u64 addr_space_length(struct pnp_dev *dev, u64 min, u64 max, u64 len) | ||
| 278 | { | ||
| 279 | u64 max_len; | ||
| 280 | |||
| 281 | max_len = max - min + 1; | ||
| 282 | if (len <= max_len) | ||
| 283 | return len; | ||
| 284 | |||
| 285 | /* | ||
| 286 | * Per 6.4.3.5, _LEN cannot exceed _MAX - _MIN + 1, but some BIOSes | ||
| 287 | * don't do this correctly, e.g., | ||
| 288 | * https://bugzilla.kernel.org/show_bug.cgi?id=15480 | ||
| 289 | */ | ||
| 290 | dev_info(&dev->dev, | ||
| 291 | "resource length %#llx doesn't fit in %#llx-%#llx, trimming\n", | ||
| 292 | (unsigned long long) len, (unsigned long long) min, | ||
| 293 | (unsigned long long) max); | ||
| 294 | return max_len; | ||
| 295 | } | ||
| 296 | |||
| 277 | static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev, | 297 | static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev, |
| 278 | struct acpi_resource *res) | 298 | struct acpi_resource *res) |
| 279 | { | 299 | { |
| 280 | struct acpi_resource_address64 addr, *p = &addr; | 300 | struct acpi_resource_address64 addr, *p = &addr; |
| 281 | acpi_status status; | 301 | acpi_status status; |
| 282 | int window; | 302 | int window; |
| 303 | u64 len; | ||
| 283 | 304 | ||
| 284 | status = acpi_resource_to_address64(res, p); | 305 | status = acpi_resource_to_address64(res, p); |
| 285 | if (!ACPI_SUCCESS(status)) { | 306 | if (!ACPI_SUCCESS(status)) { |
| @@ -288,20 +309,18 @@ static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev, | |||
| 288 | return; | 309 | return; |
| 289 | } | 310 | } |
| 290 | 311 | ||
| 312 | len = addr_space_length(dev, p->minimum, p->maximum, p->address_length); | ||
| 291 | window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; | 313 | window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; |
| 292 | 314 | ||
| 293 | if (p->resource_type == ACPI_MEMORY_RANGE) | 315 | if (p->resource_type == ACPI_MEMORY_RANGE) |
| 294 | pnpacpi_parse_allocated_memresource(dev, | 316 | pnpacpi_parse_allocated_memresource(dev, p->minimum, len, |
| 295 | p->minimum, p->address_length, | ||
| 296 | p->info.mem.write_protect, window); | 317 | p->info.mem.write_protect, window); |
| 297 | else if (p->resource_type == ACPI_IO_RANGE) | 318 | else if (p->resource_type == ACPI_IO_RANGE) |
| 298 | pnpacpi_parse_allocated_ioresource(dev, | 319 | pnpacpi_parse_allocated_ioresource(dev, p->minimum, len, |
| 299 | p->minimum, p->address_length, | ||
| 300 | p->granularity == 0xfff ? ACPI_DECODE_10 : | 320 | p->granularity == 0xfff ? ACPI_DECODE_10 : |
| 301 | ACPI_DECODE_16, window); | 321 | ACPI_DECODE_16, window); |
| 302 | else if (p->resource_type == ACPI_BUS_NUMBER_RANGE) | 322 | else if (p->resource_type == ACPI_BUS_NUMBER_RANGE) |
| 303 | pnpacpi_parse_allocated_busresource(dev, p->minimum, | 323 | pnpacpi_parse_allocated_busresource(dev, p->minimum, len); |
| 304 | p->address_length); | ||
| 305 | } | 324 | } |
| 306 | 325 | ||
| 307 | static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev, | 326 | static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev, |
| @@ -309,21 +328,20 @@ static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev, | |||
| 309 | { | 328 | { |
| 310 | struct acpi_resource_extended_address64 *p = &res->data.ext_address64; | 329 | struct acpi_resource_extended_address64 *p = &res->data.ext_address64; |
| 311 | int window; | 330 | int window; |
| 331 | u64 len; | ||
| 312 | 332 | ||
| 333 | len = addr_space_length(dev, p->minimum, p->maximum, p->address_length); | ||
| 313 | window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; | 334 | window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; |
| 314 | 335 | ||
| 315 | if (p->resource_type == ACPI_MEMORY_RANGE) | 336 | if (p->resource_type == ACPI_MEMORY_RANGE) |
| 316 | pnpacpi_parse_allocated_memresource(dev, | 337 | pnpacpi_parse_allocated_memresource(dev, p->minimum, len, |
| 317 | p->minimum, p->address_length, | ||
| 318 | p->info.mem.write_protect, window); | 338 | p->info.mem.write_protect, window); |
| 319 | else if (p->resource_type == ACPI_IO_RANGE) | 339 | else if (p->resource_type == ACPI_IO_RANGE) |
| 320 | pnpacpi_parse_allocated_ioresource(dev, | 340 | pnpacpi_parse_allocated_ioresource(dev, p->minimum, len, |
| 321 | p->minimum, p->address_length, | ||
| 322 | p->granularity == 0xfff ? ACPI_DECODE_10 : | 341 | p->granularity == 0xfff ? ACPI_DECODE_10 : |
| 323 | ACPI_DECODE_16, window); | 342 | ACPI_DECODE_16, window); |
| 324 | else if (p->resource_type == ACPI_BUS_NUMBER_RANGE) | 343 | else if (p->resource_type == ACPI_BUS_NUMBER_RANGE) |
| 325 | pnpacpi_parse_allocated_busresource(dev, p->minimum, | 344 | pnpacpi_parse_allocated_busresource(dev, p->minimum, len); |
| 326 | p->address_length); | ||
| 327 | } | 345 | } |
| 328 | 346 | ||
| 329 | static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | 347 | static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, |
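addr_space_length() enforces the ACPI rule that _LEN may not exceed _MAX - _MIN + 1, trimming oversized lengths reported by buggy BIOSes instead of propagating them. The clamp itself is just the following arithmetic, shown here as a standalone sketch with an example window:

    #include <stdio.h>
    #include <stdint.h>

    /* Clamp a reported length to what the min..max window can actually hold. */
    static uint64_t toy_addr_space_length(uint64_t min, uint64_t max, uint64_t len)
    {
        uint64_t max_len = max - min + 1;

        return (len <= max_len) ? len : max_len;
    }

    int main(void)
    {
        /* Example window 0xd0000000-0xd3ffffff is 0x04000000 bytes long. */
        printf("sane _LEN:  0x%llx\n",
               (unsigned long long)toy_addr_space_length(0xd0000000, 0xd3ffffff, 0x04000000));
        printf("buggy _LEN: 0x%llx\n",
               (unsigned long long)toy_addr_space_length(0xd0000000, 0xd3ffffff, 0x10000000));
        return 0;
    }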
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index c77f6f72f950..d71fe61db1d6 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c | |||
| @@ -384,21 +384,26 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) | |||
| 384 | struct rtc_device *rtc; | 384 | struct rtc_device *rtc; |
| 385 | struct rtc_plat_data *pdata = NULL; | 385 | struct rtc_plat_data *pdata = NULL; |
| 386 | u32 reg; | 386 | u32 reg; |
| 387 | int ret, rate; | 387 | unsigned long rate; |
| 388 | int ret; | ||
| 388 | 389 | ||
| 389 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 390 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 390 | if (!res) | 391 | if (!res) |
| 391 | return -ENODEV; | 392 | return -ENODEV; |
| 392 | 393 | ||
| 393 | pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); | 394 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); |
| 394 | if (!pdata) | 395 | if (!pdata) |
| 395 | return -ENOMEM; | 396 | return -ENOMEM; |
| 396 | 397 | ||
| 397 | pdata->ioaddr = ioremap(res->start, resource_size(res)); | 398 | if (!devm_request_mem_region(&pdev->dev, res->start, |
| 399 | resource_size(res), pdev->name)) | ||
| 400 | return -EBUSY; | ||
| 401 | |||
| 402 | pdata->ioaddr = devm_ioremap(&pdev->dev, res->start, | ||
| 403 | resource_size(res)); | ||
| 398 | 404 | ||
| 399 | clk = clk_get(&pdev->dev, "ckil"); | 405 | clk = clk_get(&pdev->dev, "ckil"); |
| 400 | if (IS_ERR(clk)) { | 406 | if (IS_ERR(clk)) { |
| 401 | iounmap(pdata->ioaddr); | ||
| 402 | ret = PTR_ERR(clk); | 407 | ret = PTR_ERR(clk); |
| 403 | goto exit_free_pdata; | 408 | goto exit_free_pdata; |
| 404 | } | 409 | } |
| @@ -413,8 +418,7 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) | |||
| 413 | else if (rate == 38400) | 418 | else if (rate == 38400) |
| 414 | reg = RTC_INPUT_CLK_38400HZ; | 419 | reg = RTC_INPUT_CLK_38400HZ; |
| 415 | else { | 420 | else { |
| 416 | dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n", | 421 | dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n", rate); |
| 417 | clk_get_rate(clk)); | ||
| 418 | ret = -EINVAL; | 422 | ret = -EINVAL; |
| 419 | goto exit_free_pdata; | 423 | goto exit_free_pdata; |
| 420 | } | 424 | } |
| @@ -450,8 +454,8 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) | |||
| 450 | pdata->irq = platform_get_irq(pdev, 0); | 454 | pdata->irq = platform_get_irq(pdev, 0); |
| 451 | 455 | ||
| 452 | if (pdata->irq >= 0 && | 456 | if (pdata->irq >= 0 && |
| 453 | request_irq(pdata->irq, mxc_rtc_interrupt, IRQF_SHARED, | 457 | devm_request_irq(&pdev->dev, pdata->irq, mxc_rtc_interrupt, |
| 454 | pdev->name, pdev) < 0) { | 458 | IRQF_SHARED, pdev->name, pdev) < 0) { |
| 455 | dev_warn(&pdev->dev, "interrupt not available.\n"); | 459 | dev_warn(&pdev->dev, "interrupt not available.\n"); |
| 456 | pdata->irq = -1; | 460 | pdata->irq = -1; |
| 457 | } | 461 | } |
| @@ -459,10 +463,10 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) | |||
| 459 | return 0; | 463 | return 0; |
| 460 | 464 | ||
| 461 | exit_put_clk: | 465 | exit_put_clk: |
| 466 | clk_disable(pdata->clk); | ||
| 462 | clk_put(pdata->clk); | 467 | clk_put(pdata->clk); |
| 463 | 468 | ||
| 464 | exit_free_pdata: | 469 | exit_free_pdata: |
| 465 | kfree(pdata); | ||
| 466 | 470 | ||
| 467 | return ret; | 471 | return ret; |
| 468 | } | 472 | } |
| @@ -473,12 +477,8 @@ static int __exit mxc_rtc_remove(struct platform_device *pdev) | |||
| 473 | 477 | ||
| 474 | rtc_device_unregister(pdata->rtc); | 478 | rtc_device_unregister(pdata->rtc); |
| 475 | 479 | ||
| 476 | if (pdata->irq >= 0) | ||
| 477 | free_irq(pdata->irq, pdev); | ||
| 478 | |||
| 479 | clk_disable(pdata->clk); | 480 | clk_disable(pdata->clk); |
| 480 | clk_put(pdata->clk); | 481 | clk_put(pdata->clk); |
| 481 | kfree(pdata); | ||
| 482 | platform_set_drvdata(pdev, NULL); | 482 | platform_set_drvdata(pdev, NULL); |
| 483 | 483 | ||
| 484 | return 0; | 484 | return 0; |
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c index 2aecf7f21361..7ad30e72f868 100644 --- a/drivers/s390/char/sclp_async.c +++ b/drivers/s390/char/sclp_async.c | |||
| @@ -85,7 +85,7 @@ static int proc_handler_callhome(struct ctl_table *ctl, int write, | |||
| 85 | rc = copy_from_user(buf, buffer, sizeof(buf)); | 85 | rc = copy_from_user(buf, buffer, sizeof(buf)); |
| 86 | if (rc != 0) | 86 | if (rc != 0) |
| 87 | return -EFAULT; | 87 | return -EFAULT; |
| 88 | buf[len - 1] = '\0'; | 88 | buf[sizeof(buf) - 1] = '\0'; |
| 89 | if (strict_strtoul(buf, 0, &val) != 0) | 89 | if (strict_strtoul(buf, 0, &val) != 0) |
| 90 | return -EINVAL; | 90 | return -EINVAL; |
| 91 | if (val != 0 && val != 1) | 91 | if (val != 0 && val != 1) |
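The one-character fix terminates the copied string at the end of the local buffer rather than at an index derived from the caller-supplied length, which could point past the buffer. A minimal standalone illustration of the safe pattern, with a hypothetical buffer rather than the sclp code itself:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[4];
        const char user_input[] = "10000000";  /* deliberately longer than buf */
        size_t len = sizeof(user_input);       /* caller-controlled in the real code */

        if (len > sizeof(buf))
            printf("caller supplied %zu bytes, buffer holds %zu\n", len, sizeof(buf));

        /* Copy at most sizeof(buf) bytes, then terminate inside the buffer:
         * buf[sizeof(buf) - 1] is always valid, buf[len - 1] would not be. */
        memcpy(buf, user_input, sizeof(buf));
        buf[sizeof(buf) - 1] = '\0';

        printf("clamped value string: \"%s\"\n", buf);
        return 0;
    }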
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 58c62ff42ab3..8b827f37b03e 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -2186,7 +2186,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) | |||
| 2186 | blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); | 2186 | blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); |
| 2187 | 2187 | ||
| 2188 | gd->driverfs_dev = &sdp->sdev_gendev; | 2188 | gd->driverfs_dev = &sdp->sdev_gendev; |
| 2189 | gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS; | 2189 | gd->flags = GENHD_FL_EXT_DEVT; |
| 2190 | if (sdp->removable) | 2190 | if (sdp->removable) |
| 2191 | gd->flags |= GENHD_FL_REMOVABLE; | 2191 | gd->flags |= GENHD_FL_REMOVABLE; |
| 2192 | 2192 | ||
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c index f1dcd7969a5c..0e8d35224614 100644 --- a/drivers/ssb/driver_pcicore.c +++ b/drivers/ssb/driver_pcicore.c | |||
| @@ -246,20 +246,12 @@ static struct pci_controller ssb_pcicore_controller = { | |||
| 246 | .pci_ops = &ssb_pcicore_pciops, | 246 | .pci_ops = &ssb_pcicore_pciops, |
| 247 | .io_resource = &ssb_pcicore_io_resource, | 247 | .io_resource = &ssb_pcicore_io_resource, |
| 248 | .mem_resource = &ssb_pcicore_mem_resource, | 248 | .mem_resource = &ssb_pcicore_mem_resource, |
| 249 | .mem_offset = 0x24000000, | ||
| 250 | }; | 249 | }; |
| 251 | 250 | ||
| 252 | static u32 ssb_pcicore_pcibus_iobase = 0x100; | ||
| 253 | static u32 ssb_pcicore_pcibus_membase = SSB_PCI_DMA; | ||
| 254 | |||
| 255 | /* This function is called when doing a pci_enable_device(). | 251 | /* This function is called when doing a pci_enable_device(). |
| 256 | * We must first check if the device is a device on the PCI-core bridge. */ | 252 | * We must first check if the device is a device on the PCI-core bridge. */ |
| 257 | int ssb_pcicore_plat_dev_init(struct pci_dev *d) | 253 | int ssb_pcicore_plat_dev_init(struct pci_dev *d) |
| 258 | { | 254 | { |
| 259 | struct resource *res; | ||
| 260 | int pos, size; | ||
| 261 | u32 *base; | ||
| 262 | |||
| 263 | if (d->bus->ops != &ssb_pcicore_pciops) { | 255 | if (d->bus->ops != &ssb_pcicore_pciops) { |
| 264 | /* This is not a device on the PCI-core bridge. */ | 256 | /* This is not a device on the PCI-core bridge. */ |
| 265 | return -ENODEV; | 257 | return -ENODEV; |
| @@ -268,27 +260,6 @@ int ssb_pcicore_plat_dev_init(struct pci_dev *d) | |||
| 268 | ssb_printk(KERN_INFO "PCI: Fixing up device %s\n", | 260 | ssb_printk(KERN_INFO "PCI: Fixing up device %s\n", |
| 269 | pci_name(d)); | 261 | pci_name(d)); |
| 270 | 262 | ||
| 271 | /* Fix up resource bases */ | ||
| 272 | for (pos = 0; pos < 6; pos++) { | ||
| 273 | res = &d->resource[pos]; | ||
| 274 | if (res->flags & IORESOURCE_IO) | ||
| 275 | base = &ssb_pcicore_pcibus_iobase; | ||
| 276 | else | ||
| 277 | base = &ssb_pcicore_pcibus_membase; | ||
| 278 | res->flags |= IORESOURCE_PCI_FIXED; | ||
| 279 | if (res->end) { | ||
| 280 | size = res->end - res->start + 1; | ||
| 281 | if (*base & (size - 1)) | ||
| 282 | *base = (*base + size) & ~(size - 1); | ||
| 283 | res->start = *base; | ||
| 284 | res->end = res->start + size - 1; | ||
| 285 | *base += size; | ||
| 286 | pci_write_config_dword(d, PCI_BASE_ADDRESS_0 + (pos << 2), res->start); | ||
| 287 | } | ||
| 288 | /* Fix up PCI bridge BAR0 only */ | ||
| 289 | if (d->bus->number == 0 && PCI_SLOT(d->devfn) == 0) | ||
| 290 | break; | ||
| 291 | } | ||
| 292 | /* Fix up interrupt lines */ | 263 | /* Fix up interrupt lines */ |
| 293 | d->irq = ssb_mips_irq(extpci_core->dev) + 2; | 264 | d->irq = ssb_mips_irq(extpci_core->dev) + 2; |
| 294 | pci_write_config_byte(d, PCI_INTERRUPT_LINE, d->irq); | 265 | pci_write_config_byte(d, PCI_INTERRUPT_LINE, d->irq); |
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 9b6297f07b83..13c72c629329 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c | |||
| @@ -506,6 +506,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) | |||
| 506 | tz->temp_input.attr.attr.name = tz->temp_input.name; | 506 | tz->temp_input.attr.attr.name = tz->temp_input.name; |
| 507 | tz->temp_input.attr.attr.mode = 0444; | 507 | tz->temp_input.attr.attr.mode = 0444; |
| 508 | tz->temp_input.attr.show = temp_input_show; | 508 | tz->temp_input.attr.show = temp_input_show; |
| 509 | sysfs_attr_init(&tz->temp_input.attr.attr); | ||
| 509 | result = device_create_file(hwmon->device, &tz->temp_input.attr); | 510 | result = device_create_file(hwmon->device, &tz->temp_input.attr); |
| 510 | if (result) | 511 | if (result) |
| 511 | goto unregister_hwmon_device; | 512 | goto unregister_hwmon_device; |
| @@ -518,6 +519,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) | |||
| 518 | tz->temp_crit.attr.attr.name = tz->temp_crit.name; | 519 | tz->temp_crit.attr.attr.name = tz->temp_crit.name; |
| 519 | tz->temp_crit.attr.attr.mode = 0444; | 520 | tz->temp_crit.attr.attr.mode = 0444; |
| 520 | tz->temp_crit.attr.show = temp_crit_show; | 521 | tz->temp_crit.attr.show = temp_crit_show; |
| 522 | sysfs_attr_init(&tz->temp_crit.attr.attr); | ||
| 521 | result = device_create_file(hwmon->device, | 523 | result = device_create_file(hwmon->device, |
| 522 | &tz->temp_crit.attr); | 524 | &tz->temp_crit.attr); |
| 523 | if (result) | 525 | if (result) |
| @@ -726,6 +728,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, | |||
| 726 | goto release_idr; | 728 | goto release_idr; |
| 727 | 729 | ||
| 728 | sprintf(dev->attr_name, "cdev%d_trip_point", dev->id); | 730 | sprintf(dev->attr_name, "cdev%d_trip_point", dev->id); |
| 731 | sysfs_attr_init(&dev->attr.attr); | ||
| 729 | dev->attr.attr.name = dev->attr_name; | 732 | dev->attr.attr.name = dev->attr_name; |
| 730 | dev->attr.attr.mode = 0444; | 733 | dev->attr.attr.mode = 0444; |
| 731 | dev->attr.show = thermal_cooling_device_trip_point_show; | 734 | dev->attr.show = thermal_cooling_device_trip_point_show; |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 5be11c99e18f..e69d238c5af0 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
| @@ -236,6 +236,10 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem, | |||
| 236 | int log_all) | 236 | int log_all) |
| 237 | { | 237 | { |
| 238 | int i; | 238 | int i; |
| 239 | |||
| 240 | if (!mem) | ||
| 241 | return 0; | ||
| 242 | |||
| 239 | for (i = 0; i < mem->nregions; ++i) { | 243 | for (i = 0; i < mem->nregions; ++i) { |
| 240 | struct vhost_memory_region *m = mem->regions + i; | 244 | struct vhost_memory_region *m = mem->regions + i; |
| 241 | unsigned long a = m->userspace_addr; | 245 | unsigned long a = m->userspace_addr; |
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c index 4637bcbe03a4..994358a4f302 100644 --- a/drivers/video/fsl-diu-fb.c +++ b/drivers/video/fsl-diu-fb.c | |||
| @@ -1536,6 +1536,7 @@ static int __devinit fsl_diu_probe(struct of_device *ofdev, | |||
| 1536 | goto error; | 1536 | goto error; |
| 1537 | } | 1537 | } |
| 1538 | 1538 | ||
| 1539 | sysfs_attr_init(&machine_data->dev_attr.attr); | ||
| 1539 | machine_data->dev_attr.attr.name = "monitor"; | 1540 | machine_data->dev_attr.attr.name = "monitor"; |
| 1540 | machine_data->dev_attr.attr.mode = S_IRUGO|S_IWUSR; | 1541 | machine_data->dev_attr.attr.mode = S_IRUGO|S_IWUSR; |
| 1541 | machine_data->dev_attr.show = show_monitor; | 1542 | machine_data->dev_attr.show = show_monitor; |
diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c index 841424912ece..fe92eed6da70 100644 --- a/drivers/video/mb862xx/mb862xxfb_accel.c +++ b/drivers/video/mb862xx/mb862xxfb_accel.c | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | * Fujitsu Carmine/Coral-P(A)/Lime framebuffer driver acceleration support | 4 | * Fujitsu Carmine/Coral-P(A)/Lime framebuffer driver acceleration support |
| 5 | * | 5 | * |
| 6 | * (C) 2007 Alexander Shishkin <virtuoso@slind.org> | 6 | * (C) 2007 Alexander Shishkin <virtuoso@slind.org> |
| 7 | * (C) 2009 Valentin Sitdikov <valentin.sitdikov@siemens.com> | 7 | * (C) 2009 Valentin Sitdikov <v.sitdikov@gmail.com> |
| 8 | * (C) 2009 Siemens AG | 8 | * (C) 2009 Siemens AG |
| 9 | * | 9 | * |
| 10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
| 17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
| 18 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
| 19 | #include <linux/module.h> | ||
| 19 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
| 20 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
| 21 | #if defined(CONFIG_OF) | 22 | #if defined(CONFIG_OF) |
| @@ -330,3 +331,5 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres) | |||
| 330 | info->fix.accel = 0xff; /*FIXME: add right define */ | 331 | info->fix.accel = 0xff; /*FIXME: add right define */ |
| 331 | } | 332 | } |
| 332 | EXPORT_SYMBOL(mb862xxfb_init_accel); | 333 | EXPORT_SYMBOL(mb862xxfb_init_accel); |
| 334 | |||
| 335 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c index 54ac91dc070b..0cadf7aee27e 100644 --- a/drivers/video/vesafb.c +++ b/drivers/video/vesafb.c | |||
| @@ -225,7 +225,7 @@ static int __init vesafb_setup(char *options) | |||
| 225 | return 0; | 225 | return 0; |
| 226 | } | 226 | } |
| 227 | 227 | ||
| 228 | static int __devinit vesafb_probe(struct platform_device *dev) | 228 | static int __init vesafb_probe(struct platform_device *dev) |
| 229 | { | 229 | { |
| 230 | struct fb_info *info; | 230 | struct fb_info *info; |
| 231 | int i, err; | 231 | int i, err; |
| @@ -476,7 +476,6 @@ err: | |||
| 476 | } | 476 | } |
| 477 | 477 | ||
| 478 | static struct platform_driver vesafb_driver = { | 478 | static struct platform_driver vesafb_driver = { |
| 479 | .probe = vesafb_probe, | ||
| 480 | .driver = { | 479 | .driver = { |
| 481 | .name = "vesafb", | 480 | .name = "vesafb", |
| 482 | }, | 481 | }, |
| @@ -492,20 +491,21 @@ static int __init vesafb_init(void) | |||
| 492 | /* ignore error return of fb_get_options */ | 491 | /* ignore error return of fb_get_options */ |
| 493 | fb_get_options("vesafb", &option); | 492 | fb_get_options("vesafb", &option); |
| 494 | vesafb_setup(option); | 493 | vesafb_setup(option); |
| 495 | ret = platform_driver_register(&vesafb_driver); | ||
| 496 | 494 | ||
| 497 | if (!ret) { | 495 | vesafb_device = platform_device_alloc("vesafb", 0); |
| 498 | vesafb_device = platform_device_alloc("vesafb", 0); | 496 | if (!vesafb_device) |
| 497 | return -ENOMEM; | ||
| 499 | 498 | ||
| 500 | if (vesafb_device) | 499 | ret = platform_device_add(vesafb_device); |
| 501 | ret = platform_device_add(vesafb_device); | 500 | if (!ret) { |
| 502 | else | 501 | ret = platform_driver_probe(&vesafb_driver, vesafb_probe); |
| 503 | ret = -ENOMEM; | 502 | if (ret) |
| 503 | platform_device_del(vesafb_device); | ||
| 504 | } | ||
| 504 | 505 | ||
| 505 | if (ret) { | 506 | if (ret) { |
| 506 | platform_device_put(vesafb_device); | 507 | platform_device_put(vesafb_device); |
| 507 | platform_driver_unregister(&vesafb_driver); | 508 | vesafb_device = NULL; |
| 508 | } | ||
| 509 | } | 509 | } |
| 510 | 510 | ||
| 511 | return ret; | 511 | return ret; |
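The vesafb hunks above drop the .probe member and platform_driver_register() in favour of platform_driver_probe(), which binds only the devices registered at that moment and then disallows later binding; that is what lets the probe routine be marked __init rather than __devinit, since __init text is freed once boot-time initialisation is done. A sketch of the same registration order under illustrative names (the vesafb-specific setup is omitted):

    static int __init example_probe(struct platform_device *dev)
    {
            return 0;        /* illustrative; the real driver sets up the framebuffer here */
    }

    static struct platform_driver example_driver = {
            /* no .probe member: platform_driver_probe() supplies it */
            .driver = { .name = "example" },
    };

    static int __init example_init(void)
    {
            struct platform_device *pd;
            int ret;

            pd = platform_device_alloc("example", 0);
            if (!pd)
                    return -ENOMEM;
            ret = platform_device_add(pd);
            if (!ret) {
                    ret = platform_driver_probe(&example_driver, example_probe);
                    if (ret)
                            platform_device_del(pd);  /* probe failed: unwind the add */
            }
            if (ret)
                    platform_device_put(pd);          /* drop the allocation reference */
            return ret;
    }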
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index bdcdbd53da89..0bf5020d0d32 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
| @@ -55,11 +55,6 @@ config SOFT_WATCHDOG | |||
| 55 | To compile this driver as a module, choose M here: the | 55 | To compile this driver as a module, choose M here: the |
| 56 | module will be called softdog. | 56 | module will be called softdog. |
| 57 | 57 | ||
| 58 | config MAX63XX_WATCHDOG | ||
| 59 | tristate "Max63xx watchdog" | ||
| 60 | help | ||
| 61 | Support for memory mapped max63{69,70,71,72,73,74} watchdog timer. | ||
| 62 | |||
| 63 | config WM831X_WATCHDOG | 58 | config WM831X_WATCHDOG |
| 64 | tristate "WM831x watchdog" | 59 | tristate "WM831x watchdog" |
| 65 | depends on MFD_WM831X | 60 | depends on MFD_WM831X |
| @@ -199,10 +194,10 @@ config EP93XX_WATCHDOG | |||
| 199 | 194 | ||
| 200 | config OMAP_WATCHDOG | 195 | config OMAP_WATCHDOG |
| 201 | tristate "OMAP Watchdog" | 196 | tristate "OMAP Watchdog" |
| 202 | depends on ARCH_OMAP16XX || ARCH_OMAP2 || ARCH_OMAP3 | 197 | depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS |
| 203 | help | 198 | help |
| 204 | Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog. Say 'Y' | 199 | Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog. Say 'Y' |
| 205 | here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog timer. | 200 | here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer. |
| 206 | 201 | ||
| 207 | config PNX4008_WATCHDOG | 202 | config PNX4008_WATCHDOG |
| 208 | tristate "PNX4008 Watchdog" | 203 | tristate "PNX4008 Watchdog" |
| @@ -305,6 +300,12 @@ config TS72XX_WATCHDOG | |||
| 305 | To compile this driver as a module, choose M here: the | 300 | To compile this driver as a module, choose M here: the |
| 306 | module will be called ts72xx_wdt. | 301 | module will be called ts72xx_wdt. |
| 307 | 302 | ||
| 303 | config MAX63XX_WATCHDOG | ||
| 304 | tristate "Max63xx watchdog" | ||
| 305 | depends on ARM && HAS_IOMEM | ||
| 306 | help | ||
| 307 | Support for memory mapped max63{69,70,71,72,73,74} watchdog timer. | ||
| 308 | |||
| 308 | # AVR32 Architecture | 309 | # AVR32 Architecture |
| 309 | 310 | ||
| 310 | config AT32AP700X_WDT | 311 | config AT32AP700X_WDT |
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c index 8b724aad6825..500d38342e1e 100644 --- a/drivers/watchdog/booke_wdt.c +++ b/drivers/watchdog/booke_wdt.c | |||
| @@ -44,7 +44,7 @@ u32 booke_wdt_period = WDT_PERIOD_DEFAULT; | |||
| 44 | 44 | ||
| 45 | #ifdef CONFIG_FSL_BOOKE | 45 | #ifdef CONFIG_FSL_BOOKE |
| 46 | #define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15)) | 46 | #define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15)) |
| 47 | #define WDTP_MASK (WDTP(0)) | 47 | #define WDTP_MASK (WDTP(0x3f)) |
| 48 | #else | 48 | #else |
| 49 | #define WDTP(x) (TCR_WP(x)) | 49 | #define WDTP(x) (TCR_WP(x)) |
| 50 | #define WDTP_MASK (TCR_WP_MASK) | 50 | #define WDTP_MASK (TCR_WP_MASK) |
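The booke_wdt one-liner above fixes the Freescale Book E period mask: WDTP() scatters a 6-bit period across bits 30-31 and 17-20, so WDTP(0) evaluates to 0 and masking with ~WDTP_MASK cleared nothing, while WDTP(0x3f) covers the whole field. A standalone check of the two values (the macro mirrors the kernel one, with an explicit cast so the shift is well defined in plain C):

    #include <stdio.h>
    #include <stdint.h>

    /* Same bit layout as the kernel macro, cast to u32 for a clean userspace build. */
    #define WDTP(x) (((((uint32_t)(x)) & 0x3) << 30) | ((((uint32_t)(x)) & 0x3c) << 15))

    int main(void)
    {
            printf("WDTP(0)    = 0x%08x\n", WDTP(0));     /* 0x00000000 - masks nothing     */
            printf("WDTP(0x3f) = 0x%08x\n", WDTP(0x3f));  /* 0xc01e0000 - full period field */
            return 0;
    }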
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 72f5a3707b48..809e7167a624 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c | |||
| @@ -442,7 +442,7 @@ static void hpwdt_ping(void) | |||
| 442 | static int hpwdt_change_timer(int new_margin) | 442 | static int hpwdt_change_timer(int new_margin) |
| 443 | { | 443 | { |
| 444 | /* Arbitrary, can't find the card's limits */ | 444 | /* Arbitrary, can't find the card's limits */ |
| 445 | if (new_margin < 30 || new_margin > 600) { | 445 | if (new_margin < 5 || new_margin > 600) { |
| 446 | printk(KERN_WARNING | 446 | printk(KERN_WARNING |
| 447 | "hpwdt: New value passed in is invalid: %d seconds.\n", | 447 | "hpwdt: New value passed in is invalid: %d seconds.\n", |
| 448 | new_margin); | 448 | new_margin); |
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 44bc6aa46edf..8da886035374 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c | |||
| @@ -115,8 +115,37 @@ enum iTCO_chipsets { | |||
| 115 | TCO_3420, /* 3420 */ | 115 | TCO_3420, /* 3420 */ |
| 116 | TCO_3450, /* 3450 */ | 116 | TCO_3450, /* 3450 */ |
| 117 | TCO_EP80579, /* EP80579 */ | 117 | TCO_EP80579, /* EP80579 */ |
| 118 | TCO_CPTD, /* CPT Desktop */ | 118 | TCO_CPT1, /* Cougar Point */ |
| 119 | TCO_CPTM, /* CPT Mobile */ | 119 | TCO_CPT2, /* Cougar Point Desktop */ |
| 120 | TCO_CPT3, /* Cougar Point Mobile */ | ||
| 121 | TCO_CPT4, /* Cougar Point */ | ||
| 122 | TCO_CPT5, /* Cougar Point */ | ||
| 123 | TCO_CPT6, /* Cougar Point */ | ||
| 124 | TCO_CPT7, /* Cougar Point */ | ||
| 125 | TCO_CPT8, /* Cougar Point */ | ||
| 126 | TCO_CPT9, /* Cougar Point */ | ||
| 127 | TCO_CPT10, /* Cougar Point */ | ||
| 128 | TCO_CPT11, /* Cougar Point */ | ||
| 129 | TCO_CPT12, /* Cougar Point */ | ||
| 130 | TCO_CPT13, /* Cougar Point */ | ||
| 131 | TCO_CPT14, /* Cougar Point */ | ||
| 132 | TCO_CPT15, /* Cougar Point */ | ||
| 133 | TCO_CPT16, /* Cougar Point */ | ||
| 134 | TCO_CPT17, /* Cougar Point */ | ||
| 135 | TCO_CPT18, /* Cougar Point */ | ||
| 136 | TCO_CPT19, /* Cougar Point */ | ||
| 137 | TCO_CPT20, /* Cougar Point */ | ||
| 138 | TCO_CPT21, /* Cougar Point */ | ||
| 139 | TCO_CPT22, /* Cougar Point */ | ||
| 140 | TCO_CPT23, /* Cougar Point */ | ||
| 141 | TCO_CPT24, /* Cougar Point */ | ||
| 142 | TCO_CPT25, /* Cougar Point */ | ||
| 143 | TCO_CPT26, /* Cougar Point */ | ||
| 144 | TCO_CPT27, /* Cougar Point */ | ||
| 145 | TCO_CPT28, /* Cougar Point */ | ||
| 146 | TCO_CPT29, /* Cougar Point */ | ||
| 147 | TCO_CPT30, /* Cougar Point */ | ||
| 148 | TCO_CPT31, /* Cougar Point */ | ||
| 120 | }; | 149 | }; |
| 121 | 150 | ||
| 122 | static struct { | 151 | static struct { |
| @@ -173,8 +202,37 @@ static struct { | |||
| 173 | {"3420", 2}, | 202 | {"3420", 2}, |
| 174 | {"3450", 2}, | 203 | {"3450", 2}, |
| 175 | {"EP80579", 2}, | 204 | {"EP80579", 2}, |
| 176 | {"CPT Desktop", 2}, | 205 | {"Cougar Point", 2}, |
| 177 | {"CPT Mobile", 2}, | 206 | {"Cougar Point", 2}, |
| 207 | {"Cougar Point", 2}, | ||
| 208 | {"Cougar Point", 2}, | ||
| 209 | {"Cougar Point", 2}, | ||
| 210 | {"Cougar Point", 2}, | ||
| 211 | {"Cougar Point", 2}, | ||
| 212 | {"Cougar Point", 2}, | ||
| 213 | {"Cougar Point", 2}, | ||
| 214 | {"Cougar Point", 2}, | ||
| 215 | {"Cougar Point", 2}, | ||
| 216 | {"Cougar Point", 2}, | ||
| 217 | {"Cougar Point", 2}, | ||
| 218 | {"Cougar Point", 2}, | ||
| 219 | {"Cougar Point", 2}, | ||
| 220 | {"Cougar Point", 2}, | ||
| 221 | {"Cougar Point", 2}, | ||
| 222 | {"Cougar Point", 2}, | ||
| 223 | {"Cougar Point", 2}, | ||
| 224 | {"Cougar Point", 2}, | ||
| 225 | {"Cougar Point", 2}, | ||
| 226 | {"Cougar Point", 2}, | ||
| 227 | {"Cougar Point", 2}, | ||
| 228 | {"Cougar Point", 2}, | ||
| 229 | {"Cougar Point", 2}, | ||
| 230 | {"Cougar Point", 2}, | ||
| 231 | {"Cougar Point", 2}, | ||
| 232 | {"Cougar Point", 2}, | ||
| 233 | {"Cougar Point", 2}, | ||
| 234 | {"Cougar Point", 2}, | ||
| 235 | {"Cougar Point", 2}, | ||
| 178 | {NULL, 0} | 236 | {NULL, 0} |
| 179 | }; | 237 | }; |
| 180 | 238 | ||
| @@ -259,8 +317,37 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = { | |||
| 259 | { ITCO_PCI_DEVICE(0x3b14, TCO_3420)}, | 317 | { ITCO_PCI_DEVICE(0x3b14, TCO_3420)}, |
| 260 | { ITCO_PCI_DEVICE(0x3b16, TCO_3450)}, | 318 | { ITCO_PCI_DEVICE(0x3b16, TCO_3450)}, |
| 261 | { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)}, | 319 | { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)}, |
| 262 | { ITCO_PCI_DEVICE(0x1c42, TCO_CPTD)}, | 320 | { ITCO_PCI_DEVICE(0x1c41, TCO_CPT1)}, |
| 263 | { ITCO_PCI_DEVICE(0x1c43, TCO_CPTM)}, | 321 | { ITCO_PCI_DEVICE(0x1c42, TCO_CPT2)}, |
| 322 | { ITCO_PCI_DEVICE(0x1c43, TCO_CPT3)}, | ||
| 323 | { ITCO_PCI_DEVICE(0x1c44, TCO_CPT4)}, | ||
| 324 | { ITCO_PCI_DEVICE(0x1c45, TCO_CPT5)}, | ||
| 325 | { ITCO_PCI_DEVICE(0x1c46, TCO_CPT6)}, | ||
| 326 | { ITCO_PCI_DEVICE(0x1c47, TCO_CPT7)}, | ||
| 327 | { ITCO_PCI_DEVICE(0x1c48, TCO_CPT8)}, | ||
| 328 | { ITCO_PCI_DEVICE(0x1c49, TCO_CPT9)}, | ||
| 329 | { ITCO_PCI_DEVICE(0x1c4a, TCO_CPT10)}, | ||
| 330 | { ITCO_PCI_DEVICE(0x1c4b, TCO_CPT11)}, | ||
| 331 | { ITCO_PCI_DEVICE(0x1c4c, TCO_CPT12)}, | ||
| 332 | { ITCO_PCI_DEVICE(0x1c4d, TCO_CPT13)}, | ||
| 333 | { ITCO_PCI_DEVICE(0x1c4e, TCO_CPT14)}, | ||
| 334 | { ITCO_PCI_DEVICE(0x1c4f, TCO_CPT15)}, | ||
| 335 | { ITCO_PCI_DEVICE(0x1c50, TCO_CPT16)}, | ||
| 336 | { ITCO_PCI_DEVICE(0x1c51, TCO_CPT17)}, | ||
| 337 | { ITCO_PCI_DEVICE(0x1c52, TCO_CPT18)}, | ||
| 338 | { ITCO_PCI_DEVICE(0x1c53, TCO_CPT19)}, | ||
| 339 | { ITCO_PCI_DEVICE(0x1c54, TCO_CPT20)}, | ||
| 340 | { ITCO_PCI_DEVICE(0x1c55, TCO_CPT21)}, | ||
| 341 | { ITCO_PCI_DEVICE(0x1c56, TCO_CPT22)}, | ||
| 342 | { ITCO_PCI_DEVICE(0x1c57, TCO_CPT23)}, | ||
| 343 | { ITCO_PCI_DEVICE(0x1c58, TCO_CPT24)}, | ||
| 344 | { ITCO_PCI_DEVICE(0x1c59, TCO_CPT25)}, | ||
| 345 | { ITCO_PCI_DEVICE(0x1c5a, TCO_CPT26)}, | ||
| 346 | { ITCO_PCI_DEVICE(0x1c5b, TCO_CPT27)}, | ||
| 347 | { ITCO_PCI_DEVICE(0x1c5c, TCO_CPT28)}, | ||
| 348 | { ITCO_PCI_DEVICE(0x1c5d, TCO_CPT29)}, | ||
| 349 | { ITCO_PCI_DEVICE(0x1c5e, TCO_CPT30)}, | ||
| 350 | { ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)}, | ||
| 264 | { 0, }, /* End of list */ | 351 | { 0, }, /* End of list */ |
| 265 | }; | 352 | }; |
| 266 | MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); | 353 | MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); |
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c index 75f3a83c0361..3053ff05ca41 100644 --- a/drivers/watchdog/max63xx_wdt.c +++ b/drivers/watchdog/max63xx_wdt.c | |||
| @@ -154,9 +154,14 @@ static void max63xx_wdt_enable(struct max63xx_timeout *entry) | |||
| 154 | 154 | ||
| 155 | static void max63xx_wdt_disable(void) | 155 | static void max63xx_wdt_disable(void) |
| 156 | { | 156 | { |
| 157 | u8 val; | ||
| 158 | |||
| 157 | spin_lock(&io_lock); | 159 | spin_lock(&io_lock); |
| 158 | 160 | ||
| 159 | __raw_writeb(3, wdt_base); | 161 | val = __raw_readb(wdt_base); |
| 162 | val &= ~MAX6369_WDSET; | ||
| 163 | val |= 3; | ||
| 164 | __raw_writeb(val, wdt_base); | ||
| 160 | 165 | ||
| 161 | spin_unlock(&io_lock); | 166 | spin_unlock(&io_lock); |
| 162 | 167 | ||
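The max63xx_wdt hunk above turns a blind register write into a read-modify-write: only the MAX6369_WDSET field is cleared before the disable value 3 is OR-ed in, so the other bits in the register survive. The general shape of the fix, with illustrative names:

    static DEFINE_SPINLOCK(io_lock);        /* the driver already has this lock */

    static void update_reg_field(void __iomem *base, u8 field_mask, u8 new_bits)
    {
            u8 val;

            spin_lock(&io_lock);
            val = __raw_readb(base);
            val &= ~field_mask;             /* clear only the field being changed */
            val |= new_bits & field_mask;   /* then install the new value         */
            __raw_writeb(val, base);
            spin_unlock(&io_lock);
    }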
diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c index 435ec2aed4fe..2d22e996e996 100644 --- a/drivers/watchdog/pika_wdt.c +++ b/drivers/watchdog/pika_wdt.c | |||
| @@ -52,7 +52,7 @@ static struct { | |||
| 52 | struct timer_list timer; /* The timer that pings the watchdog */ | 52 | struct timer_list timer; /* The timer that pings the watchdog */ |
| 53 | } pikawdt_private; | 53 | } pikawdt_private; |
| 54 | 54 | ||
| 55 | static const struct watchdog_info ident = { | 55 | static struct watchdog_info ident = { |
| 56 | .identity = DRV_NAME, | 56 | .identity = DRV_NAME, |
| 57 | .options = WDIOF_CARDRESET | | 57 | .options = WDIOF_CARDRESET | |
| 58 | WDIOF_SETTIMEOUT | | 58 | WDIOF_SETTIMEOUT | |
diff --git a/fs/bio.c b/fs/bio.c --- a/fs/bio.c +++ b/fs/bio.c | |||
| @@ -554,7 +554,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page | |||
| 554 | .bi_rw = bio->bi_rw, | 554 | .bi_rw = bio->bi_rw, |
| 555 | }; | 555 | }; |
| 556 | 556 | ||
| 557 | if (q->merge_bvec_fn(q, &bvm, prev) < len) { | 557 | if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) { |
| 558 | prev->bv_len -= len; | 558 | prev->bv_len -= len; |
| 559 | return 0; | 559 | return 0; |
| 560 | } | 560 | } |
| @@ -607,7 +607,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page | |||
| 607 | * merge_bvec_fn() returns number of bytes it can accept | 607 | * merge_bvec_fn() returns number of bytes it can accept |
| 608 | * at this offset | 608 | * at this offset |
| 609 | */ | 609 | */ |
| 610 | if (q->merge_bvec_fn(q, &bvm, bvec) < len) { | 610 | if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) { |
| 611 | bvec->bv_page = NULL; | 611 | bvec->bv_page = NULL; |
| 612 | bvec->bv_len = 0; | 612 | bvec->bv_len = 0; |
| 613 | bvec->bv_offset = 0; | 613 | bvec->bv_offset = 0; |
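The two fs/bio.c hunks above fix the check against a queue's merge_bvec_fn(): by this point bv_len has already been grown by the bytes being added, so the driver's answer must cover the whole merged segment (prev->bv_len / bvec->bv_len), not just the newly appended len, or a partial approval is silently treated as a full one. The corrected shape of the check, as it sits in __bio_add_page():

    prev->bv_len += len;                                 /* tentatively merge the new bytes  */
    if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
            prev->bv_len -= len;                         /* driver cannot take it all: undo  */
            return 0;
    }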
diff --git a/fs/block_dev.c b/fs/block_dev.c index d11d0289f3d2..2a6d0193f139 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
| @@ -404,7 +404,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin) | |||
| 404 | * NULL first argument is nfsd_sync_dir() and that's not a directory. | 404 | * NULL first argument is nfsd_sync_dir() and that's not a directory. |
| 405 | */ | 405 | */ |
| 406 | 406 | ||
| 407 | static int block_fsync(struct file *filp, struct dentry *dentry, int datasync) | 407 | int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync) |
| 408 | { | 408 | { |
| 409 | struct block_device *bdev = I_BDEV(filp->f_mapping->host); | 409 | struct block_device *bdev = I_BDEV(filp->f_mapping->host); |
| 410 | int error; | 410 | int error; |
| @@ -418,6 +418,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync) | |||
| 418 | error = 0; | 418 | error = 0; |
| 419 | return error; | 419 | return error; |
| 420 | } | 420 | } |
| 421 | EXPORT_SYMBOL(blkdev_fsync); | ||
| 421 | 422 | ||
| 422 | /* | 423 | /* |
| 423 | * pseudo-fs | 424 | * pseudo-fs |
| @@ -1481,7 +1482,7 @@ const struct file_operations def_blk_fops = { | |||
| 1481 | .aio_read = generic_file_aio_read, | 1482 | .aio_read = generic_file_aio_read, |
| 1482 | .aio_write = blkdev_aio_write, | 1483 | .aio_write = blkdev_aio_write, |
| 1483 | .mmap = generic_file_mmap, | 1484 | .mmap = generic_file_mmap, |
| 1484 | .fsync = block_fsync, | 1485 | .fsync = blkdev_fsync, |
| 1485 | .unlocked_ioctl = block_ioctl, | 1486 | .unlocked_ioctl = block_ioctl, |
| 1486 | #ifdef CONFIG_COMPAT | 1487 | #ifdef CONFIG_COMPAT |
| 1487 | .compat_ioctl = compat_blkdev_ioctl, | 1488 | .compat_ioctl = compat_blkdev_ioctl, |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9e23ffea7f54..b34d32fdaaec 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
| @@ -3235,7 +3235,8 @@ int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode, | |||
| 3235 | u64 bytes) | 3235 | u64 bytes) |
| 3236 | { | 3236 | { |
| 3237 | struct btrfs_space_info *data_sinfo; | 3237 | struct btrfs_space_info *data_sinfo; |
| 3238 | int ret = 0, committed = 0; | 3238 | u64 used; |
| 3239 | int ret = 0, committed = 0, flushed = 0; | ||
| 3239 | 3240 | ||
| 3240 | /* make sure bytes are sectorsize aligned */ | 3241 | /* make sure bytes are sectorsize aligned */ |
| 3241 | bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); | 3242 | bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); |
| @@ -3247,12 +3248,21 @@ int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode, | |||
| 3247 | again: | 3248 | again: |
| 3248 | /* make sure we have enough space to handle the data first */ | 3249 | /* make sure we have enough space to handle the data first */ |
| 3249 | spin_lock(&data_sinfo->lock); | 3250 | spin_lock(&data_sinfo->lock); |
| 3250 | if (data_sinfo->total_bytes - data_sinfo->bytes_used - | 3251 | used = data_sinfo->bytes_used + data_sinfo->bytes_delalloc + |
| 3251 | data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved - | 3252 | data_sinfo->bytes_reserved + data_sinfo->bytes_pinned + |
| 3252 | data_sinfo->bytes_pinned - data_sinfo->bytes_readonly - | 3253 | data_sinfo->bytes_readonly + data_sinfo->bytes_may_use + |
| 3253 | data_sinfo->bytes_may_use - data_sinfo->bytes_super < bytes) { | 3254 | data_sinfo->bytes_super; |
| 3255 | |||
| 3256 | if (used + bytes > data_sinfo->total_bytes) { | ||
| 3254 | struct btrfs_trans_handle *trans; | 3257 | struct btrfs_trans_handle *trans; |
| 3255 | 3258 | ||
| 3259 | if (!flushed) { | ||
| 3260 | spin_unlock(&data_sinfo->lock); | ||
| 3261 | flush_delalloc(root, data_sinfo); | ||
| 3262 | flushed = 1; | ||
| 3263 | goto again; | ||
| 3264 | } | ||
| 3265 | |||
| 3256 | /* | 3266 | /* |
| 3257 | * if we don't have enough free bytes in this space then we need | 3267 | * if we don't have enough free bytes in this space then we need |
| 3258 | * to alloc a new chunk. | 3268 | * to alloc a new chunk. |
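The extent-tree.c hunk above reworks the data-space check: instead of chaining subtractions from total_bytes, it sums every consumer into `used` and compares `used + bytes` against the total, and when that fails it now flushes delalloc once and re-checks before falling back to chunk allocation or a commit, presumably because writing out delayed allocations can release space that was only reserved. Restated with comments for orientation (these are btrfs internals, not a usable API):

    used = data_sinfo->bytes_used + data_sinfo->bytes_delalloc +
           data_sinfo->bytes_reserved + data_sinfo->bytes_pinned +
           data_sinfo->bytes_readonly + data_sinfo->bytes_may_use +
           data_sinfo->bytes_super;               /* everything already spoken for */

    if (used + bytes > data_sinfo->total_bytes) {
            if (!flushed) {
                    spin_unlock(&data_sinfo->lock);
                    flush_delalloc(root, data_sinfo);  /* write out delalloc once      */
                    flushed = 1;
                    goto again;                        /* re-check with fresh counters */
            }
            /* still short: allocate a new chunk or commit, as before */
    }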
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index aa7dc36dac78..8db7b14bbae8 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
| @@ -2250,6 +2250,12 @@ again: | |||
| 2250 | if (!looped) | 2250 | if (!looped) |
| 2251 | calc_size = max_t(u64, min_stripe_size, calc_size); | 2251 | calc_size = max_t(u64, min_stripe_size, calc_size); |
| 2252 | 2252 | ||
| 2253 | /* | ||
| 2254 | * we're about to do_div by the stripe_len so lets make sure | ||
| 2255 | * we end up with something bigger than a stripe | ||
| 2256 | */ | ||
| 2257 | calc_size = max_t(u64, calc_size, stripe_len * 4); | ||
| 2258 | |||
| 2253 | do_div(calc_size, stripe_len); | 2259 | do_div(calc_size, stripe_len); |
| 2254 | calc_size *= stripe_len; | 2260 | calc_size *= stripe_len; |
| 2255 | 2261 | ||
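The volumes.c hunk above clamps calc_size to at least four stripes before the do_div()/multiply pair rounds it down to a stripe multiple; without the clamp, a calc_size smaller than stripe_len rounds to zero. A standalone illustration of the rounding (stripe size chosen arbitrarily):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t stripe_len = 64 * 1024;    /* example stripe size          */
            uint64_t calc_size  = 48 * 1024;    /* smaller than one stripe      */

            if (calc_size < stripe_len * 4)     /* the max_t() clamp from above */
                    calc_size = stripe_len * 4;

            calc_size /= stripe_len;            /* what do_div() does           */
            calc_size *= stripe_len;

            /* prints 262144; without the clamp the result would be 0 */
            printf("calc_size = %llu\n", (unsigned long long)calc_size);
            return 0;
    }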
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index aa3cd7cc3e40..412593703d1e 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
| @@ -337,16 +337,15 @@ out: | |||
| 337 | /* | 337 | /* |
| 338 | * Get ref for the oldest snapc for an inode with dirty data... that is, the | 338 | * Get ref for the oldest snapc for an inode with dirty data... that is, the |
| 339 | * only snap context we are allowed to write back. | 339 | * only snap context we are allowed to write back. |
| 340 | * | ||
| 341 | * Caller holds i_lock. | ||
| 342 | */ | 340 | */ |
| 343 | static struct ceph_snap_context *__get_oldest_context(struct inode *inode, | 341 | static struct ceph_snap_context *get_oldest_context(struct inode *inode, |
| 344 | u64 *snap_size) | 342 | u64 *snap_size) |
| 345 | { | 343 | { |
| 346 | struct ceph_inode_info *ci = ceph_inode(inode); | 344 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 347 | struct ceph_snap_context *snapc = NULL; | 345 | struct ceph_snap_context *snapc = NULL; |
| 348 | struct ceph_cap_snap *capsnap = NULL; | 346 | struct ceph_cap_snap *capsnap = NULL; |
| 349 | 347 | ||
| 348 | spin_lock(&inode->i_lock); | ||
| 350 | list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { | 349 | list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { |
| 351 | dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, | 350 | dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, |
| 352 | capsnap->context, capsnap->dirty_pages); | 351 | capsnap->context, capsnap->dirty_pages); |
| @@ -357,21 +356,11 @@ static struct ceph_snap_context *__get_oldest_context(struct inode *inode, | |||
| 357 | break; | 356 | break; |
| 358 | } | 357 | } |
| 359 | } | 358 | } |
| 360 | if (!snapc && ci->i_snap_realm) { | 359 | if (!snapc && ci->i_head_snapc) { |
| 361 | snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context); | 360 | snapc = ceph_get_snap_context(ci->i_head_snapc); |
| 362 | dout(" head snapc %p has %d dirty pages\n", | 361 | dout(" head snapc %p has %d dirty pages\n", |
| 363 | snapc, ci->i_wrbuffer_ref_head); | 362 | snapc, ci->i_wrbuffer_ref_head); |
| 364 | } | 363 | } |
| 365 | return snapc; | ||
| 366 | } | ||
| 367 | |||
| 368 | static struct ceph_snap_context *get_oldest_context(struct inode *inode, | ||
| 369 | u64 *snap_size) | ||
| 370 | { | ||
| 371 | struct ceph_snap_context *snapc = NULL; | ||
| 372 | |||
| 373 | spin_lock(&inode->i_lock); | ||
| 374 | snapc = __get_oldest_context(inode, snap_size); | ||
| 375 | spin_unlock(&inode->i_lock); | 364 | spin_unlock(&inode->i_lock); |
| 376 | return snapc; | 365 | return snapc; |
| 377 | } | 366 | } |
| @@ -392,7 +381,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) | |||
| 392 | int len = PAGE_CACHE_SIZE; | 381 | int len = PAGE_CACHE_SIZE; |
| 393 | loff_t i_size; | 382 | loff_t i_size; |
| 394 | int err = 0; | 383 | int err = 0; |
| 395 | struct ceph_snap_context *snapc; | 384 | struct ceph_snap_context *snapc, *oldest; |
| 396 | u64 snap_size = 0; | 385 | u64 snap_size = 0; |
| 397 | long writeback_stat; | 386 | long writeback_stat; |
| 398 | 387 | ||
| @@ -413,13 +402,16 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) | |||
| 413 | dout("writepage %p page %p not dirty?\n", inode, page); | 402 | dout("writepage %p page %p not dirty?\n", inode, page); |
| 414 | goto out; | 403 | goto out; |
| 415 | } | 404 | } |
| 416 | if (snapc != get_oldest_context(inode, &snap_size)) { | 405 | oldest = get_oldest_context(inode, &snap_size); |
| 406 | if (snapc->seq > oldest->seq) { | ||
| 417 | dout("writepage %p page %p snapc %p not writeable - noop\n", | 407 | dout("writepage %p page %p snapc %p not writeable - noop\n", |
| 418 | inode, page, (void *)page->private); | 408 | inode, page, (void *)page->private); |
| 419 | /* we should only noop if called by kswapd */ | 409 | /* we should only noop if called by kswapd */ |
| 420 | WARN_ON((current->flags & PF_MEMALLOC) == 0); | 410 | WARN_ON((current->flags & PF_MEMALLOC) == 0); |
| 411 | ceph_put_snap_context(oldest); | ||
| 421 | goto out; | 412 | goto out; |
| 422 | } | 413 | } |
| 414 | ceph_put_snap_context(oldest); | ||
| 423 | 415 | ||
| 424 | /* is this a partial page at end of file? */ | 416 | /* is this a partial page at end of file? */ |
| 425 | if (snap_size) | 417 | if (snap_size) |
| @@ -458,7 +450,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) | |||
| 458 | ClearPagePrivate(page); | 450 | ClearPagePrivate(page); |
| 459 | end_page_writeback(page); | 451 | end_page_writeback(page); |
| 460 | ceph_put_wrbuffer_cap_refs(ci, 1, snapc); | 452 | ceph_put_wrbuffer_cap_refs(ci, 1, snapc); |
| 461 | ceph_put_snap_context(snapc); | 453 | ceph_put_snap_context(snapc); /* page's reference */ |
| 462 | out: | 454 | out: |
| 463 | return err; | 455 | return err; |
| 464 | } | 456 | } |
| @@ -558,9 +550,9 @@ static void writepages_finish(struct ceph_osd_request *req, | |||
| 558 | dout("inode %p skipping page %p\n", inode, page); | 550 | dout("inode %p skipping page %p\n", inode, page); |
| 559 | wbc->pages_skipped++; | 551 | wbc->pages_skipped++; |
| 560 | } | 552 | } |
| 553 | ceph_put_snap_context((void *)page->private); | ||
| 561 | page->private = 0; | 554 | page->private = 0; |
| 562 | ClearPagePrivate(page); | 555 | ClearPagePrivate(page); |
| 563 | ceph_put_snap_context(snapc); | ||
| 564 | dout("unlocking %d %p\n", i, page); | 556 | dout("unlocking %d %p\n", i, page); |
| 565 | end_page_writeback(page); | 557 | end_page_writeback(page); |
| 566 | 558 | ||
| @@ -618,7 +610,7 @@ static int ceph_writepages_start(struct address_space *mapping, | |||
| 618 | int range_whole = 0; | 610 | int range_whole = 0; |
| 619 | int should_loop = 1; | 611 | int should_loop = 1; |
| 620 | pgoff_t max_pages = 0, max_pages_ever = 0; | 612 | pgoff_t max_pages = 0, max_pages_ever = 0; |
| 621 | struct ceph_snap_context *snapc = NULL, *last_snapc = NULL; | 613 | struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc; |
| 622 | struct pagevec pvec; | 614 | struct pagevec pvec; |
| 623 | int done = 0; | 615 | int done = 0; |
| 624 | int rc = 0; | 616 | int rc = 0; |
| @@ -770,9 +762,10 @@ get_more_pages: | |||
| 770 | } | 762 | } |
| 771 | 763 | ||
| 772 | /* only if matching snap context */ | 764 | /* only if matching snap context */ |
| 773 | if (snapc != (void *)page->private) { | 765 | pgsnapc = (void *)page->private; |
| 774 | dout("page snapc %p != oldest %p\n", | 766 | if (pgsnapc->seq > snapc->seq) { |
| 775 | (void *)page->private, snapc); | 767 | dout("page snapc %p %lld > oldest %p %lld\n", |
| 768 | pgsnapc, pgsnapc->seq, snapc, snapc->seq); | ||
| 776 | unlock_page(page); | 769 | unlock_page(page); |
| 777 | if (!locked_pages) | 770 | if (!locked_pages) |
| 778 | continue; /* keep looking for snap */ | 771 | continue; /* keep looking for snap */ |
| @@ -914,7 +907,10 @@ static int context_is_writeable_or_written(struct inode *inode, | |||
| 914 | struct ceph_snap_context *snapc) | 907 | struct ceph_snap_context *snapc) |
| 915 | { | 908 | { |
| 916 | struct ceph_snap_context *oldest = get_oldest_context(inode, NULL); | 909 | struct ceph_snap_context *oldest = get_oldest_context(inode, NULL); |
| 917 | return !oldest || snapc->seq <= oldest->seq; | 910 | int ret = !oldest || snapc->seq <= oldest->seq; |
| 911 | |||
| 912 | ceph_put_snap_context(oldest); | ||
| 913 | return ret; | ||
| 918 | } | 914 | } |
| 919 | 915 | ||
| 920 | /* | 916 | /* |
| @@ -936,8 +932,8 @@ static int ceph_update_writeable_page(struct file *file, | |||
| 936 | int pos_in_page = pos & ~PAGE_CACHE_MASK; | 932 | int pos_in_page = pos & ~PAGE_CACHE_MASK; |
| 937 | int end_in_page = pos_in_page + len; | 933 | int end_in_page = pos_in_page + len; |
| 938 | loff_t i_size; | 934 | loff_t i_size; |
| 939 | struct ceph_snap_context *snapc; | ||
| 940 | int r; | 935 | int r; |
| 936 | struct ceph_snap_context *snapc, *oldest; | ||
| 941 | 937 | ||
| 942 | retry_locked: | 938 | retry_locked: |
| 943 | /* writepages currently holds page lock, but if we change that later, */ | 939 | /* writepages currently holds page lock, but if we change that later, */ |
| @@ -947,23 +943,24 @@ retry_locked: | |||
| 947 | BUG_ON(!ci->i_snap_realm); | 943 | BUG_ON(!ci->i_snap_realm); |
| 948 | down_read(&mdsc->snap_rwsem); | 944 | down_read(&mdsc->snap_rwsem); |
| 949 | BUG_ON(!ci->i_snap_realm->cached_context); | 945 | BUG_ON(!ci->i_snap_realm->cached_context); |
| 950 | if (page->private && | 946 | snapc = (void *)page->private; |
| 951 | (void *)page->private != ci->i_snap_realm->cached_context) { | 947 | if (snapc && snapc != ci->i_head_snapc) { |
| 952 | /* | 948 | /* |
| 953 | * this page is already dirty in another (older) snap | 949 | * this page is already dirty in another (older) snap |
| 954 | * context! is it writeable now? | 950 | * context! is it writeable now? |
| 955 | */ | 951 | */ |
| 956 | snapc = get_oldest_context(inode, NULL); | 952 | oldest = get_oldest_context(inode, NULL); |
| 957 | up_read(&mdsc->snap_rwsem); | 953 | up_read(&mdsc->snap_rwsem); |
| 958 | 954 | ||
| 959 | if (snapc != (void *)page->private) { | 955 | if (snapc->seq > oldest->seq) { |
| 956 | ceph_put_snap_context(oldest); | ||
| 960 | dout(" page %p snapc %p not current or oldest\n", | 957 | dout(" page %p snapc %p not current or oldest\n", |
| 961 | page, (void *)page->private); | 958 | page, snapc); |
| 962 | /* | 959 | /* |
| 963 | * queue for writeback, and wait for snapc to | 960 | * queue for writeback, and wait for snapc to |
| 964 | * be writeable or written | 961 | * be writeable or written |
| 965 | */ | 962 | */ |
| 966 | snapc = ceph_get_snap_context((void *)page->private); | 963 | snapc = ceph_get_snap_context(snapc); |
| 967 | unlock_page(page); | 964 | unlock_page(page); |
| 968 | ceph_queue_writeback(inode); | 965 | ceph_queue_writeback(inode); |
| 969 | r = wait_event_interruptible(ci->i_cap_wq, | 966 | r = wait_event_interruptible(ci->i_cap_wq, |
| @@ -973,6 +970,7 @@ retry_locked: | |||
| 973 | return r; | 970 | return r; |
| 974 | return -EAGAIN; | 971 | return -EAGAIN; |
| 975 | } | 972 | } |
| 973 | ceph_put_snap_context(oldest); | ||
| 976 | 974 | ||
| 977 | /* yay, writeable, do it now (without dropping page lock) */ | 975 | /* yay, writeable, do it now (without dropping page lock) */ |
| 978 | dout(" page %p snapc %p not current, but oldest\n", | 976 | dout(" page %p snapc %p not current, but oldest\n", |
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 3710e077a857..aa2239fa9a3b 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
| @@ -1205,6 +1205,12 @@ retry: | |||
| 1205 | if (capsnap->dirty_pages || capsnap->writing) | 1205 | if (capsnap->dirty_pages || capsnap->writing) |
| 1206 | continue; | 1206 | continue; |
| 1207 | 1207 | ||
| 1208 | /* | ||
| 1209 | * if cap writeback already occurred, we should have dropped | ||
| 1210 | * the capsnap in ceph_put_wrbuffer_cap_refs. | ||
| 1211 | */ | ||
| 1212 | BUG_ON(capsnap->dirty == 0); | ||
| 1213 | |||
| 1208 | /* pick mds, take s_mutex */ | 1214 | /* pick mds, take s_mutex */ |
| 1209 | mds = __ceph_get_cap_mds(ci, &mseq); | 1215 | mds = __ceph_get_cap_mds(ci, &mseq); |
| 1210 | if (session && session->s_mds != mds) { | 1216 | if (session && session->s_mds != mds) { |
| @@ -2118,8 +2124,8 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) | |||
| 2118 | } | 2124 | } |
| 2119 | spin_unlock(&inode->i_lock); | 2125 | spin_unlock(&inode->i_lock); |
| 2120 | 2126 | ||
| 2121 | dout("put_cap_refs %p had %s %s\n", inode, ceph_cap_string(had), | 2127 | dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had), |
| 2122 | last ? "last" : ""); | 2128 | last ? " last" : "", put ? " put" : ""); |
| 2123 | 2129 | ||
| 2124 | if (last && !flushsnaps) | 2130 | if (last && !flushsnaps) |
| 2125 | ceph_check_caps(ci, 0, NULL); | 2131 | ceph_check_caps(ci, 0, NULL); |
| @@ -2143,7 +2149,8 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, | |||
| 2143 | { | 2149 | { |
| 2144 | struct inode *inode = &ci->vfs_inode; | 2150 | struct inode *inode = &ci->vfs_inode; |
| 2145 | int last = 0; | 2151 | int last = 0; |
| 2146 | int last_snap = 0; | 2152 | int complete_capsnap = 0; |
| 2153 | int drop_capsnap = 0; | ||
| 2147 | int found = 0; | 2154 | int found = 0; |
| 2148 | struct ceph_cap_snap *capsnap = NULL; | 2155 | struct ceph_cap_snap *capsnap = NULL; |
| 2149 | 2156 | ||
| @@ -2166,19 +2173,32 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, | |||
| 2166 | list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { | 2173 | list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { |
| 2167 | if (capsnap->context == snapc) { | 2174 | if (capsnap->context == snapc) { |
| 2168 | found = 1; | 2175 | found = 1; |
| 2169 | capsnap->dirty_pages -= nr; | ||
| 2170 | last_snap = !capsnap->dirty_pages; | ||
| 2171 | break; | 2176 | break; |
| 2172 | } | 2177 | } |
| 2173 | } | 2178 | } |
| 2174 | BUG_ON(!found); | 2179 | BUG_ON(!found); |
| 2180 | capsnap->dirty_pages -= nr; | ||
| 2181 | if (capsnap->dirty_pages == 0) { | ||
| 2182 | complete_capsnap = 1; | ||
| 2183 | if (capsnap->dirty == 0) | ||
| 2184 | /* cap writeback completed before we created | ||
| 2185 | * the cap_snap; no FLUSHSNAP is needed */ | ||
| 2186 | drop_capsnap = 1; | ||
| 2187 | } | ||
| 2175 | dout("put_wrbuffer_cap_refs on %p cap_snap %p " | 2188 | dout("put_wrbuffer_cap_refs on %p cap_snap %p " |
| 2176 | " snap %lld %d/%d -> %d/%d %s%s\n", | 2189 | " snap %lld %d/%d -> %d/%d %s%s%s\n", |
| 2177 | inode, capsnap, capsnap->context->seq, | 2190 | inode, capsnap, capsnap->context->seq, |
| 2178 | ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, | 2191 | ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, |
| 2179 | ci->i_wrbuffer_ref, capsnap->dirty_pages, | 2192 | ci->i_wrbuffer_ref, capsnap->dirty_pages, |
| 2180 | last ? " (wrbuffer last)" : "", | 2193 | last ? " (wrbuffer last)" : "", |
| 2181 | last_snap ? " (capsnap last)" : ""); | 2194 | complete_capsnap ? " (complete capsnap)" : "", |
| 2195 | drop_capsnap ? " (drop capsnap)" : ""); | ||
| 2196 | if (drop_capsnap) { | ||
| 2197 | ceph_put_snap_context(capsnap->context); | ||
| 2198 | list_del(&capsnap->ci_item); | ||
| 2199 | list_del(&capsnap->flushing_item); | ||
| 2200 | ceph_put_cap_snap(capsnap); | ||
| 2201 | } | ||
| 2182 | } | 2202 | } |
| 2183 | 2203 | ||
| 2184 | spin_unlock(&inode->i_lock); | 2204 | spin_unlock(&inode->i_lock); |
| @@ -2186,10 +2206,12 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, | |||
| 2186 | if (last) { | 2206 | if (last) { |
| 2187 | ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); | 2207 | ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); |
| 2188 | iput(inode); | 2208 | iput(inode); |
| 2189 | } else if (last_snap) { | 2209 | } else if (complete_capsnap) { |
| 2190 | ceph_flush_snaps(ci); | 2210 | ceph_flush_snaps(ci); |
| 2191 | wake_up(&ci->i_cap_wq); | 2211 | wake_up(&ci->i_cap_wq); |
| 2192 | } | 2212 | } |
| 2213 | if (drop_capsnap) | ||
| 2214 | iput(inode); | ||
| 2193 | } | 2215 | } |
| 2194 | 2216 | ||
| 2195 | /* | 2217 | /* |
| @@ -2465,8 +2487,8 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid, | |||
| 2465 | break; | 2487 | break; |
| 2466 | } | 2488 | } |
| 2467 | WARN_ON(capsnap->dirty_pages || capsnap->writing); | 2489 | WARN_ON(capsnap->dirty_pages || capsnap->writing); |
| 2468 | dout(" removing cap_snap %p follows %lld\n", | 2490 | dout(" removing %p cap_snap %p follows %lld\n", |
| 2469 | capsnap, follows); | 2491 | inode, capsnap, follows); |
| 2470 | ceph_put_snap_context(capsnap->context); | 2492 | ceph_put_snap_context(capsnap->context); |
| 2471 | list_del(&capsnap->ci_item); | 2493 | list_del(&capsnap->ci_item); |
| 2472 | list_del(&capsnap->flushing_item); | 2494 | list_del(&capsnap->flushing_item); |
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 7261dc6c2ead..ea8ee2e526aa 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
| @@ -171,11 +171,11 @@ more: | |||
| 171 | spin_lock(&inode->i_lock); | 171 | spin_lock(&inode->i_lock); |
| 172 | spin_lock(&dcache_lock); | 172 | spin_lock(&dcache_lock); |
| 173 | 173 | ||
| 174 | last = dentry; | ||
| 175 | |||
| 174 | if (err < 0) | 176 | if (err < 0) |
| 175 | goto out_unlock; | 177 | goto out_unlock; |
| 176 | 178 | ||
| 177 | last = dentry; | ||
| 178 | |||
| 179 | p = p->prev; | 179 | p = p->prev; |
| 180 | filp->f_pos++; | 180 | filp->f_pos++; |
| 181 | 181 | ||
| @@ -312,7 +312,7 @@ more: | |||
| 312 | req->r_readdir_offset = fi->next_offset; | 312 | req->r_readdir_offset = fi->next_offset; |
| 313 | req->r_args.readdir.frag = cpu_to_le32(frag); | 313 | req->r_args.readdir.frag = cpu_to_le32(frag); |
| 314 | req->r_args.readdir.max_entries = cpu_to_le32(max_entries); | 314 | req->r_args.readdir.max_entries = cpu_to_le32(max_entries); |
| 315 | req->r_num_caps = max_entries; | 315 | req->r_num_caps = max_entries + 1; |
| 316 | err = ceph_mdsc_do_request(mdsc, NULL, req); | 316 | err = ceph_mdsc_do_request(mdsc, NULL, req); |
| 317 | if (err < 0) { | 317 | if (err < 0) { |
| 318 | ceph_mdsc_put_request(req); | 318 | ceph_mdsc_put_request(req); |
| @@ -489,6 +489,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, | |||
| 489 | struct inode *inode = ceph_get_snapdir(parent); | 489 | struct inode *inode = ceph_get_snapdir(parent); |
| 490 | dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n", | 490 | dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n", |
| 491 | dentry, dentry->d_name.len, dentry->d_name.name, inode); | 491 | dentry, dentry->d_name.len, dentry->d_name.name, inode); |
| 492 | BUG_ON(!d_unhashed(dentry)); | ||
| 492 | d_add(dentry, inode); | 493 | d_add(dentry, inode); |
| 493 | err = 0; | 494 | err = 0; |
| 494 | } | 495 | } |
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index aca82d55cc53..26f883c275e8 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
| @@ -886,6 +886,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
| 886 | struct inode *in = NULL; | 886 | struct inode *in = NULL; |
| 887 | struct ceph_mds_reply_inode *ininfo; | 887 | struct ceph_mds_reply_inode *ininfo; |
| 888 | struct ceph_vino vino; | 888 | struct ceph_vino vino; |
| 889 | struct ceph_client *client = ceph_sb_to_client(sb); | ||
| 889 | int i = 0; | 890 | int i = 0; |
| 890 | int err = 0; | 891 | int err = 0; |
| 891 | 892 | ||
| @@ -949,7 +950,14 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, | |||
| 949 | return err; | 950 | return err; |
| 950 | } | 951 | } |
| 951 | 952 | ||
| 952 | if (rinfo->head->is_dentry && !req->r_aborted) { | 953 | /* |
| 954 | * ignore null lease/binding on snapdir ENOENT, or else we | ||
| 955 | * will have trouble splicing in the virtual snapdir later | ||
| 956 | */ | ||
| 957 | if (rinfo->head->is_dentry && !req->r_aborted && | ||
| 958 | (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, | ||
| 959 | client->mount_args->snapdir_name, | ||
| 960 | req->r_dentry->d_name.len))) { | ||
| 953 | /* | 961 | /* |
| 954 | * lookup link rename : null -> possibly existing inode | 962 | * lookup link rename : null -> possibly existing inode |
| 955 | * mknod symlink mkdir : null -> new inode | 963 | * mknod symlink mkdir : null -> new inode |
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 8f1715ffbe4b..cdaaa131add3 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c | |||
| @@ -30,6 +30,10 @@ static char tag_msg = CEPH_MSGR_TAG_MSG; | |||
| 30 | static char tag_ack = CEPH_MSGR_TAG_ACK; | 30 | static char tag_ack = CEPH_MSGR_TAG_ACK; |
| 31 | static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE; | 31 | static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE; |
| 32 | 32 | ||
| 33 | #ifdef CONFIG_LOCKDEP | ||
| 34 | static struct lock_class_key socket_class; | ||
| 35 | #endif | ||
| 36 | |||
| 33 | 37 | ||
| 34 | static void queue_con(struct ceph_connection *con); | 38 | static void queue_con(struct ceph_connection *con); |
| 35 | static void con_work(struct work_struct *); | 39 | static void con_work(struct work_struct *); |
| @@ -228,6 +232,10 @@ static struct socket *ceph_tcp_connect(struct ceph_connection *con) | |||
| 228 | con->sock = sock; | 232 | con->sock = sock; |
| 229 | sock->sk->sk_allocation = GFP_NOFS; | 233 | sock->sk->sk_allocation = GFP_NOFS; |
| 230 | 234 | ||
| 235 | #ifdef CONFIG_LOCKDEP | ||
| 236 | lockdep_set_class(&sock->sk->sk_lock, &socket_class); | ||
| 237 | #endif | ||
| 238 | |||
| 231 | set_sock_callbacks(sock, con); | 239 | set_sock_callbacks(sock, con); |
| 232 | 240 | ||
| 233 | dout("connect %s\n", pr_addr(&con->peer_addr.in_addr)); | 241 | dout("connect %s\n", pr_addr(&con->peer_addr.in_addr)); |
| @@ -333,6 +341,7 @@ static void reset_connection(struct ceph_connection *con) | |||
| 333 | con->out_msg = NULL; | 341 | con->out_msg = NULL; |
| 334 | } | 342 | } |
| 335 | con->in_seq = 0; | 343 | con->in_seq = 0; |
| 344 | con->in_seq_acked = 0; | ||
| 336 | } | 345 | } |
| 337 | 346 | ||
| 338 | /* | 347 | /* |
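The messenger.c hunk above gives ceph's kernel-created TCP sockets their own lockdep class via lockdep_set_class() on sk_lock, so lockdep tracks them separately from ordinary sockets instead of folding them into the generic socket class. A sketch of the pattern for any kernel user that creates its own sockets (names are illustrative):

    #ifdef CONFIG_LOCKDEP
    static struct lock_class_key my_socket_class;
    #endif

    static int open_kernel_socket(struct socket **sockp)
    {
            struct socket *sock;
            int ret;

            ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
            if (ret)
                    return ret;
            sock->sk->sk_allocation = GFP_NOFS;            /* as in the hunk above */
    #ifdef CONFIG_LOCKDEP
            lockdep_set_class(&sock->sk->sk_lock, &my_socket_class);
    #endif
            *sockp = sock;
            return 0;
    }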
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index 21c6623c4b07..2e2c15eed82a 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c | |||
| @@ -314,71 +314,6 @@ bad: | |||
| 314 | return ERR_PTR(err); | 314 | return ERR_PTR(err); |
| 315 | } | 315 | } |
| 316 | 316 | ||
| 317 | |||
| 318 | /* | ||
| 319 | * osd map | ||
| 320 | */ | ||
| 321 | void ceph_osdmap_destroy(struct ceph_osdmap *map) | ||
| 322 | { | ||
| 323 | dout("osdmap_destroy %p\n", map); | ||
| 324 | if (map->crush) | ||
| 325 | crush_destroy(map->crush); | ||
| 326 | while (!RB_EMPTY_ROOT(&map->pg_temp)) { | ||
| 327 | struct ceph_pg_mapping *pg = | ||
| 328 | rb_entry(rb_first(&map->pg_temp), | ||
| 329 | struct ceph_pg_mapping, node); | ||
| 330 | rb_erase(&pg->node, &map->pg_temp); | ||
| 331 | kfree(pg); | ||
| 332 | } | ||
| 333 | while (!RB_EMPTY_ROOT(&map->pg_pools)) { | ||
| 334 | struct ceph_pg_pool_info *pi = | ||
| 335 | rb_entry(rb_first(&map->pg_pools), | ||
| 336 | struct ceph_pg_pool_info, node); | ||
| 337 | rb_erase(&pi->node, &map->pg_pools); | ||
| 338 | kfree(pi); | ||
| 339 | } | ||
| 340 | kfree(map->osd_state); | ||
| 341 | kfree(map->osd_weight); | ||
| 342 | kfree(map->osd_addr); | ||
| 343 | kfree(map); | ||
| 344 | } | ||
| 345 | |||
| 346 | /* | ||
| 347 | * adjust max osd value. reallocate arrays. | ||
| 348 | */ | ||
| 349 | static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) | ||
| 350 | { | ||
| 351 | u8 *state; | ||
| 352 | struct ceph_entity_addr *addr; | ||
| 353 | u32 *weight; | ||
| 354 | |||
| 355 | state = kcalloc(max, sizeof(*state), GFP_NOFS); | ||
| 356 | addr = kcalloc(max, sizeof(*addr), GFP_NOFS); | ||
| 357 | weight = kcalloc(max, sizeof(*weight), GFP_NOFS); | ||
| 358 | if (state == NULL || addr == NULL || weight == NULL) { | ||
| 359 | kfree(state); | ||
| 360 | kfree(addr); | ||
| 361 | kfree(weight); | ||
| 362 | return -ENOMEM; | ||
| 363 | } | ||
| 364 | |||
| 365 | /* copy old? */ | ||
| 366 | if (map->osd_state) { | ||
| 367 | memcpy(state, map->osd_state, map->max_osd*sizeof(*state)); | ||
| 368 | memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr)); | ||
| 369 | memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight)); | ||
| 370 | kfree(map->osd_state); | ||
| 371 | kfree(map->osd_addr); | ||
| 372 | kfree(map->osd_weight); | ||
| 373 | } | ||
| 374 | |||
| 375 | map->osd_state = state; | ||
| 376 | map->osd_weight = weight; | ||
| 377 | map->osd_addr = addr; | ||
| 378 | map->max_osd = max; | ||
| 379 | return 0; | ||
| 380 | } | ||
| 381 | |||
| 382 | /* | 317 | /* |
| 383 | * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid | 318 | * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid |
| 384 | * to a set of osds) | 319 | * to a set of osds) |
| @@ -482,6 +417,13 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id) | |||
| 482 | return NULL; | 417 | return NULL; |
| 483 | } | 418 | } |
| 484 | 419 | ||
| 420 | static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) | ||
| 421 | { | ||
| 422 | rb_erase(&pi->node, root); | ||
| 423 | kfree(pi->name); | ||
| 424 | kfree(pi); | ||
| 425 | } | ||
| 426 | |||
| 485 | void __decode_pool(void **p, struct ceph_pg_pool_info *pi) | 427 | void __decode_pool(void **p, struct ceph_pg_pool_info *pi) |
| 486 | { | 428 | { |
| 487 | ceph_decode_copy(p, &pi->v, sizeof(pi->v)); | 429 | ceph_decode_copy(p, &pi->v, sizeof(pi->v)); |
| @@ -490,6 +432,98 @@ void __decode_pool(void **p, struct ceph_pg_pool_info *pi) | |||
| 490 | *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2; | 432 | *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2; |
| 491 | } | 433 | } |
| 492 | 434 | ||
| 435 | static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map) | ||
| 436 | { | ||
| 437 | struct ceph_pg_pool_info *pi; | ||
| 438 | u32 num, len, pool; | ||
| 439 | |||
| 440 | ceph_decode_32_safe(p, end, num, bad); | ||
| 441 | dout(" %d pool names\n", num); | ||
| 442 | while (num--) { | ||
| 443 | ceph_decode_32_safe(p, end, pool, bad); | ||
| 444 | ceph_decode_32_safe(p, end, len, bad); | ||
| 445 | dout(" pool %d len %d\n", pool, len); | ||
| 446 | pi = __lookup_pg_pool(&map->pg_pools, pool); | ||
| 447 | if (pi) { | ||
| 448 | kfree(pi->name); | ||
| 449 | pi->name = kmalloc(len + 1, GFP_NOFS); | ||
| 450 | if (pi->name) { | ||
| 451 | memcpy(pi->name, *p, len); | ||
| 452 | pi->name[len] = '\0'; | ||
| 453 | dout(" name is %s\n", pi->name); | ||
| 454 | } | ||
| 455 | } | ||
| 456 | *p += len; | ||
| 457 | } | ||
| 458 | return 0; | ||
| 459 | |||
| 460 | bad: | ||
| 461 | return -EINVAL; | ||
| 462 | } | ||
| 463 | |||
| 464 | /* | ||
| 465 | * osd map | ||
| 466 | */ | ||
| 467 | void ceph_osdmap_destroy(struct ceph_osdmap *map) | ||
| 468 | { | ||
| 469 | dout("osdmap_destroy %p\n", map); | ||
| 470 | if (map->crush) | ||
| 471 | crush_destroy(map->crush); | ||
| 472 | while (!RB_EMPTY_ROOT(&map->pg_temp)) { | ||
| 473 | struct ceph_pg_mapping *pg = | ||
| 474 | rb_entry(rb_first(&map->pg_temp), | ||
| 475 | struct ceph_pg_mapping, node); | ||
| 476 | rb_erase(&pg->node, &map->pg_temp); | ||
| 477 | kfree(pg); | ||
| 478 | } | ||
| 479 | while (!RB_EMPTY_ROOT(&map->pg_pools)) { | ||
| 480 | struct ceph_pg_pool_info *pi = | ||
| 481 | rb_entry(rb_first(&map->pg_pools), | ||
| 482 | struct ceph_pg_pool_info, node); | ||
| 483 | __remove_pg_pool(&map->pg_pools, pi); | ||
| 484 | } | ||
| 485 | kfree(map->osd_state); | ||
| 486 | kfree(map->osd_weight); | ||
| 487 | kfree(map->osd_addr); | ||
| 488 | kfree(map); | ||
| 489 | } | ||
| 490 | |||
| 491 | /* | ||
| 492 | * adjust max osd value. reallocate arrays. | ||
| 493 | */ | ||
| 494 | static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) | ||
| 495 | { | ||
| 496 | u8 *state; | ||
| 497 | struct ceph_entity_addr *addr; | ||
| 498 | u32 *weight; | ||
| 499 | |||
| 500 | state = kcalloc(max, sizeof(*state), GFP_NOFS); | ||
| 501 | addr = kcalloc(max, sizeof(*addr), GFP_NOFS); | ||
| 502 | weight = kcalloc(max, sizeof(*weight), GFP_NOFS); | ||
| 503 | if (state == NULL || addr == NULL || weight == NULL) { | ||
| 504 | kfree(state); | ||
| 505 | kfree(addr); | ||
| 506 | kfree(weight); | ||
| 507 | return -ENOMEM; | ||
| 508 | } | ||
| 509 | |||
| 510 | /* copy old? */ | ||
| 511 | if (map->osd_state) { | ||
| 512 | memcpy(state, map->osd_state, map->max_osd*sizeof(*state)); | ||
| 513 | memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr)); | ||
| 514 | memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight)); | ||
| 515 | kfree(map->osd_state); | ||
| 516 | kfree(map->osd_addr); | ||
| 517 | kfree(map->osd_weight); | ||
| 518 | } | ||
| 519 | |||
| 520 | map->osd_state = state; | ||
| 521 | map->osd_weight = weight; | ||
| 522 | map->osd_addr = addr; | ||
| 523 | map->max_osd = max; | ||
| 524 | return 0; | ||
| 525 | } | ||
| 526 | |||
| 493 | /* | 527 | /* |
| 494 | * decode a full map. | 528 | * decode a full map. |
| 495 | */ | 529 | */ |
| @@ -526,7 +560,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) | |||
| 526 | ceph_decode_32_safe(p, end, max, bad); | 560 | ceph_decode_32_safe(p, end, max, bad); |
| 527 | while (max--) { | 561 | while (max--) { |
| 528 | ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad); | 562 | ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad); |
| 529 | pi = kmalloc(sizeof(*pi), GFP_NOFS); | 563 | pi = kzalloc(sizeof(*pi), GFP_NOFS); |
| 530 | if (!pi) | 564 | if (!pi) |
| 531 | goto bad; | 565 | goto bad; |
| 532 | pi->id = ceph_decode_32(p); | 566 | pi->id = ceph_decode_32(p); |
| @@ -539,6 +573,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) | |||
| 539 | __decode_pool(p, pi); | 573 | __decode_pool(p, pi); |
| 540 | __insert_pg_pool(&map->pg_pools, pi); | 574 | __insert_pg_pool(&map->pg_pools, pi); |
| 541 | } | 575 | } |
| 576 | |||
| 577 | if (version >= 5 && __decode_pool_names(p, end, map) < 0) | ||
| 578 | goto bad; | ||
| 579 | |||
| 542 | ceph_decode_32_safe(p, end, map->pool_max, bad); | 580 | ceph_decode_32_safe(p, end, map->pool_max, bad); |
| 543 | 581 | ||
| 544 | ceph_decode_32_safe(p, end, map->flags, bad); | 582 | ceph_decode_32_safe(p, end, map->flags, bad); |
| @@ -712,7 +750,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
| 712 | } | 750 | } |
| 713 | pi = __lookup_pg_pool(&map->pg_pools, pool); | 751 | pi = __lookup_pg_pool(&map->pg_pools, pool); |
| 714 | if (!pi) { | 752 | if (!pi) { |
| 715 | pi = kmalloc(sizeof(*pi), GFP_NOFS); | 753 | pi = kzalloc(sizeof(*pi), GFP_NOFS); |
| 716 | if (!pi) { | 754 | if (!pi) { |
| 717 | err = -ENOMEM; | 755 | err = -ENOMEM; |
| 718 | goto bad; | 756 | goto bad; |
| @@ -722,6 +760,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
| 722 | } | 760 | } |
| 723 | __decode_pool(p, pi); | 761 | __decode_pool(p, pi); |
| 724 | } | 762 | } |
| 763 | if (version >= 5 && __decode_pool_names(p, end, map) < 0) | ||
| 764 | goto bad; | ||
| 725 | 765 | ||
| 726 | /* old_pool */ | 766 | /* old_pool */ |
| 727 | ceph_decode_32_safe(p, end, len, bad); | 767 | ceph_decode_32_safe(p, end, len, bad); |
| @@ -730,10 +770,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
| 730 | 770 | ||
| 731 | ceph_decode_32_safe(p, end, pool, bad); | 771 | ceph_decode_32_safe(p, end, pool, bad); |
| 732 | pi = __lookup_pg_pool(&map->pg_pools, pool); | 772 | pi = __lookup_pg_pool(&map->pg_pools, pool); |
| 733 | if (pi) { | 773 | if (pi) |
| 734 | rb_erase(&pi->node, &map->pg_pools); | 774 | __remove_pg_pool(&map->pg_pools, pi); |
| 735 | kfree(pi); | ||
| 736 | } | ||
| 737 | } | 775 | } |
| 738 | 776 | ||
| 739 | /* new_up */ | 777 | /* new_up */ |
diff --git a/fs/ceph/osdmap.h b/fs/ceph/osdmap.h index 1fb55afb2642..8bc9f1e4f562 100644 --- a/fs/ceph/osdmap.h +++ b/fs/ceph/osdmap.h | |||
| @@ -23,6 +23,7 @@ struct ceph_pg_pool_info { | |||
| 23 | int id; | 23 | int id; |
| 24 | struct ceph_pg_pool v; | 24 | struct ceph_pg_pool v; |
| 25 | int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask; | 25 | int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask; |
| 26 | char *name; | ||
| 26 | }; | 27 | }; |
| 27 | 28 | ||
| 28 | struct ceph_pg_mapping { | 29 | struct ceph_pg_mapping { |
diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h index 26ac8b89a676..a1fc1d017b58 100644 --- a/fs/ceph/rados.h +++ b/fs/ceph/rados.h | |||
| @@ -11,8 +11,10 @@ | |||
| 11 | /* | 11 | /* |
| 12 | * osdmap encoding versions | 12 | * osdmap encoding versions |
| 13 | */ | 13 | */ |
| 14 | #define CEPH_OSDMAP_INC_VERSION 4 | 14 | #define CEPH_OSDMAP_INC_VERSION 5 |
| 15 | #define CEPH_OSDMAP_VERSION 4 | 15 | #define CEPH_OSDMAP_INC_VERSION_EXT 5 |
| 16 | #define CEPH_OSDMAP_VERSION 5 | ||
| 17 | #define CEPH_OSDMAP_VERSION_EXT 5 | ||
| 16 | 18 | ||
| 17 | /* | 19 | /* |
| 18 | * fs id | 20 | * fs id |
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index e6f9bc57d472..2b881262ef67 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c | |||
| @@ -431,8 +431,7 @@ static int dup_array(u64 **dst, __le64 *src, int num) | |||
| 431 | * Caller must hold snap_rwsem for read (i.e., the realm topology won't | 431 | * Caller must hold snap_rwsem for read (i.e., the realm topology won't |
| 432 | * change). | 432 | * change). |
| 433 | */ | 433 | */ |
| 434 | void ceph_queue_cap_snap(struct ceph_inode_info *ci, | 434 | void ceph_queue_cap_snap(struct ceph_inode_info *ci) |
| 435 | struct ceph_snap_context *snapc) | ||
| 436 | { | 435 | { |
| 437 | struct inode *inode = &ci->vfs_inode; | 436 | struct inode *inode = &ci->vfs_inode; |
| 438 | struct ceph_cap_snap *capsnap; | 437 | struct ceph_cap_snap *capsnap; |
| @@ -451,10 +450,11 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci, | |||
| 451 | as no new writes are allowed to start when pending, so any | 450 | as no new writes are allowed to start when pending, so any |
| 452 | writes in progress now were started before the previous | 451 | writes in progress now were started before the previous |
| 453 | cap_snap. lucky us. */ | 452 | cap_snap. lucky us. */ |
| 454 | dout("queue_cap_snap %p snapc %p seq %llu used %d" | 453 | dout("queue_cap_snap %p already pending\n", inode); |
| 455 | " already pending\n", inode, snapc, snapc->seq, used); | ||
| 456 | kfree(capsnap); | 454 | kfree(capsnap); |
| 457 | } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) { | 455 | } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) { |
| 456 | struct ceph_snap_context *snapc = ci->i_head_snapc; | ||
| 457 | |||
| 458 | igrab(inode); | 458 | igrab(inode); |
| 459 | 459 | ||
| 460 | atomic_set(&capsnap->nref, 1); | 460 | atomic_set(&capsnap->nref, 1); |
| @@ -463,7 +463,6 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci, | |||
| 463 | INIT_LIST_HEAD(&capsnap->flushing_item); | 463 | INIT_LIST_HEAD(&capsnap->flushing_item); |
| 464 | 464 | ||
| 465 | capsnap->follows = snapc->seq - 1; | 465 | capsnap->follows = snapc->seq - 1; |
| 466 | capsnap->context = ceph_get_snap_context(snapc); | ||
| 467 | capsnap->issued = __ceph_caps_issued(ci, NULL); | 466 | capsnap->issued = __ceph_caps_issued(ci, NULL); |
| 468 | capsnap->dirty = __ceph_caps_dirty(ci); | 467 | capsnap->dirty = __ceph_caps_dirty(ci); |
| 469 | 468 | ||
| @@ -480,7 +479,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci, | |||
| 480 | snapshot. */ | 479 | snapshot. */ |
| 481 | capsnap->dirty_pages = ci->i_wrbuffer_ref_head; | 480 | capsnap->dirty_pages = ci->i_wrbuffer_ref_head; |
| 482 | ci->i_wrbuffer_ref_head = 0; | 481 | ci->i_wrbuffer_ref_head = 0; |
| 483 | ceph_put_snap_context(ci->i_head_snapc); | 482 | capsnap->context = snapc; |
| 484 | ci->i_head_snapc = NULL; | 483 | ci->i_head_snapc = NULL; |
| 485 | list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps); | 484 | list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps); |
| 486 | 485 | ||
| @@ -522,15 +521,17 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci, | |||
| 522 | capsnap->ctime = inode->i_ctime; | 521 | capsnap->ctime = inode->i_ctime; |
| 523 | capsnap->time_warp_seq = ci->i_time_warp_seq; | 522 | capsnap->time_warp_seq = ci->i_time_warp_seq; |
| 524 | if (capsnap->dirty_pages) { | 523 | if (capsnap->dirty_pages) { |
| 525 | dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu " | 524 | dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu " |
| 526 | "still has %d dirty pages\n", inode, capsnap, | 525 | "still has %d dirty pages\n", inode, capsnap, |
| 527 | capsnap->context, capsnap->context->seq, | 526 | capsnap->context, capsnap->context->seq, |
| 528 | capsnap->size, capsnap->dirty_pages); | 527 | ceph_cap_string(capsnap->dirty), capsnap->size, |
| 528 | capsnap->dirty_pages); | ||
| 529 | return 0; | 529 | return 0; |
| 530 | } | 530 | } |
| 531 | dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu clean\n", | 531 | dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n", |
| 532 | inode, capsnap, capsnap->context, | 532 | inode, capsnap, capsnap->context, |
| 533 | capsnap->context->seq, capsnap->size); | 533 | capsnap->context->seq, ceph_cap_string(capsnap->dirty), |
| 534 | capsnap->size); | ||
| 534 | 535 | ||
| 535 | spin_lock(&mdsc->snap_flush_lock); | 536 | spin_lock(&mdsc->snap_flush_lock); |
| 536 | list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); | 537 | list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); |
| @@ -602,7 +603,7 @@ more: | |||
| 602 | if (lastinode) | 603 | if (lastinode) |
| 603 | iput(lastinode); | 604 | iput(lastinode); |
| 604 | lastinode = inode; | 605 | lastinode = inode; |
| 605 | ceph_queue_cap_snap(ci, realm->cached_context); | 606 | ceph_queue_cap_snap(ci); |
| 606 | spin_lock(&realm->inodes_with_caps_lock); | 607 | spin_lock(&realm->inodes_with_caps_lock); |
| 607 | } | 608 | } |
| 608 | spin_unlock(&realm->inodes_with_caps_lock); | 609 | spin_unlock(&realm->inodes_with_caps_lock); |
| @@ -824,8 +825,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc, | |||
| 824 | spin_unlock(&realm->inodes_with_caps_lock); | 825 | spin_unlock(&realm->inodes_with_caps_lock); |
| 825 | spin_unlock(&inode->i_lock); | 826 | spin_unlock(&inode->i_lock); |
| 826 | 827 | ||
| 827 | ceph_queue_cap_snap(ci, | 828 | ceph_queue_cap_snap(ci); |
| 828 | ci->i_snap_realm->cached_context); | ||
| 829 | 829 | ||
| 830 | iput(inode); | 830 | iput(inode); |
| 831 | continue; | 831 | continue; |
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index ca702c67bc66..e30dfbb056c3 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
| @@ -715,8 +715,7 @@ extern int ceph_update_snap_trace(struct ceph_mds_client *m, | |||
| 715 | extern void ceph_handle_snap(struct ceph_mds_client *mdsc, | 715 | extern void ceph_handle_snap(struct ceph_mds_client *mdsc, |
| 716 | struct ceph_mds_session *session, | 716 | struct ceph_mds_session *session, |
| 717 | struct ceph_msg *msg); | 717 | struct ceph_msg *msg); |
| 718 | extern void ceph_queue_cap_snap(struct ceph_inode_info *ci, | 718 | extern void ceph_queue_cap_snap(struct ceph_inode_info *ci); |
| 719 | struct ceph_snap_context *snapc); | ||
| 720 | extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci, | 719 | extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci, |
| 721 | struct ceph_cap_snap *capsnap); | 720 | struct ceph_cap_snap *capsnap); |
| 722 | extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc); | 721 | extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc); |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 5183bc2a1916..ded66be6597c 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
| @@ -808,6 +808,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = { | |||
| 808 | .release = cifs_close, | 808 | .release = cifs_close, |
| 809 | .fsync = cifs_fsync, | 809 | .fsync = cifs_fsync, |
| 810 | .flush = cifs_flush, | 810 | .flush = cifs_flush, |
| 811 | .mmap = cifs_file_mmap, | ||
| 811 | .splice_read = generic_file_splice_read, | 812 | .splice_read = generic_file_splice_read, |
| 812 | #ifdef CONFIG_CIFS_POSIX | 813 | #ifdef CONFIG_CIFS_POSIX |
| 813 | .unlocked_ioctl = cifs_ioctl, | 814 | .unlocked_ioctl = cifs_ioctl, |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 3f4fbd670507..5d3f29fef532 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
| @@ -1431,6 +1431,8 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, | |||
| 1431 | __u32 bytes_sent; | 1431 | __u32 bytes_sent; |
| 1432 | __u16 byte_count; | 1432 | __u16 byte_count; |
| 1433 | 1433 | ||
| 1434 | *nbytes = 0; | ||
| 1435 | |||
| 1434 | /* cFYI(1, ("write at %lld %d bytes", offset, count));*/ | 1436 | /* cFYI(1, ("write at %lld %d bytes", offset, count));*/ |
| 1435 | if (tcon->ses == NULL) | 1437 | if (tcon->ses == NULL) |
| 1436 | return -ECONNABORTED; | 1438 | return -ECONNABORTED; |
| @@ -1513,11 +1515,18 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, | |||
| 1513 | cifs_stats_inc(&tcon->num_writes); | 1515 | cifs_stats_inc(&tcon->num_writes); |
| 1514 | if (rc) { | 1516 | if (rc) { |
| 1515 | cFYI(1, ("Send error in write = %d", rc)); | 1517 | cFYI(1, ("Send error in write = %d", rc)); |
| 1516 | *nbytes = 0; | ||
| 1517 | } else { | 1518 | } else { |
| 1518 | *nbytes = le16_to_cpu(pSMBr->CountHigh); | 1519 | *nbytes = le16_to_cpu(pSMBr->CountHigh); |
| 1519 | *nbytes = (*nbytes) << 16; | 1520 | *nbytes = (*nbytes) << 16; |
| 1520 | *nbytes += le16_to_cpu(pSMBr->Count); | 1521 | *nbytes += le16_to_cpu(pSMBr->Count); |
| 1522 | |||
| 1523 | /* | ||
| 1524 | * Mask off high 16 bits when bytes written as returned by the | ||
| 1525 | * server is greater than bytes requested by the client. Some | ||
| 1526 | * OS/2 servers are known to set incorrect CountHigh values. | ||
| 1527 | */ | ||
| 1528 | if (*nbytes > count) | ||
| 1529 | *nbytes &= 0xFFFF; | ||
| 1521 | } | 1530 | } |
| 1522 | 1531 | ||
| 1523 | cifs_buf_release(pSMB); | 1532 | cifs_buf_release(pSMB); |
| @@ -1606,6 +1615,14 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon, | |||
| 1606 | *nbytes = le16_to_cpu(pSMBr->CountHigh); | 1615 | *nbytes = le16_to_cpu(pSMBr->CountHigh); |
| 1607 | *nbytes = (*nbytes) << 16; | 1616 | *nbytes = (*nbytes) << 16; |
| 1608 | *nbytes += le16_to_cpu(pSMBr->Count); | 1617 | *nbytes += le16_to_cpu(pSMBr->Count); |
| 1618 | |||
| 1619 | /* | ||
| 1620 | * Mask off high 16 bits when bytes written as returned by the | ||
| 1621 | * server is greater than bytes requested by the client. OS/2 | ||
| 1622 | * servers are known to set incorrect CountHigh values. | ||
| 1623 | */ | ||
| 1624 | if (*nbytes > count) | ||
| 1625 | *nbytes &= 0xFFFF; | ||
| 1609 | } | 1626 | } |
| 1610 | 1627 | ||
| 1611 | /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ | 1628 | /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ |
| @@ -1794,8 +1811,21 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, | |||
| 1794 | } | 1811 | } |
| 1795 | parm_data = (struct cifs_posix_lock *) | 1812 | parm_data = (struct cifs_posix_lock *) |
| 1796 | ((char *)&pSMBr->hdr.Protocol + data_offset); | 1813 | ((char *)&pSMBr->hdr.Protocol + data_offset); |
| 1797 | if (parm_data->lock_type == cpu_to_le16(CIFS_UNLCK)) | 1814 | if (parm_data->lock_type == __constant_cpu_to_le16(CIFS_UNLCK)) |
| 1798 | pLockData->fl_type = F_UNLCK; | 1815 | pLockData->fl_type = F_UNLCK; |
| 1816 | else { | ||
| 1817 | if (parm_data->lock_type == | ||
| 1818 | __constant_cpu_to_le16(CIFS_RDLCK)) | ||
| 1819 | pLockData->fl_type = F_RDLCK; | ||
| 1820 | else if (parm_data->lock_type == | ||
| 1821 | __constant_cpu_to_le16(CIFS_WRLCK)) | ||
| 1822 | pLockData->fl_type = F_WRLCK; | ||
| 1823 | |||
| 1824 | pLockData->fl_start = parm_data->start; | ||
| 1825 | pLockData->fl_end = parm_data->start + | ||
| 1826 | parm_data->length - 1; | ||
| 1827 | pLockData->fl_pid = parm_data->pid; | ||
| 1828 | } | ||
| 1799 | } | 1829 | } |
| 1800 | 1830 | ||
| 1801 | plk_err_exit: | 1831 | plk_err_exit: |
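Note: the CIFSSMBWrite/CIFSSMBWrite2 hunks above reassemble the bytes-written count from the reply's Count (low 16 bits) and CountHigh (high 16 bits), then distrust CountHigh when the result exceeds the requested length. A standalone sketch of that arithmetic, with a hypothetical helper name and host-endian inputs assumed:

#include <stdint.h>

/* Reassemble a 32-bit write count from the two 16-bit SMB reply fields and
 * guard against servers that report a bogus CountHigh, as the comment in the
 * hunks above notes for some OS/2 servers.  'requested' is the client's count. */
static uint32_t smb_bytes_written(uint16_t count_low, uint16_t count_high,
                                  uint32_t requested)
{
        uint32_t nbytes = ((uint32_t)count_high << 16) | count_low;

        if (nbytes > requested)         /* CountHigh not trustworthy */
                nbytes &= 0xFFFF;       /* keep only the low 16 bits */
        return nbytes;
}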
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 058b390d3da8..9b11a8f56f3a 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
| @@ -839,8 +839,32 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) | |||
| 839 | 839 | ||
| 840 | } else { | 840 | } else { |
| 841 | /* if rc == ERR_SHARING_VIOLATION ? */ | 841 | /* if rc == ERR_SHARING_VIOLATION ? */ |
| 842 | rc = 0; /* do not change lock type to unlock | 842 | rc = 0; |
| 843 | since range in use */ | 843 | |
| 844 | if (lockType & LOCKING_ANDX_SHARED_LOCK) { | ||
| 845 | pfLock->fl_type = F_WRLCK; | ||
| 846 | } else { | ||
| 847 | rc = CIFSSMBLock(xid, tcon, netfid, length, | ||
| 848 | pfLock->fl_start, 0, 1, | ||
| 849 | lockType | LOCKING_ANDX_SHARED_LOCK, | ||
| 850 | 0 /* wait flag */); | ||
| 851 | if (rc == 0) { | ||
| 852 | rc = CIFSSMBLock(xid, tcon, netfid, | ||
| 853 | length, pfLock->fl_start, 1, 0, | ||
| 854 | lockType | | ||
| 855 | LOCKING_ANDX_SHARED_LOCK, | ||
| 856 | 0 /* wait flag */); | ||
| 857 | pfLock->fl_type = F_RDLCK; | ||
| 858 | if (rc != 0) | ||
| 859 | cERROR(1, ("Error unlocking " | ||
| 860 | "previously locked range %d " | ||
| 861 | "during test of lock", rc)); | ||
| 862 | rc = 0; | ||
| 863 | } else { | ||
| 864 | pfLock->fl_type = F_WRLCK; | ||
| 865 | rc = 0; | ||
| 866 | } | ||
| 867 | } | ||
| 844 | } | 868 | } |
| 845 | 869 | ||
| 846 | FreeXid(xid); | 870 | FreeXid(xid); |
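Note: in the cifs_lock() hunk above, a failed exclusive probe during a lock test is followed by a shared-lock probe to decide whether to report F_RDLCK or F_WRLCK. A hedged sketch of just that classification step; try_lock() is a hypothetical stand-in for CIFSSMBLock() and returns 0 on success:

#include <fcntl.h>

/* Classify the conflicting server lock after the exclusive probe failed.
 * If the caller itself asked for a shared lock, only a writer can be in the
 * way; otherwise probe with a shared lock (and immediately undo it) to tell
 * readers from a writer. */
static short classify_conflict(int requested_shared,
                               int (*try_lock)(int shared, int unlock))
{
        if (requested_shared)
                return F_WRLCK;

        if (try_lock(1 /* shared */, 0 /* lock */) == 0) {
                try_lock(1 /* shared */, 1 /* unlock */);  /* undo the probe */
                return F_RDLCK;
        }
        return F_WRLCK;
}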
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index efb2b9400391..1cc087635a5e 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c | |||
| @@ -382,8 +382,8 @@ out: | |||
| 382 | static void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num, | 382 | static void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num, |
| 383 | struct ecryptfs_crypt_stat *crypt_stat) | 383 | struct ecryptfs_crypt_stat *crypt_stat) |
| 384 | { | 384 | { |
| 385 | (*offset) = (crypt_stat->num_header_bytes_at_front | 385 | (*offset) = ecryptfs_lower_header_size(crypt_stat) |
| 386 | + (crypt_stat->extent_size * extent_num)); | 386 | + (crypt_stat->extent_size * extent_num); |
| 387 | } | 387 | } |
| 388 | 388 | ||
| 389 | /** | 389 | /** |
| @@ -835,13 +835,13 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat) | |||
| 835 | set_extent_mask_and_shift(crypt_stat); | 835 | set_extent_mask_and_shift(crypt_stat); |
| 836 | crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES; | 836 | crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES; |
| 837 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) | 837 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) |
| 838 | crypt_stat->num_header_bytes_at_front = 0; | 838 | crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; |
| 839 | else { | 839 | else { |
| 840 | if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) | 840 | if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) |
| 841 | crypt_stat->num_header_bytes_at_front = | 841 | crypt_stat->metadata_size = |
| 842 | ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; | 842 | ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; |
| 843 | else | 843 | else |
| 844 | crypt_stat->num_header_bytes_at_front = PAGE_CACHE_SIZE; | 844 | crypt_stat->metadata_size = PAGE_CACHE_SIZE; |
| 845 | } | 845 | } |
| 846 | } | 846 | } |
| 847 | 847 | ||
| @@ -1108,9 +1108,9 @@ static void write_ecryptfs_marker(char *page_virt, size_t *written) | |||
| 1108 | (*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES; | 1108 | (*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES; |
| 1109 | } | 1109 | } |
| 1110 | 1110 | ||
| 1111 | static void | 1111 | void ecryptfs_write_crypt_stat_flags(char *page_virt, |
| 1112 | write_ecryptfs_flags(char *page_virt, struct ecryptfs_crypt_stat *crypt_stat, | 1112 | struct ecryptfs_crypt_stat *crypt_stat, |
| 1113 | size_t *written) | 1113 | size_t *written) |
| 1114 | { | 1114 | { |
| 1115 | u32 flags = 0; | 1115 | u32 flags = 0; |
| 1116 | int i; | 1116 | int i; |
| @@ -1238,8 +1238,7 @@ ecryptfs_write_header_metadata(char *virt, | |||
| 1238 | 1238 | ||
| 1239 | header_extent_size = (u32)crypt_stat->extent_size; | 1239 | header_extent_size = (u32)crypt_stat->extent_size; |
| 1240 | num_header_extents_at_front = | 1240 | num_header_extents_at_front = |
| 1241 | (u16)(crypt_stat->num_header_bytes_at_front | 1241 | (u16)(crypt_stat->metadata_size / crypt_stat->extent_size); |
| 1242 | / crypt_stat->extent_size); | ||
| 1243 | put_unaligned_be32(header_extent_size, virt); | 1242 | put_unaligned_be32(header_extent_size, virt); |
| 1244 | virt += 4; | 1243 | virt += 4; |
| 1245 | put_unaligned_be16(num_header_extents_at_front, virt); | 1244 | put_unaligned_be16(num_header_extents_at_front, virt); |
| @@ -1292,7 +1291,8 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t max, | |||
| 1292 | offset = ECRYPTFS_FILE_SIZE_BYTES; | 1291 | offset = ECRYPTFS_FILE_SIZE_BYTES; |
| 1293 | write_ecryptfs_marker((page_virt + offset), &written); | 1292 | write_ecryptfs_marker((page_virt + offset), &written); |
| 1294 | offset += written; | 1293 | offset += written; |
| 1295 | write_ecryptfs_flags((page_virt + offset), crypt_stat, &written); | 1294 | ecryptfs_write_crypt_stat_flags((page_virt + offset), crypt_stat, |
| 1295 | &written); | ||
| 1296 | offset += written; | 1296 | offset += written; |
| 1297 | ecryptfs_write_header_metadata((page_virt + offset), crypt_stat, | 1297 | ecryptfs_write_header_metadata((page_virt + offset), crypt_stat, |
| 1298 | &written); | 1298 | &written); |
| @@ -1382,7 +1382,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) | |||
| 1382 | rc = -EINVAL; | 1382 | rc = -EINVAL; |
| 1383 | goto out; | 1383 | goto out; |
| 1384 | } | 1384 | } |
| 1385 | virt_len = crypt_stat->num_header_bytes_at_front; | 1385 | virt_len = crypt_stat->metadata_size; |
| 1386 | order = get_order(virt_len); | 1386 | order = get_order(virt_len); |
| 1387 | /* Released in this function */ | 1387 | /* Released in this function */ |
| 1388 | virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order); | 1388 | virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order); |
| @@ -1428,16 +1428,15 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat, | |||
| 1428 | header_extent_size = get_unaligned_be32(virt); | 1428 | header_extent_size = get_unaligned_be32(virt); |
| 1429 | virt += sizeof(__be32); | 1429 | virt += sizeof(__be32); |
| 1430 | num_header_extents_at_front = get_unaligned_be16(virt); | 1430 | num_header_extents_at_front = get_unaligned_be16(virt); |
| 1431 | crypt_stat->num_header_bytes_at_front = | 1431 | crypt_stat->metadata_size = (((size_t)num_header_extents_at_front |
| 1432 | (((size_t)num_header_extents_at_front | 1432 | * (size_t)header_extent_size)); |
| 1433 | * (size_t)header_extent_size)); | ||
| 1434 | (*bytes_read) = (sizeof(__be32) + sizeof(__be16)); | 1433 | (*bytes_read) = (sizeof(__be32) + sizeof(__be16)); |
| 1435 | if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE) | 1434 | if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE) |
| 1436 | && (crypt_stat->num_header_bytes_at_front | 1435 | && (crypt_stat->metadata_size |
| 1437 | < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) { | 1436 | < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) { |
| 1438 | rc = -EINVAL; | 1437 | rc = -EINVAL; |
| 1439 | printk(KERN_WARNING "Invalid header size: [%zd]\n", | 1438 | printk(KERN_WARNING "Invalid header size: [%zd]\n", |
| 1440 | crypt_stat->num_header_bytes_at_front); | 1439 | crypt_stat->metadata_size); |
| 1441 | } | 1440 | } |
| 1442 | return rc; | 1441 | return rc; |
| 1443 | } | 1442 | } |
| @@ -1452,8 +1451,7 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat, | |||
| 1452 | */ | 1451 | */ |
| 1453 | static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat) | 1452 | static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat) |
| 1454 | { | 1453 | { |
| 1455 | crypt_stat->num_header_bytes_at_front = | 1454 | crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; |
| 1456 | ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; | ||
| 1457 | } | 1455 | } |
| 1458 | 1456 | ||
| 1459 | /** | 1457 | /** |
| @@ -1607,6 +1605,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry) | |||
| 1607 | ecryptfs_dentry, | 1605 | ecryptfs_dentry, |
| 1608 | ECRYPTFS_VALIDATE_HEADER_SIZE); | 1606 | ECRYPTFS_VALIDATE_HEADER_SIZE); |
| 1609 | if (rc) { | 1607 | if (rc) { |
| 1608 | memset(page_virt, 0, PAGE_CACHE_SIZE); | ||
| 1610 | rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode); | 1609 | rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode); |
| 1611 | if (rc) { | 1610 | if (rc) { |
| 1612 | printk(KERN_DEBUG "Valid eCryptfs headers not found in " | 1611 | printk(KERN_DEBUG "Valid eCryptfs headers not found in " |
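Note: the last ecryptfs_read_metadata() hunk above zeroes the scratch page between the failed header-region parse and the xattr-region fallback, presumably so the second parser never sees stale bytes from the first read. A minimal standalone sketch of that pattern, with hypothetical parser callbacks in place of the eCryptfs API:

#include <stddef.h>
#include <string.h>

/* Try the in-file header first; on failure wipe the buffer before handing it
 * to the xattr-based parser.  The callbacks are illustrative only. */
static int read_metadata_sketch(char *page_virt, size_t page_size,
                                int (*parse_header)(char *),
                                int (*parse_xattr)(char *))
{
        int rc = parse_header(page_virt);

        if (rc) {
                memset(page_virt, 0, page_size);   /* drop stale header bytes */
                rc = parse_xattr(page_virt);
        }
        return rc;
}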
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 542f625312f3..bc7115403f38 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h | |||
| @@ -273,7 +273,7 @@ struct ecryptfs_crypt_stat { | |||
| 273 | u32 flags; | 273 | u32 flags; |
| 274 | unsigned int file_version; | 274 | unsigned int file_version; |
| 275 | size_t iv_bytes; | 275 | size_t iv_bytes; |
| 276 | size_t num_header_bytes_at_front; | 276 | size_t metadata_size; |
| 277 | size_t extent_size; /* Data extent size; default is 4096 */ | 277 | size_t extent_size; /* Data extent size; default is 4096 */ |
| 278 | size_t key_size; | 278 | size_t key_size; |
| 279 | size_t extent_shift; | 279 | size_t extent_shift; |
| @@ -464,6 +464,14 @@ struct ecryptfs_daemon { | |||
| 464 | 464 | ||
| 465 | extern struct mutex ecryptfs_daemon_hash_mux; | 465 | extern struct mutex ecryptfs_daemon_hash_mux; |
| 466 | 466 | ||
| 467 | static inline size_t | ||
| 468 | ecryptfs_lower_header_size(struct ecryptfs_crypt_stat *crypt_stat) | ||
| 469 | { | ||
| 470 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) | ||
| 471 | return 0; | ||
| 472 | return crypt_stat->metadata_size; | ||
| 473 | } | ||
| 474 | |||
| 467 | static inline struct ecryptfs_file_info * | 475 | static inline struct ecryptfs_file_info * |
| 468 | ecryptfs_file_to_private(struct file *file) | 476 | ecryptfs_file_to_private(struct file *file) |
| 469 | { | 477 | { |
| @@ -651,6 +659,9 @@ int ecryptfs_decrypt_page(struct page *page); | |||
| 651 | int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry); | 659 | int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry); |
| 652 | int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry); | 660 | int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry); |
| 653 | int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry); | 661 | int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry); |
| 662 | void ecryptfs_write_crypt_stat_flags(char *page_virt, | ||
| 663 | struct ecryptfs_crypt_stat *crypt_stat, | ||
| 664 | size_t *written); | ||
| 654 | int ecryptfs_read_and_validate_header_region(char *data, | 665 | int ecryptfs_read_and_validate_header_region(char *data, |
| 655 | struct inode *ecryptfs_inode); | 666 | struct inode *ecryptfs_inode); |
| 656 | int ecryptfs_read_and_validate_xattr_region(char *page_virt, | 667 | int ecryptfs_read_and_validate_xattr_region(char *page_virt, |
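Note: the new ecryptfs_lower_header_size() helper above encodes the rule that metadata stored in an xattr occupies no space in the lower file. A hedged usage sketch, mirroring the ecryptfs_lower_offset_for_extent() change in the crypto.c hunk further up (kernel context assumed):

/* Lower-file offset of data extent N: the on-disk header size (zero when the
 * metadata lives in an xattr) plus N payload extents. */
static loff_t lower_offset_for_extent(struct ecryptfs_crypt_stat *crypt_stat,
                                      loff_t extent_num)
{
        return ecryptfs_lower_header_size(crypt_stat)
               + crypt_stat->extent_size * extent_num;
}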
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index d3362faf3852..e2d4418affac 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
| @@ -324,6 +324,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, | |||
| 324 | rc = ecryptfs_read_and_validate_header_region(page_virt, | 324 | rc = ecryptfs_read_and_validate_header_region(page_virt, |
| 325 | ecryptfs_dentry->d_inode); | 325 | ecryptfs_dentry->d_inode); |
| 326 | if (rc) { | 326 | if (rc) { |
| 327 | memset(page_virt, 0, PAGE_CACHE_SIZE); | ||
| 327 | rc = ecryptfs_read_and_validate_xattr_region(page_virt, | 328 | rc = ecryptfs_read_and_validate_xattr_region(page_virt, |
| 328 | ecryptfs_dentry); | 329 | ecryptfs_dentry); |
| 329 | if (rc) { | 330 | if (rc) { |
| @@ -336,7 +337,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, | |||
| 336 | ecryptfs_dentry->d_sb)->mount_crypt_stat; | 337 | ecryptfs_dentry->d_sb)->mount_crypt_stat; |
| 337 | if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) { | 338 | if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) { |
| 338 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) | 339 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) |
| 339 | file_size = (crypt_stat->num_header_bytes_at_front | 340 | file_size = (crypt_stat->metadata_size |
| 340 | + i_size_read(lower_dentry->d_inode)); | 341 | + i_size_read(lower_dentry->d_inode)); |
| 341 | else | 342 | else |
| 342 | file_size = i_size_read(lower_dentry->d_inode); | 343 | file_size = i_size_read(lower_dentry->d_inode); |
| @@ -388,9 +389,9 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, | |||
| 388 | mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); | 389 | mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); |
| 389 | if (IS_ERR(lower_dentry)) { | 390 | if (IS_ERR(lower_dentry)) { |
| 390 | rc = PTR_ERR(lower_dentry); | 391 | rc = PTR_ERR(lower_dentry); |
| 391 | printk(KERN_ERR "%s: lookup_one_len() returned [%d] on " | 392 | ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " |
| 392 | "lower_dentry = [%s]\n", __func__, rc, | 393 | "[%d] on lower_dentry = [%s]\n", __func__, rc, |
| 393 | ecryptfs_dentry->d_name.name); | 394 | encrypted_and_encoded_name); |
| 394 | goto out_d_drop; | 395 | goto out_d_drop; |
| 395 | } | 396 | } |
| 396 | if (lower_dentry->d_inode) | 397 | if (lower_dentry->d_inode) |
| @@ -417,9 +418,9 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, | |||
| 417 | mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); | 418 | mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); |
| 418 | if (IS_ERR(lower_dentry)) { | 419 | if (IS_ERR(lower_dentry)) { |
| 419 | rc = PTR_ERR(lower_dentry); | 420 | rc = PTR_ERR(lower_dentry); |
| 420 | printk(KERN_ERR "%s: lookup_one_len() returned [%d] on " | 421 | ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " |
| 421 | "lower_dentry = [%s]\n", __func__, rc, | 422 | "[%d] on lower_dentry = [%s]\n", __func__, rc, |
| 422 | encrypted_and_encoded_name); | 423 | encrypted_and_encoded_name); |
| 423 | goto out_d_drop; | 424 | goto out_d_drop; |
| 424 | } | 425 | } |
| 425 | lookup_and_interpose: | 426 | lookup_and_interpose: |
| @@ -456,8 +457,8 @@ static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir, | |||
| 456 | rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb, 0); | 457 | rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb, 0); |
| 457 | if (rc) | 458 | if (rc) |
| 458 | goto out_lock; | 459 | goto out_lock; |
| 459 | fsstack_copy_attr_times(dir, lower_new_dentry->d_inode); | 460 | fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); |
| 460 | fsstack_copy_inode_size(dir, lower_new_dentry->d_inode); | 461 | fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode); |
| 461 | old_dentry->d_inode->i_nlink = | 462 | old_dentry->d_inode->i_nlink = |
| 462 | ecryptfs_inode_to_lower(old_dentry->d_inode)->i_nlink; | 463 | ecryptfs_inode_to_lower(old_dentry->d_inode)->i_nlink; |
| 463 | i_size_write(new_dentry->d_inode, file_size_save); | 464 | i_size_write(new_dentry->d_inode, file_size_save); |
| @@ -648,38 +649,17 @@ out_lock: | |||
| 648 | return rc; | 649 | return rc; |
| 649 | } | 650 | } |
| 650 | 651 | ||
| 651 | static int | 652 | static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf, |
| 652 | ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) | 653 | size_t *bufsiz) |
| 653 | { | 654 | { |
| 655 | struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); | ||
| 654 | char *lower_buf; | 656 | char *lower_buf; |
| 655 | size_t lower_bufsiz; | 657 | size_t lower_bufsiz = PATH_MAX; |
| 656 | struct dentry *lower_dentry; | ||
| 657 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat; | ||
| 658 | char *plaintext_name; | ||
| 659 | size_t plaintext_name_size; | ||
| 660 | mm_segment_t old_fs; | 658 | mm_segment_t old_fs; |
| 661 | int rc; | 659 | int rc; |
| 662 | 660 | ||
| 663 | lower_dentry = ecryptfs_dentry_to_lower(dentry); | ||
| 664 | if (!lower_dentry->d_inode->i_op->readlink) { | ||
| 665 | rc = -EINVAL; | ||
| 666 | goto out; | ||
| 667 | } | ||
| 668 | mount_crypt_stat = &ecryptfs_superblock_to_private( | ||
| 669 | dentry->d_sb)->mount_crypt_stat; | ||
| 670 | /* | ||
| 671 | * If the lower filename is encrypted, it will result in a significantly | ||
| 672 | * longer name. If needed, truncate the name after decode and decrypt. | ||
| 673 | */ | ||
| 674 | if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) | ||
| 675 | lower_bufsiz = PATH_MAX; | ||
| 676 | else | ||
| 677 | lower_bufsiz = bufsiz; | ||
| 678 | /* Released in this function */ | ||
| 679 | lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL); | 661 | lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL); |
| 680 | if (lower_buf == NULL) { | 662 | if (!lower_buf) { |
| 681 | printk(KERN_ERR "%s: Out of memory whilst attempting to " | ||
| 682 | "kmalloc [%zd] bytes\n", __func__, lower_bufsiz); | ||
| 683 | rc = -ENOMEM; | 663 | rc = -ENOMEM; |
| 684 | goto out; | 664 | goto out; |
| 685 | } | 665 | } |
| @@ -689,29 +669,31 @@ ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) | |||
| 689 | (char __user *)lower_buf, | 669 | (char __user *)lower_buf, |
| 690 | lower_bufsiz); | 670 | lower_bufsiz); |
| 691 | set_fs(old_fs); | 671 | set_fs(old_fs); |
| 692 | if (rc >= 0) { | 672 | if (rc < 0) |
| 693 | rc = ecryptfs_decode_and_decrypt_filename(&plaintext_name, | 673 | goto out; |
| 694 | &plaintext_name_size, | 674 | lower_bufsiz = rc; |
| 695 | dentry, lower_buf, | 675 | rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry, |
| 696 | rc); | 676 | lower_buf, lower_bufsiz); |
| 697 | if (rc) { | 677 | out: |
| 698 | printk(KERN_ERR "%s: Error attempting to decode and " | ||
| 699 | "decrypt filename; rc = [%d]\n", __func__, | ||
| 700 | rc); | ||
| 701 | goto out_free_lower_buf; | ||
| 702 | } | ||
| 703 | /* Check for bufsiz <= 0 done in sys_readlinkat() */ | ||
| 704 | rc = copy_to_user(buf, plaintext_name, | ||
| 705 | min((size_t) bufsiz, plaintext_name_size)); | ||
| 706 | if (rc) | ||
| 707 | rc = -EFAULT; | ||
| 708 | else | ||
| 709 | rc = plaintext_name_size; | ||
| 710 | kfree(plaintext_name); | ||
| 711 | fsstack_copy_attr_atime(dentry->d_inode, lower_dentry->d_inode); | ||
| 712 | } | ||
| 713 | out_free_lower_buf: | ||
| 714 | kfree(lower_buf); | 678 | kfree(lower_buf); |
| 679 | return rc; | ||
| 680 | } | ||
| 681 | |||
| 682 | static int | ||
| 683 | ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) | ||
| 684 | { | ||
| 685 | char *kbuf; | ||
| 686 | size_t kbufsiz, copied; | ||
| 687 | int rc; | ||
| 688 | |||
| 689 | rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz); | ||
| 690 | if (rc) | ||
| 691 | goto out; | ||
| 692 | copied = min_t(size_t, bufsiz, kbufsiz); | ||
| 693 | rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied; | ||
| 694 | kfree(kbuf); | ||
| 695 | fsstack_copy_attr_atime(dentry->d_inode, | ||
| 696 | ecryptfs_dentry_to_lower(dentry)->d_inode); | ||
| 715 | out: | 697 | out: |
| 716 | return rc; | 698 | return rc; |
| 717 | } | 699 | } |
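Note: the readlink refactor above splits the work into ecryptfs_readlink_lower(), which leaves the decrypted target in a kernel buffer, and a thin ecryptfs_readlink() that copies out at most bufsiz bytes. A standalone sketch of that bounded copy-out; copy_out() is a hypothetical stand-in for copy_to_user() and returns nonzero on fault:

#include <errno.h>
#include <sys/types.h>

/* Hand the caller min(bufsiz, kbufsiz) bytes and report how many were copied,
 * or -EFAULT if the copy faults, matching the hunk above. */
static ssize_t readlink_copy_sketch(char *dst, size_t bufsiz,
                                    const char *kbuf, size_t kbufsiz,
                                    int (*copy_out)(char *, const char *, size_t))
{
        size_t copied = bufsiz < kbufsiz ? bufsiz : kbufsiz;

        return copy_out(dst, kbuf, copied) ? -EFAULT : (ssize_t)copied;
}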
| @@ -769,7 +751,7 @@ upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat, | |||
| 769 | { | 751 | { |
| 770 | loff_t lower_size; | 752 | loff_t lower_size; |
| 771 | 753 | ||
| 772 | lower_size = crypt_stat->num_header_bytes_at_front; | 754 | lower_size = ecryptfs_lower_header_size(crypt_stat); |
| 773 | if (upper_size != 0) { | 755 | if (upper_size != 0) { |
| 774 | loff_t num_extents; | 756 | loff_t num_extents; |
| 775 | 757 | ||
| @@ -1016,6 +998,28 @@ out: | |||
| 1016 | return rc; | 998 | return rc; |
| 1017 | } | 999 | } |
| 1018 | 1000 | ||
| 1001 | int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry, | ||
| 1002 | struct kstat *stat) | ||
| 1003 | { | ||
| 1004 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat; | ||
| 1005 | int rc = 0; | ||
| 1006 | |||
| 1007 | mount_crypt_stat = &ecryptfs_superblock_to_private( | ||
| 1008 | dentry->d_sb)->mount_crypt_stat; | ||
| 1009 | generic_fillattr(dentry->d_inode, stat); | ||
| 1010 | if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) { | ||
| 1011 | char *target; | ||
| 1012 | size_t targetsiz; | ||
| 1013 | |||
| 1014 | rc = ecryptfs_readlink_lower(dentry, &target, &targetsiz); | ||
| 1015 | if (!rc) { | ||
| 1016 | kfree(target); | ||
| 1017 | stat->size = targetsiz; | ||
| 1018 | } | ||
| 1019 | } | ||
| 1020 | return rc; | ||
| 1021 | } | ||
| 1022 | |||
| 1019 | int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, | 1023 | int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, |
| 1020 | struct kstat *stat) | 1024 | struct kstat *stat) |
| 1021 | { | 1025 | { |
| @@ -1040,7 +1044,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, | |||
| 1040 | 1044 | ||
| 1041 | lower_dentry = ecryptfs_dentry_to_lower(dentry); | 1045 | lower_dentry = ecryptfs_dentry_to_lower(dentry); |
| 1042 | if (!lower_dentry->d_inode->i_op->setxattr) { | 1046 | if (!lower_dentry->d_inode->i_op->setxattr) { |
| 1043 | rc = -ENOSYS; | 1047 | rc = -EOPNOTSUPP; |
| 1044 | goto out; | 1048 | goto out; |
| 1045 | } | 1049 | } |
| 1046 | mutex_lock(&lower_dentry->d_inode->i_mutex); | 1050 | mutex_lock(&lower_dentry->d_inode->i_mutex); |
| @@ -1058,7 +1062,7 @@ ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name, | |||
| 1058 | int rc = 0; | 1062 | int rc = 0; |
| 1059 | 1063 | ||
| 1060 | if (!lower_dentry->d_inode->i_op->getxattr) { | 1064 | if (!lower_dentry->d_inode->i_op->getxattr) { |
| 1061 | rc = -ENOSYS; | 1065 | rc = -EOPNOTSUPP; |
| 1062 | goto out; | 1066 | goto out; |
| 1063 | } | 1067 | } |
| 1064 | mutex_lock(&lower_dentry->d_inode->i_mutex); | 1068 | mutex_lock(&lower_dentry->d_inode->i_mutex); |
| @@ -1085,7 +1089,7 @@ ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size) | |||
| 1085 | 1089 | ||
| 1086 | lower_dentry = ecryptfs_dentry_to_lower(dentry); | 1090 | lower_dentry = ecryptfs_dentry_to_lower(dentry); |
| 1087 | if (!lower_dentry->d_inode->i_op->listxattr) { | 1091 | if (!lower_dentry->d_inode->i_op->listxattr) { |
| 1088 | rc = -ENOSYS; | 1092 | rc = -EOPNOTSUPP; |
| 1089 | goto out; | 1093 | goto out; |
| 1090 | } | 1094 | } |
| 1091 | mutex_lock(&lower_dentry->d_inode->i_mutex); | 1095 | mutex_lock(&lower_dentry->d_inode->i_mutex); |
| @@ -1102,7 +1106,7 @@ static int ecryptfs_removexattr(struct dentry *dentry, const char *name) | |||
| 1102 | 1106 | ||
| 1103 | lower_dentry = ecryptfs_dentry_to_lower(dentry); | 1107 | lower_dentry = ecryptfs_dentry_to_lower(dentry); |
| 1104 | if (!lower_dentry->d_inode->i_op->removexattr) { | 1108 | if (!lower_dentry->d_inode->i_op->removexattr) { |
| 1105 | rc = -ENOSYS; | 1109 | rc = -EOPNOTSUPP; |
| 1106 | goto out; | 1110 | goto out; |
| 1107 | } | 1111 | } |
| 1108 | mutex_lock(&lower_dentry->d_inode->i_mutex); | 1112 | mutex_lock(&lower_dentry->d_inode->i_mutex); |
| @@ -1133,6 +1137,7 @@ const struct inode_operations ecryptfs_symlink_iops = { | |||
| 1133 | .put_link = ecryptfs_put_link, | 1137 | .put_link = ecryptfs_put_link, |
| 1134 | .permission = ecryptfs_permission, | 1138 | .permission = ecryptfs_permission, |
| 1135 | .setattr = ecryptfs_setattr, | 1139 | .setattr = ecryptfs_setattr, |
| 1140 | .getattr = ecryptfs_getattr_link, | ||
| 1136 | .setxattr = ecryptfs_setxattr, | 1141 | .setxattr = ecryptfs_setxattr, |
| 1137 | .getxattr = ecryptfs_getxattr, | 1142 | .getxattr = ecryptfs_getxattr, |
| 1138 | .listxattr = ecryptfs_listxattr, | 1143 | .listxattr = ecryptfs_listxattr, |
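Note: the ecryptfs_getattr_link() hunk above reports a symlink size equal to the decrypted target length when filename encryption is enabled, rather than the lower link's on-disk length, presumably so that stat() agrees with what readlink() returns. A hedged sketch of that sizing step (kernel context assumed; read_target() stands in for ecryptfs_readlink_lower()):

/* Override the generic size with the decrypted target length when filenames
 * are encrypted.  Only the length is needed, so the buffer is freed at once. */
static int symlink_size_sketch(struct kstat *stat, bool filenames_encrypted,
                               int (*read_target)(char **buf, size_t *len))
{
        int rc = 0;

        if (filenames_encrypted) {
                char *target;
                size_t targetsiz;

                rc = read_target(&target, &targetsiz);
                if (!rc) {
                        kfree(target);
                        stat->size = targetsiz;
                }
        }
        return rc;
}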
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index d491237c98e7..2ee9a3a7b68c 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c | |||
| @@ -83,6 +83,19 @@ out: | |||
| 83 | return rc; | 83 | return rc; |
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | static void strip_xattr_flag(char *page_virt, | ||
| 87 | struct ecryptfs_crypt_stat *crypt_stat) | ||
| 88 | { | ||
| 89 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) { | ||
| 90 | size_t written; | ||
| 91 | |||
| 92 | crypt_stat->flags &= ~ECRYPTFS_METADATA_IN_XATTR; | ||
| 93 | ecryptfs_write_crypt_stat_flags(page_virt, crypt_stat, | ||
| 94 | &written); | ||
| 95 | crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR; | ||
| 96 | } | ||
| 97 | } | ||
| 98 | |||
| 86 | /** | 99 | /** |
| 87 | * Header Extent: | 100 | * Header Extent: |
| 88 | * Octets 0-7: Unencrypted file size (big-endian) | 101 | * Octets 0-7: Unencrypted file size (big-endian) |
| @@ -98,19 +111,6 @@ out: | |||
| 98 | * (big-endian) | 111 | * (big-endian) |
| 99 | * Octet 26: Begin RFC 2440 authentication token packet set | 112 | * Octet 26: Begin RFC 2440 authentication token packet set |
| 100 | */ | 113 | */ |
| 101 | static void set_header_info(char *page_virt, | ||
| 102 | struct ecryptfs_crypt_stat *crypt_stat) | ||
| 103 | { | ||
| 104 | size_t written; | ||
| 105 | size_t save_num_header_bytes_at_front = | ||
| 106 | crypt_stat->num_header_bytes_at_front; | ||
| 107 | |||
| 108 | crypt_stat->num_header_bytes_at_front = | ||
| 109 | ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; | ||
| 110 | ecryptfs_write_header_metadata(page_virt + 20, crypt_stat, &written); | ||
| 111 | crypt_stat->num_header_bytes_at_front = | ||
| 112 | save_num_header_bytes_at_front; | ||
| 113 | } | ||
| 114 | 114 | ||
| 115 | /** | 115 | /** |
| 116 | * ecryptfs_copy_up_encrypted_with_header | 116 | * ecryptfs_copy_up_encrypted_with_header |
| @@ -136,8 +136,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, | |||
| 136 | * num_extents_per_page) | 136 | * num_extents_per_page) |
| 137 | + extent_num_in_page); | 137 | + extent_num_in_page); |
| 138 | size_t num_header_extents_at_front = | 138 | size_t num_header_extents_at_front = |
| 139 | (crypt_stat->num_header_bytes_at_front | 139 | (crypt_stat->metadata_size / crypt_stat->extent_size); |
| 140 | / crypt_stat->extent_size); | ||
| 141 | 140 | ||
| 142 | if (view_extent_num < num_header_extents_at_front) { | 141 | if (view_extent_num < num_header_extents_at_front) { |
| 143 | /* This is a header extent */ | 142 | /* This is a header extent */ |
| @@ -147,9 +146,14 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, | |||
| 147 | memset(page_virt, 0, PAGE_CACHE_SIZE); | 146 | memset(page_virt, 0, PAGE_CACHE_SIZE); |
| 148 | /* TODO: Support more than one header extent */ | 147 | /* TODO: Support more than one header extent */ |
| 149 | if (view_extent_num == 0) { | 148 | if (view_extent_num == 0) { |
| 149 | size_t written; | ||
| 150 | |||
| 150 | rc = ecryptfs_read_xattr_region( | 151 | rc = ecryptfs_read_xattr_region( |
| 151 | page_virt, page->mapping->host); | 152 | page_virt, page->mapping->host); |
| 152 | set_header_info(page_virt, crypt_stat); | 153 | strip_xattr_flag(page_virt + 16, crypt_stat); |
| 154 | ecryptfs_write_header_metadata(page_virt + 20, | ||
| 155 | crypt_stat, | ||
| 156 | &written); | ||
| 153 | } | 157 | } |
| 154 | kunmap_atomic(page_virt, KM_USER0); | 158 | kunmap_atomic(page_virt, KM_USER0); |
| 155 | flush_dcache_page(page); | 159 | flush_dcache_page(page); |
| @@ -162,7 +166,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, | |||
| 162 | /* This is an encrypted data extent */ | 166 | /* This is an encrypted data extent */ |
| 163 | loff_t lower_offset = | 167 | loff_t lower_offset = |
| 164 | ((view_extent_num * crypt_stat->extent_size) | 168 | ((view_extent_num * crypt_stat->extent_size) |
| 165 | - crypt_stat->num_header_bytes_at_front); | 169 | - crypt_stat->metadata_size); |
| 166 | 170 | ||
| 167 | rc = ecryptfs_read_lower_page_segment( | 171 | rc = ecryptfs_read_lower_page_segment( |
| 168 | page, (lower_offset >> PAGE_CACHE_SHIFT), | 172 | page, (lower_offset >> PAGE_CACHE_SHIFT), |
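Note: strip_xattr_flag() above clears ECRYPTFS_METADATA_IN_XATTR only while the flag bytes are serialized into the synthesized header, then restores it, leaving the in-memory crypt_stat untouched. A minimal standalone sketch of that save/clear/restore pattern, reduced to a plain flags word:

#include <stdint.h>

/* Re-emit the serialized flags with one bit masked off; the caller's state is
 * unchanged afterwards.  The callback is illustrative only. */
static void strip_flag_sketch(uint32_t *flags, uint32_t flag_bit,
                              void (*rewrite_flags)(uint32_t flags))
{
        if (*flags & flag_bit) {
                *flags &= ~flag_bit;
                rewrite_flags(*flags);          /* emit header bytes without the bit */
                *flags |= flag_bit;             /* restore in-memory state */
        }
}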
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c index fcef41c1d2cf..278743c7716a 100644 --- a/fs/ecryptfs/super.c +++ b/fs/ecryptfs/super.c | |||
| @@ -86,7 +86,6 @@ static void ecryptfs_destroy_inode(struct inode *inode) | |||
| 86 | if (lower_dentry->d_inode) { | 86 | if (lower_dentry->d_inode) { |
| 87 | fput(inode_info->lower_file); | 87 | fput(inode_info->lower_file); |
| 88 | inode_info->lower_file = NULL; | 88 | inode_info->lower_file = NULL; |
| 89 | d_drop(lower_dentry); | ||
| 90 | } | 89 | } |
| 91 | } | 90 | } |
| 92 | ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat); | 91 | ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat); |
diff --git a/fs/ext2/symlink.c b/fs/ext2/symlink.c index 4e2426e22bbe..565cf817bbf1 100644 --- a/fs/ext2/symlink.c +++ b/fs/ext2/symlink.c | |||
| @@ -32,6 +32,7 @@ const struct inode_operations ext2_symlink_inode_operations = { | |||
| 32 | .readlink = generic_readlink, | 32 | .readlink = generic_readlink, |
| 33 | .follow_link = page_follow_link_light, | 33 | .follow_link = page_follow_link_light, |
| 34 | .put_link = page_put_link, | 34 | .put_link = page_put_link, |
| 35 | .setattr = ext2_setattr, | ||
| 35 | #ifdef CONFIG_EXT2_FS_XATTR | 36 | #ifdef CONFIG_EXT2_FS_XATTR |
| 36 | .setxattr = generic_setxattr, | 37 | .setxattr = generic_setxattr, |
| 37 | .getxattr = generic_getxattr, | 38 | .getxattr = generic_getxattr, |
| @@ -43,6 +44,7 @@ const struct inode_operations ext2_symlink_inode_operations = { | |||
| 43 | const struct inode_operations ext2_fast_symlink_inode_operations = { | 44 | const struct inode_operations ext2_fast_symlink_inode_operations = { |
| 44 | .readlink = generic_readlink, | 45 | .readlink = generic_readlink, |
| 45 | .follow_link = ext2_follow_link, | 46 | .follow_link = ext2_follow_link, |
| 47 | .setattr = ext2_setattr, | ||
| 46 | #ifdef CONFIG_EXT2_FS_XATTR | 48 | #ifdef CONFIG_EXT2_FS_XATTR |
| 47 | .setxattr = generic_setxattr, | 49 | .setxattr = generic_setxattr, |
| 48 | .getxattr = generic_getxattr, | 50 | .getxattr = generic_getxattr, |
diff --git a/fs/ext3/symlink.c b/fs/ext3/symlink.c index ff7b4ccd8983..7c4898207776 100644 --- a/fs/ext3/symlink.c +++ b/fs/ext3/symlink.c | |||
| @@ -34,6 +34,7 @@ const struct inode_operations ext3_symlink_inode_operations = { | |||
| 34 | .readlink = generic_readlink, | 34 | .readlink = generic_readlink, |
| 35 | .follow_link = page_follow_link_light, | 35 | .follow_link = page_follow_link_light, |
| 36 | .put_link = page_put_link, | 36 | .put_link = page_put_link, |
| 37 | .setattr = ext3_setattr, | ||
| 37 | #ifdef CONFIG_EXT3_FS_XATTR | 38 | #ifdef CONFIG_EXT3_FS_XATTR |
| 38 | .setxattr = generic_setxattr, | 39 | .setxattr = generic_setxattr, |
| 39 | .getxattr = generic_getxattr, | 40 | .getxattr = generic_getxattr, |
| @@ -45,6 +46,7 @@ const struct inode_operations ext3_symlink_inode_operations = { | |||
| 45 | const struct inode_operations ext3_fast_symlink_inode_operations = { | 46 | const struct inode_operations ext3_fast_symlink_inode_operations = { |
| 46 | .readlink = generic_readlink, | 47 | .readlink = generic_readlink, |
| 47 | .follow_link = ext3_follow_link, | 48 | .follow_link = ext3_follow_link, |
| 49 | .setattr = ext3_setattr, | ||
| 48 | #ifdef CONFIG_EXT3_FS_XATTR | 50 | #ifdef CONFIG_EXT3_FS_XATTR |
| 49 | .setxattr = generic_setxattr, | 51 | .setxattr = generic_setxattr, |
| 50 | .getxattr = generic_getxattr, | 52 | .getxattr = generic_getxattr, |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 781a322ccb45..4b37f7cea4dd 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
| @@ -554,108 +554,85 @@ select_queue: | |||
| 554 | return ret; | 554 | return ret; |
| 555 | } | 555 | } |
| 556 | 556 | ||
| 557 | static void unpin_sb_for_writeback(struct super_block **psb) | 557 | static void unpin_sb_for_writeback(struct super_block *sb) |
| 558 | { | 558 | { |
| 559 | struct super_block *sb = *psb; | 559 | up_read(&sb->s_umount); |
| 560 | 560 | put_super(sb); | |
| 561 | if (sb) { | ||
| 562 | up_read(&sb->s_umount); | ||
| 563 | put_super(sb); | ||
| 564 | *psb = NULL; | ||
| 565 | } | ||
| 566 | } | 561 | } |
| 567 | 562 | ||
| 563 | enum sb_pin_state { | ||
| 564 | SB_PINNED, | ||
| 565 | SB_NOT_PINNED, | ||
| 566 | SB_PIN_FAILED | ||
| 567 | }; | ||
| 568 | |||
| 568 | /* | 569 | /* |
| 569 | * For WB_SYNC_NONE writeback, the caller does not have the sb pinned | 570 | * For WB_SYNC_NONE writeback, the caller does not have the sb pinned |
| 570 | * before calling writeback. So make sure that we do pin it, so it doesn't | 571 | * before calling writeback. So make sure that we do pin it, so it doesn't |
| 571 | * go away while we are writing inodes from it. | 572 | * go away while we are writing inodes from it. |
| 572 | * | ||
| 573 | * Returns 0 if the super was successfully pinned (or pinning wasn't needed), | ||
| 574 | * 1 if we failed. | ||
| 575 | */ | 573 | */ |
| 576 | static int pin_sb_for_writeback(struct writeback_control *wbc, | 574 | static enum sb_pin_state pin_sb_for_writeback(struct writeback_control *wbc, |
| 577 | struct inode *inode, struct super_block **psb) | 575 | struct super_block *sb) |
| 578 | { | 576 | { |
| 579 | struct super_block *sb = inode->i_sb; | ||
| 580 | |||
| 581 | /* | ||
| 582 | * If this sb is already pinned, nothing more to do. If not and | ||
| 583 | * *psb is non-NULL, unpin the old one first | ||
| 584 | */ | ||
| 585 | if (sb == *psb) | ||
| 586 | return 0; | ||
| 587 | else if (*psb) | ||
| 588 | unpin_sb_for_writeback(psb); | ||
| 589 | |||
| 590 | /* | 577 | /* |
| 591 | * Caller must already hold the ref for this | 578 | * Caller must already hold the ref for this |
| 592 | */ | 579 | */ |
| 593 | if (wbc->sync_mode == WB_SYNC_ALL) { | 580 | if (wbc->sync_mode == WB_SYNC_ALL) { |
| 594 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); | 581 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); |
| 595 | return 0; | 582 | return SB_NOT_PINNED; |
| 596 | } | 583 | } |
| 597 | |||
| 598 | spin_lock(&sb_lock); | 584 | spin_lock(&sb_lock); |
| 599 | sb->s_count++; | 585 | sb->s_count++; |
| 600 | if (down_read_trylock(&sb->s_umount)) { | 586 | if (down_read_trylock(&sb->s_umount)) { |
| 601 | if (sb->s_root) { | 587 | if (sb->s_root) { |
| 602 | spin_unlock(&sb_lock); | 588 | spin_unlock(&sb_lock); |
| 603 | goto pinned; | 589 | return SB_PINNED; |
| 604 | } | 590 | } |
| 605 | /* | 591 | /* |
| 606 | * umounted, drop rwsem again and fall through to failure | 592 | * umounted, drop rwsem again and fall through to failure |
| 607 | */ | 593 | */ |
| 608 | up_read(&sb->s_umount); | 594 | up_read(&sb->s_umount); |
| 609 | } | 595 | } |
| 610 | |||
| 611 | sb->s_count--; | 596 | sb->s_count--; |
| 612 | spin_unlock(&sb_lock); | 597 | spin_unlock(&sb_lock); |
| 613 | return 1; | 598 | return SB_PIN_FAILED; |
| 614 | pinned: | ||
| 615 | *psb = sb; | ||
| 616 | return 0; | ||
| 617 | } | 599 | } |
| 618 | 600 | ||
| 619 | static void writeback_inodes_wb(struct bdi_writeback *wb, | 601 | /* |
| 620 | struct writeback_control *wbc) | 602 | * Write a portion of b_io inodes which belong to @sb. |
| 603 | * If @wbc->sb != NULL, then find and write all such | ||
| 604 | * inodes. Otherwise write only ones which go sequentially | ||
| 605 | * in reverse order. | ||
| 606 | * Return 1, if the caller writeback routine should be | ||
| 607 | * interrupted. Otherwise return 0. | ||
| 608 | */ | ||
| 609 | static int writeback_sb_inodes(struct super_block *sb, | ||
| 610 | struct bdi_writeback *wb, | ||
| 611 | struct writeback_control *wbc) | ||
| 621 | { | 612 | { |
| 622 | struct super_block *sb = wbc->sb, *pin_sb = NULL; | ||
| 623 | const unsigned long start = jiffies; /* livelock avoidance */ | ||
| 624 | |||
| 625 | spin_lock(&inode_lock); | ||
| 626 | |||
| 627 | if (!wbc->for_kupdate || list_empty(&wb->b_io)) | ||
| 628 | queue_io(wb, wbc->older_than_this); | ||
| 629 | |||
| 630 | while (!list_empty(&wb->b_io)) { | 613 | while (!list_empty(&wb->b_io)) { |
| 631 | struct inode *inode = list_entry(wb->b_io.prev, | ||
| 632 | struct inode, i_list); | ||
| 633 | long pages_skipped; | 614 | long pages_skipped; |
| 634 | 615 | struct inode *inode = list_entry(wb->b_io.prev, | |
| 635 | /* | 616 | struct inode, i_list); |
| 636 | * super block given and doesn't match, skip this inode | 617 | if (wbc->sb && sb != inode->i_sb) { |
| 637 | */ | 618 | /* super block given and doesn't |
| 638 | if (sb && sb != inode->i_sb) { | 619 | match, skip this inode */ |
| 639 | redirty_tail(inode); | 620 | redirty_tail(inode); |
| 640 | continue; | 621 | continue; |
| 641 | } | 622 | } |
| 642 | 623 | if (sb != inode->i_sb) | |
| 624 | /* finish with this superblock */ | ||
| 625 | return 0; | ||
| 643 | if (inode->i_state & (I_NEW | I_WILL_FREE)) { | 626 | if (inode->i_state & (I_NEW | I_WILL_FREE)) { |
| 644 | requeue_io(inode); | 627 | requeue_io(inode); |
| 645 | continue; | 628 | continue; |
| 646 | } | 629 | } |
| 647 | |||
| 648 | /* | 630 | /* |
| 649 | * Was this inode dirtied after sync_sb_inodes was called? | 631 | * Was this inode dirtied after sync_sb_inodes was called? |
| 650 | * This keeps sync from extra jobs and livelock. | 632 | * This keeps sync from extra jobs and livelock. |
| 651 | */ | 633 | */ |
| 652 | if (inode_dirtied_after(inode, start)) | 634 | if (inode_dirtied_after(inode, wbc->wb_start)) |
| 653 | break; | 635 | return 1; |
| 654 | |||
| 655 | if (pin_sb_for_writeback(wbc, inode, &pin_sb)) { | ||
| 656 | requeue_io(inode); | ||
| 657 | continue; | ||
| 658 | } | ||
| 659 | 636 | ||
| 660 | BUG_ON(inode->i_state & (I_FREEING | I_CLEAR)); | 637 | BUG_ON(inode->i_state & (I_FREEING | I_CLEAR)); |
| 661 | __iget(inode); | 638 | __iget(inode); |
| @@ -674,14 +651,50 @@ static void writeback_inodes_wb(struct bdi_writeback *wb, | |||
| 674 | spin_lock(&inode_lock); | 651 | spin_lock(&inode_lock); |
| 675 | if (wbc->nr_to_write <= 0) { | 652 | if (wbc->nr_to_write <= 0) { |
| 676 | wbc->more_io = 1; | 653 | wbc->more_io = 1; |
| 677 | break; | 654 | return 1; |
| 678 | } | 655 | } |
| 679 | if (!list_empty(&wb->b_more_io)) | 656 | if (!list_empty(&wb->b_more_io)) |
| 680 | wbc->more_io = 1; | 657 | wbc->more_io = 1; |
| 681 | } | 658 | } |
| 659 | /* b_io is empty */ | ||
| 660 | return 1; | ||
| 661 | } | ||
| 662 | |||
| 663 | static void writeback_inodes_wb(struct bdi_writeback *wb, | ||
| 664 | struct writeback_control *wbc) | ||
| 665 | { | ||
| 666 | int ret = 0; | ||
| 682 | 667 | ||
| 683 | unpin_sb_for_writeback(&pin_sb); | 668 | wbc->wb_start = jiffies; /* livelock avoidance */ |
| 669 | spin_lock(&inode_lock); | ||
| 670 | if (!wbc->for_kupdate || list_empty(&wb->b_io)) | ||
| 671 | queue_io(wb, wbc->older_than_this); | ||
| 672 | |||
| 673 | while (!list_empty(&wb->b_io)) { | ||
| 674 | struct inode *inode = list_entry(wb->b_io.prev, | ||
| 675 | struct inode, i_list); | ||
| 676 | struct super_block *sb = inode->i_sb; | ||
| 677 | enum sb_pin_state state; | ||
| 678 | |||
| 679 | if (wbc->sb && sb != wbc->sb) { | ||
| 680 | /* super block given and doesn't | ||
| 681 | match, skip this inode */ | ||
| 682 | redirty_tail(inode); | ||
| 683 | continue; | ||
| 684 | } | ||
| 685 | state = pin_sb_for_writeback(wbc, sb); | ||
| 686 | |||
| 687 | if (state == SB_PIN_FAILED) { | ||
| 688 | requeue_io(inode); | ||
| 689 | continue; | ||
| 690 | } | ||
| 691 | ret = writeback_sb_inodes(sb, wb, wbc); | ||
| 684 | 692 | ||
| 693 | if (state == SB_PINNED) | ||
| 694 | unpin_sb_for_writeback(sb); | ||
| 695 | if (ret) | ||
| 696 | break; | ||
| 697 | } | ||
| 685 | spin_unlock(&inode_lock); | 698 | spin_unlock(&inode_lock); |
| 686 | /* Leave any unwritten inodes on b_io */ | 699 | /* Leave any unwritten inodes on b_io */ |
| 687 | } | 700 | } |
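Note: the fs-writeback refactor above replaces the old 0/1 pin result and cached *psb pointer with a three-state enum and splits the inner loop into writeback_sb_inodes(), which drains the b_io inodes belonging to one superblock. A hedged outline of the resulting outer loop, with locking and list handling elided; the *_sketch prototypes are hypothetical stand-ins for the functions shown in the hunks:

enum sb_pin_state { SB_PINNED, SB_NOT_PINNED, SB_PIN_FAILED };

struct super_block;

/* Hypothetical stand-ins for the helpers in the diff above. */
extern int queue_empty_sketch(void);
extern struct super_block *next_queued_inode_sb_sketch(void);
extern void requeue_current_inode_sketch(void);
extern enum sb_pin_state pin_sb_for_writeback_sketch(struct super_block *sb);
extern void unpin_sb_for_writeback_sketch(struct super_block *sb);
extern int writeback_sb_inodes_sketch(struct super_block *sb);

static void writeback_loop_sketch(void)
{
        while (!queue_empty_sketch()) {
                struct super_block *sb = next_queued_inode_sb_sketch();
                enum sb_pin_state state = pin_sb_for_writeback_sketch(sb);
                int stop;

                if (state == SB_PIN_FAILED) {
                        requeue_current_inode_sketch();  /* revisit later */
                        continue;
                }
                stop = writeback_sb_inodes_sketch(sb);   /* returns 1 to interrupt */
                if (state == SB_PINNED)
                        unpin_sb_for_writeback_sketch(sb);
                if (stop)
                        break;
        }
}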
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c index 46435f3aae68..4765190d537f 100644 --- a/fs/fscache/stats.c +++ b/fs/fscache/stats.c | |||
| @@ -165,8 +165,8 @@ static int fscache_stats_show(struct seq_file *m, void *v) | |||
| 165 | atomic_read(&fscache_n_object_lookups), | 165 | atomic_read(&fscache_n_object_lookups), |
| 166 | atomic_read(&fscache_n_object_lookups_negative), | 166 | atomic_read(&fscache_n_object_lookups_negative), |
| 167 | atomic_read(&fscache_n_object_lookups_positive), | 167 | atomic_read(&fscache_n_object_lookups_positive), |
| 168 | atomic_read(&fscache_n_object_lookups_timed_out), | 168 | atomic_read(&fscache_n_object_created), |
| 169 | atomic_read(&fscache_n_object_created)); | 169 | atomic_read(&fscache_n_object_lookups_timed_out)); |
| 170 | 170 | ||
| 171 | seq_printf(m, "Updates: n=%u nul=%u run=%u\n", | 171 | seq_printf(m, "Updates: n=%u nul=%u run=%u\n", |
| 172 | atomic_read(&fscache_n_updates), | 172 | atomic_read(&fscache_n_updates), |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 2a3d352c0bff..a8766c4ef2e0 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
| @@ -1294,7 +1294,8 @@ static int nfs4_init_server(struct nfs_server *server, | |||
| 1294 | 1294 | ||
| 1295 | /* Initialise the client representation from the mount data */ | 1295 | /* Initialise the client representation from the mount data */ |
| 1296 | server->flags = data->flags; | 1296 | server->flags = data->flags; |
| 1297 | server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR; | 1297 | server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR| |
| 1298 | NFS_CAP_POSIX_LOCK; | ||
| 1298 | server->options = data->options; | 1299 | server->options = data->options; |
| 1299 | 1300 | ||
| 1300 | /* Get a client record */ | 1301 | /* Get a client record */ |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index c6f2750648f4..be46f26c9a56 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
| @@ -1025,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry | |||
| 1025 | res = NULL; | 1025 | res = NULL; |
| 1026 | goto out; | 1026 | goto out; |
| 1027 | /* This turned out not to be a regular file */ | 1027 | /* This turned out not to be a regular file */ |
| 1028 | case -EISDIR: | ||
| 1028 | case -ENOTDIR: | 1029 | case -ENOTDIR: |
| 1029 | goto no_open; | 1030 | goto no_open; |
| 1030 | case -ELOOP: | 1031 | case -ELOOP: |
| 1031 | if (!(nd->intent.open.flags & O_NOFOLLOW)) | 1032 | if (!(nd->intent.open.flags & O_NOFOLLOW)) |
| 1032 | goto no_open; | 1033 | goto no_open; |
| 1033 | /* case -EISDIR: */ | ||
| 1034 | /* case -EINVAL: */ | 1034 | /* case -EINVAL: */ |
| 1035 | default: | 1035 | default: |
| 1036 | goto out; | 1036 | goto out; |
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 737128f777f3..50a56edca0b5 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
| @@ -623,10 +623,10 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c | |||
| 623 | list_for_each_entry(pos, &nfsi->open_files, list) { | 623 | list_for_each_entry(pos, &nfsi->open_files, list) { |
| 624 | if (cred != NULL && pos->cred != cred) | 624 | if (cred != NULL && pos->cred != cred) |
| 625 | continue; | 625 | continue; |
| 626 | if ((pos->mode & mode) == mode) { | 626 | if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode) |
| 627 | ctx = get_nfs_open_context(pos); | 627 | continue; |
| 628 | break; | 628 | ctx = get_nfs_open_context(pos); |
| 629 | } | 629 | break; |
| 630 | } | 630 | } |
| 631 | spin_unlock(&inode->i_lock); | 631 | spin_unlock(&inode->i_lock); |
| 632 | return ctx; | 632 | return ctx; |
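Note: the nfs_find_open_context() hunk above tightens the match from "context grants at least the requested modes" to "context's read/write bits equal the requested combination", so a read-only request no longer matches a read-write context. A small standalone sketch of the two predicates, with stand-in bit values:

#include <stdbool.h>

#define FMODE_READ_BIT  0x1     /* stand-ins for the kernel fmode_t bits */
#define FMODE_WRITE_BIT 0x2

/* Old check: context grants at least the requested modes. */
static bool superset_match(unsigned ctx_mode, unsigned want)
{
        return (ctx_mode & want) == want;
}

/* New check: context's read/write bits equal the requested combination. */
static bool exact_match(unsigned ctx_mode, unsigned want)
{
        return (ctx_mode & (FMODE_READ_BIT | FMODE_WRITE_BIT)) == want;
}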
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d79a7b37e56c..638067007c65 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
| @@ -1523,6 +1523,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data) | |||
| 1523 | nfs_post_op_update_inode(dir, o_res->dir_attr); | 1523 | nfs_post_op_update_inode(dir, o_res->dir_attr); |
| 1524 | } else | 1524 | } else |
| 1525 | nfs_refresh_inode(dir, o_res->dir_attr); | 1525 | nfs_refresh_inode(dir, o_res->dir_attr); |
| 1526 | if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) | ||
| 1527 | server->caps &= ~NFS_CAP_POSIX_LOCK; | ||
| 1526 | if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { | 1528 | if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { |
| 1527 | status = _nfs4_proc_open_confirm(data); | 1529 | status = _nfs4_proc_open_confirm(data); |
| 1528 | if (status != 0) | 1530 | if (status != 0) |
| @@ -1664,7 +1666,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in | |||
| 1664 | status = PTR_ERR(state); | 1666 | status = PTR_ERR(state); |
| 1665 | if (IS_ERR(state)) | 1667 | if (IS_ERR(state)) |
| 1666 | goto err_opendata_put; | 1668 | goto err_opendata_put; |
| 1667 | if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0) | 1669 | if (server->caps & NFS_CAP_POSIX_LOCK) |
| 1668 | set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); | 1670 | set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); |
| 1669 | nfs4_opendata_put(opendata); | 1671 | nfs4_opendata_put(opendata); |
| 1670 | nfs4_put_state_owner(sp); | 1672 | nfs4_put_state_owner(sp); |
| @@ -2068,8 +2070,7 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, st | |||
| 2068 | case -EDQUOT: | 2070 | case -EDQUOT: |
| 2069 | case -ENOSPC: | 2071 | case -ENOSPC: |
| 2070 | case -EROFS: | 2072 | case -EROFS: |
| 2071 | lookup_instantiate_filp(nd, (struct dentry *)state, NULL); | 2073 | return PTR_ERR(state); |
| 2072 | return 1; | ||
| 2073 | default: | 2074 | default: |
| 2074 | goto out_drop; | 2075 | goto out_drop; |
| 2075 | } | 2076 | } |
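Note: the nfs/client.c and nfs4proc.c hunks above move the "server supports POSIX locking" decision from a per-open flag test to a per-server capability bit that is assumed at mount time and cleared the first time an OPEN reply lacks the POSIX locktype flag. A minimal standalone sketch of that optimistic-capability pattern, with a stand-in bit name:

#include <stdint.h>

#define CAP_POSIX_LOCK  (1u << 0)       /* stand-in for NFS_CAP_POSIX_LOCK */

/* Assume support at mount time; drop the bit on the first OPEN reply that
 * lacks the POSIX locktype flag.  Later opens consult only the cached bit. */
static void update_posix_lock_cap(uint32_t *server_caps,
                                  int reply_has_posix_locktype)
{
        if (!reply_has_posix_locktype)
                *server_caps &= ~CAP_POSIX_LOCK;
}

static int use_posix_locks(uint32_t server_caps)
{
        return (server_caps & CAP_POSIX_LOCK) != 0;
}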
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 53ff70e23993..de38d63aa920 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
| @@ -201,6 +201,7 @@ static int nfs_set_page_writeback(struct page *page) | |||
| 201 | struct inode *inode = page->mapping->host; | 201 | struct inode *inode = page->mapping->host; |
| 202 | struct nfs_server *nfss = NFS_SERVER(inode); | 202 | struct nfs_server *nfss = NFS_SERVER(inode); |
| 203 | 203 | ||
| 204 | page_cache_get(page); | ||
| 204 | if (atomic_long_inc_return(&nfss->writeback) > | 205 | if (atomic_long_inc_return(&nfss->writeback) > |
| 205 | NFS_CONGESTION_ON_THRESH) { | 206 | NFS_CONGESTION_ON_THRESH) { |
| 206 | set_bdi_congested(&nfss->backing_dev_info, | 207 | set_bdi_congested(&nfss->backing_dev_info, |
| @@ -216,6 +217,7 @@ static void nfs_end_page_writeback(struct page *page) | |||
| 216 | struct nfs_server *nfss = NFS_SERVER(inode); | 217 | struct nfs_server *nfss = NFS_SERVER(inode); |
| 217 | 218 | ||
| 218 | end_page_writeback(page); | 219 | end_page_writeback(page); |
| 220 | page_cache_release(page); | ||
| 219 | if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) | 221 | if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) |
| 220 | clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); | 222 | clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); |
| 221 | } | 223 | } |
| @@ -421,6 +423,7 @@ static void | |||
| 421 | nfs_mark_request_dirty(struct nfs_page *req) | 423 | nfs_mark_request_dirty(struct nfs_page *req) |
| 422 | { | 424 | { |
| 423 | __set_page_dirty_nobuffers(req->wb_page); | 425 | __set_page_dirty_nobuffers(req->wb_page); |
| 426 | __mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC); | ||
| 424 | } | 427 | } |
| 425 | 428 | ||
| 426 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | 429 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) |
| @@ -660,9 +663,11 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, | |||
| 660 | req = nfs_setup_write_request(ctx, page, offset, count); | 663 | req = nfs_setup_write_request(ctx, page, offset, count); |
| 661 | if (IS_ERR(req)) | 664 | if (IS_ERR(req)) |
| 662 | return PTR_ERR(req); | 665 | return PTR_ERR(req); |
| 666 | nfs_mark_request_dirty(req); | ||
| 663 | /* Update file length */ | 667 | /* Update file length */ |
| 664 | nfs_grow_file(page, offset, count); | 668 | nfs_grow_file(page, offset, count); |
| 665 | nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); | 669 | nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); |
| 670 | nfs_mark_request_dirty(req); | ||
| 666 | nfs_clear_page_tag_locked(req); | 671 | nfs_clear_page_tag_locked(req); |
| 667 | return 0; | 672 | return 0; |
| 668 | } | 673 | } |
| @@ -739,8 +744,6 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
| 739 | status = nfs_writepage_setup(ctx, page, offset, count); | 744 | status = nfs_writepage_setup(ctx, page, offset, count); |
| 740 | if (status < 0) | 745 | if (status < 0) |
| 741 | nfs_set_pageerror(page); | 746 | nfs_set_pageerror(page); |
| 742 | else | ||
| 743 | __set_page_dirty_nobuffers(page); | ||
| 744 | 747 | ||
| 745 | dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n", | 748 | dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n", |
| 746 | status, (long long)i_size_read(inode)); | 749 | status, (long long)i_size_read(inode)); |
| @@ -749,13 +752,12 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
| 749 | 752 | ||
| 750 | static void nfs_writepage_release(struct nfs_page *req) | 753 | static void nfs_writepage_release(struct nfs_page *req) |
| 751 | { | 754 | { |
| 755 | struct page *page = req->wb_page; | ||
| 752 | 756 | ||
| 753 | if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) { | 757 | if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) |
| 754 | nfs_end_page_writeback(req->wb_page); | ||
| 755 | nfs_inode_remove_request(req); | 758 | nfs_inode_remove_request(req); |
| 756 | } else | ||
| 757 | nfs_end_page_writeback(req->wb_page); | ||
| 758 | nfs_clear_page_tag_locked(req); | 759 | nfs_clear_page_tag_locked(req); |
| 760 | nfs_end_page_writeback(page); | ||
| 759 | } | 761 | } |
| 760 | 762 | ||
| 761 | static int flush_task_priority(int how) | 763 | static int flush_task_priority(int how) |
| @@ -779,7 +781,6 @@ static int nfs_write_rpcsetup(struct nfs_page *req, | |||
| 779 | int how) | 781 | int how) |
| 780 | { | 782 | { |
| 781 | struct inode *inode = req->wb_context->path.dentry->d_inode; | 783 | struct inode *inode = req->wb_context->path.dentry->d_inode; |
| 782 | int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC; | ||
| 783 | int priority = flush_task_priority(how); | 784 | int priority = flush_task_priority(how); |
| 784 | struct rpc_task *task; | 785 | struct rpc_task *task; |
| 785 | struct rpc_message msg = { | 786 | struct rpc_message msg = { |
| @@ -794,9 +795,10 @@ static int nfs_write_rpcsetup(struct nfs_page *req, | |||
| 794 | .callback_ops = call_ops, | 795 | .callback_ops = call_ops, |
| 795 | .callback_data = data, | 796 | .callback_data = data, |
| 796 | .workqueue = nfsiod_workqueue, | 797 | .workqueue = nfsiod_workqueue, |
| 797 | .flags = flags, | 798 | .flags = RPC_TASK_ASYNC, |
| 798 | .priority = priority, | 799 | .priority = priority, |
| 799 | }; | 800 | }; |
| 801 | int ret = 0; | ||
| 800 | 802 | ||
| 801 | /* Set up the RPC argument and reply structs | 803 | /* Set up the RPC argument and reply structs |
| 802 | * NB: take care not to mess about with data->commit et al. */ | 804 | * NB: take care not to mess about with data->commit et al. */ |
| @@ -835,10 +837,18 @@ static int nfs_write_rpcsetup(struct nfs_page *req, | |||
| 835 | (unsigned long long)data->args.offset); | 837 | (unsigned long long)data->args.offset); |
| 836 | 838 | ||
| 837 | task = rpc_run_task(&task_setup_data); | 839 | task = rpc_run_task(&task_setup_data); |
| 838 | if (IS_ERR(task)) | 840 | if (IS_ERR(task)) { |
| 839 | return PTR_ERR(task); | 841 | ret = PTR_ERR(task); |
| 842 | goto out; | ||
| 843 | } | ||
| 844 | if (how & FLUSH_SYNC) { | ||
| 845 | ret = rpc_wait_for_completion_task(task); | ||
| 846 | if (ret == 0) | ||
| 847 | ret = task->tk_status; | ||
| 848 | } | ||
| 840 | rpc_put_task(task); | 849 | rpc_put_task(task); |
| 841 | return 0; | 850 | out: |
| 851 | return ret; | ||
| 842 | } | 852 | } |
| 843 | 853 | ||
| 844 | /* If a nfs_flush_* function fails, it should remove reqs from @head and | 854 | /* If a nfs_flush_* function fails, it should remove reqs from @head and |
| @@ -847,9 +857,11 @@ static int nfs_write_rpcsetup(struct nfs_page *req, | |||
| 847 | */ | 857 | */ |
| 848 | static void nfs_redirty_request(struct nfs_page *req) | 858 | static void nfs_redirty_request(struct nfs_page *req) |
| 849 | { | 859 | { |
| 860 | struct page *page = req->wb_page; | ||
| 861 | |||
| 850 | nfs_mark_request_dirty(req); | 862 | nfs_mark_request_dirty(req); |
| 851 | nfs_end_page_writeback(req->wb_page); | ||
| 852 | nfs_clear_page_tag_locked(req); | 863 | nfs_clear_page_tag_locked(req); |
| 864 | nfs_end_page_writeback(page); | ||
| 853 | } | 865 | } |
| 854 | 866 | ||
| 855 | /* | 867 | /* |
| @@ -1084,16 +1096,15 @@ static void nfs_writeback_release_full(void *calldata) | |||
| 1084 | if (nfs_write_need_commit(data)) { | 1096 | if (nfs_write_need_commit(data)) { |
| 1085 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); | 1097 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); |
| 1086 | nfs_mark_request_commit(req); | 1098 | nfs_mark_request_commit(req); |
| 1087 | nfs_end_page_writeback(page); | ||
| 1088 | dprintk(" marked for commit\n"); | 1099 | dprintk(" marked for commit\n"); |
| 1089 | goto next; | 1100 | goto next; |
| 1090 | } | 1101 | } |
| 1091 | dprintk(" OK\n"); | 1102 | dprintk(" OK\n"); |
| 1092 | remove_request: | 1103 | remove_request: |
| 1093 | nfs_end_page_writeback(page); | ||
| 1094 | nfs_inode_remove_request(req); | 1104 | nfs_inode_remove_request(req); |
| 1095 | next: | 1105 | next: |
| 1096 | nfs_clear_page_tag_locked(req); | 1106 | nfs_clear_page_tag_locked(req); |
| 1107 | nfs_end_page_writeback(page); | ||
| 1097 | } | 1108 | } |
| 1098 | nfs_writedata_release(calldata); | 1109 | nfs_writedata_release(calldata); |
| 1099 | } | 1110 | } |
| @@ -1207,7 +1218,6 @@ static int nfs_commit_rpcsetup(struct list_head *head, | |||
| 1207 | { | 1218 | { |
| 1208 | struct nfs_page *first = nfs_list_entry(head->next); | 1219 | struct nfs_page *first = nfs_list_entry(head->next); |
| 1209 | struct inode *inode = first->wb_context->path.dentry->d_inode; | 1220 | struct inode *inode = first->wb_context->path.dentry->d_inode; |
| 1210 | int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC; | ||
| 1211 | int priority = flush_task_priority(how); | 1221 | int priority = flush_task_priority(how); |
| 1212 | struct rpc_task *task; | 1222 | struct rpc_task *task; |
| 1213 | struct rpc_message msg = { | 1223 | struct rpc_message msg = { |
| @@ -1222,7 +1232,7 @@ static int nfs_commit_rpcsetup(struct list_head *head, | |||
| 1222 | .callback_ops = &nfs_commit_ops, | 1232 | .callback_ops = &nfs_commit_ops, |
| 1223 | .callback_data = data, | 1233 | .callback_data = data, |
| 1224 | .workqueue = nfsiod_workqueue, | 1234 | .workqueue = nfsiod_workqueue, |
| 1225 | .flags = flags, | 1235 | .flags = RPC_TASK_ASYNC, |
| 1226 | .priority = priority, | 1236 | .priority = priority, |
| 1227 | }; | 1237 | }; |
| 1228 | 1238 | ||
| @@ -1252,6 +1262,8 @@ static int nfs_commit_rpcsetup(struct list_head *head, | |||
| 1252 | task = rpc_run_task(&task_setup_data); | 1262 | task = rpc_run_task(&task_setup_data); |
| 1253 | if (IS_ERR(task)) | 1263 | if (IS_ERR(task)) |
| 1254 | return PTR_ERR(task); | 1264 | return PTR_ERR(task); |
| 1265 | if (how & FLUSH_SYNC) | ||
| 1266 | rpc_wait_for_completion_task(task); | ||
| 1255 | rpc_put_task(task); | 1267 | rpc_put_task(task); |
| 1256 | return 0; | 1268 | return 0; |
| 1257 | } | 1269 | } |
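
The fs/nfs/write.c hunks above drop the conditional RPC_TASK_ASYNC flag: the write and commit RPC tasks are now always started asynchronously, and a FLUSH_SYNC caller simply waits for the task to complete afterwards (picking up tk_status in the write case). The following is only a rough userspace sketch of that "always launch async, optionally wait" pattern; launch_io, io_task and the FLUSH_SYNC constant here are illustrative names, not the kernel API.

#include <pthread.h>
#include <stdio.h>

#define FLUSH_SYNC 0x1                  /* illustrative "wait for completion" flag */

struct io_task {
        pthread_t thread;
        int status;                     /* set by the worker, like task->tk_status */
};

static void *io_worker(void *arg)
{
        struct io_task *task = arg;
        /* ... perform the actual I/O here ... */
        task->status = 0;               /* report success */
        return NULL;
}

/* Always start the work asynchronously; only wait when the caller asked for it. */
static int launch_io(struct io_task *task, int how)
{
        int ret = pthread_create(&task->thread, NULL, io_worker, task);

        if (ret)
                return -ret;            /* could not even start the task */
        if (how & FLUSH_SYNC) {
                ret = pthread_join(task->thread, NULL);
                if (ret == 0)
                        ret = task->status;     /* propagate the task's own result */
        } else {
                pthread_detach(task->thread);
        }
        return ret;
}

int main(void)
{
        struct io_task t = { .status = -1 };

        printf("synchronous launch returned %d\n", launch_io(&t, FLUSH_SYNC));
        return 0;
}
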
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index 8d6356a804f3..7cfb87e692da 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c | |||
| @@ -426,7 +426,7 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode, | |||
| 426 | bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh); | 426 | bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh); |
| 427 | if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group), | 427 | if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group), |
| 428 | group_offset, bitmap)) | 428 | group_offset, bitmap)) |
| 429 | printk(KERN_WARNING "%s: entry numer %llu already freed\n", | 429 | printk(KERN_WARNING "%s: entry number %llu already freed\n", |
| 430 | __func__, (unsigned long long)req->pr_entry_nr); | 430 | __func__, (unsigned long long)req->pr_entry_nr); |
| 431 | 431 | ||
| 432 | nilfs_palloc_group_desc_add_entries(inode, group, desc, 1); | 432 | nilfs_palloc_group_desc_add_entries(inode, group, desc, 1); |
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 7cdd98b8d514..76c38e3e19d2 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c | |||
| @@ -1879,7 +1879,7 @@ static int nilfs_btree_propagate_v(struct nilfs_btree *btree, | |||
| 1879 | struct nilfs_btree_path *path, | 1879 | struct nilfs_btree_path *path, |
| 1880 | int level, struct buffer_head *bh) | 1880 | int level, struct buffer_head *bh) |
| 1881 | { | 1881 | { |
| 1882 | int maxlevel, ret; | 1882 | int maxlevel = 0, ret; |
| 1883 | struct nilfs_btree_node *parent; | 1883 | struct nilfs_btree_node *parent; |
| 1884 | struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap); | 1884 | struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap); |
| 1885 | __u64 ptr; | 1885 | __u64 ptr; |
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index c2ff1b306012..f90a33d9a5b0 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c | |||
| @@ -649,7 +649,7 @@ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp, | |||
| 649 | long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | 649 | long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
| 650 | { | 650 | { |
| 651 | struct inode *inode = filp->f_dentry->d_inode; | 651 | struct inode *inode = filp->f_dentry->d_inode; |
| 652 | void __user *argp = (void * __user *)arg; | 652 | void __user *argp = (void __user *)arg; |
| 653 | 653 | ||
| 654 | switch (cmd) { | 654 | switch (cmd) { |
| 655 | case NILFS_IOCTL_CHANGE_CPMODE: | 655 | case NILFS_IOCTL_CHANGE_CPMODE: |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index caf0337dff73..070553427dd5 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
| @@ -662,31 +662,18 @@ static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset) | |||
| 662 | return pme; | 662 | return pme; |
| 663 | } | 663 | } |
| 664 | 664 | ||
| 665 | static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr, | 665 | /* This function walks within one hugetlb entry in a single call */ |
| 666 | unsigned long end, struct mm_walk *walk) | 666 | static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, |
| 667 | unsigned long addr, unsigned long end, | ||
| 668 | struct mm_walk *walk) | ||
| 667 | { | 669 | { |
| 668 | struct vm_area_struct *vma; | ||
| 669 | struct pagemapread *pm = walk->private; | 670 | struct pagemapread *pm = walk->private; |
| 670 | struct hstate *hs = NULL; | ||
| 671 | int err = 0; | 671 | int err = 0; |
| 672 | u64 pfn; | ||
| 672 | 673 | ||
| 673 | vma = find_vma(walk->mm, addr); | ||
| 674 | if (vma) | ||
| 675 | hs = hstate_vma(vma); | ||
| 676 | for (; addr != end; addr += PAGE_SIZE) { | 674 | for (; addr != end; addr += PAGE_SIZE) { |
| 677 | u64 pfn = PM_NOT_PRESENT; | 675 | int offset = (addr & ~hmask) >> PAGE_SHIFT; |
| 678 | 676 | pfn = huge_pte_to_pagemap_entry(*pte, offset); | |
| 679 | if (vma && (addr >= vma->vm_end)) { | ||
| 680 | vma = find_vma(walk->mm, addr); | ||
| 681 | if (vma) | ||
| 682 | hs = hstate_vma(vma); | ||
| 683 | } | ||
| 684 | |||
| 685 | if (vma && (vma->vm_start <= addr) && is_vm_hugetlb_page(vma)) { | ||
| 686 | /* calculate pfn of the "raw" page in the hugepage. */ | ||
| 687 | int offset = (addr & ~huge_page_mask(hs)) >> PAGE_SHIFT; | ||
| 688 | pfn = huge_pte_to_pagemap_entry(*pte, offset); | ||
| 689 | } | ||
| 690 | err = add_to_pagemap(addr, pfn, pm); | 677 | err = add_to_pagemap(addr, pfn, pm); |
| 691 | if (err) | 678 | if (err) |
| 692 | return err; | 679 | return err; |
| @@ -800,7 +787,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, | |||
| 800 | start_vaddr = end; | 787 | start_vaddr = end; |
| 801 | 788 | ||
| 802 | len = min(count, PM_ENTRY_BYTES * pm.pos); | 789 | len = min(count, PM_ENTRY_BYTES * pm.pos); |
| 803 | if (copy_to_user(buf, pm.buffer, len) < 0) { | 790 | if (copy_to_user(buf, pm.buffer, len)) { |
| 804 | ret = -EFAULT; | 791 | ret = -EFAULT; |
| 805 | goto out_free; | 792 | goto out_free; |
| 806 | } | 793 | } |
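
The pagemap_read() hunk above works because copy_to_user() returns the number of bytes it failed to copy, as an unsigned value, so the old `< 0` test could never fire; any non-zero return has to be treated as a fault. A minimal userspace analog of that return convention (copy_out is a made-up stand-in, not a kernel interface):

#include <stdio.h>
#include <string.h>

/* Returns the number of bytes NOT copied, 0 on full success, like copy_to_user(). */
static size_t copy_out(void *dst, const void *src, size_t len, size_t room)
{
        size_t n = len < room ? len : room;     /* pretend only 'room' bytes fit */

        memcpy(dst, src, n);
        return len - n;                         /* unsigned: never negative */
}

int main(void)
{
        char dst[8];
        char src[16] = "0123456789abcdef";

        /* Broken check: an unsigned return compared with < 0 is always false. */
        if (copy_out(dst, src, sizeof(src), sizeof(dst)) < 0)
                puts("never reached");

        /* Correct check: any non-zero remainder means the copy was short. */
        if (copy_out(dst, src, sizeof(src), sizeof(dst)))
                puts("short copy detected, would return -EFAULT");
        return 0;
}
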
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index e0b870f4749f..a0a9405b202a 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
| @@ -874,14 +874,18 @@ static int dqinit_needed(struct inode *inode, int type) | |||
| 874 | static void add_dquot_ref(struct super_block *sb, int type) | 874 | static void add_dquot_ref(struct super_block *sb, int type) |
| 875 | { | 875 | { |
| 876 | struct inode *inode, *old_inode = NULL; | 876 | struct inode *inode, *old_inode = NULL; |
| 877 | #ifdef __DQUOT_PARANOIA | ||
| 877 | int reserved = 0; | 878 | int reserved = 0; |
| 879 | #endif | ||
| 878 | 880 | ||
| 879 | spin_lock(&inode_lock); | 881 | spin_lock(&inode_lock); |
| 880 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { | 882 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { |
| 881 | if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) | 883 | if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) |
| 882 | continue; | 884 | continue; |
| 885 | #ifdef __DQUOT_PARANOIA | ||
| 883 | if (unlikely(inode_get_rsv_space(inode) > 0)) | 886 | if (unlikely(inode_get_rsv_space(inode) > 0)) |
| 884 | reserved = 1; | 887 | reserved = 1; |
| 888 | #endif | ||
| 885 | if (!atomic_read(&inode->i_writecount)) | 889 | if (!atomic_read(&inode->i_writecount)) |
| 886 | continue; | 890 | continue; |
| 887 | if (!dqinit_needed(inode, type)) | 891 | if (!dqinit_needed(inode, type)) |
| @@ -903,11 +907,13 @@ static void add_dquot_ref(struct super_block *sb, int type) | |||
| 903 | spin_unlock(&inode_lock); | 907 | spin_unlock(&inode_lock); |
| 904 | iput(old_inode); | 908 | iput(old_inode); |
| 905 | 909 | ||
| 910 | #ifdef __DQUOT_PARANOIA | ||
| 906 | if (reserved) { | 911 | if (reserved) { |
| 907 | printk(KERN_WARNING "VFS (%s): Writes happened before quota" | 912 | printk(KERN_WARNING "VFS (%s): Writes happened before quota" |
| 908 | " was turned on thus quota information is probably " | 913 | " was turned on thus quota information is probably " |
| 909 | "inconsistent. Please run quotacheck(8).\n", sb->s_id); | 914 | "inconsistent. Please run quotacheck(8).\n", sb->s_id); |
| 910 | } | 915 | } |
| 916 | #endif | ||
| 911 | } | 917 | } |
| 912 | 918 | ||
| 913 | /* | 919 | /* |
| @@ -2322,34 +2328,34 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) | |||
| 2322 | if (di->dqb_valid & QIF_SPACE) { | 2328 | if (di->dqb_valid & QIF_SPACE) { |
| 2323 | dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; | 2329 | dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; |
| 2324 | check_blim = 1; | 2330 | check_blim = 1; |
| 2325 | __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); | 2331 | set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); |
| 2326 | } | 2332 | } |
| 2327 | if (di->dqb_valid & QIF_BLIMITS) { | 2333 | if (di->dqb_valid & QIF_BLIMITS) { |
| 2328 | dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); | 2334 | dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); |
| 2329 | dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); | 2335 | dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); |
| 2330 | check_blim = 1; | 2336 | check_blim = 1; |
| 2331 | __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); | 2337 | set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); |
| 2332 | } | 2338 | } |
| 2333 | if (di->dqb_valid & QIF_INODES) { | 2339 | if (di->dqb_valid & QIF_INODES) { |
| 2334 | dm->dqb_curinodes = di->dqb_curinodes; | 2340 | dm->dqb_curinodes = di->dqb_curinodes; |
| 2335 | check_ilim = 1; | 2341 | check_ilim = 1; |
| 2336 | __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); | 2342 | set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); |
| 2337 | } | 2343 | } |
| 2338 | if (di->dqb_valid & QIF_ILIMITS) { | 2344 | if (di->dqb_valid & QIF_ILIMITS) { |
| 2339 | dm->dqb_isoftlimit = di->dqb_isoftlimit; | 2345 | dm->dqb_isoftlimit = di->dqb_isoftlimit; |
| 2340 | dm->dqb_ihardlimit = di->dqb_ihardlimit; | 2346 | dm->dqb_ihardlimit = di->dqb_ihardlimit; |
| 2341 | check_ilim = 1; | 2347 | check_ilim = 1; |
| 2342 | __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); | 2348 | set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); |
| 2343 | } | 2349 | } |
| 2344 | if (di->dqb_valid & QIF_BTIME) { | 2350 | if (di->dqb_valid & QIF_BTIME) { |
| 2345 | dm->dqb_btime = di->dqb_btime; | 2351 | dm->dqb_btime = di->dqb_btime; |
| 2346 | check_blim = 1; | 2352 | check_blim = 1; |
| 2347 | __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); | 2353 | set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); |
| 2348 | } | 2354 | } |
| 2349 | if (di->dqb_valid & QIF_ITIME) { | 2355 | if (di->dqb_valid & QIF_ITIME) { |
| 2350 | dm->dqb_itime = di->dqb_itime; | 2356 | dm->dqb_itime = di->dqb_itime; |
| 2351 | check_ilim = 1; | 2357 | check_ilim = 1; |
| 2352 | __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); | 2358 | set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); |
| 2353 | } | 2359 | } |
| 2354 | 2360 | ||
| 2355 | if (check_blim) { | 2361 | if (check_blim) { |
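
The do_set_dqblk() hunk above swaps the non-atomic __set_bit() for the atomic set_bit(), which matters when dq_flags can be updated concurrently by code that does not share a lock with this path; only the atomic read-modify-write keeps racing updates to other bits in the same word from being lost. A rough userspace analog with C11 atomics (the function names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long shared_flags;      /* shared bitmask, like dquot->dq_flags */

/* Like set_bit(): the OR is one indivisible operation, so a concurrent
 * update to another bit in the same word cannot be lost. */
static void set_bit_atomic(int nr, _Atomic unsigned long *addr)
{
        atomic_fetch_or(addr, 1UL << nr);
}

/* Like __set_bit(): plain load/modify/store, only safe when every writer
 * of this word holds the same lock or the word is not shared at all. */
static void set_bit_nonatomic(int nr, unsigned long *addr)
{
        *addr |= 1UL << nr;
}

int main(void)
{
        unsigned long private_flags = 0;        /* not shared: non-atomic is fine */

        set_bit_atomic(3, &shared_flags);
        set_bit_nonatomic(5, &private_flags);
        printf("shared=%#lx private=%#lx\n",
               (unsigned long)atomic_load(&shared_flags), private_flags);
        return 0;
}
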
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index 19626e2491c4..9a9378b4eb5a 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c | |||
| @@ -125,9 +125,8 @@ static void udf_bitmap_free_blocks(struct super_block *sb, | |||
| 125 | 125 | ||
| 126 | mutex_lock(&sbi->s_alloc_mutex); | 126 | mutex_lock(&sbi->s_alloc_mutex); |
| 127 | partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; | 127 | partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; |
| 128 | if (bloc->logicalBlockNum < 0 || | 128 | if (bloc->logicalBlockNum + count < count || |
| 129 | (bloc->logicalBlockNum + count) > | 129 | (bloc->logicalBlockNum + count) > partmap->s_partition_len) { |
| 130 | partmap->s_partition_len) { | ||
| 131 | udf_debug("%d < %d || %d + %d > %d\n", | 130 | udf_debug("%d < %d || %d + %d > %d\n", |
| 132 | bloc->logicalBlockNum, 0, bloc->logicalBlockNum, | 131 | bloc->logicalBlockNum, 0, bloc->logicalBlockNum, |
| 133 | count, partmap->s_partition_len); | 132 | count, partmap->s_partition_len); |
| @@ -393,9 +392,8 @@ static void udf_table_free_blocks(struct super_block *sb, | |||
| 393 | 392 | ||
| 394 | mutex_lock(&sbi->s_alloc_mutex); | 393 | mutex_lock(&sbi->s_alloc_mutex); |
| 395 | partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; | 394 | partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; |
| 396 | if (bloc->logicalBlockNum < 0 || | 395 | if (bloc->logicalBlockNum + count < count || |
| 397 | (bloc->logicalBlockNum + count) > | 396 | (bloc->logicalBlockNum + count) > partmap->s_partition_len) { |
| 398 | partmap->s_partition_len) { | ||
| 399 | udf_debug("%d < %d || %d + %d > %d\n", | 397 | udf_debug("%d < %d || %d + %d > %d\n", |
| 400 | bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count, | 398 | bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count, |
| 401 | partmap->s_partition_len); | 399 | partmap->s_partition_len); |
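
Both udf_bitmap_free_blocks() and udf_table_free_blocks() above replace the old `logicalBlockNum < 0` test with `logicalBlockNum + count < count`, which detects wraparound of the unsigned addition before the result is compared against the partition length. The idiom in isolation (range_ok is an illustrative name):

#include <stdint.h>
#include <stdio.h>

/* Accept [block, block + count) only if the sum does not wrap and stays
 * within 'limit'.  "block + count < count" can only be true if the
 * unsigned addition wrapped past UINT32_MAX. */
static int range_ok(uint32_t block, uint32_t count, uint32_t limit)
{
        if (block + count < count)
                return 0;                       /* wrapped around */
        if (block + count > limit)
                return 0;                       /* past the end of the partition */
        return 1;
}

int main(void)
{
        printf("%d\n", range_ok(10, 20, 100));           /* 1: fits */
        printf("%d\n", range_ok(90, 20, 100));           /* 0: past the end */
        printf("%d\n", range_ok(0xFFFFFFF0u, 32, 100));  /* 0: wraps around */
        return 0;
}
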
diff --git a/fs/udf/file.c b/fs/udf/file.c index 1eb06774ed90..4b6a46ccbf46 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c | |||
| @@ -218,7 +218,7 @@ const struct file_operations udf_file_operations = { | |||
| 218 | .llseek = generic_file_llseek, | 218 | .llseek = generic_file_llseek, |
| 219 | }; | 219 | }; |
| 220 | 220 | ||
| 221 | static int udf_setattr(struct dentry *dentry, struct iattr *iattr) | 221 | int udf_setattr(struct dentry *dentry, struct iattr *iattr) |
| 222 | { | 222 | { |
| 223 | struct inode *inode = dentry->d_inode; | 223 | struct inode *inode = dentry->d_inode; |
| 224 | int error; | 224 | int error; |
diff --git a/fs/udf/inode.c b/fs/udf/inode.c index bb863fe579ac..8a3fbd177cab 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c | |||
| @@ -1314,7 +1314,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
| 1314 | break; | 1314 | break; |
| 1315 | case ICBTAG_FILE_TYPE_SYMLINK: | 1315 | case ICBTAG_FILE_TYPE_SYMLINK: |
| 1316 | inode->i_data.a_ops = &udf_symlink_aops; | 1316 | inode->i_data.a_ops = &udf_symlink_aops; |
| 1317 | inode->i_op = &page_symlink_inode_operations; | 1317 | inode->i_op = &udf_symlink_inode_operations; |
| 1318 | inode->i_mode = S_IFLNK | S_IRWXUGO; | 1318 | inode->i_mode = S_IFLNK | S_IRWXUGO; |
| 1319 | break; | 1319 | break; |
| 1320 | case ICBTAG_FILE_TYPE_MAIN: | 1320 | case ICBTAG_FILE_TYPE_MAIN: |
diff --git a/fs/udf/namei.c b/fs/udf/namei.c index db423ab078b1..75816025f95f 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c | |||
| @@ -925,7 +925,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, | |||
| 925 | iinfo = UDF_I(inode); | 925 | iinfo = UDF_I(inode); |
| 926 | inode->i_mode = S_IFLNK | S_IRWXUGO; | 926 | inode->i_mode = S_IFLNK | S_IRWXUGO; |
| 927 | inode->i_data.a_ops = &udf_symlink_aops; | 927 | inode->i_data.a_ops = &udf_symlink_aops; |
| 928 | inode->i_op = &page_symlink_inode_operations; | 928 | inode->i_op = &udf_symlink_inode_operations; |
| 929 | 929 | ||
| 930 | if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { | 930 | if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { |
| 931 | struct kernel_lb_addr eloc; | 931 | struct kernel_lb_addr eloc; |
| @@ -1393,6 +1393,7 @@ const struct export_operations udf_export_ops = { | |||
| 1393 | const struct inode_operations udf_dir_inode_operations = { | 1393 | const struct inode_operations udf_dir_inode_operations = { |
| 1394 | .lookup = udf_lookup, | 1394 | .lookup = udf_lookup, |
| 1395 | .create = udf_create, | 1395 | .create = udf_create, |
| 1396 | .setattr = udf_setattr, | ||
| 1396 | .link = udf_link, | 1397 | .link = udf_link, |
| 1397 | .unlink = udf_unlink, | 1398 | .unlink = udf_unlink, |
| 1398 | .symlink = udf_symlink, | 1399 | .symlink = udf_symlink, |
| @@ -1401,3 +1402,9 @@ const struct inode_operations udf_dir_inode_operations = { | |||
| 1401 | .mknod = udf_mknod, | 1402 | .mknod = udf_mknod, |
| 1402 | .rename = udf_rename, | 1403 | .rename = udf_rename, |
| 1403 | }; | 1404 | }; |
| 1405 | const struct inode_operations udf_symlink_inode_operations = { | ||
| 1406 | .readlink = generic_readlink, | ||
| 1407 | .follow_link = page_follow_link_light, | ||
| 1408 | .put_link = page_put_link, | ||
| 1409 | .setattr = udf_setattr, | ||
| 1410 | }; | ||
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h index 4223ac855da9..702a1148e702 100644 --- a/fs/udf/udfdecl.h +++ b/fs/udf/udfdecl.h | |||
| @@ -76,6 +76,7 @@ extern const struct inode_operations udf_dir_inode_operations; | |||
| 76 | extern const struct file_operations udf_dir_operations; | 76 | extern const struct file_operations udf_dir_operations; |
| 77 | extern const struct inode_operations udf_file_inode_operations; | 77 | extern const struct inode_operations udf_file_inode_operations; |
| 78 | extern const struct file_operations udf_file_operations; | 78 | extern const struct file_operations udf_file_operations; |
| 79 | extern const struct inode_operations udf_symlink_inode_operations; | ||
| 79 | extern const struct address_space_operations udf_aops; | 80 | extern const struct address_space_operations udf_aops; |
| 80 | extern const struct address_space_operations udf_adinicb_aops; | 81 | extern const struct address_space_operations udf_adinicb_aops; |
| 81 | extern const struct address_space_operations udf_symlink_aops; | 82 | extern const struct address_space_operations udf_symlink_aops; |
| @@ -131,7 +132,7 @@ extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *, | |||
| 131 | /* file.c */ | 132 | /* file.c */ |
| 132 | extern int udf_ioctl(struct inode *, struct file *, unsigned int, | 133 | extern int udf_ioctl(struct inode *, struct file *, unsigned int, |
| 133 | unsigned long); | 134 | unsigned long); |
| 134 | 135 | extern int udf_setattr(struct dentry *dentry, struct iattr *iattr); | |
| 135 | /* inode.c */ | 136 | /* inode.c */ |
| 136 | extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *); | 137 | extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *); |
| 137 | extern int udf_sync_inode(struct inode *); | 138 | extern int udf_sync_inode(struct inode *); |
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 05cd85317f6f..fd9698215759 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c | |||
| @@ -820,10 +820,10 @@ xfs_reclaim_inode( | |||
| 820 | * call into reclaim to find it in a clean state instead of waiting for | 820 | * call into reclaim to find it in a clean state instead of waiting for |
| 821 | * it now. We also don't return errors here - if the error is transient | 821 | * it now. We also don't return errors here - if the error is transient |
| 822 | * then the next reclaim pass will flush the inode, and if the error | 822 | * then the next reclaim pass will flush the inode, and if the error |
| 823 | * is permanent then the next sync reclaim will relcaim the inode and | 823 | * is permanent then the next sync reclaim will reclaim the inode and |
| 824 | * pass on the error. | 824 | * pass on the error. |
| 825 | */ | 825 | */ |
| 826 | if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount)) { | 826 | if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) { |
| 827 | xfs_fs_cmn_err(CE_WARN, ip->i_mount, | 827 | xfs_fs_cmn_err(CE_WARN, ip->i_mount, |
| 828 | "inode 0x%llx background reclaim flush failed with %d", | 828 | "inode 0x%llx background reclaim flush failed with %d", |
| 829 | (long long)ip->i_ino, error); | 829 | (long long)ip->i_ino, error); |
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index e8fba92d7cd9..2be019136287 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
| @@ -745,9 +745,16 @@ xfs_log_move_tail(xfs_mount_t *mp, | |||
| 745 | 745 | ||
| 746 | /* | 746 | /* |
| 747 | * Determine if we have a transaction that has gone to disk | 747 | * Determine if we have a transaction that has gone to disk |
| 748 | * that needs to be covered. Log activity needs to be idle (no AIL and | 748 | * that needs to be covered. To begin the transition to the idle state |
| 749 | * nothing in the iclogs). And, we need to be in the right state indicating | 749 | * firstly the log needs to be idle (no AIL and nothing in the iclogs). |
| 750 | * something has gone out. | 750 | * If we are then in a state where covering is needed, the caller is informed |
| 751 | * that dummy transactions are required to move the log into the idle state. | ||
| 752 | * | ||
| 753 | * Because this is called as part of the sync process, we should also indicate | ||
| 754 | * that dummy transactions should be issued in anything but the covered or | ||
| 755 | * idle states. This ensures that the log tail is accurately reflected in | ||
| 756 | * the log at the end of the sync, hence if a crash occurs this avoids replay | ||
| 757 | * of transactions where the metadata is already on disk. | ||
| 751 | */ | 758 | */ |
| 752 | int | 759 | int |
| 753 | xfs_log_need_covered(xfs_mount_t *mp) | 760 | xfs_log_need_covered(xfs_mount_t *mp) |
| @@ -759,17 +766,24 @@ xfs_log_need_covered(xfs_mount_t *mp) | |||
| 759 | return 0; | 766 | return 0; |
| 760 | 767 | ||
| 761 | spin_lock(&log->l_icloglock); | 768 | spin_lock(&log->l_icloglock); |
| 762 | if (((log->l_covered_state == XLOG_STATE_COVER_NEED) || | 769 | switch (log->l_covered_state) { |
| 763 | (log->l_covered_state == XLOG_STATE_COVER_NEED2)) | 770 | case XLOG_STATE_COVER_DONE: |
| 764 | && !xfs_trans_ail_tail(log->l_ailp) | 771 | case XLOG_STATE_COVER_DONE2: |
| 765 | && xlog_iclogs_empty(log)) { | 772 | case XLOG_STATE_COVER_IDLE: |
| 766 | if (log->l_covered_state == XLOG_STATE_COVER_NEED) | 773 | break; |
| 767 | log->l_covered_state = XLOG_STATE_COVER_DONE; | 774 | case XLOG_STATE_COVER_NEED: |
| 768 | else { | 775 | case XLOG_STATE_COVER_NEED2: |
| 769 | ASSERT(log->l_covered_state == XLOG_STATE_COVER_NEED2); | 776 | if (!xfs_trans_ail_tail(log->l_ailp) && |
| 770 | log->l_covered_state = XLOG_STATE_COVER_DONE2; | 777 | xlog_iclogs_empty(log)) { |
| 778 | if (log->l_covered_state == XLOG_STATE_COVER_NEED) | ||
| 779 | log->l_covered_state = XLOG_STATE_COVER_DONE; | ||
| 780 | else | ||
| 781 | log->l_covered_state = XLOG_STATE_COVER_DONE2; | ||
| 771 | } | 782 | } |
| 783 | /* FALLTHRU */ | ||
| 784 | default: | ||
| 772 | needed = 1; | 785 | needed = 1; |
| 786 | break; | ||
| 773 | } | 787 | } |
| 774 | spin_unlock(&log->l_icloglock); | 788 | spin_unlock(&log->l_icloglock); |
| 775 | return needed; | 789 | return needed; |
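
The rewritten xfs_log_need_covered() above is effectively a small state machine: the DONE, DONE2 and IDLE states need no further work, the NEED and NEED2 states advance to DONE/DONE2 once the AIL and iclogs are empty, and every state other than covered/idle tells the caller to issue a dummy transaction. A simplified, self-contained sketch of that decision logic (this is not the XFS code itself, just the shape of it):

#include <stdio.h>

enum cover_state { COVER_IDLE, COVER_NEED, COVER_DONE, COVER_NEED2, COVER_DONE2 };

/* Returns 1 when a dummy transaction should be issued to push the log
 * towards the idle state, 0 when the log is already covered or idle. */
static int need_covered(enum cover_state *state, int log_is_idle)
{
        switch (*state) {
        case COVER_DONE:
        case COVER_DONE2:
        case COVER_IDLE:
                return 0;                       /* nothing left to cover */
        case COVER_NEED:
        case COVER_NEED2:
                if (log_is_idle)                /* AIL empty and iclogs empty */
                        *state = (*state == COVER_NEED) ?
                                 COVER_DONE : COVER_DONE2;
                /* fall through: still ask for a dummy transaction this round */
        default:
                return 1;
        }
}

int main(void)
{
        enum cover_state s = COVER_NEED;

        printf("%d ", need_covered(&s, 1));     /* 1: covering work, state -> DONE */
        printf("%d\n", need_covered(&s, 1));    /* 0: now covered */
        return 0;
}
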
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 04a6ebc27b96..2d428b088cc8 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 6 | {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
| 7 | {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 7 | {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 8 | {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 8 | {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 9 | {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | ||
| 9 | {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ | 10 | {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ |
| 10 | {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ | 11 | {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ |
| 11 | {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \ | 12 | {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \ |
diff --git a/include/linux/ata.h b/include/linux/ata.h index b4c85e2adef5..700c5b9b3583 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h | |||
| @@ -1025,8 +1025,8 @@ static inline int ata_ok(u8 status) | |||
| 1025 | 1025 | ||
| 1026 | static inline int lba_28_ok(u64 block, u32 n_block) | 1026 | static inline int lba_28_ok(u64 block, u32 n_block) |
| 1027 | { | 1027 | { |
| 1028 | /* check the ending block number */ | 1028 | /* check the ending block number: must be LESS THAN 0x0fffffff */ |
| 1029 | return ((block + n_block) < ((u64)1 << 28)) && (n_block <= 256); | 1029 | return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256); |
| 1030 | } | 1030 | } |
| 1031 | 1031 | ||
| 1032 | static inline int lba_48_ok(u64 block, u32 n_block) | 1032 | static inline int lba_48_ok(u64 block, u32 n_block) |
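
The lba_28_ok() change above tightens the 28-bit LBA check: the ending block must now stay strictly below (1 << 28) - 1 rather than below 1 << 28, and a single command may still cover at most 256 sectors. The same check in a standalone form, for reference only:

#include <stdint.h>
#include <stdio.h>

/* A 28-bit LBA request is acceptable only if the ending block stays strictly
 * below 0x0FFFFFFF and no more than 256 sectors are asked for. */
static int lba_28_ok(uint64_t block, uint32_t n_block)
{
        return ((block + n_block) < ((1u << 28) - 1)) && (n_block <= 256);
}

int main(void)
{
        printf("%d\n", lba_28_ok(0, 256));              /* 1: small transfer at LBA 0 */
        printf("%d\n", lba_28_ok((1u << 28) - 2, 1));   /* 0: ending block hits the limit */
        printf("%d\n", lba_28_ok(0, 257));              /* 0: too many sectors */
        return 0;
}
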
diff --git a/include/linux/bitops.h b/include/linux/bitops.h index b79389879238..b796eab5ca75 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h | |||
| @@ -21,9 +21,6 @@ | |||
| 21 | (bit) < (size); \ | 21 | (bit) < (size); \ |
| 22 | (bit) = find_next_bit((addr), (size), (bit) + 1)) | 22 | (bit) = find_next_bit((addr), (size), (bit) + 1)) |
| 23 | 23 | ||
| 24 | /* Temporary */ | ||
| 25 | #define for_each_bit(bit, addr, size) for_each_set_bit(bit, addr, size) | ||
| 26 | |||
| 27 | static __inline__ int get_bitmask_order(unsigned int count) | 24 | static __inline__ int get_bitmask_order(unsigned int count) |
| 28 | { | 25 | { |
| 29 | int order; | 26 | int order; |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ebd22dbed861..6690e8bae7bb 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -158,7 +158,6 @@ enum rq_flag_bits { | |||
| 158 | struct request { | 158 | struct request { |
| 159 | struct list_head queuelist; | 159 | struct list_head queuelist; |
| 160 | struct call_single_data csd; | 160 | struct call_single_data csd; |
| 161 | int cpu; | ||
| 162 | 161 | ||
| 163 | struct request_queue *q; | 162 | struct request_queue *q; |
| 164 | 163 | ||
| @@ -166,9 +165,11 @@ struct request { | |||
| 166 | enum rq_cmd_type_bits cmd_type; | 165 | enum rq_cmd_type_bits cmd_type; |
| 167 | unsigned long atomic_flags; | 166 | unsigned long atomic_flags; |
| 168 | 167 | ||
| 168 | int cpu; | ||
| 169 | |||
| 169 | /* the following two fields are internal, NEVER access directly */ | 170 | /* the following two fields are internal, NEVER access directly */ |
| 170 | sector_t __sector; /* sector cursor */ | ||
| 171 | unsigned int __data_len; /* total data len */ | 171 | unsigned int __data_len; /* total data len */ |
| 172 | sector_t __sector; /* sector cursor */ | ||
| 172 | 173 | ||
| 173 | struct bio *bio; | 174 | struct bio *bio; |
| 174 | struct bio *biotail; | 175 | struct bio *biotail; |
| @@ -201,20 +202,20 @@ struct request { | |||
| 201 | 202 | ||
| 202 | unsigned short ioprio; | 203 | unsigned short ioprio; |
| 203 | 204 | ||
| 205 | int ref_count; | ||
| 206 | |||
| 204 | void *special; /* opaque pointer available for LLD use */ | 207 | void *special; /* opaque pointer available for LLD use */ |
| 205 | char *buffer; /* kaddr of the current segment if available */ | 208 | char *buffer; /* kaddr of the current segment if available */ |
| 206 | 209 | ||
| 207 | int tag; | 210 | int tag; |
| 208 | int errors; | 211 | int errors; |
| 209 | 212 | ||
| 210 | int ref_count; | ||
| 211 | |||
| 212 | /* | 213 | /* |
| 213 | * when request is used as a packet command carrier | 214 | * when request is used as a packet command carrier |
| 214 | */ | 215 | */ |
| 215 | unsigned short cmd_len; | ||
| 216 | unsigned char __cmd[BLK_MAX_CDB]; | 216 | unsigned char __cmd[BLK_MAX_CDB]; |
| 217 | unsigned char *cmd; | 217 | unsigned char *cmd; |
| 218 | unsigned short cmd_len; | ||
| 218 | 219 | ||
| 219 | unsigned int extra_len; /* length of alignment and padding */ | 220 | unsigned int extra_len; /* length of alignment and padding */ |
| 220 | unsigned int sense_len; | 221 | unsigned int sense_len; |
| @@ -921,26 +922,7 @@ extern void blk_cleanup_queue(struct request_queue *); | |||
| 921 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); | 922 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); |
| 922 | extern void blk_queue_bounce_limit(struct request_queue *, u64); | 923 | extern void blk_queue_bounce_limit(struct request_queue *, u64); |
| 923 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); | 924 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); |
| 924 | |||
| 925 | /* Temporary compatibility wrapper */ | ||
| 926 | static inline void blk_queue_max_sectors(struct request_queue *q, unsigned int max) | ||
| 927 | { | ||
| 928 | blk_queue_max_hw_sectors(q, max); | ||
| 929 | } | ||
| 930 | |||
| 931 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); | 925 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); |
| 932 | |||
| 933 | static inline void blk_queue_max_phys_segments(struct request_queue *q, unsigned short max) | ||
| 934 | { | ||
| 935 | blk_queue_max_segments(q, max); | ||
| 936 | } | ||
| 937 | |||
| 938 | static inline void blk_queue_max_hw_segments(struct request_queue *q, unsigned short max) | ||
| 939 | { | ||
| 940 | blk_queue_max_segments(q, max); | ||
| 941 | } | ||
| 942 | |||
| 943 | |||
| 944 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | 926 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); |
| 945 | extern void blk_queue_max_discard_sectors(struct request_queue *q, | 927 | extern void blk_queue_max_discard_sectors(struct request_queue *q, |
| 946 | unsigned int max_discard_sectors); | 928 | unsigned int max_discard_sectors); |
| @@ -1030,11 +1012,6 @@ static inline int sb_issue_discard(struct super_block *sb, | |||
| 1030 | 1012 | ||
| 1031 | extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); | 1013 | extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); |
| 1032 | 1014 | ||
| 1033 | #define MAX_PHYS_SEGMENTS 128 | ||
| 1034 | #define MAX_HW_SEGMENTS 128 | ||
| 1035 | #define SAFE_MAX_SECTORS 255 | ||
| 1036 | #define MAX_SEGMENT_SIZE 65536 | ||
| 1037 | |||
| 1038 | enum blk_default_limits { | 1015 | enum blk_default_limits { |
| 1039 | BLK_MAX_SEGMENTS = 128, | 1016 | BLK_MAX_SEGMENTS = 128, |
| 1040 | BLK_SAFE_MAX_SECTORS = 255, | 1017 | BLK_SAFE_MAX_SECTORS = 255, |
diff --git a/include/linux/drbd.h b/include/linux/drbd.h index 78962272338a..4341b1a97a34 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h | |||
| @@ -56,7 +56,7 @@ extern const char *drbd_buildtag(void); | |||
| 56 | #define REL_VERSION "8.3.7" | 56 | #define REL_VERSION "8.3.7" |
| 57 | #define API_VERSION 88 | 57 | #define API_VERSION 88 |
| 58 | #define PRO_VERSION_MIN 86 | 58 | #define PRO_VERSION_MIN 86 |
| 59 | #define PRO_VERSION_MAX 91 | 59 | #define PRO_VERSION_MAX 92 |
| 60 | 60 | ||
| 61 | 61 | ||
| 62 | enum drbd_io_error_p { | 62 | enum drbd_io_error_p { |
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h index a4d82f895994..f7431a4ca608 100644 --- a/include/linux/drbd_nl.h +++ b/include/linux/drbd_nl.h | |||
| @@ -12,7 +12,7 @@ | |||
| 12 | #endif | 12 | #endif |
| 13 | 13 | ||
| 14 | NL_PACKET(primary, 1, | 14 | NL_PACKET(primary, 1, |
| 15 | NL_BIT( 1, T_MAY_IGNORE, overwrite_peer) | 15 | NL_BIT( 1, T_MAY_IGNORE, primary_force) |
| 16 | ) | 16 | ) |
| 17 | 17 | ||
| 18 | NL_PACKET(secondary, 2, ) | 18 | NL_PACKET(secondary, 2, ) |
| @@ -63,6 +63,7 @@ NL_PACKET(net_conf, 5, | |||
| 63 | NL_BIT( 41, T_MAY_IGNORE, always_asbp) | 63 | NL_BIT( 41, T_MAY_IGNORE, always_asbp) |
| 64 | NL_BIT( 61, T_MAY_IGNORE, no_cork) | 64 | NL_BIT( 61, T_MAY_IGNORE, no_cork) |
| 65 | NL_BIT( 62, T_MANDATORY, auto_sndbuf_size) | 65 | NL_BIT( 62, T_MANDATORY, auto_sndbuf_size) |
| 66 | NL_BIT( 70, T_MANDATORY, dry_run) | ||
| 66 | ) | 67 | ) |
| 67 | 68 | ||
| 68 | NL_PACKET(disconnect, 6, ) | 69 | NL_PACKET(disconnect, 6, ) |
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 40b11013408e..81f3b14d5d76 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h | |||
| @@ -1,21 +1,26 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Char device interface. | 2 | * Char device interface. |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net> | 4 | * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 7 | * it under the terms of the GNU General Public License as published by | 7 | * copy of this software and associated documentation files (the "Software"), |
| 8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * to deal in the Software without restriction, including without limitation |
| 9 | * (at your option) any later version. | 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 10 | * | 10 | * and/or sell copies of the Software, and to permit persons to whom the |
| 11 | * This program is distributed in the hope that it will be useful, | 11 | * Software is furnished to do so, subject to the following conditions: |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * The above copyright notice and this permission notice (including the next |
| 14 | * GNU General Public License for more details. | 14 | * paragraph) shall be included in all copies or substantial portions of the |
| 15 | * | 15 | * Software. |
| 16 | * You should have received a copy of the GNU General Public License | 16 | * |
| 17 | * along with this program; if not, write to the Free Software Foundation, | 17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 20 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 21 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 22 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 23 | * DEALINGS IN THE SOFTWARE. | ||
| 19 | */ | 24 | */ |
| 20 | 25 | ||
| 21 | #ifndef _LINUX_FIREWIRE_CDEV_H | 26 | #ifndef _LINUX_FIREWIRE_CDEV_H |
| @@ -438,7 +443,7 @@ struct fw_cdev_remove_descriptor { | |||
| 438 | * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE | 443 | * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE |
| 439 | * @header_size: Header size to strip for receive contexts | 444 | * @header_size: Header size to strip for receive contexts |
| 440 | * @channel: Channel to bind to | 445 | * @channel: Channel to bind to |
| 441 | * @speed: Speed to transmit at | 446 | * @speed: Speed for transmit contexts |
| 442 | * @closure: To be returned in &fw_cdev_event_iso_interrupt | 447 | * @closure: To be returned in &fw_cdev_event_iso_interrupt |
| 443 | * @handle: Handle to context, written back by kernel | 448 | * @handle: Handle to context, written back by kernel |
| 444 | * | 449 | * |
| @@ -451,6 +456,9 @@ struct fw_cdev_remove_descriptor { | |||
| 451 | * If a context was successfully created, the kernel writes back a handle to the | 456 | * If a context was successfully created, the kernel writes back a handle to the |
| 452 | * context, which must be passed in for subsequent operations on that context. | 457 | * context, which must be passed in for subsequent operations on that context. |
| 453 | * | 458 | * |
| 459 | * For receive contexts, @header_size must be at least 4 and must be a multiple | ||
| 460 | * of 4. | ||
| 461 | * | ||
| 454 | * Note that the effect of a @header_size > 4 depends on | 462 | * Note that the effect of a @header_size > 4 depends on |
| 455 | * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt. | 463 | * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt. |
| 456 | */ | 464 | */ |
| @@ -481,10 +489,34 @@ struct fw_cdev_create_iso_context { | |||
| 481 | * | 489 | * |
| 482 | * &struct fw_cdev_iso_packet is used to describe isochronous packet queues. | 490 | * &struct fw_cdev_iso_packet is used to describe isochronous packet queues. |
| 483 | * | 491 | * |
| 484 | * Use the FW_CDEV_ISO_ macros to fill in @control. The sy and tag fields are | 492 | * Use the FW_CDEV_ISO_ macros to fill in @control. |
| 485 | * specified by IEEE 1394a and IEC 61883. | 493 | * |
| 486 | * | 494 | * For transmit packets, the header length must be a multiple of 4 and specifies |
| 487 | * FIXME - finish this documentation | 495 | * the numbers of bytes in @header that will be prepended to the packet's |
| 496 | * payload; these bytes are copied into the kernel and will not be accessed | ||
| 497 | * after the ioctl has returned. The sy and tag fields are copied to the iso | ||
| 498 | * packet header (these fields are specified by IEEE 1394a and IEC 61883-1). | ||
| 499 | * The skip flag specifies that no packet is to be sent in a frame; when using | ||
| 500 | * this, all other fields except the interrupt flag must be zero. | ||
| 501 | * | ||
| 502 | * For receive packets, the header length must be a multiple of the context's | ||
| 503 | * header size; if the header length is larger than the context's header size, | ||
| 504 | * multiple packets are queued for this entry. The sy and tag fields are | ||
| 505 | * ignored. If the sync flag is set, the context drops all packets until | ||
| 506 | * a packet with a matching sy field is received (the sync value to wait for is | ||
| 507 | * specified in the &fw_cdev_start_iso structure). The payload length defines | ||
| 508 | * how many payload bytes can be received for one packet (in addition to payload | ||
| 509 | * quadlets that have been defined as headers and are stripped and returned in | ||
| 510 | * the &fw_cdev_event_iso_interrupt structure). If more bytes are received, the | ||
| 511 | * additional bytes are dropped. If fewer bytes are received, the remaining | ||
| 512 | * bytes in this part of the payload buffer will not be written to, not even by | ||
| 513 | * the next packet, i.e., packets received in consecutive frames will not | ||
| 514 | * necessarily be consecutive in memory. If an entry has queued multiple | ||
| 515 | * packets, the payload length is divided equally among them. | ||
| 516 | * | ||
| 517 | * When a packet with the interrupt flag set has been completed, the | ||
| 518 | * &fw_cdev_event_iso_interrupt event will be sent. An entry that has queued | ||
| 519 | * multiple receive packets is completed when its last packet is completed. | ||
| 488 | */ | 520 | */ |
| 489 | struct fw_cdev_iso_packet { | 521 | struct fw_cdev_iso_packet { |
| 490 | __u32 control; | 522 | __u32 control; |
| @@ -501,7 +533,7 @@ struct fw_cdev_iso_packet { | |||
| 501 | * Queue a number of isochronous packets for reception or transmission. | 533 | * Queue a number of isochronous packets for reception or transmission. |
| 502 | * This ioctl takes a pointer to an array of &fw_cdev_iso_packet structs, | 534 | * This ioctl takes a pointer to an array of &fw_cdev_iso_packet structs, |
| 503 | * which describe how to transmit from or receive into a contiguous region | 535 | * which describe how to transmit from or receive into a contiguous region |
| 504 | * of a mmap()'ed payload buffer. As part of the packet descriptors, | 536 | * of a mmap()'ed payload buffer. As part of transmit packet descriptors, |
| 505 | * a series of headers can be supplied, which will be prepended to the | 537 | * a series of headers can be supplied, which will be prepended to the |
| 506 | * payload during DMA. | 538 | * payload during DMA. |
| 507 | * | 539 | * |
| @@ -620,8 +652,8 @@ struct fw_cdev_get_cycle_timer2 { | |||
| 620 | * instead of allocated. | 652 | * instead of allocated. |
| 621 | * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. | 653 | * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. |
| 622 | * | 654 | * |
| 623 | * To summarize, %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE allocates iso resources | 655 | * To summarize, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE allocates iso resources |
| 624 | * for the lifetime of the fd or handle. | 656 | * for the lifetime of the fd or @handle. |
| 625 | * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources | 657 | * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources |
| 626 | * for the duration of a bus generation. | 658 | * for the duration of a bus generation. |
| 627 | * | 659 | * |
diff --git a/include/linux/firewire-constants.h b/include/linux/firewire-constants.h index b316770a43fd..9c63f06e67f2 100644 --- a/include/linux/firewire-constants.h +++ b/include/linux/firewire-constants.h | |||
| @@ -1,3 +1,28 @@ | |||
| 1 | /* | ||
| 2 | * IEEE 1394 constants. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice (including the next | ||
| 14 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 15 | * Software. | ||
| 16 | * | ||
| 17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 20 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 21 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 22 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 23 | * DEALINGS IN THE SOFTWARE. | ||
| 24 | */ | ||
| 25 | |||
| 1 | #ifndef _LINUX_FIREWIRE_CONSTANTS_H | 26 | #ifndef _LINUX_FIREWIRE_CONSTANTS_H |
| 2 | #define _LINUX_FIREWIRE_CONSTANTS_H | 27 | #define _LINUX_FIREWIRE_CONSTANTS_H |
| 3 | 28 | ||
| @@ -21,7 +46,7 @@ | |||
| 21 | #define EXTCODE_WRAP_ADD 0x6 | 46 | #define EXTCODE_WRAP_ADD 0x6 |
| 22 | #define EXTCODE_VENDOR_DEPENDENT 0x7 | 47 | #define EXTCODE_VENDOR_DEPENDENT 0x7 |
| 23 | 48 | ||
| 24 | /* Juju specific tcodes */ | 49 | /* Linux firewire-core (Juju) specific tcodes */ |
| 25 | #define TCODE_LOCK_MASK_SWAP (0x10 | EXTCODE_MASK_SWAP) | 50 | #define TCODE_LOCK_MASK_SWAP (0x10 | EXTCODE_MASK_SWAP) |
| 26 | #define TCODE_LOCK_COMPARE_SWAP (0x10 | EXTCODE_COMPARE_SWAP) | 51 | #define TCODE_LOCK_COMPARE_SWAP (0x10 | EXTCODE_COMPARE_SWAP) |
| 27 | #define TCODE_LOCK_FETCH_ADD (0x10 | EXTCODE_FETCH_ADD) | 52 | #define TCODE_LOCK_FETCH_ADD (0x10 | EXTCODE_FETCH_ADD) |
| @@ -36,7 +61,7 @@ | |||
| 36 | #define RCODE_TYPE_ERROR 0x6 | 61 | #define RCODE_TYPE_ERROR 0x6 |
| 37 | #define RCODE_ADDRESS_ERROR 0x7 | 62 | #define RCODE_ADDRESS_ERROR 0x7 |
| 38 | 63 | ||
| 39 | /* Juju specific rcodes */ | 64 | /* Linux firewire-core (Juju) specific rcodes */ |
| 40 | #define RCODE_SEND_ERROR 0x10 | 65 | #define RCODE_SEND_ERROR 0x10 |
| 41 | #define RCODE_CANCELLED 0x11 | 66 | #define RCODE_CANCELLED 0x11 |
| 42 | #define RCODE_BUSY 0x12 | 67 | #define RCODE_BUSY 0x12 |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 10b8dedcd18b..39d57bc6cc71 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -2212,6 +2212,7 @@ extern int generic_segment_checks(const struct iovec *iov, | |||
| 2212 | /* fs/block_dev.c */ | 2212 | /* fs/block_dev.c */ |
| 2213 | extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, | 2213 | extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, |
| 2214 | unsigned long nr_segs, loff_t pos); | 2214 | unsigned long nr_segs, loff_t pos); |
| 2215 | extern int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync); | ||
| 2215 | 2216 | ||
| 2216 | /* fs/splice.c */ | 2217 | /* fs/splice.c */ |
| 2217 | extern ssize_t generic_file_splice_read(struct file *, loff_t *, | 2218 | extern ssize_t generic_file_splice_read(struct file *, loff_t *, |
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 56b50514ab25..5f2f4c4d8fb0 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
| @@ -109,7 +109,7 @@ struct hd_struct { | |||
| 109 | }; | 109 | }; |
| 110 | 110 | ||
| 111 | #define GENHD_FL_REMOVABLE 1 | 111 | #define GENHD_FL_REMOVABLE 1 |
| 112 | #define GENHD_FL_DRIVERFS 2 | 112 | /* 2 is unused */ |
| 113 | #define GENHD_FL_MEDIA_CHANGE_NOTIFY 4 | 113 | #define GENHD_FL_MEDIA_CHANGE_NOTIFY 4 |
| 114 | #define GENHD_FL_CD 8 | 114 | #define GENHD_FL_CD 8 |
| 115 | #define GENHD_FL_UP 16 | 115 | #define GENHD_FL_UP 16 |
diff --git a/include/linux/i2o.h b/include/linux/i2o.h index 87018dc5527d..9e7a12d6385d 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h | |||
| @@ -782,7 +782,6 @@ extern int i2o_exec_lct_get(struct i2o_controller *); | |||
| 782 | #define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver) | 782 | #define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver) |
| 783 | #define to_i2o_device(dev) container_of(dev, struct i2o_device, device) | 783 | #define to_i2o_device(dev) container_of(dev, struct i2o_device, device) |
| 784 | #define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device) | 784 | #define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device) |
| 785 | #define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj)) | ||
| 786 | 785 | ||
| 787 | /** | 786 | /** |
| 788 | * i2o_out_to_virt - Turn an I2O message to a virtual address | 787 | * i2o_out_to_virt - Turn an I2O message to a virtual address |
diff --git a/include/linux/ide.h b/include/linux/ide.h index 97e6ab435184..3239d1c10acb 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
| @@ -1169,6 +1169,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout); | |||
| 1169 | extern void ide_timer_expiry(unsigned long); | 1169 | extern void ide_timer_expiry(unsigned long); |
| 1170 | extern irqreturn_t ide_intr(int irq, void *dev_id); | 1170 | extern irqreturn_t ide_intr(int irq, void *dev_id); |
| 1171 | extern void do_ide_request(struct request_queue *); | 1171 | extern void do_ide_request(struct request_queue *); |
| 1172 | extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq); | ||
| 1172 | 1173 | ||
| 1173 | void ide_init_disk(struct gendisk *, ide_drive_t *); | 1174 | void ide_init_disk(struct gendisk *, ide_drive_t *); |
| 1174 | 1175 | ||
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h index 3bd018baae20..c964cd7f436a 100644 --- a/include/linux/input/matrix_keypad.h +++ b/include/linux/input/matrix_keypad.h | |||
| @@ -44,6 +44,7 @@ struct matrix_keymap_data { | |||
| 44 | * @active_low: gpio polarity | 44 | * @active_low: gpio polarity |
| 45 | * @wakeup: controls whether the device should be set up as wakeup | 45 | * @wakeup: controls whether the device should be set up as wakeup |
| 46 | * source | 46 | * source |
| 47 | * @no_autorepeat: disable key autorepeat | ||
| 47 | * | 48 | * |
| 48 | * This structure represents platform-specific data that is used by | 49 | * This structure represents platform-specific data that is used by |
| 49 | * matrix_keypad driver to perform proper initialization. | 50 | * matrix_keypad driver to perform proper initialization. |
| @@ -64,6 +65,7 @@ struct matrix_keypad_platform_data { | |||
| 64 | 65 | ||
| 65 | bool active_low; | 66 | bool active_low; |
| 66 | bool wakeup; | 67 | bool wakeup; |
| 68 | bool no_autorepeat; | ||
| 67 | }; | 69 | }; |
| 68 | 70 | ||
| 69 | /** | 71 | /** |
diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h index 6092487e2950..d2e4042f8f5e 100644 --- a/include/linux/iscsi_ibft.h +++ b/include/linux/iscsi_ibft.h | |||
| @@ -42,9 +42,13 @@ extern struct ibft_table_header *ibft_addr; | |||
| 42 | * mapped address is set in the ibft_addr variable. | 42 | * mapped address is set in the ibft_addr variable. |
| 43 | */ | 43 | */ |
| 44 | #ifdef CONFIG_ISCSI_IBFT_FIND | 44 | #ifdef CONFIG_ISCSI_IBFT_FIND |
| 45 | extern void __init reserve_ibft_region(void); | 45 | unsigned long find_ibft_region(unsigned long *sizep); |
| 46 | #else | 46 | #else |
| 47 | static inline void reserve_ibft_region(void) { } | 47 | static inline unsigned long find_ibft_region(unsigned long *sizep) |
| 48 | { | ||
| 49 | *sizep = 0; | ||
| 50 | return 0; | ||
| 51 | } | ||
| 48 | #endif | 52 | #endif |
| 49 | 53 | ||
| 50 | #endif /* ISCSI_IBFT_H */ | 54 | #endif /* ISCSI_IBFT_H */ |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 7f0707463360..9365227dbaf6 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -426,7 +426,7 @@ static inline char *pack_hex_byte(char *buf, u8 byte) | |||
| 426 | .burst = DEFAULT_RATELIMIT_BURST, \ | 426 | .burst = DEFAULT_RATELIMIT_BURST, \ |
| 427 | }; \ | 427 | }; \ |
| 428 | \ | 428 | \ |
| 429 | if (!__ratelimit(&_rs)) \ | 429 | if (__ratelimit(&_rs)) \ |
| 430 | printk(fmt, ##__VA_ARGS__); \ | 430 | printk(fmt, ##__VA_ARGS__); \ |
| 431 | }) | 431 | }) |
| 432 | #else | 432 | #else |
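
The printk_ratelimited() fix above hinges on the return convention of __ratelimit(): it returns non-zero when the callsite is still within its rate budget, i.e. when the message should be printed, so the old `if (!__ratelimit(...))` suppressed exactly the messages it was meant to allow. A tiny userspace rate limiter with the same "true means go ahead" convention (struct rl and ratelimit_ok are made-up names):

#include <stdio.h>
#include <time.h>

struct rl {
        time_t window_start;    /* start of the current interval */
        int interval;           /* seconds per window */
        int burst;              /* messages allowed per window */
        int used;               /* messages emitted in this window */
};

/* Returns non-zero when the caller may emit a message now (same sense as
 * __ratelimit()), zero when the event should be suppressed. */
static int ratelimit_ok(struct rl *r)
{
        time_t now = time(NULL);

        if (now - r->window_start >= r->interval) {
                r->window_start = now;          /* new window: reset the budget */
                r->used = 0;
        }
        if (r->used < r->burst) {
                r->used++;
                return 1;
        }
        return 0;
}

int main(void)
{
        struct rl r = { .window_start = 0, .interval = 5, .burst = 3, .used = 0 };
        int i;

        for (i = 0; i < 6; i++)
                if (ratelimit_ok(&r))           /* note: not "if (!ratelimit_ok(...))" */
                        printf("message %d\n", i);
        return 0;
}
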
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index ece0b1c33816..e117b1aee69c 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h | |||
| @@ -86,7 +86,8 @@ union { \ | |||
| 86 | */ | 86 | */ |
| 87 | #define INIT_KFIFO(name) \ | 87 | #define INIT_KFIFO(name) \ |
| 88 | name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \ | 88 | name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \ |
| 89 | sizeof(struct kfifo), name##kfifo_buffer) | 89 | sizeof(struct kfifo), \ |
| 90 | name##kfifo_buffer + sizeof(struct kfifo)) | ||
| 90 | 91 | ||
| 91 | /** | 92 | /** |
| 92 | * DEFINE_KFIFO - macro to define and initialize a kfifo | 93 | * DEFINE_KFIFO - macro to define and initialize a kfifo |
diff --git a/include/linux/lcm.h b/include/linux/lcm.h new file mode 100644 index 000000000000..7bf01d779b45 --- /dev/null +++ b/include/linux/lcm.h | |||
| @@ -0,0 +1,8 @@ | |||
| 1 | #ifndef _LCM_H | ||
| 2 | #define _LCM_H | ||
| 3 | |||
| 4 | #include <linux/compiler.h> | ||
| 5 | |||
| 6 | unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__; | ||
| 7 | |||
| 8 | #endif /* _LCM_H */ | ||
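
The new <linux/lcm.h> header above only declares lcm(); the usual way to implement it is via the greatest common divisor, dividing before multiplying so the intermediate value stays small. A self-contained version of that approach (it may differ in detail from the kernel's lib/lcm.c):

#include <stdio.h>

/* Euclid's algorithm. */
static unsigned long gcd(unsigned long a, unsigned long b)
{
        while (b) {
                unsigned long t = a % b;

                a = b;
                b = t;
        }
        return a;
}

static unsigned long lcm(unsigned long a, unsigned long b)
{
        if (a == 0 || b == 0)
                return 0;
        return (a / gcd(a, b)) * b;     /* divide first to limit overflow */
}

int main(void)
{
        printf("lcm(4, 6) = %lu\n", lcm(4, 6));           /* 12 */
        printf("lcm(512, 4096) = %lu\n", lcm(512, 4096)); /* 4096 */
        return 0;
}
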
diff --git a/include/linux/libata.h b/include/linux/libata.h index f8ea71e6d0e2..b2f2003b92e5 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
| @@ -146,6 +146,7 @@ enum { | |||
| 146 | ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ | 146 | ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ |
| 147 | ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */ | 147 | ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */ |
| 148 | ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ | 148 | ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ |
| 149 | ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */ | ||
| 149 | ATA_DFLAG_INIT_MASK = (1 << 24) - 1, | 150 | ATA_DFLAG_INIT_MASK = (1 << 24) - 1, |
| 150 | 151 | ||
| 151 | ATA_DFLAG_DETACH = (1 << 24), | 152 | ATA_DFLAG_DETACH = (1 << 24), |
diff --git a/include/linux/mm.h b/include/linux/mm.h index e70f21beb4b4..462acaf36f3a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -783,8 +783,8 @@ struct mm_walk { | |||
| 783 | int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); | 783 | int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); |
| 784 | int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); | 784 | int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); |
| 785 | int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *); | 785 | int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *); |
| 786 | int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long, | 786 | int (*hugetlb_entry)(pte_t *, unsigned long, |
| 787 | struct mm_walk *); | 787 | unsigned long, unsigned long, struct mm_walk *); |
| 788 | struct mm_struct *mm; | 788 | struct mm_struct *mm; |
| 789 | void *private; | 789 | void *private; |
| 790 | }; | 790 | }; |
diff --git a/include/linux/module.h b/include/linux/module.h index 8bd399a00343..515d53ae6a79 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
| @@ -368,7 +368,8 @@ struct module | |||
| 368 | void (*exit)(void); | 368 | void (*exit)(void); |
| 369 | 369 | ||
| 370 | struct module_ref { | 370 | struct module_ref { |
| 371 | int count; | 371 | unsigned int incs; |
| 372 | unsigned int decs; | ||
| 372 | } __percpu *refptr; | 373 | } __percpu *refptr; |
| 373 | #endif | 374 | #endif |
| 374 | 375 | ||
| @@ -463,9 +464,9 @@ static inline void __module_get(struct module *module) | |||
| 463 | { | 464 | { |
| 464 | if (module) { | 465 | if (module) { |
| 465 | preempt_disable(); | 466 | preempt_disable(); |
| 466 | __this_cpu_inc(module->refptr->count); | 467 | __this_cpu_inc(module->refptr->incs); |
| 467 | trace_module_get(module, _THIS_IP_, | 468 | trace_module_get(module, _THIS_IP_, |
| 468 | __this_cpu_read(module->refptr->count)); | 469 | __this_cpu_read(module->refptr->incs)); |
| 469 | preempt_enable(); | 470 | preempt_enable(); |
| 470 | } | 471 | } |
| 471 | } | 472 | } |
| @@ -478,11 +479,10 @@ static inline int try_module_get(struct module *module) | |||
| 478 | preempt_disable(); | 479 | preempt_disable(); |
| 479 | 480 | ||
| 480 | if (likely(module_is_live(module))) { | 481 | if (likely(module_is_live(module))) { |
| 481 | __this_cpu_inc(module->refptr->count); | 482 | __this_cpu_inc(module->refptr->incs); |
| 482 | trace_module_get(module, _THIS_IP_, | 483 | trace_module_get(module, _THIS_IP_, |
| 483 | __this_cpu_read(module->refptr->count)); | 484 | __this_cpu_read(module->refptr->incs)); |
| 484 | } | 485 | } else |
| 485 | else | ||
| 486 | ret = 0; | 486 | ret = 0; |
| 487 | 487 | ||
| 488 | preempt_enable(); | 488 | preempt_enable(); |
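A minimal sketch of the calling pattern that the incs/decs split above leaves unchanged; struct backend and its handle() callback are hypothetical stand-ins for any module-provided object, purely to show where the get/put pair belongs.

#include <linux/module.h>
#include <linux/errno.h>

/* Hypothetical types, only to show where the get/put pair belongs. */
struct backend {
        struct module *owner;
        int (*handle)(struct backend *be);
};

static int invoke_backend(struct backend *be)
{
        int ret;

        if (!try_module_get(be->owner))         /* owner is being unloaded */
                return -ENODEV;
        ret = be->handle(be);                   /* module pinned across the call */
        module_put(be->owner);                  /* drop the reference */
        return ret;
}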
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 717a5e54eb1d..e82957acea56 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
| @@ -176,6 +176,7 @@ struct nfs_server { | |||
| 176 | #define NFS_CAP_ATIME (1U << 11) | 176 | #define NFS_CAP_ATIME (1U << 11) |
| 177 | #define NFS_CAP_CTIME (1U << 12) | 177 | #define NFS_CAP_CTIME (1U << 12) |
| 178 | #define NFS_CAP_MTIME (1U << 13) | 178 | #define NFS_CAP_MTIME (1U << 13) |
| 179 | #define NFS_CAP_POSIX_LOCK (1U << 14) | ||
| 179 | 180 | ||
| 180 | 181 | ||
| 181 | /* maximum number of slots to use */ | 182 | /* maximum number of slots to use */ |
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 30b08136fdf3..aef22ae2af47 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h | |||
| @@ -39,6 +39,7 @@ enum { | |||
| 39 | PCG_CACHE, /* charged as cache */ | 39 | PCG_CACHE, /* charged as cache */ |
| 40 | PCG_USED, /* this object is in use. */ | 40 | PCG_USED, /* this object is in use. */ |
| 41 | PCG_ACCT_LRU, /* page has been accounted for */ | 41 | PCG_ACCT_LRU, /* page has been accounted for */ |
| 42 | PCG_FILE_MAPPED, /* page is accounted as "mapped" */ | ||
| 42 | }; | 43 | }; |
| 43 | 44 | ||
| 44 | #define TESTPCGFLAG(uname, lname) \ | 45 | #define TESTPCGFLAG(uname, lname) \ |
| @@ -73,6 +74,11 @@ CLEARPCGFLAG(AcctLRU, ACCT_LRU) | |||
| 73 | TESTPCGFLAG(AcctLRU, ACCT_LRU) | 74 | TESTPCGFLAG(AcctLRU, ACCT_LRU) |
| 74 | TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU) | 75 | TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU) |
| 75 | 76 | ||
| 77 | |||
| 78 | SETPCGFLAG(FileMapped, FILE_MAPPED) | ||
| 79 | CLEARPCGFLAG(FileMapped, FILE_MAPPED) | ||
| 80 | TESTPCGFLAG(FileMapped, FILE_MAPPED) | ||
| 81 | |||
| 76 | static inline int page_cgroup_nid(struct page_cgroup *pc) | 82 | static inline int page_cgroup_nid(struct page_cgroup *pc) |
| 77 | { | 83 | { |
| 78 | return page_to_nid(pc->page); | 84 | return page_to_nid(pc->page); |
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index c5da74918096..55ca73cf25e5 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
| @@ -121,6 +121,13 @@ do { \ | |||
| 121 | * (Note, rcu_assign_pointer and rcu_dereference are not needed to control | 121 | * (Note, rcu_assign_pointer and rcu_dereference are not needed to control |
| 122 | * access to data items when inserting into or looking up from the radix tree) | 122 | * access to data items when inserting into or looking up from the radix tree) |
| 123 | * | 123 | * |
| 124 | * Note that the value returned by radix_tree_tag_get() may not be relied upon | ||
| 125 | * if only the RCU read lock is held. Functions to set/clear tags and to | ||
| 126 | * delete nodes running concurrently with it may affect its result such that | ||
| 127 | * two consecutive reads in the same locked section may return different | ||
| 128 | * values. If reliability is required, modification functions must also be | ||
| 129 | * excluded from concurrency. | ||
| 130 | * | ||
| 124 | * radix_tree_tagged is able to be called without locking or RCU. | 131 | * radix_tree_tagged is able to be called without locking or RCU. |
| 125 | */ | 132 | */ |
| 126 | 133 | ||
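A hedged sketch of the caveat documented above: a caller that needs a dependable tag value must hold the same lock the writers use, so tag updates and deletions are excluded for the duration of the query. The page-cache flavour below assumes the usual mapping->tree_lock discipline.

#include <linux/fs.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

/* Reliable query: exclude concurrent tag changes and deletions by
 * holding the lock that writers of this tree already take. */
static int page_index_is_dirty(struct address_space *mapping, pgoff_t index)
{
        int tagged;

        spin_lock_irq(&mapping->tree_lock);
        tagged = radix_tree_tag_get(&mapping->page_tree, index,
                                    PAGECACHE_TAG_DIRTY);
        spin_unlock_irq(&mapping->tree_lock);
        return tagged;
}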
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 872a98e13d6a..07db2feb8572 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
| @@ -101,10 +101,7 @@ extern struct lockdep_map rcu_sched_lock_map; | |||
| 101 | # define rcu_read_release_sched() \ | 101 | # define rcu_read_release_sched() \ |
| 102 | lock_release(&rcu_sched_lock_map, 1, _THIS_IP_) | 102 | lock_release(&rcu_sched_lock_map, 1, _THIS_IP_) |
| 103 | 103 | ||
| 104 | static inline int debug_lockdep_rcu_enabled(void) | 104 | extern int debug_lockdep_rcu_enabled(void); |
| 105 | { | ||
| 106 | return likely(rcu_scheduler_active && debug_locks); | ||
| 107 | } | ||
| 108 | 105 | ||
| 109 | /** | 106 | /** |
| 110 | * rcu_read_lock_held - might we be in RCU read-side critical section? | 107 | * rcu_read_lock_held - might we be in RCU read-side critical section? |
| @@ -195,12 +192,30 @@ static inline int rcu_read_lock_sched_held(void) | |||
| 195 | 192 | ||
| 196 | /** | 193 | /** |
| 197 | * rcu_dereference_check - rcu_dereference with debug checking | 194 | * rcu_dereference_check - rcu_dereference with debug checking |
| 195 | * @p: The pointer to read, prior to dereferencing | ||
| 196 | * @c: The conditions under which the dereference will take place | ||
| 197 | * | ||
| 198 | * Do an rcu_dereference(), but check that the conditions under which the | ||
| 199 | * dereference will take place are correct. Typically the conditions indicate | ||
| 200 | * the various locking conditions that should be held at that point. The check | ||
| 201 | * should return true if the conditions are satisfied. | ||
| 202 | * | ||
| 203 | * For example: | ||
| 204 | * | ||
| 205 | * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || | ||
| 206 | * lockdep_is_held(&foo->lock)); | ||
| 198 | * | 207 | * |
| 199 | * Do an rcu_dereference(), but check that the context is correct. | 208 | * could be used to indicate to lockdep that foo->bar may only be dereferenced |
| 200 | * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to | 209 | * if either the RCU read lock is held, or the lock required to replace |
| 201 | * ensure that the rcu_dereference_check() executes within an RCU | 210 | * the bar struct at foo->bar is held. |
| 202 | * read-side critical section. It is also possible to check for | 211 | * |
| 203 | * locks being held, for example, by using lockdep_is_held(). | 212 | * Note that the list of conditions may also include indications of when a lock |
| 213 | * need not be held, for example during initialisation or destruction of the | ||
| 214 | * target struct: | ||
| 215 | * | ||
| 216 | * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || | ||
| 217 | * lockdep_is_held(&foo->lock) || | ||
| 218 | * atomic_read(&foo->usage) == 0); | ||
| 204 | */ | 219 | */ |
| 205 | #define rcu_dereference_check(p, c) \ | 220 | #define rcu_dereference_check(p, c) \ |
| 206 | ({ \ | 221 | ({ \ |
| @@ -209,13 +224,45 @@ static inline int rcu_read_lock_sched_held(void) | |||
| 209 | rcu_dereference_raw(p); \ | 224 | rcu_dereference_raw(p); \ |
| 210 | }) | 225 | }) |
| 211 | 226 | ||
| 227 | /** | ||
| 228 | * rcu_dereference_protected - fetch RCU pointer when updates prevented | ||
| 229 | * | ||
| 230 | * Return the value of the specified RCU-protected pointer, but omit | ||
| 231 | * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This | ||
| 232 | * is useful in cases where update-side locks prevent the value of the | ||
| 233 | * pointer from changing. Please note that this primitive does -not- | ||
| 234 | * prevent the compiler from repeating this reference or combining it | ||
| 235 | * with other references, so it should not be used without protection | ||
| 236 | * of appropriate locks. | ||
| 237 | */ | ||
| 238 | #define rcu_dereference_protected(p, c) \ | ||
| 239 | ({ \ | ||
| 240 | if (debug_lockdep_rcu_enabled() && !(c)) \ | ||
| 241 | lockdep_rcu_dereference(__FILE__, __LINE__); \ | ||
| 242 | (p); \ | ||
| 243 | }) | ||
| 244 | |||
| 212 | #else /* #ifdef CONFIG_PROVE_RCU */ | 245 | #else /* #ifdef CONFIG_PROVE_RCU */ |
| 213 | 246 | ||
| 214 | #define rcu_dereference_check(p, c) rcu_dereference_raw(p) | 247 | #define rcu_dereference_check(p, c) rcu_dereference_raw(p) |
| 248 | #define rcu_dereference_protected(p, c) (p) | ||
| 215 | 249 | ||
| 216 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ | 250 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ |
| 217 | 251 | ||
| 218 | /** | 252 | /** |
| 253 | * rcu_access_pointer - fetch RCU pointer with no dereferencing | ||
| 254 | * | ||
| 255 | * Return the value of the specified RCU-protected pointer, but omit the | ||
| 256 | * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful | ||
| 257 | * when the value of this pointer is accessed, but the pointer is not | ||
| 258 | * dereferenced, for example, when testing an RCU-protected pointer against | ||
| 259 | * NULL. This may also be used in cases where update-side locks prevent | ||
| 260 | * the value of the pointer from changing, but rcu_dereference_protected() | ||
| 261 | * is a lighter-weight primitive for this use case. | ||
| 262 | */ | ||
| 263 | #define rcu_access_pointer(p) ACCESS_ONCE(p) | ||
| 264 | |||
| 265 | /** | ||
| 219 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. | 266 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. |
| 220 | * | 267 | * |
| 221 | * When synchronize_rcu() is invoked on one CPU while other CPUs | 268 | * When synchronize_rcu() is invoked on one CPU while other CPUs |
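A short sketch of where the two new primitives slot in, reusing hypothetical foo/bar structures in the spirit of the rcu_dereference_check() example above; the spinlock and field names are assumptions.

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct bar { int data; };                       /* hypothetical */
struct foo {
        spinlock_t lock;                        /* guards updates of ->bar */
        struct bar *bar;
};

/* Update side: foo->lock prevents ->bar from changing underneath us,
 * so the lighter rcu_dereference_protected() is sufficient. */
static void foo_replace_bar(struct foo *foo, struct bar *new_bar)
{
        struct bar *old_bar;

        spin_lock(&foo->lock);
        old_bar = rcu_dereference_protected(foo->bar,
                                            lockdep_is_held(&foo->lock));
        rcu_assign_pointer(foo->bar, new_bar);
        spin_unlock(&foo->lock);

        synchronize_rcu();                      /* wait out pre-existing readers */
        kfree(old_bar);
}

/* The pointer is only tested, never dereferenced, so rcu_access_pointer()
 * is enough; note the answer may already be stale when it is returned. */
static int foo_has_bar(struct foo *foo)
{
        return rcu_access_pointer(foo->bar) != NULL;
}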
diff --git a/include/linux/slab.h b/include/linux/slab.h index 488446289cab..49d1247cd6d9 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -106,6 +106,7 @@ int kmem_cache_shrink(struct kmem_cache *); | |||
| 106 | void kmem_cache_free(struct kmem_cache *, void *); | 106 | void kmem_cache_free(struct kmem_cache *, void *); |
| 107 | unsigned int kmem_cache_size(struct kmem_cache *); | 107 | unsigned int kmem_cache_size(struct kmem_cache *); |
| 108 | const char *kmem_cache_name(struct kmem_cache *); | 108 | const char *kmem_cache_name(struct kmem_cache *); |
| 109 | int kern_ptr_validate(const void *ptr, unsigned long size); | ||
| 109 | int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr); | 110 | int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr); |
| 110 | 111 | ||
| 111 | /* | 112 | /* |
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h index ae4f039515b4..92228a8fbcbc 100644 --- a/include/linux/virtio_console.h +++ b/include/linux/virtio_console.h | |||
| @@ -12,37 +12,14 @@ | |||
| 12 | 12 | ||
| 13 | /* Feature bits */ | 13 | /* Feature bits */ |
| 14 | #define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ | 14 | #define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ |
| 15 | #define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */ | ||
| 16 | 15 | ||
| 17 | struct virtio_console_config { | 16 | struct virtio_console_config { |
| 18 | /* colums of the screens */ | 17 | /* colums of the screens */ |
| 19 | __u16 cols; | 18 | __u16 cols; |
| 20 | /* rows of the screens */ | 19 | /* rows of the screens */ |
| 21 | __u16 rows; | 20 | __u16 rows; |
| 22 | /* max. number of ports this device can hold */ | ||
| 23 | __u32 max_nr_ports; | ||
| 24 | /* number of ports added so far */ | ||
| 25 | __u32 nr_ports; | ||
| 26 | } __attribute__((packed)); | 21 | } __attribute__((packed)); |
| 27 | 22 | ||
| 28 | /* | ||
| 29 | * A message that's passed between the Host and the Guest for a | ||
| 30 | * particular port. | ||
| 31 | */ | ||
| 32 | struct virtio_console_control { | ||
| 33 | __u32 id; /* Port number */ | ||
| 34 | __u16 event; /* The kind of control event (see below) */ | ||
| 35 | __u16 value; /* Extra information for the key */ | ||
| 36 | }; | ||
| 37 | |||
| 38 | /* Some events for control messages */ | ||
| 39 | #define VIRTIO_CONSOLE_PORT_READY 0 | ||
| 40 | #define VIRTIO_CONSOLE_CONSOLE_PORT 1 | ||
| 41 | #define VIRTIO_CONSOLE_RESIZE 2 | ||
| 42 | #define VIRTIO_CONSOLE_PORT_OPEN 3 | ||
| 43 | #define VIRTIO_CONSOLE_PORT_NAME 4 | ||
| 44 | #define VIRTIO_CONSOLE_PORT_REMOVE 5 | ||
| 45 | |||
| 46 | #ifdef __KERNEL__ | 23 | #ifdef __KERNEL__ |
| 47 | int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)); | 24 | int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)); |
| 48 | #endif /* __KERNEL__ */ | 25 | #endif /* __KERNEL__ */ |
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 76e8903cd204..36520ded3e06 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
| @@ -34,6 +34,9 @@ struct writeback_control { | |||
| 34 | enum writeback_sync_modes sync_mode; | 34 | enum writeback_sync_modes sync_mode; |
| 35 | unsigned long *older_than_this; /* If !NULL, only write back inodes | 35 | unsigned long *older_than_this; /* If !NULL, only write back inodes |
| 36 | older than this */ | 36 | older than this */ |
| 37 | unsigned long wb_start; /* Time writeback_inodes_wb was | ||
| 38 | called. This is needed to avoid | ||
| 39 | extra jobs and livelock */ | ||
| 37 | long nr_to_write; /* Write this many pages, and decrement | 40 | long nr_to_write; /* Write this many pages, and decrement |
| 38 | this for each page written */ | 41 | this for each page written */ |
| 39 | long pages_skipped; /* Pages which were not written */ | 42 | long pages_skipped; /* Pages which were not written */ |
diff --git a/include/net/x25.h b/include/net/x25.h index 15ef9624ab75..468551ea4f1d 100644 --- a/include/net/x25.h +++ b/include/net/x25.h | |||
| @@ -183,6 +183,10 @@ extern int sysctl_x25_clear_request_timeout; | |||
| 183 | extern int sysctl_x25_ack_holdback_timeout; | 183 | extern int sysctl_x25_ack_holdback_timeout; |
| 184 | extern int sysctl_x25_forward; | 184 | extern int sysctl_x25_forward; |
| 185 | 185 | ||
| 186 | extern int x25_parse_address_block(struct sk_buff *skb, | ||
| 187 | struct x25_address *called_addr, | ||
| 188 | struct x25_address *calling_addr); | ||
| 189 | |||
| 186 | extern int x25_addr_ntoa(unsigned char *, struct x25_address *, | 190 | extern int x25_addr_ntoa(unsigned char *, struct x25_address *, |
| 187 | struct x25_address *); | 191 | struct x25_address *); |
| 188 | extern int x25_addr_aton(unsigned char *, struct x25_address *, | 192 | extern int x25_addr_aton(unsigned char *, struct x25_address *, |
diff --git a/include/sound/ak4113.h b/include/sound/ak4113.h index 8988edae1609..2609048c1d44 100644 --- a/include/sound/ak4113.h +++ b/include/sound/ak4113.h | |||
| @@ -307,7 +307,7 @@ struct ak4113 { | |||
| 307 | 307 | ||
| 308 | int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read, | 308 | int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read, |
| 309 | ak4113_write_t *write, | 309 | ak4113_write_t *write, |
| 310 | const unsigned char pgm[AK4113_WRITABLE_REGS], | 310 | const unsigned char *pgm, |
| 311 | void *private_data, struct ak4113 **r_ak4113); | 311 | void *private_data, struct ak4113 **r_ak4113); |
| 312 | void snd_ak4113_reg_write(struct ak4113 *ak4113, unsigned char reg, | 312 | void snd_ak4113_reg_write(struct ak4113 *ak4113, unsigned char reg, |
| 313 | unsigned char mask, unsigned char val); | 313 | unsigned char mask, unsigned char val); |
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 061f16d4c878..0a0b019d41ad 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h | |||
| @@ -219,7 +219,6 @@ struct snd_soc_dai { | |||
| 219 | struct snd_soc_codec *codec; | 219 | struct snd_soc_codec *codec; |
| 220 | unsigned int active; | 220 | unsigned int active; |
| 221 | unsigned char pop_wait:1; | 221 | unsigned char pop_wait:1; |
| 222 | void *dma_data; | ||
| 223 | 222 | ||
| 224 | /* DAI private data */ | 223 | /* DAI private data */ |
| 225 | void *private_data; | 224 | void *private_data; |
| @@ -230,4 +229,21 @@ struct snd_soc_dai { | |||
| 230 | struct list_head list; | 229 | struct list_head list; |
| 231 | }; | 230 | }; |
| 232 | 231 | ||
| 232 | static inline void *snd_soc_dai_get_dma_data(const struct snd_soc_dai *dai, | ||
| 233 | const struct snd_pcm_substream *ss) | ||
| 234 | { | ||
| 235 | return (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) ? | ||
| 236 | dai->playback.dma_data : dai->capture.dma_data; | ||
| 237 | } | ||
| 238 | |||
| 239 | static inline void snd_soc_dai_set_dma_data(struct snd_soc_dai *dai, | ||
| 240 | const struct snd_pcm_substream *ss, | ||
| 241 | void *data) | ||
| 242 | { | ||
| 243 | if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) | ||
| 244 | dai->playback.dma_data = data; | ||
| 245 | else | ||
| 246 | dai->capture.dma_data = data; | ||
| 247 | } | ||
| 248 | |||
| 233 | #endif | 249 | #endif |
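A hedged sketch of the intended division of labour for the new accessors: the CPU DAI driver records per-stream DMA parameters in its hw_params() callback and the platform (PCM/DMA) driver fetches them later. Every name containing "my" is invented, and the runtime layout (rtd->dai->cpu_dai) is an assumption about the ASoC core of this era.

#include <sound/soc.h>
#include <sound/soc-dai.h>
#include <sound/pcm.h>

struct mydai_dma_params { int dma_req; };               /* hypothetical */
static struct mydai_dma_params mydai_dma[2];            /* playback, capture */

/* CPU DAI side: stash the per-stream DMA parameters. */
static int mydai_hw_params(struct snd_pcm_substream *substream,
                           struct snd_pcm_hw_params *params,
                           struct snd_soc_dai *dai)
{
        snd_soc_dai_set_dma_data(dai, substream,
                                 &mydai_dma[substream->stream]);
        return 0;
}

/* Platform side: retrieve them when programming the DMA controller. */
static int myplatform_hw_params(struct snd_pcm_substream *substream,
                                struct snd_pcm_hw_params *params)
{
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct mydai_dma_params *dma =
                snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);

        if (!dma)
                return -EINVAL;
        /* ... program the DMA controller using dma->dma_req ... */
        return 0;
}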
diff --git a/include/sound/soc.h b/include/sound/soc.h index 5d234a8c2506..a57fbfcd4c8f 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h | |||
| @@ -375,6 +375,7 @@ struct snd_soc_pcm_stream { | |||
| 375 | unsigned int channels_min; /* min channels */ | 375 | unsigned int channels_min; /* min channels */ |
| 376 | unsigned int channels_max; /* max channels */ | 376 | unsigned int channels_max; /* max channels */ |
| 377 | unsigned int active:1; /* stream is in use */ | 377 | unsigned int active:1; /* stream is in use */ |
| 378 | void *dma_data; /* used by platform code */ | ||
| 378 | }; | 379 | }; |
| 379 | 380 | ||
| 380 | /* SoC audio ops */ | 381 | /* SoC audio ops */ |
diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 5fb72733331e..d870a918559c 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h | |||
| @@ -40,6 +40,16 @@ DECLARE_EVENT_CLASS(block_rq_with_error, | |||
| 40 | __entry->nr_sector, __entry->errors) | 40 | __entry->nr_sector, __entry->errors) |
| 41 | ); | 41 | ); |
| 42 | 42 | ||
| 43 | /** | ||
| 44 | * block_rq_abort - abort block operation request | ||
| 45 | * @q: queue containing the block operation request | ||
| 46 | * @rq: block IO operation request | ||
| 47 | * | ||
| 48 | * Called immediately after pending block IO operation request @rq in | ||
| 49 | * queue @q is aborted. The fields in the operation request @rq | ||
| 50 | * can be examined to determine which device and sectors the pending | ||
| 51 | * operation would access. | ||
| 52 | */ | ||
| 43 | DEFINE_EVENT(block_rq_with_error, block_rq_abort, | 53 | DEFINE_EVENT(block_rq_with_error, block_rq_abort, |
| 44 | 54 | ||
| 45 | TP_PROTO(struct request_queue *q, struct request *rq), | 55 | TP_PROTO(struct request_queue *q, struct request *rq), |
| @@ -47,6 +57,15 @@ DEFINE_EVENT(block_rq_with_error, block_rq_abort, | |||
| 47 | TP_ARGS(q, rq) | 57 | TP_ARGS(q, rq) |
| 48 | ); | 58 | ); |
| 49 | 59 | ||
| 60 | /** | ||
| 61 | * block_rq_requeue - place block IO request back on a queue | ||
| 62 | * @q: queue holding operation | ||
| 63 | * @rq: block IO operation request | ||
| 64 | * | ||
| 65 | * The block operation request @rq is being placed back into queue | ||
| 66 | * @q. For some reason the request was not completed and needs to be | ||
| 67 | * put back in the queue. | ||
| 68 | */ | ||
| 50 | DEFINE_EVENT(block_rq_with_error, block_rq_requeue, | 69 | DEFINE_EVENT(block_rq_with_error, block_rq_requeue, |
| 51 | 70 | ||
| 52 | TP_PROTO(struct request_queue *q, struct request *rq), | 71 | TP_PROTO(struct request_queue *q, struct request *rq), |
| @@ -54,6 +73,17 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue, | |||
| 54 | TP_ARGS(q, rq) | 73 | TP_ARGS(q, rq) |
| 55 | ); | 74 | ); |
| 56 | 75 | ||
| 76 | /** | ||
| 77 | * block_rq_complete - block IO operation completed by device driver | ||
| 78 | * @q: queue containing the block operation request | ||
| 79 | * @rq: block operations request | ||
| 80 | * | ||
| 81 | * The block_rq_complete tracepoint event indicates that some portion | ||
| 82 | * of the operation request has been completed by the device driver. If | ||
| 83 | * the @rq->bio is %NULL, then there is absolutely no additional work to | ||
| 84 | * do for the request. If @rq->bio is non-NULL then there is | ||
| 85 | * additional work required to complete the request. | ||
| 86 | */ | ||
| 57 | DEFINE_EVENT(block_rq_with_error, block_rq_complete, | 87 | DEFINE_EVENT(block_rq_with_error, block_rq_complete, |
| 58 | 88 | ||
| 59 | TP_PROTO(struct request_queue *q, struct request *rq), | 89 | TP_PROTO(struct request_queue *q, struct request *rq), |
| @@ -95,6 +125,16 @@ DECLARE_EVENT_CLASS(block_rq, | |||
| 95 | __entry->nr_sector, __entry->comm) | 125 | __entry->nr_sector, __entry->comm) |
| 96 | ); | 126 | ); |
| 97 | 127 | ||
| 128 | /** | ||
| 129 | * block_rq_insert - insert block operation request into queue | ||
| 130 | * @q: target queue | ||
| 131 | * @rq: block IO operation request | ||
| 132 | * | ||
| 133 | * Called immediately before block operation request @rq is inserted | ||
| 134 | * into queue @q. The fields in the operation request @rq struct can | ||
| 135 | * be examined to determine which device and sectors the pending | ||
| 136 | * operation would access. | ||
| 137 | */ | ||
| 98 | DEFINE_EVENT(block_rq, block_rq_insert, | 138 | DEFINE_EVENT(block_rq, block_rq_insert, |
| 99 | 139 | ||
| 100 | TP_PROTO(struct request_queue *q, struct request *rq), | 140 | TP_PROTO(struct request_queue *q, struct request *rq), |
| @@ -102,6 +142,14 @@ DEFINE_EVENT(block_rq, block_rq_insert, | |||
| 102 | TP_ARGS(q, rq) | 142 | TP_ARGS(q, rq) |
| 103 | ); | 143 | ); |
| 104 | 144 | ||
| 145 | /** | ||
| 146 | * block_rq_issue - issue pending block IO request operation to device driver | ||
| 147 | * @q: queue holding operation | ||
| 148 | * @rq: block IO operation request | ||
| 149 | * | ||
| 150 | * Called when block operation request @rq from queue @q is sent to a | ||
| 151 | * device driver for processing. | ||
| 152 | */ | ||
| 105 | DEFINE_EVENT(block_rq, block_rq_issue, | 153 | DEFINE_EVENT(block_rq, block_rq_issue, |
| 106 | 154 | ||
| 107 | TP_PROTO(struct request_queue *q, struct request *rq), | 155 | TP_PROTO(struct request_queue *q, struct request *rq), |
| @@ -109,6 +157,17 @@ DEFINE_EVENT(block_rq, block_rq_issue, | |||
| 109 | TP_ARGS(q, rq) | 157 | TP_ARGS(q, rq) |
| 110 | ); | 158 | ); |
| 111 | 159 | ||
| 160 | /** | ||
| 161 | * block_bio_bounce - used bounce buffer when processing block operation | ||
| 162 | * @q: queue holding the block operation | ||
| 163 | * @bio: block operation | ||
| 164 | * | ||
| 165 | * A bounce buffer was used to handle the block operation @bio in @q. | ||
| 166 | * This occurs when hardware limitations prevent a direct transfer of | ||
| 167 | * data between the @bio data memory area and the IO device. Use of a | ||
| 168 | * bounce buffer requires extra copying of data and decreases | ||
| 169 | * performance. | ||
| 170 | */ | ||
| 112 | TRACE_EVENT(block_bio_bounce, | 171 | TRACE_EVENT(block_bio_bounce, |
| 113 | 172 | ||
| 114 | TP_PROTO(struct request_queue *q, struct bio *bio), | 173 | TP_PROTO(struct request_queue *q, struct bio *bio), |
| @@ -138,6 +197,14 @@ TRACE_EVENT(block_bio_bounce, | |||
| 138 | __entry->nr_sector, __entry->comm) | 197 | __entry->nr_sector, __entry->comm) |
| 139 | ); | 198 | ); |
| 140 | 199 | ||
| 200 | /** | ||
| 201 | * block_bio_complete - completed all work on the block operation | ||
| 202 | * @q: queue holding the block operation | ||
| 203 | * @bio: block operation completed | ||
| 204 | * | ||
| 205 | * This tracepoint indicates there is no further work to do on this | ||
| 206 | * block IO operation @bio. | ||
| 207 | */ | ||
| 141 | TRACE_EVENT(block_bio_complete, | 208 | TRACE_EVENT(block_bio_complete, |
| 142 | 209 | ||
| 143 | TP_PROTO(struct request_queue *q, struct bio *bio), | 210 | TP_PROTO(struct request_queue *q, struct bio *bio), |
| @@ -193,6 +260,14 @@ DECLARE_EVENT_CLASS(block_bio, | |||
| 193 | __entry->nr_sector, __entry->comm) | 260 | __entry->nr_sector, __entry->comm) |
| 194 | ); | 261 | ); |
| 195 | 262 | ||
| 263 | /** | ||
| 264 | * block_bio_backmerge - merging block operation to the end of an existing operation | ||
| 265 | * @q: queue holding operation | ||
| 266 | * @bio: new block operation to merge | ||
| 267 | * | ||
| 268 | * Merging block request @bio to the end of an existing block request | ||
| 269 | * in queue @q. | ||
| 270 | */ | ||
| 196 | DEFINE_EVENT(block_bio, block_bio_backmerge, | 271 | DEFINE_EVENT(block_bio, block_bio_backmerge, |
| 197 | 272 | ||
| 198 | TP_PROTO(struct request_queue *q, struct bio *bio), | 273 | TP_PROTO(struct request_queue *q, struct bio *bio), |
| @@ -200,6 +275,14 @@ DEFINE_EVENT(block_bio, block_bio_backmerge, | |||
| 200 | TP_ARGS(q, bio) | 275 | TP_ARGS(q, bio) |
| 201 | ); | 276 | ); |
| 202 | 277 | ||
| 278 | /** | ||
| 279 | * block_bio_frontmerge - merging block operation to the beginning of an existing operation | ||
| 280 | * @q: queue holding operation | ||
| 281 | * @bio: new block operation to merge | ||
| 282 | * | ||
| 283 | * Merging block IO operation @bio to the beginning of an existing block | ||
| 284 | * operation in queue @q. | ||
| 285 | */ | ||
| 203 | DEFINE_EVENT(block_bio, block_bio_frontmerge, | 286 | DEFINE_EVENT(block_bio, block_bio_frontmerge, |
| 204 | 287 | ||
| 205 | TP_PROTO(struct request_queue *q, struct bio *bio), | 288 | TP_PROTO(struct request_queue *q, struct bio *bio), |
| @@ -207,6 +290,13 @@ DEFINE_EVENT(block_bio, block_bio_frontmerge, | |||
| 207 | TP_ARGS(q, bio) | 290 | TP_ARGS(q, bio) |
| 208 | ); | 291 | ); |
| 209 | 292 | ||
| 293 | /** | ||
| 294 | * block_bio_queue - putting new block IO operation in queue | ||
| 295 | * @q: queue holding operation | ||
| 296 | * @bio: new block operation | ||
| 297 | * | ||
| 298 | * About to place the block IO operation @bio into queue @q. | ||
| 299 | */ | ||
| 210 | DEFINE_EVENT(block_bio, block_bio_queue, | 300 | DEFINE_EVENT(block_bio, block_bio_queue, |
| 211 | 301 | ||
| 212 | TP_PROTO(struct request_queue *q, struct bio *bio), | 302 | TP_PROTO(struct request_queue *q, struct bio *bio), |
| @@ -243,6 +333,15 @@ DECLARE_EVENT_CLASS(block_get_rq, | |||
| 243 | __entry->nr_sector, __entry->comm) | 333 | __entry->nr_sector, __entry->comm) |
| 244 | ); | 334 | ); |
| 245 | 335 | ||
| 336 | /** | ||
| 337 | * block_getrq - get a free request entry in queue for block IO operations | ||
| 338 | * @q: queue for operations | ||
| 339 | * @bio: pending block IO operation | ||
| 340 | * @rw: low bit indicates a read (%0) or a write (%1) | ||
| 341 | * | ||
| 342 | * A request struct for queue @q has been allocated to handle the | ||
| 343 | * block IO operation @bio. | ||
| 344 | */ | ||
| 246 | DEFINE_EVENT(block_get_rq, block_getrq, | 345 | DEFINE_EVENT(block_get_rq, block_getrq, |
| 247 | 346 | ||
| 248 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | 347 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), |
| @@ -250,6 +349,17 @@ DEFINE_EVENT(block_get_rq, block_getrq, | |||
| 250 | TP_ARGS(q, bio, rw) | 349 | TP_ARGS(q, bio, rw) |
| 251 | ); | 350 | ); |
| 252 | 351 | ||
| 352 | /** | ||
| 353 | * block_sleeprq - waiting to get a free request entry in queue for block IO operation | ||
| 354 | * @q: queue for operation | ||
| 355 | * @bio: pending block IO operation | ||
| 356 | * @rw: low bit indicates a read (%0) or a write (%1) | ||
| 357 | * | ||
| 358 | * In the case where a request struct cannot be provided for queue @q | ||
| 359 | * the process needs to wait for a request struct to become | ||
| 360 | * available. This tracepoint event is generated each time the | ||
| 361 | * process goes to sleep waiting for a request struct to become available. | ||
| 362 | */ | ||
| 253 | DEFINE_EVENT(block_get_rq, block_sleeprq, | 363 | DEFINE_EVENT(block_get_rq, block_sleeprq, |
| 254 | 364 | ||
| 255 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | 365 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), |
| @@ -257,6 +367,14 @@ DEFINE_EVENT(block_get_rq, block_sleeprq, | |||
| 257 | TP_ARGS(q, bio, rw) | 367 | TP_ARGS(q, bio, rw) |
| 258 | ); | 368 | ); |
| 259 | 369 | ||
| 370 | /** | ||
| 371 | * block_plug - keep operation requests in the request queue | ||
| 372 | * @q: request queue to plug | ||
| 373 | * | ||
| 374 | * Plug the request queue @q. Do not allow block operation requests | ||
| 375 | * to be sent to the device driver. Instead, accumulate requests in | ||
| 376 | * the queue to improve throughput performance of the block device. | ||
| 377 | */ | ||
| 260 | TRACE_EVENT(block_plug, | 378 | TRACE_EVENT(block_plug, |
| 261 | 379 | ||
| 262 | TP_PROTO(struct request_queue *q), | 380 | TP_PROTO(struct request_queue *q), |
| @@ -293,6 +411,13 @@ DECLARE_EVENT_CLASS(block_unplug, | |||
| 293 | TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) | 411 | TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) |
| 294 | ); | 412 | ); |
| 295 | 413 | ||
| 414 | /** | ||
| 415 | * block_unplug_timer - timed release of operation requests in queue to device driver | ||
| 416 | * @q: request queue to unplug | ||
| 417 | * | ||
| 418 | * Unplug the request queue @q because a timer expired and allow block | ||
| 419 | * operation requests to be sent to the device driver. | ||
| 420 | */ | ||
| 296 | DEFINE_EVENT(block_unplug, block_unplug_timer, | 421 | DEFINE_EVENT(block_unplug, block_unplug_timer, |
| 297 | 422 | ||
| 298 | TP_PROTO(struct request_queue *q), | 423 | TP_PROTO(struct request_queue *q), |
| @@ -300,6 +425,13 @@ DEFINE_EVENT(block_unplug, block_unplug_timer, | |||
| 300 | TP_ARGS(q) | 425 | TP_ARGS(q) |
| 301 | ); | 426 | ); |
| 302 | 427 | ||
| 428 | /** | ||
| 429 | * block_unplug_io - release of operation requests in request queue | ||
| 430 | * @q: request queue to unplug | ||
| 431 | * | ||
| 432 | * Unplug request queue @q because device driver is scheduled to work | ||
| 433 | * on elements in the request queue. | ||
| 434 | */ | ||
| 303 | DEFINE_EVENT(block_unplug, block_unplug_io, | 435 | DEFINE_EVENT(block_unplug, block_unplug_io, |
| 304 | 436 | ||
| 305 | TP_PROTO(struct request_queue *q), | 437 | TP_PROTO(struct request_queue *q), |
| @@ -307,6 +439,17 @@ DEFINE_EVENT(block_unplug, block_unplug_io, | |||
| 307 | TP_ARGS(q) | 439 | TP_ARGS(q) |
| 308 | ); | 440 | ); |
| 309 | 441 | ||
| 442 | /** | ||
| 443 | * block_split - split a single bio struct into two bio structs | ||
| 444 | * @q: queue containing the bio | ||
| 445 | * @bio: block operation being split | ||
| 446 | * @new_sector: The starting sector for the new bio | ||
| 447 | * | ||
| 448 | * The bio request @bio in request queue @q needs to be split into two | ||
| 449 | * bio requests. The newly created @bio request starts at | ||
| 450 | * @new_sector. This split may be required due to hardware limitations | ||
| 451 | * such as an operation crossing device boundaries in a RAID system. | ||
| 452 | */ | ||
| 310 | TRACE_EVENT(block_split, | 453 | TRACE_EVENT(block_split, |
| 311 | 454 | ||
| 312 | TP_PROTO(struct request_queue *q, struct bio *bio, | 455 | TP_PROTO(struct request_queue *q, struct bio *bio, |
| @@ -337,6 +480,16 @@ TRACE_EVENT(block_split, | |||
| 337 | __entry->comm) | 480 | __entry->comm) |
| 338 | ); | 481 | ); |
| 339 | 482 | ||
| 483 | /** | ||
| 484 | * block_remap - map request for a partition to the raw device | ||
| 485 | * @q: queue holding the operation | ||
| 486 | * @bio: revised operation | ||
| 487 | * @dev: device for the operation | ||
| 488 | * @from: original sector for the operation | ||
| 489 | * | ||
| 490 | * An operation for a partition on a block device has been mapped to the | ||
| 491 | * raw block device. | ||
| 492 | */ | ||
| 340 | TRACE_EVENT(block_remap, | 493 | TRACE_EVENT(block_remap, |
| 341 | 494 | ||
| 342 | TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, | 495 | TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, |
| @@ -370,6 +523,17 @@ TRACE_EVENT(block_remap, | |||
| 370 | (unsigned long long)__entry->old_sector) | 523 | (unsigned long long)__entry->old_sector) |
| 371 | ); | 524 | ); |
| 372 | 525 | ||
| 526 | /** | ||
| 527 | * block_rq_remap - map request for a block operation request | ||
| 528 | * @q: queue holding the operation | ||
| 529 | * @rq: block IO operation request | ||
| 530 | * @dev: device for the operation | ||
| 531 | * @from: original sector for the operation | ||
| 532 | * | ||
| 533 | * The block operation request @rq in @q has been remapped. The block | ||
| 534 | * operation request @rq holds the current information and @from holds | ||
| 535 | * the original sector. | ||
| 536 | */ | ||
| 373 | TRACE_EVENT(block_rq_remap, | 537 | TRACE_EVENT(block_rq_remap, |
| 374 | 538 | ||
| 375 | TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev, | 539 | TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev, |
diff --git a/kernel/exit.c b/kernel/exit.c index cce59cb5ee6a..7f2683a10ac4 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
| @@ -953,7 +953,8 @@ NORET_TYPE void do_exit(long code) | |||
| 953 | 953 | ||
| 954 | acct_update_integrals(tsk); | 954 | acct_update_integrals(tsk); |
| 955 | /* sync mm's RSS info before statistics gathering */ | 955 | /* sync mm's RSS info before statistics gathering */ |
| 956 | sync_mm_rss(tsk, tsk->mm); | 956 | if (tsk->mm) |
| 957 | sync_mm_rss(tsk, tsk->mm); | ||
| 957 | group_dead = atomic_dec_and_test(&tsk->signal->live); | 958 | group_dead = atomic_dec_and_test(&tsk->signal->live); |
| 958 | if (group_dead) { | 959 | if (group_dead) { |
| 959 | hrtimer_cancel(&tsk->signal->real_timer); | 960 | hrtimer_cancel(&tsk->signal->real_timer); |
diff --git a/kernel/fork.c b/kernel/fork.c index 4799c5f0e6d0..44b0791b0a2e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -1052,6 +1052,9 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 1052 | p->prev_utime = cputime_zero; | 1052 | p->prev_utime = cputime_zero; |
| 1053 | p->prev_stime = cputime_zero; | 1053 | p->prev_stime = cputime_zero; |
| 1054 | #endif | 1054 | #endif |
| 1055 | #if defined(SPLIT_RSS_COUNTING) | ||
| 1056 | memset(&p->rss_stat, 0, sizeof(p->rss_stat)); | ||
| 1057 | #endif | ||
| 1055 | 1058 | ||
| 1056 | p->default_timer_slack_ns = current->timer_slack_ns; | 1059 | p->default_timer_slack_ns = current->timer_slack_ns; |
| 1057 | 1060 | ||
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 398fda155f6e..704e488730a5 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
| @@ -757,6 +757,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 757 | if (new->flags & IRQF_ONESHOT) | 757 | if (new->flags & IRQF_ONESHOT) |
| 758 | desc->status |= IRQ_ONESHOT; | 758 | desc->status |= IRQ_ONESHOT; |
| 759 | 759 | ||
| 760 | /* | ||
| 761 | * Force MSI interrupts to run with interrupts | ||
| 762 | * disabled. Multi-vector cards can cause stack | ||
| 763 | * overflows due to nested interrupts when enough of | ||
| 764 | * them are directed to a core and fire at the same | ||
| 765 | * time. | ||
| 766 | */ | ||
| 767 | if (desc->msi_desc) | ||
| 768 | new->flags |= IRQF_DISABLED; | ||
| 769 | |||
| 760 | if (!(desc->status & IRQ_NOAUTOEN)) { | 770 | if (!(desc->status & IRQ_NOAUTOEN)) { |
| 761 | desc->depth = 0; | 771 | desc->depth = 0; |
| 762 | desc->status &= ~IRQ_DISABLED; | 772 | desc->status &= ~IRQ_DISABLED; |
diff --git a/kernel/module.c b/kernel/module.c index 9f8d23d8b3a8..1016b75b026a 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
| @@ -521,11 +521,13 @@ static void module_unload_init(struct module *mod) | |||
| 521 | int cpu; | 521 | int cpu; |
| 522 | 522 | ||
| 523 | INIT_LIST_HEAD(&mod->modules_which_use_me); | 523 | INIT_LIST_HEAD(&mod->modules_which_use_me); |
| 524 | for_each_possible_cpu(cpu) | 524 | for_each_possible_cpu(cpu) { |
| 525 | per_cpu_ptr(mod->refptr, cpu)->count = 0; | 525 | per_cpu_ptr(mod->refptr, cpu)->incs = 0; |
| 526 | per_cpu_ptr(mod->refptr, cpu)->decs = 0; | ||
| 527 | } | ||
| 526 | 528 | ||
| 527 | /* Hold reference count during initialization. */ | 529 | /* Hold reference count during initialization. */ |
| 528 | __this_cpu_write(mod->refptr->count, 1); | 530 | __this_cpu_write(mod->refptr->incs, 1); |
| 529 | /* Backwards compatibility macros put refcount during init. */ | 531 | /* Backwards compatibility macros put refcount during init. */ |
| 530 | mod->waiter = current; | 532 | mod->waiter = current; |
| 531 | } | 533 | } |
| @@ -664,12 +666,28 @@ static int try_stop_module(struct module *mod, int flags, int *forced) | |||
| 664 | 666 | ||
| 665 | unsigned int module_refcount(struct module *mod) | 667 | unsigned int module_refcount(struct module *mod) |
| 666 | { | 668 | { |
| 667 | unsigned int total = 0; | 669 | unsigned int incs = 0, decs = 0; |
| 668 | int cpu; | 670 | int cpu; |
| 669 | 671 | ||
| 670 | for_each_possible_cpu(cpu) | 672 | for_each_possible_cpu(cpu) |
| 671 | total += per_cpu_ptr(mod->refptr, cpu)->count; | 673 | decs += per_cpu_ptr(mod->refptr, cpu)->decs; |
| 672 | return total; | 674 | /* |
| 675 | * ensure the incs are added up after the decs. | ||
| 676 | * module_put ensures incs are visible before decs with smp_wmb. | ||
| 677 | * | ||
| 678 | * This 2-count scheme avoids the situation where the refcount | ||
| 679 | * for CPU0 is read, then CPU0 increments the module refcount, | ||
| 680 | * then CPU1 drops that refcount, then the refcount for CPU1 is | ||
| 681 | * read. We would record a decrement but not its corresponding | ||
| 682 | * increment so we would see a low count (disaster). | ||
| 683 | * | ||
| 684 | * Rare situation? But module_refcount can be preempted, and we | ||
| 685 | * might be tallying up 4096+ CPUs. So it is not impossible. | ||
| 686 | */ | ||
| 687 | smp_rmb(); | ||
| 688 | for_each_possible_cpu(cpu) | ||
| 689 | incs += per_cpu_ptr(mod->refptr, cpu)->incs; | ||
| 690 | return incs - decs; | ||
| 673 | } | 691 | } |
| 674 | EXPORT_SYMBOL(module_refcount); | 692 | EXPORT_SYMBOL(module_refcount); |
| 675 | 693 | ||
| @@ -846,10 +864,11 @@ void module_put(struct module *module) | |||
| 846 | { | 864 | { |
| 847 | if (module) { | 865 | if (module) { |
| 848 | preempt_disable(); | 866 | preempt_disable(); |
| 849 | __this_cpu_dec(module->refptr->count); | 867 | smp_wmb(); /* see comment in module_refcount */ |
| 868 | __this_cpu_inc(module->refptr->decs); | ||
| 850 | 869 | ||
| 851 | trace_module_put(module, _RET_IP_, | 870 | trace_module_put(module, _RET_IP_, |
| 852 | __this_cpu_read(module->refptr->count)); | 871 | __this_cpu_read(module->refptr->decs)); |
| 853 | /* Maybe they're waiting for us to drop reference? */ | 872 | /* Maybe they're waiting for us to drop reference? */ |
| 854 | if (unlikely(!module_is_live(module))) | 873 | if (unlikely(!module_is_live(module))) |
| 855 | wake_up_process(module->waiter); | 874 | wake_up_process(module->waiter); |
diff --git a/kernel/power/user.c b/kernel/power/user.c index 4d2289626a84..a8c96212bc1b 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c | |||
| @@ -420,7 +420,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
| 420 | * User space encodes device types as two-byte values, | 420 | * User space encodes device types as two-byte values, |
| 421 | * so we need to recode them | 421 | * so we need to recode them |
| 422 | */ | 422 | */ |
| 423 | swdev = old_decode_dev(swap_area.dev); | 423 | swdev = new_decode_dev(swap_area.dev); |
| 424 | if (swdev) { | 424 | if (swdev) { |
| 425 | offset = swap_area.offset; | 425 | offset = swap_area.offset; |
| 426 | data->swap = swap_type_of(swdev, offset, NULL); | 426 | data->swap = swap_type_of(swdev, offset, NULL); |
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 63fe25433980..03a7ea1579f6 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
| @@ -69,6 +69,13 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active); | |||
| 69 | 69 | ||
| 70 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 70 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 71 | 71 | ||
| 72 | int debug_lockdep_rcu_enabled(void) | ||
| 73 | { | ||
| 74 | return rcu_scheduler_active && debug_locks && | ||
| 75 | current->lockdep_recursion == 0; | ||
| 76 | } | ||
| 77 | EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); | ||
| 78 | |||
| 72 | /** | 79 | /** |
| 73 | * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? | 80 | * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? |
| 74 | * | 81 | * |
diff --git a/kernel/sched.c b/kernel/sched.c index a3dff1f3f9b0..6af210a7de70 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -4903,7 +4903,7 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, | |||
| 4903 | int ret; | 4903 | int ret; |
| 4904 | cpumask_var_t mask; | 4904 | cpumask_var_t mask; |
| 4905 | 4905 | ||
| 4906 | if (len < nr_cpu_ids) | 4906 | if ((len * BITS_PER_BYTE) < nr_cpu_ids) |
| 4907 | return -EINVAL; | 4907 | return -EINVAL; |
| 4908 | if (len & (sizeof(unsigned long)-1)) | 4908 | if (len & (sizeof(unsigned long)-1)) |
| 4909 | return -EINVAL; | 4909 | return -EINVAL; |
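The corrected check above compares the user buffer in bits rather than bytes. A small user-space sketch of the case that used to be rejected; the raw syscall is used so the byte length handed to the kernel stays explicit.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* An 8-byte mask describes 64 CPUs, so it should be accepted whenever
 * nr_cpu_ids <= 64; the old byte-based check refused it as soon as the
 * machine had more than 8 possible CPUs. */
int main(void)
{
        unsigned long mask = 0;
        long ret = syscall(SYS_sched_getaffinity, 0, sizeof(mask), &mask);

        /* On success the syscall returns the size of the kernel cpumask. */
        printf("ret=%ld mask=%#lx\n", ret, mask);
        return 0;
}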
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1fafb4b99c9b..935248bdbc47 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -356,7 +356,7 @@ config SLUB_STATS | |||
| 356 | config DEBUG_KMEMLEAK | 356 | config DEBUG_KMEMLEAK |
| 357 | bool "Kernel memory leak detector" | 357 | bool "Kernel memory leak detector" |
| 358 | depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ | 358 | depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ |
| 359 | (X86 || ARM || PPC || S390 || SUPERH) | 359 | (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE) |
| 360 | 360 | ||
| 361 | select DEBUG_FS if SYSFS | 361 | select DEBUG_FS if SYSFS |
| 362 | select STACKTRACE if STACKTRACE_SUPPORT | 362 | select STACKTRACE if STACKTRACE_SUPPORT |
diff --git a/lib/Makefile b/lib/Makefile index 2e152aed7198..0d4015205c64 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
| @@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o | |||
| 21 | 21 | ||
| 22 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 22 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
| 23 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ | 23 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ |
| 24 | string_helpers.o gcd.o list_sort.o | 24 | string_helpers.o gcd.o lcm.o list_sort.o |
| 25 | 25 | ||
| 26 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) | 26 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) |
| 27 | CFLAGS_kobject.o += -DDEBUG | 27 | CFLAGS_kobject.o += -DDEBUG |
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index ba8b67039d13..01e64270e246 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
| @@ -570,7 +570,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf, | |||
| 570 | * Now parse out the first token and use it as the name for the | 570 | * Now parse out the first token and use it as the name for the |
| 571 | * driver to filter for. | 571 | * driver to filter for. |
| 572 | */ | 572 | */ |
| 573 | for (i = 0; i < NAME_MAX_LEN; ++i) { | 573 | for (i = 0; i < NAME_MAX_LEN - 1; ++i) { |
| 574 | current_driver_name[i] = buf[i]; | 574 | current_driver_name[i] = buf[i]; |
| 575 | if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) | 575 | if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) |
| 576 | break; | 576 | break; |
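The change above is the usual leave-room-for-the-terminator fix: the copy loop now stops one character short of the buffer so the NUL stored afterwards (outside the quoted hunk) can never land out of bounds. A generic, self-contained illustration of the pattern; none of these names come from dma-debug.

#include <ctype.h>
#include <stddef.h>

/* Copy a whitespace-delimited token into dst, which holds size bytes.
 * At most size - 1 characters are copied so the '\0' always fits. */
static void copy_token(char *dst, size_t size, const char *src)
{
        size_t i;

        if (!size)
                return;
        for (i = 0; i < size - 1 && src[i] && !isspace((unsigned char)src[i]); i++)
                dst[i] = src[i];
        dst[i] = '\0';
}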
diff --git a/lib/lcm.c b/lib/lcm.c new file mode 100644 index 000000000000..157cd88a6ffc --- /dev/null +++ b/lib/lcm.c | |||
| @@ -0,0 +1,15 @@ | |||
| 1 | #include <linux/kernel.h> | ||
| 2 | #include <linux/gcd.h> | ||
| 3 | #include <linux/module.h> | ||
| 4 | |||
| 5 | /* Lowest common multiple */ | ||
| 6 | unsigned long lcm(unsigned long a, unsigned long b) | ||
| 7 | { | ||
| 8 | if (a && b) | ||
| 9 | return (a * b) / gcd(a, b); | ||
| 10 | else if (b) | ||
| 11 | return b; | ||
| 12 | |||
| 13 | return a; | ||
| 14 | } | ||
| 15 | EXPORT_SYMBOL_GPL(lcm); | ||
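A hedged usage sketch for the new helper; stacked block devices are the kind of caller that needs a combined limit acceptable to every layer. The variable names are illustrative, the include path assumes the companion header added alongside lib/lcm.c, and callers are expected to keep the inputs small enough that a * b does not overflow.

#include <linux/lcm.h>          /* assumed companion header for lcm() */

/* Pick a minimum I/O size that both the top and bottom device accept. */
static unsigned long combine_min_io(unsigned long top_min_io,
                                    unsigned long bottom_min_io)
{
        return lcm(top_min_io, bottom_min_io);
}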
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 0871582aa29d..2a087e0f9863 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -555,6 +555,10 @@ EXPORT_SYMBOL(radix_tree_tag_clear); | |||
| 555 | * | 555 | * |
| 556 | * 0: tag not present or not set | 556 | * 0: tag not present or not set |
| 557 | * 1: tag set | 557 | * 1: tag set |
| 558 | * | ||
| 559 | * Note that the return value of this function may not be relied on, even if | ||
| 560 | * the RCU lock is held, unless tag modification and node deletion are excluded | ||
| 561 | * from concurrency. | ||
| 558 | */ | 562 | */ |
| 559 | int radix_tree_tag_get(struct radix_tree_root *root, | 563 | int radix_tree_tag_get(struct radix_tree_root *root, |
| 560 | unsigned long index, unsigned int tag) | 564 | unsigned long index, unsigned int tag) |
| @@ -595,12 +599,8 @@ int radix_tree_tag_get(struct radix_tree_root *root, | |||
| 595 | */ | 599 | */ |
| 596 | if (!tag_get(node, tag, offset)) | 600 | if (!tag_get(node, tag, offset)) |
| 597 | saw_unset_tag = 1; | 601 | saw_unset_tag = 1; |
| 598 | if (height == 1) { | 602 | if (height == 1) |
| 599 | int ret = tag_get(node, tag, offset); | 603 | return !!tag_get(node, tag, offset); |
| 600 | |||
| 601 | BUG_ON(ret && saw_unset_tag); | ||
| 602 | return !!ret; | ||
| 603 | } | ||
| 604 | node = rcu_dereference_raw(node->slots[offset]); | 604 | node = rcu_dereference_raw(node->slots[offset]); |
| 605 | shift -= RADIX_TREE_MAP_SHIFT; | 605 | shift -= RADIX_TREE_MAP_SHIFT; |
| 606 | height--; | 606 | height--; |
diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 09f5ce1810dc..027a03f4c56d 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c | |||
| @@ -16,9 +16,14 @@ | |||
| 16 | /* | 16 | /* |
| 17 | * __ratelimit - rate limiting | 17 | * __ratelimit - rate limiting |
| 18 | * @rs: ratelimit_state data | 18 | * @rs: ratelimit_state data |
| 19 | * @func: name of calling function | ||
| 19 | * | 20 | * |
| 20 | * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks | 21 | * This enforces a rate limit: not more than @rs->burst callbacks |
| 21 | * in every @rs->ratelimit_jiffies | 22 | * in every @rs->interval |
| 23 | * | ||
| 24 | * RETURNS: | ||
| 25 | * 0 means callbacks will be suppressed. | ||
| 26 | * 1 means go ahead and do it. | ||
| 22 | */ | 27 | */ |
| 23 | int ___ratelimit(struct ratelimit_state *rs, const char *func) | 28 | int ___ratelimit(struct ratelimit_state *rs, const char *func) |
| 24 | { | 29 | { |
| @@ -35,7 +40,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) | |||
| 35 | * the entity that is holding the lock already: | 40 | * the entity that is holding the lock already: |
| 36 | */ | 41 | */ |
| 37 | if (!spin_trylock_irqsave(&rs->lock, flags)) | 42 | if (!spin_trylock_irqsave(&rs->lock, flags)) |
| 38 | return 1; | 43 | return 0; |
| 39 | 44 | ||
| 40 | if (!rs->begin) | 45 | if (!rs->begin) |
| 41 | rs->begin = jiffies; | 46 | rs->begin = jiffies; |
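With the return value now documented above (1 means go ahead, 0 means suppress), a minimal sketch of a typical call site; the interval and burst values are arbitrary.

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/ratelimit.h>

/* Allow at most 10 messages from this site in any 5-second window. */
static DEFINE_RATELIMIT_STATE(my_ratelimit, 5 * HZ, 10);

static void report_problem(int err)
{
        if (__ratelimit(&my_ratelimit))
                printk(KERN_WARNING "something went wrong: %d\n", err);
}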
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c index ccf95bff7984..ffc9fc7f3b05 100644 --- a/lib/rwsem-spinlock.c +++ b/lib/rwsem-spinlock.c | |||
| @@ -143,13 +143,14 @@ void __sched __down_read(struct rw_semaphore *sem) | |||
| 143 | { | 143 | { |
| 144 | struct rwsem_waiter waiter; | 144 | struct rwsem_waiter waiter; |
| 145 | struct task_struct *tsk; | 145 | struct task_struct *tsk; |
| 146 | unsigned long flags; | ||
| 146 | 147 | ||
| 147 | spin_lock_irq(&sem->wait_lock); | 148 | spin_lock_irqsave(&sem->wait_lock, flags); |
| 148 | 149 | ||
| 149 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { | 150 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { |
| 150 | /* granted */ | 151 | /* granted */ |
| 151 | sem->activity++; | 152 | sem->activity++; |
| 152 | spin_unlock_irq(&sem->wait_lock); | 153 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 153 | goto out; | 154 | goto out; |
| 154 | } | 155 | } |
| 155 | 156 | ||
| @@ -164,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem) | |||
| 164 | list_add_tail(&waiter.list, &sem->wait_list); | 165 | list_add_tail(&waiter.list, &sem->wait_list); |
| 165 | 166 | ||
| 166 | /* we don't need to touch the semaphore struct anymore */ | 167 | /* we don't need to touch the semaphore struct anymore */ |
| 167 | spin_unlock_irq(&sem->wait_lock); | 168 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 168 | 169 | ||
| 169 | /* wait to be given the lock */ | 170 | /* wait to be given the lock */ |
| 170 | for (;;) { | 171 | for (;;) { |
| @@ -209,13 +210,14 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
| 209 | { | 210 | { |
| 210 | struct rwsem_waiter waiter; | 211 | struct rwsem_waiter waiter; |
| 211 | struct task_struct *tsk; | 212 | struct task_struct *tsk; |
| 213 | unsigned long flags; | ||
| 212 | 214 | ||
| 213 | spin_lock_irq(&sem->wait_lock); | 215 | spin_lock_irqsave(&sem->wait_lock, flags); |
| 214 | 216 | ||
| 215 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { | 217 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { |
| 216 | /* granted */ | 218 | /* granted */ |
| 217 | sem->activity = -1; | 219 | sem->activity = -1; |
| 218 | spin_unlock_irq(&sem->wait_lock); | 220 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 219 | goto out; | 221 | goto out; |
| 220 | } | 222 | } |
| 221 | 223 | ||
| @@ -230,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
| 230 | list_add_tail(&waiter.list, &sem->wait_list); | 232 | list_add_tail(&waiter.list, &sem->wait_list); |
| 231 | 233 | ||
| 232 | /* we don't need to touch the semaphore struct anymore */ | 234 | /* we don't need to touch the semaphore struct anymore */ |
| 233 | spin_unlock_irq(&sem->wait_lock); | 235 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 234 | 236 | ||
| 235 | /* wait to be given the lock */ | 237 | /* wait to be given the lock */ |
| 236 | for (;;) { | 238 | for (;;) { |
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 24112e5a5780..7376b7c55ffe 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -408,12 +408,12 @@ enum format_type { | |||
| 408 | }; | 408 | }; |
| 409 | 409 | ||
| 410 | struct printf_spec { | 410 | struct printf_spec { |
| 411 | u16 type; | 411 | u8 type; /* format_type enum */ |
| 412 | s16 field_width; /* width of output field */ | ||
| 413 | u8 flags; /* flags to number() */ | 412 | u8 flags; /* flags to number() */ |
| 414 | u8 base; | 413 | u8 base; /* number base, 8, 10 or 16 only */ |
| 415 | s8 precision; /* # of digits/chars */ | 414 | u8 qualifier; /* number qualifier, one of 'hHlLtzZ' */ |
| 416 | u8 qualifier; | 415 | s16 field_width; /* width of output field */ |
| 416 | s16 precision; /* # of digits/chars */ | ||
| 417 | }; | 417 | }; |
| 418 | 418 | ||
| 419 | static char *number(char *buf, char *end, unsigned long long num, | 419 | static char *number(char *buf, char *end, unsigned long long num, |
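The reordering above is a packing cleanup: grouping the four u8 fields ahead of the two s16 fields lets the spec fit in 8 bytes on common ABIs, so it stays cheap to copy around. A hedged compile-time check one could place next to the definition inside vsprintf.c (BUILD_BUG_ON must sit inside a function).

#include <linux/kernel.h>

static void printf_spec_layout_check(void)
{
        /* Illustrative only: assert the compact 8-byte layout. */
        BUILD_BUG_ON(sizeof(struct printf_spec) != 8);
}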
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 0e8ca0347707..f13e067e1467 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c | |||
| @@ -227,6 +227,9 @@ static struct device_attribute bdi_dev_attrs[] = { | |||
| 227 | static __init int bdi_class_init(void) | 227 | static __init int bdi_class_init(void) |
| 228 | { | 228 | { |
| 229 | bdi_class = class_create(THIS_MODULE, "bdi"); | 229 | bdi_class = class_create(THIS_MODULE, "bdi"); |
| 230 | if (IS_ERR(bdi_class)) | ||
| 231 | return PTR_ERR(bdi_class); | ||
| 232 | |||
| 230 | bdi_class->dev_attrs = bdi_dev_attrs; | 233 | bdi_class->dev_attrs = bdi_dev_attrs; |
| 231 | bdi_debug_init(); | 234 | bdi_debug_init(); |
| 232 | return 0; | 235 | return 0; |
diff --git a/mm/bootmem.c b/mm/bootmem.c index eff224220571..58c66cc5056a 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c | |||
| @@ -304,9 +304,22 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat) | |||
| 304 | unsigned long __init free_all_bootmem(void) | 304 | unsigned long __init free_all_bootmem(void) |
| 305 | { | 305 | { |
| 306 | #ifdef CONFIG_NO_BOOTMEM | 306 | #ifdef CONFIG_NO_BOOTMEM |
| 307 | return free_all_memory_core_early(NODE_DATA(0)->node_id); | 307 | /* |
| 308 | * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id | ||
| 309 | * because in some cases node 0 has no RAM installed and the low | ||
| 310 | * memory then lives on node 1. | ||
| 311 | * Using MAX_NUMNODES makes sure all ranges in early_node_map[] | ||
| 312 | * are used, not only those related to node 0. | ||
| 313 | */ | ||
| 314 | return free_all_memory_core_early(MAX_NUMNODES); | ||
| 308 | #else | 315 | #else |
| 309 | return free_all_bootmem_core(NODE_DATA(0)->bdata); | 316 | unsigned long total_pages = 0; |
| 317 | bootmem_data_t *bdata; | ||
| 318 | |||
| 319 | list_for_each_entry(bdata, &bdata_list, list) | ||
| 320 | total_pages += free_all_bootmem_core(bdata); | ||
| 321 | |||
| 322 | return total_pages; | ||
| 310 | #endif | 323 | #endif |
| 311 | } | 324 | } |
| 312 | 325 | ||
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9ed760dc7448..f4ede99c8b9b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -1359,16 +1359,19 @@ void mem_cgroup_update_file_mapped(struct page *page, int val) | |||
| 1359 | 1359 | ||
| 1360 | lock_page_cgroup(pc); | 1360 | lock_page_cgroup(pc); |
| 1361 | mem = pc->mem_cgroup; | 1361 | mem = pc->mem_cgroup; |
| 1362 | if (!mem) | 1362 | if (!mem || !PageCgroupUsed(pc)) |
| 1363 | goto done; | ||
| 1364 | |||
| 1365 | if (!PageCgroupUsed(pc)) | ||
| 1366 | goto done; | 1363 | goto done; |
| 1367 | 1364 | ||
| 1368 | /* | 1365 | /* |
| 1369 | * Preemption is already disabled. We can use __this_cpu_xxx | 1366 | * Preemption is already disabled. We can use __this_cpu_xxx |
| 1370 | */ | 1367 | */ |
| 1371 | __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], val); | 1368 | if (val > 0) { |
| 1369 | __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); | ||
| 1370 | SetPageCgroupFileMapped(pc); | ||
| 1371 | } else { | ||
| 1372 | __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); | ||
| 1373 | ClearPageCgroupFileMapped(pc); | ||
| 1374 | } | ||
| 1372 | 1375 | ||
| 1373 | done: | 1376 | done: |
| 1374 | unlock_page_cgroup(pc); | 1377 | unlock_page_cgroup(pc); |
| @@ -1801,16 +1804,13 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, | |||
| 1801 | static void __mem_cgroup_move_account(struct page_cgroup *pc, | 1804 | static void __mem_cgroup_move_account(struct page_cgroup *pc, |
| 1802 | struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge) | 1805 | struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge) |
| 1803 | { | 1806 | { |
| 1804 | struct page *page; | ||
| 1805 | |||
| 1806 | VM_BUG_ON(from == to); | 1807 | VM_BUG_ON(from == to); |
| 1807 | VM_BUG_ON(PageLRU(pc->page)); | 1808 | VM_BUG_ON(PageLRU(pc->page)); |
| 1808 | VM_BUG_ON(!PageCgroupLocked(pc)); | 1809 | VM_BUG_ON(!PageCgroupLocked(pc)); |
| 1809 | VM_BUG_ON(!PageCgroupUsed(pc)); | 1810 | VM_BUG_ON(!PageCgroupUsed(pc)); |
| 1810 | VM_BUG_ON(pc->mem_cgroup != from); | 1811 | VM_BUG_ON(pc->mem_cgroup != from); |
| 1811 | 1812 | ||
| 1812 | page = pc->page; | 1813 | if (PageCgroupFileMapped(pc)) { |
| 1813 | if (page_mapped(page) && !PageAnon(page)) { | ||
| 1814 | /* Update mapped_file data for mem_cgroup */ | 1814 | /* Update mapped_file data for mem_cgroup */ |
| 1815 | preempt_disable(); | 1815 | preempt_disable(); |
| 1816 | __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); | 1816 | __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); |
diff --git a/mm/memory.c b/mm/memory.c index 1d2ea39260e5..833952d8b74d 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -125,13 +125,12 @@ core_initcall(init_zero_pfn); | |||
| 125 | 125 | ||
| 126 | #if defined(SPLIT_RSS_COUNTING) | 126 | #if defined(SPLIT_RSS_COUNTING) |
| 127 | 127 | ||
| 128 | void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm) | 128 | static void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm) |
| 129 | { | 129 | { |
| 130 | int i; | 130 | int i; |
| 131 | 131 | ||
| 132 | for (i = 0; i < NR_MM_COUNTERS; i++) { | 132 | for (i = 0; i < NR_MM_COUNTERS; i++) { |
| 133 | if (task->rss_stat.count[i]) { | 133 | if (task->rss_stat.count[i]) { |
| 134 | BUG_ON(!mm); | ||
| 135 | add_mm_counter(mm, i, task->rss_stat.count[i]); | 134 | add_mm_counter(mm, i, task->rss_stat.count[i]); |
| 136 | task->rss_stat.count[i] = 0; | 135 | task->rss_stat.count[i] = 0; |
| 137 | } | 136 | } |
| @@ -507,11 +507,12 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start, | |||
| 507 | struct address_space *mapping = NULL; | 507 | struct address_space *mapping = NULL; |
| 508 | struct prio_tree_root *root = NULL; | 508 | struct prio_tree_root *root = NULL; |
| 509 | struct file *file = vma->vm_file; | 509 | struct file *file = vma->vm_file; |
| 510 | struct anon_vma *anon_vma = NULL; | ||
| 511 | long adjust_next = 0; | 510 | long adjust_next = 0; |
| 512 | int remove_next = 0; | 511 | int remove_next = 0; |
| 513 | 512 | ||
| 514 | if (next && !insert) { | 513 | if (next && !insert) { |
| 514 | struct vm_area_struct *exporter = NULL; | ||
| 515 | |||
| 515 | if (end >= next->vm_end) { | 516 | if (end >= next->vm_end) { |
| 516 | /* | 517 | /* |
| 517 | * vma expands, overlapping all the next, and | 518 | * vma expands, overlapping all the next, and |
| @@ -519,7 +520,7 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start, | |||
| 519 | */ | 520 | */ |
| 520 | again: remove_next = 1 + (end > next->vm_end); | 521 | again: remove_next = 1 + (end > next->vm_end); |
| 521 | end = next->vm_end; | 522 | end = next->vm_end; |
| 522 | anon_vma = next->anon_vma; | 523 | exporter = next; |
| 523 | importer = vma; | 524 | importer = vma; |
| 524 | } else if (end > next->vm_start) { | 525 | } else if (end > next->vm_start) { |
| 525 | /* | 526 | /* |
| @@ -527,7 +528,7 @@ again: remove_next = 1 + (end > next->vm_end); | |||
| 527 | * mprotect case 5 shifting the boundary up. | 528 | * mprotect case 5 shifting the boundary up. |
| 528 | */ | 529 | */ |
| 529 | adjust_next = (end - next->vm_start) >> PAGE_SHIFT; | 530 | adjust_next = (end - next->vm_start) >> PAGE_SHIFT; |
| 530 | anon_vma = next->anon_vma; | 531 | exporter = next; |
| 531 | importer = vma; | 532 | importer = vma; |
| 532 | } else if (end < vma->vm_end) { | 533 | } else if (end < vma->vm_end) { |
| 533 | /* | 534 | /* |
| @@ -536,28 +537,19 @@ again: remove_next = 1 + (end > next->vm_end); | |||
| 536 | * mprotect case 4 shifting the boundary down. | 537 | * mprotect case 4 shifting the boundary down. |
| 537 | */ | 538 | */ |
| 538 | adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT); | 539 | adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT); |
| 539 | anon_vma = next->anon_vma; | 540 | exporter = vma; |
| 540 | importer = next; | 541 | importer = next; |
| 541 | } | 542 | } |
| 542 | } | ||
| 543 | 543 | ||
| 544 | /* | ||
| 545 | * When changing only vma->vm_end, we don't really need anon_vma lock. | ||
| 546 | */ | ||
| 547 | if (vma->anon_vma && (insert || importer || start != vma->vm_start)) | ||
| 548 | anon_vma = vma->anon_vma; | ||
| 549 | if (anon_vma) { | ||
| 550 | /* | 544 | /* |
| 551 | * Easily overlooked: when mprotect shifts the boundary, | 545 | * Easily overlooked: when mprotect shifts the boundary, |
| 552 | * make sure the expanding vma has anon_vma set if the | 546 | * make sure the expanding vma has anon_vma set if the |
| 553 | * shrinking vma had, to cover any anon pages imported. | 547 | * shrinking vma had, to cover any anon pages imported. |
| 554 | */ | 548 | */ |
| 555 | if (importer && !importer->anon_vma) { | 549 | if (exporter && exporter->anon_vma && !importer->anon_vma) { |
| 556 | /* Block reverse map lookups until things are set up. */ | 550 | if (anon_vma_clone(importer, exporter)) |
| 557 | if (anon_vma_clone(importer, vma)) { | ||
| 558 | return -ENOMEM; | 551 | return -ENOMEM; |
| 559 | } | 552 | importer->anon_vma = exporter->anon_vma; |
| 560 | importer->anon_vma = anon_vma; | ||
| 561 | } | 553 | } |
| 562 | } | 554 | } |
| 563 | 555 | ||
| @@ -825,6 +817,61 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, | |||
| 825 | } | 817 | } |
| 826 | 818 | ||
| 827 | /* | 819 | /* |
| 820 | * Rough compatibility check to quickly see if it's even worth looking | ||
| 821 | * at sharing an anon_vma. | ||
| 822 | * | ||
| 823 | * They need to have the same vm_file, and the flags can only differ | ||
| 824 | * in things that mprotect may change. | ||
| 825 | * | ||
| 826 | * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that | ||
| 827 | * we can merge the two vma's. For example, we refuse to merge a vma if | ||
| 828 | * there is a vm_ops->close() function, because that indicates that the | ||
| 829 | * driver is doing some kind of reference counting. But that doesn't | ||
| 830 | * really matter for the anon_vma sharing case. | ||
| 831 | */ | ||
| 832 | static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) | ||
| 833 | { | ||
| 834 | return a->vm_end == b->vm_start && | ||
| 835 | mpol_equal(vma_policy(a), vma_policy(b)) && | ||
| 836 | a->vm_file == b->vm_file && | ||
| 837 | !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) && | ||
| 838 | b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); | ||
| 839 | } | ||
| 840 | |||
| 841 | /* | ||
| 842 | * Do some basic sanity checking to see if we can re-use the anon_vma | ||
| 843 | * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be | ||
| 844 | * the same as 'old', the other will be the new one that is trying | ||
| 845 | * to share the anon_vma. | ||
| 846 | * | ||
| 847 | * NOTE! This runs with mm_sem held for reading, so it is possible that | ||
| 848 | * the anon_vma of 'old' is concurrently in the process of being set up | ||
| 849 | * by another page fault trying to merge _that_. But that's ok: if it | ||
| 850 | * is being set up, that automatically means that it will be a singleton | ||
| 851 | * acceptable for merging, so we can do all of this optimistically. But | ||
| 852 | * we do that ACCESS_ONCE() to make sure that we never re-load the pointer. | ||
| 853 | * | ||
| 854 | * IOW: that the "list_is_singular()" test on the anon_vma_chain only | ||
| 855 | * matters for the 'stable anon_vma' case (ie the thing we want to avoid | ||
| 856 | * is to return an anon_vma that is "complex" due to having gone through | ||
| 857 | * a fork). | ||
| 858 | * | ||
| 859 | * We also make sure that the two vma's are compatible (adjacent, | ||
| 860 | * and with the same memory policies). That's all stable, even with just | ||
| 861 | * a read lock on the mm_sem. | ||
| 862 | */ | ||
| 863 | static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b) | ||
| 864 | { | ||
| 865 | if (anon_vma_compatible(a, b)) { | ||
| 866 | struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma); | ||
| 867 | |||
| 868 | if (anon_vma && list_is_singular(&old->anon_vma_chain)) | ||
| 869 | return anon_vma; | ||
| 870 | } | ||
| 871 | return NULL; | ||
| 872 | } | ||
| 873 | |||
| 874 | /* | ||
| 828 | * find_mergeable_anon_vma is used by anon_vma_prepare, to check | 875 | * find_mergeable_anon_vma is used by anon_vma_prepare, to check |
| 829 | * neighbouring vmas for a suitable anon_vma, before it goes off | 876 | * neighbouring vmas for a suitable anon_vma, before it goes off |
| 830 | * to allocate a new anon_vma. It checks because a repetitive | 877 | * to allocate a new anon_vma. It checks because a repetitive |
| @@ -834,28 +881,16 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, | |||
| 834 | */ | 881 | */ |
| 835 | struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) | 882 | struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) |
| 836 | { | 883 | { |
| 884 | struct anon_vma *anon_vma; | ||
| 837 | struct vm_area_struct *near; | 885 | struct vm_area_struct *near; |
| 838 | unsigned long vm_flags; | ||
| 839 | 886 | ||
| 840 | near = vma->vm_next; | 887 | near = vma->vm_next; |
| 841 | if (!near) | 888 | if (!near) |
| 842 | goto try_prev; | 889 | goto try_prev; |
| 843 | 890 | ||
| 844 | /* | 891 | anon_vma = reusable_anon_vma(near, vma, near); |
| 845 | * Since only mprotect tries to remerge vmas, match flags | 892 | if (anon_vma) |
| 846 | * which might be mprotected into each other later on. | 893 | return anon_vma; |
| 847 | * Neither mlock nor madvise tries to remerge at present, | ||
| 848 | * so leave their flags as obstructing a merge. | ||
| 849 | */ | ||
| 850 | vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC); | ||
| 851 | vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC); | ||
| 852 | |||
| 853 | if (near->anon_vma && vma->vm_end == near->vm_start && | ||
| 854 | mpol_equal(vma_policy(vma), vma_policy(near)) && | ||
| 855 | can_vma_merge_before(near, vm_flags, | ||
| 856 | NULL, vma->vm_file, vma->vm_pgoff + | ||
| 857 | ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT))) | ||
| 858 | return near->anon_vma; | ||
| 859 | try_prev: | 894 | try_prev: |
| 860 | /* | 895 | /* |
| 861 | * It is potentially slow to have to call find_vma_prev here. | 896 | * It is potentially slow to have to call find_vma_prev here. |
| @@ -868,14 +903,9 @@ try_prev: | |||
| 868 | if (!near) | 903 | if (!near) |
| 869 | goto none; | 904 | goto none; |
| 870 | 905 | ||
| 871 | vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC); | 906 | anon_vma = reusable_anon_vma(near, near, vma); |
| 872 | vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC); | 907 | if (anon_vma) |
| 873 | 908 | return anon_vma; | |
| 874 | if (near->anon_vma && near->vm_end == vma->vm_start && | ||
| 875 | mpol_equal(vma_policy(near), vma_policy(vma)) && | ||
| 876 | can_vma_merge_after(near, vm_flags, | ||
| 877 | NULL, vma->vm_file, vma->vm_pgoff)) | ||
| 878 | return near->anon_vma; | ||
| 879 | none: | 909 | none: |
| 880 | /* | 910 | /* |
| 881 | * There's no absolute need to look only at touching neighbours: | 911 | * There's no absolute need to look only at touching neighbours: |
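
The anon_vma reuse logic above hinges on anon_vma_compatible(): two vmas may share an anon_vma only if they are exactly adjacent and map a continuous range of the same file. Below is a minimal userspace sketch of that adjacency/offset test; the struct, the 4 KiB page size, and the toy_* names are illustrative stand-ins, not the kernel's definitions.

```c
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages for the example */

struct toy_vma {
	unsigned long vm_start;
	unsigned long vm_end;
	unsigned long vm_pgoff;	/* file offset of vm_start, in pages */
};

/* 'b' may share anon_vma state with 'a' only if it starts exactly where 'a'
 * ends and maps the file region that continues from 'a'. */
static bool toy_adjacent_and_continuous(const struct toy_vma *a,
					const struct toy_vma *b)
{
	return a->vm_end == b->vm_start &&
	       b->vm_pgoff == a->vm_pgoff +
			      ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}

int main(void)
{
	struct toy_vma a = { 0x100000, 0x104000, 10 };	/* 4 pages at file page 10 */
	struct toy_vma b = { 0x104000, 0x108000, 14 };	/* continues at file page 14 */
	struct toy_vma c = { 0x104000, 0x108000, 20 };	/* adjacent, wrong file offset */

	printf("a-b compatible: %d\n", toy_adjacent_and_continuous(&a, &b));	/* 1 */
	printf("a-c compatible: %d\n", toy_adjacent_and_continuous(&a, &c));	/* 0 */
	return 0;
}
```
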
diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 7b47a57b6646..8b1a2ce21ee5 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c | |||
| @@ -80,6 +80,37 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, | |||
| 80 | return err; | 80 | return err; |
| 81 | } | 81 | } |
| 82 | 82 | ||
| 83 | #ifdef CONFIG_HUGETLB_PAGE | ||
| 84 | static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr, | ||
| 85 | unsigned long end) | ||
| 86 | { | ||
| 87 | unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h); | ||
| 88 | return boundary < end ? boundary : end; | ||
| 89 | } | ||
| 90 | |||
| 91 | static int walk_hugetlb_range(struct vm_area_struct *vma, | ||
| 92 | unsigned long addr, unsigned long end, | ||
| 93 | struct mm_walk *walk) | ||
| 94 | { | ||
| 95 | struct hstate *h = hstate_vma(vma); | ||
| 96 | unsigned long next; | ||
| 97 | unsigned long hmask = huge_page_mask(h); | ||
| 98 | pte_t *pte; | ||
| 99 | int err = 0; | ||
| 100 | |||
| 101 | do { | ||
| 102 | next = hugetlb_entry_end(h, addr, end); | ||
| 103 | pte = huge_pte_offset(walk->mm, addr & hmask); | ||
| 104 | if (pte && walk->hugetlb_entry) | ||
| 105 | err = walk->hugetlb_entry(pte, hmask, addr, next, walk); | ||
| 106 | if (err) | ||
| 107 | return err; | ||
| 108 | } while (addr = next, addr != end); | ||
| 109 | |||
| 110 | return 0; | ||
| 111 | } | ||
| 112 | #endif | ||
| 113 | |||
| 83 | /** | 114 | /** |
| 84 | * walk_page_range - walk a memory map's page tables with a callback | 115 | * walk_page_range - walk a memory map's page tables with a callback |
| 85 | * @mm: memory map to walk | 116 | * @mm: memory map to walk |
| @@ -128,20 +159,16 @@ int walk_page_range(unsigned long addr, unsigned long end, | |||
| 128 | vma = find_vma(walk->mm, addr); | 159 | vma = find_vma(walk->mm, addr); |
| 129 | #ifdef CONFIG_HUGETLB_PAGE | 160 | #ifdef CONFIG_HUGETLB_PAGE |
| 130 | if (vma && is_vm_hugetlb_page(vma)) { | 161 | if (vma && is_vm_hugetlb_page(vma)) { |
| 131 | pte_t *pte; | ||
| 132 | struct hstate *hs; | ||
| 133 | |||
| 134 | if (vma->vm_end < next) | 162 | if (vma->vm_end < next) |
| 135 | next = vma->vm_end; | 163 | next = vma->vm_end; |
| 136 | hs = hstate_vma(vma); | 164 | /* |
| 137 | pte = huge_pte_offset(walk->mm, | 165 | * Hugepage is very tightly coupled with vma, so |
| 138 | addr & huge_page_mask(hs)); | 166 | * walk through hugetlb entries within a given vma. |
| 139 | if (pte && !huge_pte_none(huge_ptep_get(pte)) | 167 | */ |
| 140 | && walk->hugetlb_entry) | 168 | err = walk_hugetlb_range(vma, addr, next, walk); |
| 141 | err = walk->hugetlb_entry(pte, addr, | ||
| 142 | next, walk); | ||
| 143 | if (err) | 169 | if (err) |
| 144 | break; | 170 | break; |
| 171 | pgd = pgd_offset(walk->mm, next); | ||
| 145 | continue; | 172 | continue; |
| 146 | } | 173 | } |
| 147 | #endif | 174 | #endif |
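
walk_hugetlb_range() above steps through a vma one huge page at a time, clamping the final step to the range end. A small standalone sketch of that boundary arithmetic follows, assuming 2 MiB huge pages; the kernel takes the size and mask from the vma's hstate instead of a constant.

```c
#include <stdio.h>

#define HPAGE_SIZE (2UL << 20)			/* assumed 2 MiB huge pages */
#define HPAGE_MASK (~(HPAGE_SIZE - 1))

static unsigned long toy_hugetlb_entry_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr & HPAGE_MASK) + HPAGE_SIZE;
	return boundary < end ? boundary : end;
}

int main(void)
{
	unsigned long addr = 0x40123000UL, end = 0x40500000UL;

	/* Step through [addr, end) one huge page at a time, clamped to 'end'. */
	do {
		unsigned long next = toy_hugetlb_entry_end(addr, end);
		printf("entry covers [%#lx, %#lx)\n", addr, next);
		addr = next;
	} while (addr != end);
	return 0;
}
```
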
diff --git a/mm/readahead.c b/mm/readahead.c index 999b54bb462f..dfa9a1a03a11 100644 --- a/mm/readahead.c +++ b/mm/readahead.c | |||
| @@ -503,7 +503,7 @@ void page_cache_sync_readahead(struct address_space *mapping, | |||
| 503 | return; | 503 | return; |
| 504 | 504 | ||
| 505 | /* be dumb */ | 505 | /* be dumb */ |
| 506 | if (filp->f_mode & FMODE_RANDOM) { | 506 | if (filp && (filp->f_mode & FMODE_RANDOM)) { |
| 507 | force_page_cache_readahead(mapping, filp, offset, req_size); | 507 | force_page_cache_readahead(mapping, filp, offset, req_size); |
| 508 | return; | 508 | return; |
| 509 | } | 509 | } |
| @@ -182,7 +182,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) | |||
| 182 | { | 182 | { |
| 183 | struct anon_vma_chain *avc, *pavc; | 183 | struct anon_vma_chain *avc, *pavc; |
| 184 | 184 | ||
| 185 | list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) { | 185 | list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { |
| 186 | avc = anon_vma_chain_alloc(); | 186 | avc = anon_vma_chain_alloc(); |
| 187 | if (!avc) | 187 | if (!avc) |
| 188 | goto enomem_failure; | 188 | goto enomem_failure; |
| @@ -730,13 +730,29 @@ void page_move_anon_rmap(struct page *page, | |||
| 730 | * @page: the page to add the mapping to | 730 | * @page: the page to add the mapping to |
| 731 | * @vma: the vm area in which the mapping is added | 731 | * @vma: the vm area in which the mapping is added |
| 732 | * @address: the user virtual address mapped | 732 | * @address: the user virtual address mapped |
| 733 | * @exclusive: the page is exclusively owned by the current process | ||
| 733 | */ | 734 | */ |
| 734 | static void __page_set_anon_rmap(struct page *page, | 735 | static void __page_set_anon_rmap(struct page *page, |
| 735 | struct vm_area_struct *vma, unsigned long address) | 736 | struct vm_area_struct *vma, unsigned long address, int exclusive) |
| 736 | { | 737 | { |
| 737 | struct anon_vma *anon_vma = vma->anon_vma; | 738 | struct anon_vma *anon_vma = vma->anon_vma; |
| 738 | 739 | ||
| 739 | BUG_ON(!anon_vma); | 740 | BUG_ON(!anon_vma); |
| 741 | |||
| 742 | /* | ||
| 743 | * If the page isn't exclusively mapped into this vma, | ||
| 744 | * we must use the _oldest_ possible anon_vma for the | ||
| 745 | * page mapping! | ||
| 746 | * | ||
| 747 | * So take the last AVC chain entry in the vma, which is | ||
| 748 | * the deepest ancestor, and use the anon_vma from that. | ||
| 749 | */ | ||
| 750 | if (!exclusive) { | ||
| 751 | struct anon_vma_chain *avc; | ||
| 752 | avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma); | ||
| 753 | anon_vma = avc->anon_vma; | ||
| 754 | } | ||
| 755 | |||
| 740 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; | 756 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; |
| 741 | page->mapping = (struct address_space *) anon_vma; | 757 | page->mapping = (struct address_space *) anon_vma; |
| 742 | page->index = linear_page_index(vma, address); | 758 | page->index = linear_page_index(vma, address); |
| @@ -791,7 +807,7 @@ void page_add_anon_rmap(struct page *page, | |||
| 791 | VM_BUG_ON(!PageLocked(page)); | 807 | VM_BUG_ON(!PageLocked(page)); |
| 792 | VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); | 808 | VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); |
| 793 | if (first) | 809 | if (first) |
| 794 | __page_set_anon_rmap(page, vma, address); | 810 | __page_set_anon_rmap(page, vma, address, 0); |
| 795 | else | 811 | else |
| 796 | __page_check_anon_rmap(page, vma, address); | 812 | __page_check_anon_rmap(page, vma, address); |
| 797 | } | 813 | } |
| @@ -813,7 +829,7 @@ void page_add_new_anon_rmap(struct page *page, | |||
| 813 | SetPageSwapBacked(page); | 829 | SetPageSwapBacked(page); |
| 814 | atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ | 830 | atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ |
| 815 | __inc_zone_page_state(page, NR_ANON_PAGES); | 831 | __inc_zone_page_state(page, NR_ANON_PAGES); |
| 816 | __page_set_anon_rmap(page, vma, address); | 832 | __page_set_anon_rmap(page, vma, address, 1); |
| 817 | if (page_evictable(page, vma)) | 833 | if (page_evictable(page, vma)) |
| 818 | lru_cache_add_lru(page, LRU_ACTIVE_ANON); | 834 | lru_cache_add_lru(page, LRU_ACTIVE_ANON); |
| 819 | else | 835 | else |
| @@ -3602,21 +3602,10 @@ EXPORT_SYMBOL(kmem_cache_alloc_notrace); | |||
| 3602 | */ | 3602 | */ |
| 3603 | int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr) | 3603 | int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr) |
| 3604 | { | 3604 | { |
| 3605 | unsigned long addr = (unsigned long)ptr; | ||
| 3606 | unsigned long min_addr = PAGE_OFFSET; | ||
| 3607 | unsigned long align_mask = BYTES_PER_WORD - 1; | ||
| 3608 | unsigned long size = cachep->buffer_size; | 3605 | unsigned long size = cachep->buffer_size; |
| 3609 | struct page *page; | 3606 | struct page *page; |
| 3610 | 3607 | ||
| 3611 | if (unlikely(addr < min_addr)) | 3608 | if (unlikely(!kern_ptr_validate(ptr, size))) |
| 3612 | goto out; | ||
| 3613 | if (unlikely(addr > (unsigned long)high_memory - size)) | ||
| 3614 | goto out; | ||
| 3615 | if (unlikely(addr & align_mask)) | ||
| 3616 | goto out; | ||
| 3617 | if (unlikely(!kern_addr_valid(addr))) | ||
| 3618 | goto out; | ||
| 3619 | if (unlikely(!kern_addr_valid(addr + size - 1))) | ||
| 3620 | goto out; | 3609 | goto out; |
| 3621 | page = virt_to_page(ptr); | 3610 | page = virt_to_page(ptr); |
| 3622 | if (unlikely(!PageSlab(page))) | 3611 | if (unlikely(!PageSlab(page))) |
| @@ -2386,6 +2386,9 @@ int kmem_ptr_validate(struct kmem_cache *s, const void *object) | |||
| 2386 | { | 2386 | { |
| 2387 | struct page *page; | 2387 | struct page *page; |
| 2388 | 2388 | ||
| 2389 | if (!kern_ptr_validate(object, s->size)) | ||
| 2390 | return 0; | ||
| 2391 | |||
| 2389 | page = get_object_page(object); | 2392 | page = get_object_page(object); |
| 2390 | 2393 | ||
| 2391 | if (!page || s != page->slab) | 2394 | if (!page || s != page->slab) |
| @@ -186,6 +186,27 @@ void kzfree(const void *p) | |||
| 186 | } | 186 | } |
| 187 | EXPORT_SYMBOL(kzfree); | 187 | EXPORT_SYMBOL(kzfree); |
| 188 | 188 | ||
| 189 | int kern_ptr_validate(const void *ptr, unsigned long size) | ||
| 190 | { | ||
| 191 | unsigned long addr = (unsigned long)ptr; | ||
| 192 | unsigned long min_addr = PAGE_OFFSET; | ||
| 193 | unsigned long align_mask = sizeof(void *) - 1; | ||
| 194 | |||
| 195 | if (unlikely(addr < min_addr)) | ||
| 196 | goto out; | ||
| 197 | if (unlikely(addr > (unsigned long)high_memory - size)) | ||
| 198 | goto out; | ||
| 199 | if (unlikely(addr & align_mask)) | ||
| 200 | goto out; | ||
| 201 | if (unlikely(!kern_addr_valid(addr))) | ||
| 202 | goto out; | ||
| 203 | if (unlikely(!kern_addr_valid(addr + size - 1))) | ||
| 204 | goto out; | ||
| 205 | return 1; | ||
| 206 | out: | ||
| 207 | return 0; | ||
| 208 | } | ||
| 209 | |||
| 189 | /* | 210 | /* |
| 190 | * strndup_user - duplicate an existing string from user space | 211 | * strndup_user - duplicate an existing string from user space |
| 191 | * @s: The string to duplicate | 212 | * @s: The string to duplicate |
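
The slab, slub and util.c hunks above consolidate the open-coded pointer checks from the two kmem_ptr_validate() implementations into a shared kern_ptr_validate() helper that rejects out-of-range or misaligned pointers. Below is a hedged userspace sketch of the same range-and-alignment test; toy_ptr_validate() and the explicit bounds parameters are stand-ins for PAGE_OFFSET, high_memory and kern_addr_valid(), which have no userspace equivalent.

```c
#include <stdio.h>
#include <stdint.h>

/* Range and alignment checks only; the kernel helper additionally consults
 * kern_addr_valid() on both ends of the object. */
static int toy_ptr_validate(const void *ptr, unsigned long size,
			    uintptr_t min_addr, uintptr_t max_addr)
{
	uintptr_t addr = (uintptr_t)ptr;
	uintptr_t align_mask = sizeof(void *) - 1;

	if (addr < min_addr)
		return 0;		/* below the valid range */
	if (addr > max_addr - size)
		return 0;		/* object would run past the end */
	if (addr & align_mask)
		return 0;		/* not word aligned */
	return 1;
}

int main(void)
{
	long buf[8];
	uintptr_t lo = (uintptr_t)buf;
	uintptr_t hi = lo + sizeof(buf);

	/* In range and word aligned: accepted. */
	printf("%d\n", toy_ptr_validate(buf, sizeof(buf), lo, hi));
	/* Shifted by one byte: misaligned and overruns the range: rejected. */
	printf("%d\n", toy_ptr_validate((const char *)buf + 1, sizeof(buf), lo, hi));
	return 0;
}
```
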
diff --git a/mm/vmscan.c b/mm/vmscan.c index e0e5f15bb726..3ff3311447f5 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
| @@ -1535,13 +1535,6 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, | |||
| 1535 | unsigned long ap, fp; | 1535 | unsigned long ap, fp; |
| 1536 | struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); | 1536 | struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); |
| 1537 | 1537 | ||
| 1538 | /* If we have no swap space, do not bother scanning anon pages. */ | ||
| 1539 | if (!sc->may_swap || (nr_swap_pages <= 0)) { | ||
| 1540 | percent[0] = 0; | ||
| 1541 | percent[1] = 100; | ||
| 1542 | return; | ||
| 1543 | } | ||
| 1544 | |||
| 1545 | anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + | 1538 | anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + |
| 1546 | zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); | 1539 | zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); |
| 1547 | file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + | 1540 | file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + |
| @@ -1639,20 +1632,22 @@ static void shrink_zone(int priority, struct zone *zone, | |||
| 1639 | unsigned long nr_reclaimed = sc->nr_reclaimed; | 1632 | unsigned long nr_reclaimed = sc->nr_reclaimed; |
| 1640 | unsigned long nr_to_reclaim = sc->nr_to_reclaim; | 1633 | unsigned long nr_to_reclaim = sc->nr_to_reclaim; |
| 1641 | struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); | 1634 | struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); |
| 1635 | int noswap = 0; | ||
| 1642 | 1636 | ||
| 1643 | get_scan_ratio(zone, sc, percent); | 1637 | /* If we have no swap space, do not bother scanning anon pages. */ |
| 1638 | if (!sc->may_swap || (nr_swap_pages <= 0)) { | ||
| 1639 | noswap = 1; | ||
| 1640 | percent[0] = 0; | ||
| 1641 | percent[1] = 100; | ||
| 1642 | } else | ||
| 1643 | get_scan_ratio(zone, sc, percent); | ||
| 1644 | 1644 | ||
| 1645 | for_each_evictable_lru(l) { | 1645 | for_each_evictable_lru(l) { |
| 1646 | int file = is_file_lru(l); | 1646 | int file = is_file_lru(l); |
| 1647 | unsigned long scan; | 1647 | unsigned long scan; |
| 1648 | 1648 | ||
| 1649 | if (percent[file] == 0) { | ||
| 1650 | nr[l] = 0; | ||
| 1651 | continue; | ||
| 1652 | } | ||
| 1653 | |||
| 1654 | scan = zone_nr_lru_pages(zone, sc, l); | 1649 | scan = zone_nr_lru_pages(zone, sc, l); |
| 1655 | if (priority) { | 1650 | if (priority || noswap) { |
| 1656 | scan >>= priority; | 1651 | scan >>= priority; |
| 1657 | scan = (scan * percent[file]) / 100; | 1652 | scan = (scan * percent[file]) / 100; |
| 1658 | } | 1653 | } |
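
With the vmscan change above, the no-swap case is no longer special-cased out of shrink_zone(): it feeds percent[anon] = 0 into the same per-LRU scan-target formula, and forces the scaling even at priority 0 so the anon lists come out as zero. A small numeric sketch of that calculation, with made-up page counts:

```c
#include <stdio.h>

/* Same shape as the shrink_zone() calculation above: shift by priority,
 * then take this LRU's percentage share. */
static unsigned long scan_target(unsigned long lru_pages, int priority,
				 unsigned int percent, int noswap)
{
	unsigned long scan = lru_pages;

	if (priority || noswap) {
		scan >>= priority;
		scan = (scan * percent) / 100;
	}
	return scan;
}

int main(void)
{
	/* 1,000,000 anon pages with no swap: percent = 0, so nothing is scanned. */
	printf("anon: %lu\n", scan_target(1000000, 2, 0, 1));
	/* 800,000 file pages at the same priority keep their full 100 percent share. */
	printf("file: %lu\n", scan_target(800000, 2, 100, 1));
	return 0;
}
```
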
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 7794a2e2adce..99d68c34e4f1 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
| @@ -1002,7 +1002,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al | |||
| 1002 | 1002 | ||
| 1003 | BT_DBG("sk %p", sk); | 1003 | BT_DBG("sk %p", sk); |
| 1004 | 1004 | ||
| 1005 | if (!addr || addr->sa_family != AF_BLUETOOTH) | 1005 | if (!addr || alen < sizeof(addr->sa_family) || |
| 1006 | addr->sa_family != AF_BLUETOOTH) | ||
| 1006 | return -EINVAL; | 1007 | return -EINVAL; |
| 1007 | 1008 | ||
| 1008 | memset(&la, 0, sizeof(la)); | 1009 | memset(&la, 0, sizeof(la)); |
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 7f439765403d..8ed3c37684fa 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
| @@ -397,7 +397,8 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a | |||
| 397 | 397 | ||
| 398 | BT_DBG("sk %p", sk); | 398 | BT_DBG("sk %p", sk); |
| 399 | 399 | ||
| 400 | if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_rc)) | 400 | if (alen < sizeof(struct sockaddr_rc) || |
| 401 | addr->sa_family != AF_BLUETOOTH) | ||
| 401 | return -EINVAL; | 402 | return -EINVAL; |
| 402 | 403 | ||
| 403 | lock_sock(sk); | 404 | lock_sock(sk); |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index e5b16b76b22e..ca6b2ad1c3fc 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
| @@ -499,7 +499,8 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen | |||
| 499 | 499 | ||
| 500 | BT_DBG("sk %p", sk); | 500 | BT_DBG("sk %p", sk); |
| 501 | 501 | ||
| 502 | if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_sco)) | 502 | if (alen < sizeof(struct sockaddr_sco) || |
| 503 | addr->sa_family != AF_BLUETOOTH) | ||
| 503 | return -EINVAL; | 504 | return -EINVAL; |
| 504 | 505 | ||
| 505 | if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) | 506 | if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) |
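
The three Bluetooth connect() fixes above, like the CAN, IEEE 802.15.4, IPv4 and netlink ones further down, converge on one rule: validate the caller-supplied sockaddr length before reading sa_family or interpreting the buffer as a protocol-specific address. A minimal userspace sketch of that ordering follows; toy_connect() and toy_proto_addr are invented for illustration and are not the kernel's types.

```c
#include <stdio.h>
#include <errno.h>
#include <sys/socket.h>

struct toy_proto_addr {			/* invented protocol address layout */
	sa_family_t	family;
	unsigned char	channel;
	unsigned char	bdaddr[6];
};

static int toy_connect(const struct sockaddr *uaddr, int alen)
{
	/* Too short to even hold sa_family: reject before reading the field. */
	if (alen < (int)sizeof(uaddr->sa_family))
		return -EINVAL;
	/* Shorter than the protocol's own sockaddr: reject as well. */
	if (alen < (int)sizeof(struct toy_proto_addr))
		return -EINVAL;
	if (uaddr->sa_family != AF_BLUETOOTH)
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct toy_proto_addr a = { .family = AF_BLUETOOTH, .channel = 1 };

	printf("%d\n", toy_connect((const struct sockaddr *)&a, sizeof(a)));	/* 0 */
	printf("%d\n", toy_connect((const struct sockaddr *)&a, 1));		/* -EINVAL */
	return 0;
}
```
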
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 6980625537ca..f29ada827a6a 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
| @@ -723,7 +723,7 @@ static int br_multicast_igmp3_report(struct net_bridge *br, | |||
| 723 | if (!pskb_may_pull(skb, len)) | 723 | if (!pskb_may_pull(skb, len)) |
| 724 | return -EINVAL; | 724 | return -EINVAL; |
| 725 | 725 | ||
| 726 | grec = (void *)(skb->data + len); | 726 | grec = (void *)(skb->data + len - sizeof(*grec)); |
| 727 | group = grec->grec_mca; | 727 | group = grec->grec_mca; |
| 728 | type = grec->grec_type; | 728 | type = grec->grec_type; |
| 729 | 729 | ||
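
The br_multicast fix above is pure pointer arithmetic: by the time the record has been validated, len already includes the record itself, so the record starts sizeof(*grec) bytes before skb->data + len rather than at skb->data + len. A worked userspace example with an invented record layout:

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct toy_grec {			/* invented record layout */
	uint8_t		grec_type;
	uint8_t		grec_auxwords;
	uint16_t	grec_nsrcs;
	uint32_t	grec_mca;
};

int main(void)
{
	uint8_t data[64] = {0};
	size_t len = 16;		/* bytes already accounted for */
	struct toy_grec in = { .grec_type = 1, .grec_mca = 0xe0000001u };
	struct toy_grec out;

	len += sizeof(in);		/* the record is now included in 'len' */
	memcpy(data + len - sizeof(in), &in, sizeof(in));

	/* data + len points just past the record; step back by its size. */
	memcpy(&out, data + len - sizeof(out), sizeof(out));
	printf("type=%u mca=%#x\n", out.grec_type, (unsigned int)out.grec_mca);
	return 0;
}
```
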
diff --git a/net/can/bcm.c b/net/can/bcm.c index a2dee522b43e..907dc871fac8 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
| @@ -1479,6 +1479,9 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, | |||
| 1479 | struct sock *sk = sock->sk; | 1479 | struct sock *sk = sock->sk; |
| 1480 | struct bcm_sock *bo = bcm_sk(sk); | 1480 | struct bcm_sock *bo = bcm_sk(sk); |
| 1481 | 1481 | ||
| 1482 | if (len < sizeof(*addr)) | ||
| 1483 | return -EINVAL; | ||
| 1484 | |||
| 1482 | if (bo->bound) | 1485 | if (bo->bound) |
| 1483 | return -EISCONN; | 1486 | return -EISCONN; |
| 1484 | 1487 | ||
diff --git a/net/can/raw.c b/net/can/raw.c index 3a7dffb6519c..da99cf153b33 100644 --- a/net/can/raw.c +++ b/net/can/raw.c | |||
| @@ -445,7 +445,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, | |||
| 445 | return -EFAULT; | 445 | return -EFAULT; |
| 446 | } | 446 | } |
| 447 | } else if (count == 1) { | 447 | } else if (count == 1) { |
| 448 | if (copy_from_user(&sfilter, optval, optlen)) | 448 | if (copy_from_user(&sfilter, optval, sizeof(sfilter))) |
| 449 | return -EFAULT; | 449 | return -EFAULT; |
| 450 | } | 450 | } |
| 451 | 451 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index 1c8a0ce473a8..92584bfef09b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -1989,8 +1989,12 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, | |||
| 1989 | if (dev->real_num_tx_queues > 1) | 1989 | if (dev->real_num_tx_queues > 1) |
| 1990 | queue_index = skb_tx_hash(dev, skb); | 1990 | queue_index = skb_tx_hash(dev, skb); |
| 1991 | 1991 | ||
| 1992 | if (sk && sk->sk_dst_cache) | 1992 | if (sk) { |
| 1993 | sk_tx_queue_set(sk, queue_index); | 1993 | struct dst_entry *dst = rcu_dereference(sk->sk_dst_cache); |
| 1994 | |||
| 1995 | if (dst && skb_dst(skb) == dst) | ||
| 1996 | sk_tx_queue_set(sk, queue_index); | ||
| 1997 | } | ||
| 1994 | } | 1998 | } |
| 1995 | } | 1999 | } |
| 1996 | 2000 | ||
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c index 79886d546bab..c7da600750bb 100644 --- a/net/ieee802154/af_ieee802154.c +++ b/net/ieee802154/af_ieee802154.c | |||
| @@ -127,6 +127,9 @@ static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr, | |||
| 127 | { | 127 | { |
| 128 | struct sock *sk = sock->sk; | 128 | struct sock *sk = sock->sk; |
| 129 | 129 | ||
| 130 | if (addr_len < sizeof(uaddr->sa_family)) | ||
| 131 | return -EINVAL; | ||
| 132 | |||
| 130 | if (uaddr->sa_family == AF_UNSPEC) | 133 | if (uaddr->sa_family == AF_UNSPEC) |
| 131 | return sk->sk_prot->disconnect(sk, flags); | 134 | return sk->sk_prot->disconnect(sk, flags); |
| 132 | 135 | ||
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 2ed85714540f..f71357422380 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
| @@ -531,6 +531,8 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr, | |||
| 531 | { | 531 | { |
| 532 | struct sock *sk = sock->sk; | 532 | struct sock *sk = sock->sk; |
| 533 | 533 | ||
| 534 | if (addr_len < sizeof(uaddr->sa_family)) | ||
| 535 | return -EINVAL; | ||
| 534 | if (uaddr->sa_family == AF_UNSPEC) | 536 | if (uaddr->sa_family == AF_UNSPEC) |
| 535 | return sk->sk_prot->disconnect(sk, flags); | 537 | return sk->sk_prot->disconnect(sk, flags); |
| 536 | 538 | ||
| @@ -574,6 +576,9 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, | |||
| 574 | int err; | 576 | int err; |
| 575 | long timeo; | 577 | long timeo; |
| 576 | 578 | ||
| 579 | if (addr_len < sizeof(uaddr->sa_family)) | ||
| 580 | return -EINVAL; | ||
| 581 | |||
| 577 | lock_sock(sk); | 582 | lock_sock(sk); |
| 578 | 583 | ||
| 579 | if (uaddr->sa_family == AF_UNSPEC) { | 584 | if (uaddr->sa_family == AF_UNSPEC) { |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 59a838795e3e..c98f115fb0fd 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
| @@ -209,7 +209,9 @@ static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i) | |||
| 209 | { | 209 | { |
| 210 | struct node *ret = tnode_get_child(tn, i); | 210 | struct node *ret = tnode_get_child(tn, i); |
| 211 | 211 | ||
| 212 | return rcu_dereference(ret); | 212 | return rcu_dereference_check(ret, |
| 213 | rcu_read_lock_held() || | ||
| 214 | lockdep_rtnl_is_held()); | ||
| 213 | } | 215 | } |
| 214 | 216 | ||
| 215 | static inline int tnode_child_length(const struct tnode *tn) | 217 | static inline int tnode_child_length(const struct tnode *tn) |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index c65f18e0936e..d1bcc9f21d4f 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
| @@ -120,7 +120,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb) | |||
| 120 | newskb->pkt_type = PACKET_LOOPBACK; | 120 | newskb->pkt_type = PACKET_LOOPBACK; |
| 121 | newskb->ip_summed = CHECKSUM_UNNECESSARY; | 121 | newskb->ip_summed = CHECKSUM_UNNECESSARY; |
| 122 | WARN_ON(!skb_dst(newskb)); | 122 | WARN_ON(!skb_dst(newskb)); |
| 123 | netif_rx(newskb); | 123 | netif_rx_ni(newskb); |
| 124 | return 0; | 124 | return 0; |
| 125 | } | 125 | } |
| 126 | 126 | ||
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 7a1f1d78893f..0f8caf64caa3 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
| @@ -1369,6 +1369,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, | |||
| 1369 | sk_eat_skb(sk, skb, 0); | 1369 | sk_eat_skb(sk, skb, 0); |
| 1370 | if (!desc->count) | 1370 | if (!desc->count) |
| 1371 | break; | 1371 | break; |
| 1372 | tp->copied_seq = seq; | ||
| 1372 | } | 1373 | } |
| 1373 | tp->copied_seq = seq; | 1374 | tp->copied_seq = seq; |
| 1374 | 1375 | ||
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 954bbfb39dff..8fef859db35d 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
| @@ -472,8 +472,8 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, | |||
| 472 | if (hslot->count < hslot2->count) | 472 | if (hslot->count < hslot2->count) |
| 473 | goto begin; | 473 | goto begin; |
| 474 | 474 | ||
| 475 | result = udp4_lib_lookup2(net, INADDR_ANY, sport, | 475 | result = udp4_lib_lookup2(net, saddr, sport, |
| 476 | daddr, hnum, dif, | 476 | INADDR_ANY, hnum, dif, |
| 477 | hslot2, slot2); | 477 | hslot2, slot2); |
| 478 | } | 478 | } |
| 479 | rcu_read_unlock(); | 479 | rcu_read_unlock(); |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 16c4391f952b..65f9c379df38 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -108,7 +108,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb) | |||
| 108 | newskb->ip_summed = CHECKSUM_UNNECESSARY; | 108 | newskb->ip_summed = CHECKSUM_UNNECESSARY; |
| 109 | WARN_ON(!skb_dst(newskb)); | 109 | WARN_ON(!skb_dst(newskb)); |
| 110 | 110 | ||
| 111 | netif_rx(newskb); | 111 | netif_rx_ni(newskb); |
| 112 | return 0; | 112 | return 0; |
| 113 | } | 113 | } |
| 114 | 114 | ||
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index c177aea88c0b..90824852f598 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
| @@ -259,8 +259,8 @@ static struct sock *__udp6_lib_lookup(struct net *net, | |||
| 259 | if (hslot->count < hslot2->count) | 259 | if (hslot->count < hslot2->count) |
| 260 | goto begin; | 260 | goto begin; |
| 261 | 261 | ||
| 262 | result = udp6_lib_lookup2(net, &in6addr_any, sport, | 262 | result = udp6_lib_lookup2(net, saddr, sport, |
| 263 | daddr, hnum, dif, | 263 | &in6addr_any, hnum, dif, |
| 264 | hslot2, slot2); | 264 | hslot2, slot2); |
| 265 | } | 265 | } |
| 266 | rcu_read_unlock(); | 266 | rcu_read_unlock(); |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 06c33b68d8e5..b887e484ae04 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
| @@ -225,11 +225,11 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, | |||
| 225 | switch (sdata->vif.type) { | 225 | switch (sdata->vif.type) { |
| 226 | case NL80211_IFTYPE_AP: | 226 | case NL80211_IFTYPE_AP: |
| 227 | sdata->vif.bss_conf.enable_beacon = | 227 | sdata->vif.bss_conf.enable_beacon = |
| 228 | !!rcu_dereference(sdata->u.ap.beacon); | 228 | !!sdata->u.ap.beacon; |
| 229 | break; | 229 | break; |
| 230 | case NL80211_IFTYPE_ADHOC: | 230 | case NL80211_IFTYPE_ADHOC: |
| 231 | sdata->vif.bss_conf.enable_beacon = | 231 | sdata->vif.bss_conf.enable_beacon = |
| 232 | !!rcu_dereference(sdata->u.ibss.presp); | 232 | !!sdata->u.ibss.presp; |
| 233 | break; | 233 | break; |
| 234 | case NL80211_IFTYPE_MESH_POINT: | 234 | case NL80211_IFTYPE_MESH_POINT: |
| 235 | sdata->vif.bss_conf.enable_beacon = true; | 235 | sdata->vif.bss_conf.enable_beacon = true; |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 58e3e3a61d99..859ee5f3d941 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
| @@ -750,9 +750,6 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | |||
| 750 | 750 | ||
| 751 | switch (fc & IEEE80211_FCTL_STYPE) { | 751 | switch (fc & IEEE80211_FCTL_STYPE) { |
| 752 | case IEEE80211_STYPE_ACTION: | 752 | case IEEE80211_STYPE_ACTION: |
| 753 | if (skb->len < IEEE80211_MIN_ACTION_SIZE) | ||
| 754 | return RX_DROP_MONITOR; | ||
| 755 | /* fall through */ | ||
| 756 | case IEEE80211_STYPE_PROBE_RESP: | 753 | case IEEE80211_STYPE_PROBE_RESP: |
| 757 | case IEEE80211_STYPE_BEACON: | 754 | case IEEE80211_STYPE_BEACON: |
| 758 | skb_queue_tail(&ifmsh->skb_queue, skb); | 755 | skb_queue_tail(&ifmsh->skb_queue, skb); |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 122c11380ffe..fefc45c4b4e8 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
| @@ -392,7 +392,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, | |||
| 392 | if (SN_GT(mpath->sn, orig_sn) || | 392 | if (SN_GT(mpath->sn, orig_sn) || |
| 393 | (mpath->sn == orig_sn && | 393 | (mpath->sn == orig_sn && |
| 394 | action == MPATH_PREQ && | 394 | action == MPATH_PREQ && |
| 395 | new_metric > mpath->metric)) { | 395 | new_metric >= mpath->metric)) { |
| 396 | process = false; | 396 | process = false; |
| 397 | fresh_info = false; | 397 | fresh_info = false; |
| 398 | } | 398 | } |
| @@ -612,7 +612,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, | |||
| 612 | 612 | ||
| 613 | mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, | 613 | mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, |
| 614 | cpu_to_le32(orig_sn), 0, target_addr, | 614 | cpu_to_le32(orig_sn), 0, target_addr, |
| 615 | cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount, | 615 | cpu_to_le32(target_sn), next_hop, hopcount, |
| 616 | ttl, cpu_to_le32(lifetime), cpu_to_le32(metric), | 616 | ttl, cpu_to_le32(lifetime), cpu_to_le32(metric), |
| 617 | 0, sdata); | 617 | 0, sdata); |
| 618 | rcu_read_unlock(); | 618 | rcu_read_unlock(); |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index f0accf622cd7..04ea07f0e78a 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
| @@ -1974,6 +1974,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
| 1974 | goto handled; | 1974 | goto handled; |
| 1975 | } | 1975 | } |
| 1976 | break; | 1976 | break; |
| 1977 | case MESH_PLINK_CATEGORY: | ||
| 1978 | case MESH_PATH_SEL_CATEGORY: | ||
| 1979 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
| 1980 | return ieee80211_mesh_rx_mgmt(sdata, rx->skb); | ||
| 1981 | break; | ||
| 1977 | } | 1982 | } |
| 1978 | 1983 | ||
| 1979 | /* | 1984 | /* |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 56422d894351..fb12cec4d333 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
| @@ -93,12 +93,18 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, | |||
| 93 | struct ieee80211_local *local = sdata->local; | 93 | struct ieee80211_local *local = sdata->local; |
| 94 | struct sta_info *sta; | 94 | struct sta_info *sta; |
| 95 | 95 | ||
| 96 | sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); | 96 | sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)], |
| 97 | rcu_read_lock_held() || | ||
| 98 | lockdep_is_held(&local->sta_lock) || | ||
| 99 | lockdep_is_held(&local->sta_mtx)); | ||
| 97 | while (sta) { | 100 | while (sta) { |
| 98 | if (sta->sdata == sdata && | 101 | if (sta->sdata == sdata && |
| 99 | memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) | 102 | memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) |
| 100 | break; | 103 | break; |
| 101 | sta = rcu_dereference(sta->hnext); | 104 | sta = rcu_dereference_check(sta->hnext, |
| 105 | rcu_read_lock_held() || | ||
| 106 | lockdep_is_held(&local->sta_lock) || | ||
| 107 | lockdep_is_held(&local->sta_mtx)); | ||
| 102 | } | 108 | } |
| 103 | return sta; | 109 | return sta; |
| 104 | } | 110 | } |
| @@ -113,13 +119,19 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, | |||
| 113 | struct ieee80211_local *local = sdata->local; | 119 | struct ieee80211_local *local = sdata->local; |
| 114 | struct sta_info *sta; | 120 | struct sta_info *sta; |
| 115 | 121 | ||
| 116 | sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); | 122 | sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)], |
| 123 | rcu_read_lock_held() || | ||
| 124 | lockdep_is_held(&local->sta_lock) || | ||
| 125 | lockdep_is_held(&local->sta_mtx)); | ||
| 117 | while (sta) { | 126 | while (sta) { |
| 118 | if ((sta->sdata == sdata || | 127 | if ((sta->sdata == sdata || |
| 119 | sta->sdata->bss == sdata->bss) && | 128 | sta->sdata->bss == sdata->bss) && |
| 120 | memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) | 129 | memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) |
| 121 | break; | 130 | break; |
| 122 | sta = rcu_dereference(sta->hnext); | 131 | sta = rcu_dereference_check(sta->hnext, |
| 132 | rcu_read_lock_held() || | ||
| 133 | lockdep_is_held(&local->sta_lock) || | ||
| 134 | lockdep_is_held(&local->sta_mtx)); | ||
| 123 | } | 135 | } |
| 124 | return sta; | 136 | return sta; |
| 125 | } | 137 | } |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index cbe53ed4fb0b..cfc473e1b050 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
| @@ -1991,6 +1991,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, | |||
| 1991 | void ieee80211_tx_pending(unsigned long data) | 1991 | void ieee80211_tx_pending(unsigned long data) |
| 1992 | { | 1992 | { |
| 1993 | struct ieee80211_local *local = (struct ieee80211_local *)data; | 1993 | struct ieee80211_local *local = (struct ieee80211_local *)data; |
| 1994 | struct ieee80211_sub_if_data *sdata; | ||
| 1994 | unsigned long flags; | 1995 | unsigned long flags; |
| 1995 | int i; | 1996 | int i; |
| 1996 | bool txok; | 1997 | bool txok; |
| @@ -2029,6 +2030,11 @@ void ieee80211_tx_pending(unsigned long data) | |||
| 2029 | if (!txok) | 2030 | if (!txok) |
| 2030 | break; | 2031 | break; |
| 2031 | } | 2032 | } |
| 2033 | |||
| 2034 | if (skb_queue_empty(&local->pending[i])) | ||
| 2035 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | ||
| 2036 | netif_tx_wake_queue( | ||
| 2037 | netdev_get_tx_queue(sdata->dev, i)); | ||
| 2032 | } | 2038 | } |
| 2033 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | 2039 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); |
| 2034 | 2040 | ||
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index c453226f06b2..53af57047435 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
| @@ -279,13 +279,13 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, | |||
| 279 | /* someone still has this queue stopped */ | 279 | /* someone still has this queue stopped */ |
| 280 | return; | 280 | return; |
| 281 | 281 | ||
| 282 | if (!skb_queue_empty(&local->pending[queue])) | 282 | if (skb_queue_empty(&local->pending[queue])) { |
| 283 | rcu_read_lock(); | ||
| 284 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | ||
| 285 | netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue)); | ||
| 286 | rcu_read_unlock(); | ||
| 287 | } else | ||
| 283 | tasklet_schedule(&local->tx_pending_tasklet); | 288 | tasklet_schedule(&local->tx_pending_tasklet); |
| 284 | |||
| 285 | rcu_read_lock(); | ||
| 286 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | ||
| 287 | netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue)); | ||
| 288 | rcu_read_unlock(); | ||
| 289 | } | 289 | } |
| 290 | 290 | ||
| 291 | void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, | 291 | void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, |
| @@ -1097,9 +1097,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
| 1097 | */ | 1097 | */ |
| 1098 | res = drv_start(local); | 1098 | res = drv_start(local); |
| 1099 | if (res) { | 1099 | if (res) { |
| 1100 | WARN(local->suspended, "Harware became unavailable " | 1100 | WARN(local->suspended, "Hardware became unavailable " |
| 1101 | "upon resume. This is could be a software issue" | 1101 | "upon resume. This could be a software issue " |
| 1102 | "prior to suspend or a hardware issue\n"); | 1102 | "prior to suspend or a hardware issue.\n"); |
| 1103 | return res; | 1103 | return res; |
| 1104 | } | 1104 | } |
| 1105 | 1105 | ||
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index 016ab9c75ebd..d37b7f80fa37 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c | |||
| @@ -51,9 +51,12 @@ struct netlbl_domhsh_tbl { | |||
| 51 | }; | 51 | }; |
| 52 | 52 | ||
| 53 | /* Domain hash table */ | 53 | /* Domain hash table */ |
| 54 | /* XXX - updates should be so rare that having one spinlock for the entire | 54 | /* updates should be so rare that having one spinlock for the entire hash table |
| 55 | * hash table should be okay */ | 55 | * should be okay */ |
| 56 | static DEFINE_SPINLOCK(netlbl_domhsh_lock); | 56 | static DEFINE_SPINLOCK(netlbl_domhsh_lock); |
| 57 | #define netlbl_domhsh_rcu_deref(p) \ | ||
| 58 | rcu_dereference_check(p, rcu_read_lock_held() || \ | ||
| 59 | lockdep_is_held(&netlbl_domhsh_lock)) | ||
| 57 | static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL; | 60 | static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL; |
| 58 | static struct netlbl_dom_map *netlbl_domhsh_def = NULL; | 61 | static struct netlbl_dom_map *netlbl_domhsh_def = NULL; |
| 59 | 62 | ||
| @@ -107,7 +110,8 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry) | |||
| 107 | * Description: | 110 | * Description: |
| 108 | * This is the hashing function for the domain hash table, it returns the | 111 | * This is the hashing function for the domain hash table, it returns the |
| 109 | * correct bucket number for the domain. The caller is responsible for | 112 | * correct bucket number for the domain. The caller is responsible for |
| 110 | * calling the rcu_read_[un]lock() functions. | 113 | * ensuring that the hash table is protected with either a RCU read lock or the |
| 114 | * hash table lock. | ||
| 111 | * | 115 | * |
| 112 | */ | 116 | */ |
| 113 | static u32 netlbl_domhsh_hash(const char *key) | 117 | static u32 netlbl_domhsh_hash(const char *key) |
| @@ -121,7 +125,7 @@ static u32 netlbl_domhsh_hash(const char *key) | |||
| 121 | 125 | ||
| 122 | for (iter = 0, val = 0, len = strlen(key); iter < len; iter++) | 126 | for (iter = 0, val = 0, len = strlen(key); iter < len; iter++) |
| 123 | val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter]; | 127 | val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter]; |
| 124 | return val & (rcu_dereference(netlbl_domhsh)->size - 1); | 128 | return val & (netlbl_domhsh_rcu_deref(netlbl_domhsh)->size - 1); |
| 125 | } | 129 | } |
| 126 | 130 | ||
| 127 | /** | 131 | /** |
| @@ -131,7 +135,8 @@ static u32 netlbl_domhsh_hash(const char *key) | |||
| 131 | * Description: | 135 | * Description: |
| 132 | * Searches the domain hash table and returns a pointer to the hash table | 136 | * Searches the domain hash table and returns a pointer to the hash table |
| 133 | * entry if found, otherwise NULL is returned. The caller is responsible for | 137 | * entry if found, otherwise NULL is returned. The caller is responsible for |
| 134 | * the rcu hash table locks (i.e. the caller much call rcu_read_[un]lock()). | 138 | * ensuring that the hash table is protected with either a RCU read lock or the |
| 139 | * hash table lock. | ||
| 135 | * | 140 | * |
| 136 | */ | 141 | */ |
| 137 | static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) | 142 | static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) |
| @@ -142,7 +147,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) | |||
| 142 | 147 | ||
| 143 | if (domain != NULL) { | 148 | if (domain != NULL) { |
| 144 | bkt = netlbl_domhsh_hash(domain); | 149 | bkt = netlbl_domhsh_hash(domain); |
| 145 | bkt_list = &rcu_dereference(netlbl_domhsh)->tbl[bkt]; | 150 | bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt]; |
| 146 | list_for_each_entry_rcu(iter, bkt_list, list) | 151 | list_for_each_entry_rcu(iter, bkt_list, list) |
| 147 | if (iter->valid && strcmp(iter->domain, domain) == 0) | 152 | if (iter->valid && strcmp(iter->domain, domain) == 0) |
| 148 | return iter; | 153 | return iter; |
| @@ -160,8 +165,8 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) | |||
| 160 | * Searches the domain hash table and returns a pointer to the hash table | 165 | * Searches the domain hash table and returns a pointer to the hash table |
| 161 | * entry if an exact match is found, if an exact match is not present in the | 166 | * entry if an exact match is found, if an exact match is not present in the |
| 162 | * hash table then the default entry is returned if valid otherwise NULL is | 167 | * hash table then the default entry is returned if valid otherwise NULL is |
| 163 | * returned. The caller is responsible for the rcu hash table locks | 168 | * returned. The caller is responsible for ensuring that the hash table is |
| 164 | * (i.e. the caller much call rcu_read_[un]lock()). | 169 | * protected with either a RCU read lock or the hash table lock. |
| 165 | * | 170 | * |
| 166 | */ | 171 | */ |
| 167 | static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) | 172 | static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) |
| @@ -170,7 +175,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) | |||
| 170 | 175 | ||
| 171 | entry = netlbl_domhsh_search(domain); | 176 | entry = netlbl_domhsh_search(domain); |
| 172 | if (entry == NULL) { | 177 | if (entry == NULL) { |
| 173 | entry = rcu_dereference(netlbl_domhsh_def); | 178 | entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def); |
| 174 | if (entry != NULL && !entry->valid) | 179 | if (entry != NULL && !entry->valid) |
| 175 | entry = NULL; | 180 | entry = NULL; |
| 176 | } | 181 | } |
| @@ -307,8 +312,11 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, | |||
| 307 | struct netlbl_af6list *tmp6; | 312 | struct netlbl_af6list *tmp6; |
| 308 | #endif /* IPv6 */ | 313 | #endif /* IPv6 */ |
| 309 | 314 | ||
| 315 | /* XXX - we can remove this RCU read lock as the spinlock protects the | ||
| 316 | * entire function, but before we do we need to fixup the | ||
| 317 | * netlbl_af[4,6]list RCU functions to do "the right thing" with | ||
| 318 | * respect to rcu_dereference() when only a spinlock is held. */ | ||
| 310 | rcu_read_lock(); | 319 | rcu_read_lock(); |
| 311 | |||
| 312 | spin_lock(&netlbl_domhsh_lock); | 320 | spin_lock(&netlbl_domhsh_lock); |
| 313 | if (entry->domain != NULL) | 321 | if (entry->domain != NULL) |
| 314 | entry_old = netlbl_domhsh_search(entry->domain); | 322 | entry_old = netlbl_domhsh_search(entry->domain); |
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index d7ea2cf390b7..a3d64aabe2f7 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c | |||
| @@ -115,6 +115,9 @@ struct netlbl_unlhsh_walk_arg { | |||
| 115 | /* updates should be so rare that having one spinlock for the entire | 115 | /* updates should be so rare that having one spinlock for the entire |
| 116 | * hash table should be okay */ | 116 | * hash table should be okay */ |
| 117 | static DEFINE_SPINLOCK(netlbl_unlhsh_lock); | 117 | static DEFINE_SPINLOCK(netlbl_unlhsh_lock); |
| 118 | #define netlbl_unlhsh_rcu_deref(p) \ | ||
| 119 | rcu_dereference_check(p, rcu_read_lock_held() || \ | ||
| 120 | lockdep_is_held(&netlbl_unlhsh_lock)) | ||
| 118 | static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL; | 121 | static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL; |
| 119 | static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL; | 122 | static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL; |
| 120 | 123 | ||
| @@ -236,15 +239,13 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry) | |||
| 236 | * Description: | 239 | * Description: |
| 237 | * This is the hashing function for the unlabeled hash table, it returns the | 240 | * This is the hashing function for the unlabeled hash table, it returns the |
| 238 | * bucket number for the given device/interface. The caller is responsible for | 241 | * bucket number for the given device/interface. The caller is responsible for |
| 239 | * calling the rcu_read_[un]lock() functions. | 242 | * ensuring that the hash table is protected with either a RCU read lock or |
| 243 | * the hash table lock. | ||
| 240 | * | 244 | * |
| 241 | */ | 245 | */ |
| 242 | static u32 netlbl_unlhsh_hash(int ifindex) | 246 | static u32 netlbl_unlhsh_hash(int ifindex) |
| 243 | { | 247 | { |
| 244 | /* this is taken _almost_ directly from | 248 | return ifindex & (netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->size - 1); |
| 245 | * security/selinux/netif.c:sel_netif_hasfn() as they do pretty much | ||
| 246 | * the same thing */ | ||
| 247 | return ifindex & (rcu_dereference(netlbl_unlhsh)->size - 1); | ||
| 248 | } | 249 | } |
| 249 | 250 | ||
| 250 | /** | 251 | /** |
| @@ -254,7 +255,8 @@ static u32 netlbl_unlhsh_hash(int ifindex) | |||
| 254 | * Description: | 255 | * Description: |
| 255 | * Searches the unlabeled connection hash table and returns a pointer to the | 256 | * Searches the unlabeled connection hash table and returns a pointer to the |
| 256 | * interface entry which matches @ifindex, otherwise NULL is returned. The | 257 | * interface entry which matches @ifindex, otherwise NULL is returned. The |
| 257 | * caller is responsible for calling the rcu_read_[un]lock() functions. | 258 | * caller is responsible for ensuring that the hash table is protected with |
| 259 | * either a RCU read lock or the hash table lock. | ||
| 258 | * | 260 | * |
| 259 | */ | 261 | */ |
| 260 | static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) | 262 | static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) |
| @@ -264,7 +266,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) | |||
| 264 | struct netlbl_unlhsh_iface *iter; | 266 | struct netlbl_unlhsh_iface *iter; |
| 265 | 267 | ||
| 266 | bkt = netlbl_unlhsh_hash(ifindex); | 268 | bkt = netlbl_unlhsh_hash(ifindex); |
| 267 | bkt_list = &rcu_dereference(netlbl_unlhsh)->tbl[bkt]; | 269 | bkt_list = &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]; |
| 268 | list_for_each_entry_rcu(iter, bkt_list, list) | 270 | list_for_each_entry_rcu(iter, bkt_list, list) |
| 269 | if (iter->valid && iter->ifindex == ifindex) | 271 | if (iter->valid && iter->ifindex == ifindex) |
| 270 | return iter; | 272 | return iter; |
| @@ -273,33 +275,6 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) | |||
| 273 | } | 275 | } |
| 274 | 276 | ||
| 275 | /** | 277 | /** |
| 276 | * netlbl_unlhsh_search_iface_def - Search for a matching interface entry | ||
| 277 | * @ifindex: the network interface | ||
| 278 | * | ||
| 279 | * Description: | ||
| 280 | * Searches the unlabeled connection hash table and returns a pointer to the | ||
| 281 | * interface entry which matches @ifindex. If an exact match can not be found | ||
| 282 | * and there is a valid default entry, the default entry is returned, otherwise | ||
| 283 | * NULL is returned. The caller is responsible for calling the | ||
| 284 | * rcu_read_[un]lock() functions. | ||
| 285 | * | ||
| 286 | */ | ||
| 287 | static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface_def(int ifindex) | ||
| 288 | { | ||
| 289 | struct netlbl_unlhsh_iface *entry; | ||
| 290 | |||
| 291 | entry = netlbl_unlhsh_search_iface(ifindex); | ||
| 292 | if (entry != NULL) | ||
| 293 | return entry; | ||
| 294 | |||
| 295 | entry = rcu_dereference(netlbl_unlhsh_def); | ||
| 296 | if (entry != NULL && entry->valid) | ||
| 297 | return entry; | ||
| 298 | |||
| 299 | return NULL; | ||
| 300 | } | ||
| 301 | |||
| 302 | /** | ||
| 303 | * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table | 278 | * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table |
| 304 | * @iface: the associated interface entry | 279 | * @iface: the associated interface entry |
| 305 | * @addr: IPv4 address in network byte order | 280 | * @addr: IPv4 address in network byte order |
| @@ -309,8 +284,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface_def(int ifindex) | |||
| 309 | * Description: | 284 | * Description: |
| 310 | * Add a new address entry into the unlabeled connection hash table using the | 285 | * Add a new address entry into the unlabeled connection hash table using the |
| 311 | * interface entry specified by @iface. On success zero is returned, otherwise | 286 | * interface entry specified by @iface. On success zero is returned, otherwise |
| 312 | * a negative value is returned. The caller is responsible for calling the | 287 | * a negative value is returned. |
| 313 | * rcu_read_[un]lock() functions. | ||
| 314 | * | 288 | * |
| 315 | */ | 289 | */ |
| 316 | static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface, | 290 | static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface, |
| @@ -350,8 +324,7 @@ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface, | |||
| 350 | * Description: | 324 | * Description: |
| 351 | * Add a new address entry into the unlabeled connection hash table using the | 325 | * Add a new address entry into the unlabeled connection hash table using the |
| 352 | * interface entry specified by @iface. On success zero is returned, otherwise | 326 | * interface entry specified by @iface. On success zero is returned, otherwise |
| 353 | * a negative value is returned. The caller is responsible for calling the | 327 | * a negative value is returned. |
| 354 | * rcu_read_[un]lock() functions. | ||
| 355 | * | 328 | * |
| 356 | */ | 329 | */ |
| 357 | static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface, | 330 | static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface, |
| @@ -392,8 +365,7 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface, | |||
| 392 | * Description: | 365 | * Description: |
| 393 | * Add a new, empty, interface entry into the unlabeled connection hash table. | 366 | * Add a new, empty, interface entry into the unlabeled connection hash table. |
| 394 | * On success a pointer to the new interface entry is returned, on failure NULL | 367 | * On success a pointer to the new interface entry is returned, on failure NULL |
| 395 | * is returned. The caller is responsible for calling the rcu_read_[un]lock() | 368 | * is returned. |
| 396 | * functions. | ||
| 397 | * | 369 | * |
| 398 | */ | 370 | */ |
| 399 | static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex) | 371 | static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex) |
| @@ -416,10 +388,10 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex) | |||
| 416 | if (netlbl_unlhsh_search_iface(ifindex) != NULL) | 388 | if (netlbl_unlhsh_search_iface(ifindex) != NULL) |
| 417 | goto add_iface_failure; | 389 | goto add_iface_failure; |
| 418 | list_add_tail_rcu(&iface->list, | 390 | list_add_tail_rcu(&iface->list, |
| 419 | &rcu_dereference(netlbl_unlhsh)->tbl[bkt]); | 391 | &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]); |
| 420 | } else { | 392 | } else { |
| 421 | INIT_LIST_HEAD(&iface->list); | 393 | INIT_LIST_HEAD(&iface->list); |
| 422 | if (rcu_dereference(netlbl_unlhsh_def) != NULL) | 394 | if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL) |
| 423 | goto add_iface_failure; | 395 | goto add_iface_failure; |
| 424 | rcu_assign_pointer(netlbl_unlhsh_def, iface); | 396 | rcu_assign_pointer(netlbl_unlhsh_def, iface); |
| 425 | } | 397 | } |
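
Editor's note: the hunk above swaps plain rcu_dereference() for netlbl_unlhsh_rcu_deref() inside netlbl_unlhsh_add_iface(), which runs on the update side with the hash-table spinlock held rather than under rcu_read_lock(). The wrapper itself is not part of this diff; it is presumably a lockdep-aware dereference of roughly the shape below (the exact condition and the lock name netlbl_unlhsh_lock are assumptions here, not shown in any hunk):

    #define netlbl_unlhsh_rcu_deref(p)                                    \
            rcu_dereference_check(p, rcu_read_lock_held() ||              \
                                     lockdep_is_held(&netlbl_unlhsh_lock))

The point of the change is that lockdep can then verify "either an RCU read-side critical section or the update-side lock" instead of flagging a false positive.
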
| @@ -549,8 +521,7 @@ unlhsh_add_return: | |||
| 549 | * | 521 | * |
| 550 | * Description: | 522 | * Description: |
| 551 | * Remove an IP address entry from the unlabeled connection hash table. | 523 | * Remove an IP address entry from the unlabeled connection hash table. |
| 552 | * Returns zero on success, negative values on failure. The caller is | 524 | * Returns zero on success, negative values on failure. |
| 553 | * responsible for calling the rcu_read_[un]lock() functions. | ||
| 554 | * | 525 | * |
| 555 | */ | 526 | */ |
| 556 | static int netlbl_unlhsh_remove_addr4(struct net *net, | 527 | static int netlbl_unlhsh_remove_addr4(struct net *net, |
| @@ -612,8 +583,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net, | |||
| 612 | * | 583 | * |
| 613 | * Description: | 584 | * Description: |
| 614 | * Remove an IP address entry from the unlabeled connection hash table. | 585 | * Remove an IP address entry from the unlabeled connection hash table. |
| 615 | * Returns zero on success, negative values on failure. The caller is | 586 | * Returns zero on success, negative values on failure. |
| 616 | * responsible for calling the rcu_read_[un]lock() functions. | ||
| 617 | * | 587 | * |
| 618 | */ | 588 | */ |
| 619 | static int netlbl_unlhsh_remove_addr6(struct net *net, | 589 | static int netlbl_unlhsh_remove_addr6(struct net *net, |
| @@ -1548,8 +1518,10 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb, | |||
| 1548 | struct netlbl_unlhsh_iface *iface; | 1518 | struct netlbl_unlhsh_iface *iface; |
| 1549 | 1519 | ||
| 1550 | rcu_read_lock(); | 1520 | rcu_read_lock(); |
| 1551 | iface = netlbl_unlhsh_search_iface_def(skb->skb_iif); | 1521 | iface = netlbl_unlhsh_search_iface(skb->skb_iif); |
| 1552 | if (iface == NULL) | 1522 | if (iface == NULL) |
| 1523 | iface = rcu_dereference(netlbl_unlhsh_def); | ||
| 1524 | if (iface == NULL || !iface->valid) | ||
| 1553 | goto unlabel_getattr_nolabel; | 1525 | goto unlabel_getattr_nolabel; |
| 1554 | switch (family) { | 1526 | switch (family) { |
| 1555 | case PF_INET: { | 1527 | case PF_INET: { |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index acbbae1e89b5..795424396aff 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -683,6 +683,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, | |||
| 683 | struct netlink_sock *nlk = nlk_sk(sk); | 683 | struct netlink_sock *nlk = nlk_sk(sk); |
| 684 | struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; | 684 | struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; |
| 685 | 685 | ||
| 686 | if (alen < sizeof(addr->sa_family)) | ||
| 687 | return -EINVAL; | ||
| 688 | |||
| 686 | if (addr->sa_family == AF_UNSPEC) { | 689 | if (addr->sa_family == AF_UNSPEC) { |
| 687 | sk->sk_state = NETLINK_UNCONNECTED; | 690 | sk->sk_state = NETLINK_UNCONNECTED; |
| 688 | nlk->dst_pid = 0; | 691 | nlk->dst_pid = 0; |
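
Editor's note: the new check in netlink_connect() rejects a connect() whose address length cannot even cover sa_family, so the handler never reads past a caller-supplied buffer that is shorter than it claims. A minimal user-space-style sketch of the same guard (the function name and return values are invented for illustration; the kernel path returns -EINVAL):

    #include <stdio.h>
    #include <sys/socket.h>

    /* Validate the caller-supplied length before touching any field. */
    static int check_connect_addr(const struct sockaddr *addr, socklen_t alen)
    {
            if (alen < sizeof(addr->sa_family))
                    return -1;      /* too short: reject (kernel: -EINVAL) */
            if (addr->sa_family == AF_UNSPEC)
                    return 0;       /* "disconnect" request */
            return 1;               /* normal connect path */
    }

    int main(void)
    {
            struct sockaddr sa = { .sa_family = AF_UNSPEC };

            printf("%d %d\n", check_connect_addr(&sa, 0),
                              check_connect_addr(&sa, sizeof(sa)));
            return 0;
    }
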
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index cc90363d7e7a..243946d4809d 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -2169,8 +2169,6 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd, | |||
| 2169 | case SIOCGIFDSTADDR: | 2169 | case SIOCGIFDSTADDR: |
| 2170 | case SIOCSIFDSTADDR: | 2170 | case SIOCSIFDSTADDR: |
| 2171 | case SIOCSIFFLAGS: | 2171 | case SIOCSIFFLAGS: |
| 2172 | if (!net_eq(sock_net(sk), &init_net)) | ||
| 2173 | return -ENOIOCTLCMD; | ||
| 2174 | return inet_dgram_ops.ioctl(sock, cmd, arg); | 2172 | return inet_dgram_ops.ioctl(sock, cmd, arg); |
| 2175 | #endif | 2173 | #endif |
| 2176 | 2174 | ||
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index fd90eb89842b..edea15a54e51 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
| @@ -679,7 +679,10 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, | |||
| 679 | int ret; | 679 | int ret; |
| 680 | 680 | ||
| 681 | dprintk("svcrdma: Creating RDMA socket\n"); | 681 | dprintk("svcrdma: Creating RDMA socket\n"); |
| 682 | 682 | if (sa->sa_family != AF_INET) { | |
| 683 | dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family); | ||
| 684 | return ERR_PTR(-EAFNOSUPPORT); | ||
| 685 | } | ||
| 683 | cma_xprt = rdma_create_xprt(serv, 1); | 686 | cma_xprt = rdma_create_xprt(serv, 1); |
| 684 | if (!cma_xprt) | 687 | if (!cma_xprt) |
| 685 | return ERR_PTR(-ENOMEM); | 688 | return ERR_PTR(-ENOMEM); |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index b7604b823f46..422da20d1e5b 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
| @@ -325,7 +325,7 @@ struct reg_regdb_search_request { | |||
| 325 | }; | 325 | }; |
| 326 | 326 | ||
| 327 | static LIST_HEAD(reg_regdb_search_list); | 327 | static LIST_HEAD(reg_regdb_search_list); |
| 328 | static DEFINE_SPINLOCK(reg_regdb_search_lock); | 328 | static DEFINE_MUTEX(reg_regdb_search_mutex); |
| 329 | 329 | ||
| 330 | static void reg_regdb_search(struct work_struct *work) | 330 | static void reg_regdb_search(struct work_struct *work) |
| 331 | { | 331 | { |
| @@ -333,7 +333,7 @@ static void reg_regdb_search(struct work_struct *work) | |||
| 333 | const struct ieee80211_regdomain *curdom, *regdom; | 333 | const struct ieee80211_regdomain *curdom, *regdom; |
| 334 | int i, r; | 334 | int i, r; |
| 335 | 335 | ||
| 336 | spin_lock(®_regdb_search_lock); | 336 | mutex_lock(®_regdb_search_mutex); |
| 337 | while (!list_empty(®_regdb_search_list)) { | 337 | while (!list_empty(®_regdb_search_list)) { |
| 338 | request = list_first_entry(®_regdb_search_list, | 338 | request = list_first_entry(®_regdb_search_list, |
| 339 | struct reg_regdb_search_request, | 339 | struct reg_regdb_search_request, |
| @@ -347,18 +347,16 @@ static void reg_regdb_search(struct work_struct *work) | |||
| 347 | r = reg_copy_regd(®dom, curdom); | 347 | r = reg_copy_regd(®dom, curdom); |
| 348 | if (r) | 348 | if (r) |
| 349 | break; | 349 | break; |
| 350 | spin_unlock(®_regdb_search_lock); | ||
| 351 | mutex_lock(&cfg80211_mutex); | 350 | mutex_lock(&cfg80211_mutex); |
| 352 | set_regdom(regdom); | 351 | set_regdom(regdom); |
| 353 | mutex_unlock(&cfg80211_mutex); | 352 | mutex_unlock(&cfg80211_mutex); |
| 354 | spin_lock(®_regdb_search_lock); | ||
| 355 | break; | 353 | break; |
| 356 | } | 354 | } |
| 357 | } | 355 | } |
| 358 | 356 | ||
| 359 | kfree(request); | 357 | kfree(request); |
| 360 | } | 358 | } |
| 361 | spin_unlock(®_regdb_search_lock); | 359 | mutex_unlock(®_regdb_search_mutex); |
| 362 | } | 360 | } |
| 363 | 361 | ||
| 364 | static DECLARE_WORK(reg_regdb_work, reg_regdb_search); | 362 | static DECLARE_WORK(reg_regdb_work, reg_regdb_search); |
| @@ -376,9 +374,9 @@ static void reg_regdb_query(const char *alpha2) | |||
| 376 | 374 | ||
| 377 | memcpy(request->alpha2, alpha2, 2); | 375 | memcpy(request->alpha2, alpha2, 2); |
| 378 | 376 | ||
| 379 | spin_lock(®_regdb_search_lock); | 377 | mutex_lock(®_regdb_search_mutex); |
| 380 | list_add_tail(&request->list, ®_regdb_search_list); | 378 | list_add_tail(&request->list, ®_regdb_search_list); |
| 381 | spin_unlock(®_regdb_search_lock); | 379 | mutex_unlock(®_regdb_search_mutex); |
| 382 | 380 | ||
| 383 | schedule_work(®_regdb_work); | 381 | schedule_work(®_regdb_work); |
| 384 | } | 382 | } |
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index e56f711baccc..cbddd0cb83f1 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
| @@ -83,6 +83,41 @@ struct compat_x25_subscrip_struct { | |||
| 83 | }; | 83 | }; |
| 84 | #endif | 84 | #endif |
| 85 | 85 | ||
| 86 | |||
| 87 | int x25_parse_address_block(struct sk_buff *skb, | ||
| 88 | struct x25_address *called_addr, | ||
| 89 | struct x25_address *calling_addr) | ||
| 90 | { | ||
| 91 | unsigned char len; | ||
| 92 | int needed; | ||
| 93 | int rc; | ||
| 94 | |||
| 95 | if (skb->len < 1) { | ||
| 96 | /* packet has no address block */ | ||
| 97 | rc = 0; | ||
| 98 | goto empty; | ||
| 99 | } | ||
| 100 | |||
| 101 | len = *skb->data; | ||
| 102 | needed = 1 + (len >> 4) + (len & 0x0f); | ||
| 103 | |||
| 104 | if (skb->len < needed) { | ||
| 105 | /* packet is too short to hold the addresses it claims | ||
| 106 | to hold */ | ||
| 107 | rc = -1; | ||
| 108 | goto empty; | ||
| 109 | } | ||
| 110 | |||
| 111 | return x25_addr_ntoa(skb->data, called_addr, calling_addr); | ||
| 112 | |||
| 113 | empty: | ||
| 114 | *called_addr->x25_addr = 0; | ||
| 115 | *calling_addr->x25_addr = 0; | ||
| 116 | |||
| 117 | return rc; | ||
| 118 | } | ||
| 119 | |||
| 120 | |||
| 86 | int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, | 121 | int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, |
| 87 | struct x25_address *calling_addr) | 122 | struct x25_address *calling_addr) |
| 88 | { | 123 | { |
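
Editor's note: x25_parse_address_block() reads the single length octet, adds its two nibble fields to compute how many octets the address block is taken to need, and bails out before calling x25_addr_ntoa() if the skb is shorter than that. A standalone sketch of the bounds check (buffer contents and names are invented; only the nibble arithmetic is taken from the hunk):

    #include <stdio.h>

    /* Returns the claimed block length, 0 for "no address block",
     * or -1 when the buffer cannot hold what the length octet claims. */
    static int addr_block_len(const unsigned char *buf, int buflen)
    {
            int needed;

            if (buflen < 1)
                    return 0;

            needed = 1 + (buf[0] >> 4) + (buf[0] & 0x0f);
            if (buflen < needed)
                    return -1;

            return needed;
    }

    int main(void)
    {
            unsigned char ok[]    = { 0x23, 1, 2, 3, 4, 5 };  /* needs 1 + 2 + 3 = 6 */
            unsigned char trunc[] = { 0x23, 1, 2 };           /* claims 6, holds 3   */

            printf("%d %d\n", addr_block_len(ok, sizeof(ok)),
                              addr_block_len(trunc, sizeof(trunc)));
            return 0;
    }
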
| @@ -554,7 +589,8 @@ static int x25_create(struct net *net, struct socket *sock, int protocol, | |||
| 554 | x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; | 589 | x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; |
| 555 | x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; | 590 | x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; |
| 556 | x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; | 591 | x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; |
| 557 | x25->facilities.throughput = X25_DEFAULT_THROUGHPUT; | 592 | x25->facilities.throughput = 0; /* by default don't negotiate |
| 593 | throughput */ | ||
| 558 | x25->facilities.reverse = X25_DEFAULT_REVERSE; | 594 | x25->facilities.reverse = X25_DEFAULT_REVERSE; |
| 559 | x25->dte_facilities.calling_len = 0; | 595 | x25->dte_facilities.calling_len = 0; |
| 560 | x25->dte_facilities.called_len = 0; | 596 | x25->dte_facilities.called_len = 0; |
| @@ -922,16 +958,26 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, | |||
| 922 | /* | 958 | /* |
| 923 | * Extract the X.25 addresses and convert them to ASCII strings, | 959 | * Extract the X.25 addresses and convert them to ASCII strings, |
| 924 | * and remove them. | 960 | * and remove them. |
| 961 | * | ||
| 962 | * Address block is mandatory in call request packets | ||
| 925 | */ | 963 | */ |
| 926 | addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr); | 964 | addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr); |
| 965 | if (addr_len <= 0) | ||
| 966 | goto out_clear_request; | ||
| 927 | skb_pull(skb, addr_len); | 967 | skb_pull(skb, addr_len); |
| 928 | 968 | ||
| 929 | /* | 969 | /* |
| 930 | * Get the length of the facilities, skip past them for the moment | 970 | * Get the length of the facilities, skip past them for the moment |
| 931 | * get the call user data because this is needed to determine | 971 | * get the call user data because this is needed to determine |
| 932 | * the correct listener | 972 | * the correct listener |
| 973 | * | ||
| 974 | * Facilities length is mandatory in call request packets | ||
| 933 | */ | 975 | */ |
| 976 | if (skb->len < 1) | ||
| 977 | goto out_clear_request; | ||
| 934 | len = skb->data[0] + 1; | 978 | len = skb->data[0] + 1; |
| 979 | if (skb->len < len) | ||
| 980 | goto out_clear_request; | ||
| 935 | skb_pull(skb,len); | 981 | skb_pull(skb,len); |
| 936 | 982 | ||
| 937 | /* | 983 | /* |
| @@ -1415,9 +1461,20 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
| 1415 | if (facilities.winsize_in < 1 || | 1461 | if (facilities.winsize_in < 1 || |
| 1416 | facilities.winsize_in > 127) | 1462 | facilities.winsize_in > 127) |
| 1417 | break; | 1463 | break; |
| 1418 | if (facilities.throughput < 0x03 || | 1464 | if (facilities.throughput) { |
| 1419 | facilities.throughput > 0xDD) | 1465 | int out = facilities.throughput & 0xf0; |
| 1420 | break; | 1466 | int in = facilities.throughput & 0x0f; |
| 1467 | if (!out) | ||
| 1468 | facilities.throughput |= | ||
| 1469 | X25_DEFAULT_THROUGHPUT << 4; | ||
| 1470 | else if (out < 0x30 || out > 0xD0) | ||
| 1471 | break; | ||
| 1472 | if (!in) | ||
| 1473 | facilities.throughput |= | ||
| 1474 | X25_DEFAULT_THROUGHPUT; | ||
| 1475 | else if (in < 0x03 || in > 0x0D) | ||
| 1476 | break; | ||
| 1477 | } | ||
| 1421 | if (facilities.reverse && | 1478 | if (facilities.reverse && |
| 1422 | (facilities.reverse & 0x81) != 0x81) | 1479 | (facilities.reverse & 0x81) != 0x81) |
| 1423 | break; | 1480 | break; |
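
Editor's note: the facilities ioctl validation above now treats the throughput facility as two independent nibbles -- outbound class in the high nibble, inbound class in the low nibble -- filling in a default for a missing direction and rejecting values outside the allowed coding. A standalone sketch of that validation; X25_DEFAULT_THROUGHPUT is redefined locally and its value (0x0A) is taken from the kernel header of this era, so treat it as an assumption:

    #include <stdio.h>

    #define X25_DEFAULT_THROUGHPUT 0x0A

    static int check_throughput(unsigned int tp)
    {
            int out = tp & 0xf0;    /* outbound class, high nibble */
            int in  = tp & 0x0f;    /* inbound class, low nibble   */

            if (!tp)
                    return tp;      /* 0 means "do not negotiate"  */

            if (!out)
                    tp |= X25_DEFAULT_THROUGHPUT << 4;
            else if (out < 0x30 || out > 0xD0)
                    return -1;      /* outside the allowed coding  */

            if (!in)
                    tp |= X25_DEFAULT_THROUGHPUT;
            else if (in < 0x03 || in > 0x0D)
                    return -1;

            return tp;
    }

    int main(void)
    {
            printf("%#x %#x %d\n",
                   check_throughput(0xA0),  /* inbound missing -> 0xAA   */
                   check_throughput(0x0A),  /* outbound missing -> 0xAA  */
                   check_throughput(0xE0)); /* out of range -> rejected  */
            return 0;
    }
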
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index a21f6646eb3a..771bab00754b 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c | |||
| @@ -35,7 +35,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
| 35 | struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) | 35 | struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) |
| 36 | { | 36 | { |
| 37 | unsigned char *p = skb->data; | 37 | unsigned char *p = skb->data; |
| 38 | unsigned int len = *p++; | 38 | unsigned int len; |
| 39 | 39 | ||
| 40 | *vc_fac_mask = 0; | 40 | *vc_fac_mask = 0; |
| 41 | 41 | ||
| @@ -50,6 +50,14 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
| 50 | memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); | 50 | memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); |
| 51 | memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); | 51 | memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); |
| 52 | 52 | ||
| 53 | if (skb->len < 1) | ||
| 54 | return 0; | ||
| 55 | |||
| 56 | len = *p++; | ||
| 57 | |||
| 58 | if (len >= skb->len) | ||
| 59 | return -1; | ||
| 60 | |||
| 53 | while (len > 0) { | 61 | while (len > 0) { |
| 54 | switch (*p & X25_FAC_CLASS_MASK) { | 62 | switch (*p & X25_FAC_CLASS_MASK) { |
| 55 | case X25_FAC_CLASS_A: | 63 | case X25_FAC_CLASS_A: |
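
Editor's note: two guards are added before the parsing loop above -- an empty skb simply means no facilities, and a claimed facilities length of skb->len or more is rejected. The ">=" is deliberate: at this point skb->len still counts the length octet itself, so only skb->len - 1 facility bytes actually follow it. A trivial standalone illustration of that off-by-one (names invented):

    #include <stdio.h>

    /* skb_len includes the length octet; only skb_len - 1 bytes follow it. */
    static int facilities_fit(unsigned int claimed_len, unsigned int skb_len)
    {
            return claimed_len >= skb_len ? -1 : 0;
    }

    int main(void)
    {
            printf("%d %d\n", facilities_fit(4, 5),   /* 4 bytes follow: ok    */
                              facilities_fit(5, 5));  /* would run off the end */
            return 0;
    }
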
| @@ -247,6 +255,8 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk, | |||
| 247 | memcpy(new, ours, sizeof(*new)); | 255 | memcpy(new, ours, sizeof(*new)); |
| 248 | 256 | ||
| 249 | len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask); | 257 | len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask); |
| 258 | if (len < 0) | ||
| 259 | return len; | ||
| 250 | 260 | ||
| 251 | /* | 261 | /* |
| 252 | * They want reverse charging, we won't accept it. | 262 | * They want reverse charging, we won't accept it. |
| @@ -259,9 +269,18 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk, | |||
| 259 | new->reverse = theirs.reverse; | 269 | new->reverse = theirs.reverse; |
| 260 | 270 | ||
| 261 | if (theirs.throughput) { | 271 | if (theirs.throughput) { |
| 262 | if (theirs.throughput < ours->throughput) { | 272 | int theirs_in = theirs.throughput & 0x0f; |
| 263 | SOCK_DEBUG(sk, "X.25: throughput negotiated down\n"); | 273 | int theirs_out = theirs.throughput & 0xf0; |
| 264 | new->throughput = theirs.throughput; | 274 | int ours_in = ours->throughput & 0x0f; |
| 275 | int ours_out = ours->throughput & 0xf0; | ||
| 276 | if (!ours_in || theirs_in < ours_in) { | ||
| 277 | SOCK_DEBUG(sk, "X.25: inbound throughput negotiated\n"); | ||
| 278 | new->throughput = (new->throughput & 0xf0) | theirs_in; | ||
| 279 | } | ||
| 280 | if (!ours_out || theirs_out < ours_out) { | ||
| 281 | SOCK_DEBUG(sk, | ||
| 282 | "X.25: outbound throughput negotiated\n"); | ||
| 283 | new->throughput = (new->throughput & 0x0f) | theirs_out; | ||
| 265 | } | 284 | } |
| 266 | } | 285 | } |
| 267 | 286 | ||
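
Editor's note: x25_negotiate_facilities() now lowers the two throughput directions independently instead of comparing the whole octet, and a local nibble of 0 ("no preference") simply adopts the peer's class for that direction. A standalone sketch of the per-nibble rule, with invented example values:

    #include <stdio.h>

    static unsigned int negotiate_tp(unsigned int ours, unsigned int theirs)
    {
            unsigned int new = ours;
            int theirs_in  = theirs & 0x0f, theirs_out = theirs & 0xf0;
            int ours_in    = ours   & 0x0f, ours_out   = ours   & 0xf0;

            if (!theirs)
                    return new;
            if (!ours_in || theirs_in < ours_in)
                    new = (new & 0xf0) | theirs_in;
            if (!ours_out || theirs_out < ours_out)
                    new = (new & 0x0f) | theirs_out;
            return new;
    }

    int main(void)
    {
            /* ours 0xCB, theirs 0xAD: outbound lowered to 0xA, inbound kept at 0xB */
            printf("%#x\n", negotiate_tp(0xCB, 0xAD));
            return 0;
    }
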
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index a31b3b9e5966..372ac226e648 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c | |||
| @@ -90,6 +90,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) | |||
| 90 | static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) | 90 | static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
| 91 | { | 91 | { |
| 92 | struct x25_address source_addr, dest_addr; | 92 | struct x25_address source_addr, dest_addr; |
| 93 | int len; | ||
| 93 | 94 | ||
| 94 | switch (frametype) { | 95 | switch (frametype) { |
| 95 | case X25_CALL_ACCEPTED: { | 96 | case X25_CALL_ACCEPTED: { |
| @@ -107,11 +108,17 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
| 107 | * Parse the data in the frame. | 108 | * Parse the data in the frame. |
| 108 | */ | 109 | */ |
| 109 | skb_pull(skb, X25_STD_MIN_LEN); | 110 | skb_pull(skb, X25_STD_MIN_LEN); |
| 110 | skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr)); | 111 | |
| 111 | skb_pull(skb, | 112 | len = x25_parse_address_block(skb, &source_addr, |
| 112 | x25_parse_facilities(skb, &x25->facilities, | 113 | &dest_addr); |
| 114 | if (len > 0) | ||
| 115 | skb_pull(skb, len); | ||
| 116 | |||
| 117 | len = x25_parse_facilities(skb, &x25->facilities, | ||
| 113 | &x25->dte_facilities, | 118 | &x25->dte_facilities, |
| 114 | &x25->vc_facil_mask)); | 119 | &x25->vc_facil_mask); |
| 120 | if (len > 0) | ||
| 121 | skb_pull(skb, len); | ||
| 115 | /* | 122 | /* |
| 116 | * Copy any Call User Data. | 123 | * Copy any Call User Data. |
| 117 | */ | 124 | */ |
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h index 8da6a8428086..cd4f734e2749 100644 --- a/security/selinux/ss/avtab.h +++ b/security/selinux/ss/avtab.h | |||
| @@ -82,7 +82,7 @@ struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified | |||
| 82 | void avtab_cache_init(void); | 82 | void avtab_cache_init(void); |
| 83 | void avtab_cache_destroy(void); | 83 | void avtab_cache_destroy(void); |
| 84 | 84 | ||
| 85 | #define MAX_AVTAB_HASH_BITS 13 | 85 | #define MAX_AVTAB_HASH_BITS 11 |
| 86 | #define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS) | 86 | #define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS) |
| 87 | #define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1) | 87 | #define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1) |
| 88 | #define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS | 88 | #define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS |
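
Editor's note: dropping MAX_AVTAB_HASH_BITS from 13 to 11 shrinks the maximum bucket count from 8192 to 2048 slots. A rough standalone calculation of the effect, assuming one pointer-sized bucket head per slot (an assumption for illustration; the real layout lives in the avtab code, and a policy typically carries more than one such table):

    #include <stdio.h>

    int main(void)
    {
            unsigned long ptr = sizeof(void *);  /* bucket head assumed pointer-sized */

            printf("13 bits: %5lu buckets, ~%lu KiB of bucket heads\n",
                   1UL << 13, (1UL << 13) * ptr / 1024);
            printf("11 bits: %5lu buckets, ~%lu KiB of bucket heads\n",
                   1UL << 11, (1UL << 11) * ptr / 1024);
            return 0;
    }
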
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c index 656e474dca47..91acc9a243ec 100644 --- a/sound/arm/aaci.c +++ b/sound/arm/aaci.c | |||
| @@ -863,7 +863,6 @@ static int __devinit aaci_probe_ac97(struct aaci *aaci) | |||
| 863 | struct snd_ac97 *ac97; | 863 | struct snd_ac97 *ac97; |
| 864 | int ret; | 864 | int ret; |
| 865 | 865 | ||
| 866 | writel(0, aaci->base + AC97_POWERDOWN); | ||
| 867 | /* | 866 | /* |
| 868 | * Assert AACIRESET for 2us | 867 | * Assert AACIRESET for 2us |
| 869 | */ | 868 | */ |
| @@ -1047,7 +1046,11 @@ static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id) | |||
| 1047 | 1046 | ||
| 1048 | writel(0x1fff, aaci->base + AACI_INTCLR); | 1047 | writel(0x1fff, aaci->base + AACI_INTCLR); |
| 1049 | writel(aaci->maincr, aaci->base + AACI_MAINCR); | 1048 | writel(aaci->maincr, aaci->base + AACI_MAINCR); |
| 1050 | 1049 | /* | |
| 1050 | * Fix: ac97 read back fail errors by reading | ||
| 1051 | * from any arbitrary aaci register. | ||
| 1052 | */ | ||
| 1053 | readl(aaci->base + AACI_CSCH1); | ||
| 1051 | ret = aaci_probe_ac97(aaci); | 1054 | ret = aaci_probe_ac97(aaci); |
| 1052 | if (ret) | 1055 | if (ret) |
| 1053 | goto out; | 1056 | goto out; |
diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c index fff62cc8607c..971a84a4fa77 100644 --- a/sound/i2c/other/ak4113.c +++ b/sound/i2c/other/ak4113.c | |||
| @@ -70,7 +70,7 @@ static int snd_ak4113_dev_free(struct snd_device *device) | |||
| 70 | } | 70 | } |
| 71 | 71 | ||
| 72 | int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read, | 72 | int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read, |
| 73 | ak4113_write_t *write, const unsigned char pgm[5], | 73 | ak4113_write_t *write, const unsigned char *pgm, |
| 74 | void *private_data, struct ak4113 **r_ak4113) | 74 | void *private_data, struct ak4113 **r_ak4113) |
| 75 | { | 75 | { |
| 76 | struct ak4113 *chip; | 76 | struct ak4113 *chip; |
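
Editor's note: in C a `const unsigned char pgm[5]` function parameter is adjusted to `const unsigned char *pgm` anyway, so the hunk above changes how the prototype reads (presumably to match the declaration used elsewhere) rather than the generated code. A quick standalone check of that adjustment; note that some compilers warn about `sizeof` on an array parameter for exactly this reason:

    #include <stdio.h>

    static size_t as_array(const unsigned char pgm[5]) { return sizeof(pgm); }
    static size_t as_ptr(const unsigned char *pgm)     { return sizeof(pgm); }

    int main(void)
    {
            const unsigned char regs[5] = { 0 };

            /* Both print the size of a pointer, not 5. */
            printf("%zu %zu\n", as_array(regs), as_ptr(regs));
            return 0;
    }
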
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c index 8dab82d7d19d..668a5ec04499 100644 --- a/sound/pci/echoaudio/echoaudio.c +++ b/sound/pci/echoaudio/echoaudio.c | |||
| @@ -2184,10 +2184,9 @@ static int __devinit snd_echo_probe(struct pci_dev *pci, | |||
| 2184 | goto ctl_error; | 2184 | goto ctl_error; |
| 2185 | #endif | 2185 | #endif |
| 2186 | 2186 | ||
| 2187 | if ((err = snd_card_register(card)) < 0) { | 2187 | err = snd_card_register(card); |
| 2188 | snd_card_free(card); | 2188 | if (err < 0) |
| 2189 | goto ctl_error; | 2189 | goto ctl_error; |
| 2190 | } | ||
| 2191 | snd_printk(KERN_INFO "Card registered: %s\n", card->longname); | 2190 | snd_printk(KERN_INFO "Card registered: %s\n", card->longname); |
| 2192 | 2191 | ||
| 2193 | pci_set_drvdata(pci, chip); | 2192 | pci_set_drvdata(pci, chip); |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 4bb90675f70f..f669442b7c82 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
| @@ -2272,6 +2272,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = { | |||
| 2272 | SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB), | 2272 | SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB), |
| 2273 | SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), | 2273 | SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), |
| 2274 | SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), | 2274 | SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), |
| 2275 | SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB), | ||
| 2275 | SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB), | 2276 | SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB), |
| 2276 | {} | 2277 | {} |
| 2277 | }; | 2278 | }; |
| @@ -2362,6 +2363,7 @@ static struct snd_pci_quirk msi_black_list[] __devinitdata = { | |||
| 2362 | SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */ | 2363 | SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */ |
| 2363 | SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */ | 2364 | SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */ |
| 2364 | SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */ | 2365 | SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */ |
| 2366 | SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */ | ||
| 2365 | {} | 2367 | {} |
| 2366 | }; | 2368 | }; |
| 2367 | 2369 | ||
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index e6d1bdff1b6e..af34606c30c3 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c | |||
| @@ -1896,6 +1896,14 @@ static int patch_ad1981(struct hda_codec *codec) | |||
| 1896 | case AD1981_THINKPAD: | 1896 | case AD1981_THINKPAD: |
| 1897 | spec->mixers[0] = ad1981_thinkpad_mixers; | 1897 | spec->mixers[0] = ad1981_thinkpad_mixers; |
| 1898 | spec->input_mux = &ad1981_thinkpad_capture_source; | 1898 | spec->input_mux = &ad1981_thinkpad_capture_source; |
| 1899 | /* set the upper-limit for mixer amp to 0dB for avoiding the | ||
| 1900 | * possible damage by overloading | ||
| 1901 | */ | ||
| 1902 | snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT, | ||
| 1903 | (0x17 << AC_AMPCAP_OFFSET_SHIFT) | | ||
| 1904 | (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) | | ||
| 1905 | (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) | | ||
| 1906 | (1 << AC_AMPCAP_MUTE_SHIFT)); | ||
| 1899 | break; | 1907 | break; |
| 1900 | case AD1981_TOSHIBA: | 1908 | case AD1981_TOSHIBA: |
| 1901 | spec->mixers[0] = ad1981_hp_mixers; | 1909 | spec->mixers[0] = ad1981_hp_mixers; |
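
Editor's note: the override above packs offset, number of steps, step size and a mute bit into one amp-capability word so that the mixer amp on NID 0x11 tops out at 0 dB. Using the HD-audio encoding of the step-size field ((value + 1) * 0.25 dB per step -- stated here from the spec, not from this hunk), setting num_steps equal to offset puts the highest selectable setting exactly at 0 dB. A standalone check with the values written in the hunk (0x17, 0x17, 0x05):

    #include <stdio.h>

    int main(void)
    {
            int offset = 0x17, num_steps = 0x17, step_size = 0x05;
            double db_per_step = (step_size + 1) * 0.25;

            printf("range: %.1f dB .. %.1f dB\n",
                   (0 - offset) * db_per_step,
                   (num_steps - offset) * db_per_step);
            return 0;
    }
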
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 9a23444e9e7a..aad1627f56f1 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -230,6 +230,7 @@ enum { | |||
| 230 | ALC888_ACER_ASPIRE_7730G, | 230 | ALC888_ACER_ASPIRE_7730G, |
| 231 | ALC883_MEDION, | 231 | ALC883_MEDION, |
| 232 | ALC883_MEDION_MD2, | 232 | ALC883_MEDION_MD2, |
| 233 | ALC883_MEDION_WIM2160, | ||
| 233 | ALC883_LAPTOP_EAPD, | 234 | ALC883_LAPTOP_EAPD, |
| 234 | ALC883_LENOVO_101E_2ch, | 235 | ALC883_LENOVO_101E_2ch, |
| 235 | ALC883_LENOVO_NB0763, | 236 | ALC883_LENOVO_NB0763, |
| @@ -1389,22 +1390,31 @@ struct alc_fixup { | |||
| 1389 | 1390 | ||
| 1390 | static void alc_pick_fixup(struct hda_codec *codec, | 1391 | static void alc_pick_fixup(struct hda_codec *codec, |
| 1391 | const struct snd_pci_quirk *quirk, | 1392 | const struct snd_pci_quirk *quirk, |
| 1392 | const struct alc_fixup *fix) | 1393 | const struct alc_fixup *fix, |
| 1394 | int pre_init) | ||
| 1393 | { | 1395 | { |
| 1394 | const struct alc_pincfg *cfg; | 1396 | const struct alc_pincfg *cfg; |
| 1395 | 1397 | ||
| 1396 | quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk); | 1398 | quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk); |
| 1397 | if (!quirk) | 1399 | if (!quirk) |
| 1398 | return; | 1400 | return; |
| 1399 | |||
| 1400 | fix += quirk->value; | 1401 | fix += quirk->value; |
| 1401 | cfg = fix->pins; | 1402 | cfg = fix->pins; |
| 1402 | if (cfg) { | 1403 | if (pre_init && cfg) { |
| 1404 | #ifdef CONFIG_SND_DEBUG_VERBOSE | ||
| 1405 | snd_printdd(KERN_INFO "hda_codec: %s: Apply pincfg for %s\n", | ||
| 1406 | codec->chip_name, quirk->name); | ||
| 1407 | #endif | ||
| 1403 | for (; cfg->nid; cfg++) | 1408 | for (; cfg->nid; cfg++) |
| 1404 | snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val); | 1409 | snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val); |
| 1405 | } | 1410 | } |
| 1406 | if (fix->verbs) | 1411 | if (!pre_init && fix->verbs) { |
| 1412 | #ifdef CONFIG_SND_DEBUG_VERBOSE | ||
| 1413 | snd_printdd(KERN_INFO "hda_codec: %s: Apply fix-verbs for %s\n", | ||
| 1414 | codec->chip_name, quirk->name); | ||
| 1415 | #endif | ||
| 1407 | add_verb(codec->spec, fix->verbs); | 1416 | add_verb(codec->spec, fix->verbs); |
| 1417 | } | ||
| 1408 | } | 1418 | } |
| 1409 | 1419 | ||
| 1410 | static int alc_read_coef_idx(struct hda_codec *codec, | 1420 | static int alc_read_coef_idx(struct hda_codec *codec, |
| @@ -1621,6 +1631,11 @@ static struct hda_verb alc888_acer_aspire_4930g_verbs[] = { | |||
| 1621 | */ | 1631 | */ |
| 1622 | 1632 | ||
| 1623 | static struct hda_verb alc888_acer_aspire_6530g_verbs[] = { | 1633 | static struct hda_verb alc888_acer_aspire_6530g_verbs[] = { |
| 1634 | /* Route to built-in subwoofer as well as speakers */ | ||
| 1635 | {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, | ||
| 1636 | {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, | ||
| 1637 | {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, | ||
| 1638 | {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, | ||
| 1624 | /* Bias voltage on for external mic port */ | 1639 | /* Bias voltage on for external mic port */ |
| 1625 | {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN | PIN_VREF80}, | 1640 | {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN | PIN_VREF80}, |
| 1626 | /* Front Mic: set to PIN_IN (empty by default) */ | 1641 | /* Front Mic: set to PIN_IN (empty by default) */ |
| @@ -1632,10 +1647,12 @@ static struct hda_verb alc888_acer_aspire_6530g_verbs[] = { | |||
| 1632 | /* Enable speaker output */ | 1647 | /* Enable speaker output */ |
| 1633 | {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, | 1648 | {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, |
| 1634 | {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, | 1649 | {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, |
| 1650 | {0x14, AC_VERB_SET_EAPD_BTLENABLE, 2}, | ||
| 1635 | /* Enable headphone output */ | 1651 | /* Enable headphone output */ |
| 1636 | {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | PIN_HP}, | 1652 | {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | PIN_HP}, |
| 1637 | {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, | 1653 | {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, |
| 1638 | {0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, | 1654 | {0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, |
| 1655 | {0x15, AC_VERB_SET_EAPD_BTLENABLE, 2}, | ||
| 1639 | { } | 1656 | { } |
| 1640 | }; | 1657 | }; |
| 1641 | 1658 | ||
| @@ -4801,6 +4818,25 @@ static void alc880_auto_init_analog_input(struct hda_codec *codec) | |||
| 4801 | } | 4818 | } |
| 4802 | } | 4819 | } |
| 4803 | 4820 | ||
| 4821 | static void alc880_auto_init_input_src(struct hda_codec *codec) | ||
| 4822 | { | ||
| 4823 | struct alc_spec *spec = codec->spec; | ||
| 4824 | int c; | ||
| 4825 | |||
| 4826 | for (c = 0; c < spec->num_adc_nids; c++) { | ||
| 4827 | unsigned int mux_idx; | ||
| 4828 | const struct hda_input_mux *imux; | ||
| 4829 | mux_idx = c >= spec->num_mux_defs ? 0 : c; | ||
| 4830 | imux = &spec->input_mux[mux_idx]; | ||
| 4831 | if (!imux->num_items && mux_idx > 0) | ||
| 4832 | imux = &spec->input_mux[0]; | ||
| 4833 | if (imux) | ||
| 4834 | snd_hda_codec_write(codec, spec->adc_nids[c], 0, | ||
| 4835 | AC_VERB_SET_CONNECT_SEL, | ||
| 4836 | imux->items[0].index); | ||
| 4837 | } | ||
| 4838 | } | ||
| 4839 | |||
| 4804 | /* parse the BIOS configuration and set up the alc_spec */ | 4840 | /* parse the BIOS configuration and set up the alc_spec */ |
| 4805 | /* return 1 if successful, 0 if the proper config is not found, | 4841 | /* return 1 if successful, 0 if the proper config is not found, |
| 4806 | * or a negative error code | 4842 | * or a negative error code |
| @@ -4879,6 +4915,7 @@ static void alc880_auto_init(struct hda_codec *codec) | |||
| 4879 | alc880_auto_init_multi_out(codec); | 4915 | alc880_auto_init_multi_out(codec); |
| 4880 | alc880_auto_init_extra_out(codec); | 4916 | alc880_auto_init_extra_out(codec); |
| 4881 | alc880_auto_init_analog_input(codec); | 4917 | alc880_auto_init_analog_input(codec); |
| 4918 | alc880_auto_init_input_src(codec); | ||
| 4882 | if (spec->unsol_event) | 4919 | if (spec->unsol_event) |
| 4883 | alc_inithook(codec); | 4920 | alc_inithook(codec); |
| 4884 | } | 4921 | } |
| @@ -4984,6 +5021,70 @@ static void set_capture_mixer(struct hda_codec *codec) | |||
| 4984 | } | 5021 | } |
| 4985 | } | 5022 | } |
| 4986 | 5023 | ||
| 5024 | /* fill adc_nids (and capsrc_nids) containing all active input pins */ | ||
| 5025 | static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids, | ||
| 5026 | int num_nids) | ||
| 5027 | { | ||
| 5028 | struct alc_spec *spec = codec->spec; | ||
| 5029 | int n; | ||
| 5030 | hda_nid_t fallback_adc = 0, fallback_cap = 0; | ||
| 5031 | |||
| 5032 | for (n = 0; n < num_nids; n++) { | ||
| 5033 | hda_nid_t adc, cap; | ||
| 5034 | hda_nid_t conn[HDA_MAX_NUM_INPUTS]; | ||
| 5035 | int nconns, i, j; | ||
| 5036 | |||
| 5037 | adc = nids[n]; | ||
| 5038 | if (get_wcaps_type(get_wcaps(codec, adc)) != AC_WID_AUD_IN) | ||
| 5039 | continue; | ||
| 5040 | cap = adc; | ||
| 5041 | nconns = snd_hda_get_connections(codec, cap, conn, | ||
| 5042 | ARRAY_SIZE(conn)); | ||
| 5043 | if (nconns == 1) { | ||
| 5044 | cap = conn[0]; | ||
| 5045 | nconns = snd_hda_get_connections(codec, cap, conn, | ||
| 5046 | ARRAY_SIZE(conn)); | ||
| 5047 | } | ||
| 5048 | if (nconns <= 0) | ||
| 5049 | continue; | ||
| 5050 | if (!fallback_adc) { | ||
| 5051 | fallback_adc = adc; | ||
| 5052 | fallback_cap = cap; | ||
| 5053 | } | ||
| 5054 | for (i = 0; i < AUTO_PIN_LAST; i++) { | ||
| 5055 | hda_nid_t nid = spec->autocfg.input_pins[i]; | ||
| 5056 | if (!nid) | ||
| 5057 | continue; | ||
| 5058 | for (j = 0; j < nconns; j++) { | ||
| 5059 | if (conn[j] == nid) | ||
| 5060 | break; | ||
| 5061 | } | ||
| 5062 | if (j >= nconns) | ||
| 5063 | break; | ||
| 5064 | } | ||
| 5065 | if (i >= AUTO_PIN_LAST) { | ||
| 5066 | int num_adcs = spec->num_adc_nids; | ||
| 5067 | spec->private_adc_nids[num_adcs] = adc; | ||
| 5068 | spec->private_capsrc_nids[num_adcs] = cap; | ||
| 5069 | spec->num_adc_nids++; | ||
| 5070 | spec->adc_nids = spec->private_adc_nids; | ||
| 5071 | if (adc != cap) | ||
| 5072 | spec->capsrc_nids = spec->private_capsrc_nids; | ||
| 5073 | } | ||
| 5074 | } | ||
| 5075 | if (!spec->num_adc_nids) { | ||
| 5076 | printk(KERN_WARNING "hda_codec: %s: no valid ADC found;" | ||
| 5077 | " using fallback 0x%x\n", | ||
| 5078 | codec->chip_name, fallback_adc); | ||
| 5079 | spec->private_adc_nids[0] = fallback_adc; | ||
| 5080 | spec->adc_nids = spec->private_adc_nids; | ||
| 5081 | if (fallback_adc != fallback_cap) { | ||
| 5082 | spec->private_capsrc_nids[0] = fallback_cap; | ||
| 5083 | spec->capsrc_nids = spec->private_adc_nids; | ||
| 5084 | } | ||
| 5085 | } | ||
| 5086 | } | ||
| 5087 | |||
| 4987 | #ifdef CONFIG_SND_HDA_INPUT_BEEP | 5088 | #ifdef CONFIG_SND_HDA_INPUT_BEEP |
| 4988 | #define set_beep_amp(spec, nid, idx, dir) \ | 5089 | #define set_beep_amp(spec, nid, idx, dir) \ |
| 4989 | ((spec)->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 3, idx, dir)) | 5090 | ((spec)->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 3, idx, dir)) |
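
Editor's note: fillup_priv_adc_nids() above keeps an ADC only when every configured input pin shows up in its (possibly one-hop-indirect) connection list, and falls back to the first usable ADC when none qualifies. The core selection rule, as a standalone sketch with invented node IDs:

    #include <stdio.h>

    /* Return 1 if every required pin appears in the connection list. */
    static int covers_all(const int *conn, int nconn, const int *pins, int npins)
    {
            for (int i = 0; i < npins; i++) {
                    int j;
                    for (j = 0; j < nconn; j++)
                            if (conn[j] == pins[i])
                                    break;
                    if (j >= nconn)
                            return 0;       /* this pin is unreachable */
            }
            return 1;
    }

    int main(void)
    {
            int pins[]  = { 0x18, 0x12 };           /* ext mic, int mic   */
            int adc_a[] = { 0x18, 0x12, 0x1b };     /* reaches both pins  */
            int adc_b[] = { 0x18, 0x1a };           /* misses the int mic */

            printf("%d %d\n",
                   covers_all(adc_a, 3, pins, 2),
                   covers_all(adc_b, 2, pins, 2));
            return 0;
    }
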
| @@ -6326,6 +6427,8 @@ static void alc260_auto_init_analog_input(struct hda_codec *codec) | |||
| 6326 | } | 6427 | } |
| 6327 | } | 6428 | } |
| 6328 | 6429 | ||
| 6430 | #define alc260_auto_init_input_src alc880_auto_init_input_src | ||
| 6431 | |||
| 6329 | /* | 6432 | /* |
| 6330 | * generic initialization of ADC, input mixers and output mixers | 6433 | * generic initialization of ADC, input mixers and output mixers |
| 6331 | */ | 6434 | */ |
| @@ -6412,6 +6515,7 @@ static void alc260_auto_init(struct hda_codec *codec) | |||
| 6412 | struct alc_spec *spec = codec->spec; | 6515 | struct alc_spec *spec = codec->spec; |
| 6413 | alc260_auto_init_multi_out(codec); | 6516 | alc260_auto_init_multi_out(codec); |
| 6414 | alc260_auto_init_analog_input(codec); | 6517 | alc260_auto_init_analog_input(codec); |
| 6518 | alc260_auto_init_input_src(codec); | ||
| 6415 | if (spec->unsol_event) | 6519 | if (spec->unsol_event) |
| 6416 | alc_inithook(codec); | 6520 | alc_inithook(codec); |
| 6417 | } | 6521 | } |
| @@ -8384,6 +8488,42 @@ static struct snd_kcontrol_new alc883_medion_md2_mixer[] = { | |||
| 8384 | { } /* end */ | 8488 | { } /* end */ |
| 8385 | }; | 8489 | }; |
| 8386 | 8490 | ||
| 8491 | static struct snd_kcontrol_new alc883_medion_wim2160_mixer[] = { | ||
| 8492 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), | ||
| 8493 | HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), | ||
| 8494 | HDA_CODEC_MUTE("Speaker Playback Switch", 0x15, 0x0, HDA_OUTPUT), | ||
| 8495 | HDA_CODEC_MUTE("Headphone Playback Switch", 0x1a, 0x0, HDA_OUTPUT), | ||
| 8496 | HDA_CODEC_VOLUME("Line Playback Volume", 0x08, 0x0, HDA_INPUT), | ||
| 8497 | HDA_CODEC_MUTE("Line Playback Switch", 0x08, 0x0, HDA_INPUT), | ||
| 8498 | { } /* end */ | ||
| 8499 | }; | ||
| 8500 | |||
| 8501 | static struct hda_verb alc883_medion_wim2160_verbs[] = { | ||
| 8502 | /* Unmute front mixer */ | ||
| 8503 | {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, | ||
| 8504 | {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, | ||
| 8505 | |||
| 8506 | /* Set speaker pin to front mixer */ | ||
| 8507 | {0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, | ||
| 8508 | |||
| 8509 | /* Init headphone pin */ | ||
| 8510 | {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP}, | ||
| 8511 | {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, | ||
| 8512 | {0x1a, AC_VERB_SET_CONNECT_SEL, 0x00}, | ||
| 8513 | {0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN}, | ||
| 8514 | |||
| 8515 | { } /* end */ | ||
| 8516 | }; | ||
| 8517 | |||
| 8518 | /* toggle speaker-output according to the hp-jack state */ | ||
| 8519 | static void alc883_medion_wim2160_setup(struct hda_codec *codec) | ||
| 8520 | { | ||
| 8521 | struct alc_spec *spec = codec->spec; | ||
| 8522 | |||
| 8523 | spec->autocfg.hp_pins[0] = 0x1a; | ||
| 8524 | spec->autocfg.speaker_pins[0] = 0x15; | ||
| 8525 | } | ||
| 8526 | |||
| 8387 | static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = { | 8527 | static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = { |
| 8388 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), | 8528 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), |
| 8389 | HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), | 8529 | HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), |
| @@ -8398,9 +8538,7 @@ static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = { | |||
| 8398 | 8538 | ||
| 8399 | static struct snd_kcontrol_new alc888_acer_aspire_6530_mixer[] = { | 8539 | static struct snd_kcontrol_new alc888_acer_aspire_6530_mixer[] = { |
| 8400 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), | 8540 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), |
| 8401 | HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), | ||
| 8402 | HDA_CODEC_VOLUME("LFE Playback Volume", 0x0f, 0x0, HDA_OUTPUT), | 8541 | HDA_CODEC_VOLUME("LFE Playback Volume", 0x0f, 0x0, HDA_OUTPUT), |
| 8403 | HDA_BIND_MUTE("LFE Playback Switch", 0x0f, 2, HDA_INPUT), | ||
| 8404 | HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), | 8542 | HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), |
| 8405 | HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), | 8543 | HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), |
| 8406 | HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), | 8544 | HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), |
| @@ -9095,6 +9233,7 @@ static const char *alc882_models[ALC882_MODEL_LAST] = { | |||
| 9095 | [ALC888_ACER_ASPIRE_7730G] = "acer-aspire-7730g", | 9233 | [ALC888_ACER_ASPIRE_7730G] = "acer-aspire-7730g", |
| 9096 | [ALC883_MEDION] = "medion", | 9234 | [ALC883_MEDION] = "medion", |
| 9097 | [ALC883_MEDION_MD2] = "medion-md2", | 9235 | [ALC883_MEDION_MD2] = "medion-md2", |
| 9236 | [ALC883_MEDION_WIM2160] = "medion-wim2160", | ||
| 9098 | [ALC883_LAPTOP_EAPD] = "laptop-eapd", | 9237 | [ALC883_LAPTOP_EAPD] = "laptop-eapd", |
| 9099 | [ALC883_LENOVO_101E_2ch] = "lenovo-101e", | 9238 | [ALC883_LENOVO_101E_2ch] = "lenovo-101e", |
| 9100 | [ALC883_LENOVO_NB0763] = "lenovo-nb0763", | 9239 | [ALC883_LENOVO_NB0763] = "lenovo-nb0763", |
| @@ -9211,6 +9350,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = { | |||
| 9211 | SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG), | 9350 | SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG), |
| 9212 | 9351 | ||
| 9213 | SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG), | 9352 | SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG), |
| 9353 | SND_PCI_QUIRK(0x1558, 0x0571, "Clevo laptop M570U", ALC883_3ST_6ch_DIG), | ||
| 9214 | SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720), | 9354 | SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720), |
| 9215 | SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720), | 9355 | SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720), |
| 9216 | SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R), | 9356 | SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R), |
| @@ -9749,6 +9889,21 @@ static struct alc_config_preset alc882_presets[] = { | |||
| 9749 | .setup = alc883_medion_md2_setup, | 9889 | .setup = alc883_medion_md2_setup, |
| 9750 | .init_hook = alc_automute_amp, | 9890 | .init_hook = alc_automute_amp, |
| 9751 | }, | 9891 | }, |
| 9892 | [ALC883_MEDION_WIM2160] = { | ||
| 9893 | .mixers = { alc883_medion_wim2160_mixer }, | ||
| 9894 | .init_verbs = { alc883_init_verbs, alc883_medion_wim2160_verbs }, | ||
| 9895 | .num_dacs = ARRAY_SIZE(alc883_dac_nids), | ||
| 9896 | .dac_nids = alc883_dac_nids, | ||
| 9897 | .dig_out_nid = ALC883_DIGOUT_NID, | ||
| 9898 | .num_adc_nids = ARRAY_SIZE(alc883_adc_nids), | ||
| 9899 | .adc_nids = alc883_adc_nids, | ||
| 9900 | .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes), | ||
| 9901 | .channel_mode = alc883_3ST_2ch_modes, | ||
| 9902 | .input_mux = &alc883_capture_source, | ||
| 9903 | .unsol_event = alc_automute_amp_unsol_event, | ||
| 9904 | .setup = alc883_medion_wim2160_setup, | ||
| 9905 | .init_hook = alc_automute_amp, | ||
| 9906 | }, | ||
| 9752 | [ALC883_LAPTOP_EAPD] = { | 9907 | [ALC883_LAPTOP_EAPD] = { |
| 9753 | .mixers = { alc883_base_mixer }, | 9908 | .mixers = { alc883_base_mixer }, |
| 9754 | .init_verbs = { alc883_init_verbs, alc882_eapd_verbs }, | 9909 | .init_verbs = { alc883_init_verbs, alc882_eapd_verbs }, |
| @@ -10041,13 +10196,12 @@ static void alc882_auto_set_output_and_unmute(struct hda_codec *codec, | |||
| 10041 | int idx; | 10196 | int idx; |
| 10042 | 10197 | ||
| 10043 | alc_set_pin_output(codec, nid, pin_type); | 10198 | alc_set_pin_output(codec, nid, pin_type); |
| 10199 | if (dac_idx >= spec->multiout.num_dacs) | ||
| 10200 | return; | ||
| 10044 | if (spec->multiout.dac_nids[dac_idx] == 0x25) | 10201 | if (spec->multiout.dac_nids[dac_idx] == 0x25) |
| 10045 | idx = 4; | 10202 | idx = 4; |
| 10046 | else { | 10203 | else |
| 10047 | if (spec->multiout.num_dacs >= dac_idx) | ||
| 10048 | return; | ||
| 10049 | idx = spec->multiout.dac_nids[dac_idx] - 2; | 10204 | idx = spec->multiout.dac_nids[dac_idx] - 2; |
| 10050 | } | ||
| 10051 | snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, idx); | 10205 | snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, idx); |
| 10052 | 10206 | ||
| 10053 | } | 10207 | } |
| @@ -10295,7 +10449,8 @@ static int patch_alc882(struct hda_codec *codec) | |||
| 10295 | board_config = ALC882_AUTO; | 10449 | board_config = ALC882_AUTO; |
| 10296 | } | 10450 | } |
| 10297 | 10451 | ||
| 10298 | alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups); | 10452 | if (board_config == ALC882_AUTO) |
| 10453 | alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups, 1); | ||
| 10299 | 10454 | ||
| 10300 | if (board_config == ALC882_AUTO) { | 10455 | if (board_config == ALC882_AUTO) { |
| 10301 | /* automatic parse from the BIOS config */ | 10456 | /* automatic parse from the BIOS config */ |
| @@ -10368,6 +10523,9 @@ static int patch_alc882(struct hda_codec *codec) | |||
| 10368 | set_capture_mixer(codec); | 10523 | set_capture_mixer(codec); |
| 10369 | set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); | 10524 | set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); |
| 10370 | 10525 | ||
| 10526 | if (board_config == ALC882_AUTO) | ||
| 10527 | alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups, 0); | ||
| 10528 | |||
| 10371 | spec->vmaster_nid = 0x0c; | 10529 | spec->vmaster_nid = 0x0c; |
| 10372 | 10530 | ||
| 10373 | codec->patch_ops = alc_patch_ops; | 10531 | codec->patch_ops = alc_patch_ops; |
| @@ -12459,11 +12617,11 @@ static void alc268_aspire_one_speaker_automute(struct hda_codec *codec) | |||
| 12459 | unsigned char bits; | 12617 | unsigned char bits; |
| 12460 | 12618 | ||
| 12461 | present = snd_hda_jack_detect(codec, 0x15); | 12619 | present = snd_hda_jack_detect(codec, 0x15); |
| 12462 | bits = present ? AMP_IN_MUTE(0) : 0; | 12620 | bits = present ? HDA_AMP_MUTE : 0; |
| 12463 | snd_hda_codec_amp_stereo(codec, 0x0f, HDA_INPUT, 0, | 12621 | snd_hda_codec_amp_stereo(codec, 0x0f, HDA_INPUT, 0, |
| 12464 | AMP_IN_MUTE(0), bits); | 12622 | HDA_AMP_MUTE, bits); |
| 12465 | snd_hda_codec_amp_stereo(codec, 0x0f, HDA_INPUT, 1, | 12623 | snd_hda_codec_amp_stereo(codec, 0x0f, HDA_INPUT, 1, |
| 12466 | AMP_IN_MUTE(0), bits); | 12624 | HDA_AMP_MUTE, bits); |
| 12467 | } | 12625 | } |
| 12468 | 12626 | ||
| 12469 | static void alc268_acer_lc_unsol_event(struct hda_codec *codec, | 12627 | static void alc268_acer_lc_unsol_event(struct hda_codec *codec, |
| @@ -12748,6 +12906,7 @@ static int alc268_new_analog_output(struct alc_spec *spec, hda_nid_t nid, | |||
| 12748 | dac = 0x02; | 12906 | dac = 0x02; |
| 12749 | break; | 12907 | break; |
| 12750 | case 0x15: | 12908 | case 0x15: |
| 12909 | case 0x21: /* ALC269vb has this pin, too */ | ||
| 12751 | dac = 0x03; | 12910 | dac = 0x03; |
| 12752 | break; | 12911 | break; |
| 12753 | default: | 12912 | default: |
| @@ -13333,9 +13492,9 @@ static hda_nid_t alc269vb_capsrc_nids[1] = { | |||
| 13333 | 0x22, | 13492 | 0x22, |
| 13334 | }; | 13493 | }; |
| 13335 | 13494 | ||
| 13336 | /* NOTE: ADC2 (0x07) is connected from a recording *MIXER* (0x24), | 13495 | static hda_nid_t alc269_adc_candidates[] = { |
| 13337 | * not a mux! | 13496 | 0x08, 0x09, 0x07, |
| 13338 | */ | 13497 | }; |
| 13339 | 13498 | ||
| 13340 | #define alc269_modes alc260_modes | 13499 | #define alc269_modes alc260_modes |
| 13341 | #define alc269_capture_source alc880_lg_lw_capture_source | 13500 | #define alc269_capture_source alc880_lg_lw_capture_source |
| @@ -13482,11 +13641,11 @@ static void alc269_quanta_fl1_speaker_automute(struct hda_codec *codec) | |||
| 13482 | unsigned char bits; | 13641 | unsigned char bits; |
| 13483 | 13642 | ||
| 13484 | present = snd_hda_jack_detect(codec, 0x15); | 13643 | present = snd_hda_jack_detect(codec, 0x15); |
| 13485 | bits = present ? AMP_IN_MUTE(0) : 0; | 13644 | bits = present ? HDA_AMP_MUTE : 0; |
| 13486 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, | 13645 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, |
| 13487 | AMP_IN_MUTE(0), bits); | 13646 | HDA_AMP_MUTE, bits); |
| 13488 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, | 13647 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, |
| 13489 | AMP_IN_MUTE(0), bits); | 13648 | HDA_AMP_MUTE, bits); |
| 13490 | 13649 | ||
| 13491 | snd_hda_codec_write(codec, 0x20, 0, | 13650 | snd_hda_codec_write(codec, 0x20, 0, |
| 13492 | AC_VERB_SET_COEF_INDEX, 0x0c); | 13651 | AC_VERB_SET_COEF_INDEX, 0x0c); |
| @@ -13511,11 +13670,11 @@ static void alc269_lifebook_speaker_automute(struct hda_codec *codec) | |||
| 13511 | /* Check port replicator headphone socket */ | 13670 | /* Check port replicator headphone socket */ |
| 13512 | present |= snd_hda_jack_detect(codec, 0x1a); | 13671 | present |= snd_hda_jack_detect(codec, 0x1a); |
| 13513 | 13672 | ||
| 13514 | bits = present ? AMP_IN_MUTE(0) : 0; | 13673 | bits = present ? HDA_AMP_MUTE : 0; |
| 13515 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, | 13674 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, |
| 13516 | AMP_IN_MUTE(0), bits); | 13675 | HDA_AMP_MUTE, bits); |
| 13517 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, | 13676 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, |
| 13518 | AMP_IN_MUTE(0), bits); | 13677 | HDA_AMP_MUTE, bits); |
| 13519 | 13678 | ||
| 13520 | snd_hda_codec_write(codec, 0x20, 0, | 13679 | snd_hda_codec_write(codec, 0x20, 0, |
| 13521 | AC_VERB_SET_COEF_INDEX, 0x0c); | 13680 | AC_VERB_SET_COEF_INDEX, 0x0c); |
| @@ -13646,11 +13805,11 @@ static void alc269_speaker_automute(struct hda_codec *codec) | |||
| 13646 | unsigned char bits; | 13805 | unsigned char bits; |
| 13647 | 13806 | ||
| 13648 | present = snd_hda_jack_detect(codec, nid); | 13807 | present = snd_hda_jack_detect(codec, nid); |
| 13649 | bits = present ? AMP_IN_MUTE(0) : 0; | 13808 | bits = present ? HDA_AMP_MUTE : 0; |
| 13650 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, | 13809 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, |
| 13651 | AMP_IN_MUTE(0), bits); | 13810 | HDA_AMP_MUTE, bits); |
| 13652 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, | 13811 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, |
| 13653 | AMP_IN_MUTE(0), bits); | 13812 | HDA_AMP_MUTE, bits); |
| 13654 | } | 13813 | } |
| 13655 | 13814 | ||
| 13656 | /* unsolicited event for HP jack sensing */ | 13815 | /* unsolicited event for HP jack sensing */ |
| @@ -13667,19 +13826,19 @@ static void alc269_laptop_unsol_event(struct hda_codec *codec, | |||
| 13667 | } | 13826 | } |
| 13668 | } | 13827 | } |
| 13669 | 13828 | ||
| 13670 | static void alc269_laptop_dmic_setup(struct hda_codec *codec) | 13829 | static void alc269_laptop_amic_setup(struct hda_codec *codec) |
| 13671 | { | 13830 | { |
| 13672 | struct alc_spec *spec = codec->spec; | 13831 | struct alc_spec *spec = codec->spec; |
| 13673 | spec->autocfg.hp_pins[0] = 0x15; | 13832 | spec->autocfg.hp_pins[0] = 0x15; |
| 13674 | spec->autocfg.speaker_pins[0] = 0x14; | 13833 | spec->autocfg.speaker_pins[0] = 0x14; |
| 13675 | spec->ext_mic.pin = 0x18; | 13834 | spec->ext_mic.pin = 0x18; |
| 13676 | spec->ext_mic.mux_idx = 0; | 13835 | spec->ext_mic.mux_idx = 0; |
| 13677 | spec->int_mic.pin = 0x12; | 13836 | spec->int_mic.pin = 0x19; |
| 13678 | spec->int_mic.mux_idx = 5; | 13837 | spec->int_mic.mux_idx = 1; |
| 13679 | spec->auto_mic = 1; | 13838 | spec->auto_mic = 1; |
| 13680 | } | 13839 | } |
| 13681 | 13840 | ||
| 13682 | static void alc269vb_laptop_dmic_setup(struct hda_codec *codec) | 13841 | static void alc269_laptop_dmic_setup(struct hda_codec *codec) |
| 13683 | { | 13842 | { |
| 13684 | struct alc_spec *spec = codec->spec; | 13843 | struct alc_spec *spec = codec->spec; |
| 13685 | spec->autocfg.hp_pins[0] = 0x15; | 13844 | spec->autocfg.hp_pins[0] = 0x15; |
| @@ -13687,14 +13846,14 @@ static void alc269vb_laptop_dmic_setup(struct hda_codec *codec) | |||
| 13687 | spec->ext_mic.pin = 0x18; | 13846 | spec->ext_mic.pin = 0x18; |
| 13688 | spec->ext_mic.mux_idx = 0; | 13847 | spec->ext_mic.mux_idx = 0; |
| 13689 | spec->int_mic.pin = 0x12; | 13848 | spec->int_mic.pin = 0x12; |
| 13690 | spec->int_mic.mux_idx = 6; | 13849 | spec->int_mic.mux_idx = 5; |
| 13691 | spec->auto_mic = 1; | 13850 | spec->auto_mic = 1; |
| 13692 | } | 13851 | } |
| 13693 | 13852 | ||
| 13694 | static void alc269_laptop_amic_setup(struct hda_codec *codec) | 13853 | static void alc269vb_laptop_amic_setup(struct hda_codec *codec) |
| 13695 | { | 13854 | { |
| 13696 | struct alc_spec *spec = codec->spec; | 13855 | struct alc_spec *spec = codec->spec; |
| 13697 | spec->autocfg.hp_pins[0] = 0x15; | 13856 | spec->autocfg.hp_pins[0] = 0x21; |
| 13698 | spec->autocfg.speaker_pins[0] = 0x14; | 13857 | spec->autocfg.speaker_pins[0] = 0x14; |
| 13699 | spec->ext_mic.pin = 0x18; | 13858 | spec->ext_mic.pin = 0x18; |
| 13700 | spec->ext_mic.mux_idx = 0; | 13859 | spec->ext_mic.mux_idx = 0; |
| @@ -13703,6 +13862,18 @@ static void alc269_laptop_amic_setup(struct hda_codec *codec) | |||
| 13703 | spec->auto_mic = 1; | 13862 | spec->auto_mic = 1; |
| 13704 | } | 13863 | } |
| 13705 | 13864 | ||
| 13865 | static void alc269vb_laptop_dmic_setup(struct hda_codec *codec) | ||
| 13866 | { | ||
| 13867 | struct alc_spec *spec = codec->spec; | ||
| 13868 | spec->autocfg.hp_pins[0] = 0x21; | ||
| 13869 | spec->autocfg.speaker_pins[0] = 0x14; | ||
| 13870 | spec->ext_mic.pin = 0x18; | ||
| 13871 | spec->ext_mic.mux_idx = 0; | ||
| 13872 | spec->int_mic.pin = 0x12; | ||
| 13873 | spec->int_mic.mux_idx = 6; | ||
| 13874 | spec->auto_mic = 1; | ||
| 13875 | } | ||
| 13876 | |||
| 13706 | static void alc269_laptop_inithook(struct hda_codec *codec) | 13877 | static void alc269_laptop_inithook(struct hda_codec *codec) |
| 13707 | { | 13878 | { |
| 13708 | alc269_speaker_automute(codec); | 13879 | alc269_speaker_automute(codec); |
| @@ -13842,7 +14013,6 @@ static int alc269_parse_auto_config(struct hda_codec *codec) | |||
| 13842 | struct alc_spec *spec = codec->spec; | 14013 | struct alc_spec *spec = codec->spec; |
| 13843 | int err; | 14014 | int err; |
| 13844 | static hda_nid_t alc269_ignore[] = { 0x1d, 0 }; | 14015 | static hda_nid_t alc269_ignore[] = { 0x1d, 0 }; |
| 13845 | hda_nid_t real_capsrc_nids; | ||
| 13846 | 14016 | ||
| 13847 | err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, | 14017 | err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, |
| 13848 | alc269_ignore); | 14018 | alc269_ignore); |
| @@ -13866,18 +14036,19 @@ static int alc269_parse_auto_config(struct hda_codec *codec) | |||
| 13866 | 14036 | ||
| 13867 | if ((alc_read_coef_idx(codec, 0) & 0x00f0) == 0x0010) { | 14037 | if ((alc_read_coef_idx(codec, 0) & 0x00f0) == 0x0010) { |
| 13868 | add_verb(spec, alc269vb_init_verbs); | 14038 | add_verb(spec, alc269vb_init_verbs); |
| 13869 | real_capsrc_nids = alc269vb_capsrc_nids[0]; | ||
| 13870 | alc_ssid_check(codec, 0, 0x1b, 0x14, 0x21); | 14039 | alc_ssid_check(codec, 0, 0x1b, 0x14, 0x21); |
| 13871 | } else { | 14040 | } else { |
| 13872 | add_verb(spec, alc269_init_verbs); | 14041 | add_verb(spec, alc269_init_verbs); |
| 13873 | real_capsrc_nids = alc269_capsrc_nids[0]; | ||
| 13874 | alc_ssid_check(codec, 0x15, 0x1b, 0x14, 0); | 14042 | alc_ssid_check(codec, 0x15, 0x1b, 0x14, 0); |
| 13875 | } | 14043 | } |
| 13876 | 14044 | ||
| 13877 | spec->num_mux_defs = 1; | 14045 | spec->num_mux_defs = 1; |
| 13878 | spec->input_mux = &spec->private_imux[0]; | 14046 | spec->input_mux = &spec->private_imux[0]; |
| 14047 | fillup_priv_adc_nids(codec, alc269_adc_candidates, | ||
| 14048 | sizeof(alc269_adc_candidates)); | ||
| 14049 | |||
| 13879 | /* set default input source */ | 14050 | /* set default input source */ |
| 13880 | snd_hda_codec_write_cache(codec, real_capsrc_nids, | 14051 | snd_hda_codec_write_cache(codec, spec->capsrc_nids[0], |
| 13881 | 0, AC_VERB_SET_CONNECT_SEL, | 14052 | 0, AC_VERB_SET_CONNECT_SEL, |
| 13882 | spec->input_mux->items[0].index); | 14053 | spec->input_mux->items[0].index); |
| 13883 | 14054 | ||
| @@ -13907,6 +14078,27 @@ static void alc269_auto_init(struct hda_codec *codec) | |||
| 13907 | alc_inithook(codec); | 14078 | alc_inithook(codec); |
| 13908 | } | 14079 | } |
| 13909 | 14080 | ||
| 14081 | enum { | ||
| 14082 | ALC269_FIXUP_SONY_VAIO, | ||
| 14083 | }; | ||
| 14084 | |||
| 14085 | const static struct hda_verb alc269_sony_vaio_fixup_verbs[] = { | ||
| 14086 | {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREFGRD}, | ||
| 14087 | {} | ||
| 14088 | }; | ||
| 14089 | |||
| 14090 | static const struct alc_fixup alc269_fixups[] = { | ||
| 14091 | [ALC269_FIXUP_SONY_VAIO] = { | ||
| 14092 | .verbs = alc269_sony_vaio_fixup_verbs | ||
| 14093 | }, | ||
| 14094 | }; | ||
| 14095 | |||
| 14096 | static struct snd_pci_quirk alc269_fixup_tbl[] = { | ||
| 14097 | SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), | ||
| 14098 | {} | ||
| 14099 | }; | ||
| 14100 | |||
| 14101 | |||
| 13910 | /* | 14102 | /* |
| 13911 | * configuration and preset | 14103 | * configuration and preset |
| 13912 | */ | 14104 | */ |
| @@ -13966,7 +14158,7 @@ static struct snd_pci_quirk alc269_cfg_tbl[] = { | |||
| 13966 | ALC269_DMIC), | 14158 | ALC269_DMIC), |
| 13967 | SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005HA", ALC269_DMIC), | 14159 | SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005HA", ALC269_DMIC), |
| 13968 | SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005HA", ALC269_DMIC), | 14160 | SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005HA", ALC269_DMIC), |
| 13969 | SND_PCI_QUIRK(0x104d, 0x9071, "SONY XTB", ALC269_DMIC), | 14161 | SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_AUTO), |
| 13970 | SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook ICH9M-based", ALC269_LIFEBOOK), | 14162 | SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook ICH9M-based", ALC269_LIFEBOOK), |
| 13971 | SND_PCI_QUIRK(0x152d, 0x1778, "Quanta ON1", ALC269_DMIC), | 14163 | SND_PCI_QUIRK(0x152d, 0x1778, "Quanta ON1", ALC269_DMIC), |
| 13972 | SND_PCI_QUIRK(0x1734, 0x115d, "FSC Amilo", ALC269_FUJITSU), | 14164 | SND_PCI_QUIRK(0x1734, 0x115d, "FSC Amilo", ALC269_FUJITSU), |
| @@ -14040,7 +14232,7 @@ static struct alc_config_preset alc269_presets[] = { | |||
| 14040 | .num_channel_mode = ARRAY_SIZE(alc269_modes), | 14232 | .num_channel_mode = ARRAY_SIZE(alc269_modes), |
| 14041 | .channel_mode = alc269_modes, | 14233 | .channel_mode = alc269_modes, |
| 14042 | .unsol_event = alc269_laptop_unsol_event, | 14234 | .unsol_event = alc269_laptop_unsol_event, |
| 14043 | .setup = alc269_laptop_amic_setup, | 14235 | .setup = alc269vb_laptop_amic_setup, |
| 14044 | .init_hook = alc269_laptop_inithook, | 14236 | .init_hook = alc269_laptop_inithook, |
| 14045 | }, | 14237 | }, |
| 14046 | [ALC269VB_DMIC] = { | 14238 | [ALC269VB_DMIC] = { |
| @@ -14120,6 +14312,9 @@ static int patch_alc269(struct hda_codec *codec) | |||
| 14120 | board_config = ALC269_AUTO; | 14312 | board_config = ALC269_AUTO; |
| 14121 | } | 14313 | } |
| 14122 | 14314 | ||
| 14315 | if (board_config == ALC269_AUTO) | ||
| 14316 | alc_pick_fixup(codec, alc269_fixup_tbl, alc269_fixups, 1); | ||
| 14317 | |||
| 14123 | if (board_config == ALC269_AUTO) { | 14318 | if (board_config == ALC269_AUTO) { |
| 14124 | /* automatic parse from the BIOS config */ | 14319 | /* automatic parse from the BIOS config */ |
| 14125 | err = alc269_parse_auto_config(codec); | 14320 | err = alc269_parse_auto_config(codec); |
| @@ -14156,20 +14351,25 @@ static int patch_alc269(struct hda_codec *codec) | |||
| 14156 | spec->stream_digital_playback = &alc269_pcm_digital_playback; | 14351 | spec->stream_digital_playback = &alc269_pcm_digital_playback; |
| 14157 | spec->stream_digital_capture = &alc269_pcm_digital_capture; | 14352 | spec->stream_digital_capture = &alc269_pcm_digital_capture; |
| 14158 | 14353 | ||
| 14159 | if (!is_alc269vb) { | 14354 | if (!spec->adc_nids) { /* wasn't filled automatically? use default */ |
| 14160 | spec->adc_nids = alc269_adc_nids; | 14355 | if (!is_alc269vb) { |
| 14161 | spec->num_adc_nids = ARRAY_SIZE(alc269_adc_nids); | 14356 | spec->adc_nids = alc269_adc_nids; |
| 14162 | spec->capsrc_nids = alc269_capsrc_nids; | 14357 | spec->num_adc_nids = ARRAY_SIZE(alc269_adc_nids); |
| 14163 | } else { | 14358 | spec->capsrc_nids = alc269_capsrc_nids; |
| 14164 | spec->adc_nids = alc269vb_adc_nids; | 14359 | } else { |
| 14165 | spec->num_adc_nids = ARRAY_SIZE(alc269vb_adc_nids); | 14360 | spec->adc_nids = alc269vb_adc_nids; |
| 14166 | spec->capsrc_nids = alc269vb_capsrc_nids; | 14361 | spec->num_adc_nids = ARRAY_SIZE(alc269vb_adc_nids); |
| 14362 | spec->capsrc_nids = alc269vb_capsrc_nids; | ||
| 14363 | } | ||
| 14167 | } | 14364 | } |
| 14168 | 14365 | ||
| 14169 | if (!spec->cap_mixer) | 14366 | if (!spec->cap_mixer) |
| 14170 | set_capture_mixer(codec); | 14367 | set_capture_mixer(codec); |
| 14171 | set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT); | 14368 | set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT); |
| 14172 | 14369 | ||
| 14370 | if (board_config == ALC269_AUTO) | ||
| 14371 | alc_pick_fixup(codec, alc269_fixup_tbl, alc269_fixups, 0); | ||
| 14372 | |||
| 14173 | spec->vmaster_nid = 0x02; | 14373 | spec->vmaster_nid = 0x02; |
| 14174 | 14374 | ||
| 14175 | codec->patch_ops = alc_patch_ops; | 14375 | codec->patch_ops = alc_patch_ops; |
| @@ -15258,7 +15458,8 @@ static int patch_alc861(struct hda_codec *codec) | |||
| 15258 | board_config = ALC861_AUTO; | 15458 | board_config = ALC861_AUTO; |
| 15259 | } | 15459 | } |
| 15260 | 15460 | ||
| 15261 | alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups); | 15461 | if (board_config == ALC861_AUTO) |
| 15462 | alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups, 1); | ||
| 15262 | 15463 | ||
| 15263 | if (board_config == ALC861_AUTO) { | 15464 | if (board_config == ALC861_AUTO) { |
| 15264 | /* automatic parse from the BIOS config */ | 15465 | /* automatic parse from the BIOS config */ |
| @@ -15295,6 +15496,9 @@ static int patch_alc861(struct hda_codec *codec) | |||
| 15295 | 15496 | ||
| 15296 | spec->vmaster_nid = 0x03; | 15497 | spec->vmaster_nid = 0x03; |
| 15297 | 15498 | ||
| 15499 | if (board_config == ALC861_AUTO) | ||
| 15500 | alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups, 0); | ||
| 15501 | |||
| 15298 | codec->patch_ops = alc_patch_ops; | 15502 | codec->patch_ops = alc_patch_ops; |
| 15299 | if (board_config == ALC861_AUTO) { | 15503 | if (board_config == ALC861_AUTO) { |
| 15300 | spec->init_hook = alc861_auto_init; | 15504 | spec->init_hook = alc861_auto_init; |
| @@ -16229,7 +16433,8 @@ static int patch_alc861vd(struct hda_codec *codec) | |||
| 16229 | board_config = ALC861VD_AUTO; | 16433 | board_config = ALC861VD_AUTO; |
| 16230 | } | 16434 | } |
| 16231 | 16435 | ||
| 16232 | alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups); | 16436 | if (board_config == ALC861VD_AUTO) |
| 16437 | alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups, 1); | ||
| 16233 | 16438 | ||
| 16234 | if (board_config == ALC861VD_AUTO) { | 16439 | if (board_config == ALC861VD_AUTO) { |
| 16235 | /* automatic parse from the BIOS config */ | 16440 | /* automatic parse from the BIOS config */ |
| @@ -16277,6 +16482,9 @@ static int patch_alc861vd(struct hda_codec *codec) | |||
| 16277 | 16482 | ||
| 16278 | spec->vmaster_nid = 0x02; | 16483 | spec->vmaster_nid = 0x02; |
| 16279 | 16484 | ||
| 16485 | if (board_config == ALC861VD_AUTO) | ||
| 16486 | alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups, 0); | ||
| 16487 | |||
| 16280 | codec->patch_ops = alc_patch_ops; | 16488 | codec->patch_ops = alc_patch_ops; |
| 16281 | 16489 | ||
| 16282 | if (board_config == ALC861VD_AUTO) | 16490 | if (board_config == ALC861VD_AUTO) |
| @@ -17115,9 +17323,9 @@ static void alc663_m51va_speaker_automute(struct hda_codec *codec) | |||
| 17115 | present = snd_hda_jack_detect(codec, 0x21); | 17323 | present = snd_hda_jack_detect(codec, 0x21); |
| 17116 | bits = present ? HDA_AMP_MUTE : 0; | 17324 | bits = present ? HDA_AMP_MUTE : 0; |
| 17117 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, | 17325 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, |
| 17118 | AMP_IN_MUTE(0), bits); | 17326 | HDA_AMP_MUTE, bits); |
| 17119 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, | 17327 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, |
| 17120 | AMP_IN_MUTE(0), bits); | 17328 | HDA_AMP_MUTE, bits); |
| 17121 | } | 17329 | } |
| 17122 | 17330 | ||
| 17123 | static void alc663_21jd_two_speaker_automute(struct hda_codec *codec) | 17331 | static void alc663_21jd_two_speaker_automute(struct hda_codec *codec) |
| @@ -17128,13 +17336,13 @@ static void alc663_21jd_two_speaker_automute(struct hda_codec *codec) | |||
| 17128 | present = snd_hda_jack_detect(codec, 0x21); | 17336 | present = snd_hda_jack_detect(codec, 0x21); |
| 17129 | bits = present ? HDA_AMP_MUTE : 0; | 17337 | bits = present ? HDA_AMP_MUTE : 0; |
| 17130 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, | 17338 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, |
| 17131 | AMP_IN_MUTE(0), bits); | 17339 | HDA_AMP_MUTE, bits); |
| 17132 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, | 17340 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, |
| 17133 | AMP_IN_MUTE(0), bits); | 17341 | HDA_AMP_MUTE, bits); |
| 17134 | snd_hda_codec_amp_stereo(codec, 0x0e, HDA_INPUT, 0, | 17342 | snd_hda_codec_amp_stereo(codec, 0x0e, HDA_INPUT, 0, |
| 17135 | AMP_IN_MUTE(0), bits); | 17343 | HDA_AMP_MUTE, bits); |
| 17136 | snd_hda_codec_amp_stereo(codec, 0x0e, HDA_INPUT, 1, | 17344 | snd_hda_codec_amp_stereo(codec, 0x0e, HDA_INPUT, 1, |
| 17137 | AMP_IN_MUTE(0), bits); | 17345 | HDA_AMP_MUTE, bits); |
| 17138 | } | 17346 | } |
| 17139 | 17347 | ||
| 17140 | static void alc663_15jd_two_speaker_automute(struct hda_codec *codec) | 17348 | static void alc663_15jd_two_speaker_automute(struct hda_codec *codec) |
| @@ -17145,13 +17353,13 @@ static void alc663_15jd_two_speaker_automute(struct hda_codec *codec) | |||
| 17145 | present = snd_hda_jack_detect(codec, 0x15); | 17353 | present = snd_hda_jack_detect(codec, 0x15); |
| 17146 | bits = present ? HDA_AMP_MUTE : 0; | 17354 | bits = present ? HDA_AMP_MUTE : 0; |
| 17147 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, | 17355 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, |
| 17148 | AMP_IN_MUTE(0), bits); | 17356 | HDA_AMP_MUTE, bits); |
| 17149 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, | 17357 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, |
| 17150 | AMP_IN_MUTE(0), bits); | 17358 | HDA_AMP_MUTE, bits); |
| 17151 | snd_hda_codec_amp_stereo(codec, 0x0e, HDA_INPUT, 0, | 17359 | snd_hda_codec_amp_stereo(codec, 0x0e, HDA_INPUT, 0, |
| 17152 | AMP_IN_MUTE(0), bits); | 17360 | HDA_AMP_MUTE, bits); |
| 17153 | snd_hda_codec_amp_stereo(codec, 0x0e, HDA_INPUT, 1, | 17361 | snd_hda_codec_amp_stereo(codec, 0x0e, HDA_INPUT, 1, |
| 17154 | AMP_IN_MUTE(0), bits); | 17362 | HDA_AMP_MUTE, bits); |
| 17155 | } | 17363 | } |
| 17156 | 17364 | ||
| 17157 | static void alc662_f5z_speaker_automute(struct hda_codec *codec) | 17365 | static void alc662_f5z_speaker_automute(struct hda_codec *codec) |
| @@ -17190,14 +17398,14 @@ static void alc663_two_hp_m2_speaker_automute(struct hda_codec *codec) | |||
| 17190 | 17398 | ||
| 17191 | if (present1 || present2) { | 17399 | if (present1 || present2) { |
| 17192 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, | 17400 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, |
| 17193 | AMP_IN_MUTE(0), AMP_IN_MUTE(0)); | 17401 | HDA_AMP_MUTE, HDA_AMP_MUTE); |
| 17194 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, | 17402 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, |
| 17195 | AMP_IN_MUTE(0), AMP_IN_MUTE(0)); | 17403 | HDA_AMP_MUTE, HDA_AMP_MUTE); |
| 17196 | } else { | 17404 | } else { |
| 17197 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, | 17405 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, |
| 17198 | AMP_IN_MUTE(0), 0); | 17406 | HDA_AMP_MUTE, 0); |
| 17199 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, | 17407 | snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, |
| 17200 | AMP_IN_MUTE(0), 0); | 17408 | HDA_AMP_MUTE, 0); |
| 17201 | } | 17409 | } |
| 17202 | } | 17410 | } |
| 17203 | 17411 | ||
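
The quirk entries added above key the new Sony VAIO fixup off the PCI subsystem ID (0x104d:0x9071). As a rough illustration of how such a table is consulted -- a minimal userspace sketch with made-up struct and function names, not the kernel's snd_pci_quirk helpers:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative stand-in for struct snd_pci_quirk: match on the
     * subsystem vendor/device pair and map it to a fixup index. */
    struct quirk {
        uint16_t subvendor;
        uint16_t subdevice;
        const char *name;
        int value;
    };

    enum { FIXUP_SONY_VAIO = 1 };

    static const struct quirk fixup_tbl[] = {
        { 0x104d, 0x9071, "Sony VAIO", FIXUP_SONY_VAIO },
        { 0 }   /* terminator, like the {} entry in the patch */
    };

    /* Return the quirk entry matching the codec's subsystem ID, or NULL. */
    static const struct quirk *quirk_lookup(uint32_t subsystem_id)
    {
        uint16_t vendor = subsystem_id >> 16;
        uint16_t device = subsystem_id & 0xffff;
        const struct quirk *q;

        for (q = fixup_tbl; q->subvendor; q++)
            if (q->subvendor == vendor && q->subdevice == device)
                return q;
        return NULL;
    }

    int main(void)
    {
        const struct quirk *q = quirk_lookup(0x104d9071);
        printf("%s -> fixup %d\n", q ? q->name : "no match",
               q ? q->value : -1);
        return 0;
    }

The in-kernel lookup is more elaborate (it also supports masked device IDs); this sketch only shows the exact-match case used by the new table.
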
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 9ddc37300f6b..73453814e098 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c | |||
| @@ -476,7 +476,7 @@ static struct snd_kcontrol_new *via_clone_control(struct via_spec *spec, | |||
| 476 | knew->name = kstrdup(tmpl->name, GFP_KERNEL); | 476 | knew->name = kstrdup(tmpl->name, GFP_KERNEL); |
| 477 | if (!knew->name) | 477 | if (!knew->name) |
| 478 | return NULL; | 478 | return NULL; |
| 479 | return 0; | 479 | return knew; |
| 480 | } | 480 | } |
| 481 | 481 | ||
| 482 | static void via_free_kctls(struct hda_codec *codec) | 482 | static void via_free_kctls(struct hda_codec *codec) |
| @@ -1215,14 +1215,13 @@ static struct snd_kcontrol_new via_hp_mixer[2] = { | |||
| 1215 | }, | 1215 | }, |
| 1216 | }; | 1216 | }; |
| 1217 | 1217 | ||
| 1218 | static int via_hp_build(struct via_spec *spec) | 1218 | static int via_hp_build(struct hda_codec *codec) |
| 1219 | { | 1219 | { |
| 1220 | struct via_spec *spec = codec->spec; | ||
| 1220 | struct snd_kcontrol_new *knew; | 1221 | struct snd_kcontrol_new *knew; |
| 1221 | hda_nid_t nid; | 1222 | hda_nid_t nid; |
| 1222 | 1223 | int nums; | |
| 1223 | knew = via_clone_control(spec, &via_hp_mixer[0]); | 1224 | hda_nid_t conn[HDA_MAX_CONNECTIONS]; |
| 1224 | if (knew == NULL) | ||
| 1225 | return -ENOMEM; | ||
| 1226 | 1225 | ||
| 1227 | switch (spec->codec_type) { | 1226 | switch (spec->codec_type) { |
| 1228 | case VT1718S: | 1227 | case VT1718S: |
| @@ -1239,6 +1238,14 @@ static int via_hp_build(struct via_spec *spec) | |||
| 1239 | break; | 1238 | break; |
| 1240 | } | 1239 | } |
| 1241 | 1240 | ||
| 1241 | nums = snd_hda_get_connections(codec, nid, conn, HDA_MAX_CONNECTIONS); | ||
| 1242 | if (nums <= 1) | ||
| 1243 | return 0; | ||
| 1244 | |||
| 1245 | knew = via_clone_control(spec, &via_hp_mixer[0]); | ||
| 1246 | if (knew == NULL) | ||
| 1247 | return -ENOMEM; | ||
| 1248 | |||
| 1242 | knew->subdevice = HDA_SUBDEV_NID_FLAG | nid; | 1249 | knew->subdevice = HDA_SUBDEV_NID_FLAG | nid; |
| 1243 | knew->private_value = nid; | 1250 | knew->private_value = nid; |
| 1244 | 1251 | ||
| @@ -2561,7 +2568,7 @@ static int vt1708_parse_auto_config(struct hda_codec *codec) | |||
| 2561 | spec->input_mux = &spec->private_imux[0]; | 2568 | spec->input_mux = &spec->private_imux[0]; |
| 2562 | 2569 | ||
| 2563 | if (spec->hp_mux) | 2570 | if (spec->hp_mux) |
| 2564 | via_hp_build(spec); | 2571 | via_hp_build(codec); |
| 2565 | 2572 | ||
| 2566 | via_smart51_build(spec); | 2573 | via_smart51_build(spec); |
| 2567 | return 1; | 2574 | return 1; |
| @@ -3087,7 +3094,7 @@ static int vt1709_parse_auto_config(struct hda_codec *codec) | |||
| 3087 | spec->input_mux = &spec->private_imux[0]; | 3094 | spec->input_mux = &spec->private_imux[0]; |
| 3088 | 3095 | ||
| 3089 | if (spec->hp_mux) | 3096 | if (spec->hp_mux) |
| 3090 | via_hp_build(spec); | 3097 | via_hp_build(codec); |
| 3091 | 3098 | ||
| 3092 | via_smart51_build(spec); | 3099 | via_smart51_build(spec); |
| 3093 | return 1; | 3100 | return 1; |
| @@ -3654,7 +3661,7 @@ static int vt1708B_parse_auto_config(struct hda_codec *codec) | |||
| 3654 | spec->input_mux = &spec->private_imux[0]; | 3661 | spec->input_mux = &spec->private_imux[0]; |
| 3655 | 3662 | ||
| 3656 | if (spec->hp_mux) | 3663 | if (spec->hp_mux) |
| 3657 | via_hp_build(spec); | 3664 | via_hp_build(codec); |
| 3658 | 3665 | ||
| 3659 | via_smart51_build(spec); | 3666 | via_smart51_build(spec); |
| 3660 | return 1; | 3667 | return 1; |
| @@ -4140,7 +4147,7 @@ static int vt1708S_parse_auto_config(struct hda_codec *codec) | |||
| 4140 | spec->input_mux = &spec->private_imux[0]; | 4147 | spec->input_mux = &spec->private_imux[0]; |
| 4141 | 4148 | ||
| 4142 | if (spec->hp_mux) | 4149 | if (spec->hp_mux) |
| 4143 | via_hp_build(spec); | 4150 | via_hp_build(codec); |
| 4144 | 4151 | ||
| 4145 | via_smart51_build(spec); | 4152 | via_smart51_build(spec); |
| 4146 | return 1; | 4153 | return 1; |
| @@ -4510,7 +4517,7 @@ static int vt1702_parse_auto_config(struct hda_codec *codec) | |||
| 4510 | spec->input_mux = &spec->private_imux[0]; | 4517 | spec->input_mux = &spec->private_imux[0]; |
| 4511 | 4518 | ||
| 4512 | if (spec->hp_mux) | 4519 | if (spec->hp_mux) |
| 4513 | via_hp_build(spec); | 4520 | via_hp_build(codec); |
| 4514 | 4521 | ||
| 4515 | return 1; | 4522 | return 1; |
| 4516 | } | 4523 | } |
| @@ -4930,7 +4937,7 @@ static int vt1718S_parse_auto_config(struct hda_codec *codec) | |||
| 4930 | spec->input_mux = &spec->private_imux[0]; | 4937 | spec->input_mux = &spec->private_imux[0]; |
| 4931 | 4938 | ||
| 4932 | if (spec->hp_mux) | 4939 | if (spec->hp_mux) |
| 4933 | via_hp_build(spec); | 4940 | via_hp_build(codec); |
| 4934 | 4941 | ||
| 4935 | via_smart51_build(spec); | 4942 | via_smart51_build(spec); |
| 4936 | 4943 | ||
| @@ -5425,7 +5432,7 @@ static int vt1716S_parse_auto_config(struct hda_codec *codec) | |||
| 5425 | spec->input_mux = &spec->private_imux[0]; | 5432 | spec->input_mux = &spec->private_imux[0]; |
| 5426 | 5433 | ||
| 5427 | if (spec->hp_mux) | 5434 | if (spec->hp_mux) |
| 5428 | via_hp_build(spec); | 5435 | via_hp_build(codec); |
| 5429 | 5436 | ||
| 5430 | via_smart51_build(spec); | 5437 | via_smart51_build(spec); |
| 5431 | 5438 | ||
| @@ -5781,7 +5788,7 @@ static int vt2002P_parse_auto_config(struct hda_codec *codec) | |||
| 5781 | spec->input_mux = &spec->private_imux[0]; | 5788 | spec->input_mux = &spec->private_imux[0]; |
| 5782 | 5789 | ||
| 5783 | if (spec->hp_mux) | 5790 | if (spec->hp_mux) |
| 5784 | via_hp_build(spec); | 5791 | via_hp_build(codec); |
| 5785 | 5792 | ||
| 5786 | return 1; | 5793 | return 1; |
| 5787 | } | 5794 | } |
| @@ -6000,12 +6007,12 @@ static int vt1812_auto_create_multi_out_ctls(struct via_spec *spec, | |||
| 6000 | 6007 | ||
| 6001 | /* Line-Out: PortE */ | 6008 | /* Line-Out: PortE */ |
| 6002 | err = via_add_control(spec, VIA_CTL_WIDGET_VOL, | 6009 | err = via_add_control(spec, VIA_CTL_WIDGET_VOL, |
| 6003 | "Master Front Playback Volume", | 6010 | "Front Playback Volume", |
| 6004 | HDA_COMPOSE_AMP_VAL(0x8, 3, 0, HDA_OUTPUT)); | 6011 | HDA_COMPOSE_AMP_VAL(0x8, 3, 0, HDA_OUTPUT)); |
| 6005 | if (err < 0) | 6012 | if (err < 0) |
| 6006 | return err; | 6013 | return err; |
| 6007 | err = via_add_control(spec, VIA_CTL_WIDGET_BIND_PIN_MUTE, | 6014 | err = via_add_control(spec, VIA_CTL_WIDGET_BIND_PIN_MUTE, |
| 6008 | "Master Front Playback Switch", | 6015 | "Front Playback Switch", |
| 6009 | HDA_COMPOSE_AMP_VAL(0x28, 3, 0, HDA_OUTPUT)); | 6016 | HDA_COMPOSE_AMP_VAL(0x28, 3, 0, HDA_OUTPUT)); |
| 6010 | if (err < 0) | 6017 | if (err < 0) |
| 6011 | return err; | 6018 | return err; |
| @@ -6130,7 +6137,7 @@ static int vt1812_parse_auto_config(struct hda_codec *codec) | |||
| 6130 | spec->input_mux = &spec->private_imux[0]; | 6137 | spec->input_mux = &spec->private_imux[0]; |
| 6131 | 6138 | ||
| 6132 | if (spec->hp_mux) | 6139 | if (spec->hp_mux) |
| 6133 | via_hp_build(spec); | 6140 | via_hp_build(codec); |
| 6134 | 6141 | ||
| 6135 | return 1; | 6142 | return 1; |
| 6136 | } | 6143 | } |
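
via_hp_build() now counts the connections of the headphone path before cloning its mixer control and bails out when there is only a single source, so the control is not offered where it cannot do anything. A hedged sketch of that guard with stand-in types (the real code queries the widget with snd_hda_get_connections()):

    #include <stdio.h>

    #define MAX_CONNECTIONS 32

    /* Illustrative stand-in for the HDA connection query. */
    static int get_connections(int nid, int *conn, int max)
    {
        (void)nid; (void)max;
        /* pretend the headphone pin can only be driven by one DAC */
        conn[0] = 0x08;
        return 1;
    }

    /* Sketch of the new via_hp_build() logic: a headphone source
     * control only makes sense with at least two selectable sources. */
    static int hp_build(int hp_nid)
    {
        int conn[MAX_CONNECTIONS];
        int nums = get_connections(hp_nid, conn, MAX_CONNECTIONS);

        if (nums <= 1) {
            printf("single source, no HP control created\n");
            return 0;
        }
        printf("%d sources, creating HP control\n", nums);
        /* ... clone the control template and bind it to hp_nid ... */
        return 0;
    }

    int main(void)
    {
        return hp_build(0x28);
    }
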
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c index 55e9315d4ccd..3be8f97c8bc0 100644 --- a/sound/pci/mixart/mixart.c +++ b/sound/pci/mixart/mixart.c | |||
| @@ -1162,13 +1162,15 @@ static long snd_mixart_BA0_read(struct snd_info_entry *entry, void *file_private | |||
| 1162 | unsigned long count, unsigned long pos) | 1162 | unsigned long count, unsigned long pos) |
| 1163 | { | 1163 | { |
| 1164 | struct mixart_mgr *mgr = entry->private_data; | 1164 | struct mixart_mgr *mgr = entry->private_data; |
| 1165 | unsigned long maxsize; | ||
| 1165 | 1166 | ||
| 1166 | count = count & ~3; /* make sure the read size is a multiple of 4 bytes */ | 1167 | if (pos >= MIXART_BA0_SIZE) |
| 1167 | if(count <= 0) | ||
| 1168 | return 0; | 1168 | return 0; |
| 1169 | if(pos + count > MIXART_BA0_SIZE) | 1169 | maxsize = MIXART_BA0_SIZE - pos; |
| 1170 | count = (long)(MIXART_BA0_SIZE - pos); | 1170 | if (count > maxsize) |
| 1171 | if(copy_to_user_fromio(buf, MIXART_MEM( mgr, pos ), count)) | 1171 | count = maxsize; |
| 1172 | count = count & ~3; /* make sure the read size is a multiple of 4 bytes */ | ||
| 1173 | if (copy_to_user_fromio(buf, MIXART_MEM(mgr, pos), count)) | ||
| 1172 | return -EFAULT; | 1174 | return -EFAULT; |
| 1173 | return count; | 1175 | return count; |
| 1174 | } | 1176 | } |
| @@ -1181,13 +1183,15 @@ static long snd_mixart_BA1_read(struct snd_info_entry *entry, void *file_private | |||
| 1181 | unsigned long count, unsigned long pos) | 1183 | unsigned long count, unsigned long pos) |
| 1182 | { | 1184 | { |
| 1183 | struct mixart_mgr *mgr = entry->private_data; | 1185 | struct mixart_mgr *mgr = entry->private_data; |
| 1186 | unsigned long maxsize; | ||
| 1184 | 1187 | ||
| 1185 | count = count & ~3; /* make sure the read size is a multiple of 4 bytes */ | 1188 | if (pos >= MIXART_BA1_SIZE) |
| 1186 | if(count <= 0) | ||
| 1187 | return 0; | 1189 | return 0; |
| 1188 | if(pos + count > MIXART_BA1_SIZE) | 1190 | maxsize = MIXART_BA1_SIZE - pos; |
| 1189 | count = (long)(MIXART_BA1_SIZE - pos); | 1191 | if (count > maxsize) |
| 1190 | if(copy_to_user_fromio(buf, MIXART_REG( mgr, pos ), count)) | 1192 | count = maxsize; |
| 1193 | count = count & ~3; /* make sure the read size is a multiple of 4 bytes */ | ||
| 1194 | if (copy_to_user_fromio(buf, MIXART_REG(mgr, pos), count)) | ||
| 1191 | return -EFAULT; | 1195 | return -EFAULT; |
| 1192 | return count; | 1196 | return count; |
| 1193 | } | 1197 | } |
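
Both proc read handlers now validate pos against the region size, clamp count to the remaining bytes, and only then round down to a 4-byte multiple, so an oversized request can no longer read past the BA0/BA1 window. A minimal userspace sketch of that clamping order (region size and names are illustrative):

    #include <stdio.h>

    /* Clamp a (pos, count) request against a fixed-size region, then
     * round the count down to a 4-byte multiple -- the same order as
     * the patch, so a huge count cannot run past the region end. */
    static unsigned long clamp_read(unsigned long region_size,
                                    unsigned long pos, unsigned long count)
    {
        unsigned long maxsize;

        if (pos >= region_size)
            return 0;
        maxsize = region_size - pos;
        if (count > maxsize)
            count = maxsize;
        count &= ~3UL;          /* multiple of 4 bytes */
        return count;
    }

    int main(void)
    {
        printf("%lu\n", clamp_read(0x4000, 0x3ffe, 4096)); /* -> 0     */
        printf("%lu\n", clamp_read(0x4000, 0x1000, 4096)); /* -> 4096  */
        printf("%lu\n", clamp_read(0x4000, 0x5000, 4096)); /* -> 0     */
        return 0;
    }
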
diff --git a/sound/soc/atmel/atmel-pcm.c b/sound/soc/atmel/atmel-pcm.c index 9ef6b96373f5..3e6628c8e665 100644 --- a/sound/soc/atmel/atmel-pcm.c +++ b/sound/soc/atmel/atmel-pcm.c | |||
| @@ -180,7 +180,7 @@ static int atmel_pcm_hw_params(struct snd_pcm_substream *substream, | |||
| 180 | snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); | 180 | snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); |
| 181 | runtime->dma_bytes = params_buffer_bytes(params); | 181 | runtime->dma_bytes = params_buffer_bytes(params); |
| 182 | 182 | ||
| 183 | prtd->params = rtd->dai->cpu_dai->dma_data; | 183 | prtd->params = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); |
| 184 | prtd->params->dma_intr_handler = atmel_pcm_dma_irq; | 184 | prtd->params->dma_intr_handler = atmel_pcm_dma_irq; |
| 185 | 185 | ||
| 186 | prtd->dma_buffer = runtime->dma_addr; | 186 | prtd->dma_buffer = runtime->dma_addr; |
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c index e588e63f18d2..0b59806905d1 100644 --- a/sound/soc/atmel/atmel_ssc_dai.c +++ b/sound/soc/atmel/atmel_ssc_dai.c | |||
| @@ -363,12 +363,12 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, | |||
| 363 | ssc_p->dma_params[dir] = dma_params; | 363 | ssc_p->dma_params[dir] = dma_params; |
| 364 | 364 | ||
| 365 | /* | 365 | /* |
| 366 | * The cpu_dai->dma_data field is only used to communicate the | 366 | * The snd_soc_pcm_stream->dma_data field is only used to communicate |
| 367 | * appropriate DMA parameters to the pcm driver hw_params() | 367 | * the appropriate DMA parameters to the pcm driver hw_params() |
| 368 | * function. It should not be used for other purposes | 368 | * function. It should not be used for other purposes |
| 369 | * as it is common to all substreams. | 369 | * as it is common to all substreams. |
| 370 | */ | 370 | */ |
| 371 | rtd->dai->cpu_dai->dma_data = dma_params; | 371 | snd_soc_dai_set_dma_data(rtd->dai->cpu_dai, substream, dma_params); |
| 372 | 372 | ||
| 373 | channels = params_channels(params); | 373 | channels = params_channels(params); |
| 374 | 374 | ||
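
The ASoC changes throughout this series replace the shared cpu_dai->dma_data pointer with per-substream storage accessed through snd_soc_dai_set_dma_data() and snd_soc_dai_get_dma_data(). A simplified sketch of the pattern, using stand-in structures rather than the real ASoC definitions: the DAI's hw_params() stores the parameters for one stream direction and the platform PCM driver reads them back for the same substream.

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified stand-ins: one DMA-parameter slot per stream
     * direction instead of a single shared dma_data pointer. */
    enum { STREAM_PLAYBACK, STREAM_CAPTURE, STREAM_MAX };

    struct dma_params { int channel; };

    struct dai {
        void *stream_dma_data[STREAM_MAX];
    };

    static void dai_set_dma_data(struct dai *dai, int stream, void *data)
    {
        dai->stream_dma_data[stream] = data;
    }

    static void *dai_get_dma_data(struct dai *dai, int stream)
    {
        return dai->stream_dma_data[stream];
    }

    /* CPU DAI hw_params(): pick the parameters for this direction. */
    static void cpu_dai_hw_params(struct dai *dai, int stream,
                                  struct dma_params *tx,
                                  struct dma_params *rx)
    {
        dai_set_dma_data(dai, stream,
                         stream == STREAM_PLAYBACK ? tx : rx);
    }

    int main(void)
    {
        struct dma_params tx = { .channel = 1 }, rx = { .channel = 2 };
        struct dai dai = { { NULL, NULL } };

        cpu_dai_hw_params(&dai, STREAM_CAPTURE, &tx, &rx);
        /* Platform PCM hw_params() reads it back for that substream: */
        struct dma_params *p = dai_get_dma_data(&dai, STREAM_CAPTURE);
        printf("capture DMA channel %d\n", p->channel);
        return 0;
    }
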
diff --git a/sound/soc/codecs/ac97.c b/sound/soc/codecs/ac97.c index fd101d450d56..1f5e57a4bb7a 100644 --- a/sound/soc/codecs/ac97.c +++ b/sound/soc/codecs/ac97.c | |||
| @@ -81,9 +81,11 @@ static int ac97_write(struct snd_soc_codec *codec, unsigned int reg, | |||
| 81 | static int ac97_soc_probe(struct platform_device *pdev) | 81 | static int ac97_soc_probe(struct platform_device *pdev) |
| 82 | { | 82 | { |
| 83 | struct snd_soc_device *socdev = platform_get_drvdata(pdev); | 83 | struct snd_soc_device *socdev = platform_get_drvdata(pdev); |
| 84 | struct snd_soc_card *card = socdev->card; | ||
| 84 | struct snd_soc_codec *codec; | 85 | struct snd_soc_codec *codec; |
| 85 | struct snd_ac97_bus *ac97_bus; | 86 | struct snd_ac97_bus *ac97_bus; |
| 86 | struct snd_ac97_template ac97_template; | 87 | struct snd_ac97_template ac97_template; |
| 88 | int i; | ||
| 87 | int ret = 0; | 89 | int ret = 0; |
| 88 | 90 | ||
| 89 | printk(KERN_INFO "AC97 SoC Audio Codec %s\n", AC97_VERSION); | 91 | printk(KERN_INFO "AC97 SoC Audio Codec %s\n", AC97_VERSION); |
| @@ -103,12 +105,6 @@ static int ac97_soc_probe(struct platform_device *pdev) | |||
| 103 | INIT_LIST_HEAD(&codec->dapm_widgets); | 105 | INIT_LIST_HEAD(&codec->dapm_widgets); |
| 104 | INIT_LIST_HEAD(&codec->dapm_paths); | 106 | INIT_LIST_HEAD(&codec->dapm_paths); |
| 105 | 107 | ||
| 106 | ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); | ||
| 107 | if (ret < 0) { | ||
| 108 | printk(KERN_ERR "ASoC: failed to init gen ac97 glue\n"); | ||
| 109 | goto err; | ||
| 110 | } | ||
| 111 | |||
| 112 | /* register pcms */ | 108 | /* register pcms */ |
| 113 | ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); | 109 | ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); |
| 114 | if (ret < 0) | 110 | if (ret < 0) |
| @@ -124,6 +120,13 @@ static int ac97_soc_probe(struct platform_device *pdev) | |||
| 124 | if (ret < 0) | 120 | if (ret < 0) |
| 125 | goto bus_err; | 121 | goto bus_err; |
| 126 | 122 | ||
| 123 | for (i = 0; i < card->num_links; i++) { | ||
| 124 | if (card->dai_link[i].codec_dai->ac97_control) { | ||
| 125 | snd_ac97_dev_add_pdata(codec->ac97, | ||
| 126 | card->dai_link[i].cpu_dai->ac97_pdata); | ||
| 127 | } | ||
| 128 | } | ||
| 129 | |||
| 127 | return 0; | 130 | return 0; |
| 128 | 131 | ||
| 129 | bus_err: | 132 | bus_err: |
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c index a34cbcf7904f..002e289d1255 100644 --- a/sound/soc/codecs/wm2000.c +++ b/sound/soc/codecs/wm2000.c | |||
| @@ -23,7 +23,6 @@ | |||
| 23 | 23 | ||
| 24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
| 25 | #include <linux/moduleparam.h> | 25 | #include <linux/moduleparam.h> |
| 26 | #include <linux/version.h> | ||
| 27 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
| 28 | #include <linux/init.h> | 27 | #include <linux/init.h> |
| 29 | #include <linux/firmware.h> | 28 | #include <linux/firmware.h> |
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 8d1c63754be4..9da0724cd47a 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c | |||
| @@ -3008,34 +3008,39 @@ static int wm8994_set_bias_level(struct snd_soc_codec *codec, | |||
| 3008 | break; | 3008 | break; |
| 3009 | 3009 | ||
| 3010 | case SND_SOC_BIAS_OFF: | 3010 | case SND_SOC_BIAS_OFF: |
| 3011 | /* Switch over to startup biases */ | 3011 | if (codec->bias_level == SND_SOC_BIAS_STANDBY) { |
| 3012 | snd_soc_update_bits(codec, WM8994_ANTIPOP_2, | 3012 | /* Switch over to startup biases */ |
| 3013 | WM8994_BIAS_SRC | WM8994_STARTUP_BIAS_ENA | | 3013 | snd_soc_update_bits(codec, WM8994_ANTIPOP_2, |
| 3014 | WM8994_VMID_BUF_ENA | | 3014 | WM8994_BIAS_SRC | |
| 3015 | WM8994_VMID_RAMP_MASK, | 3015 | WM8994_STARTUP_BIAS_ENA | |
| 3016 | WM8994_BIAS_SRC | WM8994_STARTUP_BIAS_ENA | | 3016 | WM8994_VMID_BUF_ENA | |
| 3017 | WM8994_VMID_BUF_ENA | | 3017 | WM8994_VMID_RAMP_MASK, |
| 3018 | (1 << WM8994_VMID_RAMP_SHIFT)); | 3018 | WM8994_BIAS_SRC | |
| 3019 | 3019 | WM8994_STARTUP_BIAS_ENA | | |
| 3020 | /* Disable main biases */ | 3020 | WM8994_VMID_BUF_ENA | |
| 3021 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1, | 3021 | (1 << WM8994_VMID_RAMP_SHIFT)); |
| 3022 | WM8994_BIAS_ENA | WM8994_VMID_SEL_MASK, 0); | ||
| 3023 | 3022 | ||
| 3024 | /* Discharge line */ | 3023 | /* Disable main biases */ |
| 3025 | snd_soc_update_bits(codec, WM8994_ANTIPOP_1, | 3024 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1, |
| 3026 | WM8994_LINEOUT1_DISCH | | 3025 | WM8994_BIAS_ENA | |
| 3027 | WM8994_LINEOUT2_DISCH, | 3026 | WM8994_VMID_SEL_MASK, 0); |
| 3028 | WM8994_LINEOUT1_DISCH | | ||
| 3029 | WM8994_LINEOUT2_DISCH); | ||
| 3030 | 3027 | ||
| 3031 | msleep(5); | 3028 | /* Discharge line */ |
| 3029 | snd_soc_update_bits(codec, WM8994_ANTIPOP_1, | ||
| 3030 | WM8994_LINEOUT1_DISCH | | ||
| 3031 | WM8994_LINEOUT2_DISCH, | ||
| 3032 | WM8994_LINEOUT1_DISCH | | ||
| 3033 | WM8994_LINEOUT2_DISCH); | ||
| 3032 | 3034 | ||
| 3033 | /* Switch off startup biases */ | 3035 | msleep(5); |
| 3034 | snd_soc_update_bits(codec, WM8994_ANTIPOP_2, | ||
| 3035 | WM8994_BIAS_SRC | WM8994_STARTUP_BIAS_ENA | | ||
| 3036 | WM8994_VMID_BUF_ENA | | ||
| 3037 | WM8994_VMID_RAMP_MASK, 0); | ||
| 3038 | 3036 | ||
| 3037 | /* Switch off startup biases */ | ||
| 3038 | snd_soc_update_bits(codec, WM8994_ANTIPOP_2, | ||
| 3039 | WM8994_BIAS_SRC | | ||
| 3040 | WM8994_STARTUP_BIAS_ENA | | ||
| 3041 | WM8994_VMID_BUF_ENA | | ||
| 3042 | WM8994_VMID_RAMP_MASK, 0); | ||
| 3043 | } | ||
| 3039 | break; | 3044 | break; |
| 3040 | } | 3045 | } |
| 3041 | codec->bias_level = level; | 3046 | codec->bias_level = level; |
| @@ -3402,7 +3407,7 @@ struct snd_soc_dai wm8994_dai[] = { | |||
| 3402 | .rates = WM8994_RATES, | 3407 | .rates = WM8994_RATES, |
| 3403 | .formats = WM8994_FORMATS, | 3408 | .formats = WM8994_FORMATS, |
| 3404 | }, | 3409 | }, |
| 3405 | .playback = { | 3410 | .capture = { |
| 3406 | .stream_name = "AIF3 Capture", | 3411 | .stream_name = "AIF3 Capture", |
| 3407 | .channels_min = 2, | 3412 | .channels_min = 2, |
| 3408 | .channels_max = 2, | 3413 | .channels_max = 2, |
| @@ -3731,11 +3736,12 @@ static int wm8994_codec_probe(struct platform_device *pdev) | |||
| 3731 | case 3: | 3736 | case 3: |
| 3732 | wm8994->hubs.dcs_codes = -5; | 3737 | wm8994->hubs.dcs_codes = -5; |
| 3733 | wm8994->hubs.hp_startup_mode = 1; | 3738 | wm8994->hubs.hp_startup_mode = 1; |
| 3739 | wm8994->hubs.dcs_readback_mode = 1; | ||
| 3734 | break; | 3740 | break; |
| 3735 | default: | 3741 | default: |
| 3742 | wm8994->hubs.dcs_readback_mode = 1; | ||
| 3736 | break; | 3743 | break; |
| 3737 | } | 3744 | } |
| 3738 | |||
| 3739 | 3745 | ||
| 3740 | /* Remember if AIFnLRCLK is configured as a GPIO. This should be | 3746 | /* Remember if AIFnLRCLK is configured as a GPIO. This should be |
| 3741 | * configured on init - if a system wants to do this dynamically | 3747 | * configured on init - if a system wants to do this dynamically |
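
wm8994_set_bias_level() now runs the VMID ramp-down and line discharge only when transitioning from standby, so requesting SND_SOC_BIAS_OFF again does not repeat the sequence. A minimal sketch of that guard (enum and function names are illustrative):

    #include <stdio.h>

    enum bias_level { BIAS_OFF, BIAS_STANDBY, BIAS_PREPARE, BIAS_ON };

    /* Sketch of the guarded power-down: the discharge/ramp sequence
     * only runs on a STANDBY -> OFF transition, so a repeated request
     * for BIAS_OFF becomes a no-op. */
    static void set_bias_off(enum bias_level *current_level)
    {
        if (*current_level == BIAS_STANDBY) {
            printf("ramping VMID down, discharging line outputs\n");
            /* ... register writes elided ... */
        }
        *current_level = BIAS_OFF;
    }

    int main(void)
    {
        enum bias_level level = BIAS_STANDBY;
        set_bias_off(&level);   /* runs the sequence */
        set_bias_off(&level);   /* already off: nothing to do */
        return 0;
    }
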
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c index 486bdd21a98a..e1f225a3ac46 100644 --- a/sound/soc/codecs/wm_hubs.c +++ b/sound/soc/codecs/wm_hubs.c | |||
| @@ -62,21 +62,27 @@ static const char *speaker_mode_text[] = { | |||
| 62 | static const struct soc_enum speaker_mode = | 62 | static const struct soc_enum speaker_mode = |
| 63 | SOC_ENUM_SINGLE(WM8993_SPKMIXR_ATTENUATION, 8, 2, speaker_mode_text); | 63 | SOC_ENUM_SINGLE(WM8993_SPKMIXR_ATTENUATION, 8, 2, speaker_mode_text); |
| 64 | 64 | ||
| 65 | static void wait_for_dc_servo(struct snd_soc_codec *codec) | 65 | static void wait_for_dc_servo(struct snd_soc_codec *codec, unsigned int op) |
| 66 | { | 66 | { |
| 67 | unsigned int reg; | 67 | unsigned int reg; |
| 68 | int count = 0; | 68 | int count = 0; |
| 69 | unsigned int val; | ||
| 70 | |||
| 71 | val = op | WM8993_DCS_ENA_CHAN_0 | WM8993_DCS_ENA_CHAN_1; | ||
| 72 | |||
| 73 | /* Trigger the command */ | ||
| 74 | snd_soc_write(codec, WM8993_DC_SERVO_0, val); | ||
| 69 | 75 | ||
| 70 | dev_dbg(codec->dev, "Waiting for DC servo...\n"); | 76 | dev_dbg(codec->dev, "Waiting for DC servo...\n"); |
| 71 | 77 | ||
| 72 | do { | 78 | do { |
| 73 | count++; | 79 | count++; |
| 74 | msleep(1); | 80 | msleep(1); |
| 75 | reg = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_0); | 81 | reg = snd_soc_read(codec, WM8993_DC_SERVO_0); |
| 76 | dev_dbg(codec->dev, "DC servo: %x\n", reg); | 82 | dev_dbg(codec->dev, "DC servo: %x\n", reg); |
| 77 | } while (reg & WM8993_DCS_DATAPATH_BUSY && count < 400); | 83 | } while (reg & op && count < 400); |
| 78 | 84 | ||
| 79 | if (reg & WM8993_DCS_DATAPATH_BUSY) | 85 | if (reg & op) |
| 80 | dev_err(codec->dev, "Timed out waiting for DC Servo\n"); | 86 | dev_err(codec->dev, "Timed out waiting for DC Servo\n"); |
| 81 | } | 87 | } |
| 82 | 88 | ||
| @@ -86,51 +92,58 @@ static void wait_for_dc_servo(struct snd_soc_codec *codec) | |||
| 86 | static void calibrate_dc_servo(struct snd_soc_codec *codec) | 92 | static void calibrate_dc_servo(struct snd_soc_codec *codec) |
| 87 | { | 93 | { |
| 88 | struct wm_hubs_data *hubs = codec->private_data; | 94 | struct wm_hubs_data *hubs = codec->private_data; |
| 89 | u16 reg, dcs_cfg; | 95 | u16 reg, reg_l, reg_r, dcs_cfg; |
| 90 | 96 | ||
| 91 | /* Set for 32 series updates */ | 97 | /* Set for 32 series updates */ |
| 92 | snd_soc_update_bits(codec, WM8993_DC_SERVO_1, | 98 | snd_soc_update_bits(codec, WM8993_DC_SERVO_1, |
| 93 | WM8993_DCS_SERIES_NO_01_MASK, | 99 | WM8993_DCS_SERIES_NO_01_MASK, |
| 94 | 32 << WM8993_DCS_SERIES_NO_01_SHIFT); | 100 | 32 << WM8993_DCS_SERIES_NO_01_SHIFT); |
| 95 | 101 | wait_for_dc_servo(codec, | |
| 96 | /* Enable the DC servo. Write all bits to avoid triggering startup | 102 | WM8993_DCS_TRIG_SERIES_0 | WM8993_DCS_TRIG_SERIES_1); |
| 97 | * or write calibration. | ||
| 98 | */ | ||
| 99 | snd_soc_update_bits(codec, WM8993_DC_SERVO_0, | ||
| 100 | 0xFFFF, | ||
| 101 | WM8993_DCS_ENA_CHAN_0 | | ||
| 102 | WM8993_DCS_ENA_CHAN_1 | | ||
| 103 | WM8993_DCS_TRIG_SERIES_1 | | ||
| 104 | WM8993_DCS_TRIG_SERIES_0); | ||
| 105 | |||
| 106 | wait_for_dc_servo(codec); | ||
| 107 | 103 | ||
| 108 | /* Apply correction to DC servo result */ | 104 | /* Apply correction to DC servo result */ |
| 109 | if (hubs->dcs_codes) { | 105 | if (hubs->dcs_codes) { |
| 110 | dev_dbg(codec->dev, "Applying %d code DC servo correction\n", | 106 | dev_dbg(codec->dev, "Applying %d code DC servo correction\n", |
| 111 | hubs->dcs_codes); | 107 | hubs->dcs_codes); |
| 112 | 108 | ||
| 109 | /* Different chips in the family support different | ||
| 110 | * readback methods. | ||
| 111 | */ | ||
| 112 | switch (hubs->dcs_readback_mode) { | ||
| 113 | case 0: | ||
| 114 | reg_l = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_1) | ||
| 115 | & WM8993_DCS_INTEG_CHAN_0_MASK; | ||
| 116 | reg_r = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_2) | ||
| 117 | & WM8993_DCS_INTEG_CHAN_1_MASK; | ||
| 118 | break; | ||
| 119 | case 1: | ||
| 120 | reg = snd_soc_read(codec, WM8993_DC_SERVO_3); | ||
| 121 | reg_l = (reg & WM8993_DCS_DAC_WR_VAL_1_MASK) | ||
| 122 | >> WM8993_DCS_DAC_WR_VAL_1_SHIFT; | ||
| 123 | reg_r = reg & WM8993_DCS_DAC_WR_VAL_0_MASK; | ||
| 124 | break; | ||
| 125 | default: | ||
| 126 | WARN(1, "Unknown DCS readback method"); | ||
| 127 | break; | ||
| 128 | } | ||
| 129 | |||
| 113 | /* HPOUT1L */ | 130 | /* HPOUT1L */ |
| 114 | reg = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_1) & | 131 | if (reg_l + hubs->dcs_codes > 0 && |
| 115 | WM8993_DCS_INTEG_CHAN_0_MASK;; | 132 | reg_l + hubs->dcs_codes < 0xff) |
| 116 | reg += hubs->dcs_codes; | 133 | reg_l += hubs->dcs_codes; |
| 117 | dcs_cfg = reg << WM8993_DCS_DAC_WR_VAL_1_SHIFT; | 134 | dcs_cfg = reg_l << WM8993_DCS_DAC_WR_VAL_1_SHIFT; |
| 118 | 135 | ||
| 119 | /* HPOUT1R */ | 136 | /* HPOUT1R */ |
| 120 | reg = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_2) & | 137 | if (reg_r + hubs->dcs_codes > 0 && |
| 121 | WM8993_DCS_INTEG_CHAN_1_MASK; | 138 | reg_r + hubs->dcs_codes < 0xff) |
| 122 | reg += hubs->dcs_codes; | 139 | reg_r += hubs->dcs_codes; |
| 123 | dcs_cfg |= reg; | 140 | dcs_cfg |= reg_r; |
| 124 | 141 | ||
| 125 | /* Do it */ | 142 | /* Do it */ |
| 126 | snd_soc_write(codec, WM8993_DC_SERVO_3, dcs_cfg); | 143 | snd_soc_write(codec, WM8993_DC_SERVO_3, dcs_cfg); |
| 127 | snd_soc_update_bits(codec, WM8993_DC_SERVO_0, | 144 | wait_for_dc_servo(codec, |
| 128 | WM8993_DCS_TRIG_DAC_WR_0 | | 145 | WM8993_DCS_TRIG_DAC_WR_0 | |
| 129 | WM8993_DCS_TRIG_DAC_WR_1, | 146 | WM8993_DCS_TRIG_DAC_WR_1); |
| 130 | WM8993_DCS_TRIG_DAC_WR_0 | | ||
| 131 | WM8993_DCS_TRIG_DAC_WR_1); | ||
| 132 | |||
| 133 | wait_for_dc_servo(codec); | ||
| 134 | } | 147 | } |
| 135 | } | 148 | } |
| 136 | 149 | ||
| @@ -141,10 +154,16 @@ static int wm8993_put_dc_servo(struct snd_kcontrol *kcontrol, | |||
| 141 | struct snd_ctl_elem_value *ucontrol) | 154 | struct snd_ctl_elem_value *ucontrol) |
| 142 | { | 155 | { |
| 143 | struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); | 156 | struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); |
| 157 | struct wm_hubs_data *hubs = codec->private_data; | ||
| 144 | int ret; | 158 | int ret; |
| 145 | 159 | ||
| 146 | ret = snd_soc_put_volsw_2r(kcontrol, ucontrol); | 160 | ret = snd_soc_put_volsw_2r(kcontrol, ucontrol); |
| 147 | 161 | ||
| 162 | /* If we're applying an offset correction then updating the | ||
| 163 | * calibration is likely to introduce further offsets. */ | ||
| 164 | if (hubs->dcs_codes) | ||
| 165 | return ret; | ||
| 166 | |||
| 148 | /* Only need to do this if the outputs are active */ | 167 | /* Only need to do this if the outputs are active */ |
| 149 | if (snd_soc_read(codec, WM8993_POWER_MANAGEMENT_1) | 168 | if (snd_soc_read(codec, WM8993_POWER_MANAGEMENT_1) |
| 150 | & (WM8993_HPOUT1L_ENA | WM8993_HPOUT1R_ENA)) | 169 | & (WM8993_HPOUT1L_ENA | WM8993_HPOUT1R_ENA)) |
diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h index 420104fe9c90..e51c16683589 100644 --- a/sound/soc/codecs/wm_hubs.h +++ b/sound/soc/codecs/wm_hubs.h | |||
| @@ -21,6 +21,7 @@ extern const unsigned int wm_hubs_spkmix_tlv[]; | |||
| 21 | /* This *must* be the first element of the codec->private_data struct */ | 21 | /* This *must* be the first element of the codec->private_data struct */ |
| 22 | struct wm_hubs_data { | 22 | struct wm_hubs_data { |
| 23 | int dcs_codes; | 23 | int dcs_codes; |
| 24 | int dcs_readback_mode; | ||
| 24 | int hp_startup_mode; | 25 | int hp_startup_mode; |
| 25 | }; | 26 | }; |
| 26 | 27 | ||
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c index 62af7e025e7f..adadcd3aa1b1 100644 --- a/sound/soc/davinci/davinci-i2s.c +++ b/sound/soc/davinci/davinci-i2s.c | |||
| @@ -586,7 +586,8 @@ static int davinci_i2s_probe(struct platform_device *pdev) | |||
| 586 | dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].channel = res->start; | 586 | dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].channel = res->start; |
| 587 | 587 | ||
| 588 | davinci_i2s_dai.private_data = dev; | 588 | davinci_i2s_dai.private_data = dev; |
| 589 | davinci_i2s_dai.dma_data = dev->dma_params; | 589 | davinci_i2s_dai.capture.dma_data = dev->dma_params; |
| 590 | davinci_i2s_dai.playback.dma_data = dev->dma_params; | ||
| 590 | ret = snd_soc_register_dai(&davinci_i2s_dai); | 591 | ret = snd_soc_register_dai(&davinci_i2s_dai); |
| 591 | if (ret != 0) | 592 | if (ret != 0) |
| 592 | goto err_free_mem; | 593 | goto err_free_mem; |
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index 6c80cc35ecad..79f0f4ad242c 100644 --- a/sound/soc/davinci/davinci-mcasp.c +++ b/sound/soc/davinci/davinci-mcasp.c | |||
| @@ -918,7 +918,8 @@ static int davinci_mcasp_probe(struct platform_device *pdev) | |||
| 918 | 918 | ||
| 919 | dma_data->channel = res->start; | 919 | dma_data->channel = res->start; |
| 920 | davinci_mcasp_dai[pdata->op_mode].private_data = dev; | 920 | davinci_mcasp_dai[pdata->op_mode].private_data = dev; |
| 921 | davinci_mcasp_dai[pdata->op_mode].dma_data = dev->dma_params; | 921 | davinci_mcasp_dai[pdata->op_mode].capture.dma_data = dev->dma_params; |
| 922 | davinci_mcasp_dai[pdata->op_mode].playback.dma_data = dev->dma_params; | ||
| 922 | davinci_mcasp_dai[pdata->op_mode].dev = &pdev->dev; | 923 | davinci_mcasp_dai[pdata->op_mode].dev = &pdev->dev; |
| 923 | ret = snd_soc_register_dai(&davinci_mcasp_dai[pdata->op_mode]); | 924 | ret = snd_soc_register_dai(&davinci_mcasp_dai[pdata->op_mode]); |
| 924 | 925 | ||
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c index 80c7fdf2f521..2dc406f42fe7 100644 --- a/sound/soc/davinci/davinci-pcm.c +++ b/sound/soc/davinci/davinci-pcm.c | |||
| @@ -649,8 +649,10 @@ static int davinci_pcm_open(struct snd_pcm_substream *substream) | |||
| 649 | struct snd_pcm_hardware *ppcm; | 649 | struct snd_pcm_hardware *ppcm; |
| 650 | int ret = 0; | 650 | int ret = 0; |
| 651 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 651 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 652 | struct davinci_pcm_dma_params *pa = rtd->dai->cpu_dai->dma_data; | 652 | struct davinci_pcm_dma_params *pa; |
| 653 | struct davinci_pcm_dma_params *params; | 653 | struct davinci_pcm_dma_params *params; |
| 654 | |||
| 655 | pa = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); | ||
| 654 | if (!pa) | 656 | if (!pa) |
| 655 | return -ENODEV; | 657 | return -ENODEV; |
| 656 | params = &pa[substream->stream]; | 658 | params = &pa[substream->stream]; |
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c index 86668ab3f4d4..2b31ac673ea4 100644 --- a/sound/soc/imx/imx-pcm-dma-mx2.c +++ b/sound/soc/imx/imx-pcm-dma-mx2.c | |||
| @@ -71,7 +71,12 @@ static void imx_ssi_dma_callback(int channel, void *data) | |||
| 71 | 71 | ||
| 72 | static void snd_imx_dma_err_callback(int channel, void *data, int err) | 72 | static void snd_imx_dma_err_callback(int channel, void *data, int err) |
| 73 | { | 73 | { |
| 74 | pr_err("DMA error callback called\n"); | 74 | struct snd_pcm_substream *substream = data; |
| 75 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | ||
| 76 | struct imx_pcm_dma_params *dma_params = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); | ||
| 77 | struct snd_pcm_runtime *runtime = substream->runtime; | ||
| 78 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | ||
| 79 | int ret; | ||
| 75 | 80 | ||
| 76 | pr_err("DMA timeout on channel %d -%s%s%s%s\n", | 81 | pr_err("DMA timeout on channel %d -%s%s%s%s\n", |
| 77 | channel, | 82 | channel, |
| @@ -79,16 +84,26 @@ static void snd_imx_dma_err_callback(int channel, void *data, int err) | |||
| 79 | err & IMX_DMA_ERR_REQUEST ? " request" : "", | 84 | err & IMX_DMA_ERR_REQUEST ? " request" : "", |
| 80 | err & IMX_DMA_ERR_TRANSFER ? " transfer" : "", | 85 | err & IMX_DMA_ERR_TRANSFER ? " transfer" : "", |
| 81 | err & IMX_DMA_ERR_BUFFER ? " buffer" : ""); | 86 | err & IMX_DMA_ERR_BUFFER ? " buffer" : ""); |
| 87 | |||
| 88 | imx_dma_disable(iprtd->dma); | ||
| 89 | ret = imx_dma_setup_sg(iprtd->dma, iprtd->sg_list, iprtd->sg_count, | ||
| 90 | IMX_DMA_LENGTH_LOOP, dma_params->dma_addr, | ||
| 91 | substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? | ||
| 92 | DMA_MODE_WRITE : DMA_MODE_READ); | ||
| 93 | if (!ret) | ||
| 94 | imx_dma_enable(iprtd->dma); | ||
| 82 | } | 95 | } |
| 83 | 96 | ||
| 84 | static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream) | 97 | static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream) |
| 85 | { | 98 | { |
| 86 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 99 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 87 | struct imx_pcm_dma_params *dma_params = rtd->dai->cpu_dai->dma_data; | 100 | struct imx_pcm_dma_params *dma_params; |
| 88 | struct snd_pcm_runtime *runtime = substream->runtime; | 101 | struct snd_pcm_runtime *runtime = substream->runtime; |
| 89 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | 102 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
| 90 | int ret; | 103 | int ret; |
| 91 | 104 | ||
| 105 | dma_params = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); | ||
| 106 | |||
| 92 | iprtd->dma = imx_dma_request_by_prio(DRV_NAME, DMA_PRIO_HIGH); | 107 | iprtd->dma = imx_dma_request_by_prio(DRV_NAME, DMA_PRIO_HIGH); |
| 93 | if (iprtd->dma < 0) { | 108 | if (iprtd->dma < 0) { |
| 94 | pr_err("Failed to claim the audio DMA\n"); | 109 | pr_err("Failed to claim the audio DMA\n"); |
| @@ -193,10 +208,12 @@ static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream) | |||
| 193 | { | 208 | { |
| 194 | struct snd_pcm_runtime *runtime = substream->runtime; | 209 | struct snd_pcm_runtime *runtime = substream->runtime; |
| 195 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 210 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 196 | struct imx_pcm_dma_params *dma_params = rtd->dai->cpu_dai->dma_data; | 211 | struct imx_pcm_dma_params *dma_params; |
| 197 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | 212 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
| 198 | int err; | 213 | int err; |
| 199 | 214 | ||
| 215 | dma_params = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); | ||
| 216 | |||
| 200 | iprtd->substream = substream; | 217 | iprtd->substream = substream; |
| 201 | iprtd->buf = (unsigned int *)substream->dma_buffer.area; | 218 | iprtd->buf = (unsigned int *)substream->dma_buffer.area; |
| 202 | iprtd->period_cnt = 0; | 219 | iprtd->period_cnt = 0; |
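
The new DMA error callback no longer just logs the timeout: it stops the channel, re-programs the same looped scatter-gather transfer, and restarts it only if the setup succeeded. A hedged sketch of that recover-and-restart flow with stand-in functions (the driver itself uses imx_dma_disable(), imx_dma_setup_sg() and imx_dma_enable()):

    #include <stdio.h>

    /* Illustrative stand-ins for the DMA calls used by the callback. */
    static void dma_disable(int ch) { printf("disable ch %d\n", ch); }
    static int  dma_setup(int ch)   { printf("re-setup ch %d\n", ch); return 0; }
    static void dma_enable(int ch)  { printf("enable ch %d\n", ch); }

    static void dma_error_restart(int ch)
    {
        int ret;

        dma_disable(ch);
        ret = dma_setup(ch);        /* same sg list, same direction */
        if (!ret)
            dma_enable(ch);         /* only restart after a clean setup */
    }

    int main(void)
    {
        dma_error_restart(3);
        return 0;
    }
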
diff --git a/sound/soc/imx/imx-pcm-fiq.c b/sound/soc/imx/imx-pcm-fiq.c index f96a373699cf..6b518e07eea9 100644 --- a/sound/soc/imx/imx-pcm-fiq.c +++ b/sound/soc/imx/imx-pcm-fiq.c | |||
| @@ -39,23 +39,24 @@ struct imx_pcm_runtime_data { | |||
| 39 | unsigned long offset; | 39 | unsigned long offset; |
| 40 | unsigned long last_offset; | 40 | unsigned long last_offset; |
| 41 | unsigned long size; | 41 | unsigned long size; |
| 42 | struct timer_list timer; | 42 | struct hrtimer hrt; |
| 43 | int poll_time; | 43 | int poll_time_ns; |
| 44 | struct snd_pcm_substream *substream; | ||
| 45 | atomic_t running; | ||
| 44 | }; | 46 | }; |
| 45 | 47 | ||
| 46 | static inline void imx_ssi_set_next_poll(struct imx_pcm_runtime_data *iprtd) | 48 | static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt) |
| 47 | { | 49 | { |
| 48 | iprtd->timer.expires = jiffies + iprtd->poll_time; | 50 | struct imx_pcm_runtime_data *iprtd = |
| 49 | } | 51 | container_of(hrt, struct imx_pcm_runtime_data, hrt); |
| 50 | 52 | struct snd_pcm_substream *substream = iprtd->substream; | |
| 51 | static void imx_ssi_timer_callback(unsigned long data) | ||
| 52 | { | ||
| 53 | struct snd_pcm_substream *substream = (void *)data; | ||
| 54 | struct snd_pcm_runtime *runtime = substream->runtime; | 53 | struct snd_pcm_runtime *runtime = substream->runtime; |
| 55 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | ||
| 56 | struct pt_regs regs; | 54 | struct pt_regs regs; |
| 57 | unsigned long delta; | 55 | unsigned long delta; |
| 58 | 56 | ||
| 57 | if (!atomic_read(&iprtd->running)) | ||
| 58 | return HRTIMER_NORESTART; | ||
| 59 | |||
| 59 | get_fiq_regs(®s); | 60 | get_fiq_regs(®s); |
| 60 | 61 | ||
| 61 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 62 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
| @@ -72,16 +73,14 @@ static void imx_ssi_timer_callback(unsigned long data) | |||
| 72 | 73 | ||
| 73 | /* If we've transferred at least a period then report it and | 74 | /* If we've transferred at least a period then report it and |
| 74 | * reset our poll time */ | 75 | * reset our poll time */ |
| 75 | if (delta >= runtime->period_size) { | 76 | if (delta >= iprtd->period) { |
| 76 | snd_pcm_period_elapsed(substream); | 77 | snd_pcm_period_elapsed(substream); |
| 77 | iprtd->last_offset = iprtd->offset; | 78 | iprtd->last_offset = iprtd->offset; |
| 78 | |||
| 79 | imx_ssi_set_next_poll(iprtd); | ||
| 80 | } | 79 | } |
| 81 | 80 | ||
| 82 | /* Restart the timer; if we didn't report we'll run on the next tick */ | 81 | hrtimer_forward_now(hrt, ns_to_ktime(iprtd->poll_time_ns)); |
| 83 | add_timer(&iprtd->timer); | ||
| 84 | 82 | ||
| 83 | return HRTIMER_RESTART; | ||
| 85 | } | 84 | } |
| 86 | 85 | ||
| 87 | static struct fiq_handler fh = { | 86 | static struct fiq_handler fh = { |
| @@ -99,8 +98,8 @@ static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream, | |||
| 99 | iprtd->period = params_period_bytes(params) ; | 98 | iprtd->period = params_period_bytes(params) ; |
| 100 | iprtd->offset = 0; | 99 | iprtd->offset = 0; |
| 101 | iprtd->last_offset = 0; | 100 | iprtd->last_offset = 0; |
| 102 | iprtd->poll_time = HZ / (params_rate(params) / params_period_size(params)); | 101 | iprtd->poll_time_ns = 1000000000 / params_rate(params) * |
| 103 | 102 | params_period_size(params); | |
| 104 | snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); | 103 | snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); |
| 105 | 104 | ||
| 106 | return 0; | 105 | return 0; |
| @@ -135,8 +134,9 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | |||
| 135 | case SNDRV_PCM_TRIGGER_START: | 134 | case SNDRV_PCM_TRIGGER_START: |
| 136 | case SNDRV_PCM_TRIGGER_RESUME: | 135 | case SNDRV_PCM_TRIGGER_RESUME: |
| 137 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: | 136 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: |
| 138 | imx_ssi_set_next_poll(iprtd); | 137 | atomic_set(&iprtd->running, 1); |
| 139 | add_timer(&iprtd->timer); | 138 | hrtimer_start(&iprtd->hrt, ns_to_ktime(iprtd->poll_time_ns), |
| 139 | HRTIMER_MODE_REL); | ||
| 140 | if (++fiq_enable == 1) | 140 | if (++fiq_enable == 1) |
| 141 | enable_fiq(imx_pcm_fiq); | 141 | enable_fiq(imx_pcm_fiq); |
| 142 | 142 | ||
| @@ -145,11 +145,11 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | |||
| 145 | case SNDRV_PCM_TRIGGER_STOP: | 145 | case SNDRV_PCM_TRIGGER_STOP: |
| 146 | case SNDRV_PCM_TRIGGER_SUSPEND: | 146 | case SNDRV_PCM_TRIGGER_SUSPEND: |
| 147 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: | 147 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: |
| 148 | del_timer(&iprtd->timer); | 148 | atomic_set(&iprtd->running, 0); |
| 149 | |||
| 149 | if (--fiq_enable == 0) | 150 | if (--fiq_enable == 0) |
| 150 | disable_fiq(imx_pcm_fiq); | 151 | disable_fiq(imx_pcm_fiq); |
| 151 | 152 | ||
| 152 | |||
| 153 | break; | 153 | break; |
| 154 | default: | 154 | default: |
| 155 | return -EINVAL; | 155 | return -EINVAL; |
| @@ -180,7 +180,7 @@ static struct snd_pcm_hardware snd_imx_hardware = { | |||
| 180 | .buffer_bytes_max = IMX_SSI_DMABUF_SIZE, | 180 | .buffer_bytes_max = IMX_SSI_DMABUF_SIZE, |
| 181 | .period_bytes_min = 128, | 181 | .period_bytes_min = 128, |
| 182 | .period_bytes_max = 16 * 1024, | 182 | .period_bytes_max = 16 * 1024, |
| 183 | .periods_min = 2, | 183 | .periods_min = 4, |
| 184 | .periods_max = 255, | 184 | .periods_max = 255, |
| 185 | .fifo_size = 0, | 185 | .fifo_size = 0, |
| 186 | }; | 186 | }; |
| @@ -194,9 +194,11 @@ static int snd_imx_open(struct snd_pcm_substream *substream) | |||
| 194 | iprtd = kzalloc(sizeof(*iprtd), GFP_KERNEL); | 194 | iprtd = kzalloc(sizeof(*iprtd), GFP_KERNEL); |
| 195 | runtime->private_data = iprtd; | 195 | runtime->private_data = iprtd; |
| 196 | 196 | ||
| 197 | init_timer(&iprtd->timer); | 197 | iprtd->substream = substream; |
| 198 | iprtd->timer.data = (unsigned long)substream; | 198 | |
| 199 | iprtd->timer.function = imx_ssi_timer_callback; | 199 | atomic_set(&iprtd->running, 0); |
| 200 | hrtimer_init(&iprtd->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
| 201 | iprtd->hrt.function = snd_hrtimer_callback; | ||
| 200 | 202 | ||
| 201 | ret = snd_pcm_hw_constraint_integer(substream->runtime, | 203 | ret = snd_pcm_hw_constraint_integer(substream->runtime, |
| 202 | SNDRV_PCM_HW_PARAM_PERIODS); | 204 | SNDRV_PCM_HW_PARAM_PERIODS); |
| @@ -212,7 +214,8 @@ static int snd_imx_close(struct snd_pcm_substream *substream) | |||
| 212 | struct snd_pcm_runtime *runtime = substream->runtime; | 214 | struct snd_pcm_runtime *runtime = substream->runtime; |
| 213 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | 215 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
| 214 | 216 | ||
| 215 | del_timer_sync(&iprtd->timer); | 217 | hrtimer_cancel(&iprtd->hrt); |
| 218 | |||
| 216 | kfree(iprtd); | 219 | kfree(iprtd); |
| 217 | 220 | ||
| 218 | return 0; | 221 | return 0; |
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c index 6546b06cbd2a..80b4fee2442b 100644 --- a/sound/soc/imx/imx-ssi.c +++ b/sound/soc/imx/imx-ssi.c | |||
| @@ -235,17 +235,20 @@ static int imx_ssi_hw_params(struct snd_pcm_substream *substream, | |||
| 235 | struct snd_soc_dai *cpu_dai) | 235 | struct snd_soc_dai *cpu_dai) |
| 236 | { | 236 | { |
| 237 | struct imx_ssi *ssi = cpu_dai->private_data; | 237 | struct imx_ssi *ssi = cpu_dai->private_data; |
| 238 | struct imx_pcm_dma_params *dma_data; | ||
| 238 | u32 reg, sccr; | 239 | u32 reg, sccr; |
| 239 | 240 | ||
| 240 | /* Tx/Rx config */ | 241 | /* Tx/Rx config */ |
| 241 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { | 242 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { |
| 242 | reg = SSI_STCCR; | 243 | reg = SSI_STCCR; |
| 243 | cpu_dai->dma_data = &ssi->dma_params_tx; | 244 | dma_data = &ssi->dma_params_tx; |
| 244 | } else { | 245 | } else { |
| 245 | reg = SSI_SRCCR; | 246 | reg = SSI_SRCCR; |
| 246 | cpu_dai->dma_data = &ssi->dma_params_rx; | 247 | dma_data = &ssi->dma_params_rx; |
| 247 | } | 248 | } |
| 248 | 249 | ||
| 250 | snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); | ||
| 251 | |||
| 249 | sccr = readl(ssi->base + reg) & ~SSI_STCCR_WL_MASK; | 252 | sccr = readl(ssi->base + reg) & ~SSI_STCCR_WL_MASK; |
| 250 | 253 | ||
| 251 | /* DAI data (word) size */ | 254 | /* DAI data (word) size */ |
| @@ -653,7 +656,8 @@ static int imx_ssi_probe(struct platform_device *pdev) | |||
| 653 | dai->private_data = ssi; | 656 | dai->private_data = ssi; |
| 654 | 657 | ||
| 655 | if ((cpu_is_mx27() || cpu_is_mx21()) && | 658 | if ((cpu_is_mx27() || cpu_is_mx21()) && |
| 656 | !(ssi->flags & IMX_SSI_USE_AC97)) { | 659 | !(ssi->flags & IMX_SSI_USE_AC97) && |
| 660 | (ssi->flags & IMX_SSI_DMA)) { | ||
| 657 | ssi->flags |= IMX_SSI_DMA; | 661 | ssi->flags |= IMX_SSI_DMA; |
| 658 | platform = imx_ssi_dma_mx2_init(pdev, ssi); | 662 | platform = imx_ssi_dma_mx2_init(pdev, ssi); |
| 659 | } else | 663 | } else |
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c index e814a9591f78..8ad9dc901007 100644 --- a/sound/soc/omap/omap-mcbsp.c +++ b/sound/soc/omap/omap-mcbsp.c | |||
| @@ -297,7 +297,9 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream, | |||
| 297 | omap_mcbsp_dai_dma_params[id][substream->stream].sync_mode = sync_mode; | 297 | omap_mcbsp_dai_dma_params[id][substream->stream].sync_mode = sync_mode; |
| 298 | omap_mcbsp_dai_dma_params[id][substream->stream].data_type = | 298 | omap_mcbsp_dai_dma_params[id][substream->stream].data_type = |
| 299 | OMAP_DMA_DATA_TYPE_S16; | 299 | OMAP_DMA_DATA_TYPE_S16; |
| 300 | cpu_dai->dma_data = &omap_mcbsp_dai_dma_params[id][substream->stream]; | 300 | |
| 301 | snd_soc_dai_set_dma_data(cpu_dai, substream, | ||
| 302 | &omap_mcbsp_dai_dma_params[id][substream->stream]); | ||
| 301 | 303 | ||
| 302 | if (mcbsp_data->configured) { | 304 | if (mcbsp_data->configured) { |
| 303 | /* McBSP already configured by another stream */ | 305 | /* McBSP already configured by another stream */ |
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c index 25f19e4728bf..b7f4f7e015f3 100644 --- a/sound/soc/omap/omap-mcpdm.c +++ b/sound/soc/omap/omap-mcpdm.c | |||
| @@ -150,7 +150,8 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream, | |||
| 150 | int stream = substream->stream; | 150 | int stream = substream->stream; |
| 151 | int channels, err, link_mask = 0; | 151 | int channels, err, link_mask = 0; |
| 152 | 152 | ||
| 153 | cpu_dai->dma_data = &omap_mcpdm_dai_dma_params[stream]; | 153 | snd_soc_dai_set_dma_data(cpu_dai, substream, |
| 154 | &omap_mcpdm_dai_dma_params[stream]); | ||
| 154 | 155 | ||
| 155 | channels = params_channels(params); | 156 | channels = params_channels(params); |
| 156 | switch (channels) { | 157 | switch (channels) { |
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c index ba8acbb0a7fa..1e521904ea64 100644 --- a/sound/soc/omap/omap-pcm.c +++ b/sound/soc/omap/omap-pcm.c | |||
| @@ -61,12 +61,11 @@ static void omap_pcm_dma_irq(int ch, u16 stat, void *data) | |||
| 61 | struct omap_runtime_data *prtd = runtime->private_data; | 61 | struct omap_runtime_data *prtd = runtime->private_data; |
| 62 | unsigned long flags; | 62 | unsigned long flags; |
| 63 | 63 | ||
| 64 | if ((cpu_is_omap1510()) && | 64 | if (cpu_is_omap1510()) { |
| 65 | (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)) { | ||
| 66 | /* | 65 | /* |
| 67 | * OMAP1510 doesn't fully support DMA progress counter | 66 | * OMAP1510 doesn't fully support DMA progress counter |
| 68 | * and there is no software emulation implemented yet, | 67 | * and there is no software emulation implemented yet, |
| 69 | * so have to maintain our own playback progress counter | 68 | * so have to maintain our own progress counters |
| 70 | * that can be used by omap_pcm_pointer() instead. | 69 | * that can be used by omap_pcm_pointer() instead. |
| 71 | */ | 70 | */ |
| 72 | spin_lock_irqsave(&prtd->lock, flags); | 71 | spin_lock_irqsave(&prtd->lock, flags); |
| @@ -101,9 +100,11 @@ static int omap_pcm_hw_params(struct snd_pcm_substream *substream, | |||
| 101 | struct snd_pcm_runtime *runtime = substream->runtime; | 100 | struct snd_pcm_runtime *runtime = substream->runtime; |
| 102 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 101 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 103 | struct omap_runtime_data *prtd = runtime->private_data; | 102 | struct omap_runtime_data *prtd = runtime->private_data; |
| 104 | struct omap_pcm_dma_data *dma_data = rtd->dai->cpu_dai->dma_data; | 103 | struct omap_pcm_dma_data *dma_data; |
| 105 | int err = 0; | 104 | int err = 0; |
| 106 | 105 | ||
| 106 | dma_data = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); | ||
| 107 | |||
| 107 | /* return if this is a bufferless transfer e.g. | 108 | /* return if this is a bufferless transfer e.g. |
| 108 | * codec <--> BT codec or GSM modem -- lg FIXME */ | 109 | * codec <--> BT codec or GSM modem -- lg FIXME */ |
| 109 | if (!dma_data) | 110 | if (!dma_data) |
| @@ -190,8 +191,7 @@ static int omap_pcm_prepare(struct snd_pcm_substream *substream) | |||
| 190 | dma_params.frame_count = runtime->periods; | 191 | dma_params.frame_count = runtime->periods; |
| 191 | omap_set_dma_params(prtd->dma_ch, &dma_params); | 192 | omap_set_dma_params(prtd->dma_ch, &dma_params); |
| 192 | 193 | ||
| 193 | if ((cpu_is_omap1510()) && | 194 | if (cpu_is_omap1510()) |
| 194 | (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)) | ||
| 195 | omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ | | 195 | omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ | |
| 196 | OMAP_DMA_LAST_IRQ | OMAP_DMA_BLOCK_IRQ); | 196 | OMAP_DMA_LAST_IRQ | OMAP_DMA_BLOCK_IRQ); |
| 197 | else | 197 | else |
| @@ -249,14 +249,15 @@ static snd_pcm_uframes_t omap_pcm_pointer(struct snd_pcm_substream *substream) | |||
| 249 | dma_addr_t ptr; | 249 | dma_addr_t ptr; |
| 250 | snd_pcm_uframes_t offset; | 250 | snd_pcm_uframes_t offset; |
| 251 | 251 | ||
| 252 | if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { | 252 | if (cpu_is_omap1510()) { |
| 253 | offset = prtd->period_index * runtime->period_size; | ||
| 254 | } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { | ||
| 253 | ptr = omap_get_dma_dst_pos(prtd->dma_ch); | 255 | ptr = omap_get_dma_dst_pos(prtd->dma_ch); |
| 254 | offset = bytes_to_frames(runtime, ptr - runtime->dma_addr); | 256 | offset = bytes_to_frames(runtime, ptr - runtime->dma_addr); |
| 255 | } else if (!(cpu_is_omap1510())) { | 257 | } else { |
| 256 | ptr = omap_get_dma_src_pos(prtd->dma_ch); | 258 | ptr = omap_get_dma_src_pos(prtd->dma_ch); |
| 257 | offset = bytes_to_frames(runtime, ptr - runtime->dma_addr); | 259 | offset = bytes_to_frames(runtime, ptr - runtime->dma_addr); |
| 258 | } else | 260 | } |
| 259 | offset = prtd->period_index * runtime->period_size; | ||
| 260 | 261 | ||
| 261 | if (offset >= runtime->buffer_size) | 262 | if (offset >= runtime->buffer_size) |
| 262 | offset = 0; | 263 | offset = 0; |
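The conversions throughout this patch replace direct reads of cpu_dai->dma_data with snd_soc_dai_get_dma_data() and direct writes with snd_soc_dai_set_dma_data(). The helpers themselves are not shown in this excerpt; a minimal sketch of their presumed shape, assuming they do nothing more than select the per-direction dma_data field that the s6000-i2s.c hunk further down assigns directly, would be:

	/* sketch only: presumed shape of the new accessors, not code from this patch */
	static inline void snd_soc_dai_set_dma_data(struct snd_soc_dai *dai,
						    struct snd_pcm_substream *ss,
						    void *data)
	{
		if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK)
			dai->playback.dma_data = data;
		else
			dai->capture.dma_data = data;
	}

	static inline void *snd_soc_dai_get_dma_data(struct snd_soc_dai *dai,
						     struct snd_pcm_substream *ss)
	{
		return ss->stream == SNDRV_PCM_STREAM_PLAYBACK ?
				dai->playback.dma_data : dai->capture.dma_data;
	}

Under that assumption the accessors only touch the DAI side, so the platform (PCM) drivers converted below can keep treating the returned pointer as an opaque per-stream cookie.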
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c index d5fc52d0a3c4..544fd9566f4d 100644 --- a/sound/soc/pxa/pxa-ssp.c +++ b/sound/soc/pxa/pxa-ssp.c | |||
| @@ -122,10 +122,9 @@ static int pxa_ssp_startup(struct snd_pcm_substream *substream, | |||
| 122 | ssp_disable(ssp); | 122 | ssp_disable(ssp); |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | if (cpu_dai->dma_data) { | 125 | kfree(snd_soc_dai_get_dma_data(cpu_dai, substream)); |
| 126 | kfree(cpu_dai->dma_data); | 126 | snd_soc_dai_set_dma_data(cpu_dai, substream, NULL); |
| 127 | cpu_dai->dma_data = NULL; | 127 | |
| 128 | } | ||
| 129 | return ret; | 128 | return ret; |
| 130 | } | 129 | } |
| 131 | 130 | ||
| @@ -142,10 +141,8 @@ static void pxa_ssp_shutdown(struct snd_pcm_substream *substream, | |||
| 142 | clk_disable(ssp->clk); | 141 | clk_disable(ssp->clk); |
| 143 | } | 142 | } |
| 144 | 143 | ||
| 145 | if (cpu_dai->dma_data) { | 144 | kfree(snd_soc_dai_get_dma_data(cpu_dai, substream)); |
| 146 | kfree(cpu_dai->dma_data); | 145 | snd_soc_dai_set_dma_data(cpu_dai, substream, NULL); |
| 147 | cpu_dai->dma_data = NULL; | ||
| 148 | } | ||
| 149 | } | 146 | } |
| 150 | 147 | ||
| 151 | #ifdef CONFIG_PM | 148 | #ifdef CONFIG_PM |
| @@ -570,19 +567,23 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream, | |||
| 570 | u32 sspsp; | 567 | u32 sspsp; |
| 571 | int width = snd_pcm_format_physical_width(params_format(params)); | 568 | int width = snd_pcm_format_physical_width(params_format(params)); |
| 572 | int ttsa = ssp_read_reg(ssp, SSTSA) & 0xf; | 569 | int ttsa = ssp_read_reg(ssp, SSTSA) & 0xf; |
| 570 | struct pxa2xx_pcm_dma_params *dma_data; | ||
| 571 | |||
| 572 | dma_data = snd_soc_dai_get_dma_data(dai, substream); | ||
| 573 | 573 | ||
| 574 | /* generate correct DMA params */ | 574 | /* generate correct DMA params */ |
| 575 | if (cpu_dai->dma_data) | 575 | kfree(dma_data); |
| 576 | kfree(cpu_dai->dma_data); | ||
| 577 | 576 | ||
| 578 | /* Network mode with one active slot (ttsa == 1) can be used | 577 | /* Network mode with one active slot (ttsa == 1) can be used |
| 579 | * to force 16-bit frame width on the wire (for S16_LE), even | 578 | * to force 16-bit frame width on the wire (for S16_LE), even |
| 580 | * with two channels. Use 16-bit DMA transfers for this case. | 579 | * with two channels. Use 16-bit DMA transfers for this case. |
| 581 | */ | 580 | */ |
| 582 | cpu_dai->dma_data = ssp_get_dma_params(ssp, | 581 | dma_data = ssp_get_dma_params(ssp, |
| 583 | ((chn == 2) && (ttsa != 1)) || (width == 32), | 582 | ((chn == 2) && (ttsa != 1)) || (width == 32), |
| 584 | substream->stream == SNDRV_PCM_STREAM_PLAYBACK); | 583 | substream->stream == SNDRV_PCM_STREAM_PLAYBACK); |
| 585 | 584 | ||
| 585 | snd_soc_dai_set_dma_data(dai, substream, dma_data); | ||
| 586 | |||
| 586 | /* we can only change the settings if the port is not in use */ | 587 | /* we can only change the settings if the port is not in use */ |
| 587 | if (ssp_read_reg(ssp, SSCR0) & SSCR0_SSE) | 588 | if (ssp_read_reg(ssp, SSCR0) & SSCR0_SSE) |
| 588 | return 0; | 589 | return 0; |
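In pxa-ssp.c above, the startup() error path and shutdown() now repeat the same free-then-clear pair. If more call sites appear, a driver-local wrapper would keep the idiom in one place; a sketch with a hypothetical helper name (pxa_ssp_free_dma_data is not part of the patch):

	/* hypothetical driver-local helper, shown for illustration only */
	static void pxa_ssp_free_dma_data(struct snd_soc_dai *dai,
					  struct snd_pcm_substream *substream)
	{
		/* release the params allocated by ssp_get_dma_params() ... */
		kfree(snd_soc_dai_get_dma_data(dai, substream));
		/* ... and clear the per-stream pointer so it cannot be reused */
		snd_soc_dai_set_dma_data(dai, substream, NULL);
	}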
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c index e9ae7b3a7e00..d314115e3dd7 100644 --- a/sound/soc/pxa/pxa2xx-ac97.c +++ b/sound/soc/pxa/pxa2xx-ac97.c | |||
| @@ -122,11 +122,14 @@ static int pxa2xx_ac97_hw_params(struct snd_pcm_substream *substream, | |||
| 122 | { | 122 | { |
| 123 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 123 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 124 | struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; | 124 | struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; |
| 125 | struct pxa2xx_pcm_dma_params *dma_data; | ||
| 125 | 126 | ||
| 126 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 127 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
| 127 | cpu_dai->dma_data = &pxa2xx_ac97_pcm_stereo_out; | 128 | dma_data = &pxa2xx_ac97_pcm_stereo_out; |
| 128 | else | 129 | else |
| 129 | cpu_dai->dma_data = &pxa2xx_ac97_pcm_stereo_in; | 130 | dma_data = &pxa2xx_ac97_pcm_stereo_in; |
| 131 | |||
| 132 | snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); | ||
| 130 | 133 | ||
| 131 | return 0; | 134 | return 0; |
| 132 | } | 135 | } |
| @@ -137,11 +140,14 @@ static int pxa2xx_ac97_hw_aux_params(struct snd_pcm_substream *substream, | |||
| 137 | { | 140 | { |
| 138 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 141 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 139 | struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; | 142 | struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; |
| 143 | struct pxa2xx_pcm_dma_params *dma_data; | ||
| 140 | 144 | ||
| 141 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 145 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
| 142 | cpu_dai->dma_data = &pxa2xx_ac97_pcm_aux_mono_out; | 146 | dma_data = &pxa2xx_ac97_pcm_aux_mono_out; |
| 143 | else | 147 | else |
| 144 | cpu_dai->dma_data = &pxa2xx_ac97_pcm_aux_mono_in; | 148 | dma_data = &pxa2xx_ac97_pcm_aux_mono_in; |
| 149 | |||
| 150 | snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); | ||
| 145 | 151 | ||
| 146 | return 0; | 152 | return 0; |
| 147 | } | 153 | } |
| @@ -156,7 +162,8 @@ static int pxa2xx_ac97_hw_mic_params(struct snd_pcm_substream *substream, | |||
| 156 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 162 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
| 157 | return -ENODEV; | 163 | return -ENODEV; |
| 158 | else | 164 | else |
| 159 | cpu_dai->dma_data = &pxa2xx_ac97_pcm_mic_mono_in; | 165 | snd_soc_dai_set_dma_data(cpu_dai, substream, |
| 166 | &pxa2xx_ac97_pcm_mic_mono_in); | ||
| 160 | 167 | ||
| 161 | return 0; | 168 | return 0; |
| 162 | } | 169 | } |
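The pxa2xx AC97 conversions above (and the I2S one below) all follow the same two-step idiom: choose the per-direction parameter block, then publish it through the new accessor so the platform driver can fetch it from its own hw_params or trigger callback. Reduced to its minimal form for a made-up DAI (all foo_* names are invented for illustration):

	/* illustrative pattern only; foo_* identifiers are not from this patch */
	struct foo_dma_params {
		int channel;
	};

	static struct foo_dma_params foo_pcm_stereo_out = { .channel = 0 };
	static struct foo_dma_params foo_pcm_stereo_in  = { .channel = 1 };

	static int foo_dai_hw_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params,
				     struct snd_soc_dai *dai)
	{
		struct foo_dma_params *dma_data;

		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			dma_data = &foo_pcm_stereo_out;
		else
			dma_data = &foo_pcm_stereo_in;

		/* make the parameters visible to the platform driver */
		snd_soc_dai_set_dma_data(dai, substream, dma_data);

		return 0;
	}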
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c index 6b8f655d1ad8..c1a5275721e4 100644 --- a/sound/soc/pxa/pxa2xx-i2s.c +++ b/sound/soc/pxa/pxa2xx-i2s.c | |||
| @@ -164,6 +164,7 @@ static int pxa2xx_i2s_hw_params(struct snd_pcm_substream *substream, | |||
| 164 | { | 164 | { |
| 165 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 165 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 166 | struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; | 166 | struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; |
| 167 | struct pxa2xx_pcm_dma_params *dma_data; | ||
| 167 | 168 | ||
| 168 | BUG_ON(IS_ERR(clk_i2s)); | 169 | BUG_ON(IS_ERR(clk_i2s)); |
| 169 | clk_enable(clk_i2s); | 170 | clk_enable(clk_i2s); |
| @@ -171,9 +172,11 @@ static int pxa2xx_i2s_hw_params(struct snd_pcm_substream *substream, | |||
| 171 | pxa_i2s_wait(); | 172 | pxa_i2s_wait(); |
| 172 | 173 | ||
| 173 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 174 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
| 174 | cpu_dai->dma_data = &pxa2xx_i2s_pcm_stereo_out; | 175 | dma_data = &pxa2xx_i2s_pcm_stereo_out; |
| 175 | else | 176 | else |
| 176 | cpu_dai->dma_data = &pxa2xx_i2s_pcm_stereo_in; | 177 | dma_data = &pxa2xx_i2s_pcm_stereo_in; |
| 178 | |||
| 179 | snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); | ||
| 177 | 180 | ||
| 178 | /* is port used by another stream */ | 181 | /* is port used by another stream */ |
| 179 | if (!(SACR0 & SACR0_ENB)) { | 182 | if (!(SACR0 & SACR0_ENB)) { |
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c index d38e39575f51..adc7e6f15f93 100644 --- a/sound/soc/pxa/pxa2xx-pcm.c +++ b/sound/soc/pxa/pxa2xx-pcm.c | |||
| @@ -25,9 +25,11 @@ static int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream, | |||
| 25 | struct snd_pcm_runtime *runtime = substream->runtime; | 25 | struct snd_pcm_runtime *runtime = substream->runtime; |
| 26 | struct pxa2xx_runtime_data *prtd = runtime->private_data; | 26 | struct pxa2xx_runtime_data *prtd = runtime->private_data; |
| 27 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 27 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 28 | struct pxa2xx_pcm_dma_params *dma = rtd->dai->cpu_dai->dma_data; | 28 | struct pxa2xx_pcm_dma_params *dma; |
| 29 | int ret; | 29 | int ret; |
| 30 | 30 | ||
| 31 | dma = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); | ||
| 32 | |||
| 31 | /* return if this is a bufferless transfer e.g. | 33 | /* return if this is a bufferless transfer e.g. |
| 32 | * codec <--> BT codec or GSM modem -- lg FIXME */ | 34 | * codec <--> BT codec or GSM modem -- lg FIXME */ |
| 33 | if (!dma) | 35 | if (!dma) |
diff --git a/sound/soc/s3c24xx/s3c-ac97.c b/sound/soc/s3c24xx/s3c-ac97.c index ee8ed9d7e703..ecf4fd04ae96 100644 --- a/sound/soc/s3c24xx/s3c-ac97.c +++ b/sound/soc/s3c24xx/s3c-ac97.c | |||
| @@ -224,11 +224,14 @@ static int s3c_ac97_hw_params(struct snd_pcm_substream *substream, | |||
| 224 | { | 224 | { |
| 225 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 225 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 226 | struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; | 226 | struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; |
| 227 | struct s3c_dma_params *dma_data; | ||
| 227 | 228 | ||
| 228 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 229 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
| 229 | cpu_dai->dma_data = &s3c_ac97_pcm_out; | 230 | dma_data = &s3c_ac97_pcm_out; |
| 230 | else | 231 | else |
| 231 | cpu_dai->dma_data = &s3c_ac97_pcm_in; | 232 | dma_data = &s3c_ac97_pcm_in; |
| 233 | |||
| 234 | snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); | ||
| 232 | 235 | ||
| 233 | return 0; | 236 | return 0; |
| 234 | } | 237 | } |
| @@ -238,8 +241,8 @@ static int s3c_ac97_trigger(struct snd_pcm_substream *substream, int cmd, | |||
| 238 | { | 241 | { |
| 239 | u32 ac_glbctrl; | 242 | u32 ac_glbctrl; |
| 240 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 243 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 241 | int channel = ((struct s3c_dma_params *) | 244 | struct s3c_dma_params *dma_data = |
| 242 | rtd->dai->cpu_dai->dma_data)->channel; | 245 | snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); |
| 243 | 246 | ||
| 244 | ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL); | 247 | ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL); |
| 245 | if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) | 248 | if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) |
| @@ -265,7 +268,7 @@ static int s3c_ac97_trigger(struct snd_pcm_substream *substream, int cmd, | |||
| 265 | 268 | ||
| 266 | writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL); | 269 | writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL); |
| 267 | 270 | ||
| 268 | s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED); | 271 | s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED); |
| 269 | 272 | ||
| 270 | return 0; | 273 | return 0; |
| 271 | } | 274 | } |
| @@ -280,7 +283,7 @@ static int s3c_ac97_hw_mic_params(struct snd_pcm_substream *substream, | |||
| 280 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 283 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
| 281 | return -ENODEV; | 284 | return -ENODEV; |
| 282 | else | 285 | else |
| 283 | cpu_dai->dma_data = &s3c_ac97_mic_in; | 286 | snd_soc_dai_set_dma_data(cpu_dai, substream, &s3c_ac97_mic_in); |
| 284 | 287 | ||
| 285 | return 0; | 288 | return 0; |
| 286 | } | 289 | } |
| @@ -290,8 +293,8 @@ static int s3c_ac97_mic_trigger(struct snd_pcm_substream *substream, | |||
| 290 | { | 293 | { |
| 291 | u32 ac_glbctrl; | 294 | u32 ac_glbctrl; |
| 292 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 295 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 293 | int channel = ((struct s3c_dma_params *) | 296 | struct s3c_dma_params *dma_data = |
| 294 | rtd->dai->cpu_dai->dma_data)->channel; | 297 | snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); |
| 295 | 298 | ||
| 296 | ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL); | 299 | ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL); |
| 297 | ac_glbctrl &= ~S3C_AC97_GLBCTRL_MICINTM_MASK; | 300 | ac_glbctrl &= ~S3C_AC97_GLBCTRL_MICINTM_MASK; |
| @@ -311,7 +314,7 @@ static int s3c_ac97_mic_trigger(struct snd_pcm_substream *substream, | |||
| 311 | 314 | ||
| 312 | writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL); | 315 | writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL); |
| 313 | 316 | ||
| 314 | s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED); | 317 | s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED); |
| 315 | 318 | ||
| 316 | return 0; | 319 | return 0; |
| 317 | } | 320 | } |
diff --git a/sound/soc/s3c24xx/s3c-dma.c b/sound/soc/s3c24xx/s3c-dma.c index 7725e26d6c91..1b61c23ff300 100644 --- a/sound/soc/s3c24xx/s3c-dma.c +++ b/sound/soc/s3c24xx/s3c-dma.c | |||
| @@ -145,10 +145,12 @@ static int s3c_dma_hw_params(struct snd_pcm_substream *substream, | |||
| 145 | struct snd_pcm_runtime *runtime = substream->runtime; | 145 | struct snd_pcm_runtime *runtime = substream->runtime; |
| 146 | struct s3c24xx_runtime_data *prtd = runtime->private_data; | 146 | struct s3c24xx_runtime_data *prtd = runtime->private_data; |
| 147 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 147 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 148 | struct s3c_dma_params *dma = rtd->dai->cpu_dai->dma_data; | ||
| 149 | unsigned long totbytes = params_buffer_bytes(params); | 148 | unsigned long totbytes = params_buffer_bytes(params); |
| 149 | struct s3c_dma_params *dma = | ||
| 150 | snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); | ||
| 150 | int ret = 0; | 151 | int ret = 0; |
| 151 | 152 | ||
| 153 | |||
| 152 | pr_debug("Entered %s\n", __func__); | 154 | pr_debug("Entered %s\n", __func__); |
| 153 | 155 | ||
| 154 | /* return if this is a bufferless transfer e.g. | 156 | /* return if this is a bufferless transfer e.g. |
diff --git a/sound/soc/s3c24xx/s3c-i2s-v2.c b/sound/soc/s3c24xx/s3c-i2s-v2.c index e994d8374fe6..88515946b6c0 100644 --- a/sound/soc/s3c24xx/s3c-i2s-v2.c +++ b/sound/soc/s3c24xx/s3c-i2s-v2.c | |||
| @@ -339,14 +339,17 @@ static int s3c2412_i2s_hw_params(struct snd_pcm_substream *substream, | |||
| 339 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 339 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 340 | struct snd_soc_dai_link *dai = rtd->dai; | 340 | struct snd_soc_dai_link *dai = rtd->dai; |
| 341 | struct s3c_i2sv2_info *i2s = to_info(dai->cpu_dai); | 341 | struct s3c_i2sv2_info *i2s = to_info(dai->cpu_dai); |
| 342 | struct s3c_dma_params *dma_data; | ||
| 342 | u32 iismod; | 343 | u32 iismod; |
| 343 | 344 | ||
| 344 | pr_debug("Entered %s\n", __func__); | 345 | pr_debug("Entered %s\n", __func__); |
| 345 | 346 | ||
| 346 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 347 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
| 347 | dai->cpu_dai->dma_data = i2s->dma_playback; | 348 | dma_data = i2s->dma_playback; |
| 348 | else | 349 | else |
| 349 | dai->cpu_dai->dma_data = i2s->dma_capture; | 350 | dma_data = i2s->dma_capture; |
| 351 | |||
| 352 | snd_soc_dai_set_dma_data(dai->cpu_dai, substream, dma_data); | ||
| 350 | 353 | ||
| 351 | /* Working copies of register */ | 354 | /* Working copies of register */ |
| 352 | iismod = readl(i2s->regs + S3C2412_IISMOD); | 355 | iismod = readl(i2s->regs + S3C2412_IISMOD); |
| @@ -394,8 +397,8 @@ static int s3c2412_i2s_trigger(struct snd_pcm_substream *substream, int cmd, | |||
| 394 | int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE); | 397 | int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE); |
| 395 | unsigned long irqs; | 398 | unsigned long irqs; |
| 396 | int ret = 0; | 399 | int ret = 0; |
| 397 | int channel = ((struct s3c_dma_params *) | 400 | struct s3c_dma_params *dma_data = |
| 398 | rtd->dai->cpu_dai->dma_data)->channel; | 401 | snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); |
| 399 | 402 | ||
| 400 | pr_debug("Entered %s\n", __func__); | 403 | pr_debug("Entered %s\n", __func__); |
| 401 | 404 | ||
| @@ -431,7 +434,7 @@ static int s3c2412_i2s_trigger(struct snd_pcm_substream *substream, int cmd, | |||
| 431 | * of the auto reload mechanism of S3C24XX. | 434 | * of the auto reload mechanism of S3C24XX. |
| 432 | * This call won't bother S3C64XX. | 435 | * This call won't bother S3C64XX. |
| 433 | */ | 436 | */ |
| 434 | s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED); | 437 | s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED); |
| 435 | 438 | ||
| 436 | break; | 439 | break; |
| 437 | 440 | ||
diff --git a/sound/soc/s3c24xx/s3c-pcm.c b/sound/soc/s3c24xx/s3c-pcm.c index a98f40c3cd29..326f0a9e7e30 100644 --- a/sound/soc/s3c24xx/s3c-pcm.c +++ b/sound/soc/s3c24xx/s3c-pcm.c | |||
| @@ -178,6 +178,7 @@ static int s3c_pcm_hw_params(struct snd_pcm_substream *substream, | |||
| 178 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 178 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 179 | struct snd_soc_dai_link *dai = rtd->dai; | 179 | struct snd_soc_dai_link *dai = rtd->dai; |
| 180 | struct s3c_pcm_info *pcm = to_info(dai->cpu_dai); | 180 | struct s3c_pcm_info *pcm = to_info(dai->cpu_dai); |
| 181 | struct s3c_dma_params *dma_data; | ||
| 181 | void __iomem *regs = pcm->regs; | 182 | void __iomem *regs = pcm->regs; |
| 182 | struct clk *clk; | 183 | struct clk *clk; |
| 183 | int sclk_div, sync_div; | 184 | int sclk_div, sync_div; |
| @@ -187,9 +188,11 @@ static int s3c_pcm_hw_params(struct snd_pcm_substream *substream, | |||
| 187 | dev_dbg(pcm->dev, "Entered %s\n", __func__); | 188 | dev_dbg(pcm->dev, "Entered %s\n", __func__); |
| 188 | 189 | ||
| 189 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 190 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
| 190 | dai->cpu_dai->dma_data = pcm->dma_playback; | 191 | dma_data = pcm->dma_playback; |
| 191 | else | 192 | else |
| 192 | dai->cpu_dai->dma_data = pcm->dma_capture; | 193 | dma_data = pcm->dma_capture; |
| 194 | |||
| 195 | snd_soc_dai_set_dma_data(dai->cpu_dai, substream, dma_data); | ||
| 193 | 196 | ||
| 194 | /* Strictly check for sample size */ | 197 | /* Strictly check for sample size */ |
| 195 | switch (params_format(params)) { | 198 | switch (params_format(params)) { |
diff --git a/sound/soc/s3c24xx/s3c24xx-i2s.c b/sound/soc/s3c24xx/s3c24xx-i2s.c index 0bc5950b9f02..c3ac890a3986 100644 --- a/sound/soc/s3c24xx/s3c24xx-i2s.c +++ b/sound/soc/s3c24xx/s3c24xx-i2s.c | |||
| @@ -242,14 +242,17 @@ static int s3c24xx_i2s_hw_params(struct snd_pcm_substream *substream, | |||
| 242 | struct snd_soc_dai *dai) | 242 | struct snd_soc_dai *dai) |
| 243 | { | 243 | { |
| 244 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 244 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 245 | struct s3c_dma_params *dma_data; | ||
| 245 | u32 iismod; | 246 | u32 iismod; |
| 246 | 247 | ||
| 247 | pr_debug("Entered %s\n", __func__); | 248 | pr_debug("Entered %s\n", __func__); |
| 248 | 249 | ||
| 249 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 250 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
| 250 | rtd->dai->cpu_dai->dma_data = &s3c24xx_i2s_pcm_stereo_out; | 251 | dma_data = &s3c24xx_i2s_pcm_stereo_out; |
| 251 | else | 252 | else |
| 252 | rtd->dai->cpu_dai->dma_data = &s3c24xx_i2s_pcm_stereo_in; | 253 | dma_data = &s3c24xx_i2s_pcm_stereo_in; |
| 254 | |||
| 255 | snd_soc_dai_set_dma_data(rtd->dai->cpu_dai, substream, dma_data); | ||
| 253 | 256 | ||
| 254 | /* Working copies of register */ | 257 | /* Working copies of register */ |
| 255 | iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD); | 258 | iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD); |
| @@ -258,13 +261,11 @@ static int s3c24xx_i2s_hw_params(struct snd_pcm_substream *substream, | |||
| 258 | switch (params_format(params)) { | 261 | switch (params_format(params)) { |
| 259 | case SNDRV_PCM_FORMAT_S8: | 262 | case SNDRV_PCM_FORMAT_S8: |
| 260 | iismod &= ~S3C2410_IISMOD_16BIT; | 263 | iismod &= ~S3C2410_IISMOD_16BIT; |
| 261 | ((struct s3c_dma_params *) | 264 | dma_data->dma_size = 1; |
| 262 | rtd->dai->cpu_dai->dma_data)->dma_size = 1; | ||
| 263 | break; | 265 | break; |
| 264 | case SNDRV_PCM_FORMAT_S16_LE: | 266 | case SNDRV_PCM_FORMAT_S16_LE: |
| 265 | iismod |= S3C2410_IISMOD_16BIT; | 267 | iismod |= S3C2410_IISMOD_16BIT; |
| 266 | ((struct s3c_dma_params *) | 268 | dma_data->dma_size = 2; |
| 267 | rtd->dai->cpu_dai->dma_data)->dma_size = 2; | ||
| 268 | break; | 269 | break; |
| 269 | default: | 270 | default: |
| 270 | return -EINVAL; | 271 | return -EINVAL; |
| @@ -280,8 +281,8 @@ static int s3c24xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd, | |||
| 280 | { | 281 | { |
| 281 | int ret = 0; | 282 | int ret = 0; |
| 282 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 283 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 283 | int channel = ((struct s3c_dma_params *) | 284 | struct s3c_dma_params *dma_data = |
| 284 | rtd->dai->cpu_dai->dma_data)->channel; | 285 | snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); |
| 285 | 286 | ||
| 286 | pr_debug("Entered %s\n", __func__); | 287 | pr_debug("Entered %s\n", __func__); |
| 287 | 288 | ||
| @@ -300,7 +301,7 @@ static int s3c24xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd, | |||
| 300 | else | 301 | else |
| 301 | s3c24xx_snd_txctrl(1); | 302 | s3c24xx_snd_txctrl(1); |
| 302 | 303 | ||
| 303 | s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED); | 304 | s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED); |
| 304 | break; | 305 | break; |
| 305 | case SNDRV_PCM_TRIGGER_STOP: | 306 | case SNDRV_PCM_TRIGGER_STOP: |
| 306 | case SNDRV_PCM_TRIGGER_SUSPEND: | 307 | case SNDRV_PCM_TRIGGER_SUSPEND: |
diff --git a/sound/soc/s6000/s6000-i2s.c b/sound/soc/s6000/s6000-i2s.c index 0664fac7612a..5b9ac1759bd2 100644 --- a/sound/soc/s6000/s6000-i2s.c +++ b/sound/soc/s6000/s6000-i2s.c | |||
| @@ -519,7 +519,8 @@ static int __devinit s6000_i2s_probe(struct platform_device *pdev) | |||
| 519 | 519 | ||
| 520 | s6000_i2s_dai.dev = &pdev->dev; | 520 | s6000_i2s_dai.dev = &pdev->dev; |
| 521 | s6000_i2s_dai.private_data = dev; | 521 | s6000_i2s_dai.private_data = dev; |
| 522 | s6000_i2s_dai.dma_data = &dev->dma_params; | 522 | s6000_i2s_dai.capture.dma_data = &dev->dma_params; |
| 523 | s6000_i2s_dai.playback.dma_data = &dev->dma_params; | ||
| 523 | 524 | ||
| 524 | dev->sifbase = sifmem->start; | 525 | dev->sifbase = sifmem->start; |
| 525 | dev->scbbase = mmio; | 526 | dev->scbbase = mmio; |
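Because the DAI now carries one dma_data pointer per direction, the s6000 probe above could just as easily hand out different parameter blocks for playback and capture instead of pointing both at &dev->dma_params; a sketch, assuming hypothetical dma_params_tx/dma_params_rx fields on the device structure:

	/* sketch only: the _tx/_rx fields and example names are invented */
	struct s6000_i2s_dev_example {
		struct s6000_pcm_dma_params dma_params_tx;
		struct s6000_pcm_dma_params dma_params_rx;
		/* ... */
	};

	static void s6000_i2s_example_set_dma(struct s6000_i2s_dev_example *dev)
	{
		s6000_i2s_dai.playback.dma_data = &dev->dma_params_tx;
		s6000_i2s_dai.capture.dma_data  = &dev->dma_params_rx;
	}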
diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c index 1d61109e09fa..9c7f7f00cebb 100644 --- a/sound/soc/s6000/s6000-pcm.c +++ b/sound/soc/s6000/s6000-pcm.c | |||
| @@ -58,13 +58,15 @@ static void s6000_pcm_enqueue_dma(struct snd_pcm_substream *substream) | |||
| 58 | struct snd_pcm_runtime *runtime = substream->runtime; | 58 | struct snd_pcm_runtime *runtime = substream->runtime; |
| 59 | struct s6000_runtime_data *prtd = runtime->private_data; | 59 | struct s6000_runtime_data *prtd = runtime->private_data; |
| 60 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; | 60 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; |
| 61 | struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data; | 61 | struct s6000_pcm_dma_params *par; |
| 62 | int channel; | 62 | int channel; |
| 63 | unsigned int period_size; | 63 | unsigned int period_size; |
| 64 | unsigned int dma_offset; | 64 | unsigned int dma_offset; |
| 65 | dma_addr_t dma_pos; | 65 | dma_addr_t dma_pos; |
| 66 | dma_addr_t src, dst; | 66 | dma_addr_t src, dst; |
| 67 | 67 | ||
| 68 | par = snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); | ||
| 69 | |||
| 68 | period_size = snd_pcm_lib_period_bytes(substream); | 70 | period_size = snd_pcm_lib_period_bytes(substream); |
| 69 | dma_offset = prtd->period * period_size; | 71 | dma_offset = prtd->period * period_size; |
| 70 | dma_pos = runtime->dma_addr + dma_offset; | 72 | dma_pos = runtime->dma_addr + dma_offset; |
| @@ -101,7 +103,8 @@ static irqreturn_t s6000_pcm_irq(int irq, void *data) | |||
| 101 | { | 103 | { |
| 102 | struct snd_pcm *pcm = data; | 104 | struct snd_pcm *pcm = data; |
| 103 | struct snd_soc_pcm_runtime *runtime = pcm->private_data; | 105 | struct snd_soc_pcm_runtime *runtime = pcm->private_data; |
| 104 | struct s6000_pcm_dma_params *params = runtime->dai->cpu_dai->dma_data; | 106 | struct s6000_pcm_dma_params *params = |
| 107 | snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); | ||
| 105 | struct s6000_runtime_data *prtd; | 108 | struct s6000_runtime_data *prtd; |
| 106 | unsigned int has_xrun; | 109 | unsigned int has_xrun; |
| 107 | int i, ret = IRQ_NONE; | 110 | int i, ret = IRQ_NONE; |
| @@ -172,11 +175,13 @@ static int s6000_pcm_start(struct snd_pcm_substream *substream) | |||
| 172 | { | 175 | { |
| 173 | struct s6000_runtime_data *prtd = substream->runtime->private_data; | 176 | struct s6000_runtime_data *prtd = substream->runtime->private_data; |
| 174 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; | 177 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; |
| 175 | struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data; | 178 | struct s6000_pcm_dma_params *par; |
| 176 | unsigned long flags; | 179 | unsigned long flags; |
| 177 | int srcinc; | 180 | int srcinc; |
| 178 | u32 dma; | 181 | u32 dma; |
| 179 | 182 | ||
| 183 | par = snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); | ||
| 184 | |||
| 180 | spin_lock_irqsave(&prtd->lock, flags); | 185 | spin_lock_irqsave(&prtd->lock, flags); |
| 181 | 186 | ||
| 182 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { | 187 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { |
| @@ -212,10 +217,12 @@ static int s6000_pcm_stop(struct snd_pcm_substream *substream) | |||
| 212 | { | 217 | { |
| 213 | struct s6000_runtime_data *prtd = substream->runtime->private_data; | 218 | struct s6000_runtime_data *prtd = substream->runtime->private_data; |
| 214 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; | 219 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; |
| 215 | struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data; | 220 | struct s6000_pcm_dma_params *par; |
| 216 | unsigned long flags; | 221 | unsigned long flags; |
| 217 | u32 channel; | 222 | u32 channel; |
| 218 | 223 | ||
| 224 | par = snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); | ||
| 225 | |||
| 219 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 226 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
| 220 | channel = par->dma_out; | 227 | channel = par->dma_out; |
| 221 | else | 228 | else |
| @@ -236,9 +243,11 @@ static int s6000_pcm_stop(struct snd_pcm_substream *substream) | |||
| 236 | static int s6000_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | 243 | static int s6000_pcm_trigger(struct snd_pcm_substream *substream, int cmd) |
| 237 | { | 244 | { |
| 238 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; | 245 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; |
| 239 | struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data; | 246 | struct s6000_pcm_dma_params *par; |
| 240 | int ret; | 247 | int ret; |
| 241 | 248 | ||
| 249 | par = snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); | ||
| 250 | |||
| 242 | ret = par->trigger(substream, cmd, 0); | 251 | ret = par->trigger(substream, cmd, 0); |
| 243 | if (ret < 0) | 252 | if (ret < 0) |
| 244 | return ret; | 253 | return ret; |
| @@ -275,13 +284,15 @@ static int s6000_pcm_prepare(struct snd_pcm_substream *substream) | |||
| 275 | static snd_pcm_uframes_t s6000_pcm_pointer(struct snd_pcm_substream *substream) | 284 | static snd_pcm_uframes_t s6000_pcm_pointer(struct snd_pcm_substream *substream) |
| 276 | { | 285 | { |
| 277 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; | 286 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; |
| 278 | struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data; | 287 | struct s6000_pcm_dma_params *par; |
| 279 | struct snd_pcm_runtime *runtime = substream->runtime; | 288 | struct snd_pcm_runtime *runtime = substream->runtime; |
| 280 | struct s6000_runtime_data *prtd = runtime->private_data; | 289 | struct s6000_runtime_data *prtd = runtime->private_data; |
| 281 | unsigned long flags; | 290 | unsigned long flags; |
| 282 | unsigned int offset; | 291 | unsigned int offset; |
| 283 | dma_addr_t count; | 292 | dma_addr_t count; |
| 284 | 293 | ||
| 294 | par = snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); | ||
| 295 | |||
| 285 | spin_lock_irqsave(&prtd->lock, flags); | 296 | spin_lock_irqsave(&prtd->lock, flags); |
| 286 | 297 | ||
| 287 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 298 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
| @@ -305,11 +316,12 @@ static snd_pcm_uframes_t s6000_pcm_pointer(struct snd_pcm_substream *substream) | |||
| 305 | static int s6000_pcm_open(struct snd_pcm_substream *substream) | 316 | static int s6000_pcm_open(struct snd_pcm_substream *substream) |
| 306 | { | 317 | { |
| 307 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; | 318 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; |
| 308 | struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data; | 319 | struct s6000_pcm_dma_params *par; |
| 309 | struct snd_pcm_runtime *runtime = substream->runtime; | 320 | struct snd_pcm_runtime *runtime = substream->runtime; |
| 310 | struct s6000_runtime_data *prtd; | 321 | struct s6000_runtime_data *prtd; |
| 311 | int ret; | 322 | int ret; |
| 312 | 323 | ||
| 324 | par = snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); | ||
| 313 | snd_soc_set_runtime_hwparams(substream, &s6000_pcm_hardware); | 325 | snd_soc_set_runtime_hwparams(substream, &s6000_pcm_hardware); |
| 314 | 326 | ||
| 315 | ret = snd_pcm_hw_constraint_step(runtime, 0, | 327 | ret = snd_pcm_hw_constraint_step(runtime, 0, |
| @@ -364,7 +376,7 @@ static int s6000_pcm_hw_params(struct snd_pcm_substream *substream, | |||
| 364 | struct snd_pcm_hw_params *hw_params) | 376 | struct snd_pcm_hw_params *hw_params) |
| 365 | { | 377 | { |
| 366 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; | 378 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; |
| 367 | struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data; | 379 | struct s6000_pcm_dma_params *par; |
| 368 | int ret; | 380 | int ret; |
| 369 | ret = snd_pcm_lib_malloc_pages(substream, | 381 | ret = snd_pcm_lib_malloc_pages(substream, |
| 370 | params_buffer_bytes(hw_params)); | 382 | params_buffer_bytes(hw_params)); |
| @@ -373,6 +385,8 @@ static int s6000_pcm_hw_params(struct snd_pcm_substream *substream, | |||
| 373 | return ret; | 385 | return ret; |
| 374 | } | 386 | } |
| 375 | 387 | ||
| 388 | par = snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); | ||
| 389 | |||
| 376 | if (par->same_rate) { | 390 | if (par->same_rate) { |
| 377 | spin_lock(&par->lock); | 391 | spin_lock(&par->lock); |
| 378 | if (par->rate == -1 || | 392 | if (par->rate == -1 || |
| @@ -392,7 +406,8 @@ static int s6000_pcm_hw_params(struct snd_pcm_substream *substream, | |||
| 392 | static int s6000_pcm_hw_free(struct snd_pcm_substream *substream) | 406 | static int s6000_pcm_hw_free(struct snd_pcm_substream *substream) |
| 393 | { | 407 | { |
| 394 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; | 408 | struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; |
| 395 | struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data; | 409 | struct s6000_pcm_dma_params *par = |
| 410 | snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); | ||
| 396 | 411 | ||
| 397 | spin_lock(&par->lock); | 412 | spin_lock(&par->lock); |
| 398 | par->in_use &= ~(1 << substream->stream); | 413 | par->in_use &= ~(1 << substream->stream); |
| @@ -417,7 +432,8 @@ static struct snd_pcm_ops s6000_pcm_ops = { | |||
| 417 | static void s6000_pcm_free(struct snd_pcm *pcm) | 432 | static void s6000_pcm_free(struct snd_pcm *pcm) |
| 418 | { | 433 | { |
| 419 | struct snd_soc_pcm_runtime *runtime = pcm->private_data; | 434 | struct snd_soc_pcm_runtime *runtime = pcm->private_data; |
| 420 | struct s6000_pcm_dma_params *params = runtime->dai->cpu_dai->dma_data; | 435 | struct s6000_pcm_dma_params *params = |
| 436 | snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); | ||
| 421 | 437 | ||
| 422 | free_irq(params->irq, pcm); | 438 | free_irq(params->irq, pcm); |
| 423 | snd_pcm_lib_preallocate_free_for_all(pcm); | 439 | snd_pcm_lib_preallocate_free_for_all(pcm); |
| @@ -429,9 +445,11 @@ static int s6000_pcm_new(struct snd_card *card, | |||
| 429 | struct snd_soc_dai *dai, struct snd_pcm *pcm) | 445 | struct snd_soc_dai *dai, struct snd_pcm *pcm) |
| 430 | { | 446 | { |
| 431 | struct snd_soc_pcm_runtime *runtime = pcm->private_data; | 447 | struct snd_soc_pcm_runtime *runtime = pcm->private_data; |
| 432 | struct s6000_pcm_dma_params *params = runtime->dai->cpu_dai->dma_data; | 448 | struct s6000_pcm_dma_params *params; |
| 433 | int res; | 449 | int res; |
| 434 | 450 | ||
| 451 | params = snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); | ||
| 452 | |||
| 435 | if (!card->dev->dma_mask) | 453 | if (!card->dev->dma_mask) |
| 436 | card->dev->dma_mask = &s6000_pcm_dmamask; | 454 | card->dev->dma_mask = &s6000_pcm_dmamask; |
| 437 | if (!card->dev->coherent_dma_mask) | 455 | if (!card->dev->coherent_dma_mask) |
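Several of the s6000 PCM callbacks (the interrupt handler, pcm_free() and pcm_new()) only receive the struct snd_pcm rather than a substream, so the per-direction data has to be reached through the pcm's stream table. A sketch of that lookup, with a hypothetical helper name:

	/* sketch only: foo_pcm_dma_params() is an invented helper */
	static struct s6000_pcm_dma_params *foo_pcm_dma_params(struct snd_pcm *pcm,
								int stream)
	{
		struct snd_soc_pcm_runtime *runtime = pcm->private_data;
		struct snd_pcm_substream *substream =
				pcm->streams[stream].substream;

		if (!substream)
			return NULL;

		return snd_soc_dai_get_dma_data(runtime->dai->cpu_dai, substream);
	}

	/* e.g. in a callback that only has the pcm:
	 *	par = foo_pcm_dma_params(pcm, SNDRV_PCM_STREAM_PLAYBACK);
	 */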
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 2320153bd923..ad7f9528d751 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
| @@ -1549,7 +1549,8 @@ int snd_soc_new_pcms(struct snd_soc_device *socdev, int idx, const char *xid) | |||
| 1549 | mutex_unlock(&codec->mutex); | 1549 | mutex_unlock(&codec->mutex); |
| 1550 | return ret; | 1550 | return ret; |
| 1551 | } | 1551 | } |
| 1552 | if (card->dai_link[i].codec_dai->ac97_control) { | 1552 | /* Check for codec->ac97 to handle the ac97.c fun */ |
| 1553 | if (card->dai_link[i].codec_dai->ac97_control && codec->ac97) { | ||
| 1553 | snd_ac97_dev_add_pdata(codec->ac97, | 1554 | snd_ac97_dev_add_pdata(codec->ac97, |
| 1554 | card->dai_link[i].cpu_dai->ac97_pdata); | 1555 | card->dai_link[i].cpu_dai->ac97_pdata); |
| 1555 | } | 1556 | } |
diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c index 2c59afd99611..9e28b20cb2ce 100644 --- a/sound/usb/usbmidi.c +++ b/sound/usb/usbmidi.c | |||
| @@ -986,6 +986,8 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream) | |||
| 986 | DEFINE_WAIT(wait); | 986 | DEFINE_WAIT(wait); |
| 987 | long timeout = msecs_to_jiffies(50); | 987 | long timeout = msecs_to_jiffies(50); |
| 988 | 988 | ||
| 989 | if (ep->umidi->disconnected) | ||
| 990 | return; | ||
| 989 | /* | 991 | /* |
| 990 | * The substream buffer is empty, but some data might still be in the | 992 | * The substream buffer is empty, but some data might still be in the |
| 991 | * currently active URBs, so we have to wait for those to complete. | 993 | * currently active URBs, so we have to wait for those to complete. |
| @@ -1123,14 +1125,21 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi* umidi, | |||
| 1123 | * Frees an output endpoint. | 1125 | * Frees an output endpoint. |
| 1124 | * May be called when ep hasn't been initialized completely. | 1126 | * May be called when ep hasn't been initialized completely. |
| 1125 | */ | 1127 | */ |
| 1126 | static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint* ep) | 1128 | static void snd_usbmidi_out_endpoint_clear(struct snd_usb_midi_out_endpoint *ep) |
| 1127 | { | 1129 | { |
| 1128 | unsigned int i; | 1130 | unsigned int i; |
| 1129 | 1131 | ||
| 1130 | for (i = 0; i < OUTPUT_URBS; ++i) | 1132 | for (i = 0; i < OUTPUT_URBS; ++i) |
| 1131 | if (ep->urbs[i].urb) | 1133 | if (ep->urbs[i].urb) { |
| 1132 | free_urb_and_buffer(ep->umidi, ep->urbs[i].urb, | 1134 | free_urb_and_buffer(ep->umidi, ep->urbs[i].urb, |
| 1133 | ep->max_transfer); | 1135 | ep->max_transfer); |
| 1136 | ep->urbs[i].urb = NULL; | ||
| 1137 | } | ||
| 1138 | } | ||
| 1139 | |||
| 1140 | static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint *ep) | ||
| 1141 | { | ||
| 1142 | snd_usbmidi_out_endpoint_clear(ep); | ||
| 1134 | kfree(ep); | 1143 | kfree(ep); |
| 1135 | } | 1144 | } |
| 1136 | 1145 | ||
| @@ -1262,15 +1271,18 @@ void snd_usbmidi_disconnect(struct list_head* p) | |||
| 1262 | usb_kill_urb(ep->out->urbs[j].urb); | 1271 | usb_kill_urb(ep->out->urbs[j].urb); |
| 1263 | if (umidi->usb_protocol_ops->finish_out_endpoint) | 1272 | if (umidi->usb_protocol_ops->finish_out_endpoint) |
| 1264 | umidi->usb_protocol_ops->finish_out_endpoint(ep->out); | 1273 | umidi->usb_protocol_ops->finish_out_endpoint(ep->out); |
| 1274 | ep->out->active_urbs = 0; | ||
| 1275 | if (ep->out->drain_urbs) { | ||
| 1276 | ep->out->drain_urbs = 0; | ||
| 1277 | wake_up(&ep->out->drain_wait); | ||
| 1278 | } | ||
| 1265 | } | 1279 | } |
| 1266 | if (ep->in) | 1280 | if (ep->in) |
| 1267 | for (j = 0; j < INPUT_URBS; ++j) | 1281 | for (j = 0; j < INPUT_URBS; ++j) |
| 1268 | usb_kill_urb(ep->in->urbs[j]); | 1282 | usb_kill_urb(ep->in->urbs[j]); |
| 1269 | /* free endpoints here; later call can result in Oops */ | 1283 | /* free endpoints here; later call can result in Oops */ |
| 1270 | if (ep->out) { | 1284 | if (ep->out) |
| 1271 | snd_usbmidi_out_endpoint_delete(ep->out); | 1285 | snd_usbmidi_out_endpoint_clear(ep->out); |
| 1272 | ep->out = NULL; | ||
| 1273 | } | ||
| 1274 | if (ep->in) { | 1286 | if (ep->in) { |
| 1275 | snd_usbmidi_in_endpoint_delete(ep->in); | 1287 | snd_usbmidi_in_endpoint_delete(ep->in); |
| 1276 | ep->in = NULL; | 1288 | ep->in = NULL; |
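The usbmidi changes close a disconnect race around draining: the drain path must not sleep once the device is gone, and the disconnect path must zero the in-flight accounting and wake any sleeper so the wait can end. Stripped of the driver's locking and URB handling, the pattern looks roughly like this (foo_* names are illustrative, locking omitted for brevity):

	/* illustrative abortable-drain pattern, not the usbmidi code itself */
	struct foo_endpoint {
		wait_queue_head_t drain_wait;
		unsigned int drain_urbs;	/* URBs still in flight */
		bool disconnected;
	};

	static void foo_drain(struct foo_endpoint *ep)
	{
		if (ep->disconnected)
			return;		/* nothing will complete any more */

		wait_event_timeout(ep->drain_wait,
				   ep->drain_urbs == 0 || ep->disconnected,
				   msecs_to_jiffies(50));
	}

	static void foo_disconnect(struct foo_endpoint *ep)
	{
		ep->disconnected = true;
		if (ep->drain_urbs) {
			ep->drain_urbs = 0;	/* no completions will arrive */
			wake_up(&ep->drain_wait);
		}
	}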
