272 files changed, 3734 insertions(+), 4200 deletions(-)
@@ -18,7 +18,7 @@
 			    Version 2, June 1991
 
  Copyright (C) 1989, 1991 Free Software Foundation, Inc.
- 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  Everyone is permitted to copy and distribute verbatim copies
  of this license document, but changing it is not allowed.
 
@@ -321,7 +321,7 @@ the "copyright" line and a pointer to where the full notice is found.
 
     You should have received a copy of the GNU General Public License
     along with this program; if not, write to the Free Software
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 
 
 Also add information on how to contact you by electronic and paper mail.
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index f6de52b01059..433cf5e9ae04 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -277,7 +277,7 @@ tty.txt
 unicode.txt
 	- info on the Unicode character/font mapping used in Linux.
 uml/
-	- directory with infomation about User Mode Linux.
+	- directory with information about User Mode Linux.
 usb/
 	- directory with info regarding the Universal Serial Bus.
 video4linux/
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index f25b3953f513..22e5f9036f3c 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -236,6 +236,9 @@ ugly), but try to avoid excess.  Instead, put the comments at the head
 of the function, telling people what it does, and possibly WHY it does
 it.
 
+When commenting the kernel API functions, please use the kerneldoc format.
+See the files Documentation/kernel-doc-nano-HOWTO.txt and scripts/kernel-doc
+for details.
 
 Chapter 8: You've made a mess of it
 
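
For reference, a kernel-doc comment takes the following general shape
(a minimal sketch; the function name and parameter below are hypothetical):

	/**
	 * foo_frobnicate - apply a frobnication to a foo
	 * @foo: the foo to be frobnicated
	 *
	 * Longer description of what the function does and why.
	 *
	 * Returns zero on success or a negative errno on failure.
	 */
	int foo_frobnicate(struct foo *foo);
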
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 6ee3cd6134df..1af0f2d50220 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -121,7 +121,7 @@ pool's device.
 			dma_addr_t addr);
 
 This puts memory back into the pool.  The pool is what was passed to
-the the pool allocation routine; the cpu and dma addresses are what
+the pool allocation routine; the cpu and dma addresses are what
 were returned when that routine allocated the memory being freed.
 
 
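
A minimal sketch of the allocate/free pairing this hunk describes,
assuming a struct dma_pool *pool created earlier with dma_pool_create():

	void *vaddr;
	dma_addr_t dma;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (vaddr) {
		/* ... use vaddr and dma for the DMA operation ... */
		dma_pool_free(pool, vaddr, dma);	/* same pool, cpu and dma addresses */
	}
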
diff --git a/Documentation/DocBook/journal-api.tmpl b/Documentation/DocBook/journal-api.tmpl
index 1ef6f43c6d8f..341aaa4ce481 100644
--- a/Documentation/DocBook/journal-api.tmpl
+++ b/Documentation/DocBook/journal-api.tmpl
@@ -116,7 +116,7 @@ filesystem. Almost.
 
 You still need to actually journal your filesystem changes, this
 is done by wrapping them into transactions. Additionally you
-also need to wrap the modification of each of the the buffers
+also need to wrap the modification of each of the buffers
 with calls to the journal layer, so it knows what the modifications
 you are actually making are. To do this use journal_start() which
 returns a transaction handle.
@@ -128,7 +128,7 @@ and its counterpart journal_stop(), which indicates the end of a transaction
 are nestable calls, so you can reenter a transaction if necessary,
 but remember you must call journal_stop() the same number of times as
 journal_start() before the transaction is completed (or more accurately
-leaves the the update phase). Ext3/VFS makes use of this feature to simplify
+leaves the update phase). Ext3/VFS makes use of this feature to simplify
 quota support.
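
A minimal sketch of the wrapping described in these hunks, assuming an
existing journal_t *journal, a buffer_head *bh to be modified, and a
worst-case block count nblocks (error handling abbreviated):

	handle_t *handle;

	handle = journal_start(journal, nblocks);	/* may nest */
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	journal_get_write_access(handle, bh);	/* declare intent to modify bh */
	/* ... modify the buffer ... */
	journal_dirty_metadata(handle, bh);	/* log the modification */
	journal_stop(handle);			/* must balance journal_start() */
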
diff --git a/Documentation/DocBook/usb.tmpl b/Documentation/DocBook/usb.tmpl
index f3ef0bf435e9..705c442c7bf4 100644
--- a/Documentation/DocBook/usb.tmpl
+++ b/Documentation/DocBook/usb.tmpl
@@ -841,7 +841,7 @@ usbdev_ioctl (int fd, int ifno, unsigned request, void *param)
 	File modification time is not updated by this request.
 	</para><para>
 	Those struct members are from some interface descriptor
-	applying to the the current configuration.
+	applying to the current configuration.
 	The interface number is the bInterfaceNumber value, and
 	the altsetting number is the bAlternateSetting value.
 	(This resets each endpoint in the interface.)
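
For context, the request documented in this hunk corresponds to the usbfs
USBDEVFS_SETINTERFACE ioctl; a minimal user-space sketch, assuming the
caller has already opened usbfd and chosen ifno and alt:

	struct usbdevfs_setinterface setif = {
		.interface  = ifno,	/* bInterfaceNumber */
		.altsetting = alt,	/* bAlternateSetting */
	};

	if (ioctl(usbfd, USBDEVFS_SETINTERFACE, &setif) < 0)
		perror("USBDEVFS_SETINTERFACE");
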
diff --git a/Documentation/MSI-HOWTO.txt b/Documentation/MSI-HOWTO.txt
index d5032eb480aa..63edc5f847c4 100644
--- a/Documentation/MSI-HOWTO.txt
+++ b/Documentation/MSI-HOWTO.txt
@@ -430,7 +430,7 @@ which may result in system hang. The software driver of specific
 MSI-capable hardware is responsible for whether calling
 pci_enable_msi or not. A return of zero indicates the kernel
 successfully initializes the MSI/MSI-X capability structure of the
-device funtion. The device function is now running on MSI/MSI-X mode.
+device function. The device function is now running on MSI/MSI-X mode.
 
 5.6 How to tell whether MSI/MSI-X is enabled on device function
 
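
A minimal sketch of the return-value check described in this hunk,
assuming the driver's struct pci_dev *pdev:

	if (pci_enable_msi(pdev) == 0) {
		/* Success: the device function now operates in MSI mode,
		 * and pdev->irq has been updated to the MSI vector. */
	} else {
		/* MSI unavailable: fall back to legacy INTx interrupts. */
	}
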
diff --git a/Documentation/RCU/RTFP.txt b/Documentation/RCU/RTFP.txt
index 9c6d450138ea..fcbcbc35b122 100644
--- a/Documentation/RCU/RTFP.txt
+++ b/Documentation/RCU/RTFP.txt
@@ -2,7 +2,8 @@ Read the F-ing Papers!
 
 
 This document describes RCU-related publications, and is followed by
-the corresponding bibtex entries.
+the corresponding bibtex entries.  A number of the publications may
+be found at http://www.rdrop.com/users/paulmck/RCU/.
 
 The first thing resembling RCU was published in 1980, when Kung and Lehman
 [Kung80] recommended use of a garbage collector to defer destruction
@@ -113,6 +114,10 @@ describing how to make RCU safe for soft-realtime applications [Sarma04c],
 and a paper describing SELinux performance with RCU [JamesMorris04b].
 
 
+2005 has seen further adaptation of RCU to realtime use, permitting
+preemption of RCU realtime critical sections [PaulMcKenney05a,
+PaulMcKenney05b].
+
 Bibtex Entries
 
 @article{Kung80
@@ -410,3 +415,32 @@ Oregon Health and Sciences University"
 \url{http://www.livejournal.com/users/james_morris/2153.html}
 [Viewed December 10, 2004]"
 }
+
+@unpublished{PaulMcKenney05a
+,Author="Paul E. McKenney"
+,Title="{[RFC]} {RCU} and {CONFIG\_PREEMPT\_RT} progress"
+,month="May"
+,year="2005"
+,note="Available:
+\url{http://lkml.org/lkml/2005/5/9/185}
+[Viewed May 13, 2005]"
+,annotation="
+	First publication of working lock-based deferred free patches
+	for the CONFIG_PREEMPT_RT environment.
+"
+}
+
+@conference{PaulMcKenney05b
+,Author="Paul E. McKenney and Dipankar Sarma"
+,Title="Towards Hard Realtime Response from the Linux Kernel on SMP Hardware"
+,Booktitle="linux.conf.au 2005"
+,month="April"
+,year="2005"
+,address="Canberra, Australia"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/RCU/realtimeRCU.2005.04.23a.pdf}
+[Viewed May 13, 2005]"
+,annotation="
+	Realtime turns into making RCU yet more realtime friendly.
+"
+}
diff --git a/Documentation/RCU/UP.txt b/Documentation/RCU/UP.txt
index 3bfb84b3b7db..aab4a9ec3931 100644
--- a/Documentation/RCU/UP.txt
+++ b/Documentation/RCU/UP.txt
@@ -8,7 +8,7 @@ is that since there is only one CPU, it should not be necessary to
 wait for anything else to get done, since there are no other CPUs for
 anything else to be happening on.  Although this approach will -sort- -of-
 work a surprising amount of the time, it is a very bad idea in general.
-This document presents two examples that demonstrate exactly how bad an
+This document presents three examples that demonstrate exactly how bad an
 idea this is.
 
 
@@ -26,6 +26,9 @@ from softirq, the list scan would find itself referencing a newly freed
 element B.  This situation can greatly decrease the life expectancy of
 your kernel.
 
+This same problem can occur if call_rcu() is invoked from a hardware
+interrupt handler.
+
 
 Example 2: Function-Call Fatality
 
@@ -44,8 +47,37 @@ its arguments would cause it to fail to make the fundamental guarantee
 underlying RCU, namely that call_rcu() defers invoking its arguments until
 all RCU read-side critical sections currently executing have completed.
 
-Quick Quiz: why is it -not- legal to invoke synchronize_rcu() in
-	this case?
+Quick Quiz #1: why is it -not- legal to invoke synchronize_rcu() in
+	this case?
+
+
+Example 3: Death by Deadlock
+
+Suppose that call_rcu() is invoked while holding a lock, and that the
+callback function must acquire this same lock.  In this case, if
+call_rcu() were to directly invoke the callback, the result would
+be self-deadlock.
+
+In some cases, it would be possible to restructure the code so that
+the call_rcu() is delayed until after the lock is released.  However,
+there are cases where this can be quite ugly:
+
+1.	If a number of items need to be passed to call_rcu() within
+	the same critical section, then the code would need to create
+	a list of them, then traverse the list once the lock was
+	released.
+
+2.	In some cases, the lock will be held across some kernel API,
+	so that delaying the call_rcu() until the lock is released
+	requires that the data item be passed up via a common API.
+	It is far better to guarantee that callbacks are invoked
+	with no locks held than to have to modify such APIs to allow
+	arbitrary data items to be passed back up through them.
+
+If call_rcu() directly invokes the callback, painful locking restrictions
+or API changes would be required.
+
+Quick Quiz #2: What locking restriction must RCU callbacks respect?
 
 
 Summary
@@ -53,12 +85,35 @@ Summary
 Permitting call_rcu() to immediately invoke its arguments or permitting
 synchronize_rcu() to immediately return breaks RCU, even on a UP system.
 So do not do it!  Even on a UP system, the RCU infrastructure -must-
-respect grace periods.
+respect grace periods, and -must- invoke callbacks from a known environment
+in which no locks are held.
 
 
-Answer to Quick Quiz
-
-The calling function is scanning an RCU-protected linked list, and
-is therefore within an RCU read-side critical section.  Therefore,
-the called function has been invoked within an RCU read-side critical
-section, and is not permitted to block.
+Answer to Quick Quiz #1:
+	Why is it -not- legal to invoke synchronize_rcu() in this case?
+
+	Because the calling function is scanning an RCU-protected linked
+	list, and is therefore within an RCU read-side critical section.
+	Therefore, the called function has been invoked within an RCU
+	read-side critical section, and is not permitted to block.
+
+Answer to Quick Quiz #2:
+	What locking restriction must RCU callbacks respect?
+
+	Any lock that is acquired within an RCU callback must be
+	acquired elsewhere using an _irq variant of the spinlock
+	primitive.  For example, if "mylock" is acquired by an
+	RCU callback, then a process-context acquisition of this
+	lock must use something like spin_lock_irqsave() to
+	acquire the lock.
+
+	If the process-context code were to simply use spin_lock(),
+	then, since RCU callbacks can be invoked from softirq context,
+	the callback might be called from a softirq that interrupted
+	the process-context critical section.  This would result in
+	self-deadlock.
+
+	This restriction might seem gratuitous, since very few RCU
+	callbacks acquire locks directly.  However, a great many RCU
+	callbacks do acquire locks -indirectly-, for example, via
+	the kfree() primitive.
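
To make the restriction in the answer to Quick Quiz #2 concrete, here is
a minimal sketch; mylock, my_rcu_callback(), and my_update() are
hypothetical names:

	static DEFINE_SPINLOCK(mylock);

	/* RCU callback, runs in softirq context: plain spin_lock() is safe. */
	static void my_rcu_callback(struct rcu_head *head)
	{
		spin_lock(&mylock);
		/* ... final cleanup of the element containing head ... */
		spin_unlock(&mylock);
	}

	/* Process context: an _irq variant is required, lest a softirq
	 * running my_rcu_callback() interrupt us while we hold mylock,
	 * resulting in self-deadlock. */
	static void my_update(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&mylock, flags);
		/* ... update the data protected by mylock ... */
		spin_unlock_irqrestore(&mylock, flags);
	}
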
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 8f3fb77c9cd3..e118a7c1a092 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -43,6 +43,10 @@ over a rather long period of time, but improvements are always welcome!
 	rcu_read_lock_bh()) in the read-side critical sections,
 	and are also an excellent aid to readability.
 
+	As a rough rule of thumb, any dereference of an RCU-protected
+	pointer must be covered by rcu_read_lock() or rcu_read_lock_bh()
+	or by the appropriate update-side lock.
+
 3.	Does the update code tolerate concurrent accesses?
 
 	The whole point of RCU is to permit readers to run without
@@ -90,7 +94,11 @@ over a rather long period of time, but improvements are always welcome!
 
 		The rcu_dereference() primitive is used by the various
 		"_rcu()" list-traversal primitives, such as the
-		list_for_each_entry_rcu().
+		list_for_each_entry_rcu().  Note that it is perfectly
+		legal (if redundant) for update-side code to use
+		rcu_dereference() and the "_rcu()" list-traversal
+		primitives.  This is particularly useful in code
+		that is common to readers and updaters.
 
 	b.	If the list macros are being used, the list_add_tail_rcu()
 		and list_add_rcu() primitives must be used in order
@@ -150,16 +158,9 @@ over a rather long period of time, but improvements are always welcome!
 
 	Use of the _rcu() list-traversal primitives outside of an
 	RCU read-side critical section causes no harm other than
-	a slight performance degradation on Alpha CPUs and some
-	confusion on the part of people trying to read the code.
-
-	Another way of thinking of this is "If you are holding the
-	lock that prevents the data structure from changing, why do
-	you also need RCU-based protection?"  That said, there may
-	well be situations where use of the _rcu() list-traversal
-	primitives while the update-side lock is held results in
-	simpler and more maintainable code.  The jury is still out
-	on this question.
+	a slight performance degradation on Alpha CPUs.  It can
+	also be quite helpful in reducing code bloat when common
+	code is shared between readers and updaters.
 
 10.	Conversely, if you are in an RCU read-side critical section,
 	you -must- use the "_rcu()" variants of the list macros.
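
A minimal sketch of the shared-code pattern endorsed above; foo_head,
foo_lock, struct foo, and its key/list fields are hypothetical.  The
same traversal serves readers (under rcu_read_lock()) and updaters
(while holding foo_lock):

	static struct foo *foo_find(long key)
	{
		struct foo *p;

		/* Caller holds either rcu_read_lock() or foo_lock. */
		list_for_each_entry_rcu(p, &foo_head, list)
			if (p->key == key)
				return p;
		return NULL;
	}
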
diff --git a/Documentation/RCU/rcu.txt b/Documentation/RCU/rcu.txt
index eb444006683e..6fa092251586 100644
--- a/Documentation/RCU/rcu.txt
+++ b/Documentation/RCU/rcu.txt
@@ -64,6 +64,54 @@ o	I hear that RCU is patented?  What is with that?
 	Of these, one was allowed to lapse by the assignee, and the
 	others have been contributed to the Linux kernel under GPL.
 
+o	I hear that RCU needs work in order to support realtime kernels?
+
+	Yes, work in progress.
+
 o	Where can I find more information on RCU?
 
 	See the RTFP.txt file in this directory.
+	Or point your browser at http://www.rdrop.com/users/paulmck/RCU/.
+
+o	What are all these files in this directory?
+
+	NMI-RCU.txt
+
+		Describes how to use RCU to implement dynamic
+		NMI handlers, which can be revectored on the fly,
+		without rebooting.
+
+	RTFP.txt
+
+		List of RCU-related publications and web sites.
+
+	UP.txt
+
+		Discussion of RCU usage in UP kernels.
+
+	arrayRCU.txt
+
+		Describes how to use RCU to protect arrays, with
+		resizeable arrays whose elements reference other
+		data structures being of the most interest.
+
+	checklist.txt
+
+		Lists things to check for when inspecting code that
+		uses RCU.
+
+	listRCU.txt
+
+		Describes how to use RCU to protect linked lists.
+		This is the simplest and most common use of RCU
+		in the Linux kernel.
+
+	rcu.txt
+
+		You are reading it!
+
+	whatisRCU.txt
+
+		Overview of how the RCU implementation works.  Along
+		the way, presents a conceptual view of RCU.
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
new file mode 100644
index 000000000000..354d89c78377
--- /dev/null
+++ b/Documentation/RCU/whatisRCU.txt
@@ -0,0 +1,902 @@
+What is RCU?
+
+RCU is a synchronization mechanism that was added to the Linux kernel
+during the 2.5 development effort that is optimized for read-mostly
+situations.  Although RCU is actually quite simple once you understand it,
+getting there can sometimes be a challenge.  Part of the problem is that
+most of the past descriptions of RCU have been written with the mistaken
+assumption that there is "one true way" to describe RCU.  Instead,
+the experience has been that different people must take different paths
+to arrive at an understanding of RCU.  This document provides several
+different paths, as follows:
+
+1.	RCU OVERVIEW
+2.	WHAT IS RCU'S CORE API?
+3.	WHAT ARE SOME EXAMPLE USES OF CORE RCU API?
+4.	WHAT IF MY UPDATING THREAD CANNOT BLOCK?
+5.	WHAT ARE SOME SIMPLE IMPLEMENTATIONS OF RCU?
+6.	ANALOGY WITH READER-WRITER LOCKING
+7.	FULL LIST OF RCU APIs
+8.	ANSWERS TO QUICK QUIZZES
+
+People who prefer starting with a conceptual overview should focus on
+Section 1, though most readers will profit by reading this section at
+some point.  People who prefer to start with an API that they can then
+experiment with should focus on Section 2.  People who prefer to start
+with example uses should focus on Sections 3 and 4.  People who need to
+understand the RCU implementation should focus on Section 5, then dive
+into the kernel source code.  People who reason best by analogy should
+focus on Section 6.  Section 7 serves as an index to the docbook API
+documentation, and Section 8 is the traditional answer key.
+
+So, start with the section that makes the most sense to you and your
+preferred method of learning.  If you need to know everything about
+everything, feel free to read the whole thing -- but if you are really
+that type of person, you have perused the source code and will therefore
+never need this document anyway.  ;-)
+
+
+1.  RCU OVERVIEW
+
+The basic idea behind RCU is to split updates into "removal" and
+"reclamation" phases.  The removal phase removes references to data items
+within a data structure (possibly by replacing them with references to
+new versions of these data items), and can run concurrently with readers.
+The reason that it is safe to run the removal phase concurrently with
+readers is that the semantics of modern CPUs guarantee that readers will
+see either the old or the new version of the data structure rather than a
+partially updated reference.  The reclamation phase does the work of
+reclaiming (e.g., freeing) the data items removed from the data structure
+during the removal phase.  Because reclaiming data items can disrupt any
+readers concurrently referencing those data items, the reclamation phase
+must not start until readers no longer hold references to those data items.
+
+Splitting the update into removal and reclamation phases permits the
+updater to perform the removal phase immediately, and to defer the
+reclamation phase until all readers active during the removal phase have
+completed, either by blocking until they finish or by registering a
+callback that is invoked after they finish.  Only readers that are active
+during the removal phase need be considered, because any reader starting
+after the removal phase will be unable to gain a reference to the removed
+data items, and therefore cannot be disrupted by the reclamation phase.
+
+So the typical RCU update sequence goes something like the following:
+
+a.	Remove pointers to a data structure, so that subsequent
+	readers cannot gain a reference to it.
+
+b.	Wait for all previous readers to complete their RCU read-side
+	critical sections.
+
+c.	At this point, there cannot be any readers who hold references
+	to the data structure, so it now may safely be reclaimed
+	(e.g., kfree()d).
+
+Step (b) above is the key idea underlying RCU's deferred destruction.
+The ability to wait until all readers are done allows RCU readers to
+use much lighter-weight synchronization, in some cases, absolutely no
+synchronization at all.  In contrast, in more conventional lock-based
+schemes, readers must use heavy-weight synchronization in order to
+prevent an updater from deleting the data structure out from under them.
+This is because lock-based updaters typically update data items in place,
+and must therefore exclude readers.  In contrast, RCU-based updaters
+typically take advantage of the fact that writes to single aligned
+pointers are atomic on modern CPUs, allowing atomic insertion, removal,
+and replacement of data items in a linked structure without disrupting
+readers.  Concurrent RCU readers can then continue accessing the old
+versions, and can dispense with the atomic operations, memory barriers,
+and communications cache misses that are so expensive on present-day
+SMP computer systems, even in absence of lock contention.
+
+In the three-step procedure shown above, the updater is performing both
+the removal and the reclamation step, but it is often helpful for an
+entirely different thread to do the reclamation, as is in fact the case
+in the Linux kernel's directory-entry cache (dcache).  Even if the same
+thread performs both the update step (step (a) above) and the reclamation
+step (step (c) above), it is often helpful to think of them separately.
+For example, RCU readers and updaters need not communicate at all,
+but RCU provides implicit low-overhead communication between readers
+and reclaimers, namely, in step (b) above.
+
+So how the heck can a reclaimer tell when a reader is done, given
+that readers are not doing any sort of synchronization operations???
+Read on to learn about how RCU's API makes this easy.
+
+
+2.  WHAT IS RCU'S CORE API?
+
+The core RCU API is quite small:
+
+a.	rcu_read_lock()
+b.	rcu_read_unlock()
+c.	synchronize_rcu() / call_rcu()
+d.	rcu_assign_pointer()
+e.	rcu_dereference()
+
+There are many other members of the RCU API, but the rest can be
+expressed in terms of these five, though most implementations instead
+express synchronize_rcu() in terms of the call_rcu() callback API.
+
+The five core RCU APIs are described below; the other 18 will be
+enumerated later.  See the kernel docbook documentation for more info,
+or look directly at the function header comments.
+
+rcu_read_lock()
+
+	void rcu_read_lock(void);
+
+	Used by a reader to inform the reclaimer that the reader is
+	entering an RCU read-side critical section.  It is illegal
+	to block while in an RCU read-side critical section, though
+	kernels built with CONFIG_PREEMPT_RCU can preempt RCU read-side
+	critical sections.  Any RCU-protected data structure accessed
+	during an RCU read-side critical section is guaranteed to remain
+	unreclaimed for the full duration of that critical section.
+	Reference counts may be used in conjunction with RCU to maintain
+	longer-term references to data structures.
+
+rcu_read_unlock()
+
+	void rcu_read_unlock(void);
+
+	Used by a reader to inform the reclaimer that the reader is
+	exiting an RCU read-side critical section.  Note that RCU
+	read-side critical sections may be nested and/or overlapping.
+
+synchronize_rcu()
+
+	void synchronize_rcu(void);
+
+	Marks the end of updater code and the beginning of reclaimer
+	code.  It does this by blocking until all pre-existing RCU
+	read-side critical sections on all CPUs have completed.
+	Note that synchronize_rcu() will -not- necessarily wait for
+	any subsequent RCU read-side critical sections to complete.
+	For example, consider the following sequence of events:
+
+	         CPU 0                   CPU 1                  CPU 2
+	     -----------------  ------------------------  ---------------
+	 1.  rcu_read_lock()
+	 2.                     enters synchronize_rcu()
+	 3.                                               rcu_read_lock()
+	 4.  rcu_read_unlock()
+	 5.                     exits synchronize_rcu()
+	 6.                                               rcu_read_unlock()
+
+	To reiterate, synchronize_rcu() waits only for ongoing RCU
+	read-side critical sections to complete, not necessarily for
+	any that begin after synchronize_rcu() is invoked.
+
+	Of course, synchronize_rcu() does not necessarily return
+	-immediately- after the last pre-existing RCU read-side critical
+	section completes.  For one thing, there might well be scheduling
+	delays.  For another thing, many RCU implementations process
+	requests in batches in order to improve efficiencies, which can
+	further delay synchronize_rcu().
+
+	Since synchronize_rcu() is the API that must figure out when
+	readers are done, its implementation is key to RCU.  For RCU
+	to be useful in all but the most read-intensive situations,
+	synchronize_rcu()'s overhead must also be quite small.
+
+	The call_rcu() API is a callback form of synchronize_rcu(),
+	and is described in more detail in a later section.  Instead of
+	blocking, it registers a function and argument which are invoked
+	after all ongoing RCU read-side critical sections have completed.
+	This callback variant is particularly useful in situations where
+	it is illegal to block.
+
+rcu_assign_pointer()
+
+	typeof(p) rcu_assign_pointer(p, typeof(p) v);
+
+	Yes, rcu_assign_pointer() -is- implemented as a macro, though it
+	would be cool to be able to declare a function in this manner.
+	(Compiler experts will no doubt disagree.)
+
+	The updater uses this function to assign a new value to an
+	RCU-protected pointer, in order to safely communicate the change
+	in value from the updater to the reader.  This function returns
+	the new value, and also executes any memory-barrier instructions
+	required for a given CPU architecture.
+
+	Perhaps more important, it serves to document which pointers
+	are protected by RCU.  That said, rcu_assign_pointer() is most
+	frequently used indirectly, via the _rcu list-manipulation
+	primitives such as list_add_rcu().
+
+rcu_dereference()
+
+	typeof(p) rcu_dereference(p);
+
+	Like rcu_assign_pointer(), rcu_dereference() must be implemented
+	as a macro.
+
+	The reader uses rcu_dereference() to fetch an RCU-protected
+	pointer, which returns a value that may then be safely
+	dereferenced.  Note that rcu_dereference() does not actually
+	dereference the pointer; instead, it protects the pointer for
+	later dereferencing.  It also executes any needed memory-barrier
+	instructions for a given CPU architecture.  Currently, only Alpha
+	needs memory barriers within rcu_dereference() -- on other CPUs,
+	it compiles to nothing, not even a compiler directive.
+
+	Common coding practice uses rcu_dereference() to copy an
+	RCU-protected pointer to a local variable, then dereferences
+	this local variable, for example as follows:
+
+		p = rcu_dereference(head.next);
+		return p->data;
+
+	However, in this case, one could just as easily combine these
+	into one statement:
+
+		return rcu_dereference(head.next)->data;
+
+	If you are going to be fetching multiple fields from the
+	RCU-protected structure, using the local variable is of
+	course preferred.  Repeated rcu_dereference() calls look
+	ugly and incur unnecessary overhead on Alpha CPUs.
+
+	Note that the value returned by rcu_dereference() is valid
+	only within the enclosing RCU read-side critical section.
+	For example, the following is -not- legal:
+
+		rcu_read_lock();
+		p = rcu_dereference(head.next);
+		rcu_read_unlock();
+		x = p->address;
+		rcu_read_lock();
+		y = p->data;
+		rcu_read_unlock();
+
+	Holding a reference from one RCU read-side critical section
+	to another is just as illegal as holding a reference from
+	one lock-based critical section to another!  Similarly,
+	using a reference outside of the critical section in which
+	it was acquired is just as illegal as doing so with normal
+	locking.
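+
+	For example, one legal restructuring of the code above -- a
+	sketch, keeping in mind that head.next might well reference a
+	different element the second time -- re-fetches the pointer
+	within the second critical section:
+
+		rcu_read_lock();
+		p = rcu_dereference(head.next);
+		x = p->address;
+		rcu_read_unlock();
+		rcu_read_lock();
+		p = rcu_dereference(head.next);
+		y = p->data;
+		rcu_read_unlock();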
+
+	As with rcu_assign_pointer(), an important function of
+	rcu_dereference() is to document which pointers are protected
+	by RCU.  And, again like rcu_assign_pointer(), rcu_dereference()
+	is typically used indirectly, via the _rcu list-manipulation
+	primitives, such as list_for_each_entry_rcu().
+
+The following diagram shows how each API communicates among the
+reader, updater, and reclaimer.
+
+
+	    rcu_assign_pointer()
+				    +--------+
+	    +---------------------->| reader |---------+
+	    |                       +--------+         |
+	    |                           |              |
+	    |                           |              | Protect:
+	    |                           |              | rcu_read_lock()
+	    |                           |              | rcu_read_unlock()
+	    |    rcu_dereference()      |              |
+	+---------+                     |              |
+	| updater |<--------------------+              |
+	+---------+                                    V
+	    |                                    +-----------+
+	    +----------------------------------->| reclaimer |
+	                                         +-----------+
+	                      Defer:
+	              synchronize_rcu() & call_rcu()
+
+
+The RCU infrastructure observes the time sequence of rcu_read_lock(),
+rcu_read_unlock(), synchronize_rcu(), and call_rcu() invocations in
+order to determine when (1) synchronize_rcu() invocations may return
+to their callers and (2) call_rcu() callbacks may be invoked.  Efficient
+implementations of the RCU infrastructure make heavy use of batching in
+order to amortize their overhead over many uses of the corresponding APIs.
+
+There are no fewer than three RCU mechanisms in the Linux kernel; the
+diagram above shows the first one, which is by far the most commonly used.
+The rcu_dereference() and rcu_assign_pointer() primitives are used for
+all three mechanisms, but different defer and protect primitives are
+used as follows:
+
+	Defer			Protect
+
+a.	synchronize_rcu()	rcu_read_lock() / rcu_read_unlock()
+	call_rcu()
+
+b.	call_rcu_bh()		rcu_read_lock_bh() / rcu_read_unlock_bh()
+
+c.	synchronize_sched()	preempt_disable() / preempt_enable()
+				local_irq_save() / local_irq_restore()
+				hardirq enter / hardirq exit
+				NMI enter / NMI exit
+
+These three mechanisms are used as follows:
+
+a.	RCU applied to normal data structures.
+
+b.	RCU applied to networking data structures that may be subjected
+	to remote denial-of-service attacks.
+
+c.	RCU applied to scheduler and interrupt/NMI-handler tasks.
+
+Again, most uses will be of (a).  The (b) and (c) cases are important
+for specialized uses, but are relatively uncommon.
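+
+	For example, mechanism (b) pairs as follows (a sketch, with gp
+	being an RCU-protected global pointer):
+
+		rcu_read_lock_bh();
+		p = rcu_dereference(gp);
+		/* ... use p ... */
+		rcu_read_unlock_bh();
+
+	with the update side deferring reclamation via call_rcu_bh().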
+
+
+3.  WHAT ARE SOME EXAMPLE USES OF CORE RCU API?
+
+This section shows a simple use of the core RCU API to protect a
+global pointer to a dynamically allocated structure.  More typical
+uses of RCU may be found in listRCU.txt, arrayRCU.txt, and NMI-RCU.txt.
+
+	struct foo {
+		int a;
+		char b;
+		long c;
+	};
+	DEFINE_SPINLOCK(foo_mutex);
+
+	struct foo *gbl_foo;
+
+	/*
+	 * Create a new struct foo that is the same as the one currently
+	 * pointed to by gbl_foo, except that field "a" is replaced
+	 * with "new_a".  Points gbl_foo to the new structure, and
+	 * frees up the old structure after a grace period.
+	 *
+	 * Uses rcu_assign_pointer() to ensure that concurrent readers
+	 * see the initialized version of the new structure.
+	 *
+	 * Uses synchronize_rcu() to ensure that any readers that might
+	 * have references to the old structure complete before freeing
+	 * the old structure.
+	 */
+	void foo_update_a(int new_a)
+	{
+		struct foo *new_fp;
+		struct foo *old_fp;
+
+		new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
+		spin_lock(&foo_mutex);
+		old_fp = gbl_foo;
+		*new_fp = *old_fp;
+		new_fp->a = new_a;
+		rcu_assign_pointer(gbl_foo, new_fp);
+		spin_unlock(&foo_mutex);
+		synchronize_rcu();
+		kfree(old_fp);
+	}
+
+	/*
+	 * Return the value of field "a" of the current gbl_foo
+	 * structure.  Use rcu_read_lock() and rcu_read_unlock()
+	 * to ensure that the structure does not get deleted out
+	 * from under us, and use rcu_dereference() to ensure that
+	 * we see the initialized version of the structure (important
+	 * for DEC Alpha and for people reading the code).
+	 */
+	int foo_get_a(void)
+	{
+		int retval;
+
+		rcu_read_lock();
+		retval = rcu_dereference(gbl_foo)->a;
+		rcu_read_unlock();
+		return retval;
+	}
+
+So, to sum up:
+
+o	Use rcu_read_lock() and rcu_read_unlock() to guard RCU
+	read-side critical sections.
+
+o	Within an RCU read-side critical section, use rcu_dereference()
+	to dereference RCU-protected pointers.
+
+o	Use some solid scheme (such as locks or semaphores) to
+	keep concurrent updates from interfering with each other.
+
+o	Use rcu_assign_pointer() to update an RCU-protected pointer.
+	This primitive protects concurrent readers from the updater,
+	-not- concurrent updates from each other!  You therefore still
+	need to use locking (or something similar) to keep concurrent
+	rcu_assign_pointer() primitives from interfering with each other.
+
+o	Use synchronize_rcu() -after- removing a data element from an
+	RCU-protected data structure, but -before- reclaiming/freeing
+	the data element, in order to wait for the completion of all
+	RCU read-side critical sections that might be referencing that
+	data item.
+
+See checklist.txt for additional rules to follow when using RCU.
+
+
+4.  WHAT IF MY UPDATING THREAD CANNOT BLOCK?
+
+In the example above, foo_update_a() blocks until a grace period elapses.
+This is quite simple, but in some cases one cannot afford to wait so
+long -- there might be other high-priority work to be done.
+
+In such cases, one uses call_rcu() rather than synchronize_rcu().
+The call_rcu() API is as follows:
+
+	void call_rcu(struct rcu_head *head,
+		      void (*func)(struct rcu_head *head));
+
+This function invokes func(head) after a grace period has elapsed.
+This invocation might happen from either softirq or process context,
+so the function is not permitted to block.  The foo struct needs to
+have an rcu_head structure added, perhaps as follows:
+
+	struct foo {
+		int a;
+		char b;
+		long c;
+		struct rcu_head rcu;
+	};
+
+The foo_update_a() function might then be written as follows:
+
+	/*
+	 * Create a new struct foo that is the same as the one currently
+	 * pointed to by gbl_foo, except that field "a" is replaced
+	 * with "new_a".  Points gbl_foo to the new structure, and
+	 * frees up the old structure after a grace period.
+	 *
+	 * Uses rcu_assign_pointer() to ensure that concurrent readers
+	 * see the initialized version of the new structure.
+	 *
+	 * Uses call_rcu() to ensure that any readers that might have
+	 * references to the old structure complete before freeing the
+	 * old structure.
+	 */
+	void foo_update_a(int new_a)
+	{
+		struct foo *new_fp;
+		struct foo *old_fp;
+
+		new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
+		spin_lock(&foo_mutex);
+		old_fp = gbl_foo;
+		*new_fp = *old_fp;
+		new_fp->a = new_a;
+		rcu_assign_pointer(gbl_foo, new_fp);
+		spin_unlock(&foo_mutex);
+		call_rcu(&old_fp->rcu, foo_reclaim);
+	}
+
+The foo_reclaim() function might appear as follows:
+
+	void foo_reclaim(struct rcu_head *rp)
+	{
+		struct foo *fp = container_of(rp, struct foo, rcu);
+
+		kfree(fp);
+	}
+
+The container_of() primitive is a macro that, given a pointer into a
+struct, the type of the struct, and the pointed-to field within the
+struct, returns a pointer to the beginning of the struct.
+
+The use of call_rcu() permits the caller of foo_update_a() to
+immediately regain control, without needing to worry further about the
+old version of the newly updated element.  It also clearly shows the
+RCU distinction between updater, namely foo_update_a(), and reclaimer,
+namely foo_reclaim().
+
+The summary of advice is the same as for the previous section, except
+that we are now using call_rcu() rather than synchronize_rcu():
+
+o	Use call_rcu() -after- removing a data element from an
+	RCU-protected data structure in order to register a callback
+	function that will be invoked after the completion of all RCU
+	read-side critical sections that might be referencing that
+	data item.
+
+Again, see checklist.txt for additional rules governing the use of RCU.
+
+
+5.  WHAT ARE SOME SIMPLE IMPLEMENTATIONS OF RCU?
+
+One of the nice things about RCU is that it has extremely simple "toy"
+implementations that are a good first step towards understanding the
+production-quality implementations in the Linux kernel.  This section
+presents two such "toy" implementations of RCU, one that is implemented
+in terms of familiar locking primitives, and another that more closely
+resembles "classic" RCU.  Both are way too simple for real-world use,
+lacking both functionality and performance.  However, they are useful
+in getting a feel for how RCU works.  See kernel/rcupdate.c for a
+production-quality implementation, and see:
+
+	http://www.rdrop.com/users/paulmck/RCU
+
+for papers describing the Linux kernel RCU implementation.  The OLS'01
+and OLS'02 papers are a good introduction, and the dissertation provides
+more details on the current implementation.
+
+
+5A.  "TOY" IMPLEMENTATION #1: LOCKING
+
+This section presents a "toy" RCU implementation that is based on
+familiar locking primitives.  Its overhead makes it a non-starter for
+real-life use, as does its lack of scalability.  It is also unsuitable
+for realtime use, since it allows scheduling latency to "bleed" from
+one read-side critical section to another.
+
+However, it is probably the easiest implementation to relate to, so is
+a good starting point.
+
+It is extremely simple:
+
+	static DEFINE_RWLOCK(rcu_gp_mutex);
+
+	void rcu_read_lock(void)
+	{
+		read_lock(&rcu_gp_mutex);
+	}
+
+	void rcu_read_unlock(void)
+	{
+		read_unlock(&rcu_gp_mutex);
+	}
+
+	void synchronize_rcu(void)
+	{
+		write_lock(&rcu_gp_mutex);
+		write_unlock(&rcu_gp_mutex);
+	}
+
+[You can ignore rcu_assign_pointer() and rcu_dereference() without
+missing much.  But here they are anyway.  And whatever you do, don't
+forget about them when submitting patches making use of RCU!]
+
+	#define rcu_assign_pointer(p, v)	({ \
+						smp_wmb(); \
+						(p) = (v); \
+					})
+
+	#define rcu_dereference(p)	({ \
+					typeof(p) _________p1 = p; \
+					smp_read_barrier_depends(); \
+					(_________p1); \
+					})
+
+
+The rcu_read_lock() and rcu_read_unlock() primitives read-acquire
+and release a global reader-writer lock.  The synchronize_rcu()
+primitive write-acquires this same lock, then immediately releases
+it.  This means that once synchronize_rcu() exits, all RCU read-side
+critical sections that were in progress before synchronize_rcu() was
+called are guaranteed to have completed -- there is no way that
+synchronize_rcu() would have been able to write-acquire the lock
+otherwise.
+
+It is possible to nest rcu_read_lock(), since reader-writer locks may
+be recursively acquired.  Note also that rcu_read_lock() is immune
+from deadlock (an important property of RCU).  The reason for this is
+that the only thing that can block rcu_read_lock() is a synchronize_rcu().
+But synchronize_rcu() does not acquire any locks while holding rcu_gp_mutex,
+so there can be no deadlock cycle.
+
+Quick Quiz #1:	Why is this argument naive?  How could a deadlock
+		occur when using this algorithm in a real-world Linux
+		kernel?  How could this deadlock be avoided?
+
+
+5B.  "TOY" EXAMPLE #2: CLASSIC RCU
+
+This section presents a "toy" RCU implementation that is based on
+"classic RCU".  It is also short on performance (but only for updates) and
+on features such as hotplug CPU and the ability to run in CONFIG_PREEMPT
+kernels.  The definitions of rcu_dereference() and rcu_assign_pointer()
+are the same as those shown in the preceding section, so they are omitted.
+
+	void rcu_read_lock(void) { }
+
+	void rcu_read_unlock(void) { }
+
+	void synchronize_rcu(void)
+	{
+		int cpu;
+
+		for_each_cpu(cpu)
+			run_on(cpu);
+	}
+
+Note that rcu_read_lock() and rcu_read_unlock() do absolutely nothing.
+This is the great strength of classic RCU in a non-preemptive kernel:
+read-side overhead is precisely zero, at least on non-Alpha CPUs.
+And there is absolutely no way that rcu_read_lock() can possibly
+participate in a deadlock cycle!
+
+The implementation of synchronize_rcu() simply schedules itself on each
+CPU in turn.  The run_on() primitive can be implemented straightforwardly
+in terms of the sched_setaffinity() primitive.  Of course, a somewhat less
+"toy" implementation would restore the affinity upon completion rather
+than just leaving all tasks running on the last CPU, but when I said
+"toy", I meant -toy-!
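+
+	For reference, a sketch of run_on(), assuming the 2.6-era
+	in-kernel sched_setaffinity(pid, cpumask_t) (error checking
+	omitted):
+
+		static void run_on(int cpu)
+		{
+			sched_setaffinity(current->pid, cpumask_of_cpu(cpu));
+		}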
+
+So how the heck is this supposed to work???
+
+Remember that it is illegal to block while in an RCU read-side critical
+section.  Therefore, if a given CPU executes a context switch, we know
+that it must have completed all preceding RCU read-side critical sections.
+Once -all- CPUs have executed a context switch, then -all- preceding
+RCU read-side critical sections will have completed.
+
+So, suppose that we remove a data item from its structure and then invoke
+synchronize_rcu().  Once synchronize_rcu() returns, we are guaranteed
+that there are no RCU read-side critical sections holding a reference
+to that data item, so we can safely reclaim it.
+
+Quick Quiz #2:	Give an example where Classic RCU's read-side
+		overhead is -negative-.
+
+Quick Quiz #3:	If it is illegal to block in an RCU read-side
+		critical section, what the heck do you do in
+		PREEMPT_RT, where normal spinlocks can block???
+
+
| 641 | 6. ANALOGY WITH READER-WRITER LOCKING | ||
| 642 | |||
| 643 | Although RCU can be used in many different ways, a very common use of | ||
| 644 | RCU is analogous to reader-writer locking. The following unified | ||
| 645 | diff shows how closely related RCU and reader-writer locking can be. | ||
| 646 | |||
| 647 | @@ -13,15 +14,15 @@ | ||
| 648 | struct list_head *lp; | ||
| 649 | struct el *p; | ||
| 650 | |||
| 651 | - read_lock(); | ||
| 652 | - list_for_each_entry(p, head, lp) { | ||
| 653 | + rcu_read_lock(); | ||
| 654 | + list_for_each_entry_rcu(p, head, lp) { | ||
| 655 | if (p->key == key) { | ||
| 656 | *result = p->data; | ||
| 657 | - read_unlock(); | ||
| 658 | + rcu_read_unlock(); | ||
| 659 | return 1; | ||
| 660 | } | ||
| 661 | } | ||
| 662 | - read_unlock(); | ||
| 663 | + rcu_read_unlock(); | ||
| 664 | return 0; | ||
| 665 | } | ||
| 666 | |||
| 667 | @@ -29,15 +30,16 @@ | ||
| 668 | { | ||
| 669 | struct el *p; | ||
| 670 | |||
| 671 | - write_lock(&listmutex); | ||
| 672 | + spin_lock(&listmutex); | ||
| 673 | list_for_each_entry(p, head, lp) { | ||
| 674 | if (p->key == key) { | ||
| 675 | list_del(&p->list); | ||
| 676 | - write_unlock(&listmutex); | ||
| 677 | + spin_unlock(&listmutex); | ||
| 678 | + synchronize_rcu(); | ||
| 679 | kfree(p); | ||
| 680 | return 1; | ||
| 681 | } | ||
| 682 | } | ||
| 683 | - write_unlock(&listmutex); | ||
| 684 | + spin_unlock(&listmutex); | ||
| 685 | return 0; | ||
| 686 | } | ||
| 687 | |||
| 688 | Or, for those who prefer a side-by-side listing: | ||
| 689 | |||
| 690 | 1 struct el { 1 struct el { | ||
| 691 | 2 struct list_head list; 2 struct list_head list; | ||
| 692 | 3 long key; 3 long key; | ||
| 693 | 4 spinlock_t mutex; 4 spinlock_t mutex; | ||
| 694 | 5 int data; 5 int data; | ||
| 695 | 6 /* Other data fields */ 6 /* Other data fields */ | ||
| 696 | 7 }; 7 }; | ||
| 697 | 8 spinlock_t listmutex; 8 spinlock_t listmutex; | ||
| 698 | 9 struct el head; 9 struct el head; | ||
| 699 | |||
| 700 | 1 int search(long key, int *result) 1 int search(long key, int *result) | ||
| 701 | 2 { 2 { | ||
| 702 | 3 struct list_head *lp; 3 struct list_head *lp; | ||
| 703 | 4 struct el *p; 4 struct el *p; | ||
| 704 | 5 5 | ||
| 705 | 6 read_lock(); 6 rcu_read_lock(); | ||
| 706 | 7 list_for_each_entry(p, head, lp) { 7 list_for_each_entry_rcu(p, head, lp) { | ||
| 707 | 8 if (p->key == key) { 8 if (p->key == key) { | ||
| 708 | 9 *result = p->data; 9 *result = p->data; | ||
| 709 | 10 read_unlock(); 10 rcu_read_unlock(); | ||
| 710 | 11 return 1; 11 return 1; | ||
| 711 | 12 } 12 } | ||
| 712 | 13 } 13 } | ||
| 713 | 14 read_unlock(); 14 rcu_read_unlock(); | ||
| 714 | 15 return 0; 15 return 0; | ||
| 715 | 16 } 16 } | ||
| 716 | |||
| 717 | 1 int delete(long key) 1 int delete(long key) | ||
| 718 | 2 { 2 { | ||
| 719 | 3 struct el *p; 3 struct el *p; | ||
| 720 | 4 4 | ||
| 721 | 5 write_lock(&listmutex); 5 spin_lock(&listmutex); | ||
| 722 | 6 list_for_each_entry(p, head, lp) { 6 list_for_each_entry(p, head, lp) { | ||
| 723 | 7 if (p->key == key) { 7 if (p->key == key) { | ||
| 724 | 8 list_del(&p->list); 8 list_del(&p->list); | ||
| 725 | 9 write_unlock(&listmutex); 9 spin_unlock(&listmutex); | ||
| 726 | 10 synchronize_rcu(); | ||
| 727 | 10 kfree(p); 11 kfree(p); | ||
| 728 | 11 return 1; 12 return 1; | ||
| 729 | 12 } 13 } | ||
| 730 | 13 } 14 } | ||
| 731 | 14 write_unlock(&listmutex); 15 spin_unlock(&listmutex); | ||
| 732 | 15 return 0; 16 return 0; | ||
| 733 | 16 } 17 } | ||
| 734 | |||
| 735 | Either way, the differences are quite small. Read-side locking moves | ||
| 736 | to rcu_read_lock() and rcu_read_unlock(), update-side locking moves | ||
| 737 | from a reader-writer lock to a simple spinlock, and a synchronize_rcu() | ||
| 738 | precedes the kfree(). | ||
| 739 | |||
| 740 | However, there is one potential catch: the read-side and update-side | ||
| 741 | critical sections can now run concurrently. In many cases, this will | ||
| 742 | not be a problem, but it is necessary to check carefully regardless. | ||
| 743 | For example, if multiple independent list updates must be seen as | ||
| 744 | a single atomic update, converting to RCU will require special care. | ||
| 745 | |||
| 746 | Also, the presence of synchronize_rcu() means that the RCU version of | ||
| 747 | delete() can now block. If this is a problem, there is a callback-based | ||
| 748 | mechanism that never blocks, namely call_rcu(), which can be used in | ||
| 749 | place of synchronize_rcu(). | ||
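
For example, delete() from the listing above might be restructured as
follows. This is only a sketch: the rcu field added to struct el and
the el_free_rcu() helper are illustrative, and list_del_rcu() is used
so that a concurrently running reader can still traverse past the
removed element:

	struct el {
		struct list_head list;
		long key;
		spinlock_t mutex;
		int data;
		struct rcu_head rcu;	/* for call_rcu() */
	};

	static void el_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct el, rcu));
	}

	int delete(long key)
	{
		struct el *p;

		spin_lock(&listmutex);
		list_for_each_entry(p, head, lp) {
			if (p->key == key) {
				list_del_rcu(&p->list);
				spin_unlock(&listmutex);
				call_rcu(&p->rcu, el_free_rcu);	/* never blocks */
				return 1;
			}
		}
		spin_unlock(&listmutex);
		return 0;
	}

The tradeoff is that the memory is not reclaimed until a grace period
has elapsed, so update-heavy workloads can build up a backlog of
pending callbacks.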
| 750 | |||
| 751 | |||
| 752 | 7. FULL LIST OF RCU APIs | ||
| 753 | |||
| 754 | The RCU APIs are documented in docbook-format header comments in the | ||
| 755 | Linux-kernel source code, but it helps to have a full list of the | ||
| 756 | APIs, since there does not appear to be a way to categorize them | ||
| 757 | in docbook. Here is the list, by category. | ||
| 758 | |||
| 759 | Markers for RCU read-side critical sections: | ||
| 760 | |||
| 761 | rcu_read_lock | ||
| 762 | rcu_read_unlock | ||
| 763 | rcu_read_lock_bh | ||
| 764 | rcu_read_unlock_bh | ||
| 765 | |||
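The _bh variants mark read-side critical sections for data that is
updated from softirq context; rcu_read_lock_bh() also disables softirq
processing on the local CPU. A reader sketch (the pointer and consumer
names are illustrative):

	rcu_read_lock_bh();
	p = rcu_dereference(bh_updated_ptr);
	if (p != NULL)
		use(p);
	rcu_read_unlock_bh();

Updaters of such data pair with call_rcu_bh() from the grace-period
list below.
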
| 766 | RCU pointer/list traversal: | ||
| 767 | |||
| 768 | rcu_dereference | ||
| 769 | list_for_each_rcu (to be deprecated in favor of | ||
| 770 | list_for_each_entry_rcu) | ||
| 771 | list_for_each_safe_rcu (deprecated, not used) | ||
| 772 | list_for_each_entry_rcu | ||
| 773 | list_for_each_continue_rcu (to be deprecated in favor of new | ||
| 774 | list_for_each_entry_continue_rcu) | ||
| 775 | hlist_for_each_rcu (to be deprecated in favor of | ||
| 776 | hlist_for_each_entry_rcu) | ||
| 777 | hlist_for_each_entry_rcu | ||
| 778 | |||
| 779 | RCU pointer update: | ||
| 780 | |||
| 781 | rcu_assign_pointer | ||
| 782 | list_add_rcu | ||
| 783 | list_add_tail_rcu | ||
| 784 | list_del_rcu | ||
| 785 | list_replace_rcu | ||
| 786 | hlist_del_rcu | ||
| 787 | hlist_add_head_rcu | ||
| 788 | |||
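These update-side primitives pair with rcu_dereference() from the
traversal list above. A minimal publish/subscribe sketch (struct foo
and gbl_foo are made-up names, and error handling is omitted):

	struct foo {
		int a;
	};
	static struct foo *gbl_foo;

	void publish(int new_a)
	{
		struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

		p->a = new_a;
		rcu_assign_pointer(gbl_foo, p);	/* init visible before pointer */
	}

	int subscribe(void)
	{
		struct foo *p;
		int a;

		rcu_read_lock();
		p = rcu_dereference(gbl_foo);
		a = p->a;
		rcu_read_unlock();
		return a;
	}

Replacing a previously published pointer would additionally require a
grace period before the old structure could be freed.
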
| 789 | RCU grace period: | ||
| 790 | |||
| 791 | synchronize_kernel (deprecated) | ||
| 792 | synchronize_net | ||
| 793 | synchronize_sched | ||
| 794 | synchronize_rcu | ||
| 795 | call_rcu | ||
| 796 | call_rcu_bh | ||
| 797 | |||
| 798 | See the comment headers in the source code (or the docbook generated | ||
| 799 | from them) for more information. | ||
| 800 | |||
| 801 | |||
| 802 | 8. ANSWERS TO QUICK QUIZZES | ||
| 803 | |||
| 804 | Quick Quiz #1: Why is this argument naive? How could a deadlock | ||
| 805 | occur when using this algorithm in a real-world Linux | ||
| 806 | kernel? [Referring to the lock-based "toy" RCU | ||
| 807 | algorithm.] | ||
| 808 | |||
| 809 | Answer: Consider the following sequence of events: | ||
| 810 | |||
| 811 | 1. CPU 0 acquires some unrelated lock, call it | ||
| 812 | "problematic_lock". | ||
| 813 | |||
| 814 | 2. CPU 1 enters synchronize_rcu(), write-acquiring | ||
| 815 | rcu_gp_mutex. | ||
| 816 | |||
| 817 | 3. CPU 0 enters rcu_read_lock(), but must wait | ||
| 818 | because CPU 1 holds rcu_gp_mutex. | ||
| 819 | |||
| 820 | 4. CPU 1 is interrupted, and the irq handler | ||
| 821 | attempts to acquire problematic_lock. | ||
| 822 | |||
| 823 | The system is now deadlocked. | ||
| 824 | |||
| 825 | One way to avoid this deadlock is to use an approach like | ||
| 826 | that of CONFIG_PREEMPT_RT, where all normal spinlocks | ||
| 827 | become blocking locks, and all irq handlers execute in | ||
| 828 | the context of special tasks. In this case, in step 4 | ||
| 829 | above, the irq handler would block, allowing CPU 1 to | ||
| 830 | release rcu_gp_mutex, avoiding the deadlock. | ||
| 831 | |||
| 832 | Even in the absence of deadlock, this RCU implementation | ||
| 833 | allows latency to "bleed" from readers to other | ||
| 834 | readers through synchronize_rcu(). To see this, | ||
| 835 | consider task A in an RCU read-side critical section | ||
| 836 | (thus read-holding rcu_gp_mutex), task B blocked | ||
| 837 | attempting to write-acquire rcu_gp_mutex, and | ||
| 838 | task C blocked in rcu_read_lock() attempting to | ||
| 839 | read-acquire rcu_gp_mutex. Task A's RCU read-side | ||
| 840 | latency is holding up task C, albeit indirectly via | ||
| 841 | task B. | ||
| 842 | |||
| 843 | Realtime RCU implementations therefore use a counter-based | ||
| 844 | approach where tasks in RCU read-side critical sections | ||
| 845 | cannot be blocked by tasks executing synchronize_rcu(). | ||
| 846 | |||
| 847 | Quick Quiz #2: Give an example where Classic RCU's read-side | ||
| 848 | overhead is -negative-. | ||
| 849 | |||
| 850 | Answer: Imagine a single-CPU system with a non-CONFIG_PREEMPT | ||
| 851 | kernel where a routing table is used by process-context | ||
| 852 | code, but can be updated by irq-context code (for example, | ||
| 853 | by an "ICMP REDIRECT" packet). The usual way of handling | ||
| 854 | this would be to have the process-context code disable | ||
| 855 | interrupts while searching the routing table. Use of | ||
| 856 | RCU allows such interrupt-disabling to be dispensed with. | ||
| 857 | Thus, without RCU, you pay the cost of disabling interrupts, | ||
| 858 | and with RCU you don't. | ||
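
Concretely, the two versions of the lookup might compare as follows (a
sketch only; table_search() stands in for the real routing-table walk):

	/* Without RCU: exclude irq-context updaters by disabling irqs. */
	int lookup_irq(long key, int *result)
	{
		unsigned long flags;
		int ret;

		local_irq_save(flags);	/* paid on every lookup */
		ret = table_search(key, result);
		local_irq_restore(flags);
		return ret;
	}

	/* With RCU on a single CPU in a non-CONFIG_PREEMPT kernel, the
	 * read-side markers compile to nothing at all. */
	int lookup_rcu(long key, int *result)
	{
		int ret;

		rcu_read_lock();	/* no-op in this configuration */
		ret = table_search(key, result);
		rcu_read_unlock();
		return ret;
	}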
| 859 | |||
| 860 | One can argue that the overhead of RCU in this | ||
| 861 | case is negative with respect to the single-CPU | ||
| 862 | interrupt-disabling approach. Others might argue that | ||
| 863 | the overhead of RCU is merely zero, and that replacing | ||
| 864 | the positive overhead of the interrupt-disabling scheme | ||
| 865 | with the zero-overhead RCU scheme does not constitute | ||
| 866 | negative overhead. | ||
| 867 | |||
| 868 | In real life, of course, things are more complex. But | ||
| 869 | even the theoretical possibility of negative overhead for | ||
| 870 | a synchronization primitive is a bit unexpected. ;-) | ||
| 871 | |||
| 872 | Quick Quiz #3: If it is illegal to block in an RCU read-side | ||
| 873 | critical section, what the heck do you do in | ||
| 874 | PREEMPT_RT, where normal spinlocks can block??? | ||
| 875 | |||
| 876 | Answer: Just as PREEMPT_RT permits preemption of spinlock | ||
| 877 | critical sections, it permits preemption of RCU | ||
| 878 | read-side critical sections. It also permits | ||
| 879 | spinlocks blocking while in RCU read-side critical | ||
| 880 | sections. | ||
| 881 | |||
| 882 | Why the apparent inconsistency? Because it is | ||
| 883 | possible to use priority boosting to keep the RCU | ||
| 884 | grace periods short if need be (for example, if running | ||
| 885 | short of memory). In contrast, if blocking waiting | ||
| 886 | for (say) network reception, there is no way to know | ||
| 887 | what should be boosted. Especially given that the | ||
| 888 | process we need to boost might well be a human being | ||
| 889 | who just went out for a pizza or something. And although | ||
| 890 | a computer-operated cattle prod might arouse serious | ||
| 891 | interest, it might also provoke serious objections. | ||
| 892 | Besides, how does the computer know what pizza parlor | ||
| 893 | the human being went to??? | ||
| 894 | |||
| 895 | |||
| 896 | ACKNOWLEDGEMENTS | ||
| 897 | |||
| 898 | My thanks to the people who helped make this human-readable, including | ||
| 899 | Jon Walpole, Josh Triplett, Serge Hallyn, and Suzanne Wood. | ||
| 900 | |||
| 901 | |||
| 902 | For more information, see http://www.rdrop.com/users/paulmck/RCU. | ||
diff --git a/Documentation/cpu-freq/cpufreq-stats.txt b/Documentation/cpu-freq/cpufreq-stats.txt index e2d1e760b4ba..6a82948ff4bd 100644 --- a/Documentation/cpu-freq/cpufreq-stats.txt +++ b/Documentation/cpu-freq/cpufreq-stats.txt | |||
| @@ -36,7 +36,7 @@ cpufreq stats provides following statistics (explained in detail below). | |||
| 36 | 36 | ||
| 37 | All the statistics will be from the time the stats driver has been inserted | 37 | All the statistics will be from the time the stats driver has been inserted |
| 38 | to the time when a read of a particular statistic is done. Obviously, stats | 38 | to the time when a read of a particular statistic is done. Obviously, stats |
| 39 | driver will not have any information about the the frequcny transitions before | 39 | driver will not have any information about the frequency transitions before |
| 40 | the stats driver insertion. | 40 | the stats driver insertion. |
| 41 | 41 | ||
| 42 | -------------------------------------------------------------------------------- | 42 | -------------------------------------------------------------------------------- |
diff --git a/Documentation/cpusets.txt b/Documentation/cpusets.txt index 47f4114fbf54..d17b7d2dd771 100644 --- a/Documentation/cpusets.txt +++ b/Documentation/cpusets.txt | |||
| @@ -277,7 +277,7 @@ rewritten to the 'tasks' file of its cpuset. This is done to avoid | |||
| 277 | impacting the scheduler code in the kernel with a check for changes | 277 | impacting the scheduler code in the kernel with a check for changes |
| 278 | in a tasks processor placement. | 278 | in a tasks processor placement. |
| 279 | 279 | ||
| 280 | There is an exception to the above. If hotplug funtionality is used | 280 | There is an exception to the above. If hotplug functionality is used |
| 281 | to remove all the CPUs that are currently assigned to a cpuset, | 281 | to remove all the CPUs that are currently assigned to a cpuset, |
| 282 | then the kernel will automatically update the cpus_allowed of all | 282 | then the kernel will automatically update the cpus_allowed of all |
| 283 | tasks attached to CPUs in that cpuset to allow all CPUs. When memory | 283 | tasks attached to CPUs in that cpuset to allow all CPUs. When memory |
diff --git a/Documentation/crypto/descore-readme.txt b/Documentation/crypto/descore-readme.txt index 166474c2ee0b..16e9e6350755 100644 --- a/Documentation/crypto/descore-readme.txt +++ b/Documentation/crypto/descore-readme.txt | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | Below is the orginal README file from the descore.shar package. | 1 | Below is the original README file from the descore.shar package. |
| 2 | ------------------------------------------------------------------------------ | 2 | ------------------------------------------------------------------------------ |
| 3 | 3 | ||
| 4 | des - fast & portable DES encryption & decryption. | 4 | des - fast & portable DES encryption & decryption. |
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 5f95d4b3cab1..784e08c1c80a 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt | |||
| @@ -17,14 +17,6 @@ Who: Greg Kroah-Hartman <greg@kroah.com> | |||
| 17 | 17 | ||
| 18 | --------------------------- | 18 | --------------------------- |
| 19 | 19 | ||
| 20 | What: ACPI S4bios support | ||
| 21 | When: May 2005 | ||
| 22 | Why: Noone uses it, and it probably does not work, anyway. swsusp is | ||
| 23 | faster, more reliable, and people are actually using it. | ||
| 24 | Who: Pavel Machek <pavel@suse.cz> | ||
| 25 | |||
| 26 | --------------------------- | ||
| 27 | |||
| 28 | What: io_remap_page_range() (macro or function) | 20 | What: io_remap_page_range() (macro or function) |
| 29 | When: September 2005 | 21 | When: September 2005 |
| 30 | Why: Replaced by io_remap_pfn_range() which allows more memory space | 22 | Why: Replaced by io_remap_pfn_range() which allows more memory space |
diff --git a/Documentation/ioctl/cdrom.txt b/Documentation/ioctl/cdrom.txt index 4ccdcc6fe364..8ec32cc49eb1 100644 --- a/Documentation/ioctl/cdrom.txt +++ b/Documentation/ioctl/cdrom.txt | |||
| @@ -878,7 +878,7 @@ DVD_READ_STRUCT Read structure | |||
| 878 | 878 | ||
| 879 | error returns: | 879 | error returns: |
| 880 | EINVAL physical.layer_num exceeds number of layers | 880 | EINVAL physical.layer_num exceeds number of layers |
| 881 | EIO Recieved invalid response from drive | 881 | EIO Received invalid response from drive |
| 882 | 882 | ||
| 883 | 883 | ||
| 884 | 884 | ||
diff --git a/Documentation/mono.txt b/Documentation/mono.txt index 6739ab9615ef..807a0c7b4737 100644 --- a/Documentation/mono.txt +++ b/Documentation/mono.txt | |||
| @@ -30,7 +30,7 @@ other program after you have done the following: | |||
| 30 | Read the file 'binfmt_misc.txt' in this directory to know | 30 | Read the file 'binfmt_misc.txt' in this directory to know |
| 31 | more about the configuration process. | 31 | more about the configuration process. |
| 32 | 32 | ||
| 33 | 3) Add the following enries to /etc/rc.local or similar script | 33 | 3) Add the following entries to /etc/rc.local or similar script |
| 34 | to be run at system startup: | 34 | to be run at system startup: |
| 35 | 35 | ||
| 36 | # Insert BINFMT_MISC module into the kernel | 36 | # Insert BINFMT_MISC module into the kernel |
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt index 24d029455baa..a55f0f95b171 100644 --- a/Documentation/networking/bonding.txt +++ b/Documentation/networking/bonding.txt | |||
| @@ -1241,7 +1241,7 @@ traffic while still maintaining carrier on. | |||
| 1241 | 1241 | ||
| 1242 | If running SNMP agents, the bonding driver should be loaded | 1242 | If running SNMP agents, the bonding driver should be loaded |
| 1243 | before any network drivers participating in a bond. This requirement | 1243 | before any network drivers participating in a bond. This requirement |
| 1244 | is due to the the interface index (ipAdEntIfIndex) being associated to | 1244 | is due to the interface index (ipAdEntIfIndex) being associated to |
| 1245 | the first interface found with a given IP address. That is, there is | 1245 | the first interface found with a given IP address. That is, there is |
| 1246 | only one ipAdEntIfIndex for each IP address. For example, if eth0 and | 1246 | only one ipAdEntIfIndex for each IP address. For example, if eth0 and |
| 1247 | eth1 are slaves of bond0 and the driver for eth0 is loaded before the | 1247 | eth1 are slaves of bond0 and the driver for eth0 is loaded before the |
| @@ -1937,7 +1937,7 @@ switches currently available support 802.3ad. | |||
| 1937 | If not explicitly configured (with ifconfig or ip link), the | 1937 | If not explicitly configured (with ifconfig or ip link), the |
| 1938 | MAC address of the bonding device is taken from its first slave | 1938 | MAC address of the bonding device is taken from its first slave |
| 1939 | device. This MAC address is then passed to all following slaves and | 1939 | device. This MAC address is then passed to all following slaves and |
| 1940 | remains persistent (even if the the first slave is removed) until the | 1940 | remains persistent (even if the first slave is removed) until the |
| 1941 | bonding device is brought down or reconfigured. | 1941 | bonding device is brought down or reconfigured. |
| 1942 | 1942 | ||
| 1943 | If you wish to change the MAC address, you can set it with | 1943 | If you wish to change the MAC address, you can set it with |
diff --git a/Documentation/networking/wan-router.txt b/Documentation/networking/wan-router.txt index aea20cd2a56e..c96897aa08b6 100644 --- a/Documentation/networking/wan-router.txt +++ b/Documentation/networking/wan-router.txt | |||
| @@ -355,7 +355,7 @@ REVISION HISTORY | |||
| 355 | There is no functional difference between the two packages | 355 | There is no functional difference between the two packages |
| 356 | 356 | ||
| 357 | 2.0.7 Aug 26, 1999 o Merged X25API code into WANPIPE. | 357 | 2.0.7 Aug 26, 1999 o Merged X25API code into WANPIPE. |
| 358 | o Fixed a memeory leak for X25API | 358 | o Fixed a memory leak for X25API |
| 359 | o Updated the X25API code for 2.2.X kernels. | 359 | o Updated the X25API code for 2.2.X kernels. |
| 360 | o Improved NEM handling. | 360 | o Improved NEM handling. |
| 361 | 361 | ||
| @@ -514,7 +514,7 @@ beta2-2.2.0 Jan 8 2001 | |||
| 514 | o Patches for 2.4.0 kernel | 514 | o Patches for 2.4.0 kernel |
| 515 | o Patches for 2.2.18 kernel | 515 | o Patches for 2.2.18 kernel |
| 516 | o Minor updates to PPP and CHLDC drivers. | 516 | o Minor updates to PPP and CHLDC drivers. |
| 517 | Note: No functinal difference. | 517 | Note: No functional difference. |
| 518 | 518 | ||
| 519 | beta3-2.2.9 Jan 10 2001 | 519 | beta3-2.2.9 Jan 10 2001 |
| 520 | o I missed the 2.2.18 kernel patches in beta2-2.2.0 | 520 | o I missed the 2.2.18 kernel patches in beta2-2.2.0 |
diff --git a/Documentation/pci.txt b/Documentation/pci.txt index 76d28d033657..711210b38f5f 100644 --- a/Documentation/pci.txt +++ b/Documentation/pci.txt | |||
| @@ -84,7 +84,7 @@ Each entry consists of: | |||
| 84 | 84 | ||
| 85 | Most drivers don't need to use the driver_data field. Best practice | 85 | Most drivers don't need to use the driver_data field. Best practice |
| 86 | for use of driver_data is to use it as an index into a static list of | 86 | for use of driver_data is to use it as an index into a static list of |
| 87 | equivalant device types, not to use it as a pointer. | 87 | equivalent device types, not to use it as a pointer. |
| 88 | 88 | ||
| 89 | Have a table entry {PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID} | 89 | Have a table entry {PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID} |
| 90 | to have probe() called for every PCI device known to the system. | 90 | to have probe() called for every PCI device known to the system. |
diff --git a/Documentation/powerpc/eeh-pci-error-recovery.txt b/Documentation/powerpc/eeh-pci-error-recovery.txt index 2bfe71beec5b..e75d7474322c 100644 --- a/Documentation/powerpc/eeh-pci-error-recovery.txt +++ b/Documentation/powerpc/eeh-pci-error-recovery.txt | |||
| @@ -134,7 +134,7 @@ pci_get_device_by_addr() will find the pci device associated | |||
| 134 | with that address (if any). | 134 | with that address (if any). |
| 135 | 135 | ||
| 136 | The default include/asm-ppc64/io.h macros readb(), inb(), insb(), | 136 | The default include/asm-ppc64/io.h macros readb(), inb(), insb(), |
| 137 | etc. include a check to see if the the i/o read returned all-0xff's. | 137 | etc. include a check to see if the i/o read returned all-0xff's. |
| 138 | If so, these make a call to eeh_dn_check_failure(), which in turn | 138 | If so, these make a call to eeh_dn_check_failure(), which in turn |
| 139 | asks the firmware if the all-ff's value is the sign of a true EEH | 139 | asks the firmware if the all-ff's value is the sign of a true EEH |
| 140 | error. If it is not, processing continues as normal. The grand | 140 | error. If it is not, processing continues as normal. The grand |
diff --git a/Documentation/s390/s390dbf.txt b/Documentation/s390/s390dbf.txt index e24fdeada970..e321a8ed2a2d 100644 --- a/Documentation/s390/s390dbf.txt +++ b/Documentation/s390/s390dbf.txt | |||
| @@ -468,7 +468,7 @@ The hex_ascii view shows the data field in hex and ascii representation | |||
| 468 | The raw view returns a bytestream as the debug areas are stored in memory. | 468 | The raw view returns a bytestream as the debug areas are stored in memory. |
| 469 | 469 | ||
| 470 | The sprintf view formats the debug entries in the same way as the sprintf | 470 | The sprintf view formats the debug entries in the same way as the sprintf |
| 471 | function would do. The sprintf event/expection fuctions write to the | 471 | function would do. The sprintf event/expection functions write to the |
| 472 | debug entry a pointer to the format string (size = sizeof(long)) | 472 | debug entry a pointer to the format string (size = sizeof(long)) |
| 473 | and for each vararg a long value. So e.g. for a debug entry with a format | 473 | and for each vararg a long value. So e.g. for a debug entry with a format |
| 474 | string plus two varargs one would need to allocate a (3 * sizeof(long)) | 474 | string plus two varargs one would need to allocate a (3 * sizeof(long)) |
diff --git a/Documentation/scsi/ibmmca.txt b/Documentation/scsi/ibmmca.txt index 2814491600ff..2ffb3ae0ef4d 100644 --- a/Documentation/scsi/ibmmca.txt +++ b/Documentation/scsi/ibmmca.txt | |||
| @@ -344,7 +344,7 @@ | |||
| 344 | /proc/scsi/ibmmca/<host_no>. ibmmca_proc_info() provides this information. | 344 | /proc/scsi/ibmmca/<host_no>. ibmmca_proc_info() provides this information. |
| 345 | 345 | ||
| 346 | This table is quite informative for interested users. It shows the load | 346 | This table is quite informative for interested users. It shows the load |
| 347 | of commands on the subsystem and wether you are running the bypassed | 347 | of commands on the subsystem and whether you are running the bypassed |
| 348 | (software) or integrated (hardware) SCSI-command set (see below). The | 348 | (software) or integrated (hardware) SCSI-command set (see below). The |
| 349 | amount of accesses is shown. Read, write, modeselect is shown separately | 349 | amount of accesses is shown. Read, write, modeselect is shown separately |
| 350 | in order to help debugging problems with CD-ROMs or tapedrives. | 350 | in order to help debugging problems with CD-ROMs or tapedrives. |
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt index 5c49ba07e709..ebfcdf28485f 100644 --- a/Documentation/sound/alsa/ALSA-Configuration.txt +++ b/Documentation/sound/alsa/ALSA-Configuration.txt | |||
| @@ -1459,7 +1459,7 @@ devices where %i is sound card number from zero to seven. | |||
| 1459 | To auto-load an ALSA driver for OSS services, define the string | 1459 | To auto-load an ALSA driver for OSS services, define the string |
| 1460 | 'sound-slot-%i' where %i means the slot number for OSS, which | 1460 | 'sound-slot-%i' where %i means the slot number for OSS, which |
| 1461 | corresponds to the card index of ALSA. Usually, define this | 1461 | corresponds to the card index of ALSA. Usually, define this |
| 1462 | as the the same card module. | 1462 | as the same card module. |
| 1463 | 1463 | ||
| 1464 | An example configuration for a single emu10k1 card is like below: | 1464 | An example configuration for a single emu10k1 card is like below: |
| 1465 | ----- /etc/modprobe.conf | 1465 | ----- /etc/modprobe.conf |
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt index 136d817c01ba..baf17b381588 100644 --- a/Documentation/sysrq.txt +++ b/Documentation/sysrq.txt | |||
| @@ -171,7 +171,7 @@ the header 'include/linux/sysrq.h', this will define everything else you need. | |||
| 171 | Next, you must create a sysrq_key_op struct, and populate it with A) the key | 171 | Next, you must create a sysrq_key_op struct, and populate it with A) the key |
| 172 | handler function you will use, B) a help_msg string, that will print when SysRQ | 172 | handler function you will use, B) a help_msg string, that will print when SysRQ |
| 173 | prints help, and C) an action_msg string, that will print right before your | 173 | prints help, and C) an action_msg string, that will print right before your |
| 174 | handler is called. Your handler must conform to the protoype in 'sysrq.h'. | 174 | handler is called. Your handler must conform to the prototype in 'sysrq.h'. |
| 175 | 175 | ||
| 176 | After the sysrq_key_op is created, you can call the macro | 176 | After the sysrq_key_op is created, you can call the macro |
| 177 | register_sysrq_key(int key, struct sysrq_key_op *op_p) that is defined in | 177 | register_sysrq_key(int key, struct sysrq_key_op *op_p) that is defined in |
diff --git a/Documentation/uml/UserModeLinux-HOWTO.txt b/Documentation/uml/UserModeLinux-HOWTO.txt index 0c7b654fec99..544430e39980 100644 --- a/Documentation/uml/UserModeLinux-HOWTO.txt +++ b/Documentation/uml/UserModeLinux-HOWTO.txt | |||
| @@ -2176,7 +2176,7 @@ | |||
| 2176 | If you want to access files on the host machine from inside UML, you | 2176 | If you want to access files on the host machine from inside UML, you |
| 2177 | can treat it as a separate machine and either nfs mount directories | 2177 | can treat it as a separate machine and either nfs mount directories |
| 2178 | from the host or copy files into the virtual machine with scp or rcp. | 2178 | from the host or copy files into the virtual machine with scp or rcp. |
| 2179 | However, since UML is running on the the host, it can access those | 2179 | However, since UML is running on the host, it can access those |
| 2180 | files just like any other process and make them available inside the | 2180 | files just like any other process and make them available inside the |
| 2181 | virtual machine without needing to use the network. | 2181 | virtual machine without needing to use the network. |
| 2182 | 2182 | ||
diff --git a/Documentation/usb/gadget_serial.txt b/Documentation/usb/gadget_serial.txt index a938c3dd13d6..815f5c2301ff 100644 --- a/Documentation/usb/gadget_serial.txt +++ b/Documentation/usb/gadget_serial.txt | |||
| @@ -20,7 +20,7 @@ License along with this program; if not, write to the Free | |||
| 20 | Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, | 20 | Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, |
| 21 | MA 02111-1307 USA. | 21 | MA 02111-1307 USA. |
| 22 | 22 | ||
| 23 | This document and the the gadget serial driver itself are | 23 | This document and the gadget serial driver itself are |
| 24 | Copyright (C) 2004 by Al Borchers (alborchers@steinerpoint.com). | 24 | Copyright (C) 2004 by Al Borchers (alborchers@steinerpoint.com). |
| 25 | 25 | ||
| 26 | If you have questions, problems, or suggestions for this driver | 26 | If you have questions, problems, or suggestions for this driver |
diff --git a/Documentation/video4linux/Zoran b/Documentation/video4linux/Zoran index 01425c21986b..52c94bd7dca1 100644 --- a/Documentation/video4linux/Zoran +++ b/Documentation/video4linux/Zoran | |||
| @@ -222,7 +222,7 @@ was introduced in 1991, is used in the DC10 old | |||
| 222 | can generate: PAL , NTSC , SECAM | 222 | can generate: PAL , NTSC , SECAM |
| 223 | 223 | ||
| 224 | The adv717x, should be able to produce PAL N. But you find nothing PAL N | 224 | The adv717x, should be able to produce PAL N. But you find nothing PAL N |
| 225 | specific in the the registers. Seem that you have to reuse a other standard | 225 | specific in the registers. Seem that you have to reuse a other standard |
| 226 | to generate PAL N, maybe it would work if you use the PAL M settings. | 226 | to generate PAL N, maybe it would work if you use the PAL M settings. |
| 227 | 227 | ||
| 228 | ========================== | 228 | ========================== |
diff --git a/REPORTING-BUGS b/REPORTING-BUGS index 224c34741d32..f9da827a0c18 100644 --- a/REPORTING-BUGS +++ b/REPORTING-BUGS | |||
| @@ -9,7 +9,7 @@ screen please read "Documentation/oops-tracing.txt" before posting your | |||
| 9 | bug report. This explains what you should do with the "Oops" information | 9 | bug report. This explains what you should do with the "Oops" information |
| 10 | to make it useful to the recipient. | 10 | to make it useful to the recipient. |
| 11 | 11 | ||
| 12 | Send the output the maintainer of the kernel area that seems to | 12 | Send the output to the maintainer of the kernel area that seems to |
| 13 | be involved with the problem. Don't worry too much about getting the | 13 | be involved with the problem. Don't worry too much about getting the |
| 14 | wrong person. If you are unsure send it to the person responsible for the | 14 | wrong person. If you are unsure send it to the person responsible for the |
| 15 | code relevant to what you were doing. If it occurs repeatably try and | 15 | code relevant to what you were doing. If it occurs repeatably try and |
| @@ -18,15 +18,15 @@ The list of maintainers is in the MAINTAINERS file in this directory. | |||
| 18 | 18 | ||
| 19 | If it is a security bug, please copy the Security Contact listed | 19 | If it is a security bug, please copy the Security Contact listed |
| 20 | in the MAINTAINERS file. They can help coordinate bugfix and disclosure. | 20 | in the MAINTAINERS file. They can help coordinate bugfix and disclosure. |
| 21 | See Documentation/SecurityBugs for more infomation. | 21 | See Documentation/SecurityBugs for more information. |
| 22 | 22 | ||
| 23 | If you are totally stumped as to whom to send the report, send it to | 23 | If you are totally stumped as to whom to send the report, send it to |
| 24 | linux-kernel@vger.kernel.org. (For more information on the linux-kernel | 24 | linux-kernel@vger.kernel.org. (For more information on the linux-kernel |
| 25 | mailing list see http://www.tux.org/lkml/). | 25 | mailing list see http://www.tux.org/lkml/). |
| 26 | 26 | ||
| 27 | This is a suggested format for a bug report sent to the Linux kernel mailing | 27 | This is a suggested format for a bug report sent to the Linux kernel mailing |
| 28 | list. Having a standardized bug report form makes it easier for you not to | 28 | list. Having a standardized bug report form makes it easier for you not to |
| 29 | overlook things, and easier for the developers to find the pieces of | 29 | overlook things, and easier for the developers to find the pieces of |
| 30 | information they're really interested in. Don't feel you have to follow it. | 30 | information they're really interested in. Don't feel you have to follow it. |
| 31 | 31 | ||
| 32 | First run the ver_linux script included as scripts/ver_linux, which | 32 | First run the ver_linux script included as scripts/ver_linux, which |
| @@ -35,9 +35,9 @@ the command "sh scripts/ver_linux". | |||
| 35 | 35 | ||
| 36 | Use that information to fill in all fields of the bug report form, and | 36 | Use that information to fill in all fields of the bug report form, and |
| 37 | post it to the mailing list with a subject of "PROBLEM: <one line | 37 | post it to the mailing list with a subject of "PROBLEM: <one line |
| 38 | summary from [1.]>" for easy identification by the developers | 38 | summary from [1.]>" for easy identification by the developers. |
| 39 | 39 | ||
| 40 | [1.] One line summary of the problem: | 40 | [1.] One line summary of the problem: |
| 41 | [2.] Full description of the problem/report: | 41 | [2.] Full description of the problem/report: |
| 42 | [3.] Keywords (i.e., modules, networking, kernel): | 42 | [3.] Keywords (i.e., modules, networking, kernel): |
| 43 | [4.] Kernel version (from /proc/version): | 43 | [4.] Kernel version (from /proc/version): |
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c index fc5ef90c4fc9..24ae9a366073 100644 --- a/arch/alpha/kernel/alpha_ksyms.c +++ b/arch/alpha/kernel/alpha_ksyms.c | |||
| @@ -185,15 +185,6 @@ EXPORT_SYMBOL(smp_num_cpus); | |||
| 185 | EXPORT_SYMBOL(smp_call_function); | 185 | EXPORT_SYMBOL(smp_call_function); |
| 186 | EXPORT_SYMBOL(smp_call_function_on_cpu); | 186 | EXPORT_SYMBOL(smp_call_function_on_cpu); |
| 187 | EXPORT_SYMBOL(_atomic_dec_and_lock); | 187 | EXPORT_SYMBOL(_atomic_dec_and_lock); |
| 188 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 189 | EXPORT_SYMBOL(_raw_spin_unlock); | ||
| 190 | EXPORT_SYMBOL(debug_spin_lock); | ||
| 191 | EXPORT_SYMBOL(debug_spin_trylock); | ||
| 192 | #endif | ||
| 193 | #ifdef CONFIG_DEBUG_RWLOCK | ||
| 194 | EXPORT_SYMBOL(_raw_write_lock); | ||
| 195 | EXPORT_SYMBOL(_raw_read_lock); | ||
| 196 | #endif | ||
| 197 | EXPORT_SYMBOL(cpu_present_mask); | 188 | EXPORT_SYMBOL(cpu_present_mask); |
| 198 | #endif /* CONFIG_SMP */ | 189 | #endif /* CONFIG_SMP */ |
| 199 | 190 | ||
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 2b034182a0ca..0636116210d2 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c | |||
| @@ -1154,8 +1154,7 @@ osf_usleep_thread(struct timeval32 __user *sleep, struct timeval32 __user *remai | |||
| 1154 | 1154 | ||
| 1155 | ticks = timeval_to_jiffies(&tmp); | 1155 | ticks = timeval_to_jiffies(&tmp); |
| 1156 | 1156 | ||
| 1157 | current->state = TASK_INTERRUPTIBLE; | 1157 | ticks = schedule_timeout_interruptible(ticks); |
| 1158 | ticks = schedule_timeout(ticks); | ||
| 1159 | 1158 | ||
| 1160 | if (remain) { | 1159 | if (remain) { |
| 1161 | jiffies_to_timeval(ticks, &tmp); | 1160 | jiffies_to_timeval(ticks, &tmp); |
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index e211aa7404e6..da0be3465791 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c | |||
| @@ -989,175 +989,3 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page, | |||
| 989 | 989 | ||
| 990 | preempt_enable(); | 990 | preempt_enable(); |
| 991 | } | 991 | } |
| 992 | |||
| 993 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 994 | void | ||
| 995 | _raw_spin_unlock(spinlock_t * lock) | ||
| 996 | { | ||
| 997 | mb(); | ||
| 998 | lock->lock = 0; | ||
| 999 | |||
| 1000 | lock->on_cpu = -1; | ||
| 1001 | lock->previous = NULL; | ||
| 1002 | lock->task = NULL; | ||
| 1003 | lock->base_file = "none"; | ||
| 1004 | lock->line_no = 0; | ||
| 1005 | } | ||
| 1006 | |||
| 1007 | void | ||
| 1008 | debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no) | ||
| 1009 | { | ||
| 1010 | long tmp; | ||
| 1011 | long stuck; | ||
| 1012 | void *inline_pc = __builtin_return_address(0); | ||
| 1013 | unsigned long started = jiffies; | ||
| 1014 | int printed = 0; | ||
| 1015 | int cpu = smp_processor_id(); | ||
| 1016 | |||
| 1017 | stuck = 1L << 30; | ||
| 1018 | try_again: | ||
| 1019 | |||
| 1020 | /* Use sub-sections to put the actual loop at the end | ||
| 1021 | of this object file's text section so as to perfect | ||
| 1022 | branch prediction. */ | ||
| 1023 | __asm__ __volatile__( | ||
| 1024 | "1: ldl_l %0,%1\n" | ||
| 1025 | " subq %2,1,%2\n" | ||
| 1026 | " blbs %0,2f\n" | ||
| 1027 | " or %0,1,%0\n" | ||
| 1028 | " stl_c %0,%1\n" | ||
| 1029 | " beq %0,3f\n" | ||
| 1030 | "4: mb\n" | ||
| 1031 | ".subsection 2\n" | ||
| 1032 | "2: ldl %0,%1\n" | ||
| 1033 | " subq %2,1,%2\n" | ||
| 1034 | "3: blt %2,4b\n" | ||
| 1035 | " blbs %0,2b\n" | ||
| 1036 | " br 1b\n" | ||
| 1037 | ".previous" | ||
| 1038 | : "=r" (tmp), "=m" (lock->lock), "=r" (stuck) | ||
| 1039 | : "m" (lock->lock), "2" (stuck) : "memory"); | ||
| 1040 | |||
| 1041 | if (stuck < 0) { | ||
| 1042 | printk(KERN_WARNING | ||
| 1043 | "%s:%d spinlock stuck in %s at %p(%d)" | ||
| 1044 | " owner %s at %p(%d) %s:%d\n", | ||
| 1045 | base_file, line_no, | ||
| 1046 | current->comm, inline_pc, cpu, | ||
| 1047 | lock->task->comm, lock->previous, | ||
| 1048 | lock->on_cpu, lock->base_file, lock->line_no); | ||
| 1049 | stuck = 1L << 36; | ||
| 1050 | printed = 1; | ||
| 1051 | goto try_again; | ||
| 1052 | } | ||
| 1053 | |||
| 1054 | /* Exiting. Got the lock. */ | ||
| 1055 | lock->on_cpu = cpu; | ||
| 1056 | lock->previous = inline_pc; | ||
| 1057 | lock->task = current; | ||
| 1058 | lock->base_file = base_file; | ||
| 1059 | lock->line_no = line_no; | ||
| 1060 | |||
| 1061 | if (printed) { | ||
| 1062 | printk(KERN_WARNING | ||
| 1063 | "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n", | ||
| 1064 | base_file, line_no, current->comm, inline_pc, | ||
| 1065 | cpu, jiffies - started); | ||
| 1066 | } | ||
| 1067 | } | ||
| 1068 | |||
| 1069 | int | ||
| 1070 | debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no) | ||
| 1071 | { | ||
| 1072 | int ret; | ||
| 1073 | if ((ret = !test_and_set_bit(0, lock))) { | ||
| 1074 | lock->on_cpu = smp_processor_id(); | ||
| 1075 | lock->previous = __builtin_return_address(0); | ||
| 1076 | lock->task = current; | ||
| 1077 | } else { | ||
| 1078 | lock->base_file = base_file; | ||
| 1079 | lock->line_no = line_no; | ||
| 1080 | } | ||
| 1081 | return ret; | ||
| 1082 | } | ||
| 1083 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
| 1084 | |||
| 1085 | #ifdef CONFIG_DEBUG_RWLOCK | ||
| 1086 | void _raw_write_lock(rwlock_t * lock) | ||
| 1087 | { | ||
| 1088 | long regx, regy; | ||
| 1089 | int stuck_lock, stuck_reader; | ||
| 1090 | void *inline_pc = __builtin_return_address(0); | ||
| 1091 | |||
| 1092 | try_again: | ||
| 1093 | |||
| 1094 | stuck_lock = 1<<30; | ||
| 1095 | stuck_reader = 1<<30; | ||
| 1096 | |||
| 1097 | __asm__ __volatile__( | ||
| 1098 | "1: ldl_l %1,%0\n" | ||
| 1099 | " blbs %1,6f\n" | ||
| 1100 | " blt %1,8f\n" | ||
| 1101 | " mov 1,%1\n" | ||
| 1102 | " stl_c %1,%0\n" | ||
| 1103 | " beq %1,6f\n" | ||
| 1104 | "4: mb\n" | ||
| 1105 | ".subsection 2\n" | ||
| 1106 | "6: blt %3,4b # debug\n" | ||
| 1107 | " subl %3,1,%3 # debug\n" | ||
| 1108 | " ldl %1,%0\n" | ||
| 1109 | " blbs %1,6b\n" | ||
| 1110 | "8: blt %4,4b # debug\n" | ||
| 1111 | " subl %4,1,%4 # debug\n" | ||
| 1112 | " ldl %1,%0\n" | ||
| 1113 | " blt %1,8b\n" | ||
| 1114 | " br 1b\n" | ||
| 1115 | ".previous" | ||
| 1116 | : "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy), | ||
| 1117 | "=&r" (stuck_lock), "=&r" (stuck_reader) | ||
| 1118 | : "m" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader) : "memory"); | ||
| 1119 | |||
| 1120 | if (stuck_lock < 0) { | ||
| 1121 | printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc); | ||
| 1122 | goto try_again; | ||
| 1123 | } | ||
| 1124 | if (stuck_reader < 0) { | ||
| 1125 | printk(KERN_WARNING "write_lock stuck on readers at %p\n", | ||
| 1126 | inline_pc); | ||
| 1127 | goto try_again; | ||
| 1128 | } | ||
| 1129 | } | ||
| 1130 | |||
| 1131 | void _raw_read_lock(rwlock_t * lock) | ||
| 1132 | { | ||
| 1133 | long regx; | ||
| 1134 | int stuck_lock; | ||
| 1135 | void *inline_pc = __builtin_return_address(0); | ||
| 1136 | |||
| 1137 | try_again: | ||
| 1138 | |||
| 1139 | stuck_lock = 1<<30; | ||
| 1140 | |||
| 1141 | __asm__ __volatile__( | ||
| 1142 | "1: ldl_l %1,%0;" | ||
| 1143 | " blbs %1,6f;" | ||
| 1144 | " subl %1,2,%1;" | ||
| 1145 | " stl_c %1,%0;" | ||
| 1146 | " beq %1,6f;" | ||
| 1147 | "4: mb\n" | ||
| 1148 | ".subsection 2\n" | ||
| 1149 | "6: ldl %1,%0;" | ||
| 1150 | " blt %2,4b # debug\n" | ||
| 1151 | " subl %2,1,%2 # debug\n" | ||
| 1152 | " blbs %1,6b;" | ||
| 1153 | " br 1b\n" | ||
| 1154 | ".previous" | ||
| 1155 | : "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock) | ||
| 1156 | : "m" (*(volatile int *)lock), "2" (stuck_lock) : "memory"); | ||
| 1157 | |||
| 1158 | if (stuck_lock < 0) { | ||
| 1159 | printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc); | ||
| 1160 | goto try_again; | ||
| 1161 | } | ||
| 1162 | } | ||
| 1163 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 0f2899b4159d..11fff042aa81 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
| @@ -326,8 +326,8 @@ config SMP | |||
| 326 | processor machines. On a single processor machine, the kernel will | 326 | processor machines. On a single processor machine, the kernel will |
| 327 | run faster if you say N here. | 327 | run faster if you say N here. |
| 328 | 328 | ||
| 329 | See also the <file:Documentation/smp.tex>, | 329 | See also the <file:Documentation/smp.txt>, |
| 330 | <file:Documentation/smp.txt>, <file:Documentation/i386/IO-APIC.txt>, | 330 | <file:Documentation/i386/IO-APIC.txt>, |
| 331 | <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at | 331 | <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at |
| 332 | <http://www.linuxdoc.org/docs.html#howto>. | 332 | <http://www.linuxdoc.org/docs.html#howto>. |
| 333 | 333 | ||
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 45a5709eaaa4..5d3acff8c596 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug | |||
| @@ -53,7 +53,7 @@ config DEBUG_LL | |||
| 53 | bool "Kernel low-level debugging functions" | 53 | bool "Kernel low-level debugging functions" |
| 54 | depends on DEBUG_KERNEL | 54 | depends on DEBUG_KERNEL |
| 55 | help | 55 | help |
| 56 | Say Y here to include definitions of printascii, printchar, printhex | 56 | Say Y here to include definitions of printascii, printch, printhex |
| 57 | in the kernel. This is helpful if you are debugging code that | 57 | in the kernel. This is helpful if you are debugging code that |
| 58 | executes before the console is initialized. | 58 | executes before the console is initialized. |
| 59 | 59 | ||
diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c index 688a595598c8..d3a04c2a2c85 100644 --- a/arch/arm/common/scoop.c +++ b/arch/arm/common/scoop.c | |||
| @@ -91,7 +91,7 @@ EXPORT_SYMBOL(read_scoop_reg); | |||
| 91 | EXPORT_SYMBOL(write_scoop_reg); | 91 | EXPORT_SYMBOL(write_scoop_reg); |
| 92 | 92 | ||
| 93 | #ifdef CONFIG_PM | 93 | #ifdef CONFIG_PM |
| 94 | static int scoop_suspend(struct device *dev, uint32_t state, uint32_t level) | 94 | static int scoop_suspend(struct device *dev, pm_message_t state, uint32_t level) |
| 95 | { | 95 | { |
| 96 | if (level == SUSPEND_POWER_DOWN) { | 96 | if (level == SUSPEND_POWER_DOWN) { |
| 97 | struct scoop_dev *sdev = dev_get_drvdata(dev); | 97 | struct scoop_dev *sdev = dev_get_drvdata(dev); |
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index db07ce42b3b2..949ec4427f21 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S | |||
| @@ -10,7 +10,7 @@ | |||
| 10 | * This file is included twice in entry-common.S | 10 | * This file is included twice in entry-common.S |
| 11 | */ | 11 | */ |
| 12 | #ifndef NR_syscalls | 12 | #ifndef NR_syscalls |
| 13 | #define NR_syscalls 320 | 13 | #define NR_syscalls 328 |
| 14 | #else | 14 | #else |
| 15 | 15 | ||
| 16 | __syscall_start: | 16 | __syscall_start: |
| @@ -333,6 +333,9 @@ __syscall_start: | |||
| 333 | .long sys_inotify_init | 333 | .long sys_inotify_init |
| 334 | .long sys_inotify_add_watch | 334 | .long sys_inotify_add_watch |
| 335 | .long sys_inotify_rm_watch | 335 | .long sys_inotify_rm_watch |
| 336 | .long sys_mbind_wrapper | ||
| 337 | /* 320 */ .long sys_get_mempolicy | ||
| 338 | .long sys_set_mempolicy | ||
| 336 | __syscall_end: | 339 | __syscall_end: |
| 337 | 340 | ||
| 338 | .rept NR_syscalls - (__syscall_end - __syscall_start) / 4 | 341 | .rept NR_syscalls - (__syscall_end - __syscall_start) / 4 |
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 6281d488ac97..db302c6e5343 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S | |||
| @@ -269,6 +269,10 @@ sys_arm_fadvise64_64_wrapper: | |||
| 269 | str r5, [sp, #4] @ push r5 to stack | 269 | str r5, [sp, #4] @ push r5 to stack |
| 270 | b sys_arm_fadvise64_64 | 270 | b sys_arm_fadvise64_64 |
| 271 | 271 | ||
| 272 | sys_mbind_wrapper: | ||
| 273 | str r5, [sp, #4] | ||
| 274 | b sys_mbind | ||
| 275 | |||
| 272 | /* | 276 | /* |
| 273 | * Note: off_4k (r5) is always units of 4K. If we can't do the requested | 277 | * Note: off_4k (r5) is always units of 4K. If we can't do the requested |
| 274 | * offset, we return EINVAL. | 278 | * offset, we return EINVAL. |
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index 29185acdd9e1..07b5dd453565 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c | |||
| @@ -131,27 +131,12 @@ static struct platform_device corgits_device = { | |||
| 131 | /* | 131 | /* |
| 132 | * MMC/SD Device | 132 | * MMC/SD Device |
| 133 | * | 133 | * |
| 134 | * The card detect interrupt isn't debounced so we delay it by HZ/4 | 134 | * The card detect interrupt isn't debounced so we delay it by 250ms |
| 135 | * to give the card a chance to fully insert/eject. | 135 | * to give the card a chance to fully insert/eject. |
| 136 | */ | 136 | */ |
| 137 | static struct mmc_detect { | 137 | static struct pxamci_platform_data corgi_mci_platform_data; |
| 138 | struct timer_list detect_timer; | ||
| 139 | void *devid; | ||
| 140 | } mmc_detect; | ||
| 141 | 138 | ||
| 142 | static void mmc_detect_callback(unsigned long data) | 139 | static int corgi_mci_init(struct device *dev, irqreturn_t (*corgi_detect_int)(int, void *, struct pt_regs *), void *data) |
| 143 | { | ||
| 144 | mmc_detect_change(mmc_detect.devid); | ||
| 145 | } | ||
| 146 | |||
| 147 | static irqreturn_t corgi_mmc_detect_int(int irq, void *devid, struct pt_regs *regs) | ||
| 148 | { | ||
| 149 | mmc_detect.devid=devid; | ||
| 150 | mod_timer(&mmc_detect.detect_timer, jiffies + HZ/4); | ||
| 151 | return IRQ_HANDLED; | ||
| 152 | } | ||
| 153 | |||
| 154 | static int corgi_mci_init(struct device *dev, irqreturn_t (*unused_detect_int)(int, void *, struct pt_regs *), void *data) | ||
| 155 | { | 140 | { |
| 156 | int err; | 141 | int err; |
| 157 | 142 | ||
| @@ -161,11 +146,9 @@ static int corgi_mci_init(struct device *dev, irqreturn_t (*unused_detect_int)(i | |||
| 161 | pxa_gpio_mode(CORGI_GPIO_nSD_DETECT | GPIO_IN); | 146 | pxa_gpio_mode(CORGI_GPIO_nSD_DETECT | GPIO_IN); |
| 162 | pxa_gpio_mode(CORGI_GPIO_SD_PWR | GPIO_OUT); | 147 | pxa_gpio_mode(CORGI_GPIO_SD_PWR | GPIO_OUT); |
| 163 | 148 | ||
| 164 | init_timer(&mmc_detect.detect_timer); | 149 | corgi_mci_platform_data.detect_delay = msecs_to_jiffies(250); |
| 165 | mmc_detect.detect_timer.function = mmc_detect_callback; | ||
| 166 | mmc_detect.detect_timer.data = (unsigned long) &mmc_detect; | ||
| 167 | 150 | ||
| 168 | err = request_irq(CORGI_IRQ_GPIO_nSD_DETECT, corgi_mmc_detect_int, SA_INTERRUPT, | 151 | err = request_irq(CORGI_IRQ_GPIO_nSD_DETECT, corgi_detect_int, SA_INTERRUPT, |
| 169 | "MMC card detect", data); | 152 | "MMC card detect", data); |
| 170 | if (err) { | 153 | if (err) { |
| 171 | printk(KERN_ERR "corgi_mci_init: MMC/SD: can't request MMC card detect IRQ\n"); | 154 | printk(KERN_ERR "corgi_mci_init: MMC/SD: can't request MMC card detect IRQ\n"); |
| @@ -198,7 +181,6 @@ static int corgi_mci_get_ro(struct device *dev) | |||
| 198 | static void corgi_mci_exit(struct device *dev, void *data) | 181 | static void corgi_mci_exit(struct device *dev, void *data) |
| 199 | { | 182 | { |
| 200 | free_irq(CORGI_IRQ_GPIO_nSD_DETECT, data); | 183 | free_irq(CORGI_IRQ_GPIO_nSD_DETECT, data); |
| 201 | del_timer(&mmc_detect.detect_timer); | ||
| 202 | } | 184 | } |
| 203 | 185 | ||
| 204 | static struct pxamci_platform_data corgi_mci_platform_data = { | 186 | static struct pxamci_platform_data corgi_mci_platform_data = { |
diff --git a/arch/i386/kernel/acpi/wakeup.S b/arch/i386/kernel/acpi/wakeup.S index 44d886c745ec..7c74fe0dc93c 100644 --- a/arch/i386/kernel/acpi/wakeup.S +++ b/arch/i386/kernel/acpi/wakeup.S | |||
| @@ -304,12 +304,6 @@ ret_point: | |||
| 304 | call restore_processor_state | 304 | call restore_processor_state |
| 305 | ret | 305 | ret |
| 306 | 306 | ||
| 307 | ENTRY(do_suspend_lowlevel_s4bios) | ||
| 308 | call save_processor_state | ||
| 309 | call save_registers | ||
| 310 | call acpi_enter_sleep_state_s4bios | ||
| 311 | ret | ||
| 312 | |||
| 313 | ALIGN | 307 | ALIGN |
| 314 | # saved registers | 308 | # saved registers |
| 315 | saved_gdt: .long 0,0 | 309 | saved_gdt: .long 0,0 |
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c index 46ce9b248f55..9ad43be9a01f 100644 --- a/arch/i386/kernel/cpu/common.c +++ b/arch/i386/kernel/cpu/common.c | |||
| @@ -151,7 +151,7 @@ static char __devinit *table_lookup_model(struct cpuinfo_x86 *c) | |||
| 151 | } | 151 | } |
| 152 | 152 | ||
| 153 | 153 | ||
| 154 | void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) | 154 | static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) |
| 155 | { | 155 | { |
| 156 | char *v = c->x86_vendor_id; | 156 | char *v = c->x86_vendor_id; |
| 157 | int i; | 157 | int i; |
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index 1efdc76ae96d..35d3ce26a544 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c | |||
| @@ -573,8 +573,7 @@ static int balanced_irq(void *unused) | |||
| 573 | } | 573 | } |
| 574 | 574 | ||
| 575 | for ( ; ; ) { | 575 | for ( ; ; ) { |
| 576 | set_current_state(TASK_INTERRUPTIBLE); | 576 | time_remaining = schedule_timeout_interruptible(time_remaining); |
| 577 | time_remaining = schedule_timeout(time_remaining); | ||
| 578 | try_to_freeze(); | 577 | try_to_freeze(); |
| 579 | if (time_after(jiffies, | 578 | if (time_after(jiffies, |
| 580 | prev_balance_time+balanced_irq_interval)) { | 579 | prev_balance_time+balanced_irq_interval)) { |
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index 5e4893d2b9f2..c70cd2a08304 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c | |||
| @@ -1330,8 +1330,7 @@ void __cpu_die(unsigned int cpu) | |||
| 1330 | printk ("CPU %d is now offline\n", cpu); | 1330 | printk ("CPU %d is now offline\n", cpu); |
| 1331 | return; | 1331 | return; |
| 1332 | } | 1332 | } |
| 1333 | current->state = TASK_UNINTERRUPTIBLE; | 1333 | msleep(100); |
| 1334 | schedule_timeout(HZ/10); | ||
| 1335 | } | 1334 | } |
| 1336 | printk(KERN_ERR "CPU %u didn't die...\n", cpu); | 1335 | printk(KERN_ERR "CPU %u didn't die...\n", cpu); |
| 1337 | } | 1336 | } |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 4ebbf3974381..8d484204a3ff 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
| @@ -491,12 +491,7 @@ init_handler_platform (pal_min_state_area_t *ms, | |||
| 491 | unw_init_from_interruption(&info, current, pt, sw); | 491 | unw_init_from_interruption(&info, current, pt, sw); |
| 492 | ia64_do_show_stack(&info, NULL); | 492 | ia64_do_show_stack(&info, NULL); |
| 493 | 493 | ||
| 494 | #ifdef CONFIG_SMP | 494 | if (read_trylock(&tasklist_lock)) { |
| 495 | /* read_trylock() would be handy... */ | ||
| 496 | if (!tasklist_lock.write_lock) | ||
| 497 | read_lock(&tasklist_lock); | ||
| 498 | #endif | ||
| 499 | { | ||
| 500 | struct task_struct *g, *t; | 495 | struct task_struct *g, *t; |
| 501 | do_each_thread (g, t) { | 496 | do_each_thread (g, t) { |
| 502 | if (t == current) | 497 | if (t == current) |
| @@ -506,10 +501,6 @@ init_handler_platform (pal_min_state_area_t *ms, | |||
| 506 | show_stack(t, NULL); | 501 | show_stack(t, NULL); |
| 507 | } while_each_thread (g, t); | 502 | } while_each_thread (g, t); |
| 508 | } | 503 | } |
| 509 | #ifdef CONFIG_SMP | ||
| 510 | if (!tasklist_lock.write_lock) | ||
| 511 | read_unlock(&tasklist_lock); | ||
| 512 | #endif | ||
| 513 | 504 | ||
| 514 | printk("\nINIT dump complete. Please reboot now.\n"); | 505 | printk("\nINIT dump complete. Please reboot now.\n"); |
| 515 | while (1); /* hang city if no debugger */ | 506 | while (1); /* hang city if no debugger */ |
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 7622d4ec5f08..1ef3987ebc6a 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig | |||
| @@ -242,8 +242,8 @@ config SMP | |||
| 242 | Y to "Enhanced Real Time Clock Support", below. The "Advanced Power | 242 | Y to "Enhanced Real Time Clock Support", below. The "Advanced Power |
| 243 | Management" code will be disabled if you say Y here. | 243 | Management" code will be disabled if you say Y here. |
| 244 | 244 | ||
| 245 | See also the <file:Documentation/smp.tex>, | 245 | See also the <file:Documentation/smp.txt>, |
| 246 | <file:Documentation/smp.txt> and the SMP-HOWTO available at | 246 | and the SMP-HOWTO available at |
| 247 | <http://www.linuxdoc.org/docs.html#howto>. | 247 | <http://www.linuxdoc.org/docs.html#howto>. |
| 248 | 248 | ||
| 249 | If you don't know what to do here, say N. | 249 | If you don't know what to do here, say N. |
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c index 48b187f2d2b3..a4576ac7e870 100644 --- a/arch/m32r/kernel/smp.c +++ b/arch/m32r/kernel/smp.c | |||
| @@ -892,7 +892,6 @@ unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num, | |||
| 892 | int try) | 892 | int try) |
| 893 | { | 893 | { |
| 894 | spinlock_t *ipilock; | 894 | spinlock_t *ipilock; |
| 895 | unsigned long flags = 0; | ||
| 896 | volatile unsigned long *ipicr_addr; | 895 | volatile unsigned long *ipicr_addr; |
| 897 | unsigned long ipicr_val; | 896 | unsigned long ipicr_val; |
| 898 | unsigned long my_physid_mask; | 897 | unsigned long my_physid_mask; |
| @@ -916,50 +915,27 @@ unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num, | |||
| 916 | * write IPICRi (send IPIi) | 915 | * write IPICRi (send IPIi) |
| 917 | * unlock ipi_lock[i] | 916 | * unlock ipi_lock[i] |
| 918 | */ | 917 | */ |
| 918 | spin_lock(ipilock); | ||
| 919 | __asm__ __volatile__ ( | 919 | __asm__ __volatile__ ( |
| 920 | ";; LOCK ipi_lock[i] \n\t" | 920 | ";; CHECK IPICRi == 0 \n\t" |
| 921 | ".fillinsn \n" | 921 | ".fillinsn \n" |
| 922 | "1: \n\t" | 922 | "1: \n\t" |
| 923 | "mvfc %1, psw \n\t" | 923 | "ld %0, @%1 \n\t" |
| 924 | "clrpsw #0x40 -> nop \n\t" | 924 | "and %0, %4 \n\t" |
| 925 | DCACHE_CLEAR("r4", "r5", "%2") | 925 | "beqz %0, 2f \n\t" |
| 926 | "lock r4, @%2 \n\t" | 926 | "bnez %3, 3f \n\t" |
| 927 | "addi r4, #-1 \n\t" | ||
| 928 | "unlock r4, @%2 \n\t" | ||
| 929 | "mvtc %1, psw \n\t" | ||
| 930 | "bnez r4, 2f \n\t" | ||
| 931 | LOCK_SECTION_START(".balign 4 \n\t") | ||
| 932 | ".fillinsn \n" | ||
| 933 | "2: \n\t" | ||
| 934 | "ld r4, @%2 \n\t" | ||
| 935 | "blez r4, 2b \n\t" | ||
| 936 | "bra 1b \n\t" | 927 | "bra 1b \n\t" |
| 937 | LOCK_SECTION_END | ||
| 938 | ";; CHECK IPICRi == 0 \n\t" | ||
| 939 | ".fillinsn \n" | ||
| 940 | "3: \n\t" | ||
| 941 | "ld %0, @%3 \n\t" | ||
| 942 | "and %0, %6 \n\t" | ||
| 943 | "beqz %0, 4f \n\t" | ||
| 944 | "bnez %5, 5f \n\t" | ||
| 945 | "bra 3b \n\t" | ||
| 946 | ";; WRITE IPICRi (send IPIi) \n\t" | 928 | ";; WRITE IPICRi (send IPIi) \n\t" |
| 947 | ".fillinsn \n" | 929 | ".fillinsn \n" |
| 948 | "4: \n\t" | 930 | "2: \n\t" |
| 949 | "st %4, @%3 \n\t" | 931 | "st %2, @%1 \n\t" |
| 950 | ";; UNLOCK ipi_lock[i] \n\t" | ||
| 951 | ".fillinsn \n" | 932 | ".fillinsn \n" |
| 952 | "5: \n\t" | 933 | "3: \n\t" |
| 953 | "ldi r4, #1 \n\t" | ||
| 954 | "st r4, @%2 \n\t" | ||
| 955 | : "=&r"(ipicr_val) | 934 | : "=&r"(ipicr_val) |
| 956 | : "r"(flags), "r"(&ipilock->slock), "r"(ipicr_addr), | 935 | : "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask) |
| 957 | "r"(mask), "r"(try), "r"(my_physid_mask) | 936 | : "memory" |
| 958 | : "memory", "r4" | ||
| 959 | #ifdef CONFIG_CHIP_M32700_TS1 | ||
| 960 | , "r5" | ||
| 961 | #endif /* CONFIG_CHIP_M32700_TS1 */ | ||
| 962 | ); | 937 | ); |
| 938 | spin_unlock(ipilock); | ||
| 963 | 939 | ||
| 964 | return ipicr_val; | 940 | return ipicr_val; |
| 965 | } | 941 | } |
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c index 4c114ae21793..eff89322ba50 100644 --- a/arch/mips/kernel/irixsig.c +++ b/arch/mips/kernel/irixsig.c | |||
| @@ -440,18 +440,6 @@ struct irix5_siginfo { | |||
| 440 | } stuff; | 440 | } stuff; |
| 441 | }; | 441 | }; |
| 442 | 442 | ||
| 443 | static inline unsigned long timespectojiffies(struct timespec *value) | ||
| 444 | { | ||
| 445 | unsigned long sec = (unsigned) value->tv_sec; | ||
| 446 | long nsec = value->tv_nsec; | ||
| 447 | |||
| 448 | if (sec > (LONG_MAX / HZ)) | ||
| 449 | return LONG_MAX; | ||
| 450 | nsec += 1000000000L / HZ - 1; | ||
| 451 | nsec /= 1000000000L / HZ; | ||
| 452 | return HZ * sec + nsec; | ||
| 453 | } | ||
| 454 | |||
| 455 | asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info, | 443 | asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info, |
| 456 | struct timespec *tp) | 444 | struct timespec *tp) |
| 457 | { | 445 | { |
| @@ -489,14 +477,13 @@ asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info, | |||
| 489 | error = -EINVAL; | 477 | error = -EINVAL; |
| 490 | goto out; | 478 | goto out; |
| 491 | } | 479 | } |
| 492 | expire = timespectojiffies(tp)+(tp->tv_sec||tp->tv_nsec); | 480 | expire = timespec_to_jiffies(tp) + (tp->tv_sec||tp->tv_nsec); |
| 493 | } | 481 | } |
| 494 | 482 | ||
| 495 | while(1) { | 483 | while(1) { |
| 496 | long tmp = 0; | 484 | long tmp = 0; |
| 497 | 485 | ||
| 498 | current->state = TASK_INTERRUPTIBLE; | 486 | expire = schedule_timeout_interruptible(expire); |
| 499 | expire = schedule_timeout(expire); | ||
| 500 | 487 | ||
| 501 | for (i=0; i<=4; i++) | 488 | for (i=0; i<=4; i++) |
| 502 | tmp |= (current->pending.signal.sig[i] & kset.sig[i]); | 489 | tmp |= (current->pending.signal.sig[i] & kset.sig[i]); |
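Two independent cleanups in the irixsig hunk: the file-private timespectojiffies() duplicate gives way to the generic timespec_to_jiffies(), and the manual current->state = TASK_INTERRUPTIBLE / schedule_timeout() pair collapses into schedule_timeout_interruptible(), which sets the task state itself. The sleep pattern in isolation — an illustrative sketch, not the irixsig code:

    #include <linux/jiffies.h>
    #include <linux/sched.h>

    /*
     * Sleep for the interval in *tp. Returns the jiffies remaining if the
     * task was woken early (e.g. by a signal), or 0 on full expiry.
     */
    static long sleep_for_timespec(const struct timespec *tp)
    {
        unsigned long timeout = timespec_to_jiffies(tp);

        /* The helper sets TASK_INTERRUPTIBLE internally, so the state
         * write can no longer be forgotten or misordered. */
        return schedule_timeout_interruptible(timeout);
    }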
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c index b46595462717..4de155699c4f 100644 --- a/arch/mips/kernel/sysirix.c +++ b/arch/mips/kernel/sysirix.c | |||
| @@ -1032,8 +1032,7 @@ bad: | |||
| 1032 | 1032 | ||
| 1033 | asmlinkage int irix_sginap(int ticks) | 1033 | asmlinkage int irix_sginap(int ticks) |
| 1034 | { | 1034 | { |
| 1035 | current->state = TASK_INTERRUPTIBLE; | 1035 | schedule_timeout_interruptible(ticks); |
| 1036 | schedule_timeout(ticks); | ||
| 1037 | return 0; | 1036 | return 0; |
| 1038 | } | 1037 | } |
| 1039 | 1038 | ||
diff --git a/arch/mips/lib/dec_and_lock.c b/arch/mips/lib/dec_and_lock.c index e44e9579bd36..fd82c84a93b7 100644 --- a/arch/mips/lib/dec_and_lock.c +++ b/arch/mips/lib/dec_and_lock.c | |||
| @@ -20,14 +20,7 @@ | |||
| 20 | * has a cmpxchg, and where atomic->value is an int holding | 20 | * has a cmpxchg, and where atomic->value is an int holding |
| 21 | * the value of the atomic (i.e. the high bits aren't used | 21 | * the value of the atomic (i.e. the high bits aren't used |
| 22 | * for a lock or anything like that). | 22 | * for a lock or anything like that). |
| 23 | * | ||
| 24 | * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h | ||
| 25 | * if spinlocks are empty and thus atomic_dec_and_lock is defined | ||
| 26 | * to be atomic_dec_and_test - in that case we don't need it | ||
| 27 | * defined here as well. | ||
| 28 | */ | 23 | */ |
| 29 | |||
| 30 | #ifndef ATOMIC_DEC_AND_LOCK | ||
| 31 | int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) | 24 | int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) |
| 32 | { | 25 | { |
| 33 | int counter; | 26 | int counter; |
| @@ -52,4 +45,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) | |||
| 52 | } | 45 | } |
| 53 | 46 | ||
| 54 | EXPORT_SYMBOL(_atomic_dec_and_lock); | 47 | EXPORT_SYMBOL(_atomic_dec_and_lock); |
| 55 | #endif /* ATOMIC_DEC_AND_LOCK */ | ||
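The deleted #ifndef ATOMIC_DEC_AND_LOCK guard protected against a spinlock.h fallback that the spinlock consolidation removed, so the function is now built unconditionally wherever the architecture provides cmpxchg. The surviving algorithm is the classic lock-free fast path; a hedged sketch (using atomic_cmpxchg() for brevity, not the exact MIPS body):

    #include <linux/spinlock.h>
    #include <asm/atomic.h>

    /*
     * Decrement the counter; only take the lock when the decrement would
     * reach zero. Returns 1 with the lock held if it did, 0 otherwise.
     */
    int dec_and_lock_sketch(atomic_t *atomic, spinlock_t *lock)
    {
        int counter;

        for (;;) {
            counter = atomic_read(atomic);
            if (counter == 1)
                break;                      /* would hit zero: slow path */
            if (atomic_cmpxchg(atomic, counter, counter - 1) == counter)
                return 0;                   /* decremented lock-free */
        }

        spin_lock(lock);
        if (atomic_dec_and_test(atomic))
            return 1;                       /* zero: caller holds the lock */
        spin_unlock(lock);                  /* raced with an increment */
        return 0;
    }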
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile index 7bf705676297..5f2e6904d14a 100644 --- a/arch/parisc/lib/Makefile +++ b/arch/parisc/lib/Makefile | |||
| @@ -5,5 +5,3 @@ | |||
| 5 | lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o | 5 | lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o |
| 6 | 6 | ||
| 7 | obj-y := iomap.o | 7 | obj-y := iomap.o |
| 8 | |||
| 9 | lib-$(CONFIG_SMP) += debuglocks.o | ||
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index 2de182f6fe8a..90f400b10282 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c | |||
| @@ -13,8 +13,8 @@ | |||
| 13 | #include <asm/atomic.h> | 13 | #include <asm/atomic.h> |
| 14 | 14 | ||
| 15 | #ifdef CONFIG_SMP | 15 | #ifdef CONFIG_SMP |
| 16 | spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { | 16 | raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { |
| 17 | [0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED | 17 | [0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED |
| 18 | }; | 18 | }; |
| 19 | #endif | 19 | #endif |
| 20 | 20 | ||
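parisc has only the ldcw primitive, so it guards atomic_t updates with a small hash of spinlocks; the hunk above merely retypes that table for the raw_spinlock_t layer introduced beneath spinlock_t. Roughly how such a hash is consulted — size and helper names assumed for illustration:

    #include <linux/spinlock.h>

    #define HASH_SIZE 16

    static raw_spinlock_t hash_locks[HASH_SIZE] = {
        [0 ... HASH_SIZE - 1] = __RAW_SPIN_LOCK_UNLOCKED  /* as in the hunk */
    };

    /*
     * Hash on the address of the datum being updated so that unrelated
     * atomics rarely contend on the same lock.
     */
    static raw_spinlock_t *lock_for(const volatile void *addr)
    {
        return &hash_locks[((unsigned long)addr >> 4) % HASH_SIZE];
    }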
diff --git a/arch/parisc/lib/debuglocks.c b/arch/parisc/lib/debuglocks.c deleted file mode 100644 index 1b33fe6e5b7a..000000000000 --- a/arch/parisc/lib/debuglocks.c +++ /dev/null | |||
| @@ -1,277 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Debugging versions of SMP locking primitives. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2004 Thibaut VARENE <varenet@parisc-linux.org> | ||
| 5 | * | ||
| 6 | * Some code stolen from alpha & sparc64 ;) | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation; either version 2 of the License, or | ||
| 11 | * (at your option) any later version. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, | ||
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 16 | * GNU General Public License for more details. | ||
| 17 | * | ||
| 18 | * You should have received a copy of the GNU General Public License | ||
| 19 | * along with this program; if not, write to the Free Software | ||
| 20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 21 | * | ||
| 22 | * We use pdc_printf() throughout the file for all output messages, to avoid | ||
| 23 | * losing messages because of disabled interrupts. Since we're using these | ||
| 24 | * messages for debugging purposes, it makes sense not to send them to the | ||
| 25 | * linux console. | ||
| 26 | */ | ||
| 27 | |||
| 28 | |||
| 29 | #include <linux/config.h> | ||
| 30 | #include <linux/kernel.h> | ||
| 31 | #include <linux/sched.h> | ||
| 32 | #include <linux/spinlock.h> | ||
| 33 | #include <linux/hardirq.h> /* in_interrupt() */ | ||
| 34 | #include <asm/system.h> | ||
| 35 | #include <asm/hardirq.h> /* in_interrupt() */ | ||
| 36 | #include <asm/pdc.h> | ||
| 37 | |||
| 38 | #undef INIT_STUCK | ||
| 39 | #define INIT_STUCK 1L << 30 | ||
| 40 | |||
| 41 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 42 | |||
| 43 | |||
| 44 | void _dbg_spin_lock(spinlock_t * lock, const char *base_file, int line_no) | ||
| 45 | { | ||
| 46 | volatile unsigned int *a; | ||
| 47 | long stuck = INIT_STUCK; | ||
| 48 | void *inline_pc = __builtin_return_address(0); | ||
| 49 | unsigned long started = jiffies; | ||
| 50 | int printed = 0; | ||
| 51 | int cpu = smp_processor_id(); | ||
| 52 | |||
| 53 | try_again: | ||
| 54 | |||
| 55 | /* Do the actual locking */ | ||
| 56 | /* <T-Bone> ggg: we can't get stuck on the outer loop? | ||
| 57 | * <ggg> T-Bone: We can hit the outer loop | ||
| 58 | * a lot if multiple CPUs are constantly racing for a lock | ||
| 59 | * and the backplane is NOT fair about which CPU sees | ||
| 60 | * the update first. But it won't hang since every failed | ||
| 61 | * attempt will drop us back into the inner loop and | ||
| 62 | * decrement `stuck'. | ||
| 63 | * <ggg> K-class and some of the others are NOT fair in the HW | ||
| 64 | * implementation so we could see false positives. | ||
| 65 | * But fixing the lock contention is easier than | ||
| 66 | * fixing the HW to be fair. | ||
| 67 | * <tausq> __ldcw() returns 1 if we get the lock; otherwise we | ||
| 68 | * spin until the value of the lock changes, or we time out. | ||
| 69 | */ | ||
| 70 | mb(); | ||
| 71 | a = __ldcw_align(lock); | ||
| 72 | while (stuck && (__ldcw(a) == 0)) | ||
| 73 | while ((*a == 0) && --stuck); | ||
| 74 | mb(); | ||
| 75 | |||
| 76 | if (unlikely(stuck <= 0)) { | ||
| 77 | pdc_printf( | ||
| 78 | "%s:%d: spin_lock(%s/%p) stuck in %s at %p(%d)" | ||
| 79 | " owned by %s:%d in %s at %p(%d)\n", | ||
| 80 | base_file, line_no, lock->module, lock, | ||
| 81 | current->comm, inline_pc, cpu, | ||
| 82 | lock->bfile, lock->bline, lock->task->comm, | ||
| 83 | lock->previous, lock->oncpu); | ||
| 84 | stuck = INIT_STUCK; | ||
| 85 | printed = 1; | ||
| 86 | goto try_again; | ||
| 87 | } | ||
| 88 | |||
| 89 | /* Exiting. Got the lock. */ | ||
| 90 | lock->oncpu = cpu; | ||
| 91 | lock->previous = inline_pc; | ||
| 92 | lock->task = current; | ||
| 93 | lock->bfile = (char *)base_file; | ||
| 94 | lock->bline = line_no; | ||
| 95 | |||
| 96 | if (unlikely(printed)) { | ||
| 97 | pdc_printf( | ||
| 98 | "%s:%d: spin_lock grabbed in %s at %p(%d) %ld ticks\n", | ||
| 99 | base_file, line_no, current->comm, inline_pc, | ||
| 100 | cpu, jiffies - started); | ||
| 101 | } | ||
| 102 | } | ||
| 103 | |||
| 104 | void _dbg_spin_unlock(spinlock_t * lock, const char *base_file, int line_no) | ||
| 105 | { | ||
| 106 | CHECK_LOCK(lock); | ||
| 107 | volatile unsigned int *a; | ||
| 108 | mb(); | ||
| 109 | a = __ldcw_align(lock); | ||
| 110 | if (unlikely((*a != 0) && lock->babble)) { | ||
| 111 | lock->babble--; | ||
| 112 | pdc_printf( | ||
| 113 | "%s:%d: spin_unlock(%s:%p) not locked\n", | ||
| 114 | base_file, line_no, lock->module, lock); | ||
| 115 | } | ||
| 116 | *a = 1; | ||
| 117 | mb(); | ||
| 118 | } | ||
| 119 | |||
| 120 | int _dbg_spin_trylock(spinlock_t * lock, const char *base_file, int line_no) | ||
| 121 | { | ||
| 122 | int ret; | ||
| 123 | volatile unsigned int *a; | ||
| 124 | mb(); | ||
| 125 | a = __ldcw_align(lock); | ||
| 126 | ret = (__ldcw(a) != 0); | ||
| 127 | mb(); | ||
| 128 | if (ret) { | ||
| 129 | lock->oncpu = smp_processor_id(); | ||
| 130 | lock->previous = __builtin_return_address(0); | ||
| 131 | lock->task = current; | ||
| 132 | } else { | ||
| 133 | lock->bfile = (char *)base_file; | ||
| 134 | lock->bline = line_no; | ||
| 135 | } | ||
| 136 | return ret; | ||
| 137 | } | ||
| 138 | |||
| 139 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
| 140 | |||
| 141 | #ifdef CONFIG_DEBUG_RWLOCK | ||
| 142 | |||
| 143 | /* Interrupts trouble detailed explanation, thx Grant: | ||
| 144 | * | ||
| 145 | * o writer (wants to modify data) attempts to acquire the rwlock | ||
| 146 | * o He gets the write lock. | ||
| 147 | * o Interrupts are still enabled, we take an interrupt with the | ||
| 148 | * writer still holding the lock. | ||
| 149 | * o interrupt handler tries to acquire the rwlock for read. | ||
| 150 | * o deadlock since the writer can't release it at this point. | ||
| 151 | * | ||
| 152 | * In general, any use of spinlocks that competes between "base" | ||
| 153 | * level and interrupt level code will risk deadlock. Interrupts | ||
| 154 | * need to be disabled in the base level routines to avoid it. | ||
| 155 | * Or more precisely, only the IRQ the base level routine | ||
| 156 | * is competing with for the lock. But it's more efficient/faster | ||
| 157 | * to just disable all interrupts on that CPU to guarantee | ||
| 158 | * once it gets the lock it can release it quickly too. | ||
| 159 | */ | ||
| 160 | |||
| 161 | void _dbg_write_lock(rwlock_t *rw, const char *bfile, int bline) | ||
| 162 | { | ||
| 163 | void *inline_pc = __builtin_return_address(0); | ||
| 164 | unsigned long started = jiffies; | ||
| 165 | long stuck = INIT_STUCK; | ||
| 166 | int printed = 0; | ||
| 167 | int cpu = smp_processor_id(); | ||
| 168 | |||
| 169 | if(unlikely(in_interrupt())) { /* acquiring write lock in interrupt context, bad idea */ | ||
| 170 | pdc_printf("write_lock caller: %s:%d, IRQs enabled,\n", bfile, bline); | ||
| 171 | BUG(); | ||
| 172 | } | ||
| 173 | |||
| 174 | /* Note: if interrupts are disabled (which is most likely), the printk | ||
| 175 | will never show on the console. We might need a polling method to flush | ||
| 176 | the dmesg buffer anyhow. */ | ||
| 177 | |||
| 178 | retry: | ||
| 179 | _raw_spin_lock(&rw->lock); | ||
| 180 | |||
| 181 | if(rw->counter != 0) { | ||
| 182 | /* this basically never happens */ | ||
| 183 | _raw_spin_unlock(&rw->lock); | ||
| 184 | |||
| 185 | stuck--; | ||
| 186 | if ((unlikely(stuck <= 0)) && (rw->counter < 0)) { | ||
| 187 | pdc_printf( | ||
| 188 | "%s:%d: write_lock stuck on writer" | ||
| 189 | " in %s at %p(%d) %ld ticks\n", | ||
| 190 | bfile, bline, current->comm, inline_pc, | ||
| 191 | cpu, jiffies - started); | ||
| 192 | stuck = INIT_STUCK; | ||
| 193 | printed = 1; | ||
| 194 | } | ||
| 195 | else if (unlikely(stuck <= 0)) { | ||
| 196 | pdc_printf( | ||
| 197 | "%s:%d: write_lock stuck on reader" | ||
| 198 | " in %s at %p(%d) %ld ticks\n", | ||
| 199 | bfile, bline, current->comm, inline_pc, | ||
| 200 | cpu, jiffies - started); | ||
| 201 | stuck = INIT_STUCK; | ||
| 202 | printed = 1; | ||
| 203 | } | ||
| 204 | |||
| 205 | while(rw->counter != 0); | ||
| 206 | |||
| 207 | goto retry; | ||
| 208 | } | ||
| 209 | |||
| 210 | /* got it. now leave without unlocking */ | ||
| 211 | rw->counter = -1; /* remember we are locked */ | ||
| 212 | |||
| 213 | if (unlikely(printed)) { | ||
| 214 | pdc_printf( | ||
| 215 | "%s:%d: write_lock grabbed in %s at %p(%d) %ld ticks\n", | ||
| 216 | bfile, bline, current->comm, inline_pc, | ||
| 217 | cpu, jiffies - started); | ||
| 218 | } | ||
| 219 | } | ||
| 220 | |||
| 221 | int _dbg_write_trylock(rwlock_t *rw, const char *bfile, int bline) | ||
| 222 | { | ||
| 223 | #if 0 | ||
| 224 | void *inline_pc = __builtin_return_address(0); | ||
| 225 | int cpu = smp_processor_id(); | ||
| 226 | #endif | ||
| 227 | |||
| 228 | if(unlikely(in_interrupt())) { /* acquiring write lock in interrupt context, bad idea */ | ||
| 229 | pdc_printf("write_lock caller: %s:%d, IRQs enabled,\n", bfile, bline); | ||
| 230 | BUG(); | ||
| 231 | } | ||
| 232 | |||
| 233 | /* Note: if interrupts are disabled (which is most likely), the printk | ||
| 234 | will never show on the console. We might need a polling method to flush | ||
| 235 | the dmesg buffer anyhow. */ | ||
| 236 | |||
| 237 | _raw_spin_lock(&rw->lock); | ||
| 238 | |||
| 239 | if(rw->counter != 0) { | ||
| 240 | /* this basically never happens */ | ||
| 241 | _raw_spin_unlock(&rw->lock); | ||
| 242 | return 0; | ||
| 243 | } | ||
| 244 | |||
| 245 | /* got it. now leave without unlocking */ | ||
| 246 | rw->counter = -1; /* remember we are locked */ | ||
| 247 | #if 0 | ||
| 248 | pdc_printf("%s:%d: try write_lock grabbed in %s at %p(%d)\n", | ||
| 249 | bfile, bline, current->comm, inline_pc, cpu); | ||
| 250 | #endif | ||
| 251 | return 1; | ||
| 252 | } | ||
| 253 | |||
| 254 | void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline) | ||
| 255 | { | ||
| 256 | #if 0 | ||
| 257 | void *inline_pc = __builtin_return_address(0); | ||
| 258 | unsigned long started = jiffies; | ||
| 259 | int cpu = smp_processor_id(); | ||
| 260 | #endif | ||
| 261 | unsigned long flags; | ||
| 262 | |||
| 263 | local_irq_save(flags); | ||
| 264 | _raw_spin_lock(&rw->lock); | ||
| 265 | |||
| 266 | rw->counter++; | ||
| 267 | #if 0 | ||
| 268 | pdc_printf( | ||
| 269 | "%s:%d: read_lock grabbed in %s at %p(%d) %ld ticks\n", | ||
| 270 | bfile, bline, current->comm, inline_pc, | ||
| 271 | cpu, jiffies - started); | ||
| 272 | #endif | ||
| 273 | _raw_spin_unlock(&rw->lock); | ||
| 274 | local_irq_restore(flags); | ||
| 275 | } | ||
| 276 | |||
| 277 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig index e3f1ce33e642..347ea284140b 100644 --- a/arch/ppc/Kconfig +++ b/arch/ppc/Kconfig | |||
| @@ -265,6 +265,15 @@ config PPC601_SYNC_FIX | |||
| 265 | 265 | ||
| 266 | If in doubt, say Y here. | 266 | If in doubt, say Y here. |
| 267 | 267 | ||
| 268 | config HOTPLUG_CPU | ||
| 269 | bool "Support for enabling/disabling CPUs" | ||
| 270 | depends on SMP && HOTPLUG && EXPERIMENTAL && PPC_PMAC | ||
| 271 | ---help--- | ||
| 272 | Say Y here to be able to disable and re-enable individual | ||
| 273 | CPUs at runtime on SMP machines. | ||
| 274 | |||
| 275 | Say N if you are unsure. | ||
| 276 | |||
| 268 | source arch/ppc/platforms/4xx/Kconfig | 277 | source arch/ppc/platforms/4xx/Kconfig |
| 269 | source arch/ppc/platforms/85xx/Kconfig | 278 | source arch/ppc/platforms/85xx/Kconfig |
| 270 | 279 | ||
diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile index 4b3fe395ffa4..6dd7b50e0669 100644 --- a/arch/ppc/Makefile +++ b/arch/ppc/Makefile | |||
| @@ -21,13 +21,14 @@ CC := $(CC) -m32 | |||
| 21 | endif | 21 | endif |
| 22 | 22 | ||
| 23 | LDFLAGS_vmlinux := -Ttext $(KERNELLOAD) -Bstatic | 23 | LDFLAGS_vmlinux := -Ttext $(KERNELLOAD) -Bstatic |
| 24 | CPPFLAGS += -Iarch/$(ARCH) -Iinclude3 | 24 | # The -Iarch/$(ARCH)/include is temporary while we are merging |
| 25 | CPPFLAGS += -Iarch/$(ARCH) -Iarch/$(ARCH)/include | ||
| 25 | AFLAGS += -Iarch/$(ARCH) | 26 | AFLAGS += -Iarch/$(ARCH) |
| 26 | CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe \ | 27 | CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe \ |
| 27 | -ffixed-r2 -mmultiple | 28 | -ffixed-r2 -mmultiple |
| 28 | CPP = $(CC) -E $(CFLAGS) | 29 | CPP = $(CC) -E $(CFLAGS) |
| 29 | # Temporary hack until we have migrated to asm-powerpc | 30 | # Temporary hack until we have migrated to asm-powerpc |
| 30 | LINUXINCLUDE += -Iinclude3 | 31 | LINUXINCLUDE += -Iarch/$(ARCH)/include |
| 31 | 32 | ||
| 32 | CHECKFLAGS += -D__powerpc__ | 33 | CHECKFLAGS += -D__powerpc__ |
| 33 | 34 | ||
| @@ -103,15 +104,16 @@ endef | |||
| 103 | 104 | ||
| 104 | archclean: | 105 | archclean: |
| 105 | $(Q)$(MAKE) $(clean)=arch/ppc/boot | 106 | $(Q)$(MAKE) $(clean)=arch/ppc/boot |
| 106 | $(Q)rm -rf include3 | 107 | # Temporary hack until we have migrated to asm-powerpc |
| 108 | $(Q)rm -rf arch/$(ARCH)/include | ||
| 107 | 109 | ||
| 108 | prepare: checkbin | 110 | prepare: checkbin |
| 109 | 111 | ||
| 110 | # Temporary hack until we have migrated to asm-powerpc | 112 | # Temporary hack until we have migrated to asm-powerpc |
| 111 | include/asm: include3/asm | 113 | include/asm: arch/$(ARCH)/include/asm |
| 112 | include3/asm: | 114 | arch/$(ARCH)/include/asm: |
| 113 | $(Q)if [ ! -d include3 ]; then mkdir -p include3; fi | 115 | $(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi |
| 114 | $(Q)ln -fsn $(srctree)/include/asm-powerpc include3/asm | 116 | $(Q)ln -fsn $(srctree)/include/asm-powerpc arch/$(ARCH)/include/asm |
| 115 | 117 | ||
| 116 | # Use the file '.tmp_gas_check' for binutils tests, as gas won't output | 118 | # Use the file '.tmp_gas_check' for binutils tests, as gas won't output |
| 117 | # to stdout and these checks are run even on install targets. | 119 | # to stdout and these checks are run even on install targets. |
diff --git a/arch/ppc/kernel/cpu_setup_6xx.S b/arch/ppc/kernel/cpu_setup_6xx.S index 1f37b7eafac2..ba396438ede3 100644 --- a/arch/ppc/kernel/cpu_setup_6xx.S +++ b/arch/ppc/kernel/cpu_setup_6xx.S | |||
| @@ -12,7 +12,6 @@ | |||
| 12 | #include <linux/config.h> | 12 | #include <linux/config.h> |
| 13 | #include <asm/processor.h> | 13 | #include <asm/processor.h> |
| 14 | #include <asm/page.h> | 14 | #include <asm/page.h> |
| 15 | #include <asm/ppc_asm.h> | ||
| 16 | #include <asm/cputable.h> | 15 | #include <asm/cputable.h> |
| 17 | #include <asm/ppc_asm.h> | 16 | #include <asm/ppc_asm.h> |
| 18 | #include <asm/asm-offsets.h> | 17 | #include <asm/asm-offsets.h> |
diff --git a/arch/ppc/kernel/cpu_setup_power4.S b/arch/ppc/kernel/cpu_setup_power4.S index 304589aebdbc..7e4fbb653724 100644 --- a/arch/ppc/kernel/cpu_setup_power4.S +++ b/arch/ppc/kernel/cpu_setup_power4.S | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | #include <asm/page.h> | 14 | #include <asm/page.h> |
| 15 | #include <asm/ppc_asm.h> | 15 | #include <asm/ppc_asm.h> |
| 16 | #include <asm/cputable.h> | 16 | #include <asm/cputable.h> |
| 17 | #include <asm/ppc_asm.h> | ||
| 18 | #include <asm/asm-offsets.h> | 17 | #include <asm/asm-offsets.h> |
| 19 | #include <asm/cache.h> | 18 | #include <asm/cache.h> |
| 20 | 19 | ||
diff --git a/arch/ppc/kernel/dma-mapping.c b/arch/ppc/kernel/dma-mapping.c index e0c631cf96b0..b566d982806c 100644 --- a/arch/ppc/kernel/dma-mapping.c +++ b/arch/ppc/kernel/dma-mapping.c | |||
| @@ -393,7 +393,7 @@ EXPORT_SYMBOL(__dma_sync); | |||
| 393 | * __dma_sync_page() implementation for systems using highmem. | 393 | * __dma_sync_page() implementation for systems using highmem. |
| 394 | * In this case, each page of a buffer must be kmapped/kunmapped | 394 | * In this case, each page of a buffer must be kmapped/kunmapped |
| 395 | * in order to have a virtual address for __dma_sync(). This must | 395 | * in order to have a virtual address for __dma_sync(). This must |
| 396 | * not sleep so kmap_atmomic()/kunmap_atomic() are used. | 396 | * not sleep so kmap_atomic()/kunmap_atomic() are used. |
| 397 | * | 397 | * |
| 398 | * Note: yes, it is possible and correct to have a buffer extend | 398 | * Note: yes, it is possible and correct to have a buffer extend |
| 399 | * beyond the first page. | 399 | * beyond the first page. |
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S index 55daf1210f32..1960fb8c259c 100644 --- a/arch/ppc/kernel/head.S +++ b/arch/ppc/kernel/head.S | |||
| @@ -1023,23 +1023,21 @@ __secondary_start_gemini: | |||
| 1023 | andc r4,r4,r3 | 1023 | andc r4,r4,r3 |
| 1024 | mtspr SPRN_HID0,r4 | 1024 | mtspr SPRN_HID0,r4 |
| 1025 | sync | 1025 | sync |
| 1026 | bl gemini_prom_init | ||
| 1027 | b __secondary_start | 1026 | b __secondary_start |
| 1028 | #endif /* CONFIG_GEMINI */ | 1027 | #endif /* CONFIG_GEMINI */ |
| 1029 | .globl __secondary_start_psurge | 1028 | |
| 1030 | __secondary_start_psurge: | 1029 | .globl __secondary_start_pmac_0 |
| 1031 | li r24,1 /* cpu # */ | 1030 | __secondary_start_pmac_0: |
| 1032 | b __secondary_start_psurge99 | 1031 | /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */ |
| 1033 | .globl __secondary_start_psurge2 | 1032 | li r24,0 |
| 1034 | __secondary_start_psurge2: | 1033 | b 1f |
| 1035 | li r24,2 /* cpu # */ | 1034 | li r24,1 |
| 1036 | b __secondary_start_psurge99 | 1035 | b 1f |
| 1037 | .globl __secondary_start_psurge3 | 1036 | li r24,2 |
| 1038 | __secondary_start_psurge3: | 1037 | b 1f |
| 1039 | li r24,3 /* cpu # */ | 1038 | li r24,3 |
| 1040 | b __secondary_start_psurge99 | 1039 | 1: |
| 1041 | __secondary_start_psurge99: | 1040 | /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0 |
| 1042 | /* we come in here with IR=0 and DR=1, and DBAT 0 | ||
| 1043 | set to map the 0xf0000000 - 0xffffffff region */ | 1041 | set to map the 0xf0000000 - 0xffffffff region */ |
| 1044 | mfmsr r0 | 1042 | mfmsr r0 |
| 1045 | rlwinm r0,r0,0,28,26 /* clear DR (0x10) */ | 1043 | rlwinm r0,r0,0,28,26 /* clear DR (0x10) */ |
diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c index 53547b6de45b..fba29c876b62 100644 --- a/arch/ppc/kernel/idle.c +++ b/arch/ppc/kernel/idle.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/ptrace.h> | 22 | #include <linux/ptrace.h> |
| 23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
| 24 | #include <linux/sysctl.h> | 24 | #include <linux/sysctl.h> |
| 25 | #include <linux/cpu.h> | ||
| 25 | 26 | ||
| 26 | #include <asm/pgtable.h> | 27 | #include <asm/pgtable.h> |
| 27 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
| @@ -35,6 +36,7 @@ | |||
| 35 | void default_idle(void) | 36 | void default_idle(void) |
| 36 | { | 37 | { |
| 37 | void (*powersave)(void); | 38 | void (*powersave)(void); |
| 39 | int cpu = smp_processor_id(); | ||
| 38 | 40 | ||
| 39 | powersave = ppc_md.power_save; | 41 | powersave = ppc_md.power_save; |
| 40 | 42 | ||
| @@ -44,7 +46,7 @@ void default_idle(void) | |||
| 44 | #ifdef CONFIG_SMP | 46 | #ifdef CONFIG_SMP |
| 45 | else { | 47 | else { |
| 46 | set_thread_flag(TIF_POLLING_NRFLAG); | 48 | set_thread_flag(TIF_POLLING_NRFLAG); |
| 47 | while (!need_resched()) | 49 | while (!need_resched() && !cpu_is_offline(cpu)) |
| 48 | barrier(); | 50 | barrier(); |
| 49 | clear_thread_flag(TIF_POLLING_NRFLAG); | 51 | clear_thread_flag(TIF_POLLING_NRFLAG); |
| 50 | } | 52 | } |
| @@ -52,6 +54,8 @@ void default_idle(void) | |||
| 52 | } | 54 | } |
| 53 | if (need_resched()) | 55 | if (need_resched()) |
| 54 | schedule(); | 56 | schedule(); |
| 57 | if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) | ||
| 58 | cpu_die(); | ||
| 55 | } | 59 | } |
| 56 | 60 | ||
| 57 | /* | 61 | /* |
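With hotplug enabled, the polling idle loop needs a second exit condition: a CPU that has been taken offline must stop idling and destroy itself via cpu_die(). The control flow added above, condensed:

    #include <linux/cpu.h>
    #include <linux/sched.h>
    #include <linux/smp.h>

    /* Sketch of one pass through the idle loop with the hotplug check. */
    void idle_pass_sketch(void)
    {
        int cpu = smp_processor_id();

        while (!need_resched() && !cpu_is_offline(cpu))
            barrier();                      /* spin until there is work */

        if (need_resched())
            schedule();
        if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
            cpu_die();                      /* platform hook; no return */
    }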
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c index e70b587b9e51..726fe7ce1747 100644 --- a/arch/ppc/kernel/smp.c +++ b/arch/ppc/kernel/smp.c | |||
| @@ -45,6 +45,7 @@ cpumask_t cpu_online_map; | |||
| 45 | cpumask_t cpu_possible_map; | 45 | cpumask_t cpu_possible_map; |
| 46 | int smp_hw_index[NR_CPUS]; | 46 | int smp_hw_index[NR_CPUS]; |
| 47 | struct thread_info *secondary_ti; | 47 | struct thread_info *secondary_ti; |
| 48 | static struct task_struct *idle_tasks[NR_CPUS]; | ||
| 48 | 49 | ||
| 49 | EXPORT_SYMBOL(cpu_online_map); | 50 | EXPORT_SYMBOL(cpu_online_map); |
| 50 | EXPORT_SYMBOL(cpu_possible_map); | 51 | EXPORT_SYMBOL(cpu_possible_map); |
| @@ -286,7 +287,8 @@ static void __devinit smp_store_cpu_info(int id) | |||
| 286 | 287 | ||
| 287 | void __init smp_prepare_cpus(unsigned int max_cpus) | 288 | void __init smp_prepare_cpus(unsigned int max_cpus) |
| 288 | { | 289 | { |
| 289 | int num_cpus, i; | 290 | int num_cpus, i, cpu; |
| 291 | struct task_struct *p; | ||
| 290 | 292 | ||
| 291 | /* Fixup boot cpu */ | 293 | /* Fixup boot cpu */ |
| 292 | smp_store_cpu_info(smp_processor_id()); | 294 | smp_store_cpu_info(smp_processor_id()); |
| @@ -308,6 +310,17 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
| 308 | 310 | ||
| 309 | if (smp_ops->space_timers) | 311 | if (smp_ops->space_timers) |
| 310 | smp_ops->space_timers(num_cpus); | 312 | smp_ops->space_timers(num_cpus); |
| 313 | |||
| 314 | for_each_cpu(cpu) { | ||
| 315 | if (cpu == smp_processor_id()) | ||
| 316 | continue; | ||
| 317 | /* create a process for the processor */ | ||
| 318 | p = fork_idle(cpu); | ||
| 319 | if (IS_ERR(p)) | ||
| 320 | panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); | ||
| 321 | p->thread_info->cpu = cpu; | ||
| 322 | idle_tasks[cpu] = p; | ||
| 323 | } | ||
| 311 | } | 324 | } |
| 312 | 325 | ||
| 313 | void __devinit smp_prepare_boot_cpu(void) | 326 | void __devinit smp_prepare_boot_cpu(void) |
| @@ -334,12 +347,17 @@ int __devinit start_secondary(void *unused) | |||
| 334 | set_dec(tb_ticks_per_jiffy); | 347 | set_dec(tb_ticks_per_jiffy); |
| 335 | cpu_callin_map[cpu] = 1; | 348 | cpu_callin_map[cpu] = 1; |
| 336 | 349 | ||
| 337 | printk("CPU %i done callin...\n", cpu); | 350 | printk("CPU %d done callin...\n", cpu); |
| 338 | smp_ops->setup_cpu(cpu); | 351 | smp_ops->setup_cpu(cpu); |
| 339 | printk("CPU %i done setup...\n", cpu); | 352 | printk("CPU %d done setup...\n", cpu); |
| 340 | local_irq_enable(); | ||
| 341 | smp_ops->take_timebase(); | 353 | smp_ops->take_timebase(); |
| 342 | printk("CPU %i done timebase take...\n", cpu); | 354 | printk("CPU %d done timebase take...\n", cpu); |
| 355 | |||
| 356 | spin_lock(&call_lock); | ||
| 357 | cpu_set(cpu, cpu_online_map); | ||
| 358 | spin_unlock(&call_lock); | ||
| 359 | |||
| 360 | local_irq_enable(); | ||
| 343 | 361 | ||
| 344 | cpu_idle(); | 362 | cpu_idle(); |
| 345 | return 0; | 363 | return 0; |
| @@ -347,17 +365,11 @@ int __devinit start_secondary(void *unused) | |||
| 347 | 365 | ||
| 348 | int __cpu_up(unsigned int cpu) | 366 | int __cpu_up(unsigned int cpu) |
| 349 | { | 367 | { |
| 350 | struct task_struct *p; | ||
| 351 | char buf[32]; | 368 | char buf[32]; |
| 352 | int c; | 369 | int c; |
| 353 | 370 | ||
| 354 | /* create a process for the processor */ | 371 | secondary_ti = idle_tasks[cpu]->thread_info; |
| 355 | /* only regs.msr is actually used, and 0 is OK for it */ | 372 | mb(); |
| 356 | p = fork_idle(cpu); | ||
| 357 | if (IS_ERR(p)) | ||
| 358 | panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); | ||
| 359 | secondary_ti = p->thread_info; | ||
| 360 | p->thread_info->cpu = cpu; | ||
| 361 | 373 | ||
| 362 | /* | 374 | /* |
| 363 | * There was a cache flush loop here to flush the cache | 375 | * There was a cache flush loop here to flush the cache |
| @@ -389,7 +401,11 @@ int __cpu_up(unsigned int cpu) | |||
| 389 | printk("Processor %d found.\n", cpu); | 401 | printk("Processor %d found.\n", cpu); |
| 390 | 402 | ||
| 391 | smp_ops->give_timebase(); | 403 | smp_ops->give_timebase(); |
| 392 | cpu_set(cpu, cpu_online_map); | 404 | |
| 405 | /* Wait until cpu puts itself in the online map */ | ||
| 406 | while (!cpu_online(cpu)) | ||
| 407 | cpu_relax(); | ||
| 408 | |||
| 393 | return 0; | 409 | return 0; |
| 394 | } | 410 | } |
| 395 | 411 | ||
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c index 8356d544fa60..961ede87be72 100644 --- a/arch/ppc/kernel/traps.c +++ b/arch/ppc/kernel/traps.c | |||
| @@ -118,6 +118,28 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) | |||
| 118 | info.si_code = code; | 118 | info.si_code = code; |
| 119 | info.si_addr = (void __user *) addr; | 119 | info.si_addr = (void __user *) addr; |
| 120 | force_sig_info(signr, &info, current); | 120 | force_sig_info(signr, &info, current); |
| 121 | |||
| 122 | /* | ||
| 123 | * Init gets no signals that it doesn't have a handler for. | ||
| 124 | * That's all very well, but if it has caused a synchronous | ||
| 125 | * exception and we ignore the resulting signal, it will just | ||
| 126 | * generate the same exception over and over again and we get | ||
| 127 | * nowhere. Better to kill it and let the kernel panic. | ||
| 128 | */ | ||
| 129 | if (current->pid == 1) { | ||
| 130 | __sighandler_t handler; | ||
| 131 | |||
| 132 | spin_lock_irq(¤t->sighand->siglock); | ||
| 133 | handler = current->sighand->action[signr-1].sa.sa_handler; | ||
| 134 | spin_unlock_irq(¤t->sighand->siglock); | ||
| 135 | if (handler == SIG_DFL) { | ||
| 136 | /* init has generated a synchronous exception | ||
| 137 | and it doesn't have a handler for the signal */ | ||
| 138 | printk(KERN_CRIT "init has generated signal %d " | ||
| 139 | "but has no handler for it\n", signr); | ||
| 140 | do_exit(signr); | ||
| 141 | } | ||
| 142 | } | ||
| 121 | } | 143 | } |
| 122 | 144 | ||
| 123 | /* | 145 | /* |
diff --git a/arch/ppc/lib/Makefile b/arch/ppc/lib/Makefile index 1c380e67d435..f1e1fb4144f0 100644 --- a/arch/ppc/lib/Makefile +++ b/arch/ppc/lib/Makefile | |||
| @@ -4,6 +4,5 @@ | |||
| 4 | 4 | ||
| 5 | obj-y := checksum.o string.o strcase.o dec_and_lock.o div64.o | 5 | obj-y := checksum.o string.o strcase.o dec_and_lock.o div64.o |
| 6 | 6 | ||
| 7 | obj-$(CONFIG_SMP) += locks.o | ||
| 8 | obj-$(CONFIG_8xx) += rheap.o | 7 | obj-$(CONFIG_8xx) += rheap.o |
| 9 | obj-$(CONFIG_CPM2) += rheap.o | 8 | obj-$(CONFIG_CPM2) += rheap.o |
diff --git a/arch/ppc/lib/dec_and_lock.c b/arch/ppc/lib/dec_and_lock.c index 4ee888070d91..b18f0d9a00fc 100644 --- a/arch/ppc/lib/dec_and_lock.c +++ b/arch/ppc/lib/dec_and_lock.c | |||
| @@ -11,14 +11,7 @@ | |||
| 11 | * has a cmpxchg, and where atomic->value is an int holding | 11 | * has a cmpxchg, and where atomic->value is an int holding |
| 12 | * the value of the atomic (i.e. the high bits aren't used | 12 | * the value of the atomic (i.e. the high bits aren't used |
| 13 | * for a lock or anything like that). | 13 | * for a lock or anything like that). |
| 14 | * | ||
| 15 | * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h | ||
| 16 | * if spinlocks are empty and thus atomic_dec_and_lock is defined | ||
| 17 | * to be atomic_dec_and_test - in that case we don't need it | ||
| 18 | * defined here as well. | ||
| 19 | */ | 14 | */ |
| 20 | |||
| 21 | #ifndef ATOMIC_DEC_AND_LOCK | ||
| 22 | int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) | 15 | int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) |
| 23 | { | 16 | { |
| 24 | int counter; | 17 | int counter; |
| @@ -43,4 +36,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) | |||
| 43 | } | 36 | } |
| 44 | 37 | ||
| 45 | EXPORT_SYMBOL(_atomic_dec_and_lock); | 38 | EXPORT_SYMBOL(_atomic_dec_and_lock); |
| 46 | #endif /* ATOMIC_DEC_AND_LOCK */ | ||
diff --git a/arch/ppc/mm/fault.c b/arch/ppc/mm/fault.c index 57d9930843ac..ee5e9f25baf9 100644 --- a/arch/ppc/mm/fault.c +++ b/arch/ppc/mm/fault.c | |||
| @@ -278,11 +278,7 @@ bad_area: | |||
| 278 | 278 | ||
| 279 | /* User mode accesses cause a SIGSEGV */ | 279 | /* User mode accesses cause a SIGSEGV */ |
| 280 | if (user_mode(regs)) { | 280 | if (user_mode(regs)) { |
| 281 | info.si_signo = SIGSEGV; | 281 | _exception(SIGSEGV, regs, code, address); |
| 282 | info.si_errno = 0; | ||
| 283 | info.si_code = code; | ||
| 284 | info.si_addr = (void __user *) address; | ||
| 285 | force_sig_info(SIGSEGV, &info, current); | ||
| 286 | return 0; | 282 | return 0; |
| 287 | } | 283 | } |
| 288 | 284 | ||
diff --git a/arch/ppc/platforms/pmac_sleep.S b/arch/ppc/platforms/pmac_sleep.S index 8d67adc76925..88419c77ac43 100644 --- a/arch/ppc/platforms/pmac_sleep.S +++ b/arch/ppc/platforms/pmac_sleep.S | |||
| @@ -161,6 +161,8 @@ _GLOBAL(low_sleep_handler) | |||
| 161 | addi r3,r3,sleep_storage@l | 161 | addi r3,r3,sleep_storage@l |
| 162 | stw r5,0(r3) | 162 | stw r5,0(r3) |
| 163 | 163 | ||
| 164 | .globl low_cpu_die | ||
| 165 | low_cpu_die: | ||
| 164 | /* Flush & disable all caches */ | 166 | /* Flush & disable all caches */ |
| 165 | bl flush_disable_caches | 167 | bl flush_disable_caches |
| 166 | 168 | ||
diff --git a/arch/ppc/platforms/pmac_smp.c b/arch/ppc/platforms/pmac_smp.c index 8e049dab4e63..794a23994b82 100644 --- a/arch/ppc/platforms/pmac_smp.c +++ b/arch/ppc/platforms/pmac_smp.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
| 34 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
| 35 | #include <linux/hardirq.h> | 35 | #include <linux/hardirq.h> |
| 36 | #include <linux/cpu.h> | ||
| 36 | 37 | ||
| 37 | #include <asm/ptrace.h> | 38 | #include <asm/ptrace.h> |
| 38 | #include <asm/atomic.h> | 39 | #include <asm/atomic.h> |
| @@ -55,9 +56,7 @@ | |||
| 55 | * Powersurge (old powermac SMP) support. | 56 | * Powersurge (old powermac SMP) support. |
| 56 | */ | 57 | */ |
| 57 | 58 | ||
| 58 | extern void __secondary_start_psurge(void); | 59 | extern void __secondary_start_pmac_0(void); |
| 59 | extern void __secondary_start_psurge2(void); /* Temporary horrible hack */ | ||
| 60 | extern void __secondary_start_psurge3(void); /* Temporary horrible hack */ | ||
| 61 | 60 | ||
| 62 | /* Addresses for powersurge registers */ | 61 | /* Addresses for powersurge registers */ |
| 63 | #define HAMMERHEAD_BASE 0xf8000000 | 62 | #define HAMMERHEAD_BASE 0xf8000000 |
| @@ -119,7 +118,7 @@ static volatile int sec_tb_reset = 0; | |||
| 119 | static unsigned int pri_tb_hi, pri_tb_lo; | 118 | static unsigned int pri_tb_hi, pri_tb_lo; |
| 120 | static unsigned int pri_tb_stamp; | 119 | static unsigned int pri_tb_stamp; |
| 121 | 120 | ||
| 122 | static void __init core99_init_caches(int cpu) | 121 | static void __devinit core99_init_caches(int cpu) |
| 123 | { | 122 | { |
| 124 | if (!cpu_has_feature(CPU_FTR_L2CR)) | 123 | if (!cpu_has_feature(CPU_FTR_L2CR)) |
| 125 | return; | 124 | return; |
| @@ -346,7 +345,7 @@ static int __init smp_psurge_probe(void) | |||
| 346 | 345 | ||
| 347 | static void __init smp_psurge_kick_cpu(int nr) | 346 | static void __init smp_psurge_kick_cpu(int nr) |
| 348 | { | 347 | { |
| 349 | void (*start)(void) = __secondary_start_psurge; | 348 | unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8; |
| 350 | unsigned long a; | 349 | unsigned long a; |
| 351 | 350 | ||
| 352 | /* may need to flush here if secondary bats aren't setup */ | 351 | /* may need to flush here if secondary bats aren't setup */ |
| @@ -356,17 +355,7 @@ static void __init smp_psurge_kick_cpu(int nr) | |||
| 356 | 355 | ||
| 357 | if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353); | 356 | if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353); |
| 358 | 357 | ||
| 359 | /* setup entry point of secondary processor */ | 358 | out_be32(psurge_start, start); |
| 360 | switch (nr) { | ||
| 361 | case 2: | ||
| 362 | start = __secondary_start_psurge2; | ||
| 363 | break; | ||
| 364 | case 3: | ||
| 365 | start = __secondary_start_psurge3; | ||
| 366 | break; | ||
| 367 | } | ||
| 368 | |||
| 369 | out_be32(psurge_start, __pa(start)); | ||
| 370 | mb(); | 359 | mb(); |
| 371 | 360 | ||
| 372 | psurge_set_ipi(nr); | 361 | psurge_set_ipi(nr); |
| @@ -500,14 +489,14 @@ static int __init smp_core99_probe(void) | |||
| 500 | return ncpus; | 489 | return ncpus; |
| 501 | } | 490 | } |
| 502 | 491 | ||
| 503 | static void __init smp_core99_kick_cpu(int nr) | 492 | static void __devinit smp_core99_kick_cpu(int nr) |
| 504 | { | 493 | { |
| 505 | unsigned long save_vector, new_vector; | 494 | unsigned long save_vector, new_vector; |
| 506 | unsigned long flags; | 495 | unsigned long flags; |
| 507 | 496 | ||
| 508 | volatile unsigned long *vector | 497 | volatile unsigned long *vector |
| 509 | = ((volatile unsigned long *)(KERNELBASE+0x100)); | 498 | = ((volatile unsigned long *)(KERNELBASE+0x100)); |
| 510 | if (nr < 1 || nr > 3) | 499 | if (nr < 0 || nr > 3) |
| 511 | return; | 500 | return; |
| 512 | if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346); | 501 | if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346); |
| 513 | 502 | ||
| @@ -518,19 +507,9 @@ static void __init smp_core99_kick_cpu(int nr) | |||
| 518 | save_vector = *vector; | 507 | save_vector = *vector; |
| 519 | 508 | ||
| 520 | /* Setup fake reset vector that does | 509 | /* Setup fake reset vector that does |
| 521 | * b __secondary_start_psurge - KERNELBASE | 510 | * b __secondary_start_pmac_0 + nr*8 - KERNELBASE |
| 522 | */ | 511 | */ |
| 523 | switch(nr) { | 512 | new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8; |
| 524 | case 1: | ||
| 525 | new_vector = (unsigned long)__secondary_start_psurge; | ||
| 526 | break; | ||
| 527 | case 2: | ||
| 528 | new_vector = (unsigned long)__secondary_start_psurge2; | ||
| 529 | break; | ||
| 530 | case 3: | ||
| 531 | new_vector = (unsigned long)__secondary_start_psurge3; | ||
| 532 | break; | ||
| 533 | } | ||
| 534 | *vector = 0x48000002 + new_vector - KERNELBASE; | 513 | *vector = 0x48000002 + new_vector - KERNELBASE; |
| 535 | 514 | ||
| 536 | /* flush data cache and inval instruction cache */ | 515 | /* flush data cache and inval instruction cache */ |
| @@ -554,7 +533,7 @@ static void __init smp_core99_kick_cpu(int nr) | |||
| 554 | if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); | 533 | if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); |
| 555 | } | 534 | } |
| 556 | 535 | ||
| 557 | static void __init smp_core99_setup_cpu(int cpu_nr) | 536 | static void __devinit smp_core99_setup_cpu(int cpu_nr) |
| 558 | { | 537 | { |
| 559 | /* Setup L2/L3 */ | 538 | /* Setup L2/L3 */ |
| 560 | if (cpu_nr != 0) | 539 | if (cpu_nr != 0) |
| @@ -668,3 +647,47 @@ struct smp_ops_t core99_smp_ops __pmacdata = { | |||
| 668 | .give_timebase = smp_core99_give_timebase, | 647 | .give_timebase = smp_core99_give_timebase, |
| 669 | .take_timebase = smp_core99_take_timebase, | 648 | .take_timebase = smp_core99_take_timebase, |
| 670 | }; | 649 | }; |
| 650 | |||
| 651 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 652 | |||
| 653 | int __cpu_disable(void) | ||
| 654 | { | ||
| 655 | cpu_clear(smp_processor_id(), cpu_online_map); | ||
| 656 | |||
| 657 | /* XXX reset cpu affinity here */ | ||
| 658 | openpic_set_priority(0xf); | ||
| 659 | asm volatile("mtdec %0" : : "r" (0x7fffffff)); | ||
| 660 | mb(); | ||
| 661 | udelay(20); | ||
| 662 | asm volatile("mtdec %0" : : "r" (0x7fffffff)); | ||
| 663 | return 0; | ||
| 664 | } | ||
| 665 | |||
| 666 | extern void low_cpu_die(void) __attribute__((noreturn)); /* in pmac_sleep.S */ | ||
| 667 | static int cpu_dead[NR_CPUS]; | ||
| 668 | |||
| 669 | void cpu_die(void) | ||
| 670 | { | ||
| 671 | local_irq_disable(); | ||
| 672 | cpu_dead[smp_processor_id()] = 1; | ||
| 673 | mb(); | ||
| 674 | low_cpu_die(); | ||
| 675 | } | ||
| 676 | |||
| 677 | void __cpu_die(unsigned int cpu) | ||
| 678 | { | ||
| 679 | int timeout; | ||
| 680 | |||
| 681 | timeout = 1000; | ||
| 682 | while (!cpu_dead[cpu]) { | ||
| 683 | if (--timeout == 0) { | ||
| 684 | printk("CPU %u refused to die!\n", cpu); | ||
| 685 | break; | ||
| 686 | } | ||
| 687 | msleep(1); | ||
| 688 | } | ||
| 689 | cpu_callin_map[cpu] = 0; | ||
| 690 | cpu_dead[cpu] = 0; | ||
| 691 | } | ||
| 692 | |||
| 693 | #endif | ||
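The hotplug teardown added above is a two-sided handshake: the dying CPU disables interrupts, raises a cpu_dead flag, and parks in low_cpu_die(), while a surviving CPU polls that flag from __cpu_die() for about a second before giving up. Condensed, with a hypothetical park_cpu() standing in for the platform sleep code:

    #include <linux/delay.h>
    #include <linux/kernel.h>
    #include <linux/threads.h>

    extern void park_cpu(void);             /* hypothetical; never returns */

    static int cpu_dead_flag[NR_CPUS];

    void die_sketch(void)                   /* runs on the dying CPU */
    {
        local_irq_disable();
        cpu_dead_flag[smp_processor_id()] = 1;
        mb();                               /* flag visible before parking */
        park_cpu();
    }

    void reap_sketch(unsigned int cpu)      /* runs on a surviving CPU */
    {
        int timeout = 1000;

        while (!cpu_dead_flag[cpu]) {
            if (--timeout == 0) {
                printk("CPU %u refused to die!\n", cpu);
                break;
            }
            msleep(1);                      /* ~1 second in total */
        }
        cpu_dead_flag[cpu] = 0;             /* rearm for a later offline */
    }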
diff --git a/arch/ppc/syslib/cpc700_pic.c b/arch/ppc/syslib/cpc700_pic.c index 774709807538..75fe8eb10693 100644 --- a/arch/ppc/syslib/cpc700_pic.c +++ b/arch/ppc/syslib/cpc700_pic.c | |||
| @@ -90,14 +90,10 @@ cpc700_mask_and_ack_irq(unsigned int irq) | |||
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | static struct hw_interrupt_type cpc700_pic = { | 92 | static struct hw_interrupt_type cpc700_pic = { |
| 93 | "CPC700 PIC", | 93 | .typename = "CPC700 PIC", |
| 94 | NULL, | 94 | .enable = cpc700_unmask_irq, |
| 95 | NULL, | 95 | .disable = cpc700_mask_irq, |
| 96 | cpc700_unmask_irq, | 96 | .ack = cpc700_mask_and_ack_irq, |
| 97 | cpc700_mask_irq, | ||
| 98 | cpc700_mask_and_ack_irq, | ||
| 99 | NULL, | ||
| 100 | NULL | ||
| 101 | }; | 97 | }; |
| 102 | 98 | ||
| 103 | __init static void | 99 | __init static void |
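This hunk and the ones that follow (i8259, OpenPIC2, the 403GC AIC, the Xilinx controller, and the sh boards further down) are one mechanical cleanup: positional struct initializers become C99 designated initializers, so unused callbacks no longer need NULL padding and a reordering of struct hw_interrupt_type can no longer silently misassign slots. The idea in miniature:

    struct irq_ops {
        const char *typename;
        void (*startup)(unsigned int irq);
        void (*shutdown)(unsigned int irq);
        void (*enable)(unsigned int irq);
        void (*disable)(unsigned int irq);
    };

    static void demo_enable(unsigned int irq)  { /* ... */ }
    static void demo_disable(unsigned int irq) { /* ... */ }

    /* Before: positional, with NULLs padding the unused slots. */
    static struct irq_ops old_way = {
        "demo", NULL, NULL, demo_enable, demo_disable,
    };

    /* After: named members; anything omitted is zero-initialized. */
    static struct irq_ops new_way = {
        .typename = "demo",
        .enable   = demo_enable,
        .disable  = demo_disable,
    };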
diff --git a/arch/ppc/syslib/i8259.c b/arch/ppc/syslib/i8259.c index b9391e650141..5c7908c20e43 100644 --- a/arch/ppc/syslib/i8259.c +++ b/arch/ppc/syslib/i8259.c | |||
| @@ -129,14 +129,11 @@ static void i8259_end_irq(unsigned int irq) | |||
| 129 | } | 129 | } |
| 130 | 130 | ||
| 131 | struct hw_interrupt_type i8259_pic = { | 131 | struct hw_interrupt_type i8259_pic = { |
| 132 | " i8259 ", | 132 | .typename = " i8259 ", |
| 133 | NULL, | 133 | .enable = i8259_unmask_irq, |
| 134 | NULL, | 134 | .disable = i8259_mask_irq, |
| 135 | i8259_unmask_irq, | 135 | .ack = i8259_mask_and_ack_irq, |
| 136 | i8259_mask_irq, | 136 | .end = i8259_end_irq, |
| 137 | i8259_mask_and_ack_irq, | ||
| 138 | i8259_end_irq, | ||
| 139 | NULL | ||
| 140 | }; | 137 | }; |
| 141 | 138 | ||
| 142 | static struct resource pic1_iores = { | 139 | static struct resource pic1_iores = { |
diff --git a/arch/ppc/syslib/open_pic2.c b/arch/ppc/syslib/open_pic2.c index 7e272c51a497..2e0ea92144f6 100644 --- a/arch/ppc/syslib/open_pic2.c +++ b/arch/ppc/syslib/open_pic2.c | |||
| @@ -82,13 +82,11 @@ static void openpic2_end_irq(unsigned int irq_nr); | |||
| 82 | static void openpic2_ack_irq(unsigned int irq_nr); | 82 | static void openpic2_ack_irq(unsigned int irq_nr); |
| 83 | 83 | ||
| 84 | struct hw_interrupt_type open_pic2 = { | 84 | struct hw_interrupt_type open_pic2 = { |
| 85 | " OpenPIC2 ", | 85 | .typename = " OpenPIC2 ", |
| 86 | NULL, | 86 | .enable = openpic2_enable_irq, |
| 87 | NULL, | 87 | .disable = openpic2_disable_irq, |
| 88 | openpic2_enable_irq, | 88 | .ack = openpic2_ack_irq, |
| 89 | openpic2_disable_irq, | 89 | .end = openpic2_end_irq, |
| 90 | openpic2_ack_irq, | ||
| 91 | openpic2_end_irq, | ||
| 92 | }; | 90 | }; |
| 93 | 91 | ||
| 94 | /* | 92 | /* |
diff --git a/arch/ppc/syslib/ppc403_pic.c b/arch/ppc/syslib/ppc403_pic.c index 06cb0af2a58d..ce4d1deb86e9 100644 --- a/arch/ppc/syslib/ppc403_pic.c +++ b/arch/ppc/syslib/ppc403_pic.c | |||
| @@ -34,13 +34,10 @@ static void ppc403_aic_disable(unsigned int irq); | |||
| 34 | static void ppc403_aic_disable_and_ack(unsigned int irq); | 34 | static void ppc403_aic_disable_and_ack(unsigned int irq); |
| 35 | 35 | ||
| 36 | static struct hw_interrupt_type ppc403_aic = { | 36 | static struct hw_interrupt_type ppc403_aic = { |
| 37 | "403GC AIC", | 37 | .typename = "403GC AIC", |
| 38 | NULL, | 38 | .enable = ppc403_aic_enable, |
| 39 | NULL, | 39 | .disable = ppc403_aic_disable, |
| 40 | ppc403_aic_enable, | 40 | .ack = ppc403_aic_disable_and_ack, |
| 41 | ppc403_aic_disable, | ||
| 42 | ppc403_aic_disable_and_ack, | ||
| 43 | 0 | ||
| 44 | }; | 41 | }; |
| 45 | 42 | ||
| 46 | int | 43 | int |
diff --git a/arch/ppc/syslib/xilinx_pic.c b/arch/ppc/syslib/xilinx_pic.c index e0bd66f0847a..2cbcad278cef 100644 --- a/arch/ppc/syslib/xilinx_pic.c +++ b/arch/ppc/syslib/xilinx_pic.c | |||
| @@ -79,14 +79,11 @@ xilinx_intc_end(unsigned int irq) | |||
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | static struct hw_interrupt_type xilinx_intc = { | 81 | static struct hw_interrupt_type xilinx_intc = { |
| 82 | "Xilinx Interrupt Controller", | 82 | .typename = "Xilinx Interrupt Controller", |
| 83 | NULL, | 83 | .enable = xilinx_intc_enable, |
| 84 | NULL, | 84 | .disable = xilinx_intc_disable, |
| 85 | xilinx_intc_enable, | 85 | .ack = xilinx_intc_disable_and_ack, |
| 86 | xilinx_intc_disable, | 86 | .end = xilinx_intc_end, |
| 87 | xilinx_intc_disable_and_ack, | ||
| 88 | xilinx_intc_end, | ||
| 89 | 0 | ||
| 90 | }; | 87 | }; |
| 91 | 88 | ||
| 92 | int | 89 | int |
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile index 0a23aeacba88..17d2c1eac3b8 100644 --- a/arch/ppc64/Makefile +++ b/arch/ppc64/Makefile | |||
| @@ -56,7 +56,7 @@ LDFLAGS_vmlinux := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD) | |||
| 56 | CFLAGS += -msoft-float -pipe -mminimal-toc -mtraceback=none \ | 56 | CFLAGS += -msoft-float -pipe -mminimal-toc -mtraceback=none \ |
| 57 | -mcall-aixdesc | 57 | -mcall-aixdesc |
| 58 | # Temporary hack until we have migrated to asm-powerpc | 58 | # Temporary hack until we have migrated to asm-powerpc |
| 59 | CPPFLAGS += -Iinclude3 | 59 | CPPFLAGS += -Iarch/$(ARCH)/include |
| 60 | 60 | ||
| 61 | GCC_VERSION := $(call cc-version) | 61 | GCC_VERSION := $(call cc-version) |
| 62 | GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi ;) | 62 | GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi ;) |
| @@ -115,14 +115,15 @@ all: $(KBUILD_IMAGE) | |||
| 115 | 115 | ||
| 116 | archclean: | 116 | archclean: |
| 117 | $(Q)$(MAKE) $(clean)=$(boot) | 117 | $(Q)$(MAKE) $(clean)=$(boot) |
| 118 | $(Q)rm -rf include3 | 118 | # Temporary hack until we have migrated to asm-powerpc |
| 119 | $(Q)rm -rf arch/$(ARCH)/include | ||
| 119 | 120 | ||
| 120 | 121 | ||
| 121 | # Temporary hack until we have migrated to asm-powerpc | 122 | # Temporary hack until we have migrated to asm-powerpc |
| 122 | include/asm: include3/asm | 123 | include/asm: arch/$(ARCH)/include/asm |
| 123 | include3/asm: | 124 | arch/$(ARCH)/include/asm: |
| 124 | $(Q)if [ ! -d include3 ]; then mkdir -p include3; fi; | 125 | $(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi |
| 125 | $(Q)ln -fsn $(srctree)/include/asm-powerpc include3/asm | 126 | $(Q)ln -fsn $(srctree)/include/asm-powerpc arch/$(ARCH)/include/asm |
| 126 | 127 | ||
| 127 | define archhelp | 128 | define archhelp |
| 128 | echo ' zImage.vmode - Compressed kernel image (arch/$(ARCH)/boot/zImage.vmode)' | 129 | echo ' zImage.vmode - Compressed kernel image (arch/$(ARCH)/boot/zImage.vmode)' |
diff --git a/arch/ppc64/kernel/cpu_setup_power4.S b/arch/ppc64/kernel/cpu_setup_power4.S index bfce609e8e6b..1fb673c511ff 100644 --- a/arch/ppc64/kernel/cpu_setup_power4.S +++ b/arch/ppc64/kernel/cpu_setup_power4.S | |||
| @@ -12,7 +12,6 @@ | |||
| 12 | #include <linux/config.h> | 12 | #include <linux/config.h> |
| 13 | #include <asm/processor.h> | 13 | #include <asm/processor.h> |
| 14 | #include <asm/page.h> | 14 | #include <asm/page.h> |
| 15 | #include <asm/ppc_asm.h> | ||
| 16 | #include <asm/cputable.h> | 15 | #include <asm/cputable.h> |
| 17 | #include <asm/ppc_asm.h> | 16 | #include <asm/ppc_asm.h> |
| 18 | #include <asm/asm-offsets.h> | 17 | #include <asm/asm-offsets.h> |
diff --git a/arch/ppc64/lib/dec_and_lock.c b/arch/ppc64/lib/dec_and_lock.c index 6e8d8591708c..7b9d4da5cf92 100644 --- a/arch/ppc64/lib/dec_and_lock.c +++ b/arch/ppc64/lib/dec_and_lock.c | |||
| @@ -20,14 +20,7 @@ | |||
| 20 | * has a cmpxchg, and where atomic->value is an int holding | 20 | * has a cmpxchg, and where atomic->value is an int holding |
| 21 | * the value of the atomic (i.e. the high bits aren't used | 21 | * the value of the atomic (i.e. the high bits aren't used |
| 22 | * for a lock or anything like that). | 22 | * for a lock or anything like that). |
| 23 | * | ||
| 24 | * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h | ||
| 25 | * if spinlocks are empty and thus atomic_dec_and_lock is defined | ||
| 26 | * to be atomic_dec_and_test - in that case we don't need it | ||
| 27 | * defined here as well. | ||
| 28 | */ | 23 | */ |
| 29 | |||
| 30 | #ifndef ATOMIC_DEC_AND_LOCK | ||
| 31 | int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) | 24 | int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) |
| 32 | { | 25 | { |
| 33 | int counter; | 26 | int counter; |
| @@ -52,4 +45,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) | |||
| 52 | } | 45 | } |
| 53 | 46 | ||
| 54 | EXPORT_SYMBOL(_atomic_dec_and_lock); | 47 | EXPORT_SYMBOL(_atomic_dec_and_lock); |
| 55 | #endif /* ATOMIC_DEC_AND_LOCK */ | ||
diff --git a/arch/ppc64/lib/locks.c b/arch/ppc64/lib/locks.c index ef70ef91abe2..033643ab69e0 100644 --- a/arch/ppc64/lib/locks.c +++ b/arch/ppc64/lib/locks.c | |||
| @@ -23,12 +23,12 @@ | |||
| 23 | /* waiting for a spinlock... */ | 23 | /* waiting for a spinlock... */ |
| 24 | #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) | 24 | #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) |
| 25 | 25 | ||
| 26 | void __spin_yield(spinlock_t *lock) | 26 | void __spin_yield(raw_spinlock_t *lock) |
| 27 | { | 27 | { |
| 28 | unsigned int lock_value, holder_cpu, yield_count; | 28 | unsigned int lock_value, holder_cpu, yield_count; |
| 29 | struct paca_struct *holder_paca; | 29 | struct paca_struct *holder_paca; |
| 30 | 30 | ||
| 31 | lock_value = lock->lock; | 31 | lock_value = lock->slock; |
| 32 | if (lock_value == 0) | 32 | if (lock_value == 0) |
| 33 | return; | 33 | return; |
| 34 | holder_cpu = lock_value & 0xffff; | 34 | holder_cpu = lock_value & 0xffff; |
| @@ -38,7 +38,7 @@ void __spin_yield(spinlock_t *lock) | |||
| 38 | if ((yield_count & 1) == 0) | 38 | if ((yield_count & 1) == 0) |
| 39 | return; /* virtual cpu is currently running */ | 39 | return; /* virtual cpu is currently running */ |
| 40 | rmb(); | 40 | rmb(); |
| 41 | if (lock->lock != lock_value) | 41 | if (lock->slock != lock_value) |
| 42 | return; /* something has changed */ | 42 | return; /* something has changed */ |
| 43 | #ifdef CONFIG_PPC_ISERIES | 43 | #ifdef CONFIG_PPC_ISERIES |
| 44 | HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, | 44 | HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, |
| @@ -54,7 +54,7 @@ void __spin_yield(spinlock_t *lock) | |||
| 54 | * This turns out to be the same for read and write locks, since | 54 | * This turns out to be the same for read and write locks, since |
| 55 | * we only know the holder if it is write-locked. | 55 | * we only know the holder if it is write-locked. |
| 56 | */ | 56 | */ |
| 57 | void __rw_yield(rwlock_t *rw) | 57 | void __rw_yield(raw_rwlock_t *rw) |
| 58 | { | 58 | { |
| 59 | int lock_value; | 59 | int lock_value; |
| 60 | unsigned int holder_cpu, yield_count; | 60 | unsigned int holder_cpu, yield_count; |
| @@ -82,9 +82,9 @@ void __rw_yield(rwlock_t *rw) | |||
| 82 | } | 82 | } |
| 83 | #endif | 83 | #endif |
| 84 | 84 | ||
| 85 | void spin_unlock_wait(spinlock_t *lock) | 85 | void __raw_spin_unlock_wait(raw_spinlock_t *lock) |
| 86 | { | 86 | { |
| 87 | while (lock->lock) { | 87 | while (lock->slock) { |
| 88 | HMT_low(); | 88 | HMT_low(); |
| 89 | if (SHARED_PROCESSOR) | 89 | if (SHARED_PROCESSOR) |
| 90 | __spin_yield(lock); | 90 | __spin_yield(lock); |
| @@ -92,4 +92,4 @@ void spin_unlock_wait(spinlock_t *lock) | |||
| 92 | HMT_medium(); | 92 | HMT_medium(); |
| 93 | } | 93 | } |
| 94 | 94 | ||
| 95 | EXPORT_SYMBOL(spin_unlock_wait); | 95 | EXPORT_SYMBOL(__raw_spin_unlock_wait); |
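Behind the type rename (spinlock_t to raw_spinlock_t, ->lock to ->slock) sits the interesting part of this file: on shared-processor LPARs the ppc64 lock word encodes the holder's CPU id, so a spinner can ask the hypervisor to run the preempted holder instead of burning its own timeslice. The decision logic, sketched with hypothetical helpers for the paca lookup and the hypervisor call:

    void spin_yield_sketch(raw_spinlock_t *lock)
    {
        unsigned int val = lock->slock;         /* field renamed from ->lock */
        unsigned int holder_cpu = val & 0xffff; /* holder id in the low bits */
        unsigned int yield_count;

        if (val == 0)
            return;                         /* free: just retry the acquire */

        yield_count = paca_yield_count(holder_cpu); /* hypothetical */
        if ((yield_count & 1) == 0)
            return;                         /* even: holder's vcpu is running */
        rmb();
        if (lock->slock != val)
            return;                         /* lock changed hands meanwhile */

        hv_yield_to(holder_cpu, yield_count); /* hypothetical hcall wrapper */
    }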
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index 888b5596c195..2dc14e9c8327 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c | |||
| @@ -36,7 +36,7 @@ _diag44(void) | |||
| 36 | } | 36 | } |
| 37 | 37 | ||
| 38 | void | 38 | void |
| 39 | _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc) | 39 | _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc) |
| 40 | { | 40 | { |
| 41 | int count = spin_retry; | 41 | int count = spin_retry; |
| 42 | 42 | ||
| @@ -53,7 +53,7 @@ _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc) | |||
| 53 | EXPORT_SYMBOL(_raw_spin_lock_wait); | 53 | EXPORT_SYMBOL(_raw_spin_lock_wait); |
| 54 | 54 | ||
| 55 | int | 55 | int |
| 56 | _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc) | 56 | _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc) |
| 57 | { | 57 | { |
| 58 | int count = spin_retry; | 58 | int count = spin_retry; |
| 59 | 59 | ||
| @@ -67,7 +67,7 @@ _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc) | |||
| 67 | EXPORT_SYMBOL(_raw_spin_trylock_retry); | 67 | EXPORT_SYMBOL(_raw_spin_trylock_retry); |
| 68 | 68 | ||
| 69 | void | 69 | void |
| 70 | _raw_read_lock_wait(rwlock_t *rw) | 70 | _raw_read_lock_wait(raw_rwlock_t *rw) |
| 71 | { | 71 | { |
| 72 | unsigned int old; | 72 | unsigned int old; |
| 73 | int count = spin_retry; | 73 | int count = spin_retry; |
| @@ -86,7 +86,7 @@ _raw_read_lock_wait(rwlock_t *rw) | |||
| 86 | EXPORT_SYMBOL(_raw_read_lock_wait); | 86 | EXPORT_SYMBOL(_raw_read_lock_wait); |
| 87 | 87 | ||
| 88 | int | 88 | int |
| 89 | _raw_read_trylock_retry(rwlock_t *rw) | 89 | _raw_read_trylock_retry(raw_rwlock_t *rw) |
| 90 | { | 90 | { |
| 91 | unsigned int old; | 91 | unsigned int old; |
| 92 | int count = spin_retry; | 92 | int count = spin_retry; |
| @@ -102,7 +102,7 @@ _raw_read_trylock_retry(rwlock_t *rw) | |||
| 102 | EXPORT_SYMBOL(_raw_read_trylock_retry); | 102 | EXPORT_SYMBOL(_raw_read_trylock_retry); |
| 103 | 103 | ||
| 104 | void | 104 | void |
| 105 | _raw_write_lock_wait(rwlock_t *rw) | 105 | _raw_write_lock_wait(raw_rwlock_t *rw) |
| 106 | { | 106 | { |
| 107 | int count = spin_retry; | 107 | int count = spin_retry; |
| 108 | 108 | ||
| @@ -119,7 +119,7 @@ _raw_write_lock_wait(rwlock_t *rw) | |||
| 119 | EXPORT_SYMBOL(_raw_write_lock_wait); | 119 | EXPORT_SYMBOL(_raw_write_lock_wait); |
| 120 | 120 | ||
| 121 | int | 121 | int |
| 122 | _raw_write_trylock_retry(rwlock_t *rw) | 122 | _raw_write_trylock_retry(raw_rwlock_t *rw) |
| 123 | { | 123 | { |
| 124 | int count = spin_retry; | 124 | int count = spin_retry; |
| 125 | 125 | ||
diff --git a/arch/sh/boards/adx/irq_maskreg.c b/arch/sh/boards/adx/irq_maskreg.c index ca91bb0f1f5c..c0973f8d57ba 100644 --- a/arch/sh/boards/adx/irq_maskreg.c +++ b/arch/sh/boards/adx/irq_maskreg.c | |||
| @@ -37,13 +37,13 @@ static void end_maskreg_irq(unsigned int irq); | |||
| 37 | 37 | ||
| 38 | /* hw_interrupt_type */ | 38 | /* hw_interrupt_type */ |
| 39 | static struct hw_interrupt_type maskreg_irq_type = { | 39 | static struct hw_interrupt_type maskreg_irq_type = { |
| 40 | " Mask Register", | 40 | .typename = " Mask Register", |
| 41 | startup_maskreg_irq, | 41 | .startup = startup_maskreg_irq, |
| 42 | shutdown_maskreg_irq, | 42 | .shutdown = shutdown_maskreg_irq, |
| 43 | enable_maskreg_irq, | 43 | .enable = enable_maskreg_irq, |
| 44 | disable_maskreg_irq, | 44 | .disable = disable_maskreg_irq, |
| 45 | mask_and_ack_maskreg, | 45 | .ack = mask_and_ack_maskreg, |
| 46 | end_maskreg_irq | 46 | .end = end_maskreg_irq |
| 47 | }; | 47 | }; |
| 48 | 48 | ||
| 49 | /* actual implementation */ | 49 | /* actual implementation */ |
diff --git a/arch/sh/boards/bigsur/io.c b/arch/sh/boards/bigsur/io.c index 697144de7419..a9fde781b21a 100644 --- a/arch/sh/boards/bigsur/io.c +++ b/arch/sh/boards/bigsur/io.c | |||
| @@ -37,10 +37,6 @@ static u8 bigsur_iomap_lo_shift[BIGSUR_IOMAP_LO_NMAP]; | |||
| 37 | static u32 bigsur_iomap_hi[BIGSUR_IOMAP_HI_NMAP]; | 37 | static u32 bigsur_iomap_hi[BIGSUR_IOMAP_HI_NMAP]; |
| 38 | static u8 bigsur_iomap_hi_shift[BIGSUR_IOMAP_HI_NMAP]; | 38 | static u8 bigsur_iomap_hi_shift[BIGSUR_IOMAP_HI_NMAP]; |
| 39 | 39 | ||
| 40 | #ifndef MAX | ||
| 41 | #define MAX(a,b) ((a)>(b)?(a):(b)) | ||
| 42 | #endif | ||
| 43 | |||
| 44 | void bigsur_port_map(u32 baseport, u32 nports, u32 addr, u8 shift) | 40 | void bigsur_port_map(u32 baseport, u32 nports, u32 addr, u8 shift) |
| 45 | { | 41 | { |
| 46 | u32 port, endport = baseport + nports; | 42 | u32 port, endport = baseport + nports; |
| @@ -57,7 +53,7 @@ void bigsur_port_map(u32 baseport, u32 nports, u32 addr, u8 shift) | |||
| 57 | addr += (1<<(BIGSUR_IOMAP_LO_SHIFT)); | 53 | addr += (1<<(BIGSUR_IOMAP_LO_SHIFT)); |
| 58 | } | 54 | } |
| 59 | 55 | ||
| 60 | for (port = MAX(baseport, BIGSUR_IOMAP_LO_THRESH) ; | 56 | for (port = max_t(u32, baseport, BIGSUR_IOMAP_LO_THRESH); |
| 61 | port < endport && port < BIGSUR_IOMAP_HI_THRESH ; | 57 | port < endport && port < BIGSUR_IOMAP_HI_THRESH ; |
| 62 | port += (1<<BIGSUR_IOMAP_HI_SHIFT)) { | 58 | port += (1<<BIGSUR_IOMAP_HI_SHIFT)) { |
| 63 | pr_debug(" maphi[0x%x] = 0x%08x\n", port, addr); | 59 | pr_debug(" maphi[0x%x] = 0x%08x\n", port, addr); |
| @@ -80,7 +76,7 @@ void bigsur_port_unmap(u32 baseport, u32 nports) | |||
| 80 | bigsur_iomap_lo[port>>BIGSUR_IOMAP_LO_SHIFT] = 0; | 76 | bigsur_iomap_lo[port>>BIGSUR_IOMAP_LO_SHIFT] = 0; |
| 81 | } | 77 | } |
| 82 | 78 | ||
| 83 | for (port = MAX(baseport, BIGSUR_IOMAP_LO_THRESH) ; | 79 | for (port = max_t(u32, baseport, BIGSUR_IOMAP_LO_THRESH); |
| 84 | port < endport && port < BIGSUR_IOMAP_HI_THRESH ; | 80 | port < endport && port < BIGSUR_IOMAP_HI_THRESH ; |
| 85 | port += (1<<BIGSUR_IOMAP_HI_SHIFT)) { | 81 | port += (1<<BIGSUR_IOMAP_HI_SHIFT)) { |
| 86 | bigsur_iomap_hi[port>>BIGSUR_IOMAP_HI_SHIFT] = 0; | 82 | bigsur_iomap_hi[port>>BIGSUR_IOMAP_HI_SHIFT] = 0; |
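The open-coded MAX() macro is dropped in favor of the kernel-wide max_t(), which casts both operands to an explicit type and evaluates each only once. Roughly, simplified from linux/kernel.h of this era:

    #define max_t(type, x, y) ({ \
        type __x = (x);          \
        type __y = (y);          \
        __x > __y ? __x : __y; })

Compared with ((a)>(b)?(a):(b)), this avoids double evaluation of side-effecting arguments and makes the comparison type explicit, which matters here because baseport and the *_LO_THRESH constants need not share a type or signedness.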
diff --git a/arch/sh/boards/bigsur/irq.c b/arch/sh/boards/bigsur/irq.c index c188fc32dc9a..6ddbcc77244d 100644 --- a/arch/sh/boards/bigsur/irq.c +++ b/arch/sh/boards/bigsur/irq.c | |||
| @@ -228,23 +228,23 @@ static void shutdown_bigsur_irq(unsigned int irq) | |||
| 228 | 228 | ||
| 229 | /* Define the IRQ structures for the L1 and L2 IRQ types */ | 229 | /* Define the IRQ structures for the L1 and L2 IRQ types */ |
| 230 | static struct hw_interrupt_type bigsur_l1irq_type = { | 230 | static struct hw_interrupt_type bigsur_l1irq_type = { |
| 231 | "BigSur-CPLD-Level1-IRQ", | 231 | .typename = "BigSur-CPLD-Level1-IRQ", |
| 232 | startup_bigsur_irq, | 232 | .startup = startup_bigsur_irq, |
| 233 | shutdown_bigsur_irq, | 233 | .shutdown = shutdown_bigsur_irq, |
| 234 | enable_bigsur_l1irq, | 234 | .enable = enable_bigsur_l1irq, |
| 235 | disable_bigsur_l1irq, | 235 | .disable = disable_bigsur_l1irq, |
| 236 | mask_and_ack_bigsur, | 236 | .ack = mask_and_ack_bigsur, |
| 237 | end_bigsur_irq | 237 | .end = end_bigsur_irq |
| 238 | }; | 238 | }; |
| 239 | 239 | ||
| 240 | static struct hw_interrupt_type bigsur_l2irq_type = { | 240 | static struct hw_interrupt_type bigsur_l2irq_type = { |
| 241 | "BigSur-CPLD-Level2-IRQ", | 241 | .typename = "BigSur-CPLD-Level2-IRQ", |
| 242 | startup_bigsur_irq, | 242 | .startup = startup_bigsur_irq, |
| 243 | shutdown_bigsur_irq, | 243 | .shutdown = shutdown_bigsur_irq, |
| 244 | enable_bigsur_l2irq, | 244 | .enable = enable_bigsur_l2irq, |
| 245 | disable_bigsur_l2irq, | 245 | .disable = disable_bigsur_l2irq, |
| 246 | mask_and_ack_bigsur, | 246 | .ack = mask_and_ack_bigsur, |
| 247 | end_bigsur_irq | 247 | .end = end_bigsur_irq |
| 248 | }; | 248 | }; |
| 249 | 249 | ||
| 250 | 250 | ||
diff --git a/arch/sh/boards/cqreek/irq.c b/arch/sh/boards/cqreek/irq.c index fa6cfe5a20a7..d1da0d844567 100644 --- a/arch/sh/boards/cqreek/irq.c +++ b/arch/sh/boards/cqreek/irq.c | |||
| @@ -83,13 +83,13 @@ static void shutdown_cqreek_irq(unsigned int irq) | |||
| 83 | } | 83 | } |
| 84 | 84 | ||
| 85 | static struct hw_interrupt_type cqreek_irq_type = { | 85 | static struct hw_interrupt_type cqreek_irq_type = { |
| 86 | "CqREEK-IRQ", | 86 | .typename = "CqREEK-IRQ", |
| 87 | startup_cqreek_irq, | 87 | .startup = startup_cqreek_irq, |
| 88 | shutdown_cqreek_irq, | 88 | .shutdown = shutdown_cqreek_irq, |
| 89 | enable_cqreek_irq, | 89 | .enable = enable_cqreek_irq, |
| 90 | disable_cqreek_irq, | 90 | .disable = disable_cqreek_irq, |
| 91 | mask_and_ack_cqreek, | 91 | .ack = mask_and_ack_cqreek, |
| 92 | end_cqreek_irq | 92 | .end = end_cqreek_irq |
| 93 | }; | 93 | }; |
| 94 | 94 | ||
| 95 | int cqreek_has_ide, cqreek_has_isa; | 95 | int cqreek_has_ide, cqreek_has_isa; |
diff --git a/arch/sh/boards/harp/irq.c b/arch/sh/boards/harp/irq.c index acd58489970f..52d0ba39031b 100644 --- a/arch/sh/boards/harp/irq.c +++ b/arch/sh/boards/harp/irq.c | |||
| @@ -39,13 +39,13 @@ static unsigned int startup_harp_irq(unsigned int irq) | |||
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | static struct hw_interrupt_type harp_irq_type = { | 41 | static struct hw_interrupt_type harp_irq_type = { |
| 42 | "Harp-IRQ", | 42 | .typename = "Harp-IRQ", |
| 43 | startup_harp_irq, | 43 | .startup = startup_harp_irq, |
| 44 | shutdown_harp_irq, | 44 | .shutdown = shutdown_harp_irq, |
| 45 | enable_harp_irq, | 45 | .enable = enable_harp_irq, |
| 46 | disable_harp_irq, | 46 | .disable = disable_harp_irq, |
| 47 | mask_and_ack_harp, | 47 | .ack = mask_and_ack_harp, |
| 48 | end_harp_irq | 48 | .end = end_harp_irq |
| 49 | }; | 49 | }; |
| 50 | 50 | ||
| 51 | static void disable_harp_irq(unsigned int irq) | 51 | static void disable_harp_irq(unsigned int irq) |
diff --git a/arch/sh/boards/overdrive/irq.c b/arch/sh/boards/overdrive/irq.c index 23adc6be71e7..715e8feb3a68 100644 --- a/arch/sh/boards/overdrive/irq.c +++ b/arch/sh/boards/overdrive/irq.c | |||
| @@ -86,13 +86,13 @@ static unsigned int startup_od_irq(unsigned int irq) | |||
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | static struct hw_interrupt_type od_irq_type = { | 88 | static struct hw_interrupt_type od_irq_type = { |
| 89 | "Overdrive-IRQ", | 89 | .typename = "Overdrive-IRQ", |
| 90 | startup_od_irq, | 90 | .startup = startup_od_irq, |
| 91 | shutdown_od_irq, | 91 | .shutdown = shutdown_od_irq, |
| 92 | enable_od_irq, | 92 | .enable = enable_od_irq, |
| 93 | disable_od_irq, | 93 | .disable = disable_od_irq, |
| 94 | mask_and_ack_od, | 94 | .ack = mask_and_ack_od, |
| 95 | end_od_irq | 95 | .end = end_od_irq |
| 96 | }; | 96 | }; |
| 97 | 97 | ||
| 98 | static void disable_od_irq(unsigned int irq) | 98 | static void disable_od_irq(unsigned int irq) |
diff --git a/arch/sh/boards/renesas/hs7751rvoip/irq.c b/arch/sh/boards/renesas/hs7751rvoip/irq.c index a7921f67a35f..ed4c5b50ea45 100644 --- a/arch/sh/boards/renesas/hs7751rvoip/irq.c +++ b/arch/sh/boards/renesas/hs7751rvoip/irq.c | |||
| @@ -74,13 +74,13 @@ static void end_hs7751rvoip_irq(unsigned int irq) | |||
| 74 | } | 74 | } |
| 75 | 75 | ||
| 76 | static struct hw_interrupt_type hs7751rvoip_irq_type = { | 76 | static struct hw_interrupt_type hs7751rvoip_irq_type = { |
| 77 | "HS7751RVoIP IRQ", | 77 | .typename = "HS7751RVoIP IRQ", |
| 78 | startup_hs7751rvoip_irq, | 78 | .startup = startup_hs7751rvoip_irq, |
| 79 | shutdown_hs7751rvoip_irq, | 79 | .shutdown = shutdown_hs7751rvoip_irq, |
| 80 | enable_hs7751rvoip_irq, | 80 | .enable = enable_hs7751rvoip_irq, |
| 81 | disable_hs7751rvoip_irq, | 81 | .disable = disable_hs7751rvoip_irq, |
| 82 | ack_hs7751rvoip_irq, | 82 | .ack = ack_hs7751rvoip_irq, |
| 83 | end_hs7751rvoip_irq, | 83 | .end = end_hs7751rvoip_irq, |
| 84 | }; | 84 | }; |
| 85 | 85 | ||
| 86 | static void make_hs7751rvoip_irq(unsigned int irq) | 86 | static void make_hs7751rvoip_irq(unsigned int irq) |
diff --git a/arch/sh/boards/renesas/rts7751r2d/irq.c b/arch/sh/boards/renesas/rts7751r2d/irq.c index 95717f4f1e2d..d36c9374aed1 100644 --- a/arch/sh/boards/renesas/rts7751r2d/irq.c +++ b/arch/sh/boards/renesas/rts7751r2d/irq.c | |||
| @@ -88,13 +88,13 @@ static void end_rts7751r2d_irq(unsigned int irq) | |||
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | static struct hw_interrupt_type rts7751r2d_irq_type = { | 90 | static struct hw_interrupt_type rts7751r2d_irq_type = { |
| 91 | "RTS7751R2D IRQ", | 91 | .typename = "RTS7751R2D IRQ", |
| 92 | startup_rts7751r2d_irq, | 92 | .startup = startup_rts7751r2d_irq, |
| 93 | shutdown_rts7751r2d_irq, | 93 | .shutdown = shutdown_rts7751r2d_irq, |
| 94 | enable_rts7751r2d_irq, | 94 | .enable = enable_rts7751r2d_irq, |
| 95 | disable_rts7751r2d_irq, | 95 | .disable = disable_rts7751r2d_irq, |
| 96 | ack_rts7751r2d_irq, | 96 | .ack = ack_rts7751r2d_irq, |
| 97 | end_rts7751r2d_irq, | 97 | .end = end_rts7751r2d_irq, |
| 98 | }; | 98 | }; |
| 99 | 99 | ||
| 100 | static void make_rts7751r2d_irq(unsigned int irq) | 100 | static void make_rts7751r2d_irq(unsigned int irq) |
diff --git a/arch/sh/boards/renesas/systemh/irq.c b/arch/sh/boards/renesas/systemh/irq.c index 5675a4134eee..7a2eb10edb56 100644 --- a/arch/sh/boards/renesas/systemh/irq.c +++ b/arch/sh/boards/renesas/systemh/irq.c | |||
| @@ -35,13 +35,13 @@ static void end_systemh_irq(unsigned int irq); | |||
| 35 | 35 | ||
| 36 | /* hw_interrupt_type */ | 36 | /* hw_interrupt_type */ |
| 37 | static struct hw_interrupt_type systemh_irq_type = { | 37 | static struct hw_interrupt_type systemh_irq_type = { |
| 38 | " SystemH Register", | 38 | .typename = " SystemH Register", |
| 39 | startup_systemh_irq, | 39 | .startup = startup_systemh_irq, |
| 40 | shutdown_systemh_irq, | 40 | .shutdown = shutdown_systemh_irq, |
| 41 | enable_systemh_irq, | 41 | .enable = enable_systemh_irq, |
| 42 | disable_systemh_irq, | 42 | .disable = disable_systemh_irq, |
| 43 | mask_and_ack_systemh, | 43 | .ack = mask_and_ack_systemh, |
| 44 | end_systemh_irq | 44 | .end = end_systemh_irq |
| 45 | }; | 45 | }; |
| 46 | 46 | ||
| 47 | static unsigned int startup_systemh_irq(unsigned int irq) | 47 | static unsigned int startup_systemh_irq(unsigned int irq) |
diff --git a/arch/sh/boards/superh/microdev/irq.c b/arch/sh/boards/superh/microdev/irq.c index 1298883eca4b..1395c1e65da4 100644 --- a/arch/sh/boards/superh/microdev/irq.c +++ b/arch/sh/boards/superh/microdev/irq.c | |||
| @@ -83,13 +83,13 @@ static unsigned int startup_microdev_irq(unsigned int irq) | |||
| 83 | } | 83 | } |
| 84 | 84 | ||
| 85 | static struct hw_interrupt_type microdev_irq_type = { | 85 | static struct hw_interrupt_type microdev_irq_type = { |
| 86 | "MicroDev-IRQ", | 86 | .typename = "MicroDev-IRQ", |
| 87 | startup_microdev_irq, | 87 | .startup = startup_microdev_irq, |
| 88 | shutdown_microdev_irq, | 88 | .shutdown = shutdown_microdev_irq, |
| 89 | enable_microdev_irq, | 89 | .enable = enable_microdev_irq, |
| 90 | disable_microdev_irq, | 90 | .disable = disable_microdev_irq, |
| 91 | mask_and_ack_microdev, | 91 | .ack = mask_and_ack_microdev, |
| 92 | end_microdev_irq | 92 | .end = end_microdev_irq |
| 93 | }; | 93 | }; |
| 94 | 94 | ||
| 95 | static void disable_microdev_irq(unsigned int irq) | 95 | static void disable_microdev_irq(unsigned int irq) |
diff --git a/arch/sh/cchips/hd6446x/hd64465/io.c b/arch/sh/cchips/hd6446x/hd64465/io.c index 99ac709c550e..84cb142def0b 100644 --- a/arch/sh/cchips/hd6446x/hd64465/io.c +++ b/arch/sh/cchips/hd6446x/hd64465/io.c | |||
| @@ -48,10 +48,6 @@ static unsigned char hd64465_iomap_lo_shift[HD64465_IOMAP_LO_NMAP]; | |||
| 48 | static unsigned long hd64465_iomap_hi[HD64465_IOMAP_HI_NMAP]; | 48 | static unsigned long hd64465_iomap_hi[HD64465_IOMAP_HI_NMAP]; |
| 49 | static unsigned char hd64465_iomap_hi_shift[HD64465_IOMAP_HI_NMAP]; | 49 | static unsigned char hd64465_iomap_hi_shift[HD64465_IOMAP_HI_NMAP]; |
| 50 | 50 | ||
| 51 | #ifndef MAX | ||
| 52 | #define MAX(a,b) ((a)>(b)?(a):(b)) | ||
| 53 | #endif | ||
| 54 | |||
| 55 | #define PORT2ADDR(x) (sh_mv.mv_isa_port2addr(x)) | 51 | #define PORT2ADDR(x) (sh_mv.mv_isa_port2addr(x)) |
| 56 | 52 | ||
| 57 | void hd64465_port_map(unsigned short baseport, unsigned int nports, | 53 | void hd64465_port_map(unsigned short baseport, unsigned int nports, |
| @@ -71,7 +67,7 @@ void hd64465_port_map(unsigned short baseport, unsigned int nports, | |||
| 71 | addr += (1<<(HD64465_IOMAP_LO_SHIFT)); | 67 | addr += (1<<(HD64465_IOMAP_LO_SHIFT)); |
| 72 | } | 68 | } |
| 73 | 69 | ||
| 74 | for (port = MAX(baseport, HD64465_IOMAP_LO_THRESH) ; | 70 | for (port = max_t(unsigned int, baseport, HD64465_IOMAP_LO_THRESH); |
| 75 | port < endport && port < HD64465_IOMAP_HI_THRESH ; | 71 | port < endport && port < HD64465_IOMAP_HI_THRESH ; |
| 76 | port += (1<<HD64465_IOMAP_HI_SHIFT)) { | 72 | port += (1<<HD64465_IOMAP_HI_SHIFT)) { |
| 77 | DPRINTK(" maphi[0x%x] = 0x%08lx\n", port, addr); | 73 | DPRINTK(" maphi[0x%x] = 0x%08lx\n", port, addr); |
| @@ -95,7 +91,7 @@ void hd64465_port_unmap(unsigned short baseport, unsigned int nports) | |||
| 95 | hd64465_iomap_lo[port>>HD64465_IOMAP_LO_SHIFT] = 0; | 91 | hd64465_iomap_lo[port>>HD64465_IOMAP_LO_SHIFT] = 0; |
| 96 | } | 92 | } |
| 97 | 93 | ||
| 98 | for (port = MAX(baseport, HD64465_IOMAP_LO_THRESH) ; | 94 | for (port = max_t(unsigned int, baseport, HD64465_IOMAP_LO_THRESH); |
| 99 | port < endport && port < HD64465_IOMAP_HI_THRESH ; | 95 | port < endport && port < HD64465_IOMAP_HI_THRESH ; |
| 100 | port += (1<<HD64465_IOMAP_HI_SHIFT)) { | 96 | port += (1<<HD64465_IOMAP_HI_SHIFT)) { |
| 101 | hd64465_iomap_hi[port>>HD64465_IOMAP_HI_SHIFT] = 0; | 97 | hd64465_iomap_hi[port>>HD64465_IOMAP_HI_SHIFT] = 0; |
diff --git a/arch/sh/cchips/voyagergx/irq.c b/arch/sh/cchips/voyagergx/irq.c index 3079234cb65b..1b6ac523b458 100644 --- a/arch/sh/cchips/voyagergx/irq.c +++ b/arch/sh/cchips/voyagergx/irq.c | |||
| @@ -87,13 +87,13 @@ static void shutdown_voyagergx_irq(unsigned int irq) | |||
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | static struct hw_interrupt_type voyagergx_irq_type = { | 89 | static struct hw_interrupt_type voyagergx_irq_type = { |
| 90 | "VOYAGERGX-IRQ", | 90 | .typename = "VOYAGERGX-IRQ", |
| 91 | startup_voyagergx_irq, | 91 | .startup = startup_voyagergx_irq, |
| 92 | shutdown_voyagergx_irq, | 92 | .shutdown = shutdown_voyagergx_irq, |
| 93 | enable_voyagergx_irq, | 93 | .enable = enable_voyagergx_irq, |
| 94 | disable_voyagergx_irq, | 94 | .disable = disable_voyagergx_irq, |
| 95 | mask_and_ack_voyagergx, | 95 | .ack = mask_and_ack_voyagergx, |
| 96 | end_voyagergx_irq, | 96 | .end = end_voyagergx_irq, |
| 97 | }; | 97 | }; |
| 98 | 98 | ||
| 99 | static irqreturn_t voyagergx_interrupt(int irq, void *dev_id, struct pt_regs *regs) | 99 | static irqreturn_t voyagergx_interrupt(int irq, void *dev_id, struct pt_regs *regs) |
diff --git a/arch/sh/kernel/cpu/irq_imask.c b/arch/sh/kernel/cpu/irq_imask.c index f76901e732fb..a963d00a971e 100644 --- a/arch/sh/kernel/cpu/irq_imask.c +++ b/arch/sh/kernel/cpu/irq_imask.c | |||
| @@ -46,13 +46,13 @@ static unsigned int startup_imask_irq(unsigned int irq) | |||
| 46 | } | 46 | } |
| 47 | 47 | ||
| 48 | static struct hw_interrupt_type imask_irq_type = { | 48 | static struct hw_interrupt_type imask_irq_type = { |
| 49 | "SR.IMASK", | 49 | .typename = "SR.IMASK", |
| 50 | startup_imask_irq, | 50 | .startup = startup_imask_irq, |
| 51 | shutdown_imask_irq, | 51 | .shutdown = shutdown_imask_irq, |
| 52 | enable_imask_irq, | 52 | .enable = enable_imask_irq, |
| 53 | disable_imask_irq, | 53 | .disable = disable_imask_irq, |
| 54 | mask_and_ack_imask, | 54 | .ack = mask_and_ack_imask, |
| 55 | end_imask_irq | 55 | .end = end_imask_irq |
| 56 | }; | 56 | }; |
| 57 | 57 | ||
| 58 | static inline void set_interrupt_registers(int ip) | 58 | static inline void set_interrupt_registers(int ip) |
diff --git a/arch/sh/kernel/cpu/irq_ipr.c b/arch/sh/kernel/cpu/irq_ipr.c index 7ea3d2d030e5..71f92096132b 100644 --- a/arch/sh/kernel/cpu/irq_ipr.c +++ b/arch/sh/kernel/cpu/irq_ipr.c | |||
| @@ -48,13 +48,13 @@ static unsigned int startup_ipr_irq(unsigned int irq) | |||
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | static struct hw_interrupt_type ipr_irq_type = { | 50 | static struct hw_interrupt_type ipr_irq_type = { |
| 51 | "IPR-IRQ", | 51 | .typename = "IPR-IRQ", |
| 52 | startup_ipr_irq, | 52 | .startup = startup_ipr_irq, |
| 53 | shutdown_ipr_irq, | 53 | .shutdown = shutdown_ipr_irq, |
| 54 | enable_ipr_irq, | 54 | .enable = enable_ipr_irq, |
| 55 | disable_ipr_irq, | 55 | .disable = disable_ipr_irq, |
| 56 | mask_and_ack_ipr, | 56 | .ack = mask_and_ack_ipr, |
| 57 | end_ipr_irq | 57 | .end = end_ipr_irq |
| 58 | }; | 58 | }; |
| 59 | 59 | ||
| 60 | static void disable_ipr_irq(unsigned int irq) | 60 | static void disable_ipr_irq(unsigned int irq) |
| @@ -142,13 +142,13 @@ static unsigned int startup_pint_irq(unsigned int irq) | |||
| 142 | } | 142 | } |
| 143 | 143 | ||
| 144 | static struct hw_interrupt_type pint_irq_type = { | 144 | static struct hw_interrupt_type pint_irq_type = { |
| 145 | "PINT-IRQ", | 145 | .typename = "PINT-IRQ", |
| 146 | startup_pint_irq, | 146 | .startup = startup_pint_irq, |
| 147 | shutdown_pint_irq, | 147 | .shutdown = shutdown_pint_irq, |
| 148 | enable_pint_irq, | 148 | .enable = enable_pint_irq, |
| 149 | disable_pint_irq, | 149 | .disable = disable_pint_irq, |
| 150 | mask_and_ack_pint, | 150 | .ack = mask_and_ack_pint, |
| 151 | end_pint_irq | 151 | .end = end_pint_irq |
| 152 | }; | 152 | }; |
| 153 | 153 | ||
| 154 | static void disable_pint_irq(unsigned int irq) | 154 | static void disable_pint_irq(unsigned int irq) |
diff --git a/arch/sh/kernel/cpu/sh4/irq_intc2.c b/arch/sh/kernel/cpu/sh4/irq_intc2.c index 099ebbf89745..f6b16ba01932 100644 --- a/arch/sh/kernel/cpu/sh4/irq_intc2.c +++ b/arch/sh/kernel/cpu/sh4/irq_intc2.c | |||
| @@ -48,13 +48,13 @@ static unsigned int startup_intc2_irq(unsigned int irq) | |||
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | static struct hw_interrupt_type intc2_irq_type = { | 50 | static struct hw_interrupt_type intc2_irq_type = { |
| 51 | "INTC2-IRQ", | 51 | .typename = "INTC2-IRQ", |
| 52 | startup_intc2_irq, | 52 | .startup = startup_intc2_irq, |
| 53 | shutdown_intc2_irq, | 53 | .shutdown = shutdown_intc2_irq, |
| 54 | enable_intc2_irq, | 54 | .enable = enable_intc2_irq, |
| 55 | disable_intc2_irq, | 55 | .disable = disable_intc2_irq, |
| 56 | mask_and_ack_intc2, | 56 | .ack = mask_and_ack_intc2, |
| 57 | end_intc2_irq | 57 | .end = end_intc2_irq |
| 58 | }; | 58 | }; |
| 59 | 59 | ||
| 60 | static void disable_intc2_irq(unsigned int irq) | 60 | static void disable_intc2_irq(unsigned int irq) |
diff --git a/arch/sh64/kernel/irq_intc.c b/arch/sh64/kernel/irq_intc.c index 43f88f3a78b0..fc99bf4e362c 100644 --- a/arch/sh64/kernel/irq_intc.c +++ b/arch/sh64/kernel/irq_intc.c | |||
| @@ -107,13 +107,13 @@ static void mask_and_ack_intc(unsigned int); | |||
| 107 | static void end_intc_irq(unsigned int irq); | 107 | static void end_intc_irq(unsigned int irq); |
| 108 | 108 | ||
| 109 | static struct hw_interrupt_type intc_irq_type = { | 109 | static struct hw_interrupt_type intc_irq_type = { |
| 110 | "INTC", | 110 | .typename = "INTC", |
| 111 | startup_intc_irq, | 111 | .startup = startup_intc_irq, |
| 112 | shutdown_intc_irq, | 112 | .shutdown = shutdown_intc_irq, |
| 113 | enable_intc_irq, | 113 | .enable = enable_intc_irq, |
| 114 | disable_intc_irq, | 114 | .disable = disable_intc_irq, |
| 115 | mask_and_ack_intc, | 115 | .ack = mask_and_ack_intc, |
| 116 | end_intc_irq | 116 | .end = end_intc_irq |
| 117 | }; | 117 | }; |
| 118 | 118 | ||
| 119 | static int irlm; /* IRL mode */ | 119 | static int irlm; /* IRL mode */ |
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c index 5d974a2b735a..f84809333624 100644 --- a/arch/sparc/kernel/sparc_ksyms.c +++ b/arch/sparc/kernel/sparc_ksyms.c | |||
| @@ -114,17 +114,7 @@ DOT_ALIAS2(unsigned, urem, unsigned, unsigned) | |||
| 114 | /* used by various drivers */ | 114 | /* used by various drivers */ |
| 115 | EXPORT_SYMBOL(sparc_cpu_model); | 115 | EXPORT_SYMBOL(sparc_cpu_model); |
| 116 | EXPORT_SYMBOL(kernel_thread); | 116 | EXPORT_SYMBOL(kernel_thread); |
| 117 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 118 | #ifdef CONFIG_SMP | 117 | #ifdef CONFIG_SMP |
| 119 | EXPORT_SYMBOL(_do_spin_lock); | ||
| 120 | EXPORT_SYMBOL(_do_spin_unlock); | ||
| 121 | EXPORT_SYMBOL(_spin_trylock); | ||
| 122 | EXPORT_SYMBOL(_do_read_lock); | ||
| 123 | EXPORT_SYMBOL(_do_read_unlock); | ||
| 124 | EXPORT_SYMBOL(_do_write_lock); | ||
| 125 | EXPORT_SYMBOL(_do_write_unlock); | ||
| 126 | #endif | ||
| 127 | #else | ||
| 128 | // XXX find what uses (or used) these. | 118 | // XXX find what uses (or used) these. |
| 129 | EXPORT_SYMBOL(___rw_read_enter); | 119 | EXPORT_SYMBOL(___rw_read_enter); |
| 130 | EXPORT_SYMBOL(___rw_read_exit); | 120 | EXPORT_SYMBOL(___rw_read_exit); |
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index 2296ff9dc47a..fa5006946062 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile | |||
| @@ -9,5 +9,3 @@ lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \ | |||
| 9 | strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \ | 9 | strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \ |
| 10 | copy_user.o locks.o atomic.o atomic32.o bitops.o \ | 10 | copy_user.o locks.o atomic.o atomic32.o bitops.o \ |
| 11 | lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o | 11 | lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o |
| 12 | |||
| 13 | lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o | ||
diff --git a/arch/sparc/lib/debuglocks.c b/arch/sparc/lib/debuglocks.c deleted file mode 100644 index fb182352782c..000000000000 --- a/arch/sparc/lib/debuglocks.c +++ /dev/null | |||
| @@ -1,202 +0,0 @@ | |||
| 1 | /* $Id: debuglocks.c,v 1.11 2001/09/20 00:35:31 davem Exp $ | ||
| 2 | * debuglocks.c: Debugging versions of SMP locking primitives. | ||
| 3 | * | ||
| 4 | * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) | ||
| 5 | * Copyright (C) 1998-99 Anton Blanchard (anton@progsoc.uts.edu.au) | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include <linux/kernel.h> | ||
| 9 | #include <linux/sched.h> | ||
| 10 | #include <linux/threads.h> /* For NR_CPUS */ | ||
| 11 | #include <linux/spinlock.h> | ||
| 12 | #include <asm/psr.h> | ||
| 13 | #include <asm/system.h> | ||
| 14 | |||
| 15 | #ifdef CONFIG_SMP | ||
| 16 | |||
| 17 | /* Some notes on how these debugging routines work. When a lock is acquired | ||
| 18 | * an extra debugging member lock->owner_pc is set to the caller of the lock | ||
| 19 | * acquisition routine. Right before releasing a lock, the debugging program | ||
| 20 | * counter is cleared to zero. | ||
| 21 | * | ||
| 22 | * Furthermore, since PC's are 4 byte aligned on Sparc, we stuff the CPU | ||
| 23 | * number of the owner in the lowest two bits. | ||
| 24 | */ | ||
| 25 | |||
| 26 | #define STORE_CALLER(A) __asm__ __volatile__("mov %%i7, %0" : "=r" (A)); | ||
| 27 | |||
| 28 | static inline void show(char *str, spinlock_t *lock, unsigned long caller) | ||
| 29 | { | ||
| 30 | int cpu = smp_processor_id(); | ||
| 31 | |||
| 32 | printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n",str, | ||
| 33 | lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3); | ||
| 34 | } | ||
| 35 | |||
| 36 | static inline void show_read(char *str, rwlock_t *lock, unsigned long caller) | ||
| 37 | { | ||
| 38 | int cpu = smp_processor_id(); | ||
| 39 | |||
| 40 | printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n", str, | ||
| 41 | lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3); | ||
| 42 | } | ||
| 43 | |||
| 44 | static inline void show_write(char *str, rwlock_t *lock, unsigned long caller) | ||
| 45 | { | ||
| 46 | int cpu = smp_processor_id(); | ||
| 47 | int i; | ||
| 48 | |||
| 49 | printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)", str, | ||
| 50 | lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3); | ||
| 51 | |||
| 52 | for(i = 0; i < NR_CPUS; i++) | ||
| 53 | printk(" reader[%d]=%08lx", i, lock->reader_pc[i]); | ||
| 54 | |||
| 55 | printk("\n"); | ||
| 56 | } | ||
| 57 | |||
| 58 | #undef INIT_STUCK | ||
| 59 | #define INIT_STUCK 100000000 | ||
| 60 | |||
| 61 | void _do_spin_lock(spinlock_t *lock, char *str) | ||
| 62 | { | ||
| 63 | unsigned long caller; | ||
| 64 | unsigned long val; | ||
| 65 | int cpu = smp_processor_id(); | ||
| 66 | int stuck = INIT_STUCK; | ||
| 67 | |||
| 68 | STORE_CALLER(caller); | ||
| 69 | |||
| 70 | again: | ||
| 71 | __asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock))); | ||
| 72 | if(val) { | ||
| 73 | while(lock->lock) { | ||
| 74 | if (!--stuck) { | ||
| 75 | show(str, lock, caller); | ||
| 76 | stuck = INIT_STUCK; | ||
| 77 | } | ||
| 78 | barrier(); | ||
| 79 | } | ||
| 80 | goto again; | ||
| 81 | } | ||
| 82 | lock->owner_pc = (cpu & 3) | (caller & ~3); | ||
| 83 | } | ||
| 84 | |||
| 85 | int _spin_trylock(spinlock_t *lock) | ||
| 86 | { | ||
| 87 | unsigned long val; | ||
| 88 | unsigned long caller; | ||
| 89 | int cpu = smp_processor_id(); | ||
| 90 | |||
| 91 | STORE_CALLER(caller); | ||
| 92 | |||
| 93 | __asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock))); | ||
| 94 | if(!val) { | ||
| 95 | /* We got it, record our identity for debugging. */ | ||
| 96 | lock->owner_pc = (cpu & 3) | (caller & ~3); | ||
| 97 | } | ||
| 98 | return val == 0; | ||
| 99 | } | ||
| 100 | |||
| 101 | void _do_spin_unlock(spinlock_t *lock) | ||
| 102 | { | ||
| 103 | lock->owner_pc = 0; | ||
| 104 | barrier(); | ||
| 105 | lock->lock = 0; | ||
| 106 | } | ||
| 107 | |||
| 108 | void _do_read_lock(rwlock_t *rw, char *str) | ||
| 109 | { | ||
| 110 | unsigned long caller; | ||
| 111 | unsigned long val; | ||
| 112 | int cpu = smp_processor_id(); | ||
| 113 | int stuck = INIT_STUCK; | ||
| 114 | |||
| 115 | STORE_CALLER(caller); | ||
| 116 | |||
| 117 | wlock_again: | ||
| 118 | __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock))); | ||
| 119 | if(val) { | ||
| 120 | while(rw->lock & 0xff) { | ||
| 121 | if (!--stuck) { | ||
| 122 | show_read(str, rw, caller); | ||
| 123 | stuck = INIT_STUCK; | ||
| 124 | } | ||
| 125 | barrier(); | ||
| 126 | } | ||
| 127 | goto wlock_again; | ||
| 128 | } | ||
| 129 | |||
| 130 | rw->reader_pc[cpu] = caller; | ||
| 131 | barrier(); | ||
| 132 | rw->lock++; | ||
| 133 | } | ||
| 134 | |||
| 135 | void _do_read_unlock(rwlock_t *rw, char *str) | ||
| 136 | { | ||
| 137 | unsigned long caller; | ||
| 138 | unsigned long val; | ||
| 139 | int cpu = smp_processor_id(); | ||
| 140 | int stuck = INIT_STUCK; | ||
| 141 | |||
| 142 | STORE_CALLER(caller); | ||
| 143 | |||
| 144 | wlock_again: | ||
| 145 | __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock))); | ||
| 146 | if(val) { | ||
| 147 | while(rw->lock & 0xff) { | ||
| 148 | if (!--stuck) { | ||
| 149 | show_read(str, rw, caller); | ||
| 150 | stuck = INIT_STUCK; | ||
| 151 | } | ||
| 152 | barrier(); | ||
| 153 | } | ||
| 154 | goto wlock_again; | ||
| 155 | } | ||
| 156 | |||
| 157 | rw->reader_pc[cpu] = 0; | ||
| 158 | barrier(); | ||
| 159 | rw->lock -= 0x1ff; | ||
| 160 | } | ||
| 161 | |||
| 162 | void _do_write_lock(rwlock_t *rw, char *str) | ||
| 163 | { | ||
| 164 | unsigned long caller; | ||
| 165 | unsigned long val; | ||
| 166 | int cpu = smp_processor_id(); | ||
| 167 | int stuck = INIT_STUCK; | ||
| 168 | |||
| 169 | STORE_CALLER(caller); | ||
| 170 | |||
| 171 | wlock_again: | ||
| 172 | __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock))); | ||
| 173 | if(val) { | ||
| 174 | wlock_wait: | ||
| 175 | while(rw->lock) { | ||
| 176 | if (!--stuck) { | ||
| 177 | show_write(str, rw, caller); | ||
| 178 | stuck = INIT_STUCK; | ||
| 179 | } | ||
| 180 | barrier(); | ||
| 181 | } | ||
| 182 | goto wlock_again; | ||
| 183 | } | ||
| 184 | |||
| 185 | if (rw->lock & ~0xff) { | ||
| 186 | *(((unsigned char *)&rw->lock)+3) = 0; | ||
| 187 | barrier(); | ||
| 188 | goto wlock_wait; | ||
| 189 | } | ||
| 190 | |||
| 191 | barrier(); | ||
| 192 | rw->owner_pc = (cpu & 3) | (caller & ~3); | ||
| 193 | } | ||
| 194 | |||
| 195 | void _do_write_unlock(rwlock_t *rw) | ||
| 196 | { | ||
| 197 | rw->owner_pc = 0; | ||
| 198 | barrier(); | ||
| 199 | rw->lock = 0; | ||
| 200 | } | ||
| 201 | |||
| 202 | #endif /* SMP */ | ||
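The deleted file's own comment explains the trick being retired here: since SPARC PCs are 4-byte aligned, the owner's CPU number is packed into the low two bits of owner_pc. A small sketch of the pack/unpack, assuming a 32-bit unsigned long as on sparc32:

    /* pack: caller PC (4-byte aligned) plus CPU number in bits 0-1 */
    lock->owner_pc = (cpu & 3) | (caller & ~3UL);

    /* unpack */
    unsigned long pc  = lock->owner_pc & ~3UL;  /* owner's program counter */
    unsigned int  cpu = lock->owner_pc & 3;     /* owning CPU, 0-3 */

That single word is why the "stuck" reports print PC(%08lx):CPU(%lx) together, and why the scheme tops out at four CPUs.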
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index 66255434128a..7d10b0397091 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c | |||
| @@ -607,11 +607,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | |||
| 607 | struct thread_info *t = p->thread_info; | 607 | struct thread_info *t = p->thread_info; |
| 608 | char *child_trap_frame; | 608 | char *child_trap_frame; |
| 609 | 609 | ||
| 610 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 611 | p->thread.smp_lock_count = 0; | ||
| 612 | p->thread.smp_lock_pc = 0; | ||
| 613 | #endif | ||
| 614 | |||
| 615 | /* Calculate offset to stack_frame & pt_regs */ | 610 | /* Calculate offset to stack_frame & pt_regs */ |
| 616 | child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ)); | 611 | child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ)); |
| 617 | memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ)); | 612 | memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ)); |
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index 7d9a0f6c437d..cbb5e59824e5 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c | |||
| @@ -115,17 +115,12 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data); | |||
| 115 | 115 | ||
| 116 | /* used by various drivers */ | 116 | /* used by various drivers */ |
| 117 | #ifdef CONFIG_SMP | 117 | #ifdef CONFIG_SMP |
| 118 | #ifndef CONFIG_DEBUG_SPINLOCK | ||
| 119 | /* Out of line rw-locking implementation. */ | 118 | /* Out of line rw-locking implementation. */ |
| 120 | EXPORT_SYMBOL(__read_lock); | 119 | EXPORT_SYMBOL(__read_lock); |
| 121 | EXPORT_SYMBOL(__read_unlock); | 120 | EXPORT_SYMBOL(__read_unlock); |
| 122 | EXPORT_SYMBOL(__write_lock); | 121 | EXPORT_SYMBOL(__write_lock); |
| 123 | EXPORT_SYMBOL(__write_unlock); | 122 | EXPORT_SYMBOL(__write_unlock); |
| 124 | EXPORT_SYMBOL(__write_trylock); | 123 | EXPORT_SYMBOL(__write_trylock); |
| 125 | /* Out of line spin-locking implementation. */ | ||
| 126 | EXPORT_SYMBOL(_raw_spin_lock); | ||
| 127 | EXPORT_SYMBOL(_raw_spin_lock_flags); | ||
| 128 | #endif | ||
| 129 | 124 | ||
| 130 | /* Hard IRQ locking */ | 125 | /* Hard IRQ locking */ |
| 131 | EXPORT_SYMBOL(synchronize_irq); | 126 | EXPORT_SYMBOL(synchronize_irq); |
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile index 40dbeec7e5d6..d968aebe83b2 100644 --- a/arch/sparc64/lib/Makefile +++ b/arch/sparc64/lib/Makefile | |||
| @@ -14,7 +14,6 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \ | |||
| 14 | copy_in_user.o user_fixup.o memmove.o \ | 14 | copy_in_user.o user_fixup.o memmove.o \ |
| 15 | mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o | 15 | mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o |
| 16 | 16 | ||
| 17 | lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o | ||
| 18 | lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o | 17 | lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o |
| 19 | 18 | ||
| 20 | obj-y += iomap.o | 19 | obj-y += iomap.o |
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c deleted file mode 100644 index f5f0b5586f01..000000000000 --- a/arch/sparc64/lib/debuglocks.c +++ /dev/null | |||
| @@ -1,366 +0,0 @@ | |||
| 1 | /* $Id: debuglocks.c,v 1.9 2001/11/17 00:10:48 davem Exp $ | ||
| 2 | * debuglocks.c: Debugging versions of SMP locking primitives. | ||
| 3 | * | ||
| 4 | * Copyright (C) 1998 David S. Miller (davem@redhat.com) | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <linux/config.h> | ||
| 8 | #include <linux/kernel.h> | ||
| 9 | #include <linux/sched.h> | ||
| 10 | #include <linux/spinlock.h> | ||
| 11 | #include <asm/system.h> | ||
| 12 | |||
| 13 | #ifdef CONFIG_SMP | ||
| 14 | |||
| 15 | static inline void show (char *str, spinlock_t *lock, unsigned long caller) | ||
| 16 | { | ||
| 17 | int cpu = smp_processor_id(); | ||
| 18 | |||
| 19 | printk("%s(%p) CPU#%d stuck at %08x, owner PC(%08x):CPU(%x)\n", | ||
| 20 | str, lock, cpu, (unsigned int) caller, | ||
| 21 | lock->owner_pc, lock->owner_cpu); | ||
| 22 | } | ||
| 23 | |||
| 24 | static inline void show_read (char *str, rwlock_t *lock, unsigned long caller) | ||
| 25 | { | ||
| 26 | int cpu = smp_processor_id(); | ||
| 27 | |||
| 28 | printk("%s(%p) CPU#%d stuck at %08x, writer PC(%08x):CPU(%x)\n", | ||
| 29 | str, lock, cpu, (unsigned int) caller, | ||
| 30 | lock->writer_pc, lock->writer_cpu); | ||
| 31 | } | ||
| 32 | |||
| 33 | static inline void show_write (char *str, rwlock_t *lock, unsigned long caller) | ||
| 34 | { | ||
| 35 | int cpu = smp_processor_id(); | ||
| 36 | int i; | ||
| 37 | |||
| 38 | printk("%s(%p) CPU#%d stuck at %08x\n", | ||
| 39 | str, lock, cpu, (unsigned int) caller); | ||
| 40 | printk("Writer: PC(%08x):CPU(%x)\n", | ||
| 41 | lock->writer_pc, lock->writer_cpu); | ||
| 42 | printk("Readers:"); | ||
| 43 | for (i = 0; i < NR_CPUS; i++) | ||
| 44 | if (lock->reader_pc[i]) | ||
| 45 | printk(" %d[%08x]", i, lock->reader_pc[i]); | ||
| 46 | printk("\n"); | ||
| 47 | } | ||
| 48 | |||
| 49 | #undef INIT_STUCK | ||
| 50 | #define INIT_STUCK 100000000 | ||
| 51 | |||
| 52 | void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller) | ||
| 53 | { | ||
| 54 | unsigned long val; | ||
| 55 | int stuck = INIT_STUCK; | ||
| 56 | int cpu = get_cpu(); | ||
| 57 | int shown = 0; | ||
| 58 | |||
| 59 | again: | ||
| 60 | __asm__ __volatile__("ldstub [%1], %0" | ||
| 61 | : "=r" (val) | ||
| 62 | : "r" (&(lock->lock)) | ||
| 63 | : "memory"); | ||
| 64 | membar_storeload_storestore(); | ||
| 65 | if (val) { | ||
| 66 | while (lock->lock) { | ||
| 67 | if (!--stuck) { | ||
| 68 | if (shown++ <= 2) | ||
| 69 | show(str, lock, caller); | ||
| 70 | stuck = INIT_STUCK; | ||
| 71 | } | ||
| 72 | rmb(); | ||
| 73 | } | ||
| 74 | goto again; | ||
| 75 | } | ||
| 76 | lock->owner_pc = ((unsigned int)caller); | ||
| 77 | lock->owner_cpu = cpu; | ||
| 78 | current->thread.smp_lock_count++; | ||
| 79 | current->thread.smp_lock_pc = ((unsigned int)caller); | ||
| 80 | |||
| 81 | put_cpu(); | ||
| 82 | } | ||
| 83 | |||
| 84 | int _do_spin_trylock(spinlock_t *lock, unsigned long caller) | ||
| 85 | { | ||
| 86 | unsigned long val; | ||
| 87 | int cpu = get_cpu(); | ||
| 88 | |||
| 89 | __asm__ __volatile__("ldstub [%1], %0" | ||
| 90 | : "=r" (val) | ||
| 91 | : "r" (&(lock->lock)) | ||
| 92 | : "memory"); | ||
| 93 | membar_storeload_storestore(); | ||
| 94 | if (!val) { | ||
| 95 | lock->owner_pc = ((unsigned int)caller); | ||
| 96 | lock->owner_cpu = cpu; | ||
| 97 | current->thread.smp_lock_count++; | ||
| 98 | current->thread.smp_lock_pc = ((unsigned int)caller); | ||
| 99 | } | ||
| 100 | |||
| 101 | put_cpu(); | ||
| 102 | |||
| 103 | return val == 0; | ||
| 104 | } | ||
| 105 | |||
| 106 | void _do_spin_unlock(spinlock_t *lock) | ||
| 107 | { | ||
| 108 | lock->owner_pc = 0; | ||
| 109 | lock->owner_cpu = NO_PROC_ID; | ||
| 110 | membar_storestore_loadstore(); | ||
| 111 | lock->lock = 0; | ||
| 112 | current->thread.smp_lock_count--; | ||
| 113 | } | ||
| 114 | |||
| 115 | /* Keep INIT_STUCK the same... */ | ||
| 116 | |||
| 117 | void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller) | ||
| 118 | { | ||
| 119 | unsigned long val; | ||
| 120 | int stuck = INIT_STUCK; | ||
| 121 | int cpu = get_cpu(); | ||
| 122 | int shown = 0; | ||
| 123 | |||
| 124 | wlock_again: | ||
| 125 | /* Wait for any writer to go away. */ | ||
| 126 | while (((long)(rw->lock)) < 0) { | ||
| 127 | if (!--stuck) { | ||
| 128 | if (shown++ <= 2) | ||
| 129 | show_read(str, rw, caller); | ||
| 130 | stuck = INIT_STUCK; | ||
| 131 | } | ||
| 132 | rmb(); | ||
| 133 | } | ||
| 134 | /* Try once to increment the counter. */ | ||
| 135 | __asm__ __volatile__( | ||
| 136 | " ldx [%0], %%g1\n" | ||
| 137 | " brlz,a,pn %%g1, 2f\n" | ||
| 138 | " mov 1, %0\n" | ||
| 139 | " add %%g1, 1, %%g7\n" | ||
| 140 | " casx [%0], %%g1, %%g7\n" | ||
| 141 | " sub %%g1, %%g7, %0\n" | ||
| 142 | "2:" : "=r" (val) | ||
| 143 | : "0" (&(rw->lock)) | ||
| 144 | : "g1", "g7", "memory"); | ||
| 145 | membar_storeload_storestore(); | ||
| 146 | if (val) | ||
| 147 | goto wlock_again; | ||
| 148 | rw->reader_pc[cpu] = ((unsigned int)caller); | ||
| 149 | current->thread.smp_lock_count++; | ||
| 150 | current->thread.smp_lock_pc = ((unsigned int)caller); | ||
| 151 | |||
| 152 | put_cpu(); | ||
| 153 | } | ||
| 154 | |||
| 155 | void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller) | ||
| 156 | { | ||
| 157 | unsigned long val; | ||
| 158 | int stuck = INIT_STUCK; | ||
| 159 | int cpu = get_cpu(); | ||
| 160 | int shown = 0; | ||
| 161 | |||
| 162 | /* Drop our identity _first_. */ | ||
| 163 | rw->reader_pc[cpu] = 0; | ||
| 164 | current->thread.smp_lock_count--; | ||
| 165 | runlock_again: | ||
| 166 | /* Spin trying to decrement the counter using casx. */ | ||
| 167 | __asm__ __volatile__( | ||
| 168 | " membar #StoreLoad | #LoadLoad\n" | ||
| 169 | " ldx [%0], %%g1\n" | ||
| 170 | " sub %%g1, 1, %%g7\n" | ||
| 171 | " casx [%0], %%g1, %%g7\n" | ||
| 172 | " membar #StoreLoad | #StoreStore\n" | ||
| 173 | " sub %%g1, %%g7, %0\n" | ||
| 174 | : "=r" (val) | ||
| 175 | : "0" (&(rw->lock)) | ||
| 176 | : "g1", "g7", "memory"); | ||
| 177 | if (val) { | ||
| 178 | if (!--stuck) { | ||
| 179 | if (shown++ <= 2) | ||
| 180 | show_read(str, rw, caller); | ||
| 181 | stuck = INIT_STUCK; | ||
| 182 | } | ||
| 183 | goto runlock_again; | ||
| 184 | } | ||
| 185 | |||
| 186 | put_cpu(); | ||
| 187 | } | ||
| 188 | |||
| 189 | void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller) | ||
| 190 | { | ||
| 191 | unsigned long val; | ||
| 192 | int stuck = INIT_STUCK; | ||
| 193 | int cpu = get_cpu(); | ||
| 194 | int shown = 0; | ||
| 195 | |||
| 196 | wlock_again: | ||
| 197 | /* Spin while there is another writer. */ | ||
| 198 | while (((long)rw->lock) < 0) { | ||
| 199 | if (!--stuck) { | ||
| 200 | if (shown++ <= 2) | ||
| 201 | show_write(str, rw, caller); | ||
| 202 | stuck = INIT_STUCK; | ||
| 203 | } | ||
| 204 | rmb(); | ||
| 205 | } | ||
| 206 | |||
| 207 | /* Try to acquire the write bit. */ | ||
| 208 | __asm__ __volatile__( | ||
| 209 | " mov 1, %%g3\n" | ||
| 210 | " sllx %%g3, 63, %%g3\n" | ||
| 211 | " ldx [%0], %%g1\n" | ||
| 212 | " brlz,pn %%g1, 1f\n" | ||
| 213 | " or %%g1, %%g3, %%g7\n" | ||
| 214 | " casx [%0], %%g1, %%g7\n" | ||
| 215 | " membar #StoreLoad | #StoreStore\n" | ||
| 216 | " ba,pt %%xcc, 2f\n" | ||
| 217 | " sub %%g1, %%g7, %0\n" | ||
| 218 | "1: mov 1, %0\n" | ||
| 219 | "2:" : "=r" (val) | ||
| 220 | : "0" (&(rw->lock)) | ||
| 221 | : "g3", "g1", "g7", "memory"); | ||
| 222 | if (val) { | ||
| 223 | /* We couldn't get the write bit. */ | ||
| 224 | if (!--stuck) { | ||
| 225 | if (shown++ <= 2) | ||
| 226 | show_write(str, rw, caller); | ||
| 227 | stuck = INIT_STUCK; | ||
| 228 | } | ||
| 229 | goto wlock_again; | ||
| 230 | } | ||
| 231 | if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) { | ||
| 232 | /* Readers still around, drop the write | ||
| 233 | * lock, spin, and try again. | ||
| 234 | */ | ||
| 235 | if (!--stuck) { | ||
| 236 | if (shown++ <= 2) | ||
| 237 | show_write(str, rw, caller); | ||
| 238 | stuck = INIT_STUCK; | ||
| 239 | } | ||
| 240 | __asm__ __volatile__( | ||
| 241 | " mov 1, %%g3\n" | ||
| 242 | " sllx %%g3, 63, %%g3\n" | ||
| 243 | "1: ldx [%0], %%g1\n" | ||
| 244 | " andn %%g1, %%g3, %%g7\n" | ||
| 245 | " casx [%0], %%g1, %%g7\n" | ||
| 246 | " cmp %%g1, %%g7\n" | ||
| 247 | " membar #StoreLoad | #StoreStore\n" | ||
| 248 | " bne,pn %%xcc, 1b\n" | ||
| 249 | " nop" | ||
| 250 | : /* no outputs */ | ||
| 251 | : "r" (&(rw->lock)) | ||
| 252 | : "g3", "g1", "g7", "cc", "memory"); | ||
| 253 | while(rw->lock != 0) { | ||
| 254 | if (!--stuck) { | ||
| 255 | if (shown++ <= 2) | ||
| 256 | show_write(str, rw, caller); | ||
| 257 | stuck = INIT_STUCK; | ||
| 258 | } | ||
| 259 | rmb(); | ||
| 260 | } | ||
| 261 | goto wlock_again; | ||
| 262 | } | ||
| 263 | |||
| 264 | /* We have it, say who we are. */ | ||
| 265 | rw->writer_pc = ((unsigned int)caller); | ||
| 266 | rw->writer_cpu = cpu; | ||
| 267 | current->thread.smp_lock_count++; | ||
| 268 | current->thread.smp_lock_pc = ((unsigned int)caller); | ||
| 269 | |||
| 270 | put_cpu(); | ||
| 271 | } | ||
| 272 | |||
| 273 | void _do_write_unlock(rwlock_t *rw, unsigned long caller) | ||
| 274 | { | ||
| 275 | unsigned long val; | ||
| 276 | int stuck = INIT_STUCK; | ||
| 277 | int shown = 0; | ||
| 278 | |||
| 279 | /* Drop our identity _first_ */ | ||
| 280 | rw->writer_pc = 0; | ||
| 281 | rw->writer_cpu = NO_PROC_ID; | ||
| 282 | current->thread.smp_lock_count--; | ||
| 283 | wlock_again: | ||
| 284 | __asm__ __volatile__( | ||
| 285 | " membar #StoreLoad | #LoadLoad\n" | ||
| 286 | " mov 1, %%g3\n" | ||
| 287 | " sllx %%g3, 63, %%g3\n" | ||
| 288 | " ldx [%0], %%g1\n" | ||
| 289 | " andn %%g1, %%g3, %%g7\n" | ||
| 290 | " casx [%0], %%g1, %%g7\n" | ||
| 291 | " membar #StoreLoad | #StoreStore\n" | ||
| 292 | " sub %%g1, %%g7, %0\n" | ||
| 293 | : "=r" (val) | ||
| 294 | : "0" (&(rw->lock)) | ||
| 295 | : "g3", "g1", "g7", "memory"); | ||
| 296 | if (val) { | ||
| 297 | if (!--stuck) { | ||
| 298 | if (shown++ <= 2) | ||
| 299 | show_write("write_unlock", rw, caller); | ||
| 300 | stuck = INIT_STUCK; | ||
| 301 | } | ||
| 302 | goto wlock_again; | ||
| 303 | } | ||
| 304 | } | ||
| 305 | |||
| 306 | int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller) | ||
| 307 | { | ||
| 308 | unsigned long val; | ||
| 309 | int cpu = get_cpu(); | ||
| 310 | |||
| 311 | /* Try to acquire the write bit. */ | ||
| 312 | __asm__ __volatile__( | ||
| 313 | " mov 1, %%g3\n" | ||
| 314 | " sllx %%g3, 63, %%g3\n" | ||
| 315 | " ldx [%0], %%g1\n" | ||
| 316 | " brlz,pn %%g1, 1f\n" | ||
| 317 | " or %%g1, %%g3, %%g7\n" | ||
| 318 | " casx [%0], %%g1, %%g7\n" | ||
| 319 | " membar #StoreLoad | #StoreStore\n" | ||
| 320 | " ba,pt %%xcc, 2f\n" | ||
| 321 | " sub %%g1, %%g7, %0\n" | ||
| 322 | "1: mov 1, %0\n" | ||
| 323 | "2:" : "=r" (val) | ||
| 324 | : "0" (&(rw->lock)) | ||
| 325 | : "g3", "g1", "g7", "memory"); | ||
| 326 | |||
| 327 | if (val) { | ||
| 328 | put_cpu(); | ||
| 329 | return 0; | ||
| 330 | } | ||
| 331 | |||
| 332 | if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) { | ||
| 333 | /* Readers still around, drop the write | ||
| 334 | * lock, return failure. | ||
| 335 | */ | ||
| 336 | __asm__ __volatile__( | ||
| 337 | " mov 1, %%g3\n" | ||
| 338 | " sllx %%g3, 63, %%g3\n" | ||
| 339 | "1: ldx [%0], %%g1\n" | ||
| 340 | " andn %%g1, %%g3, %%g7\n" | ||
| 341 | " casx [%0], %%g1, %%g7\n" | ||
| 342 | " cmp %%g1, %%g7\n" | ||
| 343 | " membar #StoreLoad | #StoreStore\n" | ||
| 344 | " bne,pn %%xcc, 1b\n" | ||
| 345 | " nop" | ||
| 346 | : /* no outputs */ | ||
| 347 | : "r" (&(rw->lock)) | ||
| 348 | : "g3", "g1", "g7", "cc", "memory"); | ||
| 349 | |||
| 350 | put_cpu(); | ||
| 351 | |||
| 352 | return 0; | ||
| 353 | } | ||
| 354 | |||
| 355 | /* We have it, say who we are. */ | ||
| 356 | rw->writer_pc = ((unsigned int)caller); | ||
| 357 | rw->writer_cpu = cpu; | ||
| 358 | current->thread.smp_lock_count++; | ||
| 359 | current->thread.smp_lock_pc = ((unsigned int)caller); | ||
| 360 | |||
| 361 | put_cpu(); | ||
| 362 | |||
| 363 | return 1; | ||
| 364 | } | ||
| 365 | |||
| 366 | #endif /* CONFIG_SMP */ | ||
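The sparc64 version being deleted encodes the whole rwlock in one 64-bit word: bit 63 is the writer flag, bits 0-62 count readers, and casx retries the update whenever the word changed underneath. A hedged C rendering of the invariants the inline assembly maintains (names are illustrative):

    #define RW_WRITER   (1UL << 63)     /* a writer holds the lock   */
    #define RW_READERS  (~RW_WRITER)    /* mask for the reader count */

    /* (long)lock < 0            =>  writer active, readers must wait */
    /* (lock & RW_READERS) != 0  =>  readers still inside             */

The write path first sets the writer bit, then waits for the reader count to drain; if readers persist, it clears the bit again and spins from the top, exactly as the casx loops above do.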
diff --git a/arch/v850/kernel/irq.c b/arch/v850/kernel/irq.c index 336cbf21dc8f..9e85969ba976 100644 --- a/arch/v850/kernel/irq.c +++ b/arch/v850/kernel/irq.c | |||
| @@ -67,13 +67,13 @@ static void ack_none(unsigned int irq) | |||
| 67 | #define end_none enable_none | 67 | #define end_none enable_none |
| 68 | 68 | ||
| 69 | struct hw_interrupt_type no_irq_type = { | 69 | struct hw_interrupt_type no_irq_type = { |
| 70 | "none", | 70 | .typename = "none", |
| 71 | startup_none, | 71 | .startup = startup_none, |
| 72 | shutdown_none, | 72 | .shutdown = shutdown_none, |
| 73 | enable_none, | 73 | .enable = enable_none, |
| 74 | disable_none, | 74 | .disable = disable_none, |
| 75 | ack_none, | 75 | .ack = ack_none, |
| 76 | end_none | 76 | .end = end_none |
| 77 | }; | 77 | }; |
| 78 | 78 | ||
| 79 | volatile unsigned long irq_err_count, spurious_count; | 79 | volatile unsigned long irq_err_count, spurious_count; |
diff --git a/arch/v850/kernel/setup.c b/arch/v850/kernel/setup.c index abd48409dcca..62bdb8d29fc0 100644 --- a/arch/v850/kernel/setup.c +++ b/arch/v850/kernel/setup.c | |||
| @@ -138,13 +138,13 @@ static void nmi_end (unsigned irq) | |||
| 138 | } | 138 | } |
| 139 | 139 | ||
| 140 | static struct hw_interrupt_type nmi_irq_type = { | 140 | static struct hw_interrupt_type nmi_irq_type = { |
| 141 | "NMI", | 141 | .typename = "NMI", |
| 142 | irq_zero, /* startup */ | 142 | .startup = irq_zero, /* startup */ |
| 143 | irq_nop, /* shutdown */ | 143 | .shutdown = irq_nop, /* shutdown */ |
| 144 | irq_nop, /* enable */ | 144 | .enable = irq_nop, /* enable */ |
| 145 | irq_nop, /* disable */ | 145 | .disable = irq_nop, /* disable */ |
| 146 | irq_nop, /* ack */ | 146 | .ack = irq_nop, /* ack */ |
| 147 | nmi_end, /* end */ | 147 | .end = nmi_end, /* end */ |
| 148 | }; | 148 | }; |
| 149 | 149 | ||
| 150 | void __init init_IRQ (void) | 150 | void __init init_IRQ (void) |
diff --git a/arch/v850/kernel/sim.c b/arch/v850/kernel/sim.c index e2cc5580fa2a..17049aaa8f11 100644 --- a/arch/v850/kernel/sim.c +++ b/arch/v850/kernel/sim.c | |||
| @@ -73,13 +73,13 @@ static void irq_nop (unsigned irq) { } | |||
| 73 | static unsigned irq_zero (unsigned irq) { return 0; } | 73 | static unsigned irq_zero (unsigned irq) { return 0; } |
| 74 | 74 | ||
| 75 | static struct hw_interrupt_type sim_irq_type = { | 75 | static struct hw_interrupt_type sim_irq_type = { |
| 76 | "IRQ", | 76 | .typename = "IRQ", |
| 77 | irq_zero, /* startup */ | 77 | .startup = irq_zero, /* startup */ |
| 78 | irq_nop, /* shutdown */ | 78 | .shutdown = irq_nop, /* shutdown */ |
| 79 | irq_nop, /* enable */ | 79 | .enable = irq_nop, /* enable */ |
| 80 | irq_nop, /* disable */ | 80 | .disable = irq_nop, /* disable */ |
| 81 | irq_nop, /* ack */ | 81 | .ack = irq_nop, /* ack */ |
| 82 | irq_nop, /* end */ | 82 | .end = irq_nop, /* end */ |
| 83 | }; | 83 | }; |
| 84 | 84 | ||
| 85 | void __init mach_init_irqs (void) | 85 | void __init mach_init_irqs (void) |
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 0511d8087910..9aec524be3eb 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c | |||
| @@ -929,7 +929,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
| 929 | c->x86_num_cores = intel_num_cpu_cores(c); | 929 | c->x86_num_cores = intel_num_cpu_cores(c); |
| 930 | } | 930 | } |
| 931 | 931 | ||
| 932 | void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) | 932 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) |
| 933 | { | 933 | { |
| 934 | char *v = c->x86_vendor_id; | 934 | char *v = c->x86_vendor_id; |
| 935 | 935 | ||
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c index 7249ba2b7a27..aee50b453265 100644 --- a/drivers/acpi/sleep/main.c +++ b/drivers/acpi/sleep/main.c | |||
| @@ -23,7 +23,6 @@ u8 sleep_states[ACPI_S_STATE_COUNT]; | |||
| 23 | 23 | ||
| 24 | static struct pm_ops acpi_pm_ops; | 24 | static struct pm_ops acpi_pm_ops; |
| 25 | 25 | ||
| 26 | extern void do_suspend_lowlevel_s4bios(void); | ||
| 27 | extern void do_suspend_lowlevel(void); | 26 | extern void do_suspend_lowlevel(void); |
| 28 | 27 | ||
| 29 | static u32 acpi_suspend_states[] = { | 28 | static u32 acpi_suspend_states[] = { |
| @@ -98,8 +97,6 @@ static int acpi_pm_enter(suspend_state_t pm_state) | |||
| 98 | case PM_SUSPEND_DISK: | 97 | case PM_SUSPEND_DISK: |
| 99 | if (acpi_pm_ops.pm_disk_mode == PM_DISK_PLATFORM) | 98 | if (acpi_pm_ops.pm_disk_mode == PM_DISK_PLATFORM) |
| 100 | status = acpi_enter_sleep_state(acpi_state); | 99 | status = acpi_enter_sleep_state(acpi_state); |
| 101 | else | ||
| 102 | do_suspend_lowlevel_s4bios(); | ||
| 103 | break; | 100 | break; |
| 104 | case PM_SUSPEND_MAX: | 101 | case PM_SUSPEND_MAX: |
| 105 | acpi_power_off(); | 102 | acpi_power_off(); |
| @@ -206,11 +203,6 @@ static int __init acpi_sleep_init(void) | |||
| 206 | printk(" S%d", i); | 203 | printk(" S%d", i); |
| 207 | } | 204 | } |
| 208 | if (i == ACPI_STATE_S4) { | 205 | if (i == ACPI_STATE_S4) { |
| 209 | if (acpi_gbl_FACS->S4bios_f) { | ||
| 210 | sleep_states[i] = 1; | ||
| 211 | printk(" S4bios"); | ||
| 212 | acpi_pm_ops.pm_disk_mode = PM_DISK_FIRMWARE; | ||
| 213 | } | ||
| 214 | if (sleep_states[i]) | 206 | if (sleep_states[i]) |
| 215 | acpi_pm_ops.pm_disk_mode = PM_DISK_PLATFORM; | 207 | acpi_pm_ops.pm_disk_mode = PM_DISK_PLATFORM; |
| 216 | } | 208 | } |
diff --git a/drivers/acpi/sleep/poweroff.c b/drivers/acpi/sleep/poweroff.c index a5f947de879b..af7935a95bcc 100644 --- a/drivers/acpi/sleep/poweroff.c +++ b/drivers/acpi/sleep/poweroff.c | |||
| @@ -21,9 +21,7 @@ int acpi_sleep_prepare(u32 acpi_state) | |||
| 21 | { | 21 | { |
| 22 | #ifdef CONFIG_ACPI_SLEEP | 22 | #ifdef CONFIG_ACPI_SLEEP |
| 23 | /* do we have a wakeup address for S2 and S3? */ | 23 | /* do we have a wakeup address for S2 and S3? */ |
| 24 | /* Here, we support only S4BIOS, thus we set the wakeup address */ | 24 | if (acpi_state == ACPI_STATE_S3) { |
| 25 | /* S4OS is only supported for now via swsusp.. */ | ||
| 26 | if (acpi_state == ACPI_STATE_S3 || acpi_state == ACPI_STATE_S4) { | ||
| 27 | if (!acpi_wakeup_address) { | 25 | if (!acpi_wakeup_address) { |
| 28 | return -EFAULT; | 26 | return -EFAULT; |
| 29 | } | 27 | } |
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c index 09a603f3523e..4696a85a98b9 100644 --- a/drivers/acpi/sleep/proc.c +++ b/drivers/acpi/sleep/proc.c | |||
| @@ -25,8 +25,6 @@ static int acpi_system_sleep_seq_show(struct seq_file *seq, void *offset) | |||
| 25 | for (i = 0; i <= ACPI_STATE_S5; i++) { | 25 | for (i = 0; i <= ACPI_STATE_S5; i++) { |
| 26 | if (sleep_states[i]) { | 26 | if (sleep_states[i]) { |
| 27 | seq_printf(seq, "S%d ", i); | 27 | seq_printf(seq, "S%d ", i); |
| 28 | if (i == ACPI_STATE_S4 && acpi_gbl_FACS->S4bios_f) | ||
| 29 | seq_printf(seq, "S4bios "); | ||
| 30 | } | 28 | } |
| 31 | } | 29 | } |
| 32 | 30 | ||
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c index c4aebf2f522d..60a7ef6a201b 100644 --- a/drivers/base/dmapool.c +++ b/drivers/base/dmapool.c | |||
| @@ -262,7 +262,8 @@ dma_pool_destroy (struct dma_pool *pool) | |||
| 262 | * If such a memory block can't be allocated, null is returned. | 262 | * If such a memory block can't be allocated, null is returned. |
| 263 | */ | 263 | */ |
| 264 | void * | 264 | void * |
| 265 | dma_pool_alloc (struct dma_pool *pool, int mem_flags, dma_addr_t *handle) | 265 | dma_pool_alloc (struct dma_pool *pool, unsigned int __nocast mem_flags, |
| 266 | dma_addr_t *handle) | ||
| 266 | { | 267 | { |
| 267 | unsigned long flags; | 268 | unsigned long flags; |
| 268 | struct dma_page *page; | 269 | struct dma_page *page; |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 418b1469d75d..28f2c177a541 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
| @@ -1713,10 +1713,9 @@ static unsigned long pollcomplete(int ctlr) | |||
| 1713 | 1713 | ||
| 1714 | for (i = 20 * HZ; i > 0; i--) { | 1714 | for (i = 20 * HZ; i > 0; i--) { |
| 1715 | done = hba[ctlr]->access.command_completed(hba[ctlr]); | 1715 | done = hba[ctlr]->access.command_completed(hba[ctlr]); |
| 1716 | if (done == FIFO_EMPTY) { | 1716 | if (done == FIFO_EMPTY) |
| 1717 | set_current_state(TASK_UNINTERRUPTIBLE); | 1717 | schedule_timeout_uninterruptible(1); |
| 1718 | schedule_timeout(1); | 1718 | else |
| 1719 | } else | ||
| 1720 | return (done); | 1719 | return (done); |
| 1721 | } | 1720 | } |
| 1722 | /* Invalid address to tell caller we ran out of time */ | 1721 | /* Invalid address to tell caller we ran out of time */ |
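This hunk, and the paride and swim hunks that follow, replace the two-step set_current_state()/schedule_timeout() dance with combined helpers. Their definitions are essentially this (see kernel/timer.c of this era):

    signed long schedule_timeout_interruptible(signed long timeout)
    {
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
    }

    signed long schedule_timeout_uninterruptible(signed long timeout)
    {
        __set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_timeout(timeout);
    }

Besides being shorter, the helpers close the window in which a caller could forget to set the task state first, which would reduce schedule_timeout() to little more than a yield.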
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c index 30c0903c7cdd..cd056e7e64ec 100644 --- a/drivers/block/cfq-iosched.c +++ b/drivers/block/cfq-iosched.c | |||
| @@ -2260,6 +2260,8 @@ static void cfq_put_cfqd(struct cfq_data *cfqd) | |||
| 2260 | if (!atomic_dec_and_test(&cfqd->ref)) | 2260 | if (!atomic_dec_and_test(&cfqd->ref)) |
| 2261 | return; | 2261 | return; |
| 2262 | 2262 | ||
| 2263 | blk_put_queue(q); | ||
| 2264 | |||
| 2263 | cfq_shutdown_timer_wq(cfqd); | 2265 | cfq_shutdown_timer_wq(cfqd); |
| 2264 | q->elevator->elevator_data = NULL; | 2266 | q->elevator->elevator_data = NULL; |
| 2265 | 2267 | ||
| @@ -2316,6 +2318,7 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e) | |||
| 2316 | e->elevator_data = cfqd; | 2318 | e->elevator_data = cfqd; |
| 2317 | 2319 | ||
| 2318 | cfqd->queue = q; | 2320 | cfqd->queue = q; |
| 2321 | atomic_inc(&q->refcnt); | ||
| 2319 | 2322 | ||
| 2320 | cfqd->max_queued = q->nr_requests / 4; | 2323 | cfqd->max_queued = q->nr_requests / 4; |
| 2321 | q->nr_batching = cfq_queued; | 2324 | q->nr_batching = cfq_queued; |
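These two cfq hunks pair up: cfq_init_queue() now takes a reference on the request queue it caches in cfqd->queue, and cfq_put_cfqd() drops it with blk_put_queue(), so the queue cannot be freed while the scheduler's private data still points at it. The general shape of the pattern, with hypothetical names:

    /* take: init path, when storing the back-pointer */
    obj->backref = target;
    atomic_inc(&target->refcnt);

    /* drop: teardown path, once no user of the back-pointer remains */
    blk_put_queue(target);
    obj->backref = NULL;

Here the put is safe at the top of teardown only because the preceding atomic_dec_and_test() on cfqd->ref already guarantees this is the last user.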
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 7289f67e9568..ac5ba462710b 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c | |||
| @@ -516,8 +516,7 @@ static int pcd_tray_move(struct cdrom_device_info *cdi, int position) | |||
| 516 | 516 | ||
| 517 | static void pcd_sleep(int cs) | 517 | static void pcd_sleep(int cs) |
| 518 | { | 518 | { |
| 519 | current->state = TASK_INTERRUPTIBLE; | 519 | schedule_timeout_interruptible(cs); |
| 520 | schedule_timeout(cs); | ||
| 521 | } | 520 | } |
| 522 | 521 | ||
| 523 | static int pcd_reset(struct pcd_unit *cd) | 522 | static int pcd_reset(struct pcd_unit *cd) |
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index 060b1f2a91dd..711d2f314ac3 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c | |||
| @@ -507,8 +507,7 @@ static void pf_eject(struct pf_unit *pf) | |||
| 507 | 507 | ||
| 508 | static void pf_sleep(int cs) | 508 | static void pf_sleep(int cs) |
| 509 | { | 509 | { |
| 510 | current->state = TASK_INTERRUPTIBLE; | 510 | schedule_timeout_interruptible(cs); |
| 511 | schedule_timeout(cs); | ||
| 512 | } | 511 | } |
| 513 | 512 | ||
| 514 | /* the ATAPI standard actually specifies the contents of all 7 registers | 513 | /* the ATAPI standard actually specifies the contents of all 7 registers |
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c index 84d8e291ed96..b3982395f22b 100644 --- a/drivers/block/paride/pg.c +++ b/drivers/block/paride/pg.c | |||
| @@ -276,8 +276,7 @@ static inline u8 DRIVE(struct pg *dev) | |||
| 276 | 276 | ||
| 277 | static void pg_sleep(int cs) | 277 | static void pg_sleep(int cs) |
| 278 | { | 278 | { |
| 279 | current->state = TASK_INTERRUPTIBLE; | 279 | schedule_timeout_interruptible(cs); |
| 280 | schedule_timeout(cs); | ||
| 281 | } | 280 | } |
| 282 | 281 | ||
| 283 | static int pg_wait(struct pg *dev, int go, int stop, unsigned long tmo, char *msg) | 282 | static int pg_wait(struct pg *dev, int go, int stop, unsigned long tmo, char *msg) |
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c index 5fe8ee86f095..d8d35233cf49 100644 --- a/drivers/block/paride/pt.c +++ b/drivers/block/paride/pt.c | |||
| @@ -383,8 +383,7 @@ static int pt_atapi(struct pt_unit *tape, char *cmd, int dlen, char *buf, char * | |||
| 383 | 383 | ||
| 384 | static void pt_sleep(int cs) | 384 | static void pt_sleep(int cs) |
| 385 | { | 385 | { |
| 386 | current->state = TASK_INTERRUPTIBLE; | 386 | schedule_timeout_interruptible(cs); |
| 387 | schedule_timeout(cs); | ||
| 388 | } | 387 | } |
| 389 | 388 | ||
| 390 | static int pt_poll_dsc(struct pt_unit *tape, int pause, int tmo, char *msg) | 389 | static int pt_poll_dsc(struct pt_unit *tape, int pause, int tmo, char *msg) |
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index e5f7494c00ee..e425ad3eebba 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c | |||
| @@ -834,8 +834,7 @@ static int fd_eject(struct floppy_state *fs) | |||
| 834 | break; | 834 | break; |
| 835 | } | 835 | } |
| 836 | swim3_select(fs, RELAX); | 836 | swim3_select(fs, RELAX); |
| 837 | current->state = TASK_INTERRUPTIBLE; | 837 | schedule_timeout_interruptible(1); |
| 838 | schedule_timeout(1); | ||
| 839 | if (swim3_readbit(fs, DISK_IN) == 0) | 838 | if (swim3_readbit(fs, DISK_IN) == 0) |
| 840 | break; | 839 | break; |
| 841 | } | 840 | } |
| @@ -906,8 +905,7 @@ static int floppy_open(struct inode *inode, struct file *filp) | |||
| 906 | break; | 905 | break; |
| 907 | } | 906 | } |
| 908 | swim3_select(fs, RELAX); | 907 | swim3_select(fs, RELAX); |
| 909 | current->state = TASK_INTERRUPTIBLE; | 908 | schedule_timeout_interruptible(1); |
| 910 | schedule_timeout(1); | ||
| 911 | } | 909 | } |
| 912 | if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0 | 910 | if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0 |
| 913 | || swim3_readbit(fs, DISK_IN) == 0)) | 911 | || swim3_readbit(fs, DISK_IN) == 0)) |
| @@ -992,8 +990,7 @@ static int floppy_revalidate(struct gendisk *disk) | |||
| 992 | if (signal_pending(current)) | 990 | if (signal_pending(current)) |
| 993 | break; | 991 | break; |
| 994 | swim3_select(fs, RELAX); | 992 | swim3_select(fs, RELAX); |
| 995 | current->state = TASK_INTERRUPTIBLE; | 993 | schedule_timeout_interruptible(1); |
| 996 | schedule_timeout(1); | ||
| 997 | } | 994 | } |
| 998 | ret = swim3_readbit(fs, SEEK_COMPLETE) == 0 | 995 | ret = swim3_readbit(fs, SEEK_COMPLETE) == 0 |
| 999 | || swim3_readbit(fs, DISK_IN) == 0; | 996 | || swim3_readbit(fs, DISK_IN) == 0; |
diff --git a/drivers/block/swim_iop.c b/drivers/block/swim_iop.c index a1283f6dc018..89e3c2f8b776 100644 --- a/drivers/block/swim_iop.c +++ b/drivers/block/swim_iop.c | |||
| @@ -338,8 +338,7 @@ static int swimiop_eject(struct floppy_state *fs) | |||
| 338 | err = -EINTR; | 338 | err = -EINTR; |
| 339 | break; | 339 | break; |
| 340 | } | 340 | } |
| 341 | current->state = TASK_INTERRUPTIBLE; | 341 | schedule_timeout_interruptible(1); |
| 342 | schedule_timeout(1); | ||
| 343 | } | 342 | } |
| 344 | release_drive(fs); | 343 | release_drive(fs); |
| 345 | return cmd->error; | 344 | return cmd->error; |
diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 0c4c121d2e79..0f48301342da 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | * - set initialised bit then. | 34 | * - set initialised bit then. |
| 35 | */ | 35 | */ |
| 36 | 36 | ||
| 37 | //#define DEBUG /* uncomment if you want debugging info (pr_debug) */ | ||
| 37 | #include <linux/config.h> | 38 | #include <linux/config.h> |
| 38 | #include <linux/sched.h> | 39 | #include <linux/sched.h> |
| 39 | #include <linux/fs.h> | 40 | #include <linux/fs.h> |
| @@ -58,10 +59,6 @@ | |||
| 58 | #include <asm/uaccess.h> | 59 | #include <asm/uaccess.h> |
| 59 | #include <asm/io.h> | 60 | #include <asm/io.h> |
| 60 | 61 | ||
| 61 | #define PRINTK(x...) do {} while (0) | ||
| 62 | #define dprintk(x...) do {} while (0) | ||
| 63 | /*#define dprintk(x...) printk(x) */ | ||
| 64 | |||
| 65 | #define MM_MAXCARDS 4 | 62 | #define MM_MAXCARDS 4 |
| 66 | #define MM_RAHEAD 2 /* two sectors */ | 63 | #define MM_RAHEAD 2 /* two sectors */ |
| 67 | #define MM_BLKSIZE 1024 /* 1k blocks */ | 64 | #define MM_BLKSIZE 1024 /* 1k blocks */ |
| @@ -299,7 +296,7 @@ static void mm_start_io(struct cardinfo *card) | |||
| 299 | 296 | ||
| 300 | /* make the last descriptor end the chain */ | 297 | /* make the last descriptor end the chain */ |
| 301 | page = &card->mm_pages[card->Active]; | 298 | page = &card->mm_pages[card->Active]; |
| 302 | PRINTK("start_io: %d %d->%d\n", card->Active, page->headcnt, page->cnt-1); | 299 | pr_debug("start_io: %d %d->%d\n", card->Active, page->headcnt, page->cnt-1); |
| 303 | desc = &page->desc[page->cnt-1]; | 300 | desc = &page->desc[page->cnt-1]; |
| 304 | 301 | ||
| 305 | desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN); | 302 | desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN); |
| @@ -532,7 +529,7 @@ static void process_page(unsigned long data) | |||
| 532 | activate(card); | 529 | activate(card); |
| 533 | } else { | 530 | } else { |
| 534 | /* haven't finished with this one yet */ | 531 | /* haven't finished with this one yet */ |
| 535 | PRINTK("do some more\n"); | 532 | pr_debug("do some more\n"); |
| 536 | mm_start_io(card); | 533 | mm_start_io(card); |
| 537 | } | 534 | } |
| 538 | out_unlock: | 535 | out_unlock: |
| @@ -555,7 +552,7 @@ static void process_page(unsigned long data) | |||
| 555 | static int mm_make_request(request_queue_t *q, struct bio *bio) | 552 | static int mm_make_request(request_queue_t *q, struct bio *bio) |
| 556 | { | 553 | { |
| 557 | struct cardinfo *card = q->queuedata; | 554 | struct cardinfo *card = q->queuedata; |
| 558 | PRINTK("mm_make_request %ld %d\n", bh->b_rsector, bh->b_size); | 555 | pr_debug("mm_make_request %ld %d\n", bh->b_rsector, bh->b_size); |
| 559 | 556 | ||
| 560 | bio->bi_phys_segments = bio->bi_idx; /* count of completed segments*/ | 557 | bio->bi_phys_segments = bio->bi_idx; /* count of completed segments*/ |
| 561 | spin_lock_irq(&card->lock); | 558 | spin_lock_irq(&card->lock); |
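Note: umem.c drops its private PRINTK/dprintk macros in favour of pr_debug(), which only emits code when DEBUG is defined before the headers are included (hence the new commented-out #define at the top of the file). A sketch of the gating, following the linux/kernel.h definition of the time:

    #ifdef DEBUG
    #define pr_debug(fmt, arg...) \
            printk(KERN_DEBUG fmt, ##arg)
    #else
    #define pr_debug(fmt, arg...) \
            do { } while (0)        /* compiles away in non-debug builds */
    #endif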
diff --git a/drivers/block/xd.c b/drivers/block/xd.c index 1676033da6c6..68b6d7b154cf 100644 --- a/drivers/block/xd.c +++ b/drivers/block/xd.c | |||
| @@ -47,6 +47,7 @@ | |||
| 47 | #include <linux/wait.h> | 47 | #include <linux/wait.h> |
| 48 | #include <linux/blkdev.h> | 48 | #include <linux/blkdev.h> |
| 49 | #include <linux/blkpg.h> | 49 | #include <linux/blkpg.h> |
| 50 | #include <linux/delay.h> | ||
| 50 | 51 | ||
| 51 | #include <asm/system.h> | 52 | #include <asm/system.h> |
| 52 | #include <asm/io.h> | 53 | #include <asm/io.h> |
| @@ -62,7 +63,7 @@ static int xd[5] = { -1,-1,-1,-1, }; | |||
| 62 | 63 | ||
| 63 | #define XD_DONT_USE_DMA 0 /* Initial value. may be overriden using | 64 | #define XD_DONT_USE_DMA 0 /* Initial value. may be overriden using |
| 64 | "nodma" module option */ | 65 | "nodma" module option */ |
| 65 | #define XD_INIT_DISK_DELAY (30*HZ/1000) /* 30 ms delay during disk initialization */ | 66 | #define XD_INIT_DISK_DELAY (30) /* 30 ms delay during disk initialization */ |
| 66 | 67 | ||
| 67 | /* Above may need to be increased if a problem with the 2nd drive detection | 68 | /* Above may need to be increased if a problem with the 2nd drive detection |
| 68 | (ST11M controller) or resetting a controller (WD) appears */ | 69 | (ST11M controller) or resetting a controller (WD) appears */ |
| @@ -529,10 +530,8 @@ static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long t | |||
| 529 | int success; | 530 | int success; |
| 530 | 531 | ||
| 531 | xdc_busy = 1; | 532 | xdc_busy = 1; |
| 532 | while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry)) { | 533 | while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry)) |
| 533 | set_current_state(TASK_UNINTERRUPTIBLE); | 534 | schedule_timeout_uninterruptible(1); |
| 534 | schedule_timeout(1); | ||
| 535 | } | ||
| 536 | xdc_busy = 0; | 535 | xdc_busy = 0; |
| 537 | return (success); | 536 | return (success); |
| 538 | } | 537 | } |
| @@ -633,14 +632,12 @@ static u_char __init xd_initdrives (void (*init_drive)(u_char drive)) | |||
| 633 | for (i = 0; i < XD_MAXDRIVES; i++) { | 632 | for (i = 0; i < XD_MAXDRIVES; i++) { |
| 634 | xd_build(cmdblk,CMD_TESTREADY,i,0,0,0,0,0); | 633 | xd_build(cmdblk,CMD_TESTREADY,i,0,0,0,0,0); |
| 635 | if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT*8)) { | 634 | if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT*8)) { |
| 636 | set_current_state(TASK_INTERRUPTIBLE); | 635 | msleep_interruptible(XD_INIT_DISK_DELAY); |
| 637 | schedule_timeout(XD_INIT_DISK_DELAY); | ||
| 638 | 636 | ||
| 639 | init_drive(count); | 637 | init_drive(count); |
| 640 | count++; | 638 | count++; |
| 641 | 639 | ||
| 642 | set_current_state(TASK_INTERRUPTIBLE); | 640 | msleep_interruptible(XD_INIT_DISK_DELAY); |
| 643 | schedule_timeout(XD_INIT_DISK_DELAY); | ||
| 644 | } | 641 | } |
| 645 | } | 642 | } |
| 646 | return (count); | 643 | return (count); |
| @@ -761,8 +758,7 @@ static void __init xd_wd_init_controller (unsigned int address) | |||
| 761 | 758 | ||
| 762 | outb(0,XD_RESET); /* reset the controller */ | 759 | outb(0,XD_RESET); /* reset the controller */ |
| 763 | 760 | ||
| 764 | set_current_state(TASK_UNINTERRUPTIBLE); | 761 | msleep(XD_INIT_DISK_DELAY); |
| 765 | schedule_timeout(XD_INIT_DISK_DELAY); | ||
| 766 | } | 762 | } |
| 767 | 763 | ||
| 768 | static void __init xd_wd_init_drive (u_char drive) | 764 | static void __init xd_wd_init_drive (u_char drive) |
| @@ -936,8 +932,7 @@ If you need non-standard settings use the xd=... command */ | |||
| 936 | xd_maxsectors = 0x01; | 932 | xd_maxsectors = 0x01; |
| 937 | outb(0,XD_RESET); /* reset the controller */ | 933 | outb(0,XD_RESET); /* reset the controller */ |
| 938 | 934 | ||
| 939 | set_current_state(TASK_UNINTERRUPTIBLE); | 935 | msleep(XD_INIT_DISK_DELAY); |
| 940 | schedule_timeout(XD_INIT_DISK_DELAY); | ||
| 941 | } | 936 | } |
| 942 | 937 | ||
| 943 | static void __init xd_xebec_init_drive (u_char drive) | 938 | static void __init xd_xebec_init_drive (u_char drive) |
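Note: the xd.c hunks also change XD_INIT_DISK_DELAY from a jiffies count (30*HZ/1000) to plain milliseconds, since msleep() and msleep_interruptible() take milliseconds and do the HZ conversion internally, rounding up so the sleep never undershoots. A hedged before/after sketch, helper name hypothetical:

    #include <linux/delay.h>

    #define DISK_DELAY_MS 30        /* was: 30*HZ/1000 jiffies */

    static void disk_settle(void)   /* hypothetical helper */
    {
            /* sleeps at least 30 ms regardless of HZ, uninterruptibly */
            msleep(DISK_DELAY_MS);
    }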
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 007f6a662439..bb5e8d665a2a 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c | |||
| @@ -296,7 +296,7 @@ z2_open( struct inode *inode, struct file *filp ) | |||
| 296 | return 0; | 296 | return 0; |
| 297 | 297 | ||
| 298 | err_out_kfree: | 298 | err_out_kfree: |
| 299 | kfree( z2ram_map ); | 299 | kfree(z2ram_map); |
| 300 | err_out: | 300 | err_out: |
| 301 | return rc; | 301 | return rc; |
| 302 | } | 302 | } |
diff --git a/drivers/cdrom/sbpcd.c b/drivers/cdrom/sbpcd.c index 30a897755361..466e9c2974bd 100644 --- a/drivers/cdrom/sbpcd.c +++ b/drivers/cdrom/sbpcd.c | |||
| @@ -827,8 +827,7 @@ static void mark_timeout_audio(u_long i) | |||
| 827 | static void sbp_sleep(u_int time) | 827 | static void sbp_sleep(u_int time) |
| 828 | { | 828 | { |
| 829 | sti(); | 829 | sti(); |
| 830 | current->state = TASK_INTERRUPTIBLE; | 830 | schedule_timeout_interruptible(time); |
| 831 | schedule_timeout(time); | ||
| 832 | sti(); | 831 | sti(); |
| 833 | } | 832 | } |
| 834 | /*==========================================================================*/ | 833 | /*==========================================================================*/ |
| @@ -4216,7 +4215,8 @@ static int sbpcd_dev_ioctl(struct cdrom_device_info *cdi, u_int cmd, | |||
| 4216 | 4215 | ||
| 4217 | case CDROMAUDIOBUFSIZ: /* configure the audio buffer size */ | 4216 | case CDROMAUDIOBUFSIZ: /* configure the audio buffer size */ |
| 4218 | msg(DBG_IOC,"ioctl: CDROMAUDIOBUFSIZ entered.\n"); | 4217 | msg(DBG_IOC,"ioctl: CDROMAUDIOBUFSIZ entered.\n"); |
| 4219 | if (current_drive->sbp_audsiz>0) vfree(current_drive->aud_buf); | 4218 | if (current_drive->sbp_audsiz>0) |
| 4219 | vfree(current_drive->aud_buf); | ||
| 4220 | current_drive->aud_buf=NULL; | 4220 | current_drive->aud_buf=NULL; |
| 4221 | current_drive->sbp_audsiz=arg; | 4221 | current_drive->sbp_audsiz=arg; |
| 4222 | 4222 | ||
| @@ -5910,7 +5910,8 @@ static void sbpcd_exit(void) | |||
| 5910 | put_disk(D_S[j].disk); | 5910 | put_disk(D_S[j].disk); |
| 5911 | devfs_remove("sbp/c0t%d", j); | 5911 | devfs_remove("sbp/c0t%d", j); |
| 5912 | vfree(D_S[j].sbp_buf); | 5912 | vfree(D_S[j].sbp_buf); |
| 5913 | if (D_S[j].sbp_audsiz>0) vfree(D_S[j].aud_buf); | 5913 | if (D_S[j].sbp_audsiz>0) |
| 5914 | vfree(D_S[j].aud_buf); | ||
| 5914 | if ((unregister_cdrom(D_S[j].sbpcd_infop) == -EINVAL)) | 5915 | if ((unregister_cdrom(D_S[j].sbpcd_infop) == -EINVAL)) |
| 5915 | { | 5916 | { |
| 5916 | msg(DBG_INF, "What's that: can't unregister info %s.\n", major_name); | 5917 | msg(DBG_INF, "What's that: can't unregister info %s.\n", major_name); |
diff --git a/drivers/cdrom/sonycd535.c b/drivers/cdrom/sonycd535.c index 9f22e8f1f6c0..e65659926432 100644 --- a/drivers/cdrom/sonycd535.c +++ b/drivers/cdrom/sonycd535.c | |||
| @@ -1478,8 +1478,7 @@ static int __init sony535_init(void) | |||
| 1478 | /* look for the CD-ROM, follows the procedure in the DOS driver */ | 1478 | /* look for the CD-ROM, follows the procedure in the DOS driver */ |
| 1479 | inb(select_unit_reg); | 1479 | inb(select_unit_reg); |
| 1480 | /* wait for 40 18 Hz ticks (reverse-engineered from DOS driver) */ | 1480 | /* wait for 40 18 Hz ticks (reverse-engineered from DOS driver) */ |
| 1481 | set_current_state(TASK_INTERRUPTIBLE); | 1481 | schedule_timeout_interruptible((HZ+17)*40/18); |
| 1482 | schedule_timeout((HZ+17)*40/18); | ||
| 1483 | inb(result_reg); | 1482 | inb(result_reg); |
| 1484 | 1483 | ||
| 1485 | outb(0, read_status_reg); /* does a reset? */ | 1484 | outb(0, read_status_reg); /* does a reset? */ |
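Note: the sonycd535 wait converts 40 ticks of the drive's 18 Hz clock into jiffies; adding 17 to HZ before dividing by 18 makes the division round up, so the sleep never comes out short. Worked values, for illustration:

    /* (HZ+17)*40/18 jiffies, i.e. 40 ticks at 18 Hz, rounded up:
     *   HZ = 100:  (100+17)*40/18  = 260  jiffies = 2.60 s
     *   HZ = 1000: (1000+17)*40/18 = 2260 jiffies = 2.26 s
     * exact target: 40/18 s = 2.22 s, so the wait always errs long. */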
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c index 4d4e602fdc7e..82b43c541c8d 100644 --- a/drivers/char/agp/backend.c +++ b/drivers/char/agp/backend.c | |||
| @@ -206,10 +206,9 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge) | |||
| 206 | bridge->driver->cleanup(); | 206 | bridge->driver->cleanup(); |
| 207 | if (bridge->driver->free_gatt_table) | 207 | if (bridge->driver->free_gatt_table) |
| 208 | bridge->driver->free_gatt_table(bridge); | 208 | bridge->driver->free_gatt_table(bridge); |
| 209 | if (bridge->key_list) { | 209 | |
| 210 | vfree(bridge->key_list); | 210 | vfree(bridge->key_list); |
| 211 | bridge->key_list = NULL; | 211 | bridge->key_list = NULL; |
| 212 | } | ||
| 213 | 212 | ||
| 214 | if (bridge->driver->agp_destroy_page && | 213 | if (bridge->driver->agp_destroy_page && |
| 215 | bridge->driver->needs_scratch_page) | 214 | bridge->driver->needs_scratch_page) |
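Note: this hunk, like the isdn, mtd and media ones later in the series, leans on the fact that kfree() and vfree() accept NULL and do nothing, so the caller-side guard is pure noise. A sketch of the pattern, wrapper name hypothetical:

    #include <linux/vmalloc.h>

    static void release_key_list(void **list)       /* hypothetical */
    {
            vfree(*list);   /* vfree(NULL) is a no-op, no guard needed */
            *list = NULL;   /* poison the pointer against double free */
    }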
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index 11f9ee581124..927a5bbe112c 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c | |||
| @@ -172,7 +172,7 @@ static int ac_register_board(unsigned long physloc, void __iomem *loc, | |||
| 172 | 172 | ||
| 173 | void cleanup_module(void) | 173 | void cleanup_module(void) |
| 174 | { | 174 | { |
| 175 | int i; | 175 | unsigned int i; |
| 176 | 176 | ||
| 177 | misc_deregister(&ac_miscdev); | 177 | misc_deregister(&ac_miscdev); |
| 178 | 178 | ||
| @@ -195,7 +195,7 @@ int __init applicom_init(void) | |||
| 195 | int i, numisa = 0; | 195 | int i, numisa = 0; |
| 196 | struct pci_dev *dev = NULL; | 196 | struct pci_dev *dev = NULL; |
| 197 | void __iomem *RamIO; | 197 | void __iomem *RamIO; |
| 198 | int boardno; | 198 | int boardno, ret; |
| 199 | 199 | ||
| 200 | printk(KERN_INFO "Applicom driver: $Id: ac.c,v 1.30 2000/03/22 16:03:57 dwmw2 Exp $\n"); | 200 | printk(KERN_INFO "Applicom driver: $Id: ac.c,v 1.30 2000/03/22 16:03:57 dwmw2 Exp $\n"); |
| 201 | 201 | ||
| @@ -294,7 +294,8 @@ int __init applicom_init(void) | |||
| 294 | } | 294 | } |
| 295 | 295 | ||
| 296 | if (!numisa) | 296 | if (!numisa) |
| 297 | printk(KERN_WARNING"ac.o: No valid ISA Applicom boards found at mem 0x%lx\n",mem); | 297 | printk(KERN_WARNING "ac.o: No valid ISA Applicom boards found " |
| 298 | "at mem 0x%lx\n", mem); | ||
| 298 | 299 | ||
| 299 | fin: | 300 | fin: |
| 300 | init_waitqueue_head(&FlagSleepRec); | 301 | init_waitqueue_head(&FlagSleepRec); |
| @@ -304,7 +305,11 @@ int __init applicom_init(void) | |||
| 304 | DeviceErrorCount = 0; | 305 | DeviceErrorCount = 0; |
| 305 | 306 | ||
| 306 | if (numboards) { | 307 | if (numboards) { |
| 307 | misc_register(&ac_miscdev); | 308 | ret = misc_register(&ac_miscdev); |
| 309 | if (ret) { | ||
| 310 | printk(KERN_WARNING "ac.o: Unable to register misc device\n"); | ||
| 311 | goto out; | ||
| 312 | } | ||
| 308 | for (i = 0; i < MAX_BOARD; i++) { | 313 | for (i = 0; i < MAX_BOARD; i++) { |
| 309 | int serial; | 314 | int serial; |
| 310 | char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1]; | 315 | char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1]; |
| @@ -337,6 +342,17 @@ int __init applicom_init(void) | |||
| 337 | 342 | ||
| 338 | else | 343 | else |
| 339 | return -ENXIO; | 344 | return -ENXIO; |
| 345 | |||
| 346 | out: | ||
| 347 | for (i = 0; i < MAX_BOARD; i++) { | ||
| 348 | if (!apbs[i].RamIO) | ||
| 349 | continue; | ||
| 350 | if (apbs[i].irq) | ||
| 351 | free_irq(apbs[i].irq, &dummy); | ||
| 352 | iounmap(apbs[i].RamIO); | ||
| 353 | } | ||
| 354 | pci_disable_device(dev); | ||
| 355 | return ret; | ||
| 340 | } | 356 | } |
| 341 | 357 | ||
| 342 | 358 | ||
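Note: applicom.c (and lcd.c and hdpu_cpustate.c further down) start checking misc_register(), which returns 0 on success or a negative errno; on failure everything acquired earlier must be unwound. A minimal sketch of the shape, all names hypothetical:

    #include <linux/miscdevice.h>
    #include <linux/init.h>

    static int __init mydrv_init(void)
    {
            int ret;

            /* ... ioremap()s, request_irq()s ... */
            ret = misc_register(&mydrv_miscdev);
            if (ret) {
                    printk(KERN_WARNING "mydrv: can't register misc device\n");
                    goto out;
            }
            return 0;
    out:
            /* release every IRQ and mapping claimed above, in reverse */
            return ret;
    }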
diff --git a/drivers/char/ftape/lowlevel/fdc-io.c b/drivers/char/ftape/lowlevel/fdc-io.c index 1704a2a57048..b2e0928e8428 100644 --- a/drivers/char/ftape/lowlevel/fdc-io.c +++ b/drivers/char/ftape/lowlevel/fdc-io.c | |||
| @@ -387,10 +387,8 @@ int fdc_interrupt_wait(unsigned int time) | |||
| 387 | 387 | ||
| 388 | set_current_state(TASK_INTERRUPTIBLE); | 388 | set_current_state(TASK_INTERRUPTIBLE); |
| 389 | add_wait_queue(&ftape_wait_intr, &wait); | 389 | add_wait_queue(&ftape_wait_intr, &wait); |
| 390 | while (!ft_interrupt_seen && timeout) { | 390 | while (!ft_interrupt_seen && timeout) |
| 391 | set_current_state(TASK_INTERRUPTIBLE); | 391 | timeout = schedule_timeout_interruptible(timeout); |
| 392 | timeout = schedule_timeout(timeout); | ||
| 393 | } | ||
| 394 | 392 | ||
| 395 | spin_lock_irq(¤t->sighand->siglock); | 393 | spin_lock_irq(¤t->sighand->siglock); |
| 396 | current->blocked = old_sigmask; | 394 | current->blocked = old_sigmask; |
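Note: the ftape loop also exploits the return value: schedule_timeout_interruptible() hands back the unexpired jiffies when the task is woken early, so feeding the result straight back in keeps the total wait bounded without recomputing a deadline. Simplified sketch (the real code sits on a wait queue set up beforehand):

    /* Wait up to 'timeout' jiffies for the interrupt flag. */
    while (!ft_interrupt_seen && timeout)
            timeout = schedule_timeout_interruptible(timeout);
    /* timeout == 0 here means the full period elapsed */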
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 5fe8461271fc..de0379b6d502 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c | |||
| @@ -100,14 +100,14 @@ static struct hpets *hpets; | |||
| 100 | #endif | 100 | #endif |
| 101 | 101 | ||
| 102 | #ifndef readq | 102 | #ifndef readq |
| 103 | static unsigned long long __inline readq(void __iomem *addr) | 103 | static inline unsigned long long readq(void __iomem *addr) |
| 104 | { | 104 | { |
| 105 | return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL); | 105 | return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL); |
| 106 | } | 106 | } |
| 107 | #endif | 107 | #endif |
| 108 | 108 | ||
| 109 | #ifndef writeq | 109 | #ifndef writeq |
| 110 | static void __inline writeq(unsigned long long v, void __iomem *addr) | 110 | static inline void writeq(unsigned long long v, void __iomem *addr) |
| 111 | { | 111 | { |
| 112 | writel(v & 0xffffffff, addr); | 112 | writel(v & 0xffffffff, addr); |
| 113 | writel(v >> 32, addr + 4); | 113 | writel(v >> 32, addr + 4); |
diff --git a/drivers/char/hw_random.c b/drivers/char/hw_random.c index 3480535a09c5..6f673d2de0b1 100644 --- a/drivers/char/hw_random.c +++ b/drivers/char/hw_random.c | |||
| @@ -513,10 +513,7 @@ static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size, | |||
| 513 | return ret ? : -EAGAIN; | 513 | return ret ? : -EAGAIN; |
| 514 | 514 | ||
| 515 | if(need_resched()) | 515 | if(need_resched()) |
| 516 | { | 516 | schedule_timeout_interruptible(1); |
| 517 | current->state = TASK_INTERRUPTIBLE; | ||
| 518 | schedule_timeout(1); | ||
| 519 | } | ||
| 520 | else | 517 | else |
| 521 | udelay(200); /* FIXME: We could poll for 250uS ?? */ | 518 | udelay(200); /* FIXME: We could poll for 250uS ?? */ |
| 522 | 519 | ||
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c index 82c5f30375ac..ba85eb1b6ec7 100644 --- a/drivers/char/ip2/i2lib.c +++ b/drivers/char/ip2/i2lib.c | |||
| @@ -655,8 +655,7 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands, | |||
| 655 | timeout--; // So negative values == forever | 655 | timeout--; // So negative values == forever |
| 656 | 656 | ||
| 657 | if (!in_interrupt()) { | 657 | if (!in_interrupt()) { |
| 658 | current->state = TASK_INTERRUPTIBLE; | 658 | schedule_timeout_interruptible(1); // short nap |
| 659 | schedule_timeout(1); // short nap | ||
| 660 | } else { | 659 | } else { |
| 661 | // we cannot sched/sleep in interrrupt silly | 660 | // we cannot sched/sleep in interrrupt silly |
| 662 | return 0; | 661 | return 0; |
| @@ -1132,8 +1131,7 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count, int user ) | |||
| 1132 | 1131 | ||
| 1133 | ip2trace (CHANN, ITRC_OUTPUT, 61, 0 ); | 1132 | ip2trace (CHANN, ITRC_OUTPUT, 61, 0 ); |
| 1134 | 1133 | ||
| 1135 | current->state = TASK_INTERRUPTIBLE; | 1134 | schedule_timeout_interruptible(2); |
| 1136 | schedule_timeout(2); | ||
| 1137 | if (signal_pending(current)) { | 1135 | if (signal_pending(current)) { |
| 1138 | break; | 1136 | break; |
| 1139 | } | 1137 | } |
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 278f84104996..b6e5cbfb09f8 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
| @@ -1920,8 +1920,7 @@ static int try_get_dev_id(struct smi_info *smi_info) | |||
| 1920 | for (;;) | 1920 | for (;;) |
| 1921 | { | 1921 | { |
| 1922 | if (smi_result == SI_SM_CALL_WITH_DELAY) { | 1922 | if (smi_result == SI_SM_CALL_WITH_DELAY) { |
| 1923 | set_current_state(TASK_UNINTERRUPTIBLE); | 1923 | schedule_timeout_uninterruptible(1); |
| 1924 | schedule_timeout(1); | ||
| 1925 | smi_result = smi_info->handlers->event( | 1924 | smi_result = smi_info->handlers->event( |
| 1926 | smi_info->si_sm, 100); | 1925 | smi_info->si_sm, 100); |
| 1927 | } | 1926 | } |
| @@ -2256,10 +2255,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi) | |||
| 2256 | 2255 | ||
| 2257 | /* Wait for the timer to stop. This avoids problems with race | 2256 | /* Wait for the timer to stop. This avoids problems with race |
| 2258 | conditions removing the timer here. */ | 2257 | conditions removing the timer here. */ |
| 2259 | while (! new_smi->timer_stopped) { | 2258 | while (!new_smi->timer_stopped) |
| 2260 | set_current_state(TASK_UNINTERRUPTIBLE); | 2259 | schedule_timeout_uninterruptible(1); |
| 2261 | schedule_timeout(1); | ||
| 2262 | } | ||
| 2263 | 2260 | ||
| 2264 | out_err: | 2261 | out_err: |
| 2265 | if (new_smi->intf) | 2262 | if (new_smi->intf) |
| @@ -2379,17 +2376,14 @@ static void __exit cleanup_one_si(struct smi_info *to_clean) | |||
| 2379 | 2376 | ||
| 2380 | /* Wait for the timer to stop. This avoids problems with race | 2377 | /* Wait for the timer to stop. This avoids problems with race |
| 2381 | conditions removing the timer here. */ | 2378 | conditions removing the timer here. */ |
| 2382 | while (! to_clean->timer_stopped) { | 2379 | while (!to_clean->timer_stopped) |
| 2383 | set_current_state(TASK_UNINTERRUPTIBLE); | 2380 | schedule_timeout_uninterruptible(1); |
| 2384 | schedule_timeout(1); | ||
| 2385 | } | ||
| 2386 | 2381 | ||
| 2387 | /* Interrupts and timeouts are stopped, now make sure the | 2382 | /* Interrupts and timeouts are stopped, now make sure the |
| 2388 | interface is in a clean state. */ | 2383 | interface is in a clean state. */ |
| 2389 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { | 2384 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { |
| 2390 | poll(to_clean); | 2385 | poll(to_clean); |
| 2391 | set_current_state(TASK_UNINTERRUPTIBLE); | 2386 | schedule_timeout_uninterruptible(1); |
| 2392 | schedule_timeout(1); | ||
| 2393 | } | 2387 | } |
| 2394 | 2388 | ||
| 2395 | rv = ipmi_unregister_smi(to_clean->intf); | 2389 | rv = ipmi_unregister_smi(to_clean->intf); |
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index e71aaae855ad..2da64bf7469c 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c | |||
| @@ -1037,10 +1037,8 @@ static __exit void ipmi_unregister_watchdog(void) | |||
| 1037 | /* Wait to make sure the message makes it out. The lower layer has | 1037 | /* Wait to make sure the message makes it out. The lower layer has |
| 1038 | pointers to our buffers, we want to make sure they are done before | 1038 | pointers to our buffers, we want to make sure they are done before |
| 1039 | we release our memory. */ | 1039 | we release our memory. */ |
| 1040 | while (atomic_read(&set_timeout_tofree)) { | 1040 | while (atomic_read(&set_timeout_tofree)) |
| 1041 | set_current_state(TASK_UNINTERRUPTIBLE); | 1041 | schedule_timeout_uninterruptible(1); |
| 1042 | schedule_timeout(1); | ||
| 1043 | } | ||
| 1044 | 1042 | ||
| 1045 | /* Disconnect from IPMI. */ | 1043 | /* Disconnect from IPMI. */ |
| 1046 | rv = ipmi_destroy_user(watchdog_user); | 1044 | rv = ipmi_destroy_user(watchdog_user); |
diff --git a/drivers/char/lcd.c b/drivers/char/lcd.c index cf01a720eb2e..b77161146144 100644 --- a/drivers/char/lcd.c +++ b/drivers/char/lcd.c | |||
| @@ -613,10 +613,15 @@ static struct miscdevice lcd_dev = { | |||
| 613 | 613 | ||
| 614 | static int lcd_init(void) | 614 | static int lcd_init(void) |
| 615 | { | 615 | { |
| 616 | int ret; | ||
| 616 | unsigned long data; | 617 | unsigned long data; |
| 617 | 618 | ||
| 618 | pr_info("%s\n", LCD_DRIVER); | 619 | pr_info("%s\n", LCD_DRIVER); |
| 619 | misc_register(&lcd_dev); | 620 | ret = misc_register(&lcd_dev); |
| 621 | if (ret) { | ||
| 622 | printk(KERN_WARNING LCD "Unable to register misc device.\n"); | ||
| 623 | return ret; | ||
| 624 | } | ||
| 620 | 625 | ||
| 621 | /* Check region? Naaah! Just snarf it up. */ | 626 | /* Check region? Naaah! Just snarf it up. */ |
| 622 | /* request_region(RTC_PORT(0), RTC_IO_EXTENT, "lcd");*/ | 627 | /* request_region(RTC_PORT(0), RTC_IO_EXTENT, "lcd");*/ |
diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 59eebe5a035f..2afb9038dbc5 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c | |||
| @@ -128,6 +128,7 @@ | |||
| 128 | #include <linux/console.h> | 128 | #include <linux/console.h> |
| 129 | #include <linux/device.h> | 129 | #include <linux/device.h> |
| 130 | #include <linux/wait.h> | 130 | #include <linux/wait.h> |
| 131 | #include <linux/jiffies.h> | ||
| 131 | 132 | ||
| 132 | #include <linux/parport.h> | 133 | #include <linux/parport.h> |
| 133 | #undef LP_STATS | 134 | #undef LP_STATS |
| @@ -307,7 +308,7 @@ static ssize_t lp_write(struct file * file, const char __user * buf, | |||
| 307 | (LP_F(minor) & LP_ABORT)); | 308 | (LP_F(minor) & LP_ABORT)); |
| 308 | 309 | ||
| 309 | #ifdef LP_STATS | 310 | #ifdef LP_STATS |
| 310 | if (jiffies-lp_table[minor].lastcall > LP_TIME(minor)) | 311 | if (time_after(jiffies, lp_table[minor].lastcall + LP_TIME(minor))) |
| 311 | lp_table[minor].runchars = 0; | 312 | lp_table[minor].runchars = 0; |
| 312 | 313 | ||
| 313 | lp_table[minor].lastcall = jiffies; | 314 | lp_table[minor].lastcall = jiffies; |
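Note: the lp.c change swaps a raw jiffies subtraction for time_after(), which compares through signed arithmetic and therefore stays correct when jiffies wraps around. A small sketch of the idiom:

    #include <linux/jiffies.h>

    unsigned long deadline = jiffies + HZ / 2;      /* 500 ms from now */

    if (time_after(jiffies, deadline)) {
            /* deadline has passed, even across a jiffies wrap */
    }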
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c index d0ef1ae41298..45d012d85e8c 100644 --- a/drivers/char/mxser.c +++ b/drivers/char/mxser.c | |||
| @@ -1058,8 +1058,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp) | |||
| 1058 | */ | 1058 | */ |
| 1059 | timeout = jiffies + HZ; | 1059 | timeout = jiffies + HZ; |
| 1060 | while (!(inb(info->base + UART_LSR) & UART_LSR_TEMT)) { | 1060 | while (!(inb(info->base + UART_LSR) & UART_LSR_TEMT)) { |
| 1061 | set_current_state(TASK_INTERRUPTIBLE); | 1061 | schedule_timeout_interruptible(5); |
| 1062 | schedule_timeout(5); | ||
| 1063 | if (time_after(jiffies, timeout)) | 1062 | if (time_after(jiffies, timeout)) |
| 1064 | break; | 1063 | break; |
| 1065 | } | 1064 | } |
| @@ -1080,10 +1079,8 @@ static void mxser_close(struct tty_struct *tty, struct file *filp) | |||
| 1080 | info->event = 0; | 1079 | info->event = 0; |
| 1081 | info->tty = NULL; | 1080 | info->tty = NULL; |
| 1082 | if (info->blocked_open) { | 1081 | if (info->blocked_open) { |
| 1083 | if (info->close_delay) { | 1082 | if (info->close_delay) |
| 1084 | set_current_state(TASK_INTERRUPTIBLE); | 1083 | schedule_timeout_interruptible(info->close_delay); |
| 1085 | schedule_timeout(info->close_delay); | ||
| 1086 | } | ||
| 1087 | wake_up_interruptible(&info->open_wait); | 1084 | wake_up_interruptible(&info->open_wait); |
| 1088 | } | 1085 | } |
| 1089 | 1086 | ||
| @@ -1801,8 +1798,7 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 1801 | #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT | 1798 | #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT |
| 1802 | printk("lsr = %d (jiff=%lu)...", lsr, jiffies); | 1799 | printk("lsr = %d (jiff=%lu)...", lsr, jiffies); |
| 1803 | #endif | 1800 | #endif |
| 1804 | set_current_state(TASK_INTERRUPTIBLE); | 1801 | schedule_timeout_interruptible(char_time); |
| 1805 | schedule_timeout(char_time); | ||
| 1806 | if (signal_pending(current)) | 1802 | if (signal_pending(current)) |
| 1807 | break; | 1803 | break; |
| 1808 | if (timeout && time_after(jiffies, orig_jiffies + timeout)) | 1804 | if (timeout && time_after(jiffies, orig_jiffies + timeout)) |
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c index 09103b3d8f05..c9bdf544ed2c 100644 --- a/drivers/char/n_tty.c +++ b/drivers/char/n_tty.c | |||
| @@ -62,7 +62,7 @@ | |||
| 62 | 62 | ||
| 63 | static inline unsigned char *alloc_buf(void) | 63 | static inline unsigned char *alloc_buf(void) |
| 64 | { | 64 | { |
| 65 | int prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; | 65 | unsigned int prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; |
| 66 | 66 | ||
| 67 | if (PAGE_SIZE != N_TTY_BUF_SIZE) | 67 | if (PAGE_SIZE != N_TTY_BUF_SIZE) |
| 68 | return kmalloc(N_TTY_BUF_SIZE, prio); | 68 | return kmalloc(N_TTY_BUF_SIZE, prio); |
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index 7a0c74648124..02d7f046c10a 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * linux/drivers/char/pcmcia/synclink_cs.c | 2 | * linux/drivers/char/pcmcia/synclink_cs.c |
| 3 | * | 3 | * |
| 4 | * $Id: synclink_cs.c,v 4.26 2004/08/11 19:30:02 paulkf Exp $ | 4 | * $Id: synclink_cs.c,v 4.34 2005/09/08 13:20:54 paulkf Exp $ |
| 5 | * | 5 | * |
| 6 | * Device driver for Microgate SyncLink PC Card | 6 | * Device driver for Microgate SyncLink PC Card |
| 7 | * multiprotocol serial adapter. | 7 | * multiprotocol serial adapter. |
| @@ -472,7 +472,7 @@ module_param_array(dosyncppp, int, NULL, 0); | |||
| 472 | MODULE_LICENSE("GPL"); | 472 | MODULE_LICENSE("GPL"); |
| 473 | 473 | ||
| 474 | static char *driver_name = "SyncLink PC Card driver"; | 474 | static char *driver_name = "SyncLink PC Card driver"; |
| 475 | static char *driver_version = "$Revision: 4.26 $"; | 475 | static char *driver_version = "$Revision: 4.34 $"; |
| 476 | 476 | ||
| 477 | static struct tty_driver *serial_driver; | 477 | static struct tty_driver *serial_driver; |
| 478 | 478 | ||
| @@ -1457,6 +1457,8 @@ static int startup(MGSLPC_INFO * info) | |||
| 1457 | 1457 | ||
| 1458 | info->pending_bh = 0; | 1458 | info->pending_bh = 0; |
| 1459 | 1459 | ||
| 1460 | memset(&info->icount, 0, sizeof(info->icount)); | ||
| 1461 | |||
| 1460 | init_timer(&info->tx_timer); | 1462 | init_timer(&info->tx_timer); |
| 1461 | info->tx_timer.data = (unsigned long)info; | 1463 | info->tx_timer.data = (unsigned long)info; |
| 1462 | info->tx_timer.function = tx_timeout; | 1464 | info->tx_timer.function = tx_timeout; |
| @@ -1946,9 +1948,13 @@ static int get_stats(MGSLPC_INFO * info, struct mgsl_icount __user *user_icount) | |||
| 1946 | int err; | 1948 | int err; |
| 1947 | if (debug_level >= DEBUG_LEVEL_INFO) | 1949 | if (debug_level >= DEBUG_LEVEL_INFO) |
| 1948 | printk("get_params(%s)\n", info->device_name); | 1950 | printk("get_params(%s)\n", info->device_name); |
| 1949 | COPY_TO_USER(err,user_icount, &info->icount, sizeof(struct mgsl_icount)); | 1951 | if (!user_icount) { |
| 1950 | if (err) | 1952 | memset(&info->icount, 0, sizeof(info->icount)); |
| 1951 | return -EFAULT; | 1953 | } else { |
| 1954 | COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount)); | ||
| 1955 | if (err) | ||
| 1956 | return -EFAULT; | ||
| 1957 | } | ||
| 1952 | return 0; | 1958 | return 0; |
| 1953 | } | 1959 | } |
| 1954 | 1960 | ||
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index f174aee659e5..9e9cf1407311 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c | |||
| @@ -560,7 +560,7 @@ ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq) | |||
| 560 | EXPORT_SYMBOL_GPL(__ide_abort); | 560 | EXPORT_SYMBOL_GPL(__ide_abort); |
| 561 | 561 | ||
| 562 | /** | 562 | /** |
| 563 | * ide_abort - abort pending IDE operatins | 563 | * ide_abort - abort pending IDE operations |
| 564 | * @drive: drive the error occurred on | 564 | * @drive: drive the error occurred on |
| 565 | * @msg: message to report | 565 | * @msg: message to report |
| 566 | * | 566 | * |
| @@ -623,7 +623,7 @@ static void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, | |||
| 623 | * @drive: drive the completion interrupt occurred on | 623 | * @drive: drive the completion interrupt occurred on |
| 624 | * | 624 | * |
| 625 | * drive_cmd_intr() is invoked on completion of a special DRIVE_CMD. | 625 | * drive_cmd_intr() is invoked on completion of a special DRIVE_CMD. |
| 626 | * We do any necessary daya reading and then wait for the drive to | 626 | * We do any necessary data reading and then wait for the drive to |
| 627 | * go non busy. At that point we may read the error data and complete | 627 | * go non busy. At that point we may read the error data and complete |
| 628 | * the request | 628 | * the request |
| 629 | */ | 629 | */ |
| @@ -773,7 +773,7 @@ EXPORT_SYMBOL_GPL(ide_init_sg_cmd); | |||
| 773 | 773 | ||
| 774 | /** | 774 | /** |
| 775 | * execute_drive_command - issue special drive command | 775 | * execute_drive_command - issue special drive command |
| 776 | * @drive: the drive to issue th command on | 776 | * @drive: the drive to issue the command on |
| 777 | * @rq: the request structure holding the command | 777 | * @rq: the request structure holding the command |
| 778 | * | 778 | * |
| 779 | * execute_drive_cmd() issues a special drive command, usually | 779 | * execute_drive_cmd() issues a special drive command, usually |
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 5a3dc46008e6..ee38e6b143a4 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
| @@ -2903,8 +2903,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout) | |||
| 2903 | } else if (!(tape->sense_key == 2 && tape->asc == 4 && | 2903 | } else if (!(tape->sense_key == 2 && tape->asc == 4 && |
| 2904 | (tape->ascq == 1 || tape->ascq == 8))) | 2904 | (tape->ascq == 1 || tape->ascq == 8))) |
| 2905 | return -EIO; | 2905 | return -EIO; |
| 2906 | current->state = TASK_INTERRUPTIBLE; | 2906 | msleep(100); |
| 2907 | schedule_timeout(HZ / 10); | ||
| 2908 | } | 2907 | } |
| 2909 | return -EIO; | 2908 | return -EIO; |
| 2910 | } | 2909 | } |
diff --git a/drivers/ide/ide-timing.h b/drivers/ide/ide-timing.h index c1196ce15b4d..2fcfac6e967a 100644 --- a/drivers/ide/ide-timing.h +++ b/drivers/ide/ide-timing.h | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic | 27 | * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic |
| 28 | */ | 28 | */ |
| 29 | 29 | ||
| 30 | #include <linux/kernel.h> | ||
| 30 | #include <linux/hdreg.h> | 31 | #include <linux/hdreg.h> |
| 31 | 32 | ||
| 32 | #define XFER_PIO_5 0x0d | 33 | #define XFER_PIO_5 0x0d |
| @@ -96,11 +97,9 @@ static struct ide_timing ide_timing[] = { | |||
| 96 | #define IDE_TIMING_UDMA 0x80 | 97 | #define IDE_TIMING_UDMA 0x80 |
| 97 | #define IDE_TIMING_ALL 0xff | 98 | #define IDE_TIMING_ALL 0xff |
| 98 | 99 | ||
| 99 | #define MIN(a,b) ((a)<(b)?(a):(b)) | 100 | #define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin) |
| 100 | #define MAX(a,b) ((a)>(b)?(a):(b)) | 101 | #define ENOUGH(v,unit) (((v)-1)/(unit)+1) |
| 101 | #define FIT(v,min,max) MAX(MIN(v,max),min) | 102 | #define EZ(v,unit) ((v)?ENOUGH(v,unit):0) |
| 102 | #define ENOUGH(v,unit) (((v)-1)/(unit)+1) | ||
| 103 | #define EZ(v,unit) ((v)?ENOUGH(v,unit):0) | ||
| 104 | 103 | ||
| 105 | #define XFER_MODE 0xf0 | 104 | #define XFER_MODE 0xf0 |
| 106 | #define XFER_UDMA_133 0x48 | 105 | #define XFER_UDMA_133 0x48 |
| @@ -188,14 +187,14 @@ static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, int | |||
| 188 | 187 | ||
| 189 | static void ide_timing_merge(struct ide_timing *a, struct ide_timing *b, struct ide_timing *m, unsigned int what) | 188 | static void ide_timing_merge(struct ide_timing *a, struct ide_timing *b, struct ide_timing *m, unsigned int what) |
| 190 | { | 189 | { |
| 191 | if (what & IDE_TIMING_SETUP ) m->setup = MAX(a->setup, b->setup); | 190 | if (what & IDE_TIMING_SETUP ) m->setup = max(a->setup, b->setup); |
| 192 | if (what & IDE_TIMING_ACT8B ) m->act8b = MAX(a->act8b, b->act8b); | 191 | if (what & IDE_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); |
| 193 | if (what & IDE_TIMING_REC8B ) m->rec8b = MAX(a->rec8b, b->rec8b); | 192 | if (what & IDE_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); |
| 194 | if (what & IDE_TIMING_CYC8B ) m->cyc8b = MAX(a->cyc8b, b->cyc8b); | 193 | if (what & IDE_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); |
| 195 | if (what & IDE_TIMING_ACTIVE ) m->active = MAX(a->active, b->active); | 194 | if (what & IDE_TIMING_ACTIVE ) m->active = max(a->active, b->active); |
| 196 | if (what & IDE_TIMING_RECOVER) m->recover = MAX(a->recover, b->recover); | 195 | if (what & IDE_TIMING_RECOVER) m->recover = max(a->recover, b->recover); |
| 197 | if (what & IDE_TIMING_CYCLE ) m->cycle = MAX(a->cycle, b->cycle); | 196 | if (what & IDE_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); |
| 198 | if (what & IDE_TIMING_UDMA ) m->udma = MAX(a->udma, b->udma); | 197 | if (what & IDE_TIMING_UDMA ) m->udma = max(a->udma, b->udma); |
| 199 | } | 198 | } |
| 200 | 199 | ||
| 201 | static struct ide_timing* ide_timing_find_mode(short speed) | 200 | static struct ide_timing* ide_timing_find_mode(short speed) |
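Note: ide-timing.h retires its homegrown MIN/MAX in favour of the kernel's min()/max(), and rewrites FIT() as a clamp built from the typed variants; min_t()/max_t() cast both operands to the named type, avoiding the signed/unsigned traps a local macro pair can hide. The clamp as a function, for illustration:

    #include <linux/kernel.h>

    static short fit(short v, short vmin, short vmax)
    {
            /* clamp v into [vmin, vmax] with explicit short arithmetic */
            return max_t(short, min_t(short, v, vmax), vmin);
    }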
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c index dc0841b2721c..0ccf85fcee34 100644 --- a/drivers/ide/legacy/ide-cs.c +++ b/drivers/ide/legacy/ide-cs.c | |||
| @@ -43,6 +43,7 @@ | |||
| 43 | #include <linux/ide.h> | 43 | #include <linux/ide.h> |
| 44 | #include <linux/hdreg.h> | 44 | #include <linux/hdreg.h> |
| 45 | #include <linux/major.h> | 45 | #include <linux/major.h> |
| 46 | #include <linux/delay.h> | ||
| 46 | #include <asm/io.h> | 47 | #include <asm/io.h> |
| 47 | #include <asm/system.h> | 48 | #include <asm/system.h> |
| 48 | 49 | ||
| @@ -340,8 +341,7 @@ static void ide_config(dev_link_t *link) | |||
| 340 | break; | 341 | break; |
| 341 | } | 342 | } |
| 342 | } | 343 | } |
| 343 | __set_current_state(TASK_UNINTERRUPTIBLE); | 344 | msleep(100); |
| 344 | schedule_timeout(HZ/10); | ||
| 345 | } | 345 | } |
| 346 | 346 | ||
| 347 | if (hd < 0) { | 347 | if (hd < 0) { |
diff --git a/drivers/isdn/i4l/isdn_bsdcomp.c b/drivers/isdn/i4l/isdn_bsdcomp.c index baf4bcad9bf9..0afe442db3b0 100644 --- a/drivers/isdn/i4l/isdn_bsdcomp.c +++ b/drivers/isdn/i4l/isdn_bsdcomp.c | |||
| @@ -283,23 +283,19 @@ static void bsd_free (void *state) | |||
| 283 | /* | 283 | /* |
| 284 | * Release the dictionary | 284 | * Release the dictionary |
| 285 | */ | 285 | */ |
| 286 | if (db->dict) { | 286 | vfree(db->dict); |
| 287 | vfree (db->dict); | 287 | db->dict = NULL; |
| 288 | db->dict = NULL; | ||
| 289 | } | ||
| 290 | 288 | ||
| 291 | /* | 289 | /* |
| 292 | * Release the string buffer | 290 | * Release the string buffer |
| 293 | */ | 291 | */ |
| 294 | if (db->lens) { | 292 | vfree(db->lens); |
| 295 | vfree (db->lens); | 293 | db->lens = NULL; |
| 296 | db->lens = NULL; | ||
| 297 | } | ||
| 298 | 294 | ||
| 299 | /* | 295 | /* |
| 300 | * Finally release the structure itself. | 296 | * Finally release the structure itself. |
| 301 | */ | 297 | */ |
| 302 | kfree (db); | 298 | kfree(db); |
| 303 | } | 299 | } |
| 304 | } | 300 | } |
| 305 | 301 | ||
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index eebcb0b97f0e..8a7d54a5c97d 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c | |||
| @@ -1953,7 +1953,8 @@ isdn_add_channels(isdn_driver_t *d, int drvidx, int n, int adding) | |||
| 1953 | kfree(d->rcvcount); | 1953 | kfree(d->rcvcount); |
| 1954 | if (!(d->rcvcount = kmalloc(sizeof(int) * m, GFP_ATOMIC))) { | 1954 | if (!(d->rcvcount = kmalloc(sizeof(int) * m, GFP_ATOMIC))) { |
| 1955 | printk(KERN_WARNING "register_isdn: Could not alloc rcvcount\n"); | 1955 | printk(KERN_WARNING "register_isdn: Could not alloc rcvcount\n"); |
| 1956 | if (!adding) kfree(d->rcverr); | 1956 | if (!adding) |
| 1957 | kfree(d->rcverr); | ||
| 1957 | return -1; | 1958 | return -1; |
| 1958 | } | 1959 | } |
| 1959 | memset((char *) d->rcvcount, 0, sizeof(int) * m); | 1960 | memset((char *) d->rcvcount, 0, sizeof(int) * m); |
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index 17212b4201a1..cc07bbebbb16 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c | |||
| @@ -568,12 +568,9 @@ int dm_create_persistent(struct exception_store *store, uint32_t chunk_size) | |||
| 568 | 568 | ||
| 569 | bad: | 569 | bad: |
| 570 | dm_io_put(sectors_to_pages(chunk_size)); | 570 | dm_io_put(sectors_to_pages(chunk_size)); |
| 571 | if (ps) { | 571 | if (ps && ps->area) |
| 572 | if (ps->area) | 572 | free_area(ps); |
| 573 | free_area(ps); | 573 | kfree(ps); |
| 574 | |||
| 575 | kfree(ps); | ||
| 576 | } | ||
| 577 | return r; | 574 | return r; |
| 578 | } | 575 | } |
| 579 | 576 | ||
diff --git a/drivers/md/md.c b/drivers/md/md.c index 1554b924fbb9..2897df90df44 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(pers_lock); | |||
| 74 | * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit' | 74 | * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit' |
| 75 | * is 1000 KB/sec, so the extra system load does not show up that much. | 75 | * is 1000 KB/sec, so the extra system load does not show up that much. |
| 76 | * Increase it if you want to have more _guaranteed_ speed. Note that | 76 | * Increase it if you want to have more _guaranteed_ speed. Note that |
| 77 | * the RAID driver will use the maximum available bandwith if the IO | 77 | * the RAID driver will use the maximum available bandwidth if the IO |
| 78 | * subsystem is idle. There is also an 'absolute maximum' reconstruction | 78 | * subsystem is idle. There is also an 'absolute maximum' reconstruction |
| 79 | * speed limit - in case reconstruction slows down your system despite | 79 | * speed limit - in case reconstruction slows down your system despite |
| 80 | * idle IO detection. | 80 | * idle IO detection. |
| @@ -3616,7 +3616,7 @@ static void md_do_sync(mddev_t *mddev) | |||
| 3616 | printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev)); | 3616 | printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev)); |
| 3617 | printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:" | 3617 | printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:" |
| 3618 | " %d KB/sec/disc.\n", sysctl_speed_limit_min); | 3618 | " %d KB/sec/disc.\n", sysctl_speed_limit_min); |
| 3619 | printk(KERN_INFO "md: using maximum available idle IO bandwith " | 3619 | printk(KERN_INFO "md: using maximum available idle IO bandwidth " |
| 3620 | "(but not more than %d KB/sec) for reconstruction.\n", | 3620 | "(but not more than %d KB/sec) for reconstruction.\n", |
| 3621 | sysctl_speed_limit_max); | 3621 | sysctl_speed_limit_max); |
| 3622 | 3622 | ||
diff --git a/drivers/media/common/saa7146_core.c b/drivers/media/common/saa7146_core.c index cd5828b5e9e3..206cc2f61f26 100644 --- a/drivers/media/common/saa7146_core.c +++ b/drivers/media/common/saa7146_core.c | |||
| @@ -168,10 +168,8 @@ void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt) | |||
| 168 | return; | 168 | return; |
| 169 | pci_free_consistent(pci, pt->size, pt->cpu, pt->dma); | 169 | pci_free_consistent(pci, pt->size, pt->cpu, pt->dma); |
| 170 | pt->cpu = NULL; | 170 | pt->cpu = NULL; |
| 171 | if (NULL != pt->slist) { | 171 | kfree(pt->slist); |
| 172 | kfree(pt->slist); | 172 | pt->slist = NULL; |
| 173 | pt->slist = NULL; | ||
| 174 | } | ||
| 175 | } | 173 | } |
| 176 | 174 | ||
| 177 | int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt) | 175 | int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt) |
diff --git a/drivers/media/video/cpia_usb.c b/drivers/media/video/cpia_usb.c index cdda423386c5..9774e94d1e7d 100644 --- a/drivers/media/video/cpia_usb.c +++ b/drivers/media/video/cpia_usb.c | |||
| @@ -445,10 +445,8 @@ static void cpia_usb_free_resources(struct usb_cpia *ucpia, int try) | |||
| 445 | ucpia->sbuf[1].urb = NULL; | 445 | ucpia->sbuf[1].urb = NULL; |
| 446 | } | 446 | } |
| 447 | 447 | ||
| 448 | if (ucpia->sbuf[1].data) { | 448 | kfree(ucpia->sbuf[1].data); |
| 449 | kfree(ucpia->sbuf[1].data); | 449 | ucpia->sbuf[1].data = NULL; |
| 450 | ucpia->sbuf[1].data = NULL; | ||
| 451 | } | ||
| 452 | 450 | ||
| 453 | if (ucpia->sbuf[0].urb) { | 451 | if (ucpia->sbuf[0].urb) { |
| 454 | usb_kill_urb(ucpia->sbuf[0].urb); | 452 | usb_kill_urb(ucpia->sbuf[0].urb); |
| @@ -456,10 +454,8 @@ static void cpia_usb_free_resources(struct usb_cpia *ucpia, int try) | |||
| 456 | ucpia->sbuf[0].urb = NULL; | 454 | ucpia->sbuf[0].urb = NULL; |
| 457 | } | 455 | } |
| 458 | 456 | ||
| 459 | if (ucpia->sbuf[0].data) { | 457 | kfree(ucpia->sbuf[0].data); |
| 460 | kfree(ucpia->sbuf[0].data); | 458 | ucpia->sbuf[0].data = NULL; |
| 461 | ucpia->sbuf[0].data = NULL; | ||
| 462 | } | ||
| 463 | } | 459 | } |
| 464 | 460 | ||
| 465 | static int cpia_usb_close(void *privdata) | 461 | static int cpia_usb_close(void *privdata) |
| @@ -623,20 +619,14 @@ static void cpia_disconnect(struct usb_interface *intf) | |||
| 623 | 619 | ||
| 624 | ucpia->curbuff = ucpia->workbuff = NULL; | 620 | ucpia->curbuff = ucpia->workbuff = NULL; |
| 625 | 621 | ||
| 626 | if (ucpia->buffers[2]) { | 622 | vfree(ucpia->buffers[2]); |
| 627 | vfree(ucpia->buffers[2]); | 623 | ucpia->buffers[2] = NULL; |
| 628 | ucpia->buffers[2] = NULL; | ||
| 629 | } | ||
| 630 | 624 | ||
| 631 | if (ucpia->buffers[1]) { | 625 | vfree(ucpia->buffers[1]); |
| 632 | vfree(ucpia->buffers[1]); | 626 | ucpia->buffers[1] = NULL; |
| 633 | ucpia->buffers[1] = NULL; | ||
| 634 | } | ||
| 635 | 627 | ||
| 636 | if (ucpia->buffers[0]) { | 628 | vfree(ucpia->buffers[0]); |
| 637 | vfree(ucpia->buffers[0]); | 629 | ucpia->buffers[0] = NULL; |
| 638 | ucpia->buffers[0] = NULL; | ||
| 639 | } | ||
| 640 | 630 | ||
| 641 | cam->lowlevel_data = NULL; | 631 | cam->lowlevel_data = NULL; |
| 642 | kfree(ucpia); | 632 | kfree(ucpia); |
diff --git a/drivers/media/video/stradis.c b/drivers/media/video/stradis.c index b57743571087..d4497dbae05c 100644 --- a/drivers/media/video/stradis.c +++ b/drivers/media/video/stradis.c | |||
| @@ -2184,30 +2184,18 @@ static void release_saa(void) | |||
| 2184 | vfree(saa->vidbuf); | 2184 | vfree(saa->vidbuf); |
| 2185 | vfree(saa->audbuf); | 2185 | vfree(saa->audbuf); |
| 2186 | vfree(saa->osdbuf); | 2186 | vfree(saa->osdbuf); |
| 2187 | if (saa->dmavid2) | 2187 | kfree(saa->dmavid2); |
| 2188 | kfree((void *) saa->dmavid2); | ||
| 2189 | saa->audbuf = saa->vidbuf = saa->osdbuf = NULL; | 2188 | saa->audbuf = saa->vidbuf = saa->osdbuf = NULL; |
| 2190 | saa->dmavid2 = NULL; | 2189 | saa->dmavid2 = NULL; |
| 2191 | if (saa->dmadebi) | 2190 | kfree(saa->dmadebi); |
| 2192 | kfree((void *) saa->dmadebi); | 2191 | kfree(saa->dmavid1); |
| 2193 | if (saa->dmavid1) | 2192 | kfree(saa->dmavid3); |
| 2194 | kfree((void *) saa->dmavid1); | 2193 | kfree(saa->dmaa1in); |
| 2195 | if (saa->dmavid2) | 2194 | kfree(saa->dmaa1out); |
| 2196 | kfree((void *) saa->dmavid2); | 2195 | kfree(saa->dmaa2in); |
| 2197 | if (saa->dmavid3) | 2196 | kfree(saa->dmaa2out); |
| 2198 | kfree((void *) saa->dmavid3); | 2197 | kfree(saa->dmaRPS1); |
| 2199 | if (saa->dmaa1in) | 2198 | kfree(saa->dmaRPS2); |
| 2200 | kfree((void *) saa->dmaa1in); | ||
| 2201 | if (saa->dmaa1out) | ||
| 2202 | kfree((void *) saa->dmaa1out); | ||
| 2203 | if (saa->dmaa2in) | ||
| 2204 | kfree((void *) saa->dmaa2in); | ||
| 2205 | if (saa->dmaa2out) | ||
| 2206 | kfree((void *) saa->dmaa2out); | ||
| 2207 | if (saa->dmaRPS1) | ||
| 2208 | kfree((void *) saa->dmaRPS1); | ||
| 2209 | if (saa->dmaRPS2) | ||
| 2210 | kfree((void *) saa->dmaRPS2); | ||
| 2211 | free_irq(saa->irq, saa); | 2199 | free_irq(saa->irq, saa); |
| 2212 | if (saa->saa7146_mem) | 2200 | if (saa->saa7146_mem) |
| 2213 | iounmap(saa->saa7146_mem); | 2201 | iounmap(saa->saa7146_mem); |
diff --git a/drivers/media/video/video-buf.c b/drivers/media/video/video-buf.c index 97354f253a80..574b8e36f3c6 100644 --- a/drivers/media/video/video-buf.c +++ b/drivers/media/video/video-buf.c | |||
| @@ -267,10 +267,10 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma) | |||
| 267 | kfree(dma->pages); | 267 | kfree(dma->pages); |
| 268 | dma->pages = NULL; | 268 | dma->pages = NULL; |
| 269 | } | 269 | } |
| 270 | if (dma->vmalloc) { | 270 | |
| 271 | vfree(dma->vmalloc); | 271 | vfree(dma->vmalloc); |
| 272 | dma->vmalloc = NULL; | 272 | dma->vmalloc = NULL; |
| 273 | } | 273 | |
| 274 | if (dma->bus_addr) { | 274 | if (dma->bus_addr) { |
| 275 | dma->bus_addr = 0; | 275 | dma->bus_addr = 0; |
| 276 | } | 276 | } |
diff --git a/drivers/media/video/zoran_driver.c b/drivers/media/video/zoran_driver.c index ba838a42ec80..53adeb70f2ca 100644 --- a/drivers/media/video/zoran_driver.c +++ b/drivers/media/video/zoran_driver.c | |||
| @@ -650,7 +650,7 @@ jpg_fbuffer_free (struct file *file) | |||
| 650 | off += PAGE_SIZE) | 650 | off += PAGE_SIZE) |
| 651 | ClearPageReserved(MAP_NR | 651 | ClearPageReserved(MAP_NR |
| 652 | (mem + off)); | 652 | (mem + off)); |
| 653 | kfree((void *) mem); | 653 | kfree(mem); |
| 654 | fh->jpg_buffers.buffer[i].frag_tab[0] = 0; | 654 | fh->jpg_buffers.buffer[i].frag_tab[0] = 0; |
| 655 | fh->jpg_buffers.buffer[i].frag_tab[1] = 0; | 655 | fh->jpg_buffers.buffer[i].frag_tab[1] = 0; |
| 656 | } | 656 | } |
diff --git a/drivers/media/video/zr36120.c b/drivers/media/video/zr36120.c index c33533155cc7..07286816d7df 100644 --- a/drivers/media/video/zr36120.c +++ b/drivers/media/video/zr36120.c | |||
| @@ -820,11 +820,9 @@ void zoran_close(struct video_device* dev) | |||
| 820 | msleep(100); /* Wait 1/10th of a second */ | 820 | msleep(100); /* Wait 1/10th of a second */ |
| 821 | 821 | ||
| 822 | /* free the allocated framebuffer */ | 822 | /* free the allocated framebuffer */ |
| 823 | if (ztv->fbuffer) | 823 | bfree(ztv->fbuffer, ZORAN_MAX_FBUFSIZE); |
| 824 | bfree( ztv->fbuffer, ZORAN_MAX_FBUFSIZE ); | ||
| 825 | ztv->fbuffer = 0; | 824 | ztv->fbuffer = 0; |
| 826 | if (ztv->overinfo.overlay) | 825 | kfree(ztv->overinfo.overlay); |
| 827 | kfree( ztv->overinfo.overlay ); | ||
| 828 | ztv->overinfo.overlay = 0; | 826 | ztv->overinfo.overlay = 0; |
| 829 | 827 | ||
| 830 | } | 828 | } |
diff --git a/drivers/misc/hdpuftrs/hdpu_cpustate.c b/drivers/misc/hdpuftrs/hdpu_cpustate.c index 7501fab349e4..46de5c940555 100644 --- a/drivers/misc/hdpuftrs/hdpu_cpustate.c +++ b/drivers/misc/hdpuftrs/hdpu_cpustate.c | |||
| @@ -192,22 +192,37 @@ static int hdpu_cpustate_probe(struct device *ddev) | |||
| 192 | { | 192 | { |
| 193 | struct platform_device *pdev = to_platform_device(ddev); | 193 | struct platform_device *pdev = to_platform_device(ddev); |
| 194 | struct resource *res; | 194 | struct resource *res; |
| 195 | struct proc_dir_entry *proc_de; | ||
| 196 | int ret; | ||
| 195 | 197 | ||
| 196 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 198 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 197 | cpustate.set_addr = (unsigned long *)res->start; | 199 | cpustate.set_addr = (unsigned long *)res->start; |
| 198 | cpustate.clr_addr = (unsigned long *)res->end - 1; | 200 | cpustate.clr_addr = (unsigned long *)res->end - 1; |
| 199 | 201 | ||
| 200 | misc_register(&cpustate_dev); | 202 | ret = misc_register(&cpustate_dev); |
| 201 | create_proc_read_entry("sky_cpustate", 0, 0, cpustate_read_proc, NULL); | 203 | if (ret) { |
| 204 | printk(KERN_WARNING "sky_cpustate: Unable to register misc " | ||
| 205 | "device.\n"); | ||
| 206 | cpustate.set_addr = NULL; | ||
| 207 | cpustate.clr_addr = NULL; | ||
| 208 | return ret; | ||
| 209 | } | ||
| 210 | |||
| 211 | proc_de = create_proc_read_entry("sky_cpustate", 0, 0, | ||
| 212 | cpustate_read_proc, NULL); | ||
| 213 | if (proc_de == NULL) | ||
| 214 | printk(KERN_WARNING "sky_cpustate: Unable to create proc " | ||
| 215 | "dir entry\n"); | ||
| 202 | 216 | ||
| 203 | printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n"); | 217 | printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n"); |
| 204 | return 0; | 218 | return 0; |
| 205 | } | 219 | } |
| 220 | |||
| 206 | static int hdpu_cpustate_remove(struct device *ddev) | 221 | static int hdpu_cpustate_remove(struct device *ddev) |
| 207 | { | 222 | { |
| 208 | 223 | ||
| 209 | cpustate.set_addr = 0; | 224 | cpustate.set_addr = NULL; |
| 210 | cpustate.clr_addr = 0; | 225 | cpustate.clr_addr = NULL; |
| 211 | 226 | ||
| 212 | remove_proc_entry("sky_cpustate", NULL); | 227 | remove_proc_entry("sky_cpustate", NULL); |
| 213 | misc_deregister(&cpustate_dev); | 228 | misc_deregister(&cpustate_dev); |
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c index bb713fed2f37..1443117fd8f4 100644 --- a/drivers/mtd/devices/mtdram.c +++ b/drivers/mtd/devices/mtdram.c | |||
| @@ -91,8 +91,7 @@ static void __exit cleanup_mtdram(void) | |||
| 91 | { | 91 | { |
| 92 | if (mtd_info) { | 92 | if (mtd_info) { |
| 93 | del_mtd_device(mtd_info); | 93 | del_mtd_device(mtd_info); |
| 94 | if (mtd_info->priv) | 94 | vfree(mtd_info->priv); |
| 95 | vfree(mtd_info->priv); | ||
| 96 | kfree(mtd_info); | 95 | kfree(mtd_info); |
| 97 | } | 96 | } |
| 98 | } | 97 | } |
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c index d9ab60b36fd4..d32c1b3a8ce3 100644 --- a/drivers/mtd/ftl.c +++ b/drivers/mtd/ftl.c | |||
| @@ -1017,27 +1017,16 @@ static int ftl_writesect(struct mtd_blktrans_dev *dev, | |||
| 1017 | 1017 | ||
| 1018 | void ftl_freepart(partition_t *part) | 1018 | void ftl_freepart(partition_t *part) |
| 1019 | { | 1019 | { |
| 1020 | if (part->VirtualBlockMap) { | ||
| 1021 | vfree(part->VirtualBlockMap); | 1020 | vfree(part->VirtualBlockMap); |
| 1022 | part->VirtualBlockMap = NULL; | 1021 | part->VirtualBlockMap = NULL; |
| 1023 | } | ||
| 1024 | if (part->VirtualPageMap) { | ||
| 1025 | kfree(part->VirtualPageMap); | 1022 | kfree(part->VirtualPageMap); |
| 1026 | part->VirtualPageMap = NULL; | 1023 | part->VirtualPageMap = NULL; |
| 1027 | } | ||
| 1028 | if (part->EUNInfo) { | ||
| 1029 | kfree(part->EUNInfo); | 1024 | kfree(part->EUNInfo); |
| 1030 | part->EUNInfo = NULL; | 1025 | part->EUNInfo = NULL; |
| 1031 | } | ||
| 1032 | if (part->XferInfo) { | ||
| 1033 | kfree(part->XferInfo); | 1026 | kfree(part->XferInfo); |
| 1034 | part->XferInfo = NULL; | 1027 | part->XferInfo = NULL; |
| 1035 | } | ||
| 1036 | if (part->bam_cache) { | ||
| 1037 | kfree(part->bam_cache); | 1028 | kfree(part->bam_cache); |
| 1038 | part->bam_cache = NULL; | 1029 | part->bam_cache = NULL; |
| 1039 | } | ||
| 1040 | |||
| 1041 | } /* ftl_freepart */ | 1030 | } /* ftl_freepart */ |
| 1042 | 1031 | ||
| 1043 | static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | 1032 | static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) |
diff --git a/drivers/net/bsd_comp.c b/drivers/net/bsd_comp.c index 3d88ad622bdb..fb4098ed469e 100644 --- a/drivers/net/bsd_comp.c +++ b/drivers/net/bsd_comp.c | |||
| @@ -323,33 +323,27 @@ static void bsd_reset (void *state) | |||
| 323 | */ | 323 | */ |
| 324 | 324 | ||
| 325 | static void bsd_free (void *state) | 325 | static void bsd_free (void *state) |
| 326 | { | 326 | { |
| 327 | struct bsd_db *db = (struct bsd_db *) state; | 327 | struct bsd_db *db = state; |
| 328 | 328 | ||
| 329 | if (db) | 329 | if (!db) |
| 330 | { | 330 | return; |
| 331 | |||
| 331 | /* | 332 | /* |
| 332 | * Release the dictionary | 333 | * Release the dictionary |
| 333 | */ | 334 | */ |
| 334 | if (db->dict) | 335 | vfree(db->dict); |
| 335 | { | 336 | db->dict = NULL; |
| 336 | vfree (db->dict); | ||
| 337 | db->dict = NULL; | ||
| 338 | } | ||
| 339 | /* | 337 | /* |
| 340 | * Release the string buffer | 338 | * Release the string buffer |
| 341 | */ | 339 | */ |
| 342 | if (db->lens) | 340 | vfree(db->lens); |
| 343 | { | 341 | db->lens = NULL; |
| 344 | vfree (db->lens); | ||
| 345 | db->lens = NULL; | ||
| 346 | } | ||
| 347 | /* | 342 | /* |
| 348 | * Finally release the structure itself. | 343 | * Finally release the structure itself. |
| 349 | */ | 344 | */ |
| 350 | kfree (db); | 345 | kfree(db); |
| 351 | } | 346 | } |
| 352 | } | ||
| 353 | 347 | ||
| 354 | /* | 348 | /* |
| 355 | * Allocate space for a (de) compressor. | 349 | * Allocate space for a (de) compressor. |
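Note: bsd_free() above is also a guard-clause rewrite: returning early on a NULL db drops the whole body one indentation level, after which the NULL-safe vfree() cleanup reads linearly. The shape, reduced to a sketch:

    static void bsd_free(void *state)
    {
            struct bsd_db *db = state;

            if (!db)
                    return;         /* guard clause instead of if (db) {...} */
            vfree(db->dict);
            vfree(db->lens);
            kfree(db);
    }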
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index bb71638a7c44..0df7e92b0bf8 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c | |||
| @@ -1232,9 +1232,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) | |||
| 1232 | navail = 0; /* total # of usable channels (not deregistered) */ | 1232 | navail = 0; /* total # of usable channels (not deregistered) */ |
| 1233 | hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; | 1233 | hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; |
| 1234 | i = 0; | 1234 | i = 0; |
| 1235 | list = &ppp->channels; | 1235 | list_for_each_entry(pch, &ppp->channels, clist) { |
| 1236 | while ((list = list->next) != &ppp->channels) { | ||
| 1237 | pch = list_entry(list, struct channel, clist); | ||
| 1238 | navail += pch->avail = (pch->chan != NULL); | 1236 | navail += pch->avail = (pch->chan != NULL); |
| 1239 | if (pch->avail) { | 1237 | if (pch->avail) { |
| 1240 | if (skb_queue_empty(&pch->file.xq) || | 1238 | if (skb_queue_empty(&pch->file.xq) || |
| @@ -1280,6 +1278,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) | |||
| 1280 | 1278 | ||
| 1281 | /* skip to the channel after the one we last used | 1279 | /* skip to the channel after the one we last used |
| 1282 | and start at that one */ | 1280 | and start at that one */ |
| 1281 | list = &ppp->channels; | ||
| 1283 | for (i = 0; i < ppp->nxchan; ++i) { | 1282 | for (i = 0; i < ppp->nxchan; ++i) { |
| 1284 | list = list->next; | 1283 | list = list->next; |
| 1285 | if (list == &ppp->channels) { | 1284 | if (list == &ppp->channels) { |
| @@ -1730,7 +1729,7 @@ static void | |||
| 1730 | ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) | 1729 | ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) |
| 1731 | { | 1730 | { |
| 1732 | u32 mask, seq; | 1731 | u32 mask, seq; |
| 1733 | struct list_head *l; | 1732 | struct channel *ch; |
| 1734 | int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; | 1733 | int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; |
| 1735 | 1734 | ||
| 1736 | if (!pskb_may_pull(skb, mphdrlen) || ppp->mrru == 0) | 1735 | if (!pskb_may_pull(skb, mphdrlen) || ppp->mrru == 0) |
| @@ -1784,8 +1783,7 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) | |||
| 1784 | * The list of channels can't change because we have the receive | 1783 | * The list of channels can't change because we have the receive |
| 1785 | * side of the ppp unit locked. | 1784 | * side of the ppp unit locked. |
| 1786 | */ | 1785 | */ |
| 1787 | for (l = ppp->channels.next; l != &ppp->channels; l = l->next) { | 1786 | list_for_each_entry(ch, &ppp->channels, clist) { |
| 1788 | struct channel *ch = list_entry(l, struct channel, clist); | ||
| 1789 | if (seq_before(ch->lastseq, seq)) | 1787 | if (seq_before(ch->lastseq, seq)) |
| 1790 | seq = ch->lastseq; | 1788 | seq = ch->lastseq; |
| 1791 | } | 1789 | } |
| @@ -2271,10 +2269,8 @@ static struct compressor_entry * | |||
| 2271 | find_comp_entry(int proto) | 2269 | find_comp_entry(int proto) |
| 2272 | { | 2270 | { |
| 2273 | struct compressor_entry *ce; | 2271 | struct compressor_entry *ce; |
| 2274 | struct list_head *list = &compressor_list; | ||
| 2275 | 2272 | ||
| 2276 | while ((list = list->next) != &compressor_list) { | 2273 | list_for_each_entry(ce, &compressor_list, list) { |
| 2277 | ce = list_entry(list, struct compressor_entry, list); | ||
| 2278 | if (ce->comp->compress_proto == proto) | 2274 | if (ce->comp->compress_proto == proto) |
| 2279 | return ce; | 2275 | return ce; |
| 2280 | } | 2276 | } |
| @@ -2540,20 +2536,15 @@ static struct channel * | |||
| 2540 | ppp_find_channel(int unit) | 2536 | ppp_find_channel(int unit) |
| 2541 | { | 2537 | { |
| 2542 | struct channel *pch; | 2538 | struct channel *pch; |
| 2543 | struct list_head *list; | ||
| 2544 | 2539 | ||
| 2545 | list = &new_channels; | 2540 | list_for_each_entry(pch, &new_channels, list) { |
| 2546 | while ((list = list->next) != &new_channels) { | ||
| 2547 | pch = list_entry(list, struct channel, list); | ||
| 2548 | if (pch->file.index == unit) { | 2541 | if (pch->file.index == unit) { |
| 2549 | list_del(&pch->list); | 2542 | list_del(&pch->list); |
| 2550 | list_add(&pch->list, &all_channels); | 2543 | list_add(&pch->list, &all_channels); |
| 2551 | return pch; | 2544 | return pch; |
| 2552 | } | 2545 | } |
| 2553 | } | 2546 | } |
| 2554 | list = &all_channels; | 2547 | list_for_each_entry(pch, &all_channels, list) { |
| 2555 | while ((list = list->next) != &all_channels) { | ||
| 2556 | pch = list_entry(list, struct channel, list); | ||
| 2557 | if (pch->file.index == unit) | 2548 | if (pch->file.index == unit) |
| 2558 | return pch; | 2549 | return pch; |
| 2559 | } | 2550 | } |
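All four ppp_generic loops above are the same mechanical conversion: an open-coded walk ("while ((list = list->next) != &head)" plus a list_entry() call) becomes list_for_each_entry(), which hands the loop body the containing structure directly. A hedged sketch with illustrative names:

    #include <linux/list.h>

    struct chan {
            struct list_head clist;   /* linkage into the unit's channel list */
            int avail;
    };

    static LIST_HEAD(channels);

    static int count_available(void)
    {
            struct chan *pch;
            int navail = 0;

            /* pch is already the container; no list_entry() needed */
            list_for_each_entry(pch, &channels, clist)
                    navail += pch->avail;
            return navail;
    }

Note that the plain iterator must not delete the entry it is standing on; ppp_find_channel() gets away with list_del() only because it returns immediately afterwards. The _safe variants (see the jffs hunk further down) cover the general case.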
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index 93800c126e86..ee48bfd67349 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c | |||
| @@ -2144,9 +2144,9 @@ srom_search(struct net_device *dev, struct pci_dev *pdev) | |||
| 2144 | u_long iobase = 0; /* Clear upper 32 bits in Alphas */ | 2144 | u_long iobase = 0; /* Clear upper 32 bits in Alphas */ |
| 2145 | int i, j, cfrv; | 2145 | int i, j, cfrv; |
| 2146 | struct de4x5_private *lp = netdev_priv(dev); | 2146 | struct de4x5_private *lp = netdev_priv(dev); |
| 2147 | struct list_head *walk = &pdev->bus_list; | 2147 | struct list_head *walk; |
| 2148 | 2148 | ||
| 2149 | for (walk = walk->next; walk != &pdev->bus_list; walk = walk->next) { | 2149 | list_for_each(walk, &pdev->bus_list) { |
| 2150 | struct pci_dev *this_dev = pci_dev_b(walk); | 2150 | struct pci_dev *this_dev = pci_dev_b(walk); |
| 2151 | 2151 | ||
| 2152 | /* Skip the pci_bus list entry */ | 2152 | /* Skip the pci_bus list entry */ |
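When the body wants the raw struct list_head rather than the container — here pci_dev_b() is the list_entry() wrapper that does the conversion — the pointer-level list_for_each() is the matching iterator. A sketch reusing the hunk's pci_dev_b() helper:

    #include <linux/list.h>
    #include <linux/pci.h>

    static void scan_bus(struct pci_dev *pdev)
    {
            struct list_head *walk;

            list_for_each(walk, &pdev->bus_list) {
                    struct pci_dev *this_dev = pci_dev_b(walk);
                    (void)this_dev;   /* placeholder for real per-device work */
            }
    }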
diff --git a/drivers/parisc/lasi.c b/drivers/parisc/lasi.c index 731855053392..cb84a4e84a2f 100644 --- a/drivers/parisc/lasi.c +++ b/drivers/parisc/lasi.c | |||
| @@ -20,7 +20,6 @@ | |||
| 20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
| 21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
| 22 | #include <linux/pm.h> | 22 | #include <linux/pm.h> |
| 23 | #include <linux/slab.h> | ||
| 24 | #include <linux/types.h> | 23 | #include <linux/types.h> |
| 25 | 24 | ||
| 26 | #include <asm/io.h> | 25 | #include <asm/io.h> |
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c index 694bae162fed..5b887ba5aaf9 100644 --- a/drivers/parport/ieee1284.c +++ b/drivers/parport/ieee1284.c | |||
| @@ -196,7 +196,7 @@ int parport_wait_peripheral(struct parport *port, | |||
| 196 | return 1; | 196 | return 1; |
| 197 | 197 | ||
| 198 | /* 40ms of slow polling. */ | 198 | /* 40ms of slow polling. */ |
| 199 | deadline = jiffies + (HZ + 24) / 25; | 199 | deadline = jiffies + msecs_to_jiffies(40); |
| 200 | while (time_before (jiffies, deadline)) { | 200 | while (time_before (jiffies, deadline)) { |
| 201 | int ret; | 201 | int ret; |
| 202 | 202 | ||
| @@ -205,7 +205,7 @@ int parport_wait_peripheral(struct parport *port, | |||
| 205 | 205 | ||
| 206 | /* Wait for 10ms (or until an interrupt occurs if | 206 | /* Wait for 10ms (or until an interrupt occurs if |
| 207 | * the handler is set) */ | 207 | * the handler is set) */ |
| 208 | if ((ret = parport_wait_event (port, (HZ + 99) / 100)) < 0) | 208 | if ((ret = parport_wait_event (port, msecs_to_jiffies(10))) < 0) |
| 209 | return ret; | 209 | return ret; |
| 210 | 210 | ||
| 211 | status = parport_read_status (port); | 211 | status = parport_read_status (port); |
| @@ -216,8 +216,7 @@ int parport_wait_peripheral(struct parport *port, | |||
| 216 | /* parport_wait_event didn't time out, but the | 216 | /* parport_wait_event didn't time out, but the |
| 217 | * peripheral wasn't actually ready either. | 217 | * peripheral wasn't actually ready either. |
| 218 | * Wait for another 10ms. */ | 218 | * Wait for another 10ms. */ |
| 219 | __set_current_state (TASK_INTERRUPTIBLE); | 219 | schedule_timeout_interruptible(msecs_to_jiffies(10)); |
| 220 | schedule_timeout ((HZ+ 99) / 100); | ||
| 221 | } | 220 | } |
| 222 | } | 221 | } |
| 223 | 222 | ||
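Two conversions run through the parport hunks. First, "(HZ + 24) / 25" is just 40 ms rounded up to whole ticks, written in a way that silently depends on CONFIG_HZ; msecs_to_jiffies() says the same thing directly. Second, the "__set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(...);" pair collapses into schedule_timeout_interruptible(). A sketch of a poll loop using both — the ready() callback is an assumption for illustration:

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/sched.h>

    static int poll_for_ready(int (*ready)(void))
    {
            /* 40 ms deadline, independent of the tick rate */
            unsigned long deadline = jiffies + msecs_to_jiffies(40);

            while (time_before(jiffies, deadline)) {
                    if (ready())
                            return 0;
                    /* sleep ~10 ms; a signal wakes us early */
                    schedule_timeout_interruptible(msecs_to_jiffies(10));
                    if (signal_pending(current))
                            return -EINTR;
            }
            return -ETIMEDOUT;
    }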
diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c index 6624278c6ed8..ce1e2aad8b10 100644 --- a/drivers/parport/ieee1284_ops.c +++ b/drivers/parport/ieee1284_ops.c | |||
| @@ -60,7 +60,7 @@ size_t parport_ieee1284_write_compat (struct parport *port, | |||
| 60 | parport_data_forward (port); | 60 | parport_data_forward (port); |
| 61 | while (count < len) { | 61 | while (count < len) { |
| 62 | unsigned long expire = jiffies + dev->timeout; | 62 | unsigned long expire = jiffies + dev->timeout; |
| 63 | long wait = (HZ + 99) / 100; | 63 | long wait = msecs_to_jiffies(10); |
| 64 | unsigned char mask = (PARPORT_STATUS_ERROR | 64 | unsigned char mask = (PARPORT_STATUS_ERROR |
| 65 | | PARPORT_STATUS_BUSY); | 65 | | PARPORT_STATUS_BUSY); |
| 66 | unsigned char val = (PARPORT_STATUS_ERROR | 66 | unsigned char val = (PARPORT_STATUS_ERROR |
| @@ -97,8 +97,7 @@ size_t parport_ieee1284_write_compat (struct parport *port, | |||
| 97 | our interrupt handler called. */ | 97 | our interrupt handler called. */ |
| 98 | if (count && no_irq) { | 98 | if (count && no_irq) { |
| 99 | parport_release (dev); | 99 | parport_release (dev); |
| 100 | __set_current_state (TASK_INTERRUPTIBLE); | 100 | schedule_timeout_interruptible(wait); |
| 101 | schedule_timeout (wait); | ||
| 102 | parport_claim_or_block (dev); | 101 | parport_claim_or_block (dev); |
| 103 | } | 102 | } |
| 104 | else | 103 | else |
| @@ -542,13 +541,12 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port, | |||
| 542 | /* Yield the port for a while. */ | 541 | /* Yield the port for a while. */ |
| 543 | if (count && dev->port->irq != PARPORT_IRQ_NONE) { | 542 | if (count && dev->port->irq != PARPORT_IRQ_NONE) { |
| 544 | parport_release (dev); | 543 | parport_release (dev); |
| 545 | __set_current_state (TASK_INTERRUPTIBLE); | 544 | schedule_timeout_interruptible(msecs_to_jiffies(40)); |
| 546 | schedule_timeout ((HZ + 24) / 25); | ||
| 547 | parport_claim_or_block (dev); | 545 | parport_claim_or_block (dev); |
| 548 | } | 546 | } |
| 549 | else | 547 | else |
| 550 | /* We must have the device claimed here. */ | 548 | /* We must have the device claimed here. */ |
| 551 | parport_wait_event (port, (HZ + 24) / 25); | 549 | parport_wait_event (port, msecs_to_jiffies(40)); |
| 552 | 550 | ||
| 553 | /* Is there a signal pending? */ | 551 | /* Is there a signal pending? */ |
| 554 | if (signal_pending (current)) | 552 | if (signal_pending (current)) |
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 1b938bb9be3c..c6493ad7c0c8 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c | |||
| @@ -173,8 +173,7 @@ static int change_mode(struct parport *p, int m) | |||
| 173 | if (time_after_eq (jiffies, expire)) | 173 | if (time_after_eq (jiffies, expire)) |
| 174 | /* The FIFO is stuck. */ | 174 | /* The FIFO is stuck. */ |
| 175 | return -EBUSY; | 175 | return -EBUSY; |
| 176 | __set_current_state (TASK_INTERRUPTIBLE); | 176 | schedule_timeout_interruptible(msecs_to_jiffies(10)); |
| 177 | schedule_timeout ((HZ + 99) / 100); | ||
| 178 | if (signal_pending (current)) | 177 | if (signal_pending (current)) |
| 179 | break; | 178 | break; |
| 180 | } | 179 | } |
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index cc9d65388e62..56a3b397efee 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
| @@ -44,10 +44,14 @@ pci_config_attr(subsystem_device, "0x%04x\n"); | |||
| 44 | pci_config_attr(class, "0x%06x\n"); | 44 | pci_config_attr(class, "0x%06x\n"); |
| 45 | pci_config_attr(irq, "%u\n"); | 45 | pci_config_attr(irq, "%u\n"); |
| 46 | 46 | ||
| 47 | static ssize_t local_cpus_show(struct device *dev, struct device_attribute *attr, char *buf) | 47 | static ssize_t local_cpus_show(struct device *dev, |
| 48 | struct device_attribute *attr, char *buf) | ||
| 48 | { | 49 | { |
| 49 | cpumask_t mask = pcibus_to_cpumask(to_pci_dev(dev)->bus); | 50 | cpumask_t mask; |
| 50 | int len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask); | 51 | int len; |
| 52 | |||
| 53 | mask = pcibus_to_cpumask(to_pci_dev(dev)->bus); | ||
| 54 | len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask); | ||
| 51 | strcat(buf,"\n"); | 55 | strcat(buf,"\n"); |
| 52 | return 1+len; | 56 | return 1+len; |
| 53 | } | 57 | } |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 35caec13023a..26a55d08b506 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
| @@ -72,11 +72,13 @@ void pci_remove_legacy_files(struct pci_bus *bus) { return; } | |||
| 72 | /* | 72 | /* |
| 73 | * PCI Bus Class Devices | 73 | * PCI Bus Class Devices |
| 74 | */ | 74 | */ |
| 75 | static ssize_t pci_bus_show_cpuaffinity(struct class_device *class_dev, char *buf) | 75 | static ssize_t pci_bus_show_cpuaffinity(struct class_device *class_dev, |
| 76 | char *buf) | ||
| 76 | { | 77 | { |
| 77 | cpumask_t cpumask = pcibus_to_cpumask(to_pci_bus(class_dev)); | ||
| 78 | int ret; | 78 | int ret; |
| 79 | cpumask_t cpumask; | ||
| 79 | 80 | ||
| 81 | cpumask = pcibus_to_cpumask(to_pci_bus(class_dev)); | ||
| 80 | ret = cpumask_scnprintf(buf, PAGE_SIZE, cpumask); | 82 | ret = cpumask_scnprintf(buf, PAGE_SIZE, cpumask); |
| 81 | if (ret < PAGE_SIZE) | 83 | if (ret < PAGE_SIZE) |
| 82 | buf[ret++] = '\n'; | 84 | buf[ret++] = '\n'; |
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c index d44205d52bf3..d89f83f769f5 100644 --- a/drivers/sbus/char/bbc_envctrl.c +++ b/drivers/sbus/char/bbc_envctrl.c | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #define __KERNEL_SYSCALLS__ | 7 | #define __KERNEL_SYSCALLS__ |
| 8 | static int errno; | ||
| 8 | 9 | ||
| 9 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
| 10 | #include <linux/kthread.h> | 11 | #include <linux/kthread.h> |
| @@ -13,8 +14,6 @@ | |||
| 13 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
| 14 | #include <asm/oplib.h> | 15 | #include <asm/oplib.h> |
| 15 | #include <asm/ebus.h> | 16 | #include <asm/ebus.h> |
| 16 | static int errno; | ||
| 17 | #include <asm/unistd.h> | ||
| 18 | 17 | ||
| 19 | #include "bbc_i2c.h" | 18 | #include "bbc_i2c.h" |
| 20 | #include "max1617.h" | 19 | #include "max1617.h" |
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c index d765cc1bf060..b0cc3c2588fd 100644 --- a/drivers/sbus/char/envctrl.c +++ b/drivers/sbus/char/envctrl.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | */ | 20 | */ |
| 21 | 21 | ||
| 22 | #define __KERNEL_SYSCALLS__ | 22 | #define __KERNEL_SYSCALLS__ |
| 23 | static int errno; | ||
| 23 | 24 | ||
| 24 | #include <linux/config.h> | 25 | #include <linux/config.h> |
| 25 | #include <linux/module.h> | 26 | #include <linux/module.h> |
| @@ -38,9 +39,6 @@ | |||
| 38 | #include <asm/uaccess.h> | 39 | #include <asm/uaccess.h> |
| 39 | #include <asm/envctrl.h> | 40 | #include <asm/envctrl.h> |
| 40 | 41 | ||
| 41 | static int errno; | ||
| 42 | #include <asm/unistd.h> | ||
| 43 | |||
| 44 | #define ENVCTRL_MINOR 162 | 42 | #define ENVCTRL_MINOR 162 |
| 45 | 43 | ||
| 46 | #define PCF8584_ADDRESS 0x55 | 44 | #define PCF8584_ADDRESS 0x55 |
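Both sbus drivers define __KERNEL_SYSCALLS__, which (on the architectures that still used it) makes the unistd headers expand inline syscall stubs that store their error code into a file-local "errno". That variable therefore has to be in scope before any header that instantiates a stub; parking it directly under the #define, instead of between two later includes, makes the ordering requirement visible. The shape, hedged because the stub machinery is arch-specific:

    #define __KERNEL_SYSCALLS__
    static int errno;          /* written by the inline syscall stubs */

    #include <linux/unistd.h>  /* headers from here on may expand stubs */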
diff --git a/drivers/scsi/53c7xx.c b/drivers/scsi/53c7xx.c index 2341d27ceed7..7a33c708f5b3 100644 --- a/drivers/scsi/53c7xx.c +++ b/drivers/scsi/53c7xx.c | |||
| @@ -6090,8 +6090,8 @@ NCR53c7x0_release(struct Scsi_Host *host) { | |||
| 6090 | if (hostdata->num_cmds) | 6090 | if (hostdata->num_cmds) |
| 6091 | printk ("scsi%d : leaked %d NCR53c7x0_cmd structures\n", | 6091 | printk ("scsi%d : leaked %d NCR53c7x0_cmd structures\n", |
| 6092 | host->host_no, hostdata->num_cmds); | 6092 | host->host_no, hostdata->num_cmds); |
| 6093 | if (hostdata->events) | 6093 | |
| 6094 | vfree ((void *)hostdata->events); | 6094 | vfree(hostdata->events); |
| 6095 | 6095 | ||
| 6096 | /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, which | 6096 | /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, which |
| 6097 | * XXX may be invalid (CONFIG_060_WRITETHROUGH) | 6097 | * XXX may be invalid (CONFIG_060_WRITETHROUGH) |
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index 13ecd0c47404..da6e51c7fe69 100644 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c | |||
| @@ -560,7 +560,7 @@ ch_set_voltag(scsi_changer *ch, u_int elem, | |||
| 560 | return result; | 560 | return result; |
| 561 | } | 561 | } |
| 562 | 562 | ||
| 563 | static int ch_gstatus(scsi_changer *ch, int type, unsigned char *dest) | 563 | static int ch_gstatus(scsi_changer *ch, int type, unsigned char __user *dest) |
| 564 | { | 564 | { |
| 565 | int retval = 0; | 565 | int retval = 0; |
| 566 | u_char data[16]; | 566 | u_char data[16]; |
| @@ -634,6 +634,7 @@ static int ch_ioctl(struct inode * inode, struct file * file, | |||
| 634 | { | 634 | { |
| 635 | scsi_changer *ch = file->private_data; | 635 | scsi_changer *ch = file->private_data; |
| 636 | int retval; | 636 | int retval; |
| 637 | void __user *argp = (void __user *)arg; | ||
| 637 | 638 | ||
| 638 | switch (cmd) { | 639 | switch (cmd) { |
| 639 | case CHIOGPARAMS: | 640 | case CHIOGPARAMS: |
| @@ -646,7 +647,7 @@ static int ch_ioctl(struct inode * inode, struct file * file, | |||
| 646 | params.cp_nportals = ch->counts[CHET_IE]; | 647 | params.cp_nportals = ch->counts[CHET_IE]; |
| 647 | params.cp_ndrives = ch->counts[CHET_DT]; | 648 | params.cp_ndrives = ch->counts[CHET_DT]; |
| 648 | 649 | ||
| 649 | if (copy_to_user((void *) arg, ¶ms, sizeof(params))) | 650 | if (copy_to_user(argp, ¶ms, sizeof(params))) |
| 650 | return -EFAULT; | 651 | return -EFAULT; |
| 651 | return 0; | 652 | return 0; |
| 652 | } | 653 | } |
| @@ -671,7 +672,7 @@ static int ch_ioctl(struct inode * inode, struct file * file, | |||
| 671 | vparams.cvp_n4 = ch->counts[CHET_V4]; | 672 | vparams.cvp_n4 = ch->counts[CHET_V4]; |
| 672 | strncpy(vparams.cvp_label4,vendor_labels[3],16); | 673 | strncpy(vparams.cvp_label4,vendor_labels[3],16); |
| 673 | } | 674 | } |
| 674 | if (copy_to_user((void *) arg, &vparams, sizeof(vparams))) | 675 | if (copy_to_user(argp, &vparams, sizeof(vparams))) |
| 675 | return -EFAULT; | 676 | return -EFAULT; |
| 676 | return 0; | 677 | return 0; |
| 677 | } | 678 | } |
| @@ -680,7 +681,7 @@ static int ch_ioctl(struct inode * inode, struct file * file, | |||
| 680 | { | 681 | { |
| 681 | struct changer_position pos; | 682 | struct changer_position pos; |
| 682 | 683 | ||
| 683 | if (copy_from_user(&pos, (void*)arg, sizeof (pos))) | 684 | if (copy_from_user(&pos, argp, sizeof (pos))) |
| 684 | return -EFAULT; | 685 | return -EFAULT; |
| 685 | 686 | ||
| 686 | if (0 != ch_checkrange(ch, pos.cp_type, pos.cp_unit)) { | 687 | if (0 != ch_checkrange(ch, pos.cp_type, pos.cp_unit)) { |
| @@ -699,7 +700,7 @@ static int ch_ioctl(struct inode * inode, struct file * file, | |||
| 699 | { | 700 | { |
| 700 | struct changer_move mv; | 701 | struct changer_move mv; |
| 701 | 702 | ||
| 702 | if (copy_from_user(&mv, (void*)arg, sizeof (mv))) | 703 | if (copy_from_user(&mv, argp, sizeof (mv))) |
| 703 | return -EFAULT; | 704 | return -EFAULT; |
| 704 | 705 | ||
| 705 | if (0 != ch_checkrange(ch, mv.cm_fromtype, mv.cm_fromunit) || | 706 | if (0 != ch_checkrange(ch, mv.cm_fromtype, mv.cm_fromunit) || |
| @@ -721,7 +722,7 @@ static int ch_ioctl(struct inode * inode, struct file * file, | |||
| 721 | { | 722 | { |
| 722 | struct changer_exchange mv; | 723 | struct changer_exchange mv; |
| 723 | 724 | ||
| 724 | if (copy_from_user(&mv, (void*)arg, sizeof (mv))) | 725 | if (copy_from_user(&mv, argp, sizeof (mv))) |
| 725 | return -EFAULT; | 726 | return -EFAULT; |
| 726 | 727 | ||
| 727 | if (0 != ch_checkrange(ch, mv.ce_srctype, mv.ce_srcunit ) || | 728 | if (0 != ch_checkrange(ch, mv.ce_srctype, mv.ce_srcunit ) || |
| @@ -746,7 +747,7 @@ static int ch_ioctl(struct inode * inode, struct file * file, | |||
| 746 | { | 747 | { |
| 747 | struct changer_element_status ces; | 748 | struct changer_element_status ces; |
| 748 | 749 | ||
| 749 | if (copy_from_user(&ces, (void*)arg, sizeof (ces))) | 750 | if (copy_from_user(&ces, argp, sizeof (ces))) |
| 750 | return -EFAULT; | 751 | return -EFAULT; |
| 751 | if (ces.ces_type < 0 || ces.ces_type >= CH_TYPES) | 752 | if (ces.ces_type < 0 || ces.ces_type >= CH_TYPES) |
| 752 | return -EINVAL; | 753 | return -EINVAL; |
| @@ -762,7 +763,7 @@ static int ch_ioctl(struct inode * inode, struct file * file, | |||
| 762 | unsigned int elem; | 763 | unsigned int elem; |
| 763 | int result,i; | 764 | int result,i; |
| 764 | 765 | ||
| 765 | if (copy_from_user(&cge, (void*)arg, sizeof (cge))) | 766 | if (copy_from_user(&cge, argp, sizeof (cge))) |
| 766 | return -EFAULT; | 767 | return -EFAULT; |
| 767 | 768 | ||
| 768 | if (0 != ch_checkrange(ch, cge.cge_type, cge.cge_unit)) | 769 | if (0 != ch_checkrange(ch, cge.cge_type, cge.cge_unit)) |
| @@ -825,7 +826,7 @@ static int ch_ioctl(struct inode * inode, struct file * file, | |||
| 825 | kfree(buffer); | 826 | kfree(buffer); |
| 826 | up(&ch->lock); | 827 | up(&ch->lock); |
| 827 | 828 | ||
| 828 | if (copy_to_user((void*)arg, &cge, sizeof (cge))) | 829 | if (copy_to_user(argp, &cge, sizeof (cge))) |
| 829 | return -EFAULT; | 830 | return -EFAULT; |
| 830 | return result; | 831 | return result; |
| 831 | } | 832 | } |
| @@ -843,7 +844,7 @@ static int ch_ioctl(struct inode * inode, struct file * file, | |||
| 843 | struct changer_set_voltag csv; | 844 | struct changer_set_voltag csv; |
| 844 | int elem; | 845 | int elem; |
| 845 | 846 | ||
| 846 | if (copy_from_user(&csv, (void*)arg, sizeof(csv))) | 847 | if (copy_from_user(&csv, argp, sizeof(csv))) |
| 847 | return -EFAULT; | 848 | return -EFAULT; |
| 848 | 849 | ||
| 849 | if (0 != ch_checkrange(ch, csv.csv_type, csv.csv_unit)) { | 850 | if (0 != ch_checkrange(ch, csv.csv_type, csv.csv_unit)) { |
| @@ -861,7 +862,7 @@ static int ch_ioctl(struct inode * inode, struct file * file, | |||
| 861 | } | 862 | } |
| 862 | 863 | ||
| 863 | default: | 864 | default: |
| 864 | return scsi_ioctl(ch->device, cmd, (void*)arg); | 865 | return scsi_ioctl(ch->device, cmd, argp); |
| 865 | 866 | ||
| 866 | } | 867 | } |
| 867 | } | 868 | } |
| @@ -894,9 +895,9 @@ static long ch_ioctl_compat(struct file * file, | |||
| 894 | case CHIOGSTATUS32: | 895 | case CHIOGSTATUS32: |
| 895 | { | 896 | { |
| 896 | struct changer_element_status32 ces32; | 897 | struct changer_element_status32 ces32; |
| 897 | unsigned char *data; | 898 | unsigned char __user *data; |
| 898 | 899 | ||
| 899 | if (copy_from_user(&ces32, (void*)arg, sizeof (ces32))) | 900 | if (copy_from_user(&ces32, (void __user *)arg, sizeof (ces32))) |
| 900 | return -EFAULT; | 901 | return -EFAULT; |
| 901 | if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES) | 902 | if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES) |
| 902 | return -EINVAL; | 903 | return -EINVAL; |
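The ch.c changes are sparse-annotation work: every pointer that crosses the user/kernel boundary is tagged __user, and the repeated "(void *)arg" casts are hoisted into one "void __user *argp" at the top of the ioctl handler, after which sparse (make C=1) can flag any direct dereference or unannotated copy. The idiom, with a made-up parameter struct:

    #include <linux/errno.h>
    #include <asm/uaccess.h>        /* copy_from_user / copy_to_user */

    struct demo_params { int in, out; };    /* illustrative only */

    static int demo_ioctl(unsigned int cmd, unsigned long arg)
    {
            void __user *argp = (void __user *)arg;    /* cast once */
            struct demo_params p;

            if (copy_from_user(&p, argp, sizeof(p)))
                    return -EFAULT;
            p.out = p.in * 2;
            if (copy_to_user(argp, &p, sizeof(p)))
                    return -EFAULT;
            return 0;
    }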
diff --git a/drivers/scsi/cpqfcTSinit.c b/drivers/scsi/cpqfcTSinit.c index d72be0ce89c8..3fda8d455c5b 100644 --- a/drivers/scsi/cpqfcTSinit.c +++ b/drivers/scsi/cpqfcTSinit.c | |||
| @@ -691,8 +691,7 @@ int cpqfcTS_ioctl( struct scsi_device *ScsiDev, int Cmnd, void *arg) | |||
| 691 | if( copy_to_user( vendor_cmd->bufp, buf, vendor_cmd->len)) | 691 | if( copy_to_user( vendor_cmd->bufp, buf, vendor_cmd->len)) |
| 692 | result = -EFAULT; | 692 | result = -EFAULT; |
| 693 | 693 | ||
| 694 | if( buf) | 694 | kfree(buf); |
| 695 | kfree( buf); | ||
| 696 | 695 | ||
| 697 | return result; | 696 | return result; |
| 698 | } | 697 | } |
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c index b5dc35355570..6e54c7d9b33c 100644 --- a/drivers/scsi/ibmmca.c +++ b/drivers/scsi/ibmmca.c | |||
| @@ -36,7 +36,6 @@ | |||
| 36 | #include <linux/proc_fs.h> | 36 | #include <linux/proc_fs.h> |
| 37 | #include <linux/stat.h> | 37 | #include <linux/stat.h> |
| 38 | #include <linux/mca.h> | 38 | #include <linux/mca.h> |
| 39 | #include <linux/string.h> | ||
| 40 | #include <linux/spinlock.h> | 39 | #include <linux/spinlock.h> |
| 41 | #include <linux/init.h> | 40 | #include <linux/init.h> |
| 42 | #include <linux/mca-legacy.h> | 41 | #include <linux/mca-legacy.h> |
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 89a4a0615c22..3f2f2464fa63 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c | |||
| @@ -1377,7 +1377,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct scsi | |||
| 1377 | 1377 | ||
| 1378 | if ((STp->buffer)->syscall_result || !SRpnt) { | 1378 | if ((STp->buffer)->syscall_result || !SRpnt) { |
| 1379 | printk(KERN_ERR "%s:E: Failed to read frame back from OnStream buffer\n", name); | 1379 | printk(KERN_ERR "%s:E: Failed to read frame back from OnStream buffer\n", name); |
| 1380 | vfree((void *)buffer); | 1380 | vfree(buffer); |
| 1381 | *aSRpnt = SRpnt; | 1381 | *aSRpnt = SRpnt; |
| 1382 | return (-EIO); | 1382 | return (-EIO); |
| 1383 | } | 1383 | } |
| @@ -1419,7 +1419,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct scsi | |||
| 1419 | 1419 | ||
| 1420 | if (new_frame > frame + 1000) { | 1420 | if (new_frame > frame + 1000) { |
| 1421 | printk(KERN_ERR "%s:E: Failed to find writable tape media\n", name); | 1421 | printk(KERN_ERR "%s:E: Failed to find writable tape media\n", name); |
| 1422 | vfree((void *)buffer); | 1422 | vfree(buffer); |
| 1423 | return (-EIO); | 1423 | return (-EIO); |
| 1424 | } | 1424 | } |
| 1425 | if ( i >= nframes + pending ) break; | 1425 | if ( i >= nframes + pending ) break; |
| @@ -1500,7 +1500,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct scsi | |||
| 1500 | SRpnt->sr_sense_buffer[12] == 0 && | 1500 | SRpnt->sr_sense_buffer[12] == 0 && |
| 1501 | SRpnt->sr_sense_buffer[13] == 2) { | 1501 | SRpnt->sr_sense_buffer[13] == 2) { |
| 1502 | printk(KERN_ERR "%s:E: Volume overflow in write error recovery\n", name); | 1502 | printk(KERN_ERR "%s:E: Volume overflow in write error recovery\n", name); |
| 1503 | vfree((void *)buffer); | 1503 | vfree(buffer); |
| 1504 | return (-EIO); /* hit end of tape = fail */ | 1504 | return (-EIO); /* hit end of tape = fail */ |
| 1505 | } | 1505 | } |
| 1506 | i = ((SRpnt->sr_sense_buffer[3] << 24) | | 1506 | i = ((SRpnt->sr_sense_buffer[3] << 24) | |
| @@ -1525,7 +1525,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct scsi | |||
| 1525 | } | 1525 | } |
| 1526 | if (!pending) | 1526 | if (!pending) |
| 1527 | osst_copy_to_buffer(STp->buffer, p); /* so buffer content == at entry in all cases */ | 1527 | osst_copy_to_buffer(STp->buffer, p); /* so buffer content == at entry in all cases */ |
| 1528 | vfree((void *)buffer); | 1528 | vfree(buffer); |
| 1529 | return 0; | 1529 | return 0; |
| 1530 | } | 1530 | } |
| 1531 | 1531 | ||
| @@ -5852,7 +5852,7 @@ static int osst_remove(struct device *dev) | |||
| 5852 | os_scsi_tapes[i] = NULL; | 5852 | os_scsi_tapes[i] = NULL; |
| 5853 | osst_nr_dev--; | 5853 | osst_nr_dev--; |
| 5854 | write_unlock(&os_scsi_tapes_lock); | 5854 | write_unlock(&os_scsi_tapes_lock); |
| 5855 | if (tpnt->header_cache != NULL) vfree(tpnt->header_cache); | 5855 | vfree(tpnt->header_cache); |
| 5856 | if (tpnt->buffer) { | 5856 | if (tpnt->buffer) { |
| 5857 | normalize_buffer(tpnt->buffer); | 5857 | normalize_buffer(tpnt->buffer); |
| 5858 | kfree(tpnt->buffer); | 5858 | kfree(tpnt->buffer); |
| @@ -5896,8 +5896,7 @@ static void __exit exit_osst (void) | |||
| 5896 | for (i=0; i < osst_max_dev; ++i) { | 5896 | for (i=0; i < osst_max_dev; ++i) { |
| 5897 | if (!(STp = os_scsi_tapes[i])) continue; | 5897 | if (!(STp = os_scsi_tapes[i])) continue; |
| 5898 | /* This is defensive, supposed to happen during detach */ | 5898 | /* This is defensive, supposed to happen during detach */ |
| 5899 | if (STp->header_cache) | 5899 | vfree(STp->header_cache); |
| 5900 | vfree(STp->header_cache); | ||
| 5901 | if (STp->buffer) { | 5900 | if (STp->buffer) { |
| 5902 | normalize_buffer(STp->buffer); | 5901 | normalize_buffer(STp->buffer); |
| 5903 | kfree(STp->buffer); | 5902 | kfree(STp->buffer); |
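The osst changes combine two freeing cleanups already seen above: vfree() takes a void *, so the "(void *)buffer" casts are noise (any object pointer converts implicitly), and since vfree(NULL) is a no-op the "if (ptr) vfree(ptr)" guards go too. The whole pattern reduces to:

    #include <linux/vmalloc.h>

    static void drop_cache(char *buffer)
    {
            vfree(buffer);     /* no cast, no NULL check required */
    }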
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 5b65e208893b..4d75cdfa0a0a 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c | |||
| @@ -864,7 +864,7 @@ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags) | |||
| 864 | /* | 864 | /* |
| 865 | * We're pretty sure there's a port here. Lets find out what | 865 | * We're pretty sure there's a port here. Lets find out what |
| 866 | * type of port it is. The IIR top two bits allows us to find | 866 | * type of port it is. The IIR top two bits allows us to find |
| 867 | * out if its 8250 or 16450, 16550, 16550A or later. This | 867 | * out if it's 8250 or 16450, 16550, 16550A or later. This |
| 868 | * determines what we test for next. | 868 | * determines what we test for next. |
| 869 | * | 869 | * |
| 870 | * We also initialise the EFR (if any) to zero for later. The | 870 | * We also initialise the EFR (if any) to zero for later. The |
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c index f2c9fa423d40..f6704688ee8c 100644 --- a/drivers/telephony/ixj.c +++ b/drivers/telephony/ixj.c | |||
| @@ -774,10 +774,7 @@ static int ixj_wink(IXJ *j) | |||
| 774 | j->pots_winkstart = jiffies; | 774 | j->pots_winkstart = jiffies; |
| 775 | SLIC_SetState(PLD_SLIC_STATE_OC, j); | 775 | SLIC_SetState(PLD_SLIC_STATE_OC, j); |
| 776 | 776 | ||
| 777 | while (time_before(jiffies, j->pots_winkstart + j->winktime)) { | 777 | msleep(jiffies_to_msecs(j->winktime)); |
| 778 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 779 | schedule_timeout(1); | ||
| 780 | } | ||
| 781 | 778 | ||
| 782 | SLIC_SetState(slicnow, j); | 779 | SLIC_SetState(slicnow, j); |
| 783 | return 0; | 780 | return 0; |
| @@ -1912,7 +1909,6 @@ static int ixj_pcmcia_cable_check(IXJ *j) | |||
| 1912 | 1909 | ||
| 1913 | static int ixj_hookstate(IXJ *j) | 1910 | static int ixj_hookstate(IXJ *j) |
| 1914 | { | 1911 | { |
| 1915 | unsigned long det; | ||
| 1916 | int fOffHook = 0; | 1912 | int fOffHook = 0; |
| 1917 | 1913 | ||
| 1918 | switch (j->cardtype) { | 1914 | switch (j->cardtype) { |
| @@ -1943,11 +1939,7 @@ static int ixj_hookstate(IXJ *j) | |||
| 1943 | j->pld_slicr.bits.state == PLD_SLIC_STATE_STANDBY) { | 1939 | j->pld_slicr.bits.state == PLD_SLIC_STATE_STANDBY) { |
| 1944 | if (j->flags.ringing || j->flags.cringing) { | 1940 | if (j->flags.ringing || j->flags.cringing) { |
| 1945 | if (!in_interrupt()) { | 1941 | if (!in_interrupt()) { |
| 1946 | det = jiffies + (hertz / 50); | 1942 | msleep(20); |
| 1947 | while (time_before(jiffies, det)) { | ||
| 1948 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 1949 | schedule_timeout(1); | ||
| 1950 | } | ||
| 1951 | } | 1943 | } |
| 1952 | SLIC_GetState(j); | 1944 | SLIC_GetState(j); |
| 1953 | if (j->pld_slicr.bits.state == PLD_SLIC_STATE_RINGING) { | 1945 | if (j->pld_slicr.bits.state == PLD_SLIC_STATE_RINGING) { |
| @@ -2062,7 +2054,7 @@ static void ixj_ring_start(IXJ *j) | |||
| 2062 | static int ixj_ring(IXJ *j) | 2054 | static int ixj_ring(IXJ *j) |
| 2063 | { | 2055 | { |
| 2064 | char cntr; | 2056 | char cntr; |
| 2065 | unsigned long jif, det; | 2057 | unsigned long jif; |
| 2066 | 2058 | ||
| 2067 | j->flags.ringing = 1; | 2059 | j->flags.ringing = 1; |
| 2068 | if (ixj_hookstate(j) & 1) { | 2060 | if (ixj_hookstate(j) & 1) { |
| @@ -2070,7 +2062,6 @@ static int ixj_ring(IXJ *j) | |||
| 2070 | j->flags.ringing = 0; | 2062 | j->flags.ringing = 0; |
| 2071 | return 1; | 2063 | return 1; |
| 2072 | } | 2064 | } |
| 2073 | det = 0; | ||
| 2074 | for (cntr = 0; cntr < j->maxrings; cntr++) { | 2065 | for (cntr = 0; cntr < j->maxrings; cntr++) { |
| 2075 | jif = jiffies + (1 * hertz); | 2066 | jif = jiffies + (1 * hertz); |
| 2076 | ixj_ring_on(j); | 2067 | ixj_ring_on(j); |
| @@ -2080,8 +2071,7 @@ static int ixj_ring(IXJ *j) | |||
| 2080 | j->flags.ringing = 0; | 2071 | j->flags.ringing = 0; |
| 2081 | return 1; | 2072 | return 1; |
| 2082 | } | 2073 | } |
| 2083 | set_current_state(TASK_INTERRUPTIBLE); | 2074 | schedule_timeout_interruptible(1); |
| 2084 | schedule_timeout(1); | ||
| 2085 | if (signal_pending(current)) | 2075 | if (signal_pending(current)) |
| 2086 | break; | 2076 | break; |
| 2087 | } | 2077 | } |
| @@ -2089,20 +2079,13 @@ static int ixj_ring(IXJ *j) | |||
| 2089 | ixj_ring_off(j); | 2079 | ixj_ring_off(j); |
| 2090 | while (time_before(jiffies, jif)) { | 2080 | while (time_before(jiffies, jif)) { |
| 2091 | if (ixj_hookstate(j) & 1) { | 2081 | if (ixj_hookstate(j) & 1) { |
| 2092 | det = jiffies + (hertz / 100); | 2082 | msleep(10); |
| 2093 | while (time_before(jiffies, det)) { | ||
| 2094 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 2095 | schedule_timeout(1); | ||
| 2096 | if (signal_pending(current)) | ||
| 2097 | break; | ||
| 2098 | } | ||
| 2099 | if (ixj_hookstate(j) & 1) { | 2083 | if (ixj_hookstate(j) & 1) { |
| 2100 | j->flags.ringing = 0; | 2084 | j->flags.ringing = 0; |
| 2101 | return 1; | 2085 | return 1; |
| 2102 | } | 2086 | } |
| 2103 | } | 2087 | } |
| 2104 | set_current_state(TASK_INTERRUPTIBLE); | 2088 | schedule_timeout_interruptible(1); |
| 2105 | schedule_timeout(1); | ||
| 2106 | if (signal_pending(current)) | 2089 | if (signal_pending(current)) |
| 2107 | break; | 2090 | break; |
| 2108 | } | 2091 | } |
| @@ -2168,10 +2151,8 @@ static int ixj_release(struct inode *inode, struct file *file_p) | |||
| 2168 | * Set up locks to ensure that only one process is talking to the DSP at a time. | 2151 | * Set up locks to ensure that only one process is talking to the DSP at a time. |
| 2169 | * This is necessary to keep the DSP from locking up. | 2152 | * This is necessary to keep the DSP from locking up. |
| 2170 | */ | 2153 | */ |
| 2171 | while(test_and_set_bit(board, (void *)&j->busyflags) != 0) { | 2154 | while(test_and_set_bit(board, (void *)&j->busyflags) != 0) |
| 2172 | set_current_state(TASK_INTERRUPTIBLE); | 2155 | schedule_timeout_interruptible(1); |
| 2173 | schedule_timeout(1); | ||
| 2174 | } | ||
| 2175 | if (ixjdebug & 0x0002) | 2156 | if (ixjdebug & 0x0002) |
| 2176 | printk(KERN_INFO "Closing board %d\n", NUM(inode)); | 2157 | printk(KERN_INFO "Closing board %d\n", NUM(inode)); |
| 2177 | 2158 | ||
| @@ -3301,14 +3282,10 @@ static void ixj_write_cidcw(IXJ *j) | |||
| 3301 | ixj_play_tone(j, 23); | 3282 | ixj_play_tone(j, 23); |
| 3302 | 3283 | ||
| 3303 | clear_bit(j->board, &j->busyflags); | 3284 | clear_bit(j->board, &j->busyflags); |
| 3304 | while(j->tone_state) { | 3285 | while(j->tone_state) |
| 3305 | set_current_state(TASK_INTERRUPTIBLE); | 3286 | schedule_timeout_interruptible(1); |
| 3306 | schedule_timeout(1); | 3287 | while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0) |
| 3307 | } | 3288 | schedule_timeout_interruptible(1); |
| 3308 | while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0) { | ||
| 3309 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 3310 | schedule_timeout(1); | ||
| 3311 | } | ||
| 3312 | if(ixjdebug & 0x0200) { | 3289 | if(ixjdebug & 0x0200) { |
| 3313 | printk("IXJ cidcw phone%d first tone end at %ld\n", j->board, jiffies); | 3290 | printk("IXJ cidcw phone%d first tone end at %ld\n", j->board, jiffies); |
| 3314 | } | 3291 | } |
| @@ -3328,14 +3305,10 @@ static void ixj_write_cidcw(IXJ *j) | |||
| 3328 | ixj_play_tone(j, 24); | 3305 | ixj_play_tone(j, 24); |
| 3329 | 3306 | ||
| 3330 | clear_bit(j->board, &j->busyflags); | 3307 | clear_bit(j->board, &j->busyflags); |
| 3331 | while(j->tone_state) { | 3308 | while(j->tone_state) |
| 3332 | set_current_state(TASK_INTERRUPTIBLE); | 3309 | schedule_timeout_interruptible(1); |
| 3333 | schedule_timeout(1); | 3310 | while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0) |
| 3334 | } | 3311 | schedule_timeout_interruptible(1); |
| 3335 | while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0) { | ||
| 3336 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 3337 | schedule_timeout(1); | ||
| 3338 | } | ||
| 3339 | if(ixjdebug & 0x0200) { | 3312 | if(ixjdebug & 0x0200) { |
| 3340 | printk("IXJ cidcw phone%d sent second tone at %ld\n", j->board, jiffies); | 3313 | printk("IXJ cidcw phone%d sent second tone at %ld\n", j->board, jiffies); |
| 3341 | } | 3314 | } |
| @@ -3343,14 +3316,10 @@ static void ixj_write_cidcw(IXJ *j) | |||
| 3343 | j->cidcw_wait = jiffies + ((50 * hertz) / 100); | 3316 | j->cidcw_wait = jiffies + ((50 * hertz) / 100); |
| 3344 | 3317 | ||
| 3345 | clear_bit(j->board, &j->busyflags); | 3318 | clear_bit(j->board, &j->busyflags); |
| 3346 | while(!j->flags.cidcw_ack && time_before(jiffies, j->cidcw_wait)) { | 3319 | while(!j->flags.cidcw_ack && time_before(jiffies, j->cidcw_wait)) |
| 3347 | set_current_state(TASK_INTERRUPTIBLE); | 3320 | schedule_timeout_interruptible(1); |
| 3348 | schedule_timeout(1); | 3321 | while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0) |
| 3349 | } | 3322 | schedule_timeout_interruptible(1); |
| 3350 | while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0) { | ||
| 3351 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 3352 | schedule_timeout(1); | ||
| 3353 | } | ||
| 3354 | j->cidcw_wait = 0; | 3323 | j->cidcw_wait = 0; |
| 3355 | if(!j->flags.cidcw_ack) { | 3324 | if(!j->flags.cidcw_ack) { |
| 3356 | if(ixjdebug & 0x0200) { | 3325 | if(ixjdebug & 0x0200) { |
| @@ -6125,10 +6094,8 @@ static int ixj_ioctl(struct inode *inode, struct file *file_p, unsigned int cmd, | |||
| 6125 | * Set up locks to ensure that only one process is talking to the DSP at a time. | 6094 | * Set up locks to ensure that only one process is talking to the DSP at a time. |
| 6126 | * This is necessary to keep the DSP from locking up. | 6095 | * This is necessary to keep the DSP from locking up. |
| 6127 | */ | 6096 | */ |
| 6128 | while(test_and_set_bit(board, (void *)&j->busyflags) != 0) { | 6097 | while(test_and_set_bit(board, (void *)&j->busyflags) != 0) |
| 6129 | set_current_state(TASK_INTERRUPTIBLE); | 6098 | schedule_timeout_interruptible(1); |
| 6130 | schedule_timeout(1); | ||
| 6131 | } | ||
| 6132 | if (ixjdebug & 0x0040) | 6099 | if (ixjdebug & 0x0040) |
| 6133 | printk("phone%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg); | 6100 | printk("phone%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg); |
| 6134 | if (minor >= IXJMAX) { | 6101 | if (minor >= IXJMAX) { |
| @@ -6694,8 +6661,6 @@ static struct file_operations ixj_fops = | |||
| 6694 | 6661 | ||
| 6695 | static int ixj_linetest(IXJ *j) | 6662 | static int ixj_linetest(IXJ *j) |
| 6696 | { | 6663 | { |
| 6697 | unsigned long jifwait; | ||
| 6698 | |||
| 6699 | j->flags.pstncheck = 1; /* Testing */ | 6664 | j->flags.pstncheck = 1; /* Testing */ |
| 6700 | j->flags.pstn_present = 0; /* Assume the line is not there */ | 6665 | j->flags.pstn_present = 0; /* Assume the line is not there */ |
| 6701 | 6666 | ||
| @@ -6726,11 +6691,7 @@ static int ixj_linetest(IXJ *j) | |||
| 6726 | 6691 | ||
| 6727 | outb_p(j->pld_scrw.byte, j->XILINXbase); | 6692 | outb_p(j->pld_scrw.byte, j->XILINXbase); |
| 6728 | daa_set_mode(j, SOP_PU_CONVERSATION); | 6693 | daa_set_mode(j, SOP_PU_CONVERSATION); |
| 6729 | jifwait = jiffies + hertz; | 6694 | msleep(1000); |
| 6730 | while (time_before(jiffies, jifwait)) { | ||
| 6731 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 6732 | schedule_timeout(1); | ||
| 6733 | } | ||
| 6734 | daa_int_read(j); | 6695 | daa_int_read(j); |
| 6735 | daa_set_mode(j, SOP_PU_RESET); | 6696 | daa_set_mode(j, SOP_PU_RESET); |
| 6736 | if (j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK) { | 6697 | if (j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK) { |
| @@ -6750,11 +6711,7 @@ static int ixj_linetest(IXJ *j) | |||
| 6750 | j->pld_slicw.bits.rly3 = 0; | 6711 | j->pld_slicw.bits.rly3 = 0; |
| 6751 | outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01); | 6712 | outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01); |
| 6752 | daa_set_mode(j, SOP_PU_CONVERSATION); | 6713 | daa_set_mode(j, SOP_PU_CONVERSATION); |
| 6753 | jifwait = jiffies + hertz; | 6714 | msleep(1000); |
| 6754 | while (time_before(jiffies, jifwait)) { | ||
| 6755 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 6756 | schedule_timeout(1); | ||
| 6757 | } | ||
| 6758 | daa_int_read(j); | 6715 | daa_int_read(j); |
| 6759 | daa_set_mode(j, SOP_PU_RESET); | 6716 | daa_set_mode(j, SOP_PU_RESET); |
| 6760 | if (j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK) { | 6717 | if (j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK) { |
| @@ -6783,7 +6740,6 @@ static int ixj_linetest(IXJ *j) | |||
| 6783 | static int ixj_selfprobe(IXJ *j) | 6740 | static int ixj_selfprobe(IXJ *j) |
| 6784 | { | 6741 | { |
| 6785 | unsigned short cmd; | 6742 | unsigned short cmd; |
| 6786 | unsigned long jif; | ||
| 6787 | int cnt; | 6743 | int cnt; |
| 6788 | BYTES bytes; | 6744 | BYTES bytes; |
| 6789 | 6745 | ||
| @@ -6933,29 +6889,13 @@ static int ixj_selfprobe(IXJ *j) | |||
| 6933 | } else { | 6889 | } else { |
| 6934 | if (j->cardtype == QTI_LINEJACK) { | 6890 | if (j->cardtype == QTI_LINEJACK) { |
| 6935 | LED_SetState(0x1, j); | 6891 | LED_SetState(0x1, j); |
| 6936 | jif = jiffies + (hertz / 10); | 6892 | msleep(100); |
| 6937 | while (time_before(jiffies, jif)) { | ||
| 6938 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 6939 | schedule_timeout(1); | ||
| 6940 | } | ||
| 6941 | LED_SetState(0x2, j); | 6893 | LED_SetState(0x2, j); |
| 6942 | jif = jiffies + (hertz / 10); | 6894 | msleep(100); |
| 6943 | while (time_before(jiffies, jif)) { | ||
| 6944 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 6945 | schedule_timeout(1); | ||
| 6946 | } | ||
| 6947 | LED_SetState(0x4, j); | 6895 | LED_SetState(0x4, j); |
| 6948 | jif = jiffies + (hertz / 10); | 6896 | msleep(100); |
| 6949 | while (time_before(jiffies, jif)) { | ||
| 6950 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 6951 | schedule_timeout(1); | ||
| 6952 | } | ||
| 6953 | LED_SetState(0x8, j); | 6897 | LED_SetState(0x8, j); |
| 6954 | jif = jiffies + (hertz / 10); | 6898 | msleep(100); |
| 6955 | while (time_before(jiffies, jif)) { | ||
| 6956 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 6957 | schedule_timeout(1); | ||
| 6958 | } | ||
| 6959 | LED_SetState(0x0, j); | 6899 | LED_SetState(0x0, j); |
| 6960 | daa_get_version(j); | 6900 | daa_get_version(j); |
| 6961 | if (ixjdebug & 0x0002) | 6901 | if (ixjdebug & 0x0002) |
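The ixj driver had many open-coded delays of the form "det = jiffies + n; while (time_before(jiffies, det)) { set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(1); }". Where the loop only burned time, one msleep() call replaces it (uninterruptible, sleeping at least the requested interval), with jiffies_to_msecs() converting the one delay stored in jiffies; where a condition is re-checked every tick, the body shrinks to schedule_timeout_interruptible(1). Sketch, with illustrative state:

    #include <linux/delay.h>
    #include <linux/jiffies.h>
    #include <linux/sched.h>

    static volatile int tone_state;    /* illustrative, not the driver's */
    static unsigned long winktime;     /* interval kept in jiffies */

    static void demo_delays(void)
    {
            /* pure delay: one call replaces the whole timeout loop */
            msleep(jiffies_to_msecs(winktime));

            /* condition poll: one interruptible tick per iteration */
            while (tone_state)
                    schedule_timeout_interruptible(1);
    }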
diff --git a/drivers/usb/media/stv680.c b/drivers/usb/media/stv680.c index 7398a7f19c1e..0fd0fa9fec21 100644 --- a/drivers/usb/media/stv680.c +++ b/drivers/usb/media/stv680.c | |||
| @@ -260,7 +260,7 @@ static int stv_stop_video (struct usb_stv *dev) | |||
| 260 | PDEBUG (0, "STV(i): Camera set to original resolution"); | 260 | PDEBUG (0, "STV(i): Camera set to original resolution"); |
| 261 | } | 261 | } |
| 262 | /* origMode */ | 262 | /* origMode */ |
| 263 | kfree (buf); | 263 | kfree(buf); |
| 264 | return i; | 264 | return i; |
| 265 | } | 265 | } |
| 266 | 266 | ||
| @@ -276,7 +276,7 @@ static int stv_set_video_mode (struct usb_stv *dev) | |||
| 276 | } | 276 | } |
| 277 | 277 | ||
| 278 | if ((i = stv_set_config (dev, 1, 0, 0)) < 0) { | 278 | if ((i = stv_set_config (dev, 1, 0, 0)) < 0) { |
| 279 | kfree (buf); | 279 | kfree(buf); |
| 280 | return i; | 280 | return i; |
| 281 | } | 281 | } |
| 282 | 282 | ||
| @@ -301,13 +301,13 @@ static int stv_set_video_mode (struct usb_stv *dev) | |||
| 301 | goto exit; | 301 | goto exit; |
| 302 | 302 | ||
| 303 | error: | 303 | error: |
| 304 | kfree (buf); | 304 | kfree(buf); |
| 305 | if (stop_video == 1) | 305 | if (stop_video == 1) |
| 306 | stv_stop_video (dev); | 306 | stv_stop_video (dev); |
| 307 | return -1; | 307 | return -1; |
| 308 | 308 | ||
| 309 | exit: | 309 | exit: |
| 310 | kfree (buf); | 310 | kfree(buf); |
| 311 | return 0; | 311 | return 0; |
| 312 | } | 312 | } |
| 313 | 313 | ||
| @@ -327,7 +327,7 @@ static int stv_init (struct usb_stv *stv680) | |||
| 327 | 327 | ||
| 328 | /* set config 1, interface 0, alternate 0 */ | 328 | /* set config 1, interface 0, alternate 0 */ |
| 329 | if ((i = stv_set_config (stv680, 1, 0, 0)) < 0) { | 329 | if ((i = stv_set_config (stv680, 1, 0, 0)) < 0) { |
| 330 | kfree (buffer); | 330 | kfree(buffer); |
| 331 | PDEBUG (0, "STV(e): set config 1,0,0 failed"); | 331 | PDEBUG (0, "STV(e): set config 1,0,0 failed"); |
| 332 | return -1; | 332 | return -1; |
| 333 | } | 333 | } |
| @@ -435,11 +435,11 @@ static int stv_init (struct usb_stv *stv680) | |||
| 435 | error: | 435 | error: |
| 436 | i = stv_sndctrl (0, stv680, 0x80, 0, buffer, 0x02); /* Get Last Error */ | 436 | i = stv_sndctrl (0, stv680, 0x80, 0, buffer, 0x02); /* Get Last Error */ |
| 437 | PDEBUG (1, "STV(i): last error: %i, command = 0x%x", buffer[0], buffer[1]); | 437 | PDEBUG (1, "STV(i): last error: %i, command = 0x%x", buffer[0], buffer[1]); |
| 438 | kfree (buffer); | 438 | kfree(buffer); |
| 439 | return -1; | 439 | return -1; |
| 440 | 440 | ||
| 441 | exit: | 441 | exit: |
| 442 | kfree (buffer); | 442 | kfree(buffer); |
| 443 | 443 | ||
| 444 | /* video = 320x240, 352x288 */ | 444 | /* video = 320x240, 352x288 */ |
| 445 | if (stv680->CIF == 1) { | 445 | if (stv680->CIF == 1) { |
| @@ -708,10 +708,10 @@ static int stv680_stop_stream (struct usb_stv *stv680) | |||
| 708 | usb_kill_urb (stv680->urb[i]); | 708 | usb_kill_urb (stv680->urb[i]); |
| 709 | usb_free_urb (stv680->urb[i]); | 709 | usb_free_urb (stv680->urb[i]); |
| 710 | stv680->urb[i] = NULL; | 710 | stv680->urb[i] = NULL; |
| 711 | kfree (stv680->sbuf[i].data); | 711 | kfree(stv680->sbuf[i].data); |
| 712 | } | 712 | } |
| 713 | for (i = 0; i < STV680_NUMSCRATCH; i++) { | 713 | for (i = 0; i < STV680_NUMSCRATCH; i++) { |
| 714 | kfree (stv680->scratch[i].data); | 714 | kfree(stv680->scratch[i].data); |
| 715 | stv680->scratch[i].data = NULL; | 715 | stv680->scratch[i].data = NULL; |
| 716 | } | 716 | } |
| 717 | 717 | ||
| @@ -1068,7 +1068,7 @@ static int stv_close (struct inode *inode, struct file *file) | |||
| 1068 | stv680->user = 0; | 1068 | stv680->user = 0; |
| 1069 | 1069 | ||
| 1070 | if (stv680->removed) { | 1070 | if (stv680->removed) { |
| 1071 | kfree (stv680); | 1071 | kfree(stv680); |
| 1072 | stv680 = NULL; | 1072 | stv680 = NULL; |
| 1073 | PDEBUG (0, "STV(i): device unregistered"); | 1073 | PDEBUG (0, "STV(i): device unregistered"); |
| 1074 | } | 1074 | } |
| @@ -1445,14 +1445,14 @@ static inline void usb_stv680_remove_disconnected (struct usb_stv *stv680) | |||
| 1445 | usb_kill_urb (stv680->urb[i]); | 1445 | usb_kill_urb (stv680->urb[i]); |
| 1446 | usb_free_urb (stv680->urb[i]); | 1446 | usb_free_urb (stv680->urb[i]); |
| 1447 | stv680->urb[i] = NULL; | 1447 | stv680->urb[i] = NULL; |
| 1448 | kfree (stv680->sbuf[i].data); | 1448 | kfree(stv680->sbuf[i].data); |
| 1449 | } | 1449 | } |
| 1450 | for (i = 0; i < STV680_NUMSCRATCH; i++) | 1450 | for (i = 0; i < STV680_NUMSCRATCH; i++) |
| 1451 | kfree (stv680->scratch[i].data); | 1451 | kfree(stv680->scratch[i].data); |
| 1452 | PDEBUG (0, "STV(i): %s disconnected", stv680->camera_name); | 1452 | PDEBUG (0, "STV(i): %s disconnected", stv680->camera_name); |
| 1453 | 1453 | ||
| 1454 | /* Free the memory */ | 1454 | /* Free the memory */ |
| 1455 | kfree (stv680); | 1455 | kfree(stv680); |
| 1456 | } | 1456 | } |
| 1457 | 1457 | ||
| 1458 | static void stv680_disconnect (struct usb_interface *intf) | 1458 | static void stv680_disconnect (struct usb_interface *intf) |
diff --git a/drivers/video/vgastate.c b/drivers/video/vgastate.c index 0ea62d8bc703..ca92940f3943 100644 --- a/drivers/video/vgastate.c +++ b/drivers/video/vgastate.c | |||
| @@ -342,16 +342,11 @@ static void vga_cleanup(struct vgastate *state) | |||
| 342 | if (state->vidstate != NULL) { | 342 | if (state->vidstate != NULL) { |
| 343 | struct regstate *saved = (struct regstate *) state->vidstate; | 343 | struct regstate *saved = (struct regstate *) state->vidstate; |
| 344 | 344 | ||
| 345 | if (saved->vga_font0) | 345 | vfree(saved->vga_font0); |
| 346 | vfree(saved->vga_font0); | 346 | vfree(saved->vga_font1); |
| 347 | if (saved->vga_font1) | 347 | vfree(saved->vga_text); |
| 348 | vfree(saved->vga_font1); | 348 | vfree(saved->vga_cmap); |
| 349 | if (saved->vga_text) | 349 | vfree(saved->attr); |
| 350 | vfree(saved->vga_text); | ||
| 351 | if (saved->vga_cmap) | ||
| 352 | vfree(saved->vga_cmap); | ||
| 353 | if (saved->attr) | ||
| 354 | vfree(saved->attr); | ||
| 355 | kfree(saved); | 350 | kfree(saved); |
| 356 | state->vidstate = NULL; | 351 | state->vidstate = NULL; |
| 357 | } | 352 | } |
diff --git a/fs/buffer.c b/fs/buffer.c index 1c62203a4906..6cbfceabd95d 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
| @@ -40,6 +40,7 @@ | |||
| 40 | #include <linux/cpu.h> | 40 | #include <linux/cpu.h> |
| 41 | #include <linux/bitops.h> | 41 | #include <linux/bitops.h> |
| 42 | #include <linux/mpage.h> | 42 | #include <linux/mpage.h> |
| 43 | #include <linux/bit_spinlock.h> | ||
| 43 | 44 | ||
| 44 | static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); | 45 | static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); |
| 45 | static void invalidate_bh_lrus(void); | 46 | static void invalidate_bh_lrus(void); |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 3217ac5f6bd7..2335f14a1583 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
| @@ -3215,10 +3215,8 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) | |||
| 3215 | } | 3215 | } |
| 3216 | 3216 | ||
| 3217 | cifs_sb->tcon = NULL; | 3217 | cifs_sb->tcon = NULL; |
| 3218 | if (ses) { | 3218 | if (ses) |
| 3219 | set_current_state(TASK_INTERRUPTIBLE); | 3219 | schedule_timeout_interruptible(msecs_to_jiffies(500)); |
| 3220 | schedule_timeout(HZ / 2); | ||
| 3221 | } | ||
| 3222 | if (ses) | 3220 | if (ses) |
| 3223 | sesInfoFree(ses); | 3221 | sesInfoFree(ses); |
| 3224 | 3222 | ||
diff --git a/fs/cramfs/uncompress.c b/fs/cramfs/uncompress.c index 5034365b06a8..8def89f2c438 100644 --- a/fs/cramfs/uncompress.c +++ b/fs/cramfs/uncompress.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
| 20 | #include <linux/vmalloc.h> | 20 | #include <linux/vmalloc.h> |
| 21 | #include <linux/zlib.h> | 21 | #include <linux/zlib.h> |
| 22 | #include <linux/cramfs_fs.h> | ||
| 22 | 23 | ||
| 23 | static z_stream stream; | 24 | static z_stream stream; |
| 24 | static int initialized; | 25 | static int initialized; |
diff --git a/fs/dcache.c b/fs/dcache.c index a15a2e1f5520..7376b61269fb 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
| @@ -337,12 +337,10 @@ struct dentry * d_find_alias(struct inode *inode) | |||
| 337 | */ | 337 | */ |
| 338 | void d_prune_aliases(struct inode *inode) | 338 | void d_prune_aliases(struct inode *inode) |
| 339 | { | 339 | { |
| 340 | struct list_head *tmp, *head = &inode->i_dentry; | 340 | struct dentry *dentry; |
| 341 | restart: | 341 | restart: |
| 342 | spin_lock(&dcache_lock); | 342 | spin_lock(&dcache_lock); |
| 343 | tmp = head; | 343 | list_for_each_entry(dentry, &inode->i_dentry, d_alias) { |
| 344 | while ((tmp = tmp->next) != head) { | ||
| 345 | struct dentry *dentry = list_entry(tmp, struct dentry, d_alias); | ||
| 346 | spin_lock(&dentry->d_lock); | 344 | spin_lock(&dentry->d_lock); |
| 347 | if (!atomic_read(&dentry->d_count)) { | 345 | if (!atomic_read(&dentry->d_count)) { |
| 348 | __dget_locked(dentry); | 346 | __dget_locked(dentry); |
| @@ -463,10 +461,7 @@ void shrink_dcache_sb(struct super_block * sb) | |||
| 463 | * superblock to the most recent end of the unused list. | 461 | * superblock to the most recent end of the unused list. |
| 464 | */ | 462 | */ |
| 465 | spin_lock(&dcache_lock); | 463 | spin_lock(&dcache_lock); |
| 466 | next = dentry_unused.next; | 464 | list_for_each_safe(tmp, next, &dentry_unused) { |
| 467 | while (next != &dentry_unused) { | ||
| 468 | tmp = next; | ||
| 469 | next = tmp->next; | ||
| 470 | dentry = list_entry(tmp, struct dentry, d_lru); | 465 | dentry = list_entry(tmp, struct dentry, d_lru); |
| 471 | if (dentry->d_sb != sb) | 466 | if (dentry->d_sb != sb) |
| 472 | continue; | 467 | continue; |
| @@ -478,10 +473,7 @@ void shrink_dcache_sb(struct super_block * sb) | |||
| 478 | * Pass two ... free the dentries for this superblock. | 473 | * Pass two ... free the dentries for this superblock. |
| 479 | */ | 474 | */ |
| 480 | repeat: | 475 | repeat: |
| 481 | next = dentry_unused.next; | 476 | list_for_each_safe(tmp, next, &dentry_unused) { |
| 482 | while (next != &dentry_unused) { | ||
| 483 | tmp = next; | ||
| 484 | next = tmp->next; | ||
| 485 | dentry = list_entry(tmp, struct dentry, d_lru); | 477 | dentry = list_entry(tmp, struct dentry, d_lru); |
| 486 | if (dentry->d_sb != sb) | 478 | if (dentry->d_sb != sb) |
| 487 | continue; | 479 | continue; |
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index c6ec66fd8766..49bbc2be3d72 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c | |||
| @@ -1340,8 +1340,7 @@ int journal_stop(handle_t *handle) | |||
| 1340 | if (handle->h_sync) { | 1340 | if (handle->h_sync) { |
| 1341 | do { | 1341 | do { |
| 1342 | old_handle_count = transaction->t_handle_count; | 1342 | old_handle_count = transaction->t_handle_count; |
| 1343 | set_current_state(TASK_UNINTERRUPTIBLE); | 1343 | schedule_timeout_uninterruptible(1); |
| 1344 | schedule_timeout(1); | ||
| 1345 | } while (old_handle_count != transaction->t_handle_count); | 1344 | } while (old_handle_count != transaction->t_handle_count); |
| 1346 | } | 1345 | } |
| 1347 | 1346 | ||
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c index 456d7e6e29c2..27f199e94cfc 100644 --- a/fs/jffs/intrep.c +++ b/fs/jffs/intrep.c | |||
| @@ -1701,12 +1701,10 @@ jffs_find_file(struct jffs_control *c, __u32 ino) | |||
| 1701 | { | 1701 | { |
| 1702 | struct jffs_file *f; | 1702 | struct jffs_file *f; |
| 1703 | int i = ino % c->hash_len; | 1703 | int i = ino % c->hash_len; |
| 1704 | struct list_head *tmp; | ||
| 1705 | 1704 | ||
| 1706 | D3(printk("jffs_find_file(): ino: %u\n", ino)); | 1705 | D3(printk("jffs_find_file(): ino: %u\n", ino)); |
| 1707 | 1706 | ||
| 1708 | for (tmp = c->hash[i].next; tmp != &c->hash[i]; tmp = tmp->next) { | 1707 | list_for_each_entry(f, &c->hash[i], hash) { |
| 1709 | f = list_entry(tmp, struct jffs_file, hash); | ||
| 1710 | if (ino != f->ino) | 1708 | if (ino != f->ino) |
| 1711 | continue; | 1709 | continue; |
| 1712 | D3(printk("jffs_find_file(): Found file with ino " | 1710 | D3(printk("jffs_find_file(): Found file with ino " |
| @@ -2102,13 +2100,12 @@ jffs_foreach_file(struct jffs_control *c, int (*func)(struct jffs_file *)) | |||
| 2102 | int result = 0; | 2100 | int result = 0; |
| 2103 | 2101 | ||
| 2104 | for (pos = 0; pos < c->hash_len; pos++) { | 2102 | for (pos = 0; pos < c->hash_len; pos++) { |
| 2105 | struct list_head *p, *next; | 2103 | struct jffs_file *f, *next; |
| 2106 | for (p = c->hash[pos].next; p != &c->hash[pos]; p = next) { | 2104 | |
| 2107 | /* We need a reference to the next file in the | 2105 | /* We must do _safe, because 'func' might remove the |
| 2108 | list because `func' might remove the current | 2106 | current file 'f' from the list. */ |
| 2109 | file `f'. */ | 2107 | list_for_each_entry_safe(f, next, &c->hash[pos], hash) { |
| 2110 | next = p->next; | 2108 | r = func(f); |
| 2111 | r = func(list_entry(p, struct jffs_file, hash)); | ||
| 2112 | if (r < 0) | 2109 | if (r < 0) |
| 2113 | return r; | 2110 | return r; |
| 2114 | result += r; | 2111 | result += r; |
| @@ -2613,9 +2610,8 @@ jffs_print_hash_table(struct jffs_control *c) | |||
| 2613 | 2610 | ||
| 2614 | printk("JFFS: Dumping the file system's hash table...\n"); | 2611 | printk("JFFS: Dumping the file system's hash table...\n"); |
| 2615 | for (i = 0; i < c->hash_len; i++) { | 2612 | for (i = 0; i < c->hash_len; i++) { |
| 2616 | struct list_head *p; | 2613 | struct jffs_file *f; |
| 2617 | for (p = c->hash[i].next; p != &c->hash[i]; p = p->next) { | 2614 | list_for_each_entry(f, &c->hash[i], hash) { |
| 2618 | struct jffs_file *f=list_entry(p,struct jffs_file,hash); | ||
| 2619 | printk("*** c->hash[%u]: \"%s\" " | 2615 | printk("*** c->hash[%u]: \"%s\" " |
| 2620 | "(ino: %u, pino: %u)\n", | 2616 | "(ino: %u, pino: %u)\n", |
| 2621 | i, (f->name ? f->name : ""), | 2617 | i, (f->name ? f->name : ""), |
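The jffs loops need the _safe iterator because func() may unlink the entry the walk is standing on; list_for_each_entry_safe() caches the next element before the body runs, so deletion is harmless. Sketch with illustrative names:

    #include <linux/list.h>

    struct demo_file {
            struct list_head hash;
            int dead;
    };

    static void prune_bucket(struct list_head *bucket)
    {
            struct demo_file *f, *next;

            /* 'next' is fetched before the body, so list_del(f) is safe */
            list_for_each_entry_safe(f, next, bucket, hash) {
                    if (f->dead)
                            list_del(&f->hash);
            }
    }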
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 14b3ce87fa29..87332f30141b 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c | |||
| @@ -299,8 +299,7 @@ nlmclnt_alloc_call(void) | |||
| 299 | return call; | 299 | return call; |
| 300 | } | 300 | } |
| 301 | printk("nlmclnt_alloc_call: failed, waiting for memory\n"); | 301 | printk("nlmclnt_alloc_call: failed, waiting for memory\n"); |
| 302 | current->state = TASK_INTERRUPTIBLE; | 302 | schedule_timeout_interruptible(5*HZ); |
| 303 | schedule_timeout(5*HZ); | ||
| 304 | } | 303 | } |
| 305 | return NULL; | 304 | return NULL; |
| 306 | } | 305 | } |
diff --git a/fs/namespace.c b/fs/namespace.c index 34156260c9b6..2fa9fdf7d6f5 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
| @@ -537,7 +537,6 @@ lives_below_in_same_fs(struct dentry *d, struct dentry *dentry) | |||
| 537 | static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry) | 537 | static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry) |
| 538 | { | 538 | { |
| 539 | struct vfsmount *res, *p, *q, *r, *s; | 539 | struct vfsmount *res, *p, *q, *r, *s; |
| 540 | struct list_head *h; | ||
| 541 | struct nameidata nd; | 540 | struct nameidata nd; |
| 542 | 541 | ||
| 543 | res = q = clone_mnt(mnt, dentry); | 542 | res = q = clone_mnt(mnt, dentry); |
| @@ -546,8 +545,7 @@ static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry) | |||
| 546 | q->mnt_mountpoint = mnt->mnt_mountpoint; | 545 | q->mnt_mountpoint = mnt->mnt_mountpoint; |
| 547 | 546 | ||
| 548 | p = mnt; | 547 | p = mnt; |
| 549 | for (h = mnt->mnt_mounts.next; h != &mnt->mnt_mounts; h = h->next) { | 548 | list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) { |
| 550 | r = list_entry(h, struct vfsmount, mnt_child); | ||
| 551 | if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry)) | 549 | if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry)) |
| 552 | continue; | 550 | continue; |
| 553 | 551 | ||
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 2681485cf2d0..edc95514046d 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c | |||
| @@ -34,8 +34,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) | |||
| 34 | res = rpc_call_sync(clnt, msg, flags); | 34 | res = rpc_call_sync(clnt, msg, flags); |
| 35 | if (res != -EJUKEBOX) | 35 | if (res != -EJUKEBOX) |
| 36 | break; | 36 | break; |
| 37 | set_current_state(TASK_INTERRUPTIBLE); | 37 | schedule_timeout_interruptible(NFS_JUKEBOX_RETRY_TIME); |
| 38 | schedule_timeout(NFS_JUKEBOX_RETRY_TIME); | ||
| 39 | res = -ERESTARTSYS; | 38 | res = -ERESTARTSYS; |
| 40 | } while (!signalled()); | 39 | } while (!signalled()); |
| 41 | rpc_clnt_sigunmask(clnt, &oldset); | 40 | rpc_clnt_sigunmask(clnt, &oldset); |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 0c5a308e4963..9701ca8c9428 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
| @@ -2418,14 +2418,11 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) | |||
| 2418 | *timeout = NFS4_POLL_RETRY_MAX; | 2418 | *timeout = NFS4_POLL_RETRY_MAX; |
| 2419 | rpc_clnt_sigmask(clnt, &oldset); | 2419 | rpc_clnt_sigmask(clnt, &oldset); |
| 2420 | if (clnt->cl_intr) { | 2420 | if (clnt->cl_intr) { |
| 2421 | set_current_state(TASK_INTERRUPTIBLE); | 2421 | schedule_timeout_interruptible(*timeout); |
| 2422 | schedule_timeout(*timeout); | ||
| 2423 | if (signalled()) | 2422 | if (signalled()) |
| 2424 | res = -ERESTARTSYS; | 2423 | res = -ERESTARTSYS; |
| 2425 | } else { | 2424 | } else |
| 2426 | set_current_state(TASK_UNINTERRUPTIBLE); | 2425 | schedule_timeout_uninterruptible(*timeout); |
| 2427 | schedule_timeout(*timeout); | ||
| 2428 | } | ||
| 2429 | rpc_clnt_sigunmask(clnt, &oldset); | 2426 | rpc_clnt_sigunmask(clnt, &oldset); |
| 2430 | *timeout <<= 1; | 2427 | *timeout <<= 1; |
| 2431 | return res; | 2428 | return res; |
| @@ -2578,8 +2575,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4 | |||
| 2578 | static unsigned long | 2575 | static unsigned long |
| 2579 | nfs4_set_lock_task_retry(unsigned long timeout) | 2576 | nfs4_set_lock_task_retry(unsigned long timeout) |
| 2580 | { | 2577 | { |
| 2581 | current->state = TASK_INTERRUPTIBLE; | 2578 | schedule_timeout_interruptible(timeout); |
| 2582 | schedule_timeout(timeout); | ||
| 2583 | timeout <<= 1; | 2579 | timeout <<= 1; |
| 2584 | if (timeout > NFS4_LOCK_MAXTIMEOUT) | 2580 | if (timeout > NFS4_LOCK_MAXTIMEOUT) |
| 2585 | return NFS4_LOCK_MAXTIMEOUT; | 2581 | return NFS4_LOCK_MAXTIMEOUT; |
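Both nfs4 retry paths above share a second pattern: sleep, then double the timeout and clamp it to a ceiling. A generic sketch of that capped exponential backoff (hypothetical helper name):

```c
/* Double the delay after every failed attempt, but never exceed
 * the cap (NFS4_POLL_RETRY_MAX / NFS4_LOCK_MAXTIMEOUT above). */
static long next_backoff(long timeout, long max_timeout)
{
	timeout <<= 1;
	if (timeout > max_timeout)
		timeout = max_timeout;
	return timeout;
}
```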
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index 545236414d59..b6cc8cf24626 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/swap.h> | 27 | #include <linux/swap.h> |
| 28 | #include <linux/buffer_head.h> | 28 | #include <linux/buffer_head.h> |
| 29 | #include <linux/writeback.h> | 29 | #include <linux/writeback.h> |
| 30 | #include <linux/bit_spinlock.h> | ||
| 30 | 31 | ||
| 31 | #include "aops.h" | 32 | #include "aops.h" |
| 32 | #include "attrib.h" | 33 | #include "attrib.h" |
diff --git a/fs/pipe.c b/fs/pipe.c --- a/fs/pipe.c +++ b/fs/pipe.c | |||
| @@ -39,7 +39,11 @@ void pipe_wait(struct inode * inode) | |||
| 39 | { | 39 | { |
| 40 | DEFINE_WAIT(wait); | 40 | DEFINE_WAIT(wait); |
| 41 | 41 | ||
| 42 | prepare_to_wait(PIPE_WAIT(*inode), &wait, TASK_INTERRUPTIBLE); | 42 | /* |
| 43 | * Pipes are system-local resources, so sleeping on them | ||
| 44 | * is considered a noninteractive wait: | ||
| 45 | */ | ||
| 46 | prepare_to_wait(PIPE_WAIT(*inode), &wait, TASK_INTERRUPTIBLE|TASK_NONINTERACTIVE); | ||
| 43 | up(PIPE_SEM(*inode)); | 47 | up(PIPE_SEM(*inode)); |
| 44 | schedule(); | 48 | schedule(); |
| 45 | finish_wait(PIPE_WAIT(*inode), &wait); | 49 | finish_wait(PIPE_WAIT(*inode), &wait); |
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index a8e29e9bbbd0..4b15761434bc 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c | |||
| @@ -2868,8 +2868,7 @@ static void let_transaction_grow(struct super_block *sb, unsigned long trans_id) | |||
| 2868 | struct reiserfs_journal *journal = SB_JOURNAL(sb); | 2868 | struct reiserfs_journal *journal = SB_JOURNAL(sb); |
| 2869 | unsigned long bcount = journal->j_bcount; | 2869 | unsigned long bcount = journal->j_bcount; |
| 2870 | while (1) { | 2870 | while (1) { |
| 2871 | set_current_state(TASK_UNINTERRUPTIBLE); | 2871 | schedule_timeout_uninterruptible(1); |
| 2872 | schedule_timeout(1); | ||
| 2873 | journal->j_current_jl->j_state |= LIST_COMMIT_PENDING; | 2872 | journal->j_current_jl->j_state |= LIST_COMMIT_PENDING; |
| 2874 | while ((atomic_read(&journal->j_wcount) > 0 || | 2873 | while ((atomic_read(&journal->j_wcount) > 0 || |
| 2875 | atomic_read(&journal->j_jlock)) && | 2874 | atomic_read(&journal->j_jlock)) && |
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 6951c35755be..44b02fc02ebe 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c | |||
| @@ -1934,8 +1934,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) | |||
| 1934 | if (SB_AP_BITMAP(s)) | 1934 | if (SB_AP_BITMAP(s)) |
| 1935 | brelse(SB_AP_BITMAP(s)[j].bh); | 1935 | brelse(SB_AP_BITMAP(s)[j].bh); |
| 1936 | } | 1936 | } |
| 1937 | if (SB_AP_BITMAP(s)) | 1937 | vfree(SB_AP_BITMAP(s)); |
| 1938 | vfree(SB_AP_BITMAP(s)); | ||
| 1939 | } | 1938 | } |
| 1940 | if (SB_BUFFER_WITH_SB(s)) | 1939 | if (SB_BUFFER_WITH_SB(s)) |
| 1941 | brelse(SB_BUFFER_WITH_SB(s)); | 1940 | brelse(SB_BUFFER_WITH_SB(s)); |
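The reiserfs error path above can drop its `if (SB_AP_BITMAP(s))` guard because `vfree()`, like `kfree()` and userspace `free()`, is defined to do nothing when passed NULL. A userspace analogue of the simplification:

```c
#include <stdlib.h>

int main(void)
{
	char *bitmap = NULL;

	if (bitmap)		/* the redundant guard being removed */
		free(bitmap);

	free(bitmap);		/* equivalent: free(NULL) is a no-op */
	return 0;
}
```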
diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c index 220babe91efd..38ab558835c4 100644 --- a/fs/smbfs/proc.c +++ b/fs/smbfs/proc.c | |||
| @@ -2397,8 +2397,7 @@ smb_proc_readdir_long(struct file *filp, void *dirent, filldir_t filldir, | |||
| 2397 | if (req->rq_rcls == ERRSRV && req->rq_err == ERRerror) { | 2397 | if (req->rq_rcls == ERRSRV && req->rq_err == ERRerror) { |
| 2398 | /* a damn Win95 bug - sometimes it clags if you | 2398 | /* a damn Win95 bug - sometimes it clags if you |
| 2399 | ask it too fast */ | 2399 | ask it too fast */ |
| 2400 | current->state = TASK_INTERRUPTIBLE; | 2400 | schedule_timeout_interruptible(msecs_to_jiffies(200)); |
| 2401 | schedule_timeout(HZ/5); | ||
| 2402 | continue; | 2401 | continue; |
| 2403 | } | 2402 | } |
| 2404 | 2403 | ||
diff --git a/fs/xfs/linux-2.6/time.h b/fs/xfs/linux-2.6/time.h index 6c6fd0faa8e1..b0d2873ab274 100644 --- a/fs/xfs/linux-2.6/time.h +++ b/fs/xfs/linux-2.6/time.h | |||
| @@ -39,8 +39,7 @@ typedef struct timespec timespec_t; | |||
| 39 | 39 | ||
| 40 | static inline void delay(long ticks) | 40 | static inline void delay(long ticks) |
| 41 | { | 41 | { |
| 42 | set_current_state(TASK_UNINTERRUPTIBLE); | 42 | schedule_timeout_uninterruptible(ticks); |
| 43 | schedule_timeout(ticks); | ||
| 44 | } | 43 | } |
| 45 | 44 | ||
| 46 | static inline void nanotime(struct timespec *tvp) | 45 | static inline void nanotime(struct timespec *tvp) |
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 655bf4a78afe..e82cf72ac599 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c | |||
| @@ -1780,10 +1780,10 @@ xfsbufd( | |||
| 1780 | xfsbufd_force_sleep = 0; | 1780 | xfsbufd_force_sleep = 0; |
| 1781 | } | 1781 | } |
| 1782 | 1782 | ||
| 1783 | set_current_state(TASK_INTERRUPTIBLE); | 1783 | schedule_timeout_interruptible |
| 1784 | schedule_timeout((xfs_buf_timer_centisecs * HZ) / 100); | 1784 | (xfs_buf_timer_centisecs * msecs_to_jiffies(10)); |
| 1785 | 1785 | ||
| 1786 | age = (xfs_buf_age_centisecs * HZ) / 100; | 1786 | age = xfs_buf_age_centisecs * msecs_to_jiffies(10); |
| 1787 | spin_lock(&pbd_delwrite_lock); | 1787 | spin_lock(&pbd_delwrite_lock); |
| 1788 | list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) { | 1788 | list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) { |
| 1789 | PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb)); | 1789 | PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb)); |
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 0da87bfc9999..2302454d8d47 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
| @@ -467,7 +467,7 @@ xfs_flush_inode( | |||
| 467 | 467 | ||
| 468 | igrab(inode); | 468 | igrab(inode); |
| 469 | xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work); | 469 | xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work); |
| 470 | delay(HZ/2); | 470 | delay(msecs_to_jiffies(500)); |
| 471 | } | 471 | } |
| 472 | 472 | ||
| 473 | /* | 473 | /* |
| @@ -492,7 +492,7 @@ xfs_flush_device( | |||
| 492 | 492 | ||
| 493 | igrab(inode); | 493 | igrab(inode); |
| 494 | xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work); | 494 | xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work); |
| 495 | delay(HZ/2); | 495 | delay(msecs_to_jiffies(500)); |
| 496 | xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC); | 496 | xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC); |
| 497 | } | 497 | } |
| 498 | 498 | ||
| @@ -520,10 +520,9 @@ xfssyncd( | |||
| 520 | struct vfs_sync_work *work, *n; | 520 | struct vfs_sync_work *work, *n; |
| 521 | LIST_HEAD (tmp); | 521 | LIST_HEAD (tmp); |
| 522 | 522 | ||
| 523 | timeleft = (xfs_syncd_centisecs * HZ) / 100; | 523 | timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10); |
| 524 | for (;;) { | 524 | for (;;) { |
| 525 | set_current_state(TASK_INTERRUPTIBLE); | 525 | timeleft = schedule_timeout_interruptible(timeleft); |
| 526 | timeleft = schedule_timeout(timeleft); | ||
| 527 | /* swsusp */ | 526 | /* swsusp */ |
| 528 | try_to_freeze(); | 527 | try_to_freeze(); |
| 529 | if (kthread_should_stop()) | 528 | if (kthread_should_stop()) |
| @@ -537,7 +536,8 @@ xfssyncd( | |||
| 537 | */ | 536 | */ |
| 538 | if (!timeleft || list_empty(&vfsp->vfs_sync_list)) { | 537 | if (!timeleft || list_empty(&vfsp->vfs_sync_list)) { |
| 539 | if (!timeleft) | 538 | if (!timeleft) |
| 540 | timeleft = (xfs_syncd_centisecs * HZ) / 100; | 539 | timeleft = xfs_syncd_centisecs * |
| 540 | msecs_to_jiffies(10); | ||
| 541 | INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list); | 541 | INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list); |
| 542 | list_add_tail(&vfsp->vfs_sync_work.w_list, | 542 | list_add_tail(&vfsp->vfs_sync_work.w_list, |
| 543 | &vfsp->vfs_sync_list); | 543 | &vfsp->vfs_sync_list); |
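The smbfs and xfs hunks also retire raw HZ arithmetic: `HZ/5` becomes `msecs_to_jiffies(200)` and `(centisecs * HZ) / 100` becomes `centisecs * msecs_to_jiffies(10)`, since a centisecond is 10 ms. Besides hiding HZ, `msecs_to_jiffies()` rounds up rather than truncating, so short delays cannot degenerate to zero jiffies. A userspace model of the difference (the rounding expression is a simplification of the real conversion, which special-cases common HZ values):

```c
#include <stdio.h>

/* Simplified model of msecs_to_jiffies(): scale by HZ, round up. */
static unsigned long msecs_to_jiffies_model(unsigned int ms, unsigned int hz)
{
	return (ms * hz + 999) / 1000;
}

int main(void)
{
	unsigned int centisecs = 3;	/* a small hypothetical tunable */
	unsigned int hz_values[] = { 100, 250, 1000 };
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int hz = hz_values[i];
		printf("HZ=%-4u  old (cs*HZ)/100 = %u   new cs*m2j(10) = %lu\n",
		       hz, centisecs * hz / 100,
		       centisecs * msecs_to_jiffies_model(10, hz));
	}
	return 0;
}
```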
diff --git a/include/asm-alpha/spinlock.h b/include/asm-alpha/spinlock.h index 80780dba9986..8197c69eff44 100644 --- a/include/asm-alpha/spinlock.h +++ b/include/asm-alpha/spinlock.h | |||
| @@ -6,7 +6,6 @@ | |||
| 6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
| 7 | #include <asm/current.h> | 7 | #include <asm/current.h> |
| 8 | 8 | ||
| 9 | |||
| 10 | /* | 9 | /* |
| 11 | * Simple spin lock operations. There are two variants, one clears IRQ's | 10 | * Simple spin lock operations. There are two variants, one clears IRQ's |
| 12 | * on the local processor, one does not. | 11 | * on the local processor, one does not. |
| @@ -14,43 +13,18 @@ | |||
| 14 | * We make no fairness assumptions. They have a cost. | 13 | * We make no fairness assumptions. They have a cost. |
| 15 | */ | 14 | */ |
| 16 | 15 | ||
| 17 | typedef struct { | 16 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 18 | volatile unsigned int lock; | 17 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
| 19 | #ifdef CONFIG_DEBUG_SPINLOCK | 18 | #define __raw_spin_unlock_wait(x) \ |
| 20 | int on_cpu; | 19 | do { cpu_relax(); } while ((x)->lock) |
| 21 | int line_no; | 20 | |
| 22 | void *previous; | 21 | static inline void __raw_spin_unlock(raw_spinlock_t * lock) |
| 23 | struct task_struct * task; | ||
| 24 | const char *base_file; | ||
| 25 | #endif | ||
| 26 | } spinlock_t; | ||
| 27 | |||
| 28 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 29 | #define SPIN_LOCK_UNLOCKED (spinlock_t){ 0, -1, 0, NULL, NULL, NULL } | ||
| 30 | #else | ||
| 31 | #define SPIN_LOCK_UNLOCKED (spinlock_t){ 0 } | ||
| 32 | #endif | ||
| 33 | |||
| 34 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
| 35 | #define spin_is_locked(x) ((x)->lock != 0) | ||
| 36 | #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) | ||
| 37 | |||
| 38 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 39 | extern void _raw_spin_unlock(spinlock_t * lock); | ||
| 40 | extern void debug_spin_lock(spinlock_t * lock, const char *, int); | ||
| 41 | extern int debug_spin_trylock(spinlock_t * lock, const char *, int); | ||
| 42 | #define _raw_spin_lock(LOCK) \ | ||
| 43 | debug_spin_lock(LOCK, __BASE_FILE__, __LINE__) | ||
| 44 | #define _raw_spin_trylock(LOCK) \ | ||
| 45 | debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__) | ||
| 46 | #else | ||
| 47 | static inline void _raw_spin_unlock(spinlock_t * lock) | ||
| 48 | { | 22 | { |
| 49 | mb(); | 23 | mb(); |
| 50 | lock->lock = 0; | 24 | lock->lock = 0; |
| 51 | } | 25 | } |
| 52 | 26 | ||
| 53 | static inline void _raw_spin_lock(spinlock_t * lock) | 27 | static inline void __raw_spin_lock(raw_spinlock_t * lock) |
| 54 | { | 28 | { |
| 55 | long tmp; | 29 | long tmp; |
| 56 | 30 | ||
| @@ -70,80 +44,64 @@ static inline void _raw_spin_lock(spinlock_t * lock) | |||
| 70 | : "m"(lock->lock) : "memory"); | 44 | : "m"(lock->lock) : "memory"); |
| 71 | } | 45 | } |
| 72 | 46 | ||
| 73 | static inline int _raw_spin_trylock(spinlock_t *lock) | 47 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
| 74 | { | 48 | { |
| 75 | return !test_and_set_bit(0, &lock->lock); | 49 | return !test_and_set_bit(0, &lock->lock); |
| 76 | } | 50 | } |
| 77 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
| 78 | |||
| 79 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 80 | 51 | ||
| 81 | /***********************************************************/ | 52 | /***********************************************************/ |
| 82 | 53 | ||
| 83 | typedef struct { | 54 | static inline int __raw_read_can_lock(raw_rwlock_t *lock) |
| 84 | volatile unsigned int lock; | ||
| 85 | } rwlock_t; | ||
| 86 | |||
| 87 | #define RW_LOCK_UNLOCKED (rwlock_t){ 0 } | ||
| 88 | |||
| 89 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
| 90 | |||
| 91 | static inline int read_can_lock(rwlock_t *lock) | ||
| 92 | { | 55 | { |
| 93 | return (lock->lock & 1) == 0; | 56 | return (lock->lock & 1) == 0; |
| 94 | } | 57 | } |
| 95 | 58 | ||
| 96 | static inline int write_can_lock(rwlock_t *lock) | 59 | static inline int __raw_write_can_lock(raw_rwlock_t *lock) |
| 97 | { | 60 | { |
| 98 | return lock->lock == 0; | 61 | return lock->lock == 0; |
| 99 | } | 62 | } |
| 100 | 63 | ||
| 101 | #ifdef CONFIG_DEBUG_RWLOCK | 64 | static inline void __raw_read_lock(raw_rwlock_t *lock) |
| 102 | extern void _raw_write_lock(rwlock_t * lock); | ||
| 103 | extern void _raw_read_lock(rwlock_t * lock); | ||
| 104 | #else | ||
| 105 | static inline void _raw_write_lock(rwlock_t * lock) | ||
| 106 | { | 65 | { |
| 107 | long regx; | 66 | long regx; |
| 108 | 67 | ||
| 109 | __asm__ __volatile__( | 68 | __asm__ __volatile__( |
| 110 | "1: ldl_l %1,%0\n" | 69 | "1: ldl_l %1,%0\n" |
| 111 | " bne %1,6f\n" | 70 | " blbs %1,6f\n" |
| 112 | " lda %1,1\n" | 71 | " subl %1,2,%1\n" |
| 113 | " stl_c %1,%0\n" | 72 | " stl_c %1,%0\n" |
| 114 | " beq %1,6f\n" | 73 | " beq %1,6f\n" |
| 115 | " mb\n" | 74 | " mb\n" |
| 116 | ".subsection 2\n" | 75 | ".subsection 2\n" |
| 117 | "6: ldl %1,%0\n" | 76 | "6: ldl %1,%0\n" |
| 118 | " bne %1,6b\n" | 77 | " blbs %1,6b\n" |
| 119 | " br 1b\n" | 78 | " br 1b\n" |
| 120 | ".previous" | 79 | ".previous" |
| 121 | : "=m" (*lock), "=&r" (regx) | 80 | : "=m" (*lock), "=&r" (regx) |
| 122 | : "m" (*lock) : "memory"); | 81 | : "m" (*lock) : "memory"); |
| 123 | } | 82 | } |
| 124 | 83 | ||
| 125 | static inline void _raw_read_lock(rwlock_t * lock) | 84 | static inline void __raw_write_lock(raw_rwlock_t *lock) |
| 126 | { | 85 | { |
| 127 | long regx; | 86 | long regx; |
| 128 | 87 | ||
| 129 | __asm__ __volatile__( | 88 | __asm__ __volatile__( |
| 130 | "1: ldl_l %1,%0\n" | 89 | "1: ldl_l %1,%0\n" |
| 131 | " blbs %1,6f\n" | 90 | " bne %1,6f\n" |
| 132 | " subl %1,2,%1\n" | 91 | " lda %1,1\n" |
| 133 | " stl_c %1,%0\n" | 92 | " stl_c %1,%0\n" |
| 134 | " beq %1,6f\n" | 93 | " beq %1,6f\n" |
| 135 | " mb\n" | 94 | " mb\n" |
| 136 | ".subsection 2\n" | 95 | ".subsection 2\n" |
| 137 | "6: ldl %1,%0\n" | 96 | "6: ldl %1,%0\n" |
| 138 | " blbs %1,6b\n" | 97 | " bne %1,6b\n" |
| 139 | " br 1b\n" | 98 | " br 1b\n" |
| 140 | ".previous" | 99 | ".previous" |
| 141 | : "=m" (*lock), "=&r" (regx) | 100 | : "=m" (*lock), "=&r" (regx) |
| 142 | : "m" (*lock) : "memory"); | 101 | : "m" (*lock) : "memory"); |
| 143 | } | 102 | } |
| 144 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
| 145 | 103 | ||
| 146 | static inline int _raw_read_trylock(rwlock_t * lock) | 104 | static inline int __raw_read_trylock(raw_rwlock_t * lock) |
| 147 | { | 105 | { |
| 148 | long regx; | 106 | long regx; |
| 149 | int success; | 107 | int success; |
| @@ -165,7 +123,7 @@ static inline int _raw_read_trylock(rwlock_t * lock) | |||
| 165 | return success; | 123 | return success; |
| 166 | } | 124 | } |
| 167 | 125 | ||
| 168 | static inline int _raw_write_trylock(rwlock_t * lock) | 126 | static inline int __raw_write_trylock(raw_rwlock_t * lock) |
| 169 | { | 127 | { |
| 170 | long regx; | 128 | long regx; |
| 171 | int success; | 129 | int success; |
| @@ -187,13 +145,7 @@ static inline int _raw_write_trylock(rwlock_t * lock) | |||
| 187 | return success; | 145 | return success; |
| 188 | } | 146 | } |
| 189 | 147 | ||
| 190 | static inline void _raw_write_unlock(rwlock_t * lock) | 148 | static inline void __raw_read_unlock(raw_rwlock_t * lock) |
| 191 | { | ||
| 192 | mb(); | ||
| 193 | lock->lock = 0; | ||
| 194 | } | ||
| 195 | |||
| 196 | static inline void _raw_read_unlock(rwlock_t * lock) | ||
| 197 | { | 149 | { |
| 198 | long regx; | 150 | long regx; |
| 199 | __asm__ __volatile__( | 151 | __asm__ __volatile__( |
| @@ -209,4 +161,10 @@ static inline void _raw_read_unlock(rwlock_t * lock) | |||
| 209 | : "m" (*lock) : "memory"); | 161 | : "m" (*lock) : "memory"); |
| 210 | } | 162 | } |
| 211 | 163 | ||
| 164 | static inline void __raw_write_unlock(raw_rwlock_t * lock) | ||
| 165 | { | ||
| 166 | mb(); | ||
| 167 | lock->lock = 0; | ||
| 168 | } | ||
| 169 | |||
| 212 | #endif /* _ALPHA_SPINLOCK_H */ | 170 | #endif /* _ALPHA_SPINLOCK_H */ |
diff --git a/include/asm-alpha/spinlock_types.h b/include/asm-alpha/spinlock_types.h new file mode 100644 index 000000000000..8141eb5ebf0d --- /dev/null +++ b/include/asm-alpha/spinlock_types.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef _ALPHA_SPINLOCK_TYPES_H | ||
| 2 | #define _ALPHA_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int lock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
| 19 | |||
| 20 | #endif | ||
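From here the section turns into the arch-side half of the 2.6 spinlock consolidation. Each architecture's hand-rolled `spinlock_t`/`rwlock_t` definitions, debug magic, and `CONFIG_PREEMPT` fields move out of `asm/spinlock.h`; what remains are bare `raw_spinlock_t`/`raw_rwlock_t` words in a new `asm/spinlock_types.h` (which, as the guard above shows, may only be reached through `linux/spinlock_types.h`) plus the `__raw_*` fast-path primitives. A simplified sketch of how a generic layer can then wrap the arch types; the real `linux/spinlock*.h` layers debug variants and more on top of this:

```c
/* Sketch only: the shape of the generic wrapper, minus
 * CONFIG_DEBUG_SPINLOCK and the rest of the real plumbing. */
typedef struct {
	raw_spinlock_t raw_lock;	/* arch-provided word */
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;	/* contention hint, now generic */
#endif
} spinlock_t;

static inline void spin_lock(spinlock_t *lock)
{
	preempt_disable();
	__raw_spin_lock(&lock->raw_lock);	/* arch fast path */
}

static inline void spin_unlock(spinlock_t *lock)
{
	__raw_spin_unlock(&lock->raw_lock);
	preempt_enable();
}
```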
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h index 1f906d09b688..cb4906b45555 100644 --- a/include/asm-arm/spinlock.h +++ b/include/asm-arm/spinlock.h | |||
| @@ -16,21 +16,14 @@ | |||
| 16 | * Unlocked value: 0 | 16 | * Unlocked value: 0 |
| 17 | * Locked value: 1 | 17 | * Locked value: 1 |
| 18 | */ | 18 | */ |
| 19 | typedef struct { | ||
| 20 | volatile unsigned int lock; | ||
| 21 | #ifdef CONFIG_PREEMPT | ||
| 22 | unsigned int break_lock; | ||
| 23 | #endif | ||
| 24 | } spinlock_t; | ||
| 25 | 19 | ||
| 26 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | 20 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
| 21 | #define __raw_spin_unlock_wait(lock) \ | ||
| 22 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | ||
| 27 | 23 | ||
| 28 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while (0) | 24 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 29 | #define spin_is_locked(x) ((x)->lock != 0) | ||
| 30 | #define spin_unlock_wait(x) do { barrier(); } while (spin_is_locked(x)) | ||
| 31 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 32 | 25 | ||
| 33 | static inline void _raw_spin_lock(spinlock_t *lock) | 26 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
| 34 | { | 27 | { |
| 35 | unsigned long tmp; | 28 | unsigned long tmp; |
| 36 | 29 | ||
| @@ -47,7 +40,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 47 | smp_mb(); | 40 | smp_mb(); |
| 48 | } | 41 | } |
| 49 | 42 | ||
| 50 | static inline int _raw_spin_trylock(spinlock_t *lock) | 43 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
| 51 | { | 44 | { |
| 52 | unsigned long tmp; | 45 | unsigned long tmp; |
| 53 | 46 | ||
| @@ -67,7 +60,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock) | |||
| 67 | } | 60 | } |
| 68 | } | 61 | } |
| 69 | 62 | ||
| 70 | static inline void _raw_spin_unlock(spinlock_t *lock) | 63 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 71 | { | 64 | { |
| 72 | smp_mb(); | 65 | smp_mb(); |
| 73 | 66 | ||
| @@ -80,23 +73,14 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
| 80 | 73 | ||
| 81 | /* | 74 | /* |
| 82 | * RWLOCKS | 75 | * RWLOCKS |
| 83 | */ | 76 | * |
| 84 | typedef struct { | 77 | * |
| 85 | volatile unsigned int lock; | ||
| 86 | #ifdef CONFIG_PREEMPT | ||
| 87 | unsigned int break_lock; | ||
| 88 | #endif | ||
| 89 | } rwlock_t; | ||
| 90 | |||
| 91 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
| 92 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0) | ||
| 93 | #define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0) | ||
| 94 | |||
| 95 | /* | ||
| 96 | * Write locks are easy - we just set bit 31. When unlocking, we can | 78 | * Write locks are easy - we just set bit 31. When unlocking, we can |
| 97 | * just write zero since the lock is exclusively held. | 79 | * just write zero since the lock is exclusively held. |
| 98 | */ | 80 | */ |
| 99 | static inline void _raw_write_lock(rwlock_t *rw) | 81 | #define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0) |
| 82 | |||
| 83 | static inline void __raw_write_lock(rwlock_t *rw) | ||
| 100 | { | 84 | { |
| 101 | unsigned long tmp; | 85 | unsigned long tmp; |
| 102 | 86 | ||
| @@ -113,7 +97,7 @@ static inline void _raw_write_lock(rwlock_t *rw) | |||
| 113 | smp_mb(); | 97 | smp_mb(); |
| 114 | } | 98 | } |
| 115 | 99 | ||
| 116 | static inline int _raw_write_trylock(rwlock_t *rw) | 100 | static inline int __raw_write_trylock(rwlock_t *rw) |
| 117 | { | 101 | { |
| 118 | unsigned long tmp; | 102 | unsigned long tmp; |
| 119 | 103 | ||
| @@ -133,7 +117,7 @@ static inline int _raw_write_trylock(rwlock_t *rw) | |||
| 133 | } | 117 | } |
| 134 | } | 118 | } |
| 135 | 119 | ||
| 136 | static inline void _raw_write_unlock(rwlock_t *rw) | 120 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
| 137 | { | 121 | { |
| 138 | smp_mb(); | 122 | smp_mb(); |
| 139 | 123 | ||
| @@ -156,7 +140,7 @@ static inline void _raw_write_unlock(rwlock_t *rw) | |||
| 156 | * currently active. However, we know we won't have any write | 140 | * currently active. However, we know we won't have any write |
| 157 | * locks. | 141 | * locks. |
| 158 | */ | 142 | */ |
| 159 | static inline void _raw_read_lock(rwlock_t *rw) | 143 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
| 160 | { | 144 | { |
| 161 | unsigned long tmp, tmp2; | 145 | unsigned long tmp, tmp2; |
| 162 | 146 | ||
| @@ -173,7 +157,7 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
| 173 | smp_mb(); | 157 | smp_mb(); |
| 174 | } | 158 | } |
| 175 | 159 | ||
| 176 | static inline void _raw_read_unlock(rwlock_t *rw) | 160 | static inline void __raw_read_unlock(rwlock_t *rw) |
| 177 | { | 161 | { |
| 178 | unsigned long tmp, tmp2; | 162 | unsigned long tmp, tmp2; |
| 179 | 163 | ||
| @@ -190,6 +174,6 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
| 190 | : "cc"); | 174 | : "cc"); |
| 191 | } | 175 | } |
| 192 | 176 | ||
| 193 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 177 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
| 194 | 178 | ||
| 195 | #endif /* __ASM_SPINLOCK_H */ | 179 | #endif /* __ASM_SPINLOCK_H */ |
diff --git a/include/asm-arm/spinlock_types.h b/include/asm-arm/spinlock_types.h new file mode 100644 index 000000000000..43e83f6d2ee5 --- /dev/null +++ b/include/asm-arm/spinlock_types.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
| 2 | #define __ASM_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int lock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
| 19 | |||
| 20 | #endif | ||
diff --git a/include/asm-arm/unistd.h b/include/asm-arm/unistd.h index 278de61224d1..c49df635a80f 100644 --- a/include/asm-arm/unistd.h +++ b/include/asm-arm/unistd.h | |||
| @@ -355,6 +355,9 @@ | |||
| 355 | #define __NR_inotify_init (__NR_SYSCALL_BASE+316) | 355 | #define __NR_inotify_init (__NR_SYSCALL_BASE+316) |
| 356 | #define __NR_inotify_add_watch (__NR_SYSCALL_BASE+317) | 356 | #define __NR_inotify_add_watch (__NR_SYSCALL_BASE+317) |
| 357 | #define __NR_inotify_rm_watch (__NR_SYSCALL_BASE+318) | 357 | #define __NR_inotify_rm_watch (__NR_SYSCALL_BASE+318) |
| 358 | #define __NR_mbind (__NR_SYSCALL_BASE+319) | ||
| 359 | #define __NR_get_mempolicy (__NR_SYSCALL_BASE+320) | ||
| 360 | #define __NR_set_mempolicy (__NR_SYSCALL_BASE+321) | ||
| 358 | 361 | ||
| 359 | /* | 362 | /* |
| 360 | * The following SWIs are ARM private. | 363 | * The following SWIs are ARM private. |
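These three entries wire the NUMA memory-policy system calls up on ARM. Until libc grows wrappers, userspace reaches a freshly numbered call through `syscall(2)`; a minimal sketch, assuming the installed kernel headers export `__NR_set_mempolicy` for this ABI, and using `MPOL_DEFAULT` (0) to reset the caller's policy:

```c
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* set_mempolicy(MPOL_DEFAULT, NULL, 0): fall back to the
	 * default allocation policy for the calling task. */
	return syscall(__NR_set_mempolicy, 0, (void *)0, 0) ? 1 : 0;
}
```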
diff --git a/include/asm-arm26/hardirq.h b/include/asm-arm26/hardirq.h index 791ee1da9bfa..dc28daab8aa8 100644 --- a/include/asm-arm26/hardirq.h +++ b/include/asm-arm26/hardirq.h | |||
| @@ -22,8 +22,6 @@ typedef struct { | |||
| 22 | # error HARDIRQ_BITS is too low! | 22 | # error HARDIRQ_BITS is too low! |
| 23 | #endif | 23 | #endif |
| 24 | 24 | ||
| 25 | #define irq_enter() (preempt_count() += HARDIRQ_OFFSET) | ||
| 26 | |||
| 27 | #ifndef CONFIG_SMP | 25 | #ifndef CONFIG_SMP |
| 28 | 26 | ||
| 29 | extern asmlinkage void __do_softirq(void); | 27 | extern asmlinkage void __do_softirq(void); |
diff --git a/include/asm-i386/div64.h b/include/asm-i386/div64.h index 28ed8b296afc..75c67c785bb8 100644 --- a/include/asm-i386/div64.h +++ b/include/asm-i386/div64.h | |||
| @@ -35,7 +35,7 @@ | |||
| 35 | */ | 35 | */ |
| 36 | #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c) | 36 | #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c) |
| 37 | 37 | ||
| 38 | extern inline long | 38 | static inline long |
| 39 | div_ll_X_l_rem(long long divs, long div, long *rem) | 39 | div_ll_X_l_rem(long long divs, long div, long *rem) |
| 40 | { | 40 | { |
| 41 | long dum2; | 41 | long dum2; |
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h index 37bef8ed7bed..0a4ec764377c 100644 --- a/include/asm-i386/processor.h +++ b/include/asm-i386/processor.h | |||
| @@ -679,7 +679,7 @@ static inline void rep_nop(void) | |||
| 679 | However we don't do prefetches for pre XP Athlons currently | 679 | However we don't do prefetches for pre XP Athlons currently |
| 680 | That should be fixed. */ | 680 | That should be fixed. */ |
| 681 | #define ARCH_HAS_PREFETCH | 681 | #define ARCH_HAS_PREFETCH |
| 682 | extern inline void prefetch(const void *x) | 682 | static inline void prefetch(const void *x) |
| 683 | { | 683 | { |
| 684 | alternative_input(ASM_NOP4, | 684 | alternative_input(ASM_NOP4, |
| 685 | "prefetchnta (%1)", | 685 | "prefetchnta (%1)", |
| @@ -693,7 +693,7 @@ extern inline void prefetch(const void *x) | |||
| 693 | 693 | ||
| 694 | /* 3dnow! prefetch to get an exclusive cache line. Useful for | 694 | /* 3dnow! prefetch to get an exclusive cache line. Useful for |
| 695 | spinlocks to avoid one state transition in the cache coherency protocol. */ | 695 | spinlocks to avoid one state transition in the cache coherency protocol. */ |
| 696 | extern inline void prefetchw(const void *x) | 696 | static inline void prefetchw(const void *x) |
| 697 | { | 697 | { |
| 698 | alternative_input(ASM_NOP4, | 698 | alternative_input(ASM_NOP4, |
| 699 | "prefetchw (%1)", | 699 | "prefetchw (%1)", |
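The `div64.h` and `processor.h` hunks swap `extern inline` for `static inline`. Under the gnu89 inline semantics the kernel was built with, `extern inline` emits no out-of-line body at all, so any call the compiler declines to inline (at `-O0`, with inlining disabled, or when the function's address is taken) becomes an unresolved symbol at link time. `static inline` lets the compiler emit a file-local body whenever one is needed. A two-function illustration of the hazard (hypothetical names; compile with `gcc -std=gnu89`):

```c
/* At -O0, the call to twice_extern() fails to link: gnu89
 * 'extern inline' never provides a standalone definition. */
extern inline int twice_extern(int x) { return 2 * x; }

/* 'static inline' is always safe: a local copy is emitted on demand. */
static inline int twice_static(int x) { return 2 * x; }

int main(void)
{
	return twice_static(2) - twice_extern(2);
}
```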
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h index f9ff31f40036..23604350cdf4 100644 --- a/include/asm-i386/spinlock.h +++ b/include/asm-i386/spinlock.h | |||
| @@ -7,46 +7,21 @@ | |||
| 7 | #include <linux/config.h> | 7 | #include <linux/config.h> |
| 8 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
| 9 | 9 | ||
| 10 | asmlinkage int printk(const char * fmt, ...) | ||
| 11 | __attribute__ ((format (printf, 1, 2))); | ||
| 12 | |||
| 13 | /* | 10 | /* |
| 14 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 11 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
| 15 | */ | 12 | * |
| 16 | |||
| 17 | typedef struct { | ||
| 18 | volatile unsigned int slock; | ||
| 19 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 20 | unsigned magic; | ||
| 21 | #endif | ||
| 22 | #ifdef CONFIG_PREEMPT | ||
| 23 | unsigned int break_lock; | ||
| 24 | #endif | ||
| 25 | } spinlock_t; | ||
| 26 | |||
| 27 | #define SPINLOCK_MAGIC 0xdead4ead | ||
| 28 | |||
| 29 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 30 | #define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC | ||
| 31 | #else | ||
| 32 | #define SPINLOCK_MAGIC_INIT /* */ | ||
| 33 | #endif | ||
| 34 | |||
| 35 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT } | ||
| 36 | |||
| 37 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
| 38 | |||
| 39 | /* | ||
| 40 | * Simple spin lock operations. There are two variants, one clears IRQ's | 13 | * Simple spin lock operations. There are two variants, one clears IRQ's |
| 41 | * on the local processor, one does not. | 14 | * on the local processor, one does not. |
| 42 | * | 15 | * |
| 43 | * We make no fairness assumptions. They have a cost. | 16 | * We make no fairness assumptions. They have a cost. |
| 17 | * | ||
| 18 | * (the type definitions are in asm/spinlock_types.h) | ||
| 44 | */ | 19 | */ |
| 45 | 20 | ||
| 46 | #define spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0) | 21 | #define __raw_spin_is_locked(x) \ |
| 47 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | 22 | (*(volatile signed char *)(&(x)->slock) <= 0) |
| 48 | 23 | ||
| 49 | #define spin_lock_string \ | 24 | #define __raw_spin_lock_string \ |
| 50 | "\n1:\t" \ | 25 | "\n1:\t" \ |
| 51 | "lock ; decb %0\n\t" \ | 26 | "lock ; decb %0\n\t" \ |
| 52 | "jns 3f\n" \ | 27 | "jns 3f\n" \ |
| @@ -57,7 +32,7 @@ typedef struct { | |||
| 57 | "jmp 1b\n" \ | 32 | "jmp 1b\n" \ |
| 58 | "3:\n\t" | 33 | "3:\n\t" |
| 59 | 34 | ||
| 60 | #define spin_lock_string_flags \ | 35 | #define __raw_spin_lock_string_flags \ |
| 61 | "\n1:\t" \ | 36 | "\n1:\t" \ |
| 62 | "lock ; decb %0\n\t" \ | 37 | "lock ; decb %0\n\t" \ |
| 63 | "jns 4f\n\t" \ | 38 | "jns 4f\n\t" \ |
| @@ -73,86 +48,71 @@ typedef struct { | |||
| 73 | "jmp 1b\n" \ | 48 | "jmp 1b\n" \ |
| 74 | "4:\n\t" | 49 | "4:\n\t" |
| 75 | 50 | ||
| 51 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | ||
| 52 | { | ||
| 53 | __asm__ __volatile__( | ||
| 54 | __raw_spin_lock_string | ||
| 55 | :"=m" (lock->slock) : : "memory"); | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | ||
| 59 | { | ||
| 60 | __asm__ __volatile__( | ||
| 61 | __raw_spin_lock_string_flags | ||
| 62 | :"=m" (lock->slock) : "r" (flags) : "memory"); | ||
| 63 | } | ||
| 64 | |||
| 65 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | ||
| 66 | { | ||
| 67 | char oldval; | ||
| 68 | __asm__ __volatile__( | ||
| 69 | "xchgb %b0,%1" | ||
| 70 | :"=q" (oldval), "=m" (lock->slock) | ||
| 71 | :"0" (0) : "memory"); | ||
| 72 | return oldval > 0; | ||
| 73 | } | ||
| 74 | |||
| 76 | /* | 75 | /* |
| 77 | * This works. Despite all the confusion. | 76 | * __raw_spin_unlock based on writing $1 to the low byte. |
| 78 | * (except on PPro SMP or if we are using OOSTORE) | 77 | * This method works. Despite all the confusion. |
| 78 | * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there) | ||
| 79 | * (PPro errata 66, 92) | 79 | * (PPro errata 66, 92) |
| 80 | */ | 80 | */ |
| 81 | 81 | ||
| 82 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) | 82 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) |
| 83 | 83 | ||
| 84 | #define spin_unlock_string \ | 84 | #define __raw_spin_unlock_string \ |
| 85 | "movb $1,%0" \ | 85 | "movb $1,%0" \ |
| 86 | :"=m" (lock->slock) : : "memory" | 86 | :"=m" (lock->slock) : : "memory" |
| 87 | 87 | ||
| 88 | 88 | ||
| 89 | static inline void _raw_spin_unlock(spinlock_t *lock) | 89 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 90 | { | 90 | { |
| 91 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 92 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
| 93 | BUG_ON(!spin_is_locked(lock)); | ||
| 94 | #endif | ||
| 95 | __asm__ __volatile__( | 91 | __asm__ __volatile__( |
| 96 | spin_unlock_string | 92 | __raw_spin_unlock_string |
| 97 | ); | 93 | ); |
| 98 | } | 94 | } |
| 99 | 95 | ||
| 100 | #else | 96 | #else |
| 101 | 97 | ||
| 102 | #define spin_unlock_string \ | 98 | #define __raw_spin_unlock_string \ |
| 103 | "xchgb %b0, %1" \ | 99 | "xchgb %b0, %1" \ |
| 104 | :"=q" (oldval), "=m" (lock->slock) \ | 100 | :"=q" (oldval), "=m" (lock->slock) \ |
| 105 | :"0" (oldval) : "memory" | 101 | :"0" (oldval) : "memory" |
| 106 | 102 | ||
| 107 | static inline void _raw_spin_unlock(spinlock_t *lock) | 103 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 108 | { | 104 | { |
| 109 | char oldval = 1; | 105 | char oldval = 1; |
| 110 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 111 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
| 112 | BUG_ON(!spin_is_locked(lock)); | ||
| 113 | #endif | ||
| 114 | __asm__ __volatile__( | ||
| 115 | spin_unlock_string | ||
| 116 | ); | ||
| 117 | } | ||
| 118 | 106 | ||
| 119 | #endif | ||
| 120 | |||
| 121 | static inline int _raw_spin_trylock(spinlock_t *lock) | ||
| 122 | { | ||
| 123 | char oldval; | ||
| 124 | __asm__ __volatile__( | 107 | __asm__ __volatile__( |
| 125 | "xchgb %b0,%1" | 108 | __raw_spin_unlock_string |
| 126 | :"=q" (oldval), "=m" (lock->slock) | 109 | ); |
| 127 | :"0" (0) : "memory"); | ||
| 128 | return oldval > 0; | ||
| 129 | } | 110 | } |
| 130 | 111 | ||
| 131 | static inline void _raw_spin_lock(spinlock_t *lock) | ||
| 132 | { | ||
| 133 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 134 | if (unlikely(lock->magic != SPINLOCK_MAGIC)) { | ||
| 135 | printk("eip: %p\n", __builtin_return_address(0)); | ||
| 136 | BUG(); | ||
| 137 | } | ||
| 138 | #endif | 112 | #endif |
| 139 | __asm__ __volatile__( | ||
| 140 | spin_lock_string | ||
| 141 | :"=m" (lock->slock) : : "memory"); | ||
| 142 | } | ||
| 143 | 113 | ||
| 144 | static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | 114 | #define __raw_spin_unlock_wait(lock) \ |
| 145 | { | 115 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
| 146 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 147 | if (unlikely(lock->magic != SPINLOCK_MAGIC)) { | ||
| 148 | printk("eip: %p\n", __builtin_return_address(0)); | ||
| 149 | BUG(); | ||
| 150 | } | ||
| 151 | #endif | ||
| 152 | __asm__ __volatile__( | ||
| 153 | spin_lock_string_flags | ||
| 154 | :"=m" (lock->slock) : "r" (flags) : "memory"); | ||
| 155 | } | ||
| 156 | 116 | ||
| 157 | /* | 117 | /* |
| 158 | * Read-write spinlocks, allowing multiple readers | 118 | * Read-write spinlocks, allowing multiple readers |
| @@ -163,72 +123,41 @@ static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | |||
| 163 | * can "mix" irq-safe locks - any writer needs to get a | 123 | * can "mix" irq-safe locks - any writer needs to get a |
| 164 | * irq-safe write-lock, but readers can get non-irqsafe | 124 | * irq-safe write-lock, but readers can get non-irqsafe |
| 165 | * read-locks. | 125 | * read-locks. |
| 126 | * | ||
| 127 | * On x86, we implement read-write locks as a 32-bit counter | ||
| 128 | * with the high bit (sign) being the "contended" bit. | ||
| 129 | * | ||
| 130 | * The inline assembly is non-obvious. Think about it. | ||
| 131 | * | ||
| 132 | * Changed to use the same technique as rw semaphores. See | ||
| 133 | * semaphore.h for details. -ben | ||
| 134 | * | ||
| 135 | * the helpers are in arch/i386/kernel/semaphore.c | ||
| 166 | */ | 136 | */ |
| 167 | typedef struct { | ||
| 168 | volatile unsigned int lock; | ||
| 169 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 170 | unsigned magic; | ||
| 171 | #endif | ||
| 172 | #ifdef CONFIG_PREEMPT | ||
| 173 | unsigned int break_lock; | ||
| 174 | #endif | ||
| 175 | } rwlock_t; | ||
| 176 | |||
| 177 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
| 178 | |||
| 179 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 180 | #define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC | ||
| 181 | #else | ||
| 182 | #define RWLOCK_MAGIC_INIT /* */ | ||
| 183 | #endif | ||
| 184 | |||
| 185 | #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT } | ||
| 186 | |||
| 187 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
| 188 | 137 | ||
| 189 | /** | 138 | /** |
| 190 | * read_can_lock - would read_trylock() succeed? | 139 | * read_can_lock - would read_trylock() succeed? |
| 191 | * @lock: the rwlock in question. | 140 | * @lock: the rwlock in question. |
| 192 | */ | 141 | */ |
| 193 | #define read_can_lock(x) ((int)(x)->lock > 0) | 142 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) |
| 194 | 143 | ||
| 195 | /** | 144 | /** |
| 196 | * write_can_lock - would write_trylock() succeed? | 145 | * write_can_lock - would write_trylock() succeed? |
| 197 | * @lock: the rwlock in question. | 146 | * @lock: the rwlock in question. |
| 198 | */ | 147 | */ |
| 199 | #define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | 148 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) |
| 200 | 149 | ||
| 201 | /* | 150 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
| 202 | * On x86, we implement read-write locks as a 32-bit counter | ||
| 203 | * with the high bit (sign) being the "contended" bit. | ||
| 204 | * | ||
| 205 | * The inline assembly is non-obvious. Think about it. | ||
| 206 | * | ||
| 207 | * Changed to use the same technique as rw semaphores. See | ||
| 208 | * semaphore.h for details. -ben | ||
| 209 | */ | ||
| 210 | /* the spinlock helpers are in arch/i386/kernel/semaphore.c */ | ||
| 211 | |||
| 212 | static inline void _raw_read_lock(rwlock_t *rw) | ||
| 213 | { | 151 | { |
| 214 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 215 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
| 216 | #endif | ||
| 217 | __build_read_lock(rw, "__read_lock_failed"); | 152 | __build_read_lock(rw, "__read_lock_failed"); |
| 218 | } | 153 | } |
| 219 | 154 | ||
| 220 | static inline void _raw_write_lock(rwlock_t *rw) | 155 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
| 221 | { | 156 | { |
| 222 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 223 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
| 224 | #endif | ||
| 225 | __build_write_lock(rw, "__write_lock_failed"); | 157 | __build_write_lock(rw, "__write_lock_failed"); |
| 226 | } | 158 | } |
| 227 | 159 | ||
| 228 | #define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory") | 160 | static inline int __raw_read_trylock(raw_rwlock_t *lock) |
| 229 | #define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory") | ||
| 230 | |||
| 231 | static inline int _raw_read_trylock(rwlock_t *lock) | ||
| 232 | { | 161 | { |
| 233 | atomic_t *count = (atomic_t *)lock; | 162 | atomic_t *count = (atomic_t *)lock; |
| 234 | atomic_dec(count); | 163 | atomic_dec(count); |
| @@ -238,7 +167,7 @@ static inline int _raw_read_trylock(rwlock_t *lock) | |||
| 238 | return 0; | 167 | return 0; |
| 239 | } | 168 | } |
| 240 | 169 | ||
| 241 | static inline int _raw_write_trylock(rwlock_t *lock) | 170 | static inline int __raw_write_trylock(raw_rwlock_t *lock) |
| 242 | { | 171 | { |
| 243 | atomic_t *count = (atomic_t *)lock; | 172 | atomic_t *count = (atomic_t *)lock; |
| 244 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | 173 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) |
| @@ -247,4 +176,15 @@ static inline int _raw_write_trylock(rwlock_t *lock) | |||
| 247 | return 0; | 176 | return 0; |
| 248 | } | 177 | } |
| 249 | 178 | ||
| 179 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | ||
| 180 | { | ||
| 181 | asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory"); | ||
| 182 | } | ||
| 183 | |||
| 184 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | ||
| 185 | { | ||
| 186 | asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0" | ||
| 187 | : "=m" (rw->lock) : : "memory"); | ||
| 188 | } | ||
| 189 | |||
| 250 | #endif /* __ASM_SPINLOCK_H */ | 190 | #endif /* __ASM_SPINLOCK_H */ |
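The i386 fast path above is a byte lock: `slock` starts at 1, `lock; decb` atomically decrements it and the sign of the result decides whether the caller won, and unlock is a single byte store of 1 (or an `xchgb` on the OOSTORE/PPro-errata configurations). A portable C model of the same protocol built on GCC atomic builtins; illustrative only, not how the kernel spells it:

```c
static void byte_lock(volatile signed char *slock)
{
	for (;;) {
		/* 'lock; decb' + 'jns': seeing a positive old value
		 * means the decrement took the lock. */
		if (__sync_fetch_and_sub(slock, 1) > 0)
			return;
		while (*slock <= 0)
			;	/* 'rep; nop' spin (cpu_relax() in-tree) */
	}
}

static void byte_unlock(volatile signed char *slock)
{
	__sync_synchronize();	/* order prior stores before the release */
	*slock = 1;		/* 'movb $1, %0' */
}
```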
diff --git a/include/asm-i386/spinlock_types.h b/include/asm-i386/spinlock_types.h new file mode 100644 index 000000000000..59efe849f351 --- /dev/null +++ b/include/asm-i386/spinlock_types.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
| 2 | #define __ASM_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int slock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | ||
| 19 | |||
| 20 | #endif | ||
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h index d2430aa0d49d..5b78611411c3 100644 --- a/include/asm-ia64/spinlock.h +++ b/include/asm-ia64/spinlock.h | |||
| @@ -17,28 +17,20 @@ | |||
| 17 | #include <asm/intrinsics.h> | 17 | #include <asm/intrinsics.h> |
| 18 | #include <asm/system.h> | 18 | #include <asm/system.h> |
| 19 | 19 | ||
| 20 | typedef struct { | 20 | #define __raw_spin_lock_init(x) ((x)->lock = 0) |
| 21 | volatile unsigned int lock; | ||
| 22 | #ifdef CONFIG_PREEMPT | ||
| 23 | unsigned int break_lock; | ||
| 24 | #endif | ||
| 25 | } spinlock_t; | ||
| 26 | |||
| 27 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
| 28 | #define spin_lock_init(x) ((x)->lock = 0) | ||
| 29 | 21 | ||
| 30 | #ifdef ASM_SUPPORTED | 22 | #ifdef ASM_SUPPORTED |
| 31 | /* | 23 | /* |
| 32 | * Try to get the lock. If we fail to get the lock, make a non-standard call to | 24 | * Try to get the lock. If we fail to get the lock, make a non-standard call to |
| 33 | * ia64_spinlock_contention(). We do not use a normal call because that would force all | 25 | * ia64_spinlock_contention(). We do not use a normal call because that would force all |
| 34 | * callers of spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is | 26 | * callers of __raw_spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is |
| 35 | * carefully coded to touch only those registers that spin_lock() marks "clobbered". | 27 | * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered". |
| 36 | */ | 28 | */ |
| 37 | 29 | ||
| 38 | #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory" | 30 | #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory" |
| 39 | 31 | ||
| 40 | static inline void | 32 | static inline void |
| 41 | _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | 33 | __raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags) |
| 42 | { | 34 | { |
| 43 | register volatile unsigned int *ptr asm ("r31") = &lock->lock; | 35 | register volatile unsigned int *ptr asm ("r31") = &lock->lock; |
| 44 | 36 | ||
| @@ -94,17 +86,17 @@ _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | |||
| 94 | #endif | 86 | #endif |
| 95 | } | 87 | } |
| 96 | 88 | ||
| 97 | #define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0) | 89 | #define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) |
| 98 | 90 | ||
| 99 | /* Unlock by doing an ordered store and releasing the cacheline with nta */ | 91 | /* Unlock by doing an ordered store and releasing the cacheline with nta */ |
| 100 | static inline void _raw_spin_unlock(spinlock_t *x) { | 92 | static inline void __raw_spin_unlock(raw_spinlock_t *x) { |
| 101 | barrier(); | 93 | barrier(); |
| 102 | asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x)); | 94 | asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x)); |
| 103 | } | 95 | } |
| 104 | 96 | ||
| 105 | #else /* !ASM_SUPPORTED */ | 97 | #else /* !ASM_SUPPORTED */ |
| 106 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 98 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 107 | # define _raw_spin_lock(x) \ | 99 | # define __raw_spin_lock(x) \ |
| 108 | do { \ | 100 | do { \ |
| 109 | __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ | 101 | __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ |
| 110 | __u64 ia64_spinlock_val; \ | 102 | __u64 ia64_spinlock_val; \ |
| @@ -117,29 +109,20 @@ do { \ | |||
| 117 | } while (ia64_spinlock_val); \ | 109 | } while (ia64_spinlock_val); \ |
| 118 | } \ | 110 | } \ |
| 119 | } while (0) | 111 | } while (0) |
| 120 | #define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0) | 112 | #define __raw_spin_unlock(x) do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0) |
| 121 | #endif /* !ASM_SUPPORTED */ | 113 | #endif /* !ASM_SUPPORTED */ |
| 122 | 114 | ||
| 123 | #define spin_is_locked(x) ((x)->lock != 0) | 115 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
| 124 | #define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) | 116 | #define __raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) |
| 125 | #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) | 117 | #define __raw_spin_unlock_wait(lock) \ |
| 126 | 118 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | |
| 127 | typedef struct { | ||
| 128 | volatile unsigned int read_counter : 24; | ||
| 129 | volatile unsigned int write_lock : 8; | ||
| 130 | #ifdef CONFIG_PREEMPT | ||
| 131 | unsigned int break_lock; | ||
| 132 | #endif | ||
| 133 | } rwlock_t; | ||
| 134 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } | ||
| 135 | 119 | ||
| 136 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | 120 | #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) |
| 137 | #define read_can_lock(rw) (*(volatile int *)(rw) >= 0) | 121 | #define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) |
| 138 | #define write_can_lock(rw) (*(volatile int *)(rw) == 0) | ||
| 139 | 122 | ||
| 140 | #define _raw_read_lock(rw) \ | 123 | #define __raw_read_lock(rw) \ |
| 141 | do { \ | 124 | do { \ |
| 142 | rwlock_t *__read_lock_ptr = (rw); \ | 125 | raw_rwlock_t *__read_lock_ptr = (rw); \ |
| 143 | \ | 126 | \ |
| 144 | while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ | 127 | while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ |
| 145 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ | 128 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ |
| @@ -148,14 +131,14 @@ do { \ | |||
| 148 | } \ | 131 | } \ |
| 149 | } while (0) | 132 | } while (0) |
| 150 | 133 | ||
| 151 | #define _raw_read_unlock(rw) \ | 134 | #define __raw_read_unlock(rw) \ |
| 152 | do { \ | 135 | do { \ |
| 153 | rwlock_t *__read_lock_ptr = (rw); \ | 136 | raw_rwlock_t *__read_lock_ptr = (rw); \ |
| 154 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ | 137 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ |
| 155 | } while (0) | 138 | } while (0) |
| 156 | 139 | ||
| 157 | #ifdef ASM_SUPPORTED | 140 | #ifdef ASM_SUPPORTED |
| 158 | #define _raw_write_lock(rw) \ | 141 | #define __raw_write_lock(rw) \ |
| 159 | do { \ | 142 | do { \ |
| 160 | __asm__ __volatile__ ( \ | 143 | __asm__ __volatile__ ( \ |
| 161 | "mov ar.ccv = r0\n" \ | 144 | "mov ar.ccv = r0\n" \ |
| @@ -170,7 +153,7 @@ do { \ | |||
| 170 | :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \ | 153 | :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \ |
| 171 | } while(0) | 154 | } while(0) |
| 172 | 155 | ||
| 173 | #define _raw_write_trylock(rw) \ | 156 | #define __raw_write_trylock(rw) \ |
| 174 | ({ \ | 157 | ({ \ |
| 175 | register long result; \ | 158 | register long result; \ |
| 176 | \ | 159 | \ |
| @@ -182,7 +165,7 @@ do { \ | |||
| 182 | (result == 0); \ | 165 | (result == 0); \ |
| 183 | }) | 166 | }) |
| 184 | 167 | ||
| 185 | static inline void _raw_write_unlock(rwlock_t *x) | 168 | static inline void __raw_write_unlock(raw_rwlock_t *x) |
| 186 | { | 169 | { |
| 187 | u8 *y = (u8 *)x; | 170 | u8 *y = (u8 *)x; |
| 188 | barrier(); | 171 | barrier(); |
| @@ -191,7 +174,7 @@ static inline void _raw_write_unlock(rwlock_t *x) | |||
| 191 | 174 | ||
| 192 | #else /* !ASM_SUPPORTED */ | 175 | #else /* !ASM_SUPPORTED */ |
| 193 | 176 | ||
| 194 | #define _raw_write_lock(l) \ | 177 | #define __raw_write_lock(l) \ |
| 195 | ({ \ | 178 | ({ \ |
| 196 | __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ | 179 | __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ |
| 197 | __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ | 180 | __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ |
| @@ -202,7 +185,7 @@ static inline void _raw_write_unlock(rwlock_t *x) | |||
| 202 | } while (ia64_val); \ | 185 | } while (ia64_val); \ |
| 203 | }) | 186 | }) |
| 204 | 187 | ||
| 205 | #define _raw_write_trylock(rw) \ | 188 | #define __raw_write_trylock(rw) \ |
| 206 | ({ \ | 189 | ({ \ |
| 207 | __u64 ia64_val; \ | 190 | __u64 ia64_val; \ |
| 208 | __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ | 191 | __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ |
| @@ -210,7 +193,7 @@ static inline void _raw_write_unlock(rwlock_t *x) | |||
| 210 | (ia64_val == 0); \ | 193 | (ia64_val == 0); \ |
| 211 | }) | 194 | }) |
| 212 | 195 | ||
| 213 | static inline void _raw_write_unlock(rwlock_t *x) | 196 | static inline void __raw_write_unlock(raw_rwlock_t *x) |
| 214 | { | 197 | { |
| 215 | barrier(); | 198 | barrier(); |
| 216 | x->write_lock = 0; | 199 | x->write_lock = 0; |
| @@ -218,6 +201,6 @@ static inline void _raw_write_unlock(rwlock_t *x) | |||
| 218 | 201 | ||
| 219 | #endif /* !ASM_SUPPORTED */ | 202 | #endif /* !ASM_SUPPORTED */ |
| 220 | 203 | ||
| 221 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 204 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
| 222 | 205 | ||
| 223 | #endif /* _ASM_IA64_SPINLOCK_H */ | 206 | #endif /* _ASM_IA64_SPINLOCK_H */ |
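The ia64 read side is a fetch-and-add rwlock. With the type change below (a 31-bit reader count plus one writer bit in the sign position), the word read as a signed int is negative exactly while a writer holds the lock: readers optimistically `fetchadd` +1 and, if they saw the writer bit, back the increment out and spin. A C model using GCC builtins (names illustrative):

```c
static void rw_read_lock(volatile int *lock)
{
	/* fetchadd acq: returns the value seen before our +1 */
	while (__sync_fetch_and_add(lock, 1) < 0) {
		__sync_fetch_and_sub(lock, 1);	/* undo: a writer was in */
		while (*lock < 0)
			;			/* wait for the writer bit */
	}
}

static void rw_read_unlock(volatile int *lock)
{
	__sync_fetch_and_sub(lock, 1);		/* fetchadd rel */
}
```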
diff --git a/include/asm-ia64/spinlock_types.h b/include/asm-ia64/spinlock_types.h new file mode 100644 index 000000000000..474e46f1ab4a --- /dev/null +++ b/include/asm-ia64/spinlock_types.h | |||
| @@ -0,0 +1,21 @@ | |||
| 1 | #ifndef _ASM_IA64_SPINLOCK_TYPES_H | ||
| 2 | #define _ASM_IA64_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int lock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int read_counter : 31; | ||
| 16 | volatile unsigned int write_lock : 1; | ||
| 17 | } raw_rwlock_t; | ||
| 18 | |||
| 19 | #define __RAW_RW_LOCK_UNLOCKED { 0, 0 } | ||
| 20 | |||
| 21 | #endif | ||
diff --git a/include/asm-m32r/spinlock.h b/include/asm-m32r/spinlock.h index 6608d8371c50..7de7def28da9 100644 --- a/include/asm-m32r/spinlock.h +++ b/include/asm-m32r/spinlock.h | |||
| @@ -14,57 +14,30 @@ | |||
| 14 | #include <asm/atomic.h> | 14 | #include <asm/atomic.h> |
| 15 | #include <asm/page.h> | 15 | #include <asm/page.h> |
| 16 | 16 | ||
| 17 | extern int printk(const char * fmt, ...) | ||
| 18 | __attribute__ ((format (printf, 1, 2))); | ||
| 19 | |||
| 20 | #define RW_LOCK_BIAS 0x01000000 | ||
| 21 | #define RW_LOCK_BIAS_STR "0x01000000" | ||
| 22 | |||
| 23 | /* | 17 | /* |
| 24 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 18 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
| 25 | */ | 19 | * |
| 26 | 20 | * (the type definitions are in asm/spinlock_types.h) | |
| 27 | typedef struct { | 21 | * |
| 28 | volatile int slock; | ||
| 29 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 30 | unsigned magic; | ||
| 31 | #endif | ||
| 32 | #ifdef CONFIG_PREEMPT | ||
| 33 | unsigned int break_lock; | ||
| 34 | #endif | ||
| 35 | } spinlock_t; | ||
| 36 | |||
| 37 | #define SPINLOCK_MAGIC 0xdead4ead | ||
| 38 | |||
| 39 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 40 | #define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC | ||
| 41 | #else | ||
| 42 | #define SPINLOCK_MAGIC_INIT /* */ | ||
| 43 | #endif | ||
| 44 | |||
| 45 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT } | ||
| 46 | |||
| 47 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
| 48 | |||
| 49 | /* | ||
| 50 | * Simple spin lock operations. There are two variants, one clears IRQ's | 22 | * Simple spin lock operations. There are two variants, one clears IRQ's |
| 51 | * on the local processor, one does not. | 23 | * on the local processor, one does not. |
| 52 | * | 24 | * |
| 53 | * We make no fairness assumptions. They have a cost. | 25 | * We make no fairness assumptions. They have a cost. |
| 54 | */ | 26 | */ |
| 55 | 27 | ||
| 56 | #define spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) | 28 | #define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) |
| 57 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | 29 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 58 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 30 | #define __raw_spin_unlock_wait(x) \ |
| 31 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | ||
| 59 | 32 | ||
| 60 | /** | 33 | /** |
| 61 | * _raw_spin_trylock - Try spin lock and return a result | 34 | * __raw_spin_trylock - Try spin lock and return a result |
| 62 | * @lock: Pointer to the lock variable | 35 | * @lock: Pointer to the lock variable |
| 63 | * | 36 | * |
| 64 | * _raw_spin_trylock() tries to get the lock and returns a result. | 37 | * __raw_spin_trylock() tries to get the lock and returns a result. |
| 65 | * On the m32r, the result value is 1 (= Success) or 0 (= Failure). | 38 | * On the m32r, the result value is 1 (= Success) or 0 (= Failure). |
| 66 | */ | 39 | */ |
| 67 | static inline int _raw_spin_trylock(spinlock_t *lock) | 40 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
| 68 | { | 41 | { |
| 69 | int oldval; | 42 | int oldval; |
| 70 | unsigned long tmp1, tmp2; | 43 | unsigned long tmp1, tmp2; |
| @@ -78,7 +51,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock) | |||
| 78 | * } | 51 | * } |
| 79 | */ | 52 | */ |
| 80 | __asm__ __volatile__ ( | 53 | __asm__ __volatile__ ( |
| 81 | "# spin_trylock \n\t" | 54 | "# __raw_spin_trylock \n\t" |
| 82 | "ldi %1, #0; \n\t" | 55 | "ldi %1, #0; \n\t" |
| 83 | "mvfc %2, psw; \n\t" | 56 | "mvfc %2, psw; \n\t" |
| 84 | "clrpsw #0x40 -> nop; \n\t" | 57 | "clrpsw #0x40 -> nop; \n\t" |
| @@ -97,16 +70,10 @@ static inline int _raw_spin_trylock(spinlock_t *lock) | |||
| 97 | return (oldval > 0); | 70 | return (oldval > 0); |
| 98 | } | 71 | } |
| 99 | 72 | ||
| 100 | static inline void _raw_spin_lock(spinlock_t *lock) | 73 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
| 101 | { | 74 | { |
| 102 | unsigned long tmp0, tmp1; | 75 | unsigned long tmp0, tmp1; |
| 103 | 76 | ||
| 104 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 105 | if (unlikely(lock->magic != SPINLOCK_MAGIC)) { | ||
| 106 | printk("pc: %p\n", __builtin_return_address(0)); | ||
| 107 | BUG(); | ||
| 108 | } | ||
| 109 | #endif | ||
| 110 | /* | 77 | /* |
| 111 | * lock->slock : =1 : unlock | 78 | * lock->slock : =1 : unlock |
| 112 | * : <=0 : lock | 79 | * : <=0 : lock |
| @@ -118,7 +85,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 118 | * } | 85 | * } |
| 119 | */ | 86 | */ |
| 120 | __asm__ __volatile__ ( | 87 | __asm__ __volatile__ ( |
| 121 | "# spin_lock \n\t" | 88 | "# __raw_spin_lock \n\t" |
| 122 | ".fillinsn \n" | 89 | ".fillinsn \n" |
| 123 | "1: \n\t" | 90 | "1: \n\t" |
| 124 | "mvfc %1, psw; \n\t" | 91 | "mvfc %1, psw; \n\t" |
| @@ -145,12 +112,8 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 145 | ); | 112 | ); |
| 146 | } | 113 | } |
| 147 | 114 | ||
| 148 | static inline void _raw_spin_unlock(spinlock_t *lock) | 115 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 149 | { | 116 | { |
| 150 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 151 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
| 152 | BUG_ON(!spin_is_locked(lock)); | ||
| 153 | #endif | ||
| 154 | mb(); | 117 | mb(); |
| 155 | lock->slock = 1; | 118 | lock->slock = 1; |
| 156 | } | 119 | } |
| @@ -164,59 +127,32 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
| 164 | * can "mix" irq-safe locks - any writer needs to get a | 127 | * can "mix" irq-safe locks - any writer needs to get a |
| 165 | * irq-safe write-lock, but readers can get non-irqsafe | 128 | * irq-safe write-lock, but readers can get non-irqsafe |
| 166 | * read-locks. | 129 | * read-locks. |
| 130 | * | ||
| 131 | * On x86, we implement read-write locks as a 32-bit counter | ||
| 132 | * with the high bit (sign) being the "contended" bit. | ||
| 133 | * | ||
| 134 | * The inline assembly is non-obvious. Think about it. | ||
| 135 | * | ||
| 136 | * Changed to use the same technique as rw semaphores. See | ||
| 137 | * semaphore.h for details. -ben | ||
| 167 | */ | 138 | */ |
| 168 | typedef struct { | ||
| 169 | volatile int lock; | ||
| 170 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 171 | unsigned magic; | ||
| 172 | #endif | ||
| 173 | #ifdef CONFIG_PREEMPT | ||
| 174 | unsigned int break_lock; | ||
| 175 | #endif | ||
| 176 | } rwlock_t; | ||
| 177 | |||
| 178 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
| 179 | |||
| 180 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 181 | #define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC | ||
| 182 | #else | ||
| 183 | #define RWLOCK_MAGIC_INIT /* */ | ||
| 184 | #endif | ||
| 185 | |||
| 186 | #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT } | ||
| 187 | |||
| 188 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
| 189 | 139 | ||
| 190 | /** | 140 | /** |
| 191 | * read_can_lock - would read_trylock() succeed? | 141 | * read_can_lock - would read_trylock() succeed? |
| 192 | * @lock: the rwlock in question. | 142 | * @lock: the rwlock in question. |
| 193 | */ | 143 | */ |
| 194 | #define read_can_lock(x) ((int)(x)->lock > 0) | 144 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) |
| 195 | 145 | ||
| 196 | /** | 146 | /** |
| 197 | * write_can_lock - would write_trylock() succeed? | 147 | * write_can_lock - would write_trylock() succeed? |
| 198 | * @lock: the rwlock in question. | 148 | * @lock: the rwlock in question. |
| 199 | */ | 149 | */ |
| 200 | #define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | 150 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) |
| 201 | |||
| 202 | /* | ||
| 203 | * On x86, we implement read-write locks as a 32-bit counter | ||
| 204 | * with the high bit (sign) being the "contended" bit. | ||
| 205 | * | ||
| 206 | * The inline assembly is non-obvious. Think about it. | ||
| 207 | * | ||
| 208 | * Changed to use the same technique as rw semaphores. See | ||
| 209 | * semaphore.h for details. -ben | ||
| 210 | */ | ||
| 211 | /* the spinlock helpers are in arch/i386/kernel/semaphore.c */ | ||
| 212 | 151 | ||
| 213 | static inline void _raw_read_lock(rwlock_t *rw) | 152 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
| 214 | { | 153 | { |
| 215 | unsigned long tmp0, tmp1; | 154 | unsigned long tmp0, tmp1; |
| 216 | 155 | ||
| 217 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 218 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
| 219 | #endif | ||
| 220 | /* | 156 | /* |
| 221 | * rw->lock : >0 : unlock | 157 | * rw->lock : >0 : unlock |
| 222 | * : <=0 : lock | 158 | * : <=0 : lock |
| @@ -264,13 +200,10 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
| 264 | ); | 200 | ); |
| 265 | } | 201 | } |
| 266 | 202 | ||
| 267 | static inline void _raw_write_lock(rwlock_t *rw) | 203 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
| 268 | { | 204 | { |
| 269 | unsigned long tmp0, tmp1, tmp2; | 205 | unsigned long tmp0, tmp1, tmp2; |
| 270 | 206 | ||
| 271 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 272 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
| 273 | #endif | ||
| 274 | /* | 207 | /* |
| 275 | * rw->lock : =RW_LOCK_BIAS_STR : unlock | 208 | * rw->lock : =RW_LOCK_BIAS_STR : unlock |
| 276 | * : !=RW_LOCK_BIAS_STR : lock | 209 | * : !=RW_LOCK_BIAS_STR : lock |
| @@ -320,7 +253,7 @@ static inline void _raw_write_lock(rwlock_t *rw) | |||
| 320 | ); | 253 | ); |
| 321 | } | 254 | } |
| 322 | 255 | ||
| 323 | static inline void _raw_read_unlock(rwlock_t *rw) | 256 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
| 324 | { | 257 | { |
| 325 | unsigned long tmp0, tmp1; | 258 | unsigned long tmp0, tmp1; |
| 326 | 259 | ||
| @@ -342,7 +275,7 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
| 342 | ); | 275 | ); |
| 343 | } | 276 | } |
| 344 | 277 | ||
| 345 | static inline void _raw_write_unlock(rwlock_t *rw) | 278 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
| 346 | { | 279 | { |
| 347 | unsigned long tmp0, tmp1, tmp2; | 280 | unsigned long tmp0, tmp1, tmp2; |
| 348 | 281 | ||
| @@ -366,9 +299,9 @@ static inline void _raw_write_unlock(rwlock_t *rw) | |||
| 366 | ); | 299 | ); |
| 367 | } | 300 | } |
| 368 | 301 | ||
| 369 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 302 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
| 370 | 303 | ||
| 371 | static inline int _raw_write_trylock(rwlock_t *lock) | 304 | static inline int __raw_write_trylock(raw_rwlock_t *lock) |
| 372 | { | 305 | { |
| 373 | atomic_t *count = (atomic_t *)lock; | 306 | atomic_t *count = (atomic_t *)lock; |
| 374 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | 307 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) |
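The m32r trylock protocol is spelled out in the pseudo-code comments above: slock holds 1 when unlocked, 0 or less when locked, and the lock/unlock instruction pair atomically fetches the old value while storing 0. A minimal C sketch of that protocol, using GCC atomic builtins purely for illustration (these helpers are an assumption, not kernel code):

    /* sketch: slock == 1 means unlocked, <= 0 means locked */
    static int sketch_spin_trylock(volatile int *slock)
    {
            /* atomically take the old value while storing 0 (locked) */
            int oldval = __atomic_exchange_n(slock, 0, __ATOMIC_ACQUIRE);
            return oldval > 0;              /* success iff it was unlocked */
    }

    static void sketch_spin_lock(volatile int *slock)
    {
            while (!sketch_spin_trylock(slock))
                    ;       /* the real loop also flips psw to mask irqs */
    }

    static void sketch_spin_unlock(volatile int *slock)
    {
            __atomic_store_n(slock, 1, __ATOMIC_RELEASE);   /* mb(); slock = 1 */
    }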
diff --git a/include/asm-m32r/spinlock_types.h b/include/asm-m32r/spinlock_types.h new file mode 100644 index 000000000000..7e9941c45f40 --- /dev/null +++ b/include/asm-m32r/spinlock_types.h | |||
| @@ -0,0 +1,23 @@ | |||
| 1 | #ifndef _ASM_M32R_SPINLOCK_TYPES_H | ||
| 2 | #define _ASM_M32R_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile int slock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define RW_LOCK_BIAS 0x01000000 | ||
| 19 | #define RW_LOCK_BIAS_STR "0x01000000" | ||
| 20 | |||
| 21 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | ||
| 22 | |||
| 23 | #endif | ||
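With the types split out, the per-arch header carries only the bare lock word; the debug magic and CONFIG_PREEMPT break_lock fields deleted above move into the generic wrapper. A rough sketch of how linux/spinlock_types.h wraps the raw type after this split (field names are illustrative, not a verbatim copy):

    typedef struct {
            raw_spinlock_t raw_lock;        /* the arch word defined above */
    #ifdef CONFIG_DEBUG_SPINLOCK
            unsigned int magic;             /* debug state, now arch-neutral */
            unsigned int owner_cpu;
            void *owner;
    #endif
    #ifdef CONFIG_PREEMPT
            unsigned int break_lock;        /* shared preemption hook */
    #endif
    } spinlock_t;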
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h index 114d3eb98a6a..4d0135b11156 100644 --- a/include/asm-mips/spinlock.h +++ b/include/asm-mips/spinlock.h | |||
| @@ -16,20 +16,10 @@ | |||
| 16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | typedef struct { | 19 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
| 20 | volatile unsigned int lock; | 20 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 21 | #ifdef CONFIG_PREEMPT | 21 | #define __raw_spin_unlock_wait(x) \ |
| 22 | unsigned int break_lock; | 22 | do { cpu_relax(); } while ((x)->lock) |
| 23 | #endif | ||
| 24 | } spinlock_t; | ||
| 25 | |||
| 26 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
| 27 | |||
| 28 | #define spin_lock_init(x) do { (x)->lock = 0; } while(0) | ||
| 29 | |||
| 30 | #define spin_is_locked(x) ((x)->lock != 0) | ||
| 31 | #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) | ||
| 32 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 33 | 23 | ||
| 34 | /* | 24 | /* |
| 35 | * Simple spin lock operations. There are two variants, one clears IRQ's | 25 | * Simple spin lock operations. There are two variants, one clears IRQ's |
| @@ -38,13 +28,13 @@ typedef struct { | |||
| 38 | * We make no fairness assumptions. They have a cost. | 28 | * We make no fairness assumptions. They have a cost. |
| 39 | */ | 29 | */ |
| 40 | 30 | ||
| 41 | static inline void _raw_spin_lock(spinlock_t *lock) | 31 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
| 42 | { | 32 | { |
| 43 | unsigned int tmp; | 33 | unsigned int tmp; |
| 44 | 34 | ||
| 45 | if (R10000_LLSC_WAR) { | 35 | if (R10000_LLSC_WAR) { |
| 46 | __asm__ __volatile__( | 36 | __asm__ __volatile__( |
| 47 | " .set noreorder # _raw_spin_lock \n" | 37 | " .set noreorder # __raw_spin_lock \n" |
| 48 | "1: ll %1, %2 \n" | 38 | "1: ll %1, %2 \n" |
| 49 | " bnez %1, 1b \n" | 39 | " bnez %1, 1b \n" |
| 50 | " li %1, 1 \n" | 40 | " li %1, 1 \n" |
| @@ -58,7 +48,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 58 | : "memory"); | 48 | : "memory"); |
| 59 | } else { | 49 | } else { |
| 60 | __asm__ __volatile__( | 50 | __asm__ __volatile__( |
| 61 | " .set noreorder # _raw_spin_lock \n" | 51 | " .set noreorder # __raw_spin_lock \n" |
| 62 | "1: ll %1, %2 \n" | 52 | "1: ll %1, %2 \n" |
| 63 | " bnez %1, 1b \n" | 53 | " bnez %1, 1b \n" |
| 64 | " li %1, 1 \n" | 54 | " li %1, 1 \n" |
| @@ -72,10 +62,10 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 72 | } | 62 | } |
| 73 | } | 63 | } |
| 74 | 64 | ||
| 75 | static inline void _raw_spin_unlock(spinlock_t *lock) | 65 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 76 | { | 66 | { |
| 77 | __asm__ __volatile__( | 67 | __asm__ __volatile__( |
| 78 | " .set noreorder # _raw_spin_unlock \n" | 68 | " .set noreorder # __raw_spin_unlock \n" |
| 79 | " sync \n" | 69 | " sync \n" |
| 80 | " sw $0, %0 \n" | 70 | " sw $0, %0 \n" |
| 81 | " .set\treorder \n" | 71 | " .set\treorder \n" |
| @@ -84,13 +74,13 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
| 84 | : "memory"); | 74 | : "memory"); |
| 85 | } | 75 | } |
| 86 | 76 | ||
| 87 | static inline unsigned int _raw_spin_trylock(spinlock_t *lock) | 77 | static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) |
| 88 | { | 78 | { |
| 89 | unsigned int temp, res; | 79 | unsigned int temp, res; |
| 90 | 80 | ||
| 91 | if (R10000_LLSC_WAR) { | 81 | if (R10000_LLSC_WAR) { |
| 92 | __asm__ __volatile__( | 82 | __asm__ __volatile__( |
| 93 | " .set noreorder # _raw_spin_trylock \n" | 83 | " .set noreorder # __raw_spin_trylock \n" |
| 94 | "1: ll %0, %3 \n" | 84 | "1: ll %0, %3 \n" |
| 95 | " ori %2, %0, 1 \n" | 85 | " ori %2, %0, 1 \n" |
| 96 | " sc %2, %1 \n" | 86 | " sc %2, %1 \n" |
| @@ -104,7 +94,7 @@ static inline unsigned int _raw_spin_trylock(spinlock_t *lock) | |||
| 104 | : "memory"); | 94 | : "memory"); |
| 105 | } else { | 95 | } else { |
| 106 | __asm__ __volatile__( | 96 | __asm__ __volatile__( |
| 107 | " .set noreorder # _raw_spin_trylock \n" | 97 | " .set noreorder # __raw_spin_trylock \n" |
| 108 | "1: ll %0, %3 \n" | 98 | "1: ll %0, %3 \n" |
| 109 | " ori %2, %0, 1 \n" | 99 | " ori %2, %0, 1 \n" |
| 110 | " sc %2, %1 \n" | 100 | " sc %2, %1 \n" |
| @@ -129,24 +119,13 @@ static inline unsigned int _raw_spin_trylock(spinlock_t *lock) | |||
| 129 | * read-locks. | 119 | * read-locks. |
| 130 | */ | 120 | */ |
| 131 | 121 | ||
| 132 | typedef struct { | 122 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
| 133 | volatile unsigned int lock; | ||
| 134 | #ifdef CONFIG_PREEMPT | ||
| 135 | unsigned int break_lock; | ||
| 136 | #endif | ||
| 137 | } rwlock_t; | ||
| 138 | |||
| 139 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
| 140 | |||
| 141 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
| 142 | |||
| 143 | static inline void _raw_read_lock(rwlock_t *rw) | ||
| 144 | { | 123 | { |
| 145 | unsigned int tmp; | 124 | unsigned int tmp; |
| 146 | 125 | ||
| 147 | if (R10000_LLSC_WAR) { | 126 | if (R10000_LLSC_WAR) { |
| 148 | __asm__ __volatile__( | 127 | __asm__ __volatile__( |
| 149 | " .set noreorder # _raw_read_lock \n" | 128 | " .set noreorder # __raw_read_lock \n" |
| 150 | "1: ll %1, %2 \n" | 129 | "1: ll %1, %2 \n" |
| 151 | " bltz %1, 1b \n" | 130 | " bltz %1, 1b \n" |
| 152 | " addu %1, 1 \n" | 131 | " addu %1, 1 \n" |
| @@ -160,7 +139,7 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
| 160 | : "memory"); | 139 | : "memory"); |
| 161 | } else { | 140 | } else { |
| 162 | __asm__ __volatile__( | 141 | __asm__ __volatile__( |
| 163 | " .set noreorder # _raw_read_lock \n" | 142 | " .set noreorder # __raw_read_lock \n" |
| 164 | "1: ll %1, %2 \n" | 143 | "1: ll %1, %2 \n" |
| 165 | " bltz %1, 1b \n" | 144 | " bltz %1, 1b \n" |
| 166 | " addu %1, 1 \n" | 145 | " addu %1, 1 \n" |
| @@ -177,13 +156,13 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
| 177 | /* Note the use of sub, not subu, which will make the kernel die with an | 156 | /* Note the use of sub, not subu, which will make the kernel die with an |
| 178 | overflow exception if we ever try to unlock an rwlock that is already | 157 | overflow exception if we ever try to unlock an rwlock that is already |
| 179 | unlocked or is being held by a writer. */ | 158 | unlocked or is being held by a writer. */ |
| 180 | static inline void _raw_read_unlock(rwlock_t *rw) | 159 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
| 181 | { | 160 | { |
| 182 | unsigned int tmp; | 161 | unsigned int tmp; |
| 183 | 162 | ||
| 184 | if (R10000_LLSC_WAR) { | 163 | if (R10000_LLSC_WAR) { |
| 185 | __asm__ __volatile__( | 164 | __asm__ __volatile__( |
| 186 | "1: ll %1, %2 # _raw_read_unlock \n" | 165 | "1: ll %1, %2 # __raw_read_unlock \n" |
| 187 | " sub %1, 1 \n" | 166 | " sub %1, 1 \n" |
| 188 | " sc %1, %0 \n" | 167 | " sc %1, %0 \n" |
| 189 | " beqzl %1, 1b \n" | 168 | " beqzl %1, 1b \n" |
| @@ -193,7 +172,7 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
| 193 | : "memory"); | 172 | : "memory"); |
| 194 | } else { | 173 | } else { |
| 195 | __asm__ __volatile__( | 174 | __asm__ __volatile__( |
| 196 | " .set noreorder # _raw_read_unlock \n" | 175 | " .set noreorder # __raw_read_unlock \n" |
| 197 | "1: ll %1, %2 \n" | 176 | "1: ll %1, %2 \n" |
| 198 | " sub %1, 1 \n" | 177 | " sub %1, 1 \n" |
| 199 | " sc %1, %0 \n" | 178 | " sc %1, %0 \n" |
| @@ -206,13 +185,13 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
| 206 | } | 185 | } |
| 207 | } | 186 | } |
| 208 | 187 | ||
| 209 | static inline void _raw_write_lock(rwlock_t *rw) | 188 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
| 210 | { | 189 | { |
| 211 | unsigned int tmp; | 190 | unsigned int tmp; |
| 212 | 191 | ||
| 213 | if (R10000_LLSC_WAR) { | 192 | if (R10000_LLSC_WAR) { |
| 214 | __asm__ __volatile__( | 193 | __asm__ __volatile__( |
| 215 | " .set noreorder # _raw_write_lock \n" | 194 | " .set noreorder # __raw_write_lock \n" |
| 216 | "1: ll %1, %2 \n" | 195 | "1: ll %1, %2 \n" |
| 217 | " bnez %1, 1b \n" | 196 | " bnez %1, 1b \n" |
| 218 | " lui %1, 0x8000 \n" | 197 | " lui %1, 0x8000 \n" |
| @@ -226,7 +205,7 @@ static inline void _raw_write_lock(rwlock_t *rw) | |||
| 226 | : "memory"); | 205 | : "memory"); |
| 227 | } else { | 206 | } else { |
| 228 | __asm__ __volatile__( | 207 | __asm__ __volatile__( |
| 229 | " .set noreorder # _raw_write_lock \n" | 208 | " .set noreorder # __raw_write_lock \n" |
| 230 | "1: ll %1, %2 \n" | 209 | "1: ll %1, %2 \n" |
| 231 | " bnez %1, 1b \n" | 210 | " bnez %1, 1b \n" |
| 232 | " lui %1, 0x8000 \n" | 211 | " lui %1, 0x8000 \n" |
| @@ -241,26 +220,26 @@ static inline void _raw_write_lock(rwlock_t *rw) | |||
| 241 | } | 220 | } |
| 242 | } | 221 | } |
| 243 | 222 | ||
| 244 | static inline void _raw_write_unlock(rwlock_t *rw) | 223 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
| 245 | { | 224 | { |
| 246 | __asm__ __volatile__( | 225 | __asm__ __volatile__( |
| 247 | " sync # _raw_write_unlock \n" | 226 | " sync # __raw_write_unlock \n" |
| 248 | " sw $0, %0 \n" | 227 | " sw $0, %0 \n" |
| 249 | : "=m" (rw->lock) | 228 | : "=m" (rw->lock) |
| 250 | : "m" (rw->lock) | 229 | : "m" (rw->lock) |
| 251 | : "memory"); | 230 | : "memory"); |
| 252 | } | 231 | } |
| 253 | 232 | ||
| 254 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 233 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
| 255 | 234 | ||
| 256 | static inline int _raw_write_trylock(rwlock_t *rw) | 235 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
| 257 | { | 236 | { |
| 258 | unsigned int tmp; | 237 | unsigned int tmp; |
| 259 | int ret; | 238 | int ret; |
| 260 | 239 | ||
| 261 | if (R10000_LLSC_WAR) { | 240 | if (R10000_LLSC_WAR) { |
| 262 | __asm__ __volatile__( | 241 | __asm__ __volatile__( |
| 263 | " .set noreorder # _raw_write_trylock \n" | 242 | " .set noreorder # __raw_write_trylock \n" |
| 264 | " li %2, 0 \n" | 243 | " li %2, 0 \n" |
| 265 | "1: ll %1, %3 \n" | 244 | "1: ll %1, %3 \n" |
| 266 | " bnez %1, 2f \n" | 245 | " bnez %1, 2f \n" |
| @@ -277,7 +256,7 @@ static inline int _raw_write_trylock(rwlock_t *rw) | |||
| 277 | : "memory"); | 256 | : "memory"); |
| 278 | } else { | 257 | } else { |
| 279 | __asm__ __volatile__( | 258 | __asm__ __volatile__( |
| 280 | " .set noreorder # _raw_write_trylock \n" | 259 | " .set noreorder # __raw_write_trylock \n" |
| 281 | " li %2, 0 \n" | 260 | " li %2, 0 \n" |
| 282 | "1: ll %1, %3 \n" | 261 | "1: ll %1, %3 \n" |
| 283 | " bnez %1, 2f \n" | 262 | " bnez %1, 2f \n" |
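On MIPS the lock word is 0 when free, and the ll/sc pair retries until the conditional store of 1 lands. The same acquire loop rendered as a compare-and-swap in portable C (a sketch of the protocol only, not the kernel's code):

    static void sketch_mips_spin_lock(volatile unsigned int *lock)
    {
            unsigned int expected;

            do {
                    expected = 0;   /* ll must see the lock free */
            } while (!__atomic_compare_exchange_n(lock, &expected, 1,
                            0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
    }

    static void sketch_mips_spin_unlock(volatile unsigned int *lock)
    {
            /* "sync; sw $0" above is a release store of zero */
            __atomic_store_n(lock, 0, __ATOMIC_RELEASE);
    }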
diff --git a/include/asm-mips/spinlock_types.h b/include/asm-mips/spinlock_types.h new file mode 100644 index 000000000000..ce26c5048b15 --- /dev/null +++ b/include/asm-mips/spinlock_types.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef _ASM_SPINLOCK_TYPES_H | ||
| 2 | #define _ASM_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int lock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
| 19 | |||
| 20 | #endif | ||
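Note how the unlocked value stays an arch detail: mips initializes the word to 0, m32r above to 1, and parisc below to a block of four 1s; only the __RAW_*_UNLOCKED initializers know. Static initialization then looks the same everywhere, e.g. (illustrative):

    static raw_spinlock_t demo_lock = __RAW_SPIN_LOCK_UNLOCKED;    /* { 0 } here */
    static raw_rwlock_t demo_rwlock = __RAW_RW_LOCK_UNLOCKED;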
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h index e24f7579adb0..048a2c7fd0c0 100644 --- a/include/asm-parisc/atomic.h +++ b/include/asm-parisc/atomic.h | |||
| @@ -24,19 +24,19 @@ | |||
| 24 | # define ATOMIC_HASH_SIZE 4 | 24 | # define ATOMIC_HASH_SIZE 4 |
| 25 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) | 25 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) |
| 26 | 26 | ||
| 27 | extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; | 27 | extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; |
| 28 | 28 | ||
| 29 | /* Can't use _raw_spin_lock_irq because of #include problems, so | 29 | /* Can't use raw_spin_lock_irq because of #include problems, so |
| 30 | * this is the substitute */ | 30 | * this is the substitute */ |
| 31 | #define _atomic_spin_lock_irqsave(l,f) do { \ | 31 | #define _atomic_spin_lock_irqsave(l,f) do { \ |
| 32 | spinlock_t *s = ATOMIC_HASH(l); \ | 32 | raw_spinlock_t *s = ATOMIC_HASH(l); \ |
| 33 | local_irq_save(f); \ | 33 | local_irq_save(f); \ |
| 34 | _raw_spin_lock(s); \ | 34 | __raw_spin_lock(s); \ |
| 35 | } while(0) | 35 | } while(0) |
| 36 | 36 | ||
| 37 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ | 37 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ |
| 38 | spinlock_t *s = ATOMIC_HASH(l); \ | 38 | raw_spinlock_t *s = ATOMIC_HASH(l); \ |
| 39 | _raw_spin_unlock(s); \ | 39 | __raw_spin_unlock(s); \ |
| 40 | local_irq_restore(f); \ | 40 | local_irq_restore(f); \ |
| 41 | } while(0) | 41 | } while(0) |
| 42 | 42 | ||
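PA-RISC has no atomic read-modify-write beyond load-and-zero, so every atomic_t operation takes one of the hashed raw spinlocks around a plain update. A sketch of how an atomic op sits inside the pair of macros above (the function itself is illustrative; parisc's real atomic_add_return has this shape):

    static __inline__ int sketch_atomic_add_return(int i, atomic_t *v)
    {
            unsigned long flags;
            int ret;

            _atomic_spin_lock_irqsave(v, flags);    /* lock hashed on &v */
            ret = (v->counter += i);                /* plain update, now safe */
            _atomic_spin_unlock_irqrestore(v, flags);
            return ret;
    }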
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h index 928e5ef850bd..af7db694b22d 100644 --- a/include/asm-parisc/bitops.h +++ b/include/asm-parisc/bitops.h | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | #define _PARISC_BITOPS_H | 2 | #define _PARISC_BITOPS_H |
| 3 | 3 | ||
| 4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
| 5 | #include <asm/system.h> | 5 | #include <asm/spinlock.h> |
| 6 | #include <asm/byteorder.h> | 6 | #include <asm/byteorder.h> |
| 7 | #include <asm/atomic.h> | 7 | #include <asm/atomic.h> |
| 8 | 8 | ||
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h index 06732719d927..aa592d8c0e39 100644 --- a/include/asm-parisc/cacheflush.h +++ b/include/asm-parisc/cacheflush.h | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | #include <linux/config.h> | 4 | #include <linux/config.h> |
| 5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
| 6 | #include <asm/cache.h> /* for flush_user_dcache_range_asm() proto */ | ||
| 6 | 7 | ||
| 7 | /* The usual comment is "Caches aren't brain-dead on the <architecture>". | 8 | /* The usual comment is "Caches aren't brain-dead on the <architecture>". |
| 8 | * Unfortunately, that doesn't apply to PA-RISC. */ | 9 | * Unfortunately, that doesn't apply to PA-RISC. */ |
diff --git a/include/asm-parisc/processor.h b/include/asm-parisc/processor.h index 0b61f51d8467..a9dfadd05658 100644 --- a/include/asm-parisc/processor.h +++ b/include/asm-parisc/processor.h | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #ifndef __ASSEMBLY__ | 11 | #ifndef __ASSEMBLY__ |
| 12 | #include <linux/config.h> | 12 | #include <linux/config.h> |
| 13 | #include <linux/threads.h> | 13 | #include <linux/threads.h> |
| 14 | #include <linux/spinlock_types.h> | ||
| 14 | 15 | ||
| 15 | #include <asm/hardware.h> | 16 | #include <asm/hardware.h> |
| 16 | #include <asm/page.h> | 17 | #include <asm/page.h> |
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h index 679ea1c651ef..43eaa6e742e0 100644 --- a/include/asm-parisc/spinlock.h +++ b/include/asm-parisc/spinlock.h | |||
| @@ -2,30 +2,25 @@ | |||
| 2 | #define __ASM_SPINLOCK_H | 2 | #define __ASM_SPINLOCK_H |
| 3 | 3 | ||
| 4 | #include <asm/system.h> | 4 | #include <asm/system.h> |
| 5 | #include <asm/processor.h> | ||
| 6 | #include <asm/spinlock_types.h> | ||
| 5 | 7 | ||
| 6 | /* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked | 8 | /* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked |
| 7 | * since it only has load-and-zero. Moreover, at least on some PA processors, | 9 | * since it only has load-and-zero. Moreover, at least on some PA processors, |
| 8 | * the semaphore address has to be 16-byte aligned. | 10 | * the semaphore address has to be 16-byte aligned. |
| 9 | */ | 11 | */ |
| 10 | 12 | ||
| 11 | #ifndef CONFIG_DEBUG_SPINLOCK | 13 | static inline int __raw_spin_is_locked(raw_spinlock_t *x) |
| 12 | |||
| 13 | #define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } | ||
| 14 | #undef SPIN_LOCK_UNLOCKED | ||
| 15 | #define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED | ||
| 16 | |||
| 17 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
| 18 | |||
| 19 | static inline int spin_is_locked(spinlock_t *x) | ||
| 20 | { | 14 | { |
| 21 | volatile unsigned int *a = __ldcw_align(x); | 15 | volatile unsigned int *a = __ldcw_align(x); |
| 22 | return *a == 0; | 16 | return *a == 0; |
| 23 | } | 17 | } |
| 24 | 18 | ||
| 25 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | 19 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 26 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 20 | #define __raw_spin_unlock_wait(x) \ |
| 21 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | ||
| 27 | 22 | ||
| 28 | static inline void _raw_spin_lock(spinlock_t *x) | 23 | static inline void __raw_spin_lock(raw_spinlock_t *x) |
| 29 | { | 24 | { |
| 30 | volatile unsigned int *a; | 25 | volatile unsigned int *a; |
| 31 | 26 | ||
| @@ -36,7 +31,7 @@ static inline void _raw_spin_lock(spinlock_t *x) | |||
| 36 | mb(); | 31 | mb(); |
| 37 | } | 32 | } |
| 38 | 33 | ||
| 39 | static inline void _raw_spin_unlock(spinlock_t *x) | 34 | static inline void __raw_spin_unlock(raw_spinlock_t *x) |
| 40 | { | 35 | { |
| 41 | volatile unsigned int *a; | 36 | volatile unsigned int *a; |
| 42 | mb(); | 37 | mb(); |
| @@ -45,7 +40,7 @@ static inline void _raw_spin_unlock(spinlock_t *x) | |||
| 45 | mb(); | 40 | mb(); |
| 46 | } | 41 | } |
| 47 | 42 | ||
| 48 | static inline int _raw_spin_trylock(spinlock_t *x) | 43 | static inline int __raw_spin_trylock(raw_spinlock_t *x) |
| 49 | { | 44 | { |
| 50 | volatile unsigned int *a; | 45 | volatile unsigned int *a; |
| 51 | int ret; | 46 | int ret; |
| @@ -57,131 +52,38 @@ static inline int _raw_spin_trylock(spinlock_t *x) | |||
| 57 | 52 | ||
| 58 | return ret; | 53 | return ret; |
| 59 | } | 54 | } |
| 60 | |||
| 61 | #define spin_lock_own(LOCK, LOCATION) ((void)0) | ||
| 62 | |||
| 63 | #else /* !(CONFIG_DEBUG_SPINLOCK) */ | ||
| 64 | |||
| 65 | #define SPINLOCK_MAGIC 0x1D244B3C | ||
| 66 | |||
| 67 | #define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__ , NULL, 0, -1, NULL, NULL } | ||
| 68 | #undef SPIN_LOCK_UNLOCKED | ||
| 69 | #define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED | ||
| 70 | |||
| 71 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
| 72 | |||
| 73 | #define CHECK_LOCK(x) \ | ||
| 74 | do { \ | ||
| 75 | if (unlikely((x)->magic != SPINLOCK_MAGIC)) { \ | ||
| 76 | printk(KERN_ERR "%s:%d: spin_is_locked" \ | ||
| 77 | " on uninitialized spinlock %p.\n", \ | ||
| 78 | __FILE__, __LINE__, (x)); \ | ||
| 79 | } \ | ||
| 80 | } while(0) | ||
| 81 | |||
| 82 | #define spin_is_locked(x) \ | ||
| 83 | ({ \ | ||
| 84 | CHECK_LOCK(x); \ | ||
| 85 | volatile unsigned int *a = __ldcw_align(x); \ | ||
| 86 | if (unlikely((*a == 0) && (x)->babble)) { \ | ||
| 87 | (x)->babble--; \ | ||
| 88 | printk("KERN_WARNING \ | ||
| 89 | %s:%d: spin_is_locked(%s/%p) already" \ | ||
| 90 | " locked by %s:%d in %s at %p(%d)\n", \ | ||
| 91 | __FILE__,__LINE__, (x)->module, (x), \ | ||
| 92 | (x)->bfile, (x)->bline, (x)->task->comm,\ | ||
| 93 | (x)->previous, (x)->oncpu); \ | ||
| 94 | } \ | ||
| 95 | *a == 0; \ | ||
| 96 | }) | ||
| 97 | |||
| 98 | #define spin_unlock_wait(x) \ | ||
| 99 | do { \ | ||
| 100 | CHECK_LOCK(x); \ | ||
| 101 | volatile unsigned int *a = __ldcw_align(x); \ | ||
| 102 | if (unlikely((*a == 0) && (x)->babble)) { \ | ||
| 103 | (x)->babble--; \ | ||
| 104 | printk("KERN_WARNING \ | ||
| 105 | %s:%d: spin_unlock_wait(%s/%p)" \ | ||
| 106 | " owned by %s:%d in %s at %p(%d)\n", \ | ||
| 107 | __FILE__,__LINE__, (x)->module, (x), \ | ||
| 108 | (x)->bfile, (x)->bline, (x)->task->comm,\ | ||
| 109 | (x)->previous, (x)->oncpu); \ | ||
| 110 | } \ | ||
| 111 | barrier(); \ | ||
| 112 | } while (*((volatile unsigned char *)(__ldcw_align(x))) == 0) | ||
| 113 | |||
| 114 | extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no); | ||
| 115 | extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int); | ||
| 116 | extern int _dbg_spin_trylock(spinlock_t * lock, const char *, int); | ||
| 117 | |||
| 118 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 119 | |||
| 120 | #define _raw_spin_unlock(lock) _dbg_spin_unlock(lock, __FILE__, __LINE__) | ||
| 121 | #define _raw_spin_lock(lock) _dbg_spin_lock(lock, __FILE__, __LINE__) | ||
| 122 | #define _raw_spin_trylock(lock) _dbg_spin_trylock(lock, __FILE__, __LINE__) | ||
| 123 | |||
| 124 | /* just in case we need it */ | ||
| 125 | #define spin_lock_own(LOCK, LOCATION) \ | ||
| 126 | do { \ | ||
| 127 | volatile unsigned int *a = __ldcw_align(LOCK); \ | ||
| 128 | if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id()))) \ | ||
| 129 | printk("KERN_WARNING \ | ||
| 130 | %s: called on %d from %p but lock %s on %d\n", \ | ||
| 131 | LOCATION, smp_processor_id(), \ | ||
| 132 | __builtin_return_address(0), \ | ||
| 133 | (*a == 0) ? "taken" : "freed", (LOCK)->on_cpu); \ | ||
| 134 | } while (0) | ||
| 135 | |||
| 136 | #endif /* !(CONFIG_DEBUG_SPINLOCK) */ | ||
| 137 | 55 | ||
| 138 | /* | 56 | /* |
| 139 | * Read-write spinlocks, allowing multiple readers | 57 | * Read-write spinlocks, allowing multiple readers |
| 140 | * but only one writer. | 58 | * but only one writer. |
| 141 | */ | 59 | */ |
| 142 | typedef struct { | ||
| 143 | spinlock_t lock; | ||
| 144 | volatile int counter; | ||
| 145 | #ifdef CONFIG_PREEMPT | ||
| 146 | unsigned int break_lock; | ||
| 147 | #endif | ||
| 148 | } rwlock_t; | ||
| 149 | |||
| 150 | #define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 } | ||
| 151 | |||
| 152 | #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0) | ||
| 153 | 60 | ||
| 154 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 61 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
| 155 | 62 | ||
| 156 | /* read_lock, read_unlock are pretty straightforward. Of course it somehow | 63 | /* read_lock, read_unlock are pretty straightforward. Of course it somehow |
| 157 | * sucks we end up saving/restoring flags twice for read_lock_irqsave and so on. */ | 64 | * sucks we end up saving/restoring flags twice for read_lock_irqsave and so on. */ |
| 158 | 65 | ||
| 159 | #ifdef CONFIG_DEBUG_RWLOCK | 66 | static __inline__ void __raw_read_lock(raw_rwlock_t *rw) |
| 160 | extern void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline); | ||
| 161 | #define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__) | ||
| 162 | #else | ||
| 163 | static __inline__ void _raw_read_lock(rwlock_t *rw) | ||
| 164 | { | 67 | { |
| 165 | unsigned long flags; | 68 | unsigned long flags; |
| 166 | local_irq_save(flags); | 69 | local_irq_save(flags); |
| 167 | _raw_spin_lock(&rw->lock); | 70 | __raw_spin_lock(&rw->lock); |
| 168 | 71 | ||
| 169 | rw->counter++; | 72 | rw->counter++; |
| 170 | 73 | ||
| 171 | _raw_spin_unlock(&rw->lock); | 74 | __raw_spin_unlock(&rw->lock); |
| 172 | local_irq_restore(flags); | 75 | local_irq_restore(flags); |
| 173 | } | 76 | } |
| 174 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
| 175 | 77 | ||
| 176 | static __inline__ void _raw_read_unlock(rwlock_t *rw) | 78 | static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) |
| 177 | { | 79 | { |
| 178 | unsigned long flags; | 80 | unsigned long flags; |
| 179 | local_irq_save(flags); | 81 | local_irq_save(flags); |
| 180 | _raw_spin_lock(&rw->lock); | 82 | __raw_spin_lock(&rw->lock); |
| 181 | 83 | ||
| 182 | rw->counter--; | 84 | rw->counter--; |
| 183 | 85 | ||
| 184 | _raw_spin_unlock(&rw->lock); | 86 | __raw_spin_unlock(&rw->lock); |
| 185 | local_irq_restore(flags); | 87 | local_irq_restore(flags); |
| 186 | } | 88 | } |
| 187 | 89 | ||
| @@ -194,20 +96,17 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw) | |||
| 194 | * writers) in interrupt handlers someone fucked up and we'd dead-lock | 96 | * writers) in interrupt handlers someone fucked up and we'd dead-lock |
| 195 | * sooner or later anyway. prumpf */ | 97 | * sooner or later anyway. prumpf */ |
| 196 | 98 | ||
| 197 | #ifdef CONFIG_DEBUG_RWLOCK | 99 | static __inline__ void __raw_write_lock(raw_rwlock_t *rw) |
| 198 | extern void _dbg_write_lock(rwlock_t * rw, const char *bfile, int bline); | ||
| 199 | #define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__) | ||
| 200 | #else | ||
| 201 | static __inline__ void _raw_write_lock(rwlock_t *rw) | ||
| 202 | { | 100 | { |
| 203 | retry: | 101 | retry: |
| 204 | _raw_spin_lock(&rw->lock); | 102 | __raw_spin_lock(&rw->lock); |
| 205 | 103 | ||
| 206 | if(rw->counter != 0) { | 104 | if(rw->counter != 0) { |
| 207 | /* this basically never happens */ | 105 | /* this basically never happens */ |
| 208 | _raw_spin_unlock(&rw->lock); | 106 | __raw_spin_unlock(&rw->lock); |
| 209 | 107 | ||
| 210 | while(rw->counter != 0); | 108 | while (rw->counter != 0) |
| 109 | cpu_relax(); | ||
| 211 | 110 | ||
| 212 | goto retry; | 111 | goto retry; |
| 213 | } | 112 | } |
| @@ -215,26 +114,21 @@ retry: | |||
| 215 | /* got it. now leave without unlocking */ | 114 | /* got it. now leave without unlocking */ |
| 216 | rw->counter = -1; /* remember we are locked */ | 115 | rw->counter = -1; /* remember we are locked */ |
| 217 | } | 116 | } |
| 218 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
| 219 | 117 | ||
| 220 | /* write_unlock is absolutely trivial - we don't have to wait for anything */ | 118 | /* write_unlock is absolutely trivial - we don't have to wait for anything */ |
| 221 | 119 | ||
| 222 | static __inline__ void _raw_write_unlock(rwlock_t *rw) | 120 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) |
| 223 | { | 121 | { |
| 224 | rw->counter = 0; | 122 | rw->counter = 0; |
| 225 | _raw_spin_unlock(&rw->lock); | 123 | __raw_spin_unlock(&rw->lock); |
| 226 | } | 124 | } |
| 227 | 125 | ||
| 228 | #ifdef CONFIG_DEBUG_RWLOCK | 126 | static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) |
| 229 | extern int _dbg_write_trylock(rwlock_t * rw, const char *bfile, int bline); | ||
| 230 | #define _raw_write_trylock(rw) _dbg_write_trylock(rw, __FILE__, __LINE__) | ||
| 231 | #else | ||
| 232 | static __inline__ int _raw_write_trylock(rwlock_t *rw) | ||
| 233 | { | 127 | { |
| 234 | _raw_spin_lock(&rw->lock); | 128 | __raw_spin_lock(&rw->lock); |
| 235 | if (rw->counter != 0) { | 129 | if (rw->counter != 0) { |
| 236 | /* this basically never happens */ | 130 | /* this basically never happens */ |
| 237 | _raw_spin_unlock(&rw->lock); | 131 | __raw_spin_unlock(&rw->lock); |
| 238 | 132 | ||
| 239 | return 0; | 133 | return 0; |
| 240 | } | 134 | } |
| @@ -243,14 +137,13 @@ static __inline__ int _raw_write_trylock(rwlock_t *rw) | |||
| 243 | rw->counter = -1; /* remember we are locked */ | 137 | rw->counter = -1; /* remember we are locked */ |
| 244 | return 1; | 138 | return 1; |
| 245 | } | 139 | } |
| 246 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
| 247 | 140 | ||
| 248 | static __inline__ int is_read_locked(rwlock_t *rw) | 141 | static __inline__ int __raw_is_read_locked(raw_rwlock_t *rw) |
| 249 | { | 142 | { |
| 250 | return rw->counter > 0; | 143 | return rw->counter > 0; |
| 251 | } | 144 | } |
| 252 | 145 | ||
| 253 | static __inline__ int is_write_locked(rwlock_t *rw) | 146 | static __inline__ int __raw_is_write_locked(raw_rwlock_t *rw) |
| 254 | { | 147 | { |
| 255 | return rw->counter < 0; | 148 | return rw->counter < 0; |
| 256 | } | 149 | } |
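read_trylock is the one rwlock operation parisc leaves to the core kernel. At this point in the tree the generic helper is simply the blocking read lock reporting success, roughly (an assumption-level rendering of generic__raw_read_trylock):

    static __inline__ int sketch_generic_read_trylock(raw_rwlock_t *lock)
    {
            /* may spin: a writer holds the inner spinlock for its
             * whole critical section, so readers wait right here */
            __raw_read_lock(lock);
            return 1;
    }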
diff --git a/include/asm-parisc/spinlock_types.h b/include/asm-parisc/spinlock_types.h new file mode 100644 index 000000000000..785bba822fbf --- /dev/null +++ b/include/asm-parisc/spinlock_types.h | |||
| @@ -0,0 +1,21 @@ | |||
| 1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
| 2 | #define __ASM_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int lock[4]; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | raw_spinlock_t lock; | ||
| 16 | volatile int counter; | ||
| 17 | } raw_rwlock_t; | ||
| 18 | |||
| 19 | #define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 } | ||
| 20 | |||
| 21 | #endif | ||
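The four-word lock exists because ldcw, the only atomic primitive, needs a 16-byte-aligned address on older PA processors; __ldcw_align() picks the aligned word inside the block. Roughly (a sketch of the alignment trick, not the real kernel macro):

    /* choose the 16-byte aligned word inside lock[4] (illustrative) */
    #define SKETCH_LDCW_ALIGN(l) \
            ((volatile unsigned int *)(((unsigned long)(l) + 0xf) & ~0xfUL))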
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h index 81c543339036..26ff844a21c1 100644 --- a/include/asm-parisc/system.h +++ b/include/asm-parisc/system.h | |||
| @@ -160,29 +160,7 @@ static inline void set_eiem(unsigned long val) | |||
| 160 | }) | 160 | }) |
| 161 | 161 | ||
| 162 | #ifdef CONFIG_SMP | 162 | #ifdef CONFIG_SMP |
| 163 | /* | 163 | # define __lock_aligned __attribute__((__section__(".data.lock_aligned"))) |
| 164 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | ||
| 165 | */ | ||
| 166 | |||
| 167 | typedef struct { | ||
| 168 | volatile unsigned int lock[4]; | ||
| 169 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 170 | unsigned long magic; | ||
| 171 | volatile unsigned int babble; | ||
| 172 | const char *module; | ||
| 173 | char *bfile; | ||
| 174 | int bline; | ||
| 175 | int oncpu; | ||
| 176 | void *previous; | ||
| 177 | struct task_struct * task; | ||
| 178 | #endif | ||
| 179 | #ifdef CONFIG_PREEMPT | ||
| 180 | unsigned int break_lock; | ||
| 181 | #endif | ||
| 182 | } spinlock_t; | ||
| 183 | |||
| 184 | #define __lock_aligned __attribute__((__section__(".data.lock_aligned"))) | ||
| 185 | |||
| 186 | #endif | 164 | #endif |
| 187 | 165 | ||
| 188 | #define KERNEL_START (0x10100000 - 0x1000) | 166 | #define KERNEL_START (0x10100000 - 0x1000) |
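__lock_aligned is all that remains here: it parks the hashed atomic locks from asm/atomic.h in a dedicated, alignment-friendly data section. The array is then defined along these lines in the parisc library code (placement and initializer shown as an assumption):

    raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
            [0 ... (ATOMIC_HASH_SIZE - 1)] = __RAW_SPIN_LOCK_UNLOCKED
    };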
diff --git a/include/asm-ppc/smp.h b/include/asm-ppc/smp.h index 17530c232c76..829481c0a9dc 100644 --- a/include/asm-ppc/smp.h +++ b/include/asm-ppc/smp.h | |||
| @@ -41,6 +41,10 @@ extern void smp_send_xmon_break(int cpu); | |||
| 41 | struct pt_regs; | 41 | struct pt_regs; |
| 42 | extern void smp_message_recv(int, struct pt_regs *); | 42 | extern void smp_message_recv(int, struct pt_regs *); |
| 43 | 43 | ||
| 44 | extern int __cpu_disable(void); | ||
| 45 | extern void __cpu_die(unsigned int cpu); | ||
| 46 | extern void cpu_die(void) __attribute__((noreturn)); | ||
| 47 | |||
| 44 | #define NO_PROC_ID 0xFF /* No processor magic marker */ | 48 | #define NO_PROC_ID 0xFF /* No processor magic marker */ |
| 45 | #define PROC_CHANGE_PENALTY 20 | 49 | #define PROC_CHANGE_PENALTY 20 |
| 46 | 50 | ||
| @@ -64,6 +68,8 @@ extern struct klock_info_struct klock_info; | |||
| 64 | 68 | ||
| 65 | #else /* !(CONFIG_SMP) */ | 69 | #else /* !(CONFIG_SMP) */ |
| 66 | 70 | ||
| 71 | static inline void cpu_die(void) { } | ||
| 72 | |||
| 67 | #endif /* !(CONFIG_SMP) */ | 73 | #endif /* !(CONFIG_SMP) */ |
| 68 | 74 | ||
| 69 | #endif /* !(_PPC_SMP_H) */ | 75 | #endif /* !(_PPC_SMP_H) */ |
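The three new externs are ppc's CPU-hotplug entry points. Their division of labour, sketched as control flow (illustrative only; the real implementations are arch code):

    /* on a surviving CPU: take 'cpu' down and wait for it to stop */
    static int sketch_offline_one(unsigned int cpu)
    {
            /* the dying CPU first runs __cpu_disable() to detach itself
             * from interrupts and the scheduler; if that fails it stays up */
            __cpu_die(cpu);         /* survivor waits here for it to stop */
            return 0;
    }
    /* the dying CPU's idle loop then calls cpu_die(), which never returns */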
diff --git a/include/asm-ppc/spinlock.h b/include/asm-ppc/spinlock.h index 909199aae104..20edcf2a6e0c 100644 --- a/include/asm-ppc/spinlock.h +++ b/include/asm-ppc/spinlock.h | |||
| @@ -5,41 +5,21 @@ | |||
| 5 | 5 | ||
| 6 | /* | 6 | /* |
| 7 | * Simple spin lock operations. | 7 | * Simple spin lock operations. |
| 8 | * | ||
| 9 | * (the type definitions are in asm/spinlock_types.h) | ||
| 8 | */ | 10 | */ |
| 9 | 11 | ||
| 10 | typedef struct { | 12 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
| 11 | volatile unsigned long lock; | 13 | #define __raw_spin_unlock_wait(lock) \ |
| 12 | #ifdef CONFIG_DEBUG_SPINLOCK | 14 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
| 13 | volatile unsigned long owner_pc; | 15 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 14 | volatile unsigned long owner_cpu; | 16 | |
| 15 | #endif | 17 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
| 16 | #ifdef CONFIG_PREEMPT | ||
| 17 | unsigned int break_lock; | ||
| 18 | #endif | ||
| 19 | } spinlock_t; | ||
| 20 | |||
| 21 | #ifdef __KERNEL__ | ||
| 22 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 23 | #define SPINLOCK_DEBUG_INIT , 0, 0 | ||
| 24 | #else | ||
| 25 | #define SPINLOCK_DEBUG_INIT /* */ | ||
| 26 | #endif | ||
| 27 | |||
| 28 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 SPINLOCK_DEBUG_INIT } | ||
| 29 | |||
| 30 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
| 31 | #define spin_is_locked(x) ((x)->lock != 0) | ||
| 32 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | ||
| 33 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 34 | |||
| 35 | #ifndef CONFIG_DEBUG_SPINLOCK | ||
| 36 | |||
| 37 | static inline void _raw_spin_lock(spinlock_t *lock) | ||
| 38 | { | 18 | { |
| 39 | unsigned long tmp; | 19 | unsigned long tmp; |
| 40 | 20 | ||
| 41 | __asm__ __volatile__( | 21 | __asm__ __volatile__( |
| 42 | "b 1f # spin_lock\n\ | 22 | "b 1f # __raw_spin_lock\n\ |
| 43 | 2: lwzx %0,0,%1\n\ | 23 | 2: lwzx %0,0,%1\n\ |
| 44 | cmpwi 0,%0,0\n\ | 24 | cmpwi 0,%0,0\n\ |
| 45 | bne+ 2b\n\ | 25 | bne+ 2b\n\ |
| @@ -55,21 +35,13 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 55 | : "cr0", "memory"); | 35 | : "cr0", "memory"); |
| 56 | } | 36 | } |
| 57 | 37 | ||
| 58 | static inline void _raw_spin_unlock(spinlock_t *lock) | 38 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 59 | { | 39 | { |
| 60 | __asm__ __volatile__("eieio # spin_unlock": : :"memory"); | 40 | __asm__ __volatile__("eieio # __raw_spin_unlock": : :"memory"); |
| 61 | lock->lock = 0; | 41 | lock->lock = 0; |
| 62 | } | 42 | } |
| 63 | 43 | ||
| 64 | #define _raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock)) | 44 | #define __raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock)) |
| 65 | |||
| 66 | #else | ||
| 67 | |||
| 68 | extern void _raw_spin_lock(spinlock_t *lock); | ||
| 69 | extern void _raw_spin_unlock(spinlock_t *lock); | ||
| 70 | extern int _raw_spin_trylock(spinlock_t *lock); | ||
| 71 | |||
| 72 | #endif | ||
| 73 | 45 | ||
| 74 | /* | 46 | /* |
| 75 | * Read-write spinlocks, allowing multiple readers | 47 | * Read-write spinlocks, allowing multiple readers |
| @@ -81,22 +53,11 @@ extern int _raw_spin_trylock(spinlock_t *lock); | |||
| 81 | * irq-safe write-lock, but readers can get non-irqsafe | 53 | * irq-safe write-lock, but readers can get non-irqsafe |
| 82 | * read-locks. | 54 | * read-locks. |
| 83 | */ | 55 | */ |
| 84 | typedef struct { | ||
| 85 | volatile signed int lock; | ||
| 86 | #ifdef CONFIG_PREEMPT | ||
| 87 | unsigned int break_lock; | ||
| 88 | #endif | ||
| 89 | } rwlock_t; | ||
| 90 | 56 | ||
| 91 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | 57 | #define __raw_read_can_lock(rw) ((rw)->lock >= 0) |
| 92 | #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) | 58 | #define __raw_write_can_lock(rw) (!(rw)->lock) |
| 93 | 59 | ||
| 94 | #define read_can_lock(rw) ((rw)->lock >= 0) | 60 | static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) |
| 95 | #define write_can_lock(rw) (!(rw)->lock) | ||
| 96 | |||
| 97 | #ifndef CONFIG_DEBUG_SPINLOCK | ||
| 98 | |||
| 99 | static __inline__ int _raw_read_trylock(rwlock_t *rw) | ||
| 100 | { | 61 | { |
| 101 | signed int tmp; | 62 | signed int tmp; |
| 102 | 63 | ||
| @@ -116,7 +77,7 @@ static __inline__ int _raw_read_trylock(rwlock_t *rw) | |||
| 116 | return tmp > 0; | 77 | return tmp > 0; |
| 117 | } | 78 | } |
| 118 | 79 | ||
| 119 | static __inline__ void _raw_read_lock(rwlock_t *rw) | 80 | static __inline__ void __raw_read_lock(raw_rwlock_t *rw) |
| 120 | { | 81 | { |
| 121 | signed int tmp; | 82 | signed int tmp; |
| 122 | 83 | ||
| @@ -137,7 +98,7 @@ static __inline__ void _raw_read_lock(rwlock_t *rw) | |||
| 137 | : "cr0", "memory"); | 98 | : "cr0", "memory"); |
| 138 | } | 99 | } |
| 139 | 100 | ||
| 140 | static __inline__ void _raw_read_unlock(rwlock_t *rw) | 101 | static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) |
| 141 | { | 102 | { |
| 142 | signed int tmp; | 103 | signed int tmp; |
| 143 | 104 | ||
| @@ -153,7 +114,7 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw) | |||
| 153 | : "cr0", "memory"); | 114 | : "cr0", "memory"); |
| 154 | } | 115 | } |
| 155 | 116 | ||
| 156 | static __inline__ int _raw_write_trylock(rwlock_t *rw) | 117 | static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) |
| 157 | { | 118 | { |
| 158 | signed int tmp; | 119 | signed int tmp; |
| 159 | 120 | ||
| @@ -173,7 +134,7 @@ static __inline__ int _raw_write_trylock(rwlock_t *rw) | |||
| 173 | return tmp == 0; | 134 | return tmp == 0; |
| 174 | } | 135 | } |
| 175 | 136 | ||
| 176 | static __inline__ void _raw_write_lock(rwlock_t *rw) | 137 | static __inline__ void __raw_write_lock(raw_rwlock_t *rw) |
| 177 | { | 138 | { |
| 178 | signed int tmp; | 139 | signed int tmp; |
| 179 | 140 | ||
| @@ -194,22 +155,10 @@ static __inline__ void _raw_write_lock(rwlock_t *rw) | |||
| 194 | : "cr0", "memory"); | 155 | : "cr0", "memory"); |
| 195 | } | 156 | } |
| 196 | 157 | ||
| 197 | static __inline__ void _raw_write_unlock(rwlock_t *rw) | 158 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) |
| 198 | { | 159 | { |
| 199 | __asm__ __volatile__("eieio # write_unlock": : :"memory"); | 160 | __asm__ __volatile__("eieio # write_unlock": : :"memory"); |
| 200 | rw->lock = 0; | 161 | rw->lock = 0; |
| 201 | } | 162 | } |
| 202 | 163 | ||
| 203 | #else | ||
| 204 | |||
| 205 | extern void _raw_read_lock(rwlock_t *rw); | ||
| 206 | extern void _raw_read_unlock(rwlock_t *rw); | ||
| 207 | extern void _raw_write_lock(rwlock_t *rw); | ||
| 208 | extern void _raw_write_unlock(rwlock_t *rw); | ||
| 209 | extern int _raw_read_trylock(rwlock_t *rw); | ||
| 210 | extern int _raw_write_trylock(rwlock_t *rw); | ||
| 211 | |||
| 212 | #endif | ||
| 213 | |||
| 214 | #endif /* __ASM_SPINLOCK_H */ | 164 | #endif /* __ASM_SPINLOCK_H */ |
| 215 | #endif /* __KERNEL__ */ | ||
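The ppc rwlock keeps a signed count: positive while readers hold it, 0 when free, negative for a writer, which is exactly what the two can-lock tests above check. The lwarx/stwcx. read-acquire path restated as a compare-and-swap sketch (illustration only, not the kernel's code):

    static __inline__ int sketch_ppc_read_trylock(volatile signed int *lock)
    {
            signed int old = *lock;

            while (old >= 0) {      /* negative means a writer is active */
                    if (__atomic_compare_exchange_n(lock, &old, old + 1,
                                    0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                            return 1;       /* counted in as one more reader */
            }
            return 0;
    }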
diff --git a/include/asm-ppc/spinlock_types.h b/include/asm-ppc/spinlock_types.h new file mode 100644 index 000000000000..7919ccc75b8a --- /dev/null +++ b/include/asm-ppc/spinlock_types.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
| 2 | #define __ASM_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned long lock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile signed int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
| 19 | |||
| 20 | #endif | ||
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h index 513a334c5810..d754ab570fe0 100644 --- a/include/asm-ppc/system.h +++ b/include/asm-ppc/system.h | |||
| @@ -88,6 +88,7 @@ extern void *cacheable_memcpy(void *, const void *, unsigned int); | |||
| 88 | extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); | 88 | extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); |
| 89 | extern void bad_page_fault(struct pt_regs *, unsigned long, int); | 89 | extern void bad_page_fault(struct pt_regs *, unsigned long, int); |
| 90 | extern void die(const char *, struct pt_regs *, long); | 90 | extern void die(const char *, struct pt_regs *, long); |
| 91 | extern void _exception(int, struct pt_regs *, int, unsigned long); | ||
| 91 | #ifdef CONFIG_BOOKE_WDT | 92 | #ifdef CONFIG_BOOKE_WDT |
| 92 | extern u32 booke_wdt_enabled; | 93 | extern u32 booke_wdt_enabled; |
| 93 | extern u32 booke_wdt_period; | 94 | extern u32 booke_wdt_period; |
diff --git a/include/asm-ppc64/spinlock.h b/include/asm-ppc64/spinlock.h index acd11564dd75..14cb895bb607 100644 --- a/include/asm-ppc64/spinlock.h +++ b/include/asm-ppc64/spinlock.h | |||
| @@ -15,36 +15,42 @@ | |||
| 15 | * modify it under the terms of the GNU General Public License | 15 | * modify it under the terms of the GNU General Public License |
| 16 | * as published by the Free Software Foundation; either version | 16 | * as published by the Free Software Foundation; either version |
| 17 | * 2 of the License, or (at your option) any later version. | 17 | * 2 of the License, or (at your option) any later version. |
| 18 | * | ||
| 19 | * (the type definitions are in asm/spinlock_types.h) | ||
| 18 | */ | 20 | */ |
| 19 | #include <linux/config.h> | 21 | #include <linux/config.h> |
| 20 | #include <asm/paca.h> | 22 | #include <asm/paca.h> |
| 21 | #include <asm/hvcall.h> | 23 | #include <asm/hvcall.h> |
| 22 | #include <asm/iSeries/HvCall.h> | 24 | #include <asm/iSeries/HvCall.h> |
| 23 | 25 | ||
| 24 | typedef struct { | 26 | #define __raw_spin_is_locked(x) ((x)->slock != 0) |
| 25 | volatile unsigned int lock; | ||
| 26 | #ifdef CONFIG_PREEMPT | ||
| 27 | unsigned int break_lock; | ||
| 28 | #endif | ||
| 29 | } spinlock_t; | ||
| 30 | 27 | ||
| 31 | typedef struct { | 28 | /* |
| 32 | volatile signed int lock; | 29 | * This returns the old value in the lock, so we succeeded |
| 33 | #ifdef CONFIG_PREEMPT | 30 | * in getting the lock if the return value is 0. |
| 34 | unsigned int break_lock; | 31 | */ |
| 35 | #endif | 32 | static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock) |
| 36 | } rwlock_t; | 33 | { |
| 34 | unsigned long tmp, tmp2; | ||
| 37 | 35 | ||
| 38 | #ifdef __KERNEL__ | 36 | __asm__ __volatile__( |
| 39 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | 37 | " lwz %1,%3(13) # __spin_trylock\n\ |
| 38 | 1: lwarx %0,0,%2\n\ | ||
| 39 | cmpwi 0,%0,0\n\ | ||
| 40 | bne- 2f\n\ | ||
| 41 | stwcx. %1,0,%2\n\ | ||
| 42 | bne- 1b\n\ | ||
| 43 | isync\n\ | ||
| 44 | 2:" : "=&r" (tmp), "=&r" (tmp2) | ||
| 45 | : "r" (&lock->slock), "i" (offsetof(struct paca_struct, lock_token)) | ||
| 46 | : "cr0", "memory"); | ||
| 40 | 47 | ||
| 41 | #define spin_is_locked(x) ((x)->lock != 0) | 48 | return tmp; |
| 42 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | 49 | } |
| 43 | 50 | ||
| 44 | static __inline__ void _raw_spin_unlock(spinlock_t *lock) | 51 | static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock) |
| 45 | { | 52 | { |
| 46 | __asm__ __volatile__("lwsync # spin_unlock": : :"memory"); | 53 | return __spin_trylock(lock) == 0; |
| 47 | lock->lock = 0; | ||
| 48 | } | 54 | } |
| 49 | 55 | ||
| 50 | /* | 56 | /* |
| @@ -64,44 +70,15 @@ static __inline__ void _raw_spin_unlock(spinlock_t *lock) | |||
| 64 | #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) | 70 | #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) |
| 65 | /* We only yield to the hypervisor if we are in shared processor mode */ | 71 | /* We only yield to the hypervisor if we are in shared processor mode */ |
| 66 | #define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc) | 72 | #define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc) |
| 67 | extern void __spin_yield(spinlock_t *lock); | 73 | extern void __spin_yield(raw_spinlock_t *lock); |
| 68 | extern void __rw_yield(rwlock_t *lock); | 74 | extern void __rw_yield(raw_rwlock_t *lock); |
| 69 | #else /* SPLPAR || ISERIES */ | 75 | #else /* SPLPAR || ISERIES */ |
| 70 | #define __spin_yield(x) barrier() | 76 | #define __spin_yield(x) barrier() |
| 71 | #define __rw_yield(x) barrier() | 77 | #define __rw_yield(x) barrier() |
| 72 | #define SHARED_PROCESSOR 0 | 78 | #define SHARED_PROCESSOR 0 |
| 73 | #endif | 79 | #endif |
| 74 | extern void spin_unlock_wait(spinlock_t *lock); | ||
| 75 | |||
| 76 | /* | ||
| 77 | * This returns the old value in the lock, so we succeeded | ||
| 78 | * in getting the lock if the return value is 0. | ||
| 79 | */ | ||
| 80 | static __inline__ unsigned long __spin_trylock(spinlock_t *lock) | ||
| 81 | { | ||
| 82 | unsigned long tmp, tmp2; | ||
| 83 | |||
| 84 | __asm__ __volatile__( | ||
| 85 | " lwz %1,%3(13) # __spin_trylock\n\ | ||
| 86 | 1: lwarx %0,0,%2\n\ | ||
| 87 | cmpwi 0,%0,0\n\ | ||
| 88 | bne- 2f\n\ | ||
| 89 | stwcx. %1,0,%2\n\ | ||
| 90 | bne- 1b\n\ | ||
| 91 | isync\n\ | ||
| 92 | 2:" : "=&r" (tmp), "=&r" (tmp2) | ||
| 93 | : "r" (&lock->lock), "i" (offsetof(struct paca_struct, lock_token)) | ||
| 94 | : "cr0", "memory"); | ||
| 95 | |||
| 96 | return tmp; | ||
| 97 | } | ||
| 98 | |||
| 99 | static int __inline__ _raw_spin_trylock(spinlock_t *lock) | ||
| 100 | { | ||
| 101 | return __spin_trylock(lock) == 0; | ||
| 102 | } | ||
| 103 | 80 | ||
| 104 | static void __inline__ _raw_spin_lock(spinlock_t *lock) | 81 | static void __inline__ __raw_spin_lock(raw_spinlock_t *lock) |
| 105 | { | 82 | { |
| 106 | while (1) { | 83 | while (1) { |
| 107 | if (likely(__spin_trylock(lock) == 0)) | 84 | if (likely(__spin_trylock(lock) == 0)) |
| @@ -110,12 +87,12 @@ static void __inline__ _raw_spin_lock(spinlock_t *lock) | |||
| 110 | HMT_low(); | 87 | HMT_low(); |
| 111 | if (SHARED_PROCESSOR) | 88 | if (SHARED_PROCESSOR) |
| 112 | __spin_yield(lock); | 89 | __spin_yield(lock); |
| 113 | } while (unlikely(lock->lock != 0)); | 90 | } while (unlikely(lock->slock != 0)); |
| 114 | HMT_medium(); | 91 | HMT_medium(); |
| 115 | } | 92 | } |
| 116 | } | 93 | } |
| 117 | 94 | ||
| 118 | static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) | 95 | static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) |
| 119 | { | 96 | { |
| 120 | unsigned long flags_dis; | 97 | unsigned long flags_dis; |
| 121 | 98 | ||
| @@ -128,12 +105,20 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag | |||
| 128 | HMT_low(); | 105 | HMT_low(); |
| 129 | if (SHARED_PROCESSOR) | 106 | if (SHARED_PROCESSOR) |
| 130 | __spin_yield(lock); | 107 | __spin_yield(lock); |
| 131 | } while (unlikely(lock->lock != 0)); | 108 | } while (unlikely(lock->slock != 0)); |
| 132 | HMT_medium(); | 109 | HMT_medium(); |
| 133 | local_irq_restore(flags_dis); | 110 | local_irq_restore(flags_dis); |
| 134 | } | 111 | } |
| 135 | } | 112 | } |
| 136 | 113 | ||
| 114 | static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) | ||
| 115 | { | ||
| 116 | __asm__ __volatile__("lwsync # __raw_spin_unlock": : :"memory"); | ||
| 117 | lock->slock = 0; | ||
| 118 | } | ||
| 119 | |||
| 120 | extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); | ||
| 121 | |||
| 137 | /* | 122 | /* |
| 138 | * Read-write spinlocks, allowing multiple readers | 123 | * Read-write spinlocks, allowing multiple readers |
| 139 | * but only one writer. | 124 | * but only one writer. |
| @@ -144,24 +129,15 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag | |||
| 144 | * irq-safe write-lock, but readers can get non-irqsafe | 129 | * irq-safe write-lock, but readers can get non-irqsafe |
| 145 | * read-locks. | 130 | * read-locks. |
| 146 | */ | 131 | */ |
| 147 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
| 148 | 132 | ||
| 149 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | 133 | #define __raw_read_can_lock(rw) ((rw)->lock >= 0) |
| 150 | 134 | #define __raw_write_can_lock(rw) (!(rw)->lock) | |
| 151 | #define read_can_lock(rw) ((rw)->lock >= 0) | ||
| 152 | #define write_can_lock(rw) (!(rw)->lock) | ||
| 153 | |||
| 154 | static __inline__ void _raw_write_unlock(rwlock_t *rw) | ||
| 155 | { | ||
| 156 | __asm__ __volatile__("lwsync # write_unlock": : :"memory"); | ||
| 157 | rw->lock = 0; | ||
| 158 | } | ||
| 159 | 135 | ||
| 160 | /* | 136 | /* |
| 161 | * This returns the old value in the lock + 1, | 137 | * This returns the old value in the lock + 1, |
| 162 | * so we got a read lock if the return value is > 0. | 138 | * so we got a read lock if the return value is > 0. |
| 163 | */ | 139 | */ |
| 164 | static long __inline__ __read_trylock(rwlock_t *rw) | 140 | static long __inline__ __read_trylock(raw_rwlock_t *rw) |
| 165 | { | 141 | { |
| 166 | long tmp; | 142 | long tmp; |
| 167 | 143 | ||
| @@ -180,45 +156,11 @@ static long __inline__ __read_trylock(rwlock_t *rw) | |||
| 180 | return tmp; | 156 | return tmp; |
| 181 | } | 157 | } |
| 182 | 158 | ||
| 183 | static int __inline__ _raw_read_trylock(rwlock_t *rw) | ||
| 184 | { | ||
| 185 | return __read_trylock(rw) > 0; | ||
| 186 | } | ||
| 187 | |||
| 188 | static void __inline__ _raw_read_lock(rwlock_t *rw) | ||
| 189 | { | ||
| 190 | while (1) { | ||
| 191 | if (likely(__read_trylock(rw) > 0)) | ||
| 192 | break; | ||
| 193 | do { | ||
| 194 | HMT_low(); | ||
| 195 | if (SHARED_PROCESSOR) | ||
| 196 | __rw_yield(rw); | ||
| 197 | } while (unlikely(rw->lock < 0)); | ||
| 198 | HMT_medium(); | ||
| 199 | } | ||
| 200 | } | ||
| 201 | |||
| 202 | static void __inline__ _raw_read_unlock(rwlock_t *rw) | ||
| 203 | { | ||
| 204 | long tmp; | ||
| 205 | |||
| 206 | __asm__ __volatile__( | ||
| 207 | "eieio # read_unlock\n\ | ||
| 208 | 1: lwarx %0,0,%1\n\ | ||
| 209 | addic %0,%0,-1\n\ | ||
| 210 | stwcx. %0,0,%1\n\ | ||
| 211 | bne- 1b" | ||
| 212 | : "=&r"(tmp) | ||
| 213 | : "r"(&rw->lock) | ||
| 214 | : "cr0", "memory"); | ||
| 215 | } | ||
| 216 | |||
| 217 | /* | 159 | /* |
| 218 | * This returns the old value in the lock, | 160 | * This returns the old value in the lock, |
| 219 | * so we got the write lock if the return value is 0. | 161 | * so we got the write lock if the return value is 0. |
| 220 | */ | 162 | */ |
| 221 | static __inline__ long __write_trylock(rwlock_t *rw) | 163 | static __inline__ long __write_trylock(raw_rwlock_t *rw) |
| 222 | { | 164 | { |
| 223 | long tmp, tmp2; | 165 | long tmp, tmp2; |
| 224 | 166 | ||
| @@ -237,12 +179,21 @@ static __inline__ long __write_trylock(rwlock_t *rw) | |||
| 237 | return tmp; | 179 | return tmp; |
| 238 | } | 180 | } |
| 239 | 181 | ||
| 240 | static int __inline__ _raw_write_trylock(rwlock_t *rw) | 182 | static void __inline__ __raw_read_lock(raw_rwlock_t *rw) |
| 241 | { | 183 | { |
| 242 | return __write_trylock(rw) == 0; | 184 | while (1) { |
| 185 | if (likely(__read_trylock(rw) > 0)) | ||
| 186 | break; | ||
| 187 | do { | ||
| 188 | HMT_low(); | ||
| 189 | if (SHARED_PROCESSOR) | ||
| 190 | __rw_yield(rw); | ||
| 191 | } while (unlikely(rw->lock < 0)); | ||
| 192 | HMT_medium(); | ||
| 193 | } | ||
| 243 | } | 194 | } |
| 244 | 195 | ||
| 245 | static void __inline__ _raw_write_lock(rwlock_t *rw) | 196 | static void __inline__ __raw_write_lock(raw_rwlock_t *rw) |
| 246 | { | 197 | { |
| 247 | while (1) { | 198 | while (1) { |
| 248 | if (likely(__write_trylock(rw) == 0)) | 199 | if (likely(__write_trylock(rw) == 0)) |
| @@ -256,5 +207,35 @@ static void __inline__ _raw_write_lock(rwlock_t *rw) | |||
| 256 | } | 207 | } |
| 257 | } | 208 | } |
| 258 | 209 | ||
| 259 | #endif /* __KERNEL__ */ | 210 | static int __inline__ __raw_read_trylock(raw_rwlock_t *rw) |
| 211 | { | ||
| 212 | return __read_trylock(rw) > 0; | ||
| 213 | } | ||
| 214 | |||
| 215 | static int __inline__ __raw_write_trylock(raw_rwlock_t *rw) | ||
| 216 | { | ||
| 217 | return __write_trylock(rw) == 0; | ||
| 218 | } | ||
| 219 | |||
| 220 | static void __inline__ __raw_read_unlock(raw_rwlock_t *rw) | ||
| 221 | { | ||
| 222 | long tmp; | ||
| 223 | |||
| 224 | __asm__ __volatile__( | ||
| 225 | "eieio # read_unlock\n\ | ||
| 226 | 1: lwarx %0,0,%1\n\ | ||
| 227 | addic %0,%0,-1\n\ | ||
| 228 | stwcx. %0,0,%1\n\ | ||
| 229 | bne- 1b" | ||
| 230 | : "=&r"(tmp) | ||
| 231 | : "r"(&rw->lock) | ||
| 232 | : "cr0", "memory"); | ||
| 233 | } | ||
| 234 | |||
| 235 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) | ||
| 236 | { | ||
| 237 | __asm__ __volatile__("lwsync # write_unlock": : :"memory"); | ||
| 238 | rw->lock = 0; | ||
| 239 | } | ||
| 240 | |||
| 260 | #endif /* __ASM_SPINLOCK_H */ | 241 | #endif /* __ASM_SPINLOCK_H */ |
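The two comments above state the trylock contract for this file: __read_trylock hands back the old counter value plus one (positive means the read lock was taken) and __write_trylock hands back the old value itself (zero means the write lock was taken). A minimal sketch of the same signed-counter protocol, with GCC __atomic builtins standing in for the lwarx/stwcx. loops; the sketch_ names and the -1 writer value are illustrative, the authoritative sequences are the asm in the hunks above:

    typedef struct { volatile int lock; } sketch_rwlock_t;

    /* Returns old value + 1, so > 0 means the read lock was taken. */
    static long sketch_read_trylock(sketch_rwlock_t *rw)
    {
        int old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);

        while (old >= 0) {      /* no writer: try to bump the count */
            if (__atomic_compare_exchange_n(&rw->lock, &old, old + 1, 0,
                                            __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                return old + 1;
        }
        return old + 1;         /* <= 0: a writer holds the lock */
    }

    /* Returns the old value, so 0 means the write lock was taken. */
    static long sketch_write_trylock(sketch_rwlock_t *rw)
    {
        int old = 0;

        if (__atomic_compare_exchange_n(&rw->lock, &old, -1, 0,
                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
            return 0;
        return old;             /* readers (> 0) or a writer (< 0) hold it */
    }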
diff --git a/include/asm-ppc64/spinlock_types.h b/include/asm-ppc64/spinlock_types.h new file mode 100644 index 000000000000..a37c8eabb9f2 --- /dev/null +++ b/include/asm-ppc64/spinlock_types.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
| 2 | #define __ASM_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int slock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile signed int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
| 19 | |||
| 20 | #endif | ||
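Each architecture now exposes only its bare lock words through asm/spinlock_types.h, and the #error guard forces every inclusion to go through linux/spinlock_types.h. A hedged sketch of what the generic side is expected to build on top of the raw type (field names here are illustrative, not copied from the tree):

    #define __LINUX_SPINLOCK_TYPES_H      /* satisfies the guard above */
    #include <asm/spinlock_types.h>

    typedef struct {
        raw_spinlock_t raw_lock;          /* the arch word defined above */
    #ifdef CONFIG_PREEMPT
        unsigned int break_lock;          /* hoisted out of the arch headers */
    #endif
    } spinlock_t;

    #define SPIN_LOCK_UNLOCKED (spinlock_t) { __RAW_SPIN_LOCK_UNLOCKED }

This is why the per-arch typedefs and the CONFIG_PREEMPT break_lock fields disappear from every spinlock.h in this patch: the wrapper can carry them once, generically.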
diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h index 321b23bba1ec..273dbecf8ace 100644 --- a/include/asm-s390/spinlock.h +++ b/include/asm-s390/spinlock.h | |||
| @@ -27,25 +27,19 @@ _raw_compare_and_swap(volatile unsigned int *lock, | |||
| 27 | * on the local processor, one does not. | 27 | * on the local processor, one does not. |
| 28 | * | 28 | * |
| 29 | * We make no fairness assumptions. They have a cost. | 29 | * We make no fairness assumptions. They have a cost. |
| 30 | * | ||
| 31 | * (the type definitions are in asm/spinlock_types.h) | ||
| 30 | */ | 32 | */ |
| 31 | 33 | ||
| 32 | typedef struct { | 34 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
| 33 | volatile unsigned int lock; | 35 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 34 | #ifdef CONFIG_PREEMPT | 36 | #define __raw_spin_unlock_wait(lock) \ |
| 35 | unsigned int break_lock; | 37 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
| 36 | #endif | ||
| 37 | } __attribute__ ((aligned (4))) spinlock_t; | ||
| 38 | |||
| 39 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
| 40 | #define spin_lock_init(lp) do { (lp)->lock = 0; } while(0) | ||
| 41 | #define spin_unlock_wait(lp) do { barrier(); } while(((volatile spinlock_t *)(lp))->lock) | ||
| 42 | #define spin_is_locked(x) ((x)->lock != 0) | ||
| 43 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 44 | 38 | ||
| 45 | extern void _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc); | 39 | extern void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc); |
| 46 | extern int _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc); | 40 | extern int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc); |
| 47 | 41 | ||
| 48 | static inline void _raw_spin_lock(spinlock_t *lp) | 42 | static inline void __raw_spin_lock(raw_spinlock_t *lp) |
| 49 | { | 43 | { |
| 50 | unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); | 44 | unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); |
| 51 | 45 | ||
| @@ -53,7 +47,7 @@ static inline void _raw_spin_lock(spinlock_t *lp) | |||
| 53 | _raw_spin_lock_wait(lp, pc); | 47 | _raw_spin_lock_wait(lp, pc); |
| 54 | } | 48 | } |
| 55 | 49 | ||
| 56 | static inline int _raw_spin_trylock(spinlock_t *lp) | 50 | static inline int __raw_spin_trylock(raw_spinlock_t *lp) |
| 57 | { | 51 | { |
| 58 | unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); | 52 | unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); |
| 59 | 53 | ||
| @@ -62,7 +56,7 @@ static inline int _raw_spin_trylock(spinlock_t *lp) | |||
| 62 | return _raw_spin_trylock_retry(lp, pc); | 56 | return _raw_spin_trylock_retry(lp, pc); |
| 63 | } | 57 | } |
| 64 | 58 | ||
| 65 | static inline void _raw_spin_unlock(spinlock_t *lp) | 59 | static inline void __raw_spin_unlock(raw_spinlock_t *lp) |
| 66 | { | 60 | { |
| 67 | _raw_compare_and_swap(&lp->lock, lp->lock, 0); | 61 | _raw_compare_and_swap(&lp->lock, lp->lock, 0); |
| 68 | } | 62 | } |
| @@ -77,36 +71,25 @@ static inline void _raw_spin_unlock(spinlock_t *lp) | |||
| 77 | * irq-safe write-lock, but readers can get non-irqsafe | 71 | * irq-safe write-lock, but readers can get non-irqsafe |
| 78 | * read-locks. | 72 | * read-locks. |
| 79 | */ | 73 | */ |
| 80 | typedef struct { | ||
| 81 | volatile unsigned int lock; | ||
| 82 | volatile unsigned long owner_pc; | ||
| 83 | #ifdef CONFIG_PREEMPT | ||
| 84 | unsigned int break_lock; | ||
| 85 | #endif | ||
| 86 | } rwlock_t; | ||
| 87 | |||
| 88 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } | ||
| 89 | |||
| 90 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
| 91 | 74 | ||
| 92 | /** | 75 | /** |
| 93 | * read_can_lock - would read_trylock() succeed? | 76 | * read_can_lock - would read_trylock() succeed? |
| 94 | * @lock: the rwlock in question. | 77 | * @lock: the rwlock in question. |
| 95 | */ | 78 | */ |
| 96 | #define read_can_lock(x) ((int)(x)->lock >= 0) | 79 | #define __raw_read_can_lock(x) ((int)(x)->lock >= 0) |
| 97 | 80 | ||
| 98 | /** | 81 | /** |
| 99 | * write_can_lock - would write_trylock() succeed? | 82 | * write_can_lock - would write_trylock() succeed? |
| 100 | * @lock: the rwlock in question. | 83 | * @lock: the rwlock in question. |
| 101 | */ | 84 | */ |
| 102 | #define write_can_lock(x) ((x)->lock == 0) | 85 | #define __raw_write_can_lock(x) ((x)->lock == 0) |
| 103 | 86 | ||
| 104 | extern void _raw_read_lock_wait(rwlock_t *lp); | 87 | extern void _raw_read_lock_wait(raw_rwlock_t *lp); |
| 105 | extern int _raw_read_trylock_retry(rwlock_t *lp); | 88 | extern int _raw_read_trylock_retry(raw_rwlock_t *lp); |
| 106 | extern void _raw_write_lock_wait(rwlock_t *lp); | 89 | extern void _raw_write_lock_wait(raw_rwlock_t *lp); |
| 107 | extern int _raw_write_trylock_retry(rwlock_t *lp); | 90 | extern int _raw_write_trylock_retry(raw_rwlock_t *lp); |
| 108 | 91 | ||
| 109 | static inline void _raw_read_lock(rwlock_t *rw) | 92 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
| 110 | { | 93 | { |
| 111 | unsigned int old; | 94 | unsigned int old; |
| 112 | old = rw->lock & 0x7fffffffU; | 95 | old = rw->lock & 0x7fffffffU; |
| @@ -114,7 +97,7 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
| 114 | _raw_read_lock_wait(rw); | 97 | _raw_read_lock_wait(rw); |
| 115 | } | 98 | } |
| 116 | 99 | ||
| 117 | static inline void _raw_read_unlock(rwlock_t *rw) | 100 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
| 118 | { | 101 | { |
| 119 | unsigned int old, cmp; | 102 | unsigned int old, cmp; |
| 120 | 103 | ||
| @@ -125,18 +108,18 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
| 125 | } while (cmp != old); | 108 | } while (cmp != old); |
| 126 | } | 109 | } |
| 127 | 110 | ||
| 128 | static inline void _raw_write_lock(rwlock_t *rw) | 111 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
| 129 | { | 112 | { |
| 130 | if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) | 113 | if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) |
| 131 | _raw_write_lock_wait(rw); | 114 | _raw_write_lock_wait(rw); |
| 132 | } | 115 | } |
| 133 | 116 | ||
| 134 | static inline void _raw_write_unlock(rwlock_t *rw) | 117 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
| 135 | { | 118 | { |
| 136 | _raw_compare_and_swap(&rw->lock, 0x80000000, 0); | 119 | _raw_compare_and_swap(&rw->lock, 0x80000000, 0); |
| 137 | } | 120 | } |
| 138 | 121 | ||
| 139 | static inline int _raw_read_trylock(rwlock_t *rw) | 122 | static inline int __raw_read_trylock(raw_rwlock_t *rw) |
| 140 | { | 123 | { |
| 141 | unsigned int old; | 124 | unsigned int old; |
| 142 | old = rw->lock & 0x7fffffffU; | 125 | old = rw->lock & 0x7fffffffU; |
| @@ -145,7 +128,7 @@ static inline int _raw_read_trylock(rwlock_t *rw) | |||
| 145 | return _raw_read_trylock_retry(rw); | 128 | return _raw_read_trylock_retry(rw); |
| 146 | } | 129 | } |
| 147 | 130 | ||
| 148 | static inline int _raw_write_trylock(rwlock_t *rw) | 131 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
| 149 | { | 132 | { |
| 150 | if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) | 133 | if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) |
| 151 | return 1; | 134 | return 1; |
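Every s390 primitive above funnels through _raw_compare_and_swap, and the lock word doubles as an owner tag: while held it stores 1 | the taker's return address. A sketch of that protocol with __sync_val_compare_and_swap standing in for the cs-instruction wrapper (sketch_ names invented):

    typedef struct { volatile unsigned int lock; } sketch_spinlock_t;

    static void sketch_spin_lock(sketch_spinlock_t *lp, unsigned int pc)
    {
        /* 0 -> pc claims the lock and records who took it.  The real
         * slow path (_raw_spin_lock_wait) can also yield a virtual
         * cpu instead of burning cycles in this loop. */
        while (__sync_val_compare_and_swap(&lp->lock, 0, pc) != 0)
            ;
    }

    static void sketch_spin_unlock(sketch_spinlock_t *lp)
    {
        /* as in __raw_spin_unlock above: swap the stored tag for 0 */
        __sync_val_compare_and_swap(&lp->lock, lp->lock, 0);
    }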
diff --git a/include/asm-s390/spinlock_types.h b/include/asm-s390/spinlock_types.h new file mode 100644 index 000000000000..f79a2216204f --- /dev/null +++ b/include/asm-s390/spinlock_types.h | |||
| @@ -0,0 +1,21 @@ | |||
| 1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
| 2 | #define __ASM_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int lock; | ||
| 10 | } __attribute__ ((aligned (4))) raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int lock; | ||
| 16 | volatile unsigned int owner_pc; | ||
| 17 | } raw_rwlock_t; | ||
| 18 | |||
| 19 | #define __RAW_RW_LOCK_UNLOCKED { 0, 0 } | ||
| 20 | |||
| 21 | #endif | ||
diff --git a/include/asm-sh/spinlock.h b/include/asm-sh/spinlock.h index e770b55649eb..846322d4c35d 100644 --- a/include/asm-sh/spinlock.h +++ b/include/asm-sh/spinlock.h | |||
| @@ -15,20 +15,11 @@ | |||
| 15 | /* | 15 | /* |
| 16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
| 17 | */ | 17 | */ |
| 18 | typedef struct { | ||
| 19 | volatile unsigned long lock; | ||
| 20 | #ifdef CONFIG_PREEMPT | ||
| 21 | unsigned int break_lock; | ||
| 22 | #endif | ||
| 23 | } spinlock_t; | ||
| 24 | 18 | ||
| 25 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | 19 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
| 26 | 20 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | |
| 27 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | 21 | #define __raw_spin_unlock_wait(x) \ |
| 28 | 22 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | |
| 29 | #define spin_is_locked(x) ((x)->lock != 0) | ||
| 30 | #define spin_unlock_wait(x) do { barrier(); } while (spin_is_locked(x)) | ||
| 31 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 32 | 23 | ||
| 33 | /* | 24 | /* |
| 34 | * Simple spin lock operations. There are two variants, one clears IRQ's | 25 | * Simple spin lock operations. There are two variants, one clears IRQ's |
| @@ -36,7 +27,7 @@ typedef struct { | |||
| 36 | * | 27 | * |
| 37 | * We make no fairness assumptions. They have a cost. | 28 | * We make no fairness assumptions. They have a cost. |
| 38 | */ | 29 | */ |
| 39 | static inline void _raw_spin_lock(spinlock_t *lock) | 30 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
| 40 | { | 31 | { |
| 41 | __asm__ __volatile__ ( | 32 | __asm__ __volatile__ ( |
| 42 | "1:\n\t" | 33 | "1:\n\t" |
| @@ -49,14 +40,14 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 49 | ); | 40 | ); |
| 50 | } | 41 | } |
| 51 | 42 | ||
| 52 | static inline void _raw_spin_unlock(spinlock_t *lock) | 43 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 53 | { | 44 | { |
| 54 | assert_spin_locked(lock); | 45 | assert_spin_locked(lock); |
| 55 | 46 | ||
| 56 | lock->lock = 0; | 47 | lock->lock = 0; |
| 57 | } | 48 | } |
| 58 | 49 | ||
| 59 | #define _raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock)) | 50 | #define __raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock)) |
| 60 | 51 | ||
| 61 | /* | 52 | /* |
| 62 | * Read-write spinlocks, allowing multiple readers but only one writer. | 53 | * Read-write spinlocks, allowing multiple readers but only one writer. |
| @@ -66,51 +57,40 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
| 66 | * needs to get an irq-safe write-lock, but readers can get non-irqsafe | 57 | * needs to get an irq-safe write-lock, but readers can get non-irqsafe |
| 67 | * read-locks. | 58 | * read-locks. |
| 68 | */ | 59 | */ |
| 69 | typedef struct { | 60 | |
| 70 | spinlock_t lock; | 61 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
| 71 | atomic_t counter; | ||
| 72 | #ifdef CONFIG_PREEMPT | ||
| 73 | unsigned int break_lock; | ||
| 74 | #endif | ||
| 75 | } rwlock_t; | ||
| 76 | |||
| 77 | #define RW_LOCK_BIAS 0x01000000 | ||
| 78 | #define RW_LOCK_UNLOCKED (rwlock_t) { { 0 }, { RW_LOCK_BIAS } } | ||
| 79 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0) | ||
| 80 | |||
| 81 | static inline void _raw_read_lock(rwlock_t *rw) | ||
| 82 | { | 62 | { |
| 83 | _raw_spin_lock(&rw->lock); | 63 | __raw_spin_lock(&rw->lock); |
| 84 | 64 | ||
| 85 | atomic_inc(&rw->counter); | 65 | atomic_inc(&rw->counter); |
| 86 | 66 | ||
| 87 | _raw_spin_unlock(&rw->lock); | 67 | __raw_spin_unlock(&rw->lock); |
| 88 | } | 68 | } |
| 89 | 69 | ||
| 90 | static inline void _raw_read_unlock(rwlock_t *rw) | 70 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
| 91 | { | 71 | { |
| 92 | _raw_spin_lock(&rw->lock); | 72 | __raw_spin_lock(&rw->lock); |
| 93 | 73 | ||
| 94 | atomic_dec(&rw->counter); | 74 | atomic_dec(&rw->counter); |
| 95 | 75 | ||
| 96 | _raw_spin_unlock(&rw->lock); | 76 | __raw_spin_unlock(&rw->lock); |
| 97 | } | 77 | } |
| 98 | 78 | ||
| 99 | static inline void _raw_write_lock(rwlock_t *rw) | 79 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
| 100 | { | 80 | { |
| 101 | _raw_spin_lock(&rw->lock); | 81 | __raw_spin_lock(&rw->lock); |
| 102 | atomic_set(&rw->counter, -1); | 82 | atomic_set(&rw->counter, -1); |
| 103 | } | 83 | } |
| 104 | 84 | ||
| 105 | static inline void _raw_write_unlock(rwlock_t *rw) | 85 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
| 106 | { | 86 | { |
| 107 | atomic_set(&rw->counter, 0); | 87 | atomic_set(&rw->counter, 0); |
| 108 | _raw_spin_unlock(&rw->lock); | 88 | __raw_spin_unlock(&rw->lock); |
| 109 | } | 89 | } |
| 110 | 90 | ||
| 111 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 91 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
| 112 | 92 | ||
| 113 | static inline int _raw_write_trylock(rwlock_t *rw) | 93 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
| 114 | { | 94 | { |
| 115 | if (atomic_sub_and_test(RW_LOCK_BIAS, &rw->counter)) | 95 | if (atomic_sub_and_test(RW_LOCK_BIAS, &rw->counter)) |
| 116 | return 1; | 96 | return 1; |
| @@ -121,4 +101,3 @@ static inline int _raw_write_trylock(rwlock_t *rw) | |||
| 121 | } | 101 | } |
| 122 | 102 | ||
| 123 | #endif /* __ASM_SH_SPINLOCK_H */ | 103 | #endif /* __ASM_SH_SPINLOCK_H */ |
| 124 | |||
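The sh rwlock is built by composition rather than by a dedicated asm sequence: an inner raw spinlock serializes updates to an atomic reader count, and a writer simply keeps the inner lock held until write_unlock. The same shape in C11 atomics, as a sketch (the sketch_ names are invented and atomic_flag stands in for raw_spinlock_t):

    #include <stdatomic.h>

    typedef struct {
        atomic_flag lock;       /* inner spinlock; init with ATOMIC_FLAG_INIT */
        atomic_int  counter;    /* reader count; -1 while write-locked */
    } sketch_rwlock_t;

    static void inner_lock(sketch_rwlock_t *rw)
    {
        while (atomic_flag_test_and_set_explicit(&rw->lock,
                                                 memory_order_acquire))
            ;                   /* spin */
    }

    static void inner_unlock(sketch_rwlock_t *rw)
    {
        atomic_flag_clear_explicit(&rw->lock, memory_order_release);
    }

    static void sketch_read_lock(sketch_rwlock_t *rw)
    {
        inner_lock(rw);                 /* excluded while a writer holds it */
        atomic_fetch_add(&rw->counter, 1);
        inner_unlock(rw);
    }

    static void sketch_write_lock(sketch_rwlock_t *rw)
    {
        inner_lock(rw);                 /* kept held across the section */
        atomic_store(&rw->counter, -1);
    }

    static void sketch_write_unlock(sketch_rwlock_t *rw)
    {
        atomic_store(&rw->counter, 0);
        inner_unlock(rw);
    }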
diff --git a/include/asm-sh/spinlock_types.h b/include/asm-sh/spinlock_types.h new file mode 100644 index 000000000000..8c41b6c3aac8 --- /dev/null +++ b/include/asm-sh/spinlock_types.h | |||
| @@ -0,0 +1,22 @@ | |||
| 1 | #ifndef __ASM_SH_SPINLOCK_TYPES_H | ||
| 2 | #define __ASM_SH_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned long lock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | raw_spinlock_t lock; | ||
| 16 | atomic_t counter; | ||
| 17 | } raw_rwlock_t; | ||
| 18 | |||
| 19 | #define RW_LOCK_BIAS 0x01000000 | ||
| 20 | #define __RAW_RW_LOCK_UNLOCKED { { 0 }, { RW_LOCK_BIAS } } | ||
| 21 | |||
| 22 | #endif | ||
diff --git a/include/asm-sparc/spinlock.h b/include/asm-sparc/spinlock.h index 0cbd87ad4912..111727a2bb4e 100644 --- a/include/asm-sparc/spinlock.h +++ b/include/asm-sparc/spinlock.h | |||
| @@ -12,96 +12,12 @@ | |||
| 12 | 12 | ||
| 13 | #include <asm/psr.h> | 13 | #include <asm/psr.h> |
| 14 | 14 | ||
| 15 | #ifdef CONFIG_DEBUG_SPINLOCK | 15 | #define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) |
| 16 | struct _spinlock_debug { | ||
| 17 | unsigned char lock; | ||
| 18 | unsigned long owner_pc; | ||
| 19 | #ifdef CONFIG_PREEMPT | ||
| 20 | unsigned int break_lock; | ||
| 21 | #endif | ||
| 22 | }; | ||
| 23 | typedef struct _spinlock_debug spinlock_t; | ||
| 24 | |||
| 25 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0 } | ||
| 26 | #define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0) | ||
| 27 | #define spin_is_locked(lp) (*((volatile unsigned char *)(&((lp)->lock))) != 0) | ||
| 28 | #define spin_unlock_wait(lp) do { barrier(); } while(*(volatile unsigned char *)(&(lp)->lock)) | ||
| 29 | |||
| 30 | extern void _do_spin_lock(spinlock_t *lock, char *str); | ||
| 31 | extern int _spin_trylock(spinlock_t *lock); | ||
| 32 | extern void _do_spin_unlock(spinlock_t *lock); | ||
| 33 | |||
| 34 | #define _raw_spin_trylock(lp) _spin_trylock(lp) | ||
| 35 | #define _raw_spin_lock(lock) _do_spin_lock(lock, "spin_lock") | ||
| 36 | #define _raw_spin_unlock(lock) _do_spin_unlock(lock) | ||
| 37 | |||
| 38 | struct _rwlock_debug { | ||
| 39 | volatile unsigned int lock; | ||
| 40 | unsigned long owner_pc; | ||
| 41 | unsigned long reader_pc[NR_CPUS]; | ||
| 42 | #ifdef CONFIG_PREEMPT | ||
| 43 | unsigned int break_lock; | ||
| 44 | #endif | ||
| 45 | }; | ||
| 46 | typedef struct _rwlock_debug rwlock_t; | ||
| 47 | |||
| 48 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, {0} } | ||
| 49 | |||
| 50 | #define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0) | ||
| 51 | |||
| 52 | extern void _do_read_lock(rwlock_t *rw, char *str); | ||
| 53 | extern void _do_read_unlock(rwlock_t *rw, char *str); | ||
| 54 | extern void _do_write_lock(rwlock_t *rw, char *str); | ||
| 55 | extern void _do_write_unlock(rwlock_t *rw); | ||
| 56 | |||
| 57 | #define _raw_read_lock(lock) \ | ||
| 58 | do { unsigned long flags; \ | ||
| 59 | local_irq_save(flags); \ | ||
| 60 | _do_read_lock(lock, "read_lock"); \ | ||
| 61 | local_irq_restore(flags); \ | ||
| 62 | } while(0) | ||
| 63 | |||
| 64 | #define _raw_read_unlock(lock) \ | ||
| 65 | do { unsigned long flags; \ | ||
| 66 | local_irq_save(flags); \ | ||
| 67 | _do_read_unlock(lock, "read_unlock"); \ | ||
| 68 | local_irq_restore(flags); \ | ||
| 69 | } while(0) | ||
| 70 | |||
| 71 | #define _raw_write_lock(lock) \ | ||
| 72 | do { unsigned long flags; \ | ||
| 73 | local_irq_save(flags); \ | ||
| 74 | _do_write_lock(lock, "write_lock"); \ | ||
| 75 | local_irq_restore(flags); \ | ||
| 76 | } while(0) | ||
| 77 | |||
| 78 | #define _raw_write_unlock(lock) \ | ||
| 79 | do { unsigned long flags; \ | ||
| 80 | local_irq_save(flags); \ | ||
| 81 | _do_write_unlock(lock); \ | ||
| 82 | local_irq_restore(flags); \ | ||
| 83 | } while(0) | ||
| 84 | |||
| 85 | #else /* !CONFIG_DEBUG_SPINLOCK */ | ||
| 86 | |||
| 87 | typedef struct { | ||
| 88 | unsigned char lock; | ||
| 89 | #ifdef CONFIG_PREEMPT | ||
| 90 | unsigned int break_lock; | ||
| 91 | #endif | ||
| 92 | } spinlock_t; | ||
| 93 | |||
| 94 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
| 95 | |||
| 96 | #define spin_lock_init(lock) (*((unsigned char *)(lock)) = 0) | ||
| 97 | #define spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) | ||
| 98 | 16 | ||
| 99 | #define spin_unlock_wait(lock) \ | 17 | #define __raw_spin_unlock_wait(lock) \ |
| 100 | do { \ | 18 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
| 101 | barrier(); \ | ||
| 102 | } while(*((volatile unsigned char *)lock)) | ||
| 103 | 19 | ||
| 104 | extern __inline__ void _raw_spin_lock(spinlock_t *lock) | 20 | extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock) |
| 105 | { | 21 | { |
| 106 | __asm__ __volatile__( | 22 | __asm__ __volatile__( |
| 107 | "\n1:\n\t" | 23 | "\n1:\n\t" |
| @@ -121,7 +37,7 @@ extern __inline__ void _raw_spin_lock(spinlock_t *lock) | |||
| 121 | : "g2", "memory", "cc"); | 37 | : "g2", "memory", "cc"); |
| 122 | } | 38 | } |
| 123 | 39 | ||
| 124 | extern __inline__ int _raw_spin_trylock(spinlock_t *lock) | 40 | extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock) |
| 125 | { | 41 | { |
| 126 | unsigned int result; | 42 | unsigned int result; |
| 127 | __asm__ __volatile__("ldstub [%1], %0" | 43 | __asm__ __volatile__("ldstub [%1], %0" |
| @@ -131,7 +47,7 @@ extern __inline__ int _raw_spin_trylock(spinlock_t *lock) | |||
| 131 | return (result == 0); | 47 | return (result == 0); |
| 132 | } | 48 | } |
| 133 | 49 | ||
| 134 | extern __inline__ void _raw_spin_unlock(spinlock_t *lock) | 50 | extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) |
| 135 | { | 51 | { |
| 136 | __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); | 52 | __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); |
| 137 | } | 53 | } |
| @@ -147,23 +63,11 @@ extern __inline__ void _raw_spin_unlock(spinlock_t *lock) | |||
| 147 | * | 63 | * |
| 148 | * XXX This might create some problems with my dual spinlock | 64 | * XXX This might create some problems with my dual spinlock |
| 149 | * XXX scheme, deadlocks etc. -DaveM | 65 | * XXX scheme, deadlocks etc. -DaveM |
| 150 | */ | 66 | * |
| 151 | typedef struct { | 67 | * Sort of like atomic_t's on Sparc, but even more clever. |
| 152 | volatile unsigned int lock; | ||
| 153 | #ifdef CONFIG_PREEMPT | ||
| 154 | unsigned int break_lock; | ||
| 155 | #endif | ||
| 156 | } rwlock_t; | ||
| 157 | |||
| 158 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
| 159 | |||
| 160 | #define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0) | ||
| 161 | |||
| 162 | |||
| 163 | /* Sort of like atomic_t's on Sparc, but even more clever. | ||
| 164 | * | 68 | * |
| 165 | * ------------------------------------ | 69 | * ------------------------------------ |
| 166 | * | 24-bit counter | wlock | rwlock_t | 70 | * | 24-bit counter | wlock | raw_rwlock_t |
| 167 | * ------------------------------------ | 71 | * ------------------------------------ |
| 168 | * 31 8 7 0 | 72 | * 31 8 7 0 |
| 169 | * | 73 | * |
| @@ -174,9 +78,9 @@ typedef struct { | |||
| 174 | * | 78 | * |
| 175 | * Unfortunately this scheme limits us to ~16,000,000 cpus. | 79 | * Unfortunately this scheme limits us to ~16,000,000 cpus. |
| 176 | */ | 80 | */ |
| 177 | extern __inline__ void _read_lock(rwlock_t *rw) | 81 | extern __inline__ void __read_lock(raw_rwlock_t *rw) |
| 178 | { | 82 | { |
| 179 | register rwlock_t *lp asm("g1"); | 83 | register raw_rwlock_t *lp asm("g1"); |
| 180 | lp = rw; | 84 | lp = rw; |
| 181 | __asm__ __volatile__( | 85 | __asm__ __volatile__( |
| 182 | "mov %%o7, %%g4\n\t" | 86 | "mov %%o7, %%g4\n\t" |
| @@ -187,16 +91,16 @@ extern __inline__ void _read_lock(rwlock_t *rw) | |||
| 187 | : "g2", "g4", "memory", "cc"); | 91 | : "g2", "g4", "memory", "cc"); |
| 188 | } | 92 | } |
| 189 | 93 | ||
| 190 | #define _raw_read_lock(lock) \ | 94 | #define __raw_read_lock(lock) \ |
| 191 | do { unsigned long flags; \ | 95 | do { unsigned long flags; \ |
| 192 | local_irq_save(flags); \ | 96 | local_irq_save(flags); \ |
| 193 | _read_lock(lock); \ | 97 | __raw_read_lock(lock); \ |
| 194 | local_irq_restore(flags); \ | 98 | local_irq_restore(flags); \ |
| 195 | } while(0) | 99 | } while(0) |
| 196 | 100 | ||
| 197 | extern __inline__ void _read_unlock(rwlock_t *rw) | 101 | extern __inline__ void __read_unlock(raw_rwlock_t *rw) |
| 198 | { | 102 | { |
| 199 | register rwlock_t *lp asm("g1"); | 103 | register raw_rwlock_t *lp asm("g1"); |
| 200 | lp = rw; | 104 | lp = rw; |
| 201 | __asm__ __volatile__( | 105 | __asm__ __volatile__( |
| 202 | "mov %%o7, %%g4\n\t" | 106 | "mov %%o7, %%g4\n\t" |
| @@ -207,16 +111,16 @@ extern __inline__ void _read_unlock(rwlock_t *rw) | |||
| 207 | : "g2", "g4", "memory", "cc"); | 111 | : "g2", "g4", "memory", "cc"); |
| 208 | } | 112 | } |
| 209 | 113 | ||
| 210 | #define _raw_read_unlock(lock) \ | 114 | #define __raw_read_unlock(lock) \ |
| 211 | do { unsigned long flags; \ | 115 | do { unsigned long flags; \ |
| 212 | local_irq_save(flags); \ | 116 | local_irq_save(flags); \ |
| 213 | _read_unlock(lock); \ | 117 | __raw_read_unlock(lock); \ |
| 214 | local_irq_restore(flags); \ | 118 | local_irq_restore(flags); \ |
| 215 | } while(0) | 119 | } while(0) |
| 216 | 120 | ||
| 217 | extern __inline__ void _raw_write_lock(rwlock_t *rw) | 121 | extern __inline__ void __raw_write_lock(raw_rwlock_t *rw) |
| 218 | { | 122 | { |
| 219 | register rwlock_t *lp asm("g1"); | 123 | register raw_rwlock_t *lp asm("g1"); |
| 220 | lp = rw; | 124 | lp = rw; |
| 221 | __asm__ __volatile__( | 125 | __asm__ __volatile__( |
| 222 | "mov %%o7, %%g4\n\t" | 126 | "mov %%o7, %%g4\n\t" |
| @@ -227,11 +131,9 @@ extern __inline__ void _raw_write_lock(rwlock_t *rw) | |||
| 227 | : "g2", "g4", "memory", "cc"); | 131 | : "g2", "g4", "memory", "cc"); |
| 228 | } | 132 | } |
| 229 | 133 | ||
| 230 | #define _raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) | 134 | #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) |
| 231 | |||
| 232 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
| 233 | 135 | ||
| 234 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 136 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 235 | 137 | ||
| 236 | #endif /* !(__ASSEMBLY__) */ | 138 | #endif /* !(__ASSEMBLY__) */ |
| 237 | 139 | ||
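The layout comment above packs a 24-bit reader count next to a one-byte writer lock in one word, so the ldstub fast path only ever touches the low byte. Helpers matching the drawing, with invented names, just to make the bit positions concrete:

    #define WLOCK_MASK   0xffUL     /* bits 7..0: the writer byte */
    #define READER_SHIFT 8          /* bits 31..8: the reader count */

    static inline unsigned int readers(unsigned int word)
    {
        return word >> READER_SHIFT;    /* hence the ~16,000,000 cpu limit */
    }

    static inline int write_locked(unsigned int word)
    {
        return (word & WLOCK_MASK) != 0;
    }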
diff --git a/include/asm-sparc/spinlock_types.h b/include/asm-sparc/spinlock_types.h new file mode 100644 index 000000000000..0a0fb116c4ec --- /dev/null +++ b/include/asm-sparc/spinlock_types.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef __SPARC_SPINLOCK_TYPES_H | ||
| 2 | #define __SPARC_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | unsigned char lock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
| 19 | |||
| 20 | #endif | ||
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h index a02c4370eb42..ec85d12d73b9 100644 --- a/include/asm-sparc64/spinlock.h +++ b/include/asm-sparc64/spinlock.h | |||
| @@ -29,24 +29,13 @@ | |||
| 29 | * must be pre-V9 branches. | 29 | * must be pre-V9 branches. |
| 30 | */ | 30 | */ |
| 31 | 31 | ||
| 32 | #ifndef CONFIG_DEBUG_SPINLOCK | 32 | #define __raw_spin_is_locked(lp) ((lp)->lock != 0) |
| 33 | 33 | ||
| 34 | typedef struct { | 34 | #define __raw_spin_unlock_wait(lp) \ |
| 35 | volatile unsigned char lock; | 35 | do { rmb(); \ |
| 36 | #ifdef CONFIG_PREEMPT | 36 | } while((lp)->lock) |
| 37 | unsigned int break_lock; | ||
| 38 | #endif | ||
| 39 | } spinlock_t; | ||
| 40 | #define SPIN_LOCK_UNLOCKED (spinlock_t) {0,} | ||
| 41 | 37 | ||
| 42 | #define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0) | 38 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
| 43 | #define spin_is_locked(lp) ((lp)->lock != 0) | ||
| 44 | |||
| 45 | #define spin_unlock_wait(lp) \ | ||
| 46 | do { rmb(); \ | ||
| 47 | } while((lp)->lock) | ||
| 48 | |||
| 49 | static inline void _raw_spin_lock(spinlock_t *lock) | ||
| 50 | { | 39 | { |
| 51 | unsigned long tmp; | 40 | unsigned long tmp; |
| 52 | 41 | ||
| @@ -67,7 +56,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 67 | : "memory"); | 56 | : "memory"); |
| 68 | } | 57 | } |
| 69 | 58 | ||
| 70 | static inline int _raw_spin_trylock(spinlock_t *lock) | 59 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
| 71 | { | 60 | { |
| 72 | unsigned long result; | 61 | unsigned long result; |
| 73 | 62 | ||
| @@ -81,7 +70,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock) | |||
| 81 | return (result == 0UL); | 70 | return (result == 0UL); |
| 82 | } | 71 | } |
| 83 | 72 | ||
| 84 | static inline void _raw_spin_unlock(spinlock_t *lock) | 73 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 85 | { | 74 | { |
| 86 | __asm__ __volatile__( | 75 | __asm__ __volatile__( |
| 87 | " membar #StoreStore | #LoadStore\n" | 76 | " membar #StoreStore | #LoadStore\n" |
| @@ -91,7 +80,7 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
| 91 | : "memory"); | 80 | : "memory"); |
| 92 | } | 81 | } |
| 93 | 82 | ||
| 94 | static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) | 83 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) |
| 95 | { | 84 | { |
| 96 | unsigned long tmp1, tmp2; | 85 | unsigned long tmp1, tmp2; |
| 97 | 86 | ||
| @@ -115,51 +104,9 @@ static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) | |||
| 115 | : "memory"); | 104 | : "memory"); |
| 116 | } | 105 | } |
| 117 | 106 | ||
| 118 | #else /* !(CONFIG_DEBUG_SPINLOCK) */ | ||
| 119 | |||
| 120 | typedef struct { | ||
| 121 | volatile unsigned char lock; | ||
| 122 | unsigned int owner_pc, owner_cpu; | ||
| 123 | #ifdef CONFIG_PREEMPT | ||
| 124 | unsigned int break_lock; | ||
| 125 | #endif | ||
| 126 | } spinlock_t; | ||
| 127 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff } | ||
| 128 | #define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0) | ||
| 129 | #define spin_is_locked(__lock) ((__lock)->lock != 0) | ||
| 130 | #define spin_unlock_wait(__lock) \ | ||
| 131 | do { \ | ||
| 132 | rmb(); \ | ||
| 133 | } while((__lock)->lock) | ||
| 134 | |||
| 135 | extern void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller); | ||
| 136 | extern void _do_spin_unlock(spinlock_t *lock); | ||
| 137 | extern int _do_spin_trylock(spinlock_t *lock, unsigned long caller); | ||
| 138 | |||
| 139 | #define _raw_spin_trylock(lp) \ | ||
| 140 | _do_spin_trylock(lp, (unsigned long) __builtin_return_address(0)) | ||
| 141 | #define _raw_spin_lock(lock) \ | ||
| 142 | _do_spin_lock(lock, "spin_lock", \ | ||
| 143 | (unsigned long) __builtin_return_address(0)) | ||
| 144 | #define _raw_spin_unlock(lock) _do_spin_unlock(lock) | ||
| 145 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 146 | |||
| 147 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
| 148 | |||
| 149 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ | 107 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ |
| 150 | 108 | ||
| 151 | #ifndef CONFIG_DEBUG_SPINLOCK | 109 | static void inline __read_lock(raw_rwlock_t *lock) |
| 152 | |||
| 153 | typedef struct { | ||
| 154 | volatile unsigned int lock; | ||
| 155 | #ifdef CONFIG_PREEMPT | ||
| 156 | unsigned int break_lock; | ||
| 157 | #endif | ||
| 158 | } rwlock_t; | ||
| 159 | #define RW_LOCK_UNLOCKED (rwlock_t) {0,} | ||
| 160 | #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) | ||
| 161 | |||
| 162 | static void inline __read_lock(rwlock_t *lock) | ||
| 163 | { | 110 | { |
| 164 | unsigned long tmp1, tmp2; | 111 | unsigned long tmp1, tmp2; |
| 165 | 112 | ||
| @@ -184,7 +131,7 @@ static void inline __read_lock(rwlock_t *lock) | |||
| 184 | : "memory"); | 131 | : "memory"); |
| 185 | } | 132 | } |
| 186 | 133 | ||
| 187 | static void inline __read_unlock(rwlock_t *lock) | 134 | static void inline __read_unlock(raw_rwlock_t *lock) |
| 188 | { | 135 | { |
| 189 | unsigned long tmp1, tmp2; | 136 | unsigned long tmp1, tmp2; |
| 190 | 137 | ||
| @@ -201,7 +148,7 @@ static void inline __read_unlock(rwlock_t *lock) | |||
| 201 | : "memory"); | 148 | : "memory"); |
| 202 | } | 149 | } |
| 203 | 150 | ||
| 204 | static void inline __write_lock(rwlock_t *lock) | 151 | static void inline __write_lock(raw_rwlock_t *lock) |
| 205 | { | 152 | { |
| 206 | unsigned long mask, tmp1, tmp2; | 153 | unsigned long mask, tmp1, tmp2; |
| 207 | 154 | ||
| @@ -228,7 +175,7 @@ static void inline __write_lock(rwlock_t *lock) | |||
| 228 | : "memory"); | 175 | : "memory"); |
| 229 | } | 176 | } |
| 230 | 177 | ||
| 231 | static void inline __write_unlock(rwlock_t *lock) | 178 | static void inline __write_unlock(raw_rwlock_t *lock) |
| 232 | { | 179 | { |
| 233 | __asm__ __volatile__( | 180 | __asm__ __volatile__( |
| 234 | " membar #LoadStore | #StoreStore\n" | 181 | " membar #LoadStore | #StoreStore\n" |
| @@ -238,7 +185,7 @@ static void inline __write_unlock(rwlock_t *lock) | |||
| 238 | : "memory"); | 185 | : "memory"); |
| 239 | } | 186 | } |
| 240 | 187 | ||
| 241 | static int inline __write_trylock(rwlock_t *lock) | 188 | static int inline __write_trylock(raw_rwlock_t *lock) |
| 242 | { | 189 | { |
| 243 | unsigned long mask, tmp1, tmp2, result; | 190 | unsigned long mask, tmp1, tmp2, result; |
| 244 | 191 | ||
| @@ -263,78 +210,15 @@ static int inline __write_trylock(rwlock_t *lock) | |||
| 263 | return result; | 210 | return result; |
| 264 | } | 211 | } |
| 265 | 212 | ||
| 266 | #define _raw_read_lock(p) __read_lock(p) | 213 | #define __raw_read_lock(p) __read_lock(p) |
| 267 | #define _raw_read_unlock(p) __read_unlock(p) | 214 | #define __raw_read_unlock(p) __read_unlock(p) |
| 268 | #define _raw_write_lock(p) __write_lock(p) | 215 | #define __raw_write_lock(p) __write_lock(p) |
| 269 | #define _raw_write_unlock(p) __write_unlock(p) | 216 | #define __raw_write_unlock(p) __write_unlock(p) |
| 270 | #define _raw_write_trylock(p) __write_trylock(p) | 217 | #define __raw_write_trylock(p) __write_trylock(p) |
| 271 | 218 | ||
| 272 | #else /* !(CONFIG_DEBUG_SPINLOCK) */ | 219 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
| 273 | 220 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) | |
| 274 | typedef struct { | 221 | #define __raw_write_can_lock(rw) (!(rw)->lock) |
| 275 | volatile unsigned long lock; | ||
| 276 | unsigned int writer_pc, writer_cpu; | ||
| 277 | unsigned int reader_pc[NR_CPUS]; | ||
| 278 | #ifdef CONFIG_PREEMPT | ||
| 279 | unsigned int break_lock; | ||
| 280 | #endif | ||
| 281 | } rwlock_t; | ||
| 282 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { } } | ||
| 283 | #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) | ||
| 284 | |||
| 285 | extern void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller); | ||
| 286 | extern void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller); | ||
| 287 | extern void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller); | ||
| 288 | extern void _do_write_unlock(rwlock_t *rw, unsigned long caller); | ||
| 289 | extern int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller); | ||
| 290 | |||
| 291 | #define _raw_read_lock(lock) \ | ||
| 292 | do { unsigned long flags; \ | ||
| 293 | local_irq_save(flags); \ | ||
| 294 | _do_read_lock(lock, "read_lock", \ | ||
| 295 | (unsigned long) __builtin_return_address(0)); \ | ||
| 296 | local_irq_restore(flags); \ | ||
| 297 | } while(0) | ||
| 298 | |||
| 299 | #define _raw_read_unlock(lock) \ | ||
| 300 | do { unsigned long flags; \ | ||
| 301 | local_irq_save(flags); \ | ||
| 302 | _do_read_unlock(lock, "read_unlock", \ | ||
| 303 | (unsigned long) __builtin_return_address(0)); \ | ||
| 304 | local_irq_restore(flags); \ | ||
| 305 | } while(0) | ||
| 306 | |||
| 307 | #define _raw_write_lock(lock) \ | ||
| 308 | do { unsigned long flags; \ | ||
| 309 | local_irq_save(flags); \ | ||
| 310 | _do_write_lock(lock, "write_lock", \ | ||
| 311 | (unsigned long) __builtin_return_address(0)); \ | ||
| 312 | local_irq_restore(flags); \ | ||
| 313 | } while(0) | ||
| 314 | |||
| 315 | #define _raw_write_unlock(lock) \ | ||
| 316 | do { unsigned long flags; \ | ||
| 317 | local_irq_save(flags); \ | ||
| 318 | _do_write_unlock(lock, \ | ||
| 319 | (unsigned long) __builtin_return_address(0)); \ | ||
| 320 | local_irq_restore(flags); \ | ||
| 321 | } while(0) | ||
| 322 | |||
| 323 | #define _raw_write_trylock(lock) \ | ||
| 324 | ({ unsigned long flags; \ | ||
| 325 | int val; \ | ||
| 326 | local_irq_save(flags); \ | ||
| 327 | val = _do_write_trylock(lock, "write_trylock", \ | ||
| 328 | (unsigned long) __builtin_return_address(0)); \ | ||
| 329 | local_irq_restore(flags); \ | ||
| 330 | val; \ | ||
| 331 | }) | ||
| 332 | |||
| 333 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
| 334 | |||
| 335 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | ||
| 336 | #define read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) | ||
| 337 | #define write_can_lock(rw) (!(rw)->lock) | ||
| 338 | 222 | ||
| 339 | #endif /* !(__ASSEMBLY__) */ | 223 | #endif /* !(__ASSEMBLY__) */ |
| 340 | 224 | ||
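On sparc64 the ordering around the lock is spelled out: membar #StoreStore | #LoadStore fences the critical section before the byte store that releases the lock, and __raw_spin_unlock_wait re-reads the byte behind rmb(). The same discipline as a sketch, with C11 fences standing in for membar/rmb, mirroring the kernel's volatile-plus-fence idiom rather than strict C11 atomics:

    #include <stdatomic.h>

    typedef struct { volatile unsigned char lock; } sketch_spinlock_t;

    static void sketch_spin_unlock(sketch_spinlock_t *lp)
    {
        /* everything inside the critical section must be visible
         * before other cpus can observe the lock byte as 0 */
        atomic_thread_fence(memory_order_release);
        lp->lock = 0;
    }

    static void sketch_spin_unlock_wait(sketch_spinlock_t *lp)
    {
        while (lp->lock)
            atomic_thread_fence(memory_order_acquire);  /* rmb() stand-in */
    }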
diff --git a/include/asm-sparc64/spinlock_types.h b/include/asm-sparc64/spinlock_types.h new file mode 100644 index 000000000000..e128112a0d7c --- /dev/null +++ b/include/asm-sparc64/spinlock_types.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef __SPARC64_SPINLOCK_TYPES_H | ||
| 2 | #define __SPARC64_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned char lock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
| 19 | |||
| 20 | #endif | ||
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h index 6c813eb521f3..f7574196424e 100644 --- a/include/asm-x86_64/proto.h +++ b/include/asm-x86_64/proto.h | |||
| @@ -8,7 +8,6 @@ | |||
| 8 | struct cpuinfo_x86; | 8 | struct cpuinfo_x86; |
| 9 | struct pt_regs; | 9 | struct pt_regs; |
| 10 | 10 | ||
| 11 | extern void get_cpu_vendor(struct cpuinfo_x86*); | ||
| 12 | extern void start_kernel(void); | 11 | extern void start_kernel(void); |
| 13 | extern void pda_init(int); | 12 | extern void pda_init(int); |
| 14 | 13 | ||
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h index 5aeb57a3baad..69636831ad2f 100644 --- a/include/asm-x86_64/spinlock.h +++ b/include/asm-x86_64/spinlock.h | |||
| @@ -6,47 +6,21 @@ | |||
| 6 | #include <asm/page.h> | 6 | #include <asm/page.h> |
| 7 | #include <linux/config.h> | 7 | #include <linux/config.h> |
| 8 | 8 | ||
| 9 | extern int printk(const char * fmt, ...) | ||
| 10 | __attribute__ ((format (printf, 1, 2))); | ||
| 11 | |||
| 12 | /* | 9 | /* |
| 13 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 10 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
| 14 | */ | 11 | * |
| 15 | |||
| 16 | typedef struct { | ||
| 17 | volatile unsigned int lock; | ||
| 18 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 19 | unsigned magic; | ||
| 20 | #endif | ||
| 21 | #ifdef CONFIG_PREEMPT | ||
| 22 | unsigned int break_lock; | ||
| 23 | #endif | ||
| 24 | } spinlock_t; | ||
| 25 | |||
| 26 | #define SPINLOCK_MAGIC 0xdead4ead | ||
| 27 | |||
| 28 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 29 | #define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC | ||
| 30 | #else | ||
| 31 | #define SPINLOCK_MAGIC_INIT /* */ | ||
| 32 | #endif | ||
| 33 | |||
| 34 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT } | ||
| 35 | |||
| 36 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
| 37 | |||
| 38 | /* | ||
| 39 | * Simple spin lock operations. There are two variants, one clears IRQ's | 12 | * Simple spin lock operations. There are two variants, one clears IRQ's |
| 40 | * on the local processor, one does not. | 13 | * on the local processor, one does not. |
| 41 | * | 14 | * |
| 42 | * We make no fairness assumptions. They have a cost. | 15 | * We make no fairness assumptions. They have a cost. |
| 16 | * | ||
| 17 | * (the type definitions are in asm/spinlock_types.h) | ||
| 43 | */ | 18 | */ |
| 44 | 19 | ||
| 45 | #define spin_is_locked(x) (*(volatile signed char *)(&(x)->lock) <= 0) | 20 | #define __raw_spin_is_locked(x) \ |
| 46 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | 21 | (*(volatile signed char *)(&(x)->slock) <= 0) |
| 47 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
| 48 | 22 | ||
| 49 | #define spin_lock_string \ | 23 | #define __raw_spin_lock_string \ |
| 50 | "\n1:\t" \ | 24 | "\n1:\t" \ |
| 51 | "lock ; decb %0\n\t" \ | 25 | "lock ; decb %0\n\t" \ |
| 52 | "js 2f\n" \ | 26 | "js 2f\n" \ |
| @@ -58,74 +32,40 @@ typedef struct { | |||
| 58 | "jmp 1b\n" \ | 32 | "jmp 1b\n" \ |
| 59 | LOCK_SECTION_END | 33 | LOCK_SECTION_END |
| 60 | 34 | ||
| 61 | /* | 35 | #define __raw_spin_unlock_string \ |
| 62 | * This works. Despite all the confusion. | ||
| 63 | * (except on PPro SMP or if we are using OOSTORE) | ||
| 64 | * (PPro errata 66, 92) | ||
| 65 | */ | ||
| 66 | |||
| 67 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) | ||
| 68 | |||
| 69 | #define spin_unlock_string \ | ||
| 70 | "movb $1,%0" \ | 36 | "movb $1,%0" \ |
| 71 | :"=m" (lock->lock) : : "memory" | 37 | :"=m" (lock->slock) : : "memory" |
| 72 | |||
| 73 | |||
| 74 | static inline void _raw_spin_unlock(spinlock_t *lock) | ||
| 75 | { | ||
| 76 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 77 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
| 78 | assert_spin_locked(lock); | ||
| 79 | #endif | ||
| 80 | __asm__ __volatile__( | ||
| 81 | spin_unlock_string | ||
| 82 | ); | ||
| 83 | } | ||
| 84 | |||
| 85 | #else | ||
| 86 | |||
| 87 | #define spin_unlock_string \ | ||
| 88 | "xchgb %b0, %1" \ | ||
| 89 | :"=q" (oldval), "=m" (lock->lock) \ | ||
| 90 | :"0" (oldval) : "memory" | ||
| 91 | 38 | ||
| 92 | static inline void _raw_spin_unlock(spinlock_t *lock) | 39 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
| 93 | { | 40 | { |
| 94 | char oldval = 1; | ||
| 95 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 96 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
| 97 | assert_spin_locked(lock); | ||
| 98 | #endif | ||
| 99 | __asm__ __volatile__( | 41 | __asm__ __volatile__( |
| 100 | spin_unlock_string | 42 | __raw_spin_lock_string |
| 101 | ); | 43 | :"=m" (lock->slock) : : "memory"); |
| 102 | } | 44 | } |
| 103 | 45 | ||
| 104 | #endif | 46 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
| 105 | 47 | ||
| 106 | static inline int _raw_spin_trylock(spinlock_t *lock) | 48 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
| 107 | { | 49 | { |
| 108 | char oldval; | 50 | char oldval; |
| 51 | |||
| 109 | __asm__ __volatile__( | 52 | __asm__ __volatile__( |
| 110 | "xchgb %b0,%1" | 53 | "xchgb %b0,%1" |
| 111 | :"=q" (oldval), "=m" (lock->lock) | 54 | :"=q" (oldval), "=m" (lock->slock) |
| 112 | :"0" (0) : "memory"); | 55 | :"0" (0) : "memory"); |
| 56 | |||
| 113 | return oldval > 0; | 57 | return oldval > 0; |
| 114 | } | 58 | } |
| 115 | 59 | ||
| 116 | static inline void _raw_spin_lock(spinlock_t *lock) | 60 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 117 | { | 61 | { |
| 118 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 119 | if (lock->magic != SPINLOCK_MAGIC) { | ||
| 120 | printk("eip: %p\n", __builtin_return_address(0)); | ||
| 121 | BUG(); | ||
| 122 | } | ||
| 123 | #endif | ||
| 124 | __asm__ __volatile__( | 62 | __asm__ __volatile__( |
| 125 | spin_lock_string | 63 | __raw_spin_unlock_string |
| 126 | :"=m" (lock->lock) : : "memory"); | 64 | ); |
| 127 | } | 65 | } |
| 128 | 66 | ||
| 67 | #define __raw_spin_unlock_wait(lock) \ | ||
| 68 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | ||
| 129 | 69 | ||
| 130 | /* | 70 | /* |
| 131 | * Read-write spinlocks, allowing multiple readers | 71 | * Read-write spinlocks, allowing multiple readers |
| @@ -136,33 +76,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
| 136 | * can "mix" irq-safe locks - any writer needs to get a | 76 | * can "mix" irq-safe locks - any writer needs to get a |
| 137 | * irq-safe write-lock, but readers can get non-irqsafe | 77 | * irq-safe write-lock, but readers can get non-irqsafe |
| 138 | * read-locks. | 78 | * read-locks. |
| 139 | */ | 79 | * |
| 140 | typedef struct { | ||
| 141 | volatile unsigned int lock; | ||
| 142 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 143 | unsigned magic; | ||
| 144 | #endif | ||
| 145 | #ifdef CONFIG_PREEMPT | ||
| 146 | unsigned int break_lock; | ||
| 147 | #endif | ||
| 148 | } rwlock_t; | ||
| 149 | |||
| 150 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
| 151 | |||
| 152 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 153 | #define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC | ||
| 154 | #else | ||
| 155 | #define RWLOCK_MAGIC_INIT /* */ | ||
| 156 | #endif | ||
| 157 | |||
| 158 | #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT } | ||
| 159 | |||
| 160 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
| 161 | |||
| 162 | #define read_can_lock(x) ((int)(x)->lock > 0) | ||
| 163 | #define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | ||
| 164 | |||
| 165 | /* | ||
| 166 | * On x86, we implement read-write locks as a 32-bit counter | 80 | * On x86, we implement read-write locks as a 32-bit counter |
| 167 | * with the high bit (sign) being the "contended" bit. | 81 | * with the high bit (sign) being the "contended" bit. |
| 168 | * | 82 | * |
| @@ -170,29 +84,24 @@ typedef struct { | |||
| 170 | * | 84 | * |
| 171 | * Changed to use the same technique as rw semaphores. See | 85 | * Changed to use the same technique as rw semaphores. See |
| 172 | * semaphore.h for details. -ben | 86 | * semaphore.h for details. -ben |
| 87 | * | ||
| 88 | * the helpers are in arch/i386/kernel/semaphore.c | ||
| 173 | */ | 89 | */ |
| 174 | /* the spinlock helpers are in arch/i386/kernel/semaphore.c */ | ||
| 175 | 90 | ||
| 176 | static inline void _raw_read_lock(rwlock_t *rw) | 91 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) |
| 92 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | ||
| 93 | |||
| 94 | static inline void __raw_read_lock(raw_rwlock_t *rw) | ||
| 177 | { | 95 | { |
| 178 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 179 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
| 180 | #endif | ||
| 181 | __build_read_lock(rw, "__read_lock_failed"); | 96 | __build_read_lock(rw, "__read_lock_failed"); |
| 182 | } | 97 | } |
| 183 | 98 | ||
| 184 | static inline void _raw_write_lock(rwlock_t *rw) | 99 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
| 185 | { | 100 | { |
| 186 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 187 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
| 188 | #endif | ||
| 189 | __build_write_lock(rw, "__write_lock_failed"); | 101 | __build_write_lock(rw, "__write_lock_failed"); |
| 190 | } | 102 | } |
| 191 | 103 | ||
| 192 | #define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory") | 104 | static inline int __raw_read_trylock(raw_rwlock_t *lock) |
| 193 | #define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory") | ||
| 194 | |||
| 195 | static inline int _raw_read_trylock(rwlock_t *lock) | ||
| 196 | { | 105 | { |
| 197 | atomic_t *count = (atomic_t *)lock; | 106 | atomic_t *count = (atomic_t *)lock; |
| 198 | atomic_dec(count); | 107 | atomic_dec(count); |
| @@ -202,7 +111,7 @@ static inline int _raw_read_trylock(rwlock_t *lock) | |||
| 202 | return 0; | 111 | return 0; |
| 203 | } | 112 | } |
| 204 | 113 | ||
| 205 | static inline int _raw_write_trylock(rwlock_t *lock) | 114 | static inline int __raw_write_trylock(raw_rwlock_t *lock) |
| 206 | { | 115 | { |
| 207 | atomic_t *count = (atomic_t *)lock; | 116 | atomic_t *count = (atomic_t *)lock; |
| 208 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | 117 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) |
| @@ -211,4 +120,15 @@ static inline int _raw_write_trylock(rwlock_t *lock) | |||
| 211 | return 0; | 120 | return 0; |
| 212 | } | 121 | } |
| 213 | 122 | ||
| 123 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | ||
| 124 | { | ||
| 125 | asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory"); | ||
| 126 | } | ||
| 127 | |||
| 128 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | ||
| 129 | { | ||
| 130 | asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0" | ||
| 131 | : "=m" (rw->lock) : : "memory"); | ||
| 132 | } | ||
| 133 | |||
| 214 | #endif /* __ASM_SPINLOCK_H */ | 134 | #endif /* __ASM_SPINLOCK_H */ |
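RW_LOCK_BIAS makes both trylocks above a single atomic arithmetic step: the bias is large enough that no realistic number of readers can drive the count negative, while a writer subtracts the entire bias and only succeeds if the result is exactly zero (no readers, no writer). A sketch with GCC builtins standing in for the lock-prefixed instructions, folding the separate dec and read of __raw_read_trylock into one value-returning op:

    #define RW_LOCK_BIAS 0x01000000

    static int sketch_read_trylock(volatile int *count)
    {
        if (__sync_sub_and_fetch(count, 1) >= 0)
            return 1;                       /* still non-negative: got it */
        __sync_fetch_and_add(count, 1);     /* undo: a writer holds it */
        return 0;
    }

    static int sketch_write_trylock(volatile int *count)
    {
        if (__sync_sub_and_fetch(count, RW_LOCK_BIAS) == 0)
            return 1;                       /* exactly zero: lock was idle */
        __sync_fetch_and_add(count, RW_LOCK_BIAS);  /* undo */
        return 0;
    }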
diff --git a/include/asm-x86_64/spinlock_types.h b/include/asm-x86_64/spinlock_types.h new file mode 100644 index 000000000000..59efe849f351 --- /dev/null +++ b/include/asm-x86_64/spinlock_types.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
| 2 | #define __ASM_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | typedef struct { | ||
| 9 | volatile unsigned int slock; | ||
| 10 | } raw_spinlock_t; | ||
| 11 | |||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
| 13 | |||
| 14 | typedef struct { | ||
| 15 | volatile unsigned int lock; | ||
| 16 | } raw_rwlock_t; | ||
| 17 | |||
| 18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | ||
| 19 | |||
| 20 | #endif | ||
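The { 1 } initializer falls straight out of the locking scheme above: the byte holds 1 while free, taking the lock drives it to 0 or below (decb, or xchgb with 0), and a plain store of 1 releases it. In sketch form, with a GCC builtin standing in for xchgb:

    static int sketch_trylock(volatile signed char *slock)
    {
        /* swap in 0; an old value > 0 means the lock was free */
        return __sync_lock_test_and_set(slock, 0) > 0;
    }

    static void sketch_unlock(volatile signed char *slock)
    {
        *slock = 1;     /* a plain store is a release on x86's model */
    }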
diff --git a/include/linux/bio.h b/include/linux/bio.h index cdaf03a14a51..6e1c79c8b6bf 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
| @@ -314,9 +314,8 @@ void zero_fill_bio(struct bio *bio); | |||
| 314 | * bvec_kmap_irq and bvec_kunmap_irq!! | 314 | * bvec_kmap_irq and bvec_kunmap_irq!! |
| 315 | * | 315 | * |
| 316 | * This function MUST be inlined - it plays with the CPU interrupt flags. | 316 | * This function MUST be inlined - it plays with the CPU interrupt flags. |
| 317 | * Hence the `extern inline'. | ||
| 318 | */ | 317 | */ |
| 319 | extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) | 318 | static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) |
| 320 | { | 319 | { |
| 321 | unsigned long addr; | 320 | unsigned long addr; |
| 322 | 321 | ||
| @@ -332,7 +331,7 @@ extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) | |||
| 332 | return (char *) addr + bvec->bv_offset; | 331 | return (char *) addr + bvec->bv_offset; |
| 333 | } | 332 | } |
| 334 | 333 | ||
| 335 | extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) | 334 | static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) |
| 336 | { | 335 | { |
| 337 | unsigned long ptr = (unsigned long) buffer & PAGE_MASK; | 336 | unsigned long ptr = (unsigned long) buffer & PAGE_MASK; |
| 338 | 337 | ||
| @@ -345,7 +344,7 @@ extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) | |||
| 345 | #define bvec_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0) | 344 | #define bvec_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0) |
| 346 | #endif | 345 | #endif |
| 347 | 346 | ||
| 348 | extern inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, | 347 | static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, |
| 349 | unsigned long *flags) | 348 | unsigned long *flags) |
| 350 | { | 349 | { |
| 351 | return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags); | 350 | return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags); |
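This bio.h hunk is part of a tree-wide retreat from `extern inline': under GCC's gnu89 rules that spelling emits no out-of-line body at all, so any call that survives inlining (at -O0, or through a function pointer) turns into an undefined reference at link time. A toy illustration of the failure mode, not taken from the tree:

    /* compiled with gcc -std=gnu89 */
    extern inline int twice(int x) { return 2 * x; }    /* no symbol emitted */

    int (*fp)(int) = &twice;    /* needs a real twice() symbol, so this
                                 * file fails to link unless some other
                                 * file supplies an out-of-line copy */

    /* static inline gives each file its own local copy on demand, so
     * the link succeeds whatever the optimizer decides: */
    static inline int twice_ok(int x) { return 2 * x; }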
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h new file mode 100644 index 000000000000..6b20af0bbb79 --- /dev/null +++ b/include/linux/bit_spinlock.h | |||
| @@ -0,0 +1,77 @@ | |||
| 1 | #ifndef __LINUX_BIT_SPINLOCK_H | ||
| 2 | #define __LINUX_BIT_SPINLOCK_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * bit-based spin_lock() | ||
| 6 | * | ||
| 7 | * Don't use this unless you really need to: spin_lock() and spin_unlock() | ||
| 8 | * are significantly faster. | ||
| 9 | */ | ||
| 10 | static inline void bit_spin_lock(int bitnum, unsigned long *addr) | ||
| 11 | { | ||
| 12 | /* | ||
| 13 | * Assuming the lock is uncontended, this never enters | ||
| 14 | * the body of the outer loop. If it is contended, then | ||
| 15 | * within the inner loop a non-atomic test is used to | ||
| 16 | * busywait with less bus contention for a good time to | ||
| 17 | * attempt to acquire the lock bit. | ||
| 18 | */ | ||
| 19 | preempt_disable(); | ||
| 20 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
| 21 | while (test_and_set_bit(bitnum, addr)) { | ||
| 22 | while (test_bit(bitnum, addr)) { | ||
| 23 | preempt_enable(); | ||
| 24 | cpu_relax(); | ||
| 25 | preempt_disable(); | ||
| 26 | } | ||
| 27 | } | ||
| 28 | #endif | ||
| 29 | __acquire(bitlock); | ||
| 30 | } | ||
| 31 | |||
| 32 | /* | ||
| 33 | * Return true if it was acquired | ||
| 34 | */ | ||
| 35 | static inline int bit_spin_trylock(int bitnum, unsigned long *addr) | ||
| 36 | { | ||
| 37 | preempt_disable(); | ||
| 38 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
| 39 | if (test_and_set_bit(bitnum, addr)) { | ||
| 40 | preempt_enable(); | ||
| 41 | return 0; | ||
| 42 | } | ||
| 43 | #endif | ||
| 44 | __acquire(bitlock); | ||
| 45 | return 1; | ||
| 46 | } | ||
| 47 | |||
| 48 | /* | ||
| 49 | * bit-based spin_unlock() | ||
| 50 | */ | ||
| 51 | static inline void bit_spin_unlock(int bitnum, unsigned long *addr) | ||
| 52 | { | ||
| 53 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
| 54 | BUG_ON(!test_bit(bitnum, addr)); | ||
| 55 | smp_mb__before_clear_bit(); | ||
| 56 | clear_bit(bitnum, addr); | ||
| 57 | #endif | ||
| 58 | preempt_enable(); | ||
| 59 | __release(bitlock); | ||
| 60 | } | ||
| 61 | |||
| 62 | /* | ||
| 63 | * Return true if the lock is held. | ||
| 64 | */ | ||
| 65 | static inline int bit_spin_is_locked(int bitnum, unsigned long *addr) | ||
| 66 | { | ||
| 67 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
| 68 | return test_bit(bitnum, addr); | ||
| 69 | #elif defined CONFIG_PREEMPT | ||
| 70 | return preempt_count(); | ||
| 71 | #else | ||
| 72 | return 1; | ||
| 73 | #endif | ||
| 74 | } | ||
| 75 | |||
| 76 | #endif /* __LINUX_BIT_SPINLOCK_H */ | ||
| 77 | |||
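A minimal usage sketch for the new header: any free bit of a word can serve as a spinlock, which is how the jbd.h hunk below uses it for buffer_head state bits. MY_LOCK_BIT and the flags word are invented for the example:

    #include <linux/bit_spinlock.h>

    #define MY_LOCK_BIT 0               /* hypothetical bit number */

    static unsigned long my_flags;      /* bit 0 doubles as a lock */

    static void touch_protected_state(void)
    {
        bit_spin_lock(MY_LOCK_BIT, &my_flags);
        /* critical section: state guarded by this bit, including the
         * other bits of my_flags, may be modified here */
        bit_spin_unlock(MY_LOCK_BIT, &my_flags);
    }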
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index aefa26fbae8a..efdc9b5bc05c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -728,7 +728,7 @@ static inline unsigned int blksize_bits(unsigned int size) | |||
| 728 | return bits; | 728 | return bits; |
| 729 | } | 729 | } |
| 730 | 730 | ||
| 731 | extern inline unsigned int block_size(struct block_device *bdev) | 731 | static inline unsigned int block_size(struct block_device *bdev) |
| 732 | { | 732 | { |
| 733 | return bdev->bd_block_size; | 733 | return bdev->bd_block_size; |
| 734 | } | 734 | } |
diff --git a/include/linux/chio.h b/include/linux/chio.h index 63035ae67e63..a404c111c937 100644 --- a/include/linux/chio.h +++ b/include/linux/chio.h | |||
| @@ -96,7 +96,7 @@ struct changer_position { | |||
| 96 | */ | 96 | */ |
| 97 | struct changer_element_status { | 97 | struct changer_element_status { |
| 98 | int ces_type; | 98 | int ces_type; |
| 99 | unsigned char *ces_data; | 99 | unsigned char __user *ces_data; |
| 100 | }; | 100 | }; |
| 101 | #define CESTATUS_FULL 0x01 /* full */ | 101 | #define CESTATUS_FULL 0x01 /* full */ |
| 102 | #define CESTATUS_IMPEXP 0x02 /* media was imported (inserted by sysop) */ | 102 | #define CESTATUS_IMPEXP 0x02 /* media was imported (inserted by sysop) */ |
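[Note: the `__user` annotation marks ces_data as a userspace pointer for sparse type checking; kernel code must reach it through the uaccess copy helpers rather than dereferencing it directly. A hedged sketch — `kbuf` and `len` are illustrative:

    if (copy_from_user(kbuf, ces->ces_data, len))
            return -EFAULT;        /* never dereference ces->ces_data directly */
]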
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h index e60bfdac348d..4932ee5c77f0 100644 --- a/include/linux/dmapool.h +++ b/include/linux/dmapool.h | |||
| @@ -19,7 +19,8 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, | |||
| 19 | 19 | ||
| 20 | void dma_pool_destroy(struct dma_pool *pool); | 20 | void dma_pool_destroy(struct dma_pool *pool); |
| 21 | 21 | ||
| 22 | void *dma_pool_alloc(struct dma_pool *pool, int mem_flags, dma_addr_t *handle); | 22 | void *dma_pool_alloc(struct dma_pool *pool, unsigned int __nocast mem_flags, |
| 23 | dma_addr_t *handle); | ||
| 23 | 24 | ||
| 24 | void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); | 25 | void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); |
| 25 | 26 | ||
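[Note: the `__nocast` annotation asks sparse to warn on implicit conversions into the flags argument (this predates a dedicated gfp_t type). Callers are unchanged; a minimal sketch assuming a 64-byte pool on some device `dev`, with illustrative names:

    struct dma_pool *pool;
    dma_addr_t handle;
    void *cpu_addr;

    pool = dma_pool_create("example", dev, 64, 64, 0);     /* 64-byte blocks, 64-byte aligned */
    cpu_addr = dma_pool_alloc(pool, GFP_ATOMIC, &handle);  /* flags argument is now __nocast */
    /* ... use cpu_addr / handle for DMA ... */
    dma_pool_free(pool, cpu_addr, handle);
    dma_pool_destroy(pool);
]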
diff --git a/include/linux/fs.h b/include/linux/fs.h index 7f61227827d7..e0b77c5af9a0 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -1509,8 +1509,6 @@ extern void do_generic_mapping_read(struct address_space *mapping, | |||
| 1509 | loff_t *, read_descriptor_t *, read_actor_t); | 1509 | loff_t *, read_descriptor_t *, read_actor_t); |
| 1510 | extern void | 1510 | extern void |
| 1511 | file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); | 1511 | file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); |
| 1512 | extern ssize_t generic_file_direct_IO(int rw, struct kiocb *iocb, | ||
| 1513 | const struct iovec *iov, loff_t offset, unsigned long nr_segs); | ||
| 1514 | extern ssize_t generic_file_readv(struct file *filp, const struct iovec *iov, | 1512 | extern ssize_t generic_file_readv(struct file *filp, const struct iovec *iov, |
| 1515 | unsigned long nr_segs, loff_t *ppos); | 1513 | unsigned long nr_segs, loff_t *ppos); |
| 1516 | ssize_t generic_file_writev(struct file *filp, const struct iovec *iov, | 1514 | ssize_t generic_file_writev(struct file *filp, const struct iovec *iov, |
diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 84321a4cac93..de097269bd7f 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/buffer_head.h> | 28 | #include <linux/buffer_head.h> |
| 29 | #include <linux/journal-head.h> | 29 | #include <linux/journal-head.h> |
| 30 | #include <linux/stddef.h> | 30 | #include <linux/stddef.h> |
| 31 | #include <linux/bit_spinlock.h> | ||
| 31 | #include <asm/semaphore.h> | 32 | #include <asm/semaphore.h> |
| 32 | #endif | 33 | #endif |
| 33 | 34 | ||
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index d7a2555a886c..6acfdbba734b 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h | |||
| @@ -254,23 +254,23 @@ static inline u64 get_jiffies_64(void) | |||
| 254 | */ | 254 | */ |
| 255 | static inline unsigned int jiffies_to_msecs(const unsigned long j) | 255 | static inline unsigned int jiffies_to_msecs(const unsigned long j) |
| 256 | { | 256 | { |
| 257 | #if HZ <= 1000 && !(1000 % HZ) | 257 | #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) |
| 258 | return (1000 / HZ) * j; | 258 | return (MSEC_PER_SEC / HZ) * j; |
| 259 | #elif HZ > 1000 && !(HZ % 1000) | 259 | #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) |
| 260 | return (j + (HZ / 1000) - 1)/(HZ / 1000); | 260 | return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); |
| 261 | #else | 261 | #else |
| 262 | return (j * 1000) / HZ; | 262 | return (j * MSEC_PER_SEC) / HZ; |
| 263 | #endif | 263 | #endif |
| 264 | } | 264 | } |
| 265 | 265 | ||
| 266 | static inline unsigned int jiffies_to_usecs(const unsigned long j) | 266 | static inline unsigned int jiffies_to_usecs(const unsigned long j) |
| 267 | { | 267 | { |
| 268 | #if HZ <= 1000000 && !(1000000 % HZ) | 268 | #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) |
| 269 | return (1000000 / HZ) * j; | 269 | return (USEC_PER_SEC / HZ) * j; |
| 270 | #elif HZ > 1000000 && !(HZ % 1000000) | 270 | #elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) |
| 271 | return (j + (HZ / 1000000) - 1)/(HZ / 1000000); | 271 | return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC); |
| 272 | #else | 272 | #else |
| 273 | return (j * 1000000) / HZ; | 273 | return (j * USEC_PER_SEC) / HZ; |
| 274 | #endif | 274 | #endif |
| 275 | } | 275 | } |
| 276 | 276 | ||
| @@ -278,12 +278,12 @@ static inline unsigned long msecs_to_jiffies(const unsigned int m) | |||
| 278 | { | 278 | { |
| 279 | if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) | 279 | if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) |
| 280 | return MAX_JIFFY_OFFSET; | 280 | return MAX_JIFFY_OFFSET; |
| 281 | #if HZ <= 1000 && !(1000 % HZ) | 281 | #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) |
| 282 | return (m + (1000 / HZ) - 1) / (1000 / HZ); | 282 | return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); |
| 283 | #elif HZ > 1000 && !(HZ % 1000) | 283 | #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) |
| 284 | return m * (HZ / 1000); | 284 | return m * (HZ / MSEC_PER_SEC); |
| 285 | #else | 285 | #else |
| 286 | return (m * HZ + 999) / 1000; | 286 | return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; |
| 287 | #endif | 287 | #endif |
| 288 | } | 288 | } |
| 289 | 289 | ||
| @@ -291,12 +291,12 @@ static inline unsigned long usecs_to_jiffies(const unsigned int u) | |||
| 291 | { | 291 | { |
| 292 | if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) | 292 | if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) |
| 293 | return MAX_JIFFY_OFFSET; | 293 | return MAX_JIFFY_OFFSET; |
| 294 | #if HZ <= 1000000 && !(1000000 % HZ) | 294 | #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) |
| 295 | return (u + (1000000 / HZ) - 1) / (1000000 / HZ); | 295 | return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); |
| 296 | #elif HZ > 1000000 && !(HZ % 1000000) | 296 | #elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) |
| 297 | return u * (HZ / 1000000); | 297 | return u * (HZ / USEC_PER_SEC); |
| 298 | #else | 298 | #else |
| 299 | return (u * HZ + 999999) / 1000000; | 299 | return (u * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC; |
| 300 | #endif | 300 | #endif |
| 301 | } | 301 | } |
| 302 | 302 | ||
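[Note: the conversion logic is unchanged; the named constants simply replace bare 1000/1000000 literals. A worked example of the rounding, assuming HZ == 250 (one jiffy == 4 ms):

    /* msecs_to_jiffies(10): HZ divides MSEC_PER_SEC, so the first branch runs:
     *     (10 + (1000/250) - 1) / (1000/250) == 13/4 == 3 jiffies
     * i.e. 2.5 ticks rounded *up*, so a requested delay is never shortened.
     * jiffies_to_msecs(3): (1000/250) * 3 == 12 ms.
     */
]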
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 8081a281fa5e..9c51917b1cce 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
| @@ -24,7 +24,7 @@ | |||
| 24 | 24 | ||
| 25 | struct radix_tree_root { | 25 | struct radix_tree_root { |
| 26 | unsigned int height; | 26 | unsigned int height; |
| 27 | int gfp_mask; | 27 | unsigned int gfp_mask; |
| 28 | struct radix_tree_node *rnode; | 28 | struct radix_tree_node *rnode; |
| 29 | }; | 29 | }; |
| 30 | 30 | ||
| @@ -50,7 +50,7 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long); | |||
| 50 | unsigned int | 50 | unsigned int |
| 51 | radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | 51 | radix_tree_gang_lookup(struct radix_tree_root *root, void **results, |
| 52 | unsigned long first_index, unsigned int max_items); | 52 | unsigned long first_index, unsigned int max_items); |
| 53 | int radix_tree_preload(int gfp_mask); | 53 | int radix_tree_preload(unsigned int __nocast gfp_mask); |
| 54 | void radix_tree_init(void); | 54 | void radix_tree_init(void); |
| 55 | void *radix_tree_tag_set(struct radix_tree_root *root, | 55 | void *radix_tree_tag_set(struct radix_tree_root *root, |
| 56 | unsigned long index, int tag); | 56 | unsigned long index, int tag); |
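[Note: radix_tree_preload() is the usual way to guarantee that a subsequent insertion under a spinlock cannot fail for want of memory: preload node memory with a sleeping allocation first, then insert atomically, with radix_tree_preload_end() (defined elsewhere in this header) pairing with a successful preload. A sketch under those assumptions — `my_tree`, `tree_lock`, `index`, and `item` are illustrative:

    if (radix_tree_preload(GFP_KERNEL) == 0) {
            spin_lock(&tree_lock);
            radix_tree_insert(&my_tree, index, item);
            spin_unlock(&tree_lock);
            radix_tree_preload_end();      /* pairs with a successful preload */
    }
]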
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 17e458e17e2b..af00b10294cd 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h | |||
| @@ -2097,7 +2097,7 @@ void reiserfs_free_block(struct reiserfs_transaction_handle *th, struct inode *, | |||
| 2097 | b_blocknr_t, int for_unformatted); | 2097 | b_blocknr_t, int for_unformatted); |
| 2098 | int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *, b_blocknr_t *, int, | 2098 | int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *, b_blocknr_t *, int, |
| 2099 | int); | 2099 | int); |
| 2100 | extern inline int reiserfs_new_form_blocknrs(struct tree_balance *tb, | 2100 | static inline int reiserfs_new_form_blocknrs(struct tree_balance *tb, |
| 2101 | b_blocknr_t * new_blocknrs, | 2101 | b_blocknr_t * new_blocknrs, |
| 2102 | int amount_needed) | 2102 | int amount_needed) |
| 2103 | { | 2103 | { |
| @@ -2113,7 +2113,7 @@ extern inline int reiserfs_new_form_blocknrs(struct tree_balance *tb, | |||
| 2113 | 0); | 2113 | 0); |
| 2114 | } | 2114 | } |
| 2115 | 2115 | ||
| 2116 | extern inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle | 2116 | static inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle |
| 2117 | *th, struct inode *inode, | 2117 | *th, struct inode *inode, |
| 2118 | b_blocknr_t * new_blocknrs, | 2118 | b_blocknr_t * new_blocknrs, |
| 2119 | struct path *path, long block) | 2119 | struct path *path, long block) |
| @@ -2130,7 +2130,7 @@ extern inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle | |||
| 2130 | } | 2130 | } |
| 2131 | 2131 | ||
| 2132 | #ifdef REISERFS_PREALLOCATE | 2132 | #ifdef REISERFS_PREALLOCATE |
| 2133 | extern inline int reiserfs_new_unf_blocknrs2(struct reiserfs_transaction_handle | 2133 | static inline int reiserfs_new_unf_blocknrs2(struct reiserfs_transaction_handle |
| 2134 | *th, struct inode *inode, | 2134 | *th, struct inode *inode, |
| 2135 | b_blocknr_t * new_blocknrs, | 2135 | b_blocknr_t * new_blocknrs, |
| 2136 | struct path *path, long block) | 2136 | struct path *path, long block) |
diff --git a/include/linux/sched.h b/include/linux/sched.h index c551e6a1447e..4b83cb230006 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -114,6 +114,7 @@ extern unsigned long nr_iowait(void); | |||
| 114 | #define TASK_TRACED 8 | 114 | #define TASK_TRACED 8 |
| 115 | #define EXIT_ZOMBIE 16 | 115 | #define EXIT_ZOMBIE 16 |
| 116 | #define EXIT_DEAD 32 | 116 | #define EXIT_DEAD 32 |
| 117 | #define TASK_NONINTERACTIVE 64 | ||
| 117 | 118 | ||
| 118 | #define __set_task_state(tsk, state_value) \ | 119 | #define __set_task_state(tsk, state_value) \ |
| 119 | do { (tsk)->state = (state_value); } while (0) | 120 | do { (tsk)->state = (state_value); } while (0) |
| @@ -202,6 +203,8 @@ extern int in_sched_functions(unsigned long addr); | |||
| 202 | 203 | ||
| 203 | #define MAX_SCHEDULE_TIMEOUT LONG_MAX | 204 | #define MAX_SCHEDULE_TIMEOUT LONG_MAX |
| 204 | extern signed long FASTCALL(schedule_timeout(signed long timeout)); | 205 | extern signed long FASTCALL(schedule_timeout(signed long timeout)); |
| 206 | extern signed long schedule_timeout_interruptible(signed long timeout); | ||
| 207 | extern signed long schedule_timeout_uninterruptible(signed long timeout); | ||
| 205 | asmlinkage void schedule(void); | 208 | asmlinkage void schedule(void); |
| 206 | 209 | ||
| 207 | struct namespace; | 210 | struct namespace; |
| @@ -782,6 +785,7 @@ struct task_struct { | |||
| 782 | short il_next; | 785 | short il_next; |
| 783 | #endif | 786 | #endif |
| 784 | #ifdef CONFIG_CPUSETS | 787 | #ifdef CONFIG_CPUSETS |
| 788 | short cpuset_sem_nest_depth; | ||
| 785 | struct cpuset *cpuset; | 789 | struct cpuset *cpuset; |
| 786 | nodemask_t mems_allowed; | 790 | nodemask_t mems_allowed; |
| 787 | int cpuset_mems_generation; | 791 | int cpuset_mems_generation; |
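[Note: the two new schedule_timeout helpers fold the task-state setup into the call, replacing a common open-coded pair. A before/after sketch:

    /* before */
    set_current_state(TASK_INTERRUPTIBLE);
    remaining = schedule_timeout(timeout);

    /* after */
    remaining = schedule_timeout_interruptible(timeout);
]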
diff --git a/include/linux/slab.h b/include/linux/slab.h index 42a6bea58af3..1f356f3bbc64 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -118,7 +118,8 @@ extern void kfree(const void *); | |||
| 118 | extern unsigned int ksize(const void *); | 118 | extern unsigned int ksize(const void *); |
| 119 | 119 | ||
| 120 | #ifdef CONFIG_NUMA | 120 | #ifdef CONFIG_NUMA |
| 121 | extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node); | 121 | extern void *kmem_cache_alloc_node(kmem_cache_t *, |
| 122 | unsigned int __nocast flags, int node); | ||
| 122 | extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node); | 123 | extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node); |
| 123 | #else | 124 | #else |
| 124 | static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node) | 125 | static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node) |
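[Note: kmem_cache_alloc_node() gains the same `__nocast` flags annotation as kmalloc_node(). Callers are unchanged; a minimal sketch, with `cachep` illustrative:

    void *obj = kmem_cache_alloc_node(cachep, GFP_KERNEL, numa_node_id());
]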
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index d6ba068719b6..cdc99a27840d 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
| @@ -2,7 +2,48 @@ | |||
| 2 | #define __LINUX_SPINLOCK_H | 2 | #define __LINUX_SPINLOCK_H |
| 3 | 3 | ||
| 4 | /* | 4 | /* |
| 5 | * include/linux/spinlock.h - generic locking declarations | 5 | * include/linux/spinlock.h - generic spinlock/rwlock declarations |
| 6 | * | ||
| 7 | * here's the role of the various spinlock/rwlock related include files: | ||
| 8 | * | ||
| 9 | * on SMP builds: | ||
| 10 | * | ||
| 11 | * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the | ||
| 12 | * initializers | ||
| 13 | * | ||
| 14 | * linux/spinlock_types.h: | ||
| 15 | * defines the generic type and initializers | ||
| 16 | * | ||
| 17 | * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel | ||
| 18 | * implementations, mostly inline assembly code | ||
| 19 | * | ||
| 20 | * (also included on UP-debug builds:) | ||
| 21 | * | ||
| 22 | * linux/spinlock_api_smp.h: | ||
| 23 | * contains the prototypes for the _spin_*() APIs. | ||
| 24 | * | ||
| 25 | * linux/spinlock.h: builds the final spin_*() APIs. | ||
| 26 | * | ||
| 27 | * on UP builds: | ||
| 28 | * | ||
| 29 | * linux/spinlock_types_up.h: | ||
| 30 | * contains the generic, simplified UP spinlock type. | ||
| 31 | * (which is an empty structure on non-debug builds) | ||
| 32 | * | ||
| 33 | * linux/spinlock_types.h: | ||
| 34 | * defines the generic type and initializers | ||
| 35 | * | ||
| 36 | * linux/spinlock_up.h: | ||
| 37 | * contains the __raw_spin_*()/etc. version of UP | ||
| 38 | * builds. (which are NOPs on non-debug, non-preempt | ||
| 39 | * builds) | ||
| 40 | * | ||
| 41 | * (included on UP-non-debug builds:) | ||
| 42 | * | ||
| 43 | * linux/spinlock_api_up.h: | ||
| 44 | * builds the _spin_*() APIs. | ||
| 45 | * | ||
| 46 | * linux/spinlock.h: builds the final spin_*() APIs. | ||
| 6 | */ | 47 | */ |
| 7 | 48 | ||
| 8 | #include <linux/config.h> | 49 | #include <linux/config.h> |
| @@ -13,7 +54,6 @@ | |||
| 13 | #include <linux/kernel.h> | 54 | #include <linux/kernel.h> |
| 14 | #include <linux/stringify.h> | 55 | #include <linux/stringify.h> |
| 15 | 56 | ||
| 16 | #include <asm/processor.h> /* for cpu relax */ | ||
| 17 | #include <asm/system.h> | 57 | #include <asm/system.h> |
| 18 | 58 | ||
| 19 | /* | 59 | /* |
| @@ -35,423 +75,84 @@ | |||
| 35 | #define __lockfunc fastcall __attribute__((section(".spinlock.text"))) | 75 | #define __lockfunc fastcall __attribute__((section(".spinlock.text"))) |
| 36 | 76 | ||
| 37 | /* | 77 | /* |
| 38 | * If CONFIG_SMP is set, pull in the _raw_* definitions | 78 | * Pull the raw_spinlock_t and raw_rwlock_t definitions: |
| 39 | */ | 79 | */ |
| 40 | #ifdef CONFIG_SMP | 80 | #include <linux/spinlock_types.h> |
| 41 | |||
| 42 | #define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) | ||
| 43 | #include <asm/spinlock.h> | ||
| 44 | |||
| 45 | int __lockfunc _spin_trylock(spinlock_t *lock); | ||
| 46 | int __lockfunc _read_trylock(rwlock_t *lock); | ||
| 47 | int __lockfunc _write_trylock(rwlock_t *lock); | ||
| 48 | |||
| 49 | void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t); | ||
| 50 | void __lockfunc _read_lock(rwlock_t *lock) __acquires(rwlock_t); | ||
| 51 | void __lockfunc _write_lock(rwlock_t *lock) __acquires(rwlock_t); | ||
| 52 | |||
| 53 | void __lockfunc _spin_unlock(spinlock_t *lock) __releases(spinlock_t); | ||
| 54 | void __lockfunc _read_unlock(rwlock_t *lock) __releases(rwlock_t); | ||
| 55 | void __lockfunc _write_unlock(rwlock_t *lock) __releases(rwlock_t); | ||
| 56 | |||
| 57 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) __acquires(spinlock_t); | ||
| 58 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) __acquires(rwlock_t); | ||
| 59 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) __acquires(rwlock_t); | ||
| 60 | |||
| 61 | void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(spinlock_t); | ||
| 62 | void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(spinlock_t); | ||
| 63 | void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(rwlock_t); | ||
| 64 | void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(rwlock_t); | ||
| 65 | void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(rwlock_t); | ||
| 66 | void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(rwlock_t); | ||
| 67 | |||
| 68 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) __releases(spinlock_t); | ||
| 69 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(spinlock_t); | ||
| 70 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(spinlock_t); | ||
| 71 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) __releases(rwlock_t); | ||
| 72 | void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(rwlock_t); | ||
| 73 | void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(rwlock_t); | ||
| 74 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) __releases(rwlock_t); | ||
| 75 | void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(rwlock_t); | ||
| 76 | void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(rwlock_t); | ||
| 77 | |||
| 78 | int __lockfunc _spin_trylock_bh(spinlock_t *lock); | ||
| 79 | int __lockfunc generic_raw_read_trylock(rwlock_t *lock); | ||
| 80 | int in_lock_functions(unsigned long addr); | ||
| 81 | |||
| 82 | #else | ||
| 83 | 81 | ||
| 84 | #define in_lock_functions(ADDR) 0 | 82 | extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); |
| 85 | 83 | ||
| 86 | #if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK) | ||
| 87 | # define _atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic) | ||
| 88 | # define ATOMIC_DEC_AND_LOCK | ||
| 89 | #endif | ||
| 90 | |||
| 91 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 92 | |||
| 93 | #define SPINLOCK_MAGIC 0x1D244B3C | ||
| 94 | typedef struct { | ||
| 95 | unsigned long magic; | ||
| 96 | volatile unsigned long lock; | ||
| 97 | volatile unsigned int babble; | ||
| 98 | const char *module; | ||
| 99 | char *owner; | ||
| 100 | int oline; | ||
| 101 | } spinlock_t; | ||
| 102 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__ , NULL, 0} | ||
| 103 | |||
| 104 | #define spin_lock_init(x) \ | ||
| 105 | do { \ | ||
| 106 | (x)->magic = SPINLOCK_MAGIC; \ | ||
| 107 | (x)->lock = 0; \ | ||
| 108 | (x)->babble = 5; \ | ||
| 109 | (x)->module = __FILE__; \ | ||
| 110 | (x)->owner = NULL; \ | ||
| 111 | (x)->oline = 0; \ | ||
| 112 | } while (0) | ||
| 113 | |||
| 114 | #define CHECK_LOCK(x) \ | ||
| 115 | do { \ | ||
| 116 | if ((x)->magic != SPINLOCK_MAGIC) { \ | ||
| 117 | printk(KERN_ERR "%s:%d: spin_is_locked on uninitialized spinlock %p.\n", \ | ||
| 118 | __FILE__, __LINE__, (x)); \ | ||
| 119 | } \ | ||
| 120 | } while(0) | ||
| 121 | |||
| 122 | #define _raw_spin_lock(x) \ | ||
| 123 | do { \ | ||
| 124 | CHECK_LOCK(x); \ | ||
| 125 | if ((x)->lock&&(x)->babble) { \ | ||
| 126 | (x)->babble--; \ | ||
| 127 | printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \ | ||
| 128 | __FILE__,__LINE__, (x)->module, \ | ||
| 129 | (x), (x)->owner, (x)->oline); \ | ||
| 130 | } \ | ||
| 131 | (x)->lock = 1; \ | ||
| 132 | (x)->owner = __FILE__; \ | ||
| 133 | (x)->oline = __LINE__; \ | ||
| 134 | } while (0) | ||
| 135 | |||
| 136 | /* without debugging, spin_is_locked on UP always says | ||
| 137 | * FALSE. --> printk if already locked. */ | ||
| 138 | #define spin_is_locked(x) \ | ||
| 139 | ({ \ | ||
| 140 | CHECK_LOCK(x); \ | ||
| 141 | if ((x)->lock&&(x)->babble) { \ | ||
| 142 | (x)->babble--; \ | ||
| 143 | printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \ | ||
| 144 | __FILE__,__LINE__, (x)->module, \ | ||
| 145 | (x), (x)->owner, (x)->oline); \ | ||
| 146 | } \ | ||
| 147 | 0; \ | ||
| 148 | }) | ||
| 149 | |||
| 150 | /* with debugging, assert_spin_locked() on UP does check | ||
| 151 | * the lock value properly */ | ||
| 152 | #define assert_spin_locked(x) \ | ||
| 153 | ({ \ | ||
| 154 | CHECK_LOCK(x); \ | ||
| 155 | BUG_ON(!(x)->lock); \ | ||
| 156 | }) | ||
| 157 | |||
| 158 | /* without debugging, spin_trylock on UP always says | ||
| 159 | * TRUE. --> printk if already locked. */ | ||
| 160 | #define _raw_spin_trylock(x) \ | ||
| 161 | ({ \ | ||
| 162 | CHECK_LOCK(x); \ | ||
| 163 | if ((x)->lock&&(x)->babble) { \ | ||
| 164 | (x)->babble--; \ | ||
| 165 | printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \ | ||
| 166 | __FILE__,__LINE__, (x)->module, \ | ||
| 167 | (x), (x)->owner, (x)->oline); \ | ||
| 168 | } \ | ||
| 169 | (x)->lock = 1; \ | ||
| 170 | (x)->owner = __FILE__; \ | ||
| 171 | (x)->oline = __LINE__; \ | ||
| 172 | 1; \ | ||
| 173 | }) | ||
| 174 | |||
| 175 | #define spin_unlock_wait(x) \ | ||
| 176 | do { \ | ||
| 177 | CHECK_LOCK(x); \ | ||
| 178 | if ((x)->lock&&(x)->babble) { \ | ||
| 179 | (x)->babble--; \ | ||
| 180 | printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \ | ||
| 181 | __FILE__,__LINE__, (x)->module, (x), \ | ||
| 182 | (x)->owner, (x)->oline); \ | ||
| 183 | }\ | ||
| 184 | } while (0) | ||
| 185 | |||
| 186 | #define _raw_spin_unlock(x) \ | ||
| 187 | do { \ | ||
| 188 | CHECK_LOCK(x); \ | ||
| 189 | if (!(x)->lock&&(x)->babble) { \ | ||
| 190 | (x)->babble--; \ | ||
| 191 | printk("%s:%d: spin_unlock(%s:%p) not locked\n", \ | ||
| 192 | __FILE__,__LINE__, (x)->module, (x));\ | ||
| 193 | } \ | ||
| 194 | (x)->lock = 0; \ | ||
| 195 | } while (0) | ||
| 196 | #else | ||
| 197 | /* | 84 | /* |
| 198 | * gcc versions before ~2.95 have a nasty bug with empty initializers. | 85 | * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them): |
| 199 | */ | 86 | */ |
| 200 | #if (__GNUC__ > 2) | 87 | #if defined(CONFIG_SMP) |
| 201 | typedef struct { } spinlock_t; | 88 | # include <asm/spinlock.h> |
| 202 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { } | ||
| 203 | #else | 89 | #else |
| 204 | typedef struct { int gcc_is_buggy; } spinlock_t; | 90 | # include <linux/spinlock_up.h> |
| 205 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
| 206 | #endif | 91 | #endif |
| 207 | 92 | ||
| 93 | #define spin_lock_init(lock) do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) | ||
| 94 | #define rwlock_init(lock) do { *(lock) = RW_LOCK_UNLOCKED; } while (0) | ||
| 95 | |||
| 96 | #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) | ||
| 97 | |||
| 98 | /** | ||
| 99 | * spin_unlock_wait - wait until the spinlock gets unlocked | ||
| 100 | * @lock: the spinlock in question. | ||
| 101 | */ | ||
| 102 | #define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) | ||
| 103 | |||
| 208 | /* | 104 | /* |
| 209 | * If CONFIG_SMP is unset, declare the _raw_* definitions as nops | 105 | * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: |
| 210 | */ | 106 | */ |
| 211 | #define spin_lock_init(lock) do { (void)(lock); } while(0) | 107 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
| 212 | #define _raw_spin_lock(lock) do { (void)(lock); } while(0) | 108 | # include <linux/spinlock_api_smp.h> |
| 213 | #define spin_is_locked(lock) ((void)(lock), 0) | ||
| 214 | #define assert_spin_locked(lock) do { (void)(lock); } while(0) | ||
| 215 | #define _raw_spin_trylock(lock) (((void)(lock), 1)) | ||
| 216 | #define spin_unlock_wait(lock) (void)(lock) | ||
| 217 | #define _raw_spin_unlock(lock) do { (void)(lock); } while(0) | ||
| 218 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
| 219 | |||
| 220 | /* RW spinlocks: No debug version */ | ||
| 221 | |||
| 222 | #if (__GNUC__ > 2) | ||
| 223 | typedef struct { } rwlock_t; | ||
| 224 | #define RW_LOCK_UNLOCKED (rwlock_t) { } | ||
| 225 | #else | 109 | #else |
| 226 | typedef struct { int gcc_is_buggy; } rwlock_t; | 110 | # include <linux/spinlock_api_up.h> |
| 227 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
| 228 | #endif | 111 | #endif |
| 229 | 112 | ||
| 230 | #define rwlock_init(lock) do { (void)(lock); } while(0) | 113 | #ifdef CONFIG_DEBUG_SPINLOCK |
| 231 | #define _raw_read_lock(lock) do { (void)(lock); } while(0) | 114 | extern void _raw_spin_lock(spinlock_t *lock); |
| 232 | #define _raw_read_unlock(lock) do { (void)(lock); } while(0) | 115 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) |
| 233 | #define _raw_write_lock(lock) do { (void)(lock); } while(0) | 116 | extern int _raw_spin_trylock(spinlock_t *lock); |
| 234 | #define _raw_write_unlock(lock) do { (void)(lock); } while(0) | 117 | extern void _raw_spin_unlock(spinlock_t *lock); |
| 235 | #define read_can_lock(lock) (((void)(lock), 1)) | 118 | |
| 236 | #define write_can_lock(lock) (((void)(lock), 1)) | 119 | extern void _raw_read_lock(rwlock_t *lock); |
| 237 | #define _raw_read_trylock(lock) ({ (void)(lock); (1); }) | 120 | extern int _raw_read_trylock(rwlock_t *lock); |
| 238 | #define _raw_write_trylock(lock) ({ (void)(lock); (1); }) | 121 | extern void _raw_read_unlock(rwlock_t *lock); |
| 239 | 122 | extern void _raw_write_lock(rwlock_t *lock); | |
| 240 | #define _spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? \ | 123 | extern int _raw_write_trylock(rwlock_t *lock); |
| 241 | 1 : ({preempt_enable(); 0;});}) | 124 | extern void _raw_write_unlock(rwlock_t *lock); |
| 242 | 125 | #else | |
| 243 | #define _read_trylock(lock) ({preempt_disable();_raw_read_trylock(lock) ? \ | 126 | # define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) |
| 244 | 1 : ({preempt_enable(); 0;});}) | 127 | # define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) |
| 245 | 128 | # define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) | |
| 246 | #define _write_trylock(lock) ({preempt_disable(); _raw_write_trylock(lock) ? \ | 129 | # define _raw_spin_lock_flags(lock, flags) \ |
| 247 | 1 : ({preempt_enable(); 0;});}) | 130 | __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) |
| 248 | 131 | # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) | |
| 249 | #define _spin_trylock_bh(lock) ({preempt_disable(); local_bh_disable(); \ | 132 | # define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) |
| 250 | _raw_spin_trylock(lock) ? \ | 133 | # define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) |
| 251 | 1 : ({preempt_enable_no_resched(); local_bh_enable(); 0;});}) | 134 | # define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) |
| 252 | 135 | # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) | |
| 253 | #define _spin_lock(lock) \ | 136 | # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) |
| 254 | do { \ | 137 | #endif |
| 255 | preempt_disable(); \ | ||
| 256 | _raw_spin_lock(lock); \ | ||
| 257 | __acquire(lock); \ | ||
| 258 | } while(0) | ||
| 259 | |||
| 260 | #define _write_lock(lock) \ | ||
| 261 | do { \ | ||
| 262 | preempt_disable(); \ | ||
| 263 | _raw_write_lock(lock); \ | ||
| 264 | __acquire(lock); \ | ||
| 265 | } while(0) | ||
| 266 | |||
| 267 | #define _read_lock(lock) \ | ||
| 268 | do { \ | ||
| 269 | preempt_disable(); \ | ||
| 270 | _raw_read_lock(lock); \ | ||
| 271 | __acquire(lock); \ | ||
| 272 | } while(0) | ||
| 273 | |||
| 274 | #define _spin_unlock(lock) \ | ||
| 275 | do { \ | ||
| 276 | _raw_spin_unlock(lock); \ | ||
| 277 | preempt_enable(); \ | ||
| 278 | __release(lock); \ | ||
| 279 | } while (0) | ||
| 280 | |||
| 281 | #define _write_unlock(lock) \ | ||
| 282 | do { \ | ||
| 283 | _raw_write_unlock(lock); \ | ||
| 284 | preempt_enable(); \ | ||
| 285 | __release(lock); \ | ||
| 286 | } while(0) | ||
| 287 | |||
| 288 | #define _read_unlock(lock) \ | ||
| 289 | do { \ | ||
| 290 | _raw_read_unlock(lock); \ | ||
| 291 | preempt_enable(); \ | ||
| 292 | __release(lock); \ | ||
| 293 | } while(0) | ||
| 294 | |||
| 295 | #define _spin_lock_irqsave(lock, flags) \ | ||
| 296 | do { \ | ||
| 297 | local_irq_save(flags); \ | ||
| 298 | preempt_disable(); \ | ||
| 299 | _raw_spin_lock(lock); \ | ||
| 300 | __acquire(lock); \ | ||
| 301 | } while (0) | ||
| 302 | |||
| 303 | #define _spin_lock_irq(lock) \ | ||
| 304 | do { \ | ||
| 305 | local_irq_disable(); \ | ||
| 306 | preempt_disable(); \ | ||
| 307 | _raw_spin_lock(lock); \ | ||
| 308 | __acquire(lock); \ | ||
| 309 | } while (0) | ||
| 310 | |||
| 311 | #define _spin_lock_bh(lock) \ | ||
| 312 | do { \ | ||
| 313 | local_bh_disable(); \ | ||
| 314 | preempt_disable(); \ | ||
| 315 | _raw_spin_lock(lock); \ | ||
| 316 | __acquire(lock); \ | ||
| 317 | } while (0) | ||
| 318 | |||
| 319 | #define _read_lock_irqsave(lock, flags) \ | ||
| 320 | do { \ | ||
| 321 | local_irq_save(flags); \ | ||
| 322 | preempt_disable(); \ | ||
| 323 | _raw_read_lock(lock); \ | ||
| 324 | __acquire(lock); \ | ||
| 325 | } while (0) | ||
| 326 | |||
| 327 | #define _read_lock_irq(lock) \ | ||
| 328 | do { \ | ||
| 329 | local_irq_disable(); \ | ||
| 330 | preempt_disable(); \ | ||
| 331 | _raw_read_lock(lock); \ | ||
| 332 | __acquire(lock); \ | ||
| 333 | } while (0) | ||
| 334 | |||
| 335 | #define _read_lock_bh(lock) \ | ||
| 336 | do { \ | ||
| 337 | local_bh_disable(); \ | ||
| 338 | preempt_disable(); \ | ||
| 339 | _raw_read_lock(lock); \ | ||
| 340 | __acquire(lock); \ | ||
| 341 | } while (0) | ||
| 342 | |||
| 343 | #define _write_lock_irqsave(lock, flags) \ | ||
| 344 | do { \ | ||
| 345 | local_irq_save(flags); \ | ||
| 346 | preempt_disable(); \ | ||
| 347 | _raw_write_lock(lock); \ | ||
| 348 | __acquire(lock); \ | ||
| 349 | } while (0) | ||
| 350 | 138 | ||
| 351 | #define _write_lock_irq(lock) \ | 139 | #define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) |
| 352 | do { \ | 140 | #define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) |
| 353 | local_irq_disable(); \ | ||
| 354 | preempt_disable(); \ | ||
| 355 | _raw_write_lock(lock); \ | ||
| 356 | __acquire(lock); \ | ||
| 357 | } while (0) | ||
| 358 | |||
| 359 | #define _write_lock_bh(lock) \ | ||
| 360 | do { \ | ||
| 361 | local_bh_disable(); \ | ||
| 362 | preempt_disable(); \ | ||
| 363 | _raw_write_lock(lock); \ | ||
| 364 | __acquire(lock); \ | ||
| 365 | } while (0) | ||
| 366 | |||
| 367 | #define _spin_unlock_irqrestore(lock, flags) \ | ||
| 368 | do { \ | ||
| 369 | _raw_spin_unlock(lock); \ | ||
| 370 | local_irq_restore(flags); \ | ||
| 371 | preempt_enable(); \ | ||
| 372 | __release(lock); \ | ||
| 373 | } while (0) | ||
| 374 | |||
| 375 | #define _spin_unlock_irq(lock) \ | ||
| 376 | do { \ | ||
| 377 | _raw_spin_unlock(lock); \ | ||
| 378 | local_irq_enable(); \ | ||
| 379 | preempt_enable(); \ | ||
| 380 | __release(lock); \ | ||
| 381 | } while (0) | ||
| 382 | |||
| 383 | #define _spin_unlock_bh(lock) \ | ||
| 384 | do { \ | ||
| 385 | _raw_spin_unlock(lock); \ | ||
| 386 | preempt_enable_no_resched(); \ | ||
| 387 | local_bh_enable(); \ | ||
| 388 | __release(lock); \ | ||
| 389 | } while (0) | ||
| 390 | |||
| 391 | #define _write_unlock_bh(lock) \ | ||
| 392 | do { \ | ||
| 393 | _raw_write_unlock(lock); \ | ||
| 394 | preempt_enable_no_resched(); \ | ||
| 395 | local_bh_enable(); \ | ||
| 396 | __release(lock); \ | ||
| 397 | } while (0) | ||
| 398 | |||
| 399 | #define _read_unlock_irqrestore(lock, flags) \ | ||
| 400 | do { \ | ||
| 401 | _raw_read_unlock(lock); \ | ||
| 402 | local_irq_restore(flags); \ | ||
| 403 | preempt_enable(); \ | ||
| 404 | __release(lock); \ | ||
| 405 | } while (0) | ||
| 406 | |||
| 407 | #define _write_unlock_irqrestore(lock, flags) \ | ||
| 408 | do { \ | ||
| 409 | _raw_write_unlock(lock); \ | ||
| 410 | local_irq_restore(flags); \ | ||
| 411 | preempt_enable(); \ | ||
| 412 | __release(lock); \ | ||
| 413 | } while (0) | ||
| 414 | |||
| 415 | #define _read_unlock_irq(lock) \ | ||
| 416 | do { \ | ||
| 417 | _raw_read_unlock(lock); \ | ||
| 418 | local_irq_enable(); \ | ||
| 419 | preempt_enable(); \ | ||
| 420 | __release(lock); \ | ||
| 421 | } while (0) | ||
| 422 | |||
| 423 | #define _read_unlock_bh(lock) \ | ||
| 424 | do { \ | ||
| 425 | _raw_read_unlock(lock); \ | ||
| 426 | preempt_enable_no_resched(); \ | ||
| 427 | local_bh_enable(); \ | ||
| 428 | __release(lock); \ | ||
| 429 | } while (0) | ||
| 430 | |||
| 431 | #define _write_unlock_irq(lock) \ | ||
| 432 | do { \ | ||
| 433 | _raw_write_unlock(lock); \ | ||
| 434 | local_irq_enable(); \ | ||
| 435 | preempt_enable(); \ | ||
| 436 | __release(lock); \ | ||
| 437 | } while (0) | ||
| 438 | |||
| 439 | #endif /* !SMP */ | ||
| 440 | 141 | ||
| 441 | /* | 142 | /* |
| 442 | * Define the various spin_lock and rw_lock methods. Note we define these | 143 | * Define the various spin_lock and rw_lock methods. Note we define these |
| 443 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various | 144 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various |
| 444 | * methods are defined as nops in the case they are not required. | 145 | * methods are defined as nops in the case they are not required. |
| 445 | */ | 146 | */ |
| 446 | #define spin_trylock(lock) __cond_lock(_spin_trylock(lock)) | 147 | #define spin_trylock(lock) __cond_lock(_spin_trylock(lock)) |
| 447 | #define read_trylock(lock) __cond_lock(_read_trylock(lock)) | 148 | #define read_trylock(lock) __cond_lock(_read_trylock(lock)) |
| 448 | #define write_trylock(lock) __cond_lock(_write_trylock(lock)) | 149 | #define write_trylock(lock) __cond_lock(_write_trylock(lock)) |
| 449 | 150 | ||
| 450 | #define spin_lock(lock) _spin_lock(lock) | 151 | #define spin_lock(lock) _spin_lock(lock) |
| 451 | #define write_lock(lock) _write_lock(lock) | 152 | #define write_lock(lock) _write_lock(lock) |
| 452 | #define read_lock(lock) _read_lock(lock) | 153 | #define read_lock(lock) _read_lock(lock) |
| 453 | 154 | ||
| 454 | #ifdef CONFIG_SMP | 155 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
| 455 | #define spin_lock_irqsave(lock, flags) flags = _spin_lock_irqsave(lock) | 156 | #define spin_lock_irqsave(lock, flags) flags = _spin_lock_irqsave(lock) |
| 456 | #define read_lock_irqsave(lock, flags) flags = _read_lock_irqsave(lock) | 157 | #define read_lock_irqsave(lock, flags) flags = _read_lock_irqsave(lock) |
| 457 | #define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock) | 158 | #define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock) |
| @@ -470,137 +171,59 @@ do { \ | |||
| 470 | #define write_lock_irq(lock) _write_lock_irq(lock) | 171 | #define write_lock_irq(lock) _write_lock_irq(lock) |
| 471 | #define write_lock_bh(lock) _write_lock_bh(lock) | 172 | #define write_lock_bh(lock) _write_lock_bh(lock) |
| 472 | 173 | ||
| 473 | #define spin_unlock(lock) _spin_unlock(lock) | 174 | #define spin_unlock(lock) _spin_unlock(lock) |
| 474 | #define write_unlock(lock) _write_unlock(lock) | 175 | #define write_unlock(lock) _write_unlock(lock) |
| 475 | #define read_unlock(lock) _read_unlock(lock) | 176 | #define read_unlock(lock) _read_unlock(lock) |
| 476 | 177 | ||
| 477 | #define spin_unlock_irqrestore(lock, flags) _spin_unlock_irqrestore(lock, flags) | 178 | #define spin_unlock_irqrestore(lock, flags) \ |
| 179 | _spin_unlock_irqrestore(lock, flags) | ||
| 478 | #define spin_unlock_irq(lock) _spin_unlock_irq(lock) | 180 | #define spin_unlock_irq(lock) _spin_unlock_irq(lock) |
| 479 | #define spin_unlock_bh(lock) _spin_unlock_bh(lock) | 181 | #define spin_unlock_bh(lock) _spin_unlock_bh(lock) |
| 480 | 182 | ||
| 481 | #define read_unlock_irqrestore(lock, flags) _read_unlock_irqrestore(lock, flags) | 183 | #define read_unlock_irqrestore(lock, flags) \ |
| 482 | #define read_unlock_irq(lock) _read_unlock_irq(lock) | 184 | _read_unlock_irqrestore(lock, flags) |
| 483 | #define read_unlock_bh(lock) _read_unlock_bh(lock) | 185 | #define read_unlock_irq(lock) _read_unlock_irq(lock) |
| 186 | #define read_unlock_bh(lock) _read_unlock_bh(lock) | ||
| 484 | 187 | ||
| 485 | #define write_unlock_irqrestore(lock, flags) _write_unlock_irqrestore(lock, flags) | 188 | #define write_unlock_irqrestore(lock, flags) \ |
| 486 | #define write_unlock_irq(lock) _write_unlock_irq(lock) | 189 | _write_unlock_irqrestore(lock, flags) |
| 487 | #define write_unlock_bh(lock) _write_unlock_bh(lock) | 190 | #define write_unlock_irq(lock) _write_unlock_irq(lock) |
| 191 | #define write_unlock_bh(lock) _write_unlock_bh(lock) | ||
| 488 | 192 | ||
| 489 | #define spin_trylock_bh(lock) __cond_lock(_spin_trylock_bh(lock)) | 193 | #define spin_trylock_bh(lock) __cond_lock(_spin_trylock_bh(lock)) |
| 490 | 194 | ||
| 491 | #define spin_trylock_irq(lock) \ | 195 | #define spin_trylock_irq(lock) \ |
| 492 | ({ \ | 196 | ({ \ |
| 493 | local_irq_disable(); \ | 197 | local_irq_disable(); \ |
| 494 | _spin_trylock(lock) ? \ | 198 | _spin_trylock(lock) ? \ |
| 495 | 1 : ({local_irq_enable(); 0; }); \ | 199 | 1 : ({ local_irq_enable(); 0; }); \ |
| 496 | }) | 200 | }) |
| 497 | 201 | ||
| 498 | #define spin_trylock_irqsave(lock, flags) \ | 202 | #define spin_trylock_irqsave(lock, flags) \ |
| 499 | ({ \ | 203 | ({ \ |
| 500 | local_irq_save(flags); \ | 204 | local_irq_save(flags); \ |
| 501 | _spin_trylock(lock) ? \ | 205 | _spin_trylock(lock) ? \ |
| 502 | 1 : ({local_irq_restore(flags); 0;}); \ | 206 | 1 : ({ local_irq_restore(flags); 0; }); \ |
| 503 | }) | 207 | }) |
| 504 | 208 | ||
| 505 | #ifdef CONFIG_LOCKMETER | ||
| 506 | extern void _metered_spin_lock (spinlock_t *lock); | ||
| 507 | extern void _metered_spin_unlock (spinlock_t *lock); | ||
| 508 | extern int _metered_spin_trylock(spinlock_t *lock); | ||
| 509 | extern void _metered_read_lock (rwlock_t *lock); | ||
| 510 | extern void _metered_read_unlock (rwlock_t *lock); | ||
| 511 | extern void _metered_write_lock (rwlock_t *lock); | ||
| 512 | extern void _metered_write_unlock (rwlock_t *lock); | ||
| 513 | extern int _metered_read_trylock (rwlock_t *lock); | ||
| 514 | extern int _metered_write_trylock(rwlock_t *lock); | ||
| 515 | #endif | ||
| 516 | |||
| 517 | /* "lock on reference count zero" */ | ||
| 518 | #ifndef ATOMIC_DEC_AND_LOCK | ||
| 519 | #include <asm/atomic.h> | ||
| 520 | extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); | ||
| 521 | #endif | ||
| 522 | |||
| 523 | #define atomic_dec_and_lock(atomic,lock) __cond_lock(_atomic_dec_and_lock(atomic,lock)) | ||
| 524 | |||
| 525 | /* | ||
| 526 | * bit-based spin_lock() | ||
| 527 | * | ||
| 528 | * Don't use this unless you really need to: spin_lock() and spin_unlock() | ||
| 529 | * are significantly faster. | ||
| 530 | */ | ||
| 531 | static inline void bit_spin_lock(int bitnum, unsigned long *addr) | ||
| 532 | { | ||
| 533 | /* | ||
| 534 | * Assuming the lock is uncontended, this never enters | ||
| 535 | * the body of the outer loop. If it is contended, then | ||
| 536 | * within the inner loop a non-atomic test is used to | ||
| 537 | * busywait with less bus contention for a good time to | ||
| 538 | * attempt to acquire the lock bit. | ||
| 539 | */ | ||
| 540 | preempt_disable(); | ||
| 541 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
| 542 | while (test_and_set_bit(bitnum, addr)) { | ||
| 543 | while (test_bit(bitnum, addr)) { | ||
| 544 | preempt_enable(); | ||
| 545 | cpu_relax(); | ||
| 546 | preempt_disable(); | ||
| 547 | } | ||
| 548 | } | ||
| 549 | #endif | ||
| 550 | __acquire(bitlock); | ||
| 551 | } | ||
| 552 | |||
| 553 | /* | 209 | /* |
| 554 | * Return true if it was acquired | 210 | * Pull the atomic_t declaration: |
| 211 | * (asm-mips/atomic.h needs above definitions) | ||
| 555 | */ | 212 | */ |
| 556 | static inline int bit_spin_trylock(int bitnum, unsigned long *addr) | 213 | #include <asm/atomic.h> |
| 557 | { | 214 | /** |
| 558 | preempt_disable(); | 215 | * atomic_dec_and_lock - lock on reaching reference count zero |
| 559 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | 216 | * @atomic: the atomic counter |
| 560 | if (test_and_set_bit(bitnum, addr)) { | 217 | * @lock: the spinlock in question |
| 561 | preempt_enable(); | ||
| 562 | return 0; | ||
| 563 | } | ||
| 564 | #endif | ||
| 565 | __acquire(bitlock); | ||
| 566 | return 1; | ||
| 567 | } | ||
| 568 | |||
| 569 | /* | ||
| 570 | * bit-based spin_unlock() | ||
| 571 | */ | ||
| 572 | static inline void bit_spin_unlock(int bitnum, unsigned long *addr) | ||
| 573 | { | ||
| 574 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
| 575 | BUG_ON(!test_bit(bitnum, addr)); | ||
| 576 | smp_mb__before_clear_bit(); | ||
| 577 | clear_bit(bitnum, addr); | ||
| 578 | #endif | ||
| 579 | preempt_enable(); | ||
| 580 | __release(bitlock); | ||
| 581 | } | ||
| 582 | |||
| 583 | /* | ||
| 584 | * Return true if the lock is held. | ||
| 585 | */ | 218 | */ |
| 586 | static inline int bit_spin_is_locked(int bitnum, unsigned long *addr) | 219 | extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); |
| 587 | { | 220 | #define atomic_dec_and_lock(atomic, lock) \ |
| 588 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | 221 | __cond_lock(_atomic_dec_and_lock(atomic, lock)) |
| 589 | return test_bit(bitnum, addr); | ||
| 590 | #elif defined CONFIG_PREEMPT | ||
| 591 | return preempt_count(); | ||
| 592 | #else | ||
| 593 | return 1; | ||
| 594 | #endif | ||
| 595 | } | ||
| 596 | |||
| 597 | #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED | ||
| 598 | #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED | ||
| 599 | 222 | ||
| 600 | /** | 223 | /** |
| 601 | * spin_can_lock - would spin_trylock() succeed? | 224 | * spin_can_lock - would spin_trylock() succeed? |
| 602 | * @lock: the spinlock in question. | 225 | * @lock: the spinlock in question. |
| 603 | */ | 226 | */ |
| 604 | #define spin_can_lock(lock) (!spin_is_locked(lock)) | 227 | #define spin_can_lock(lock) (!spin_is_locked(lock)) |
| 605 | 228 | ||
| 606 | #endif /* __LINUX_SPINLOCK_H */ | 229 | #endif /* __LINUX_SPINLOCK_H */ |
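[Note: none of this refactoring changes the caller-visible API — code keeps using spin_lock()/spin_unlock() and friends on spinlock_t, which now wraps a raw_spinlock_t. A minimal sketch of unchanged usage; `my_lock` and `example` are illustrative names:

    static DEFINE_SPINLOCK(my_lock);

    static void example(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&my_lock, flags);    /* now maps onto the __raw_* lock ops */
            /* ... critical section ... */
            spin_unlock_irqrestore(&my_lock, flags);
    }
]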
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h new file mode 100644 index 000000000000..78e6989ffb54 --- /dev/null +++ b/include/linux/spinlock_api_smp.h | |||
| @@ -0,0 +1,57 @@ | |||
| 1 | #ifndef __LINUX_SPINLOCK_API_SMP_H | ||
| 2 | #define __LINUX_SPINLOCK_API_SMP_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | /* | ||
| 9 | * include/linux/spinlock_api_smp.h | ||
| 10 | * | ||
| 11 | * spinlock API declarations on SMP (and debug) | ||
| 12 | * (implemented in kernel/spinlock.c) | ||
| 13 | * | ||
| 14 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
| 15 | * Released under the General Public License (GPL). | ||
| 16 | */ | ||
| 17 | |||
| 18 | int in_lock_functions(unsigned long addr); | ||
| 19 | |||
| 20 | #define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) | ||
| 21 | |||
| 22 | void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t); | ||
| 23 | void __lockfunc _read_lock(rwlock_t *lock) __acquires(rwlock_t); | ||
| 24 | void __lockfunc _write_lock(rwlock_t *lock) __acquires(rwlock_t); | ||
| 25 | void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(spinlock_t); | ||
| 26 | void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(rwlock_t); | ||
| 27 | void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(rwlock_t); | ||
| 28 | void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(spinlock_t); | ||
| 29 | void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(rwlock_t); | ||
| 30 | void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(rwlock_t); | ||
| 31 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) | ||
| 32 | __acquires(spinlock_t); | ||
| 33 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | ||
| 34 | __acquires(rwlock_t); | ||
| 35 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | ||
| 36 | __acquires(rwlock_t); | ||
| 37 | int __lockfunc _spin_trylock(spinlock_t *lock); | ||
| 38 | int __lockfunc _read_trylock(rwlock_t *lock); | ||
| 39 | int __lockfunc _write_trylock(rwlock_t *lock); | ||
| 40 | int __lockfunc _spin_trylock_bh(spinlock_t *lock); | ||
| 41 | void __lockfunc _spin_unlock(spinlock_t *lock) __releases(spinlock_t); | ||
| 42 | void __lockfunc _read_unlock(rwlock_t *lock) __releases(rwlock_t); | ||
| 43 | void __lockfunc _write_unlock(rwlock_t *lock) __releases(rwlock_t); | ||
| 44 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(spinlock_t); | ||
| 45 | void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(rwlock_t); | ||
| 46 | void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(rwlock_t); | ||
| 47 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(spinlock_t); | ||
| 48 | void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(rwlock_t); | ||
| 49 | void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(rwlock_t); | ||
| 50 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | ||
| 51 | __releases(spinlock_t); | ||
| 52 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
| 53 | __releases(rwlock_t); | ||
| 54 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
| 55 | __releases(rwlock_t); | ||
| 56 | |||
| 57 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ | ||
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h new file mode 100644 index 000000000000..cd81cee566f4 --- /dev/null +++ b/include/linux/spinlock_api_up.h | |||
| @@ -0,0 +1,80 @@ | |||
| 1 | #ifndef __LINUX_SPINLOCK_API_UP_H | ||
| 2 | #define __LINUX_SPINLOCK_API_UP_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | /* | ||
| 9 | * include/linux/spinlock_api_up.h | ||
| 10 | * | ||
| 11 | * spinlock API implementation on UP-nondebug (inlined implementation) | ||
| 12 | * | ||
| 13 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
| 14 | * Released under the General Public License (GPL). | ||
| 15 | */ | ||
| 16 | |||
| 17 | #define in_lock_functions(ADDR) 0 | ||
| 18 | |||
| 19 | #define assert_spin_locked(lock) do { (void)(lock); } while (0) | ||
| 20 | |||
| 21 | /* | ||
| 22 | * In the UP-nondebug case there's no real locking going on, so the | ||
| 23 | * only thing we have to do is to keep the preempt counts and irq | ||
| 24 | * flags straight, to suppress compiler warnings of unused lock | ||
| 25 | * variables, and to add the proper checker annotations: | ||
| 26 | */ | ||
| 27 | #define __LOCK(lock) \ | ||
| 28 | do { preempt_disable(); __acquire(lock); (void)(lock); } while (0) | ||
| 29 | |||
| 30 | #define __LOCK_BH(lock) \ | ||
| 31 | do { local_bh_disable(); __LOCK(lock); } while (0) | ||
| 32 | |||
| 33 | #define __LOCK_IRQ(lock) \ | ||
| 34 | do { local_irq_disable(); __LOCK(lock); } while (0) | ||
| 35 | |||
| 36 | #define __LOCK_IRQSAVE(lock, flags) \ | ||
| 37 | do { local_irq_save(flags); __LOCK(lock); } while (0) | ||
| 38 | |||
| 39 | #define __UNLOCK(lock) \ | ||
| 40 | do { preempt_enable(); __release(lock); (void)(lock); } while (0) | ||
| 41 | |||
| 42 | #define __UNLOCK_BH(lock) \ | ||
| 43 | do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0) | ||
| 44 | |||
| 45 | #define __UNLOCK_IRQ(lock) \ | ||
| 46 | do { local_irq_enable(); __UNLOCK(lock); } while (0) | ||
| 47 | |||
| 48 | #define __UNLOCK_IRQRESTORE(lock, flags) \ | ||
| 49 | do { local_irq_restore(flags); __UNLOCK(lock); } while (0) | ||
| 50 | |||
| 51 | #define _spin_lock(lock) __LOCK(lock) | ||
| 52 | #define _read_lock(lock) __LOCK(lock) | ||
| 53 | #define _write_lock(lock) __LOCK(lock) | ||
| 54 | #define _spin_lock_bh(lock) __LOCK_BH(lock) | ||
| 55 | #define _read_lock_bh(lock) __LOCK_BH(lock) | ||
| 56 | #define _write_lock_bh(lock) __LOCK_BH(lock) | ||
| 57 | #define _spin_lock_irq(lock) __LOCK_IRQ(lock) | ||
| 58 | #define _read_lock_irq(lock) __LOCK_IRQ(lock) | ||
| 59 | #define _write_lock_irq(lock) __LOCK_IRQ(lock) | ||
| 60 | #define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) | ||
| 61 | #define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) | ||
| 62 | #define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) | ||
| 63 | #define _spin_trylock(lock) ({ __LOCK(lock); 1; }) | ||
| 64 | #define _read_trylock(lock) ({ __LOCK(lock); 1; }) | ||
| 65 | #define _write_trylock(lock) ({ __LOCK(lock); 1; }) | ||
| 66 | #define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) | ||
| 67 | #define _spin_unlock(lock) __UNLOCK(lock) | ||
| 68 | #define _read_unlock(lock) __UNLOCK(lock) | ||
| 69 | #define _write_unlock(lock) __UNLOCK(lock) | ||
| 70 | #define _spin_unlock_bh(lock) __UNLOCK_BH(lock) | ||
| 71 | #define _write_unlock_bh(lock) __UNLOCK_BH(lock) | ||
| 72 | #define _read_unlock_bh(lock) __UNLOCK_BH(lock) | ||
| 73 | #define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock) | ||
| 74 | #define _read_unlock_irq(lock) __UNLOCK_IRQ(lock) | ||
| 75 | #define _write_unlock_irq(lock) __UNLOCK_IRQ(lock) | ||
| 76 | #define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) | ||
| 77 | #define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) | ||
| 78 | #define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) | ||
| 79 | |||
| 80 | #endif /* __LINUX_SPINLOCK_API_UP_H */ | ||
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h new file mode 100644 index 000000000000..9cb51e070390 --- /dev/null +++ b/include/linux/spinlock_types.h | |||
| @@ -0,0 +1,67 @@ | |||
| 1 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 2 | #define __LINUX_SPINLOCK_TYPES_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * include/linux/spinlock_types.h - generic spinlock type definitions | ||
| 6 | * and initializers | ||
| 7 | * | ||
| 8 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
| 9 | * Released under the General Public License (GPL). | ||
| 10 | */ | ||
| 11 | |||
| 12 | #if defined(CONFIG_SMP) | ||
| 13 | # include <asm/spinlock_types.h> | ||
| 14 | #else | ||
| 15 | # include <linux/spinlock_types_up.h> | ||
| 16 | #endif | ||
| 17 | |||
| 18 | typedef struct { | ||
| 19 | raw_spinlock_t raw_lock; | ||
| 20 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) | ||
| 21 | unsigned int break_lock; | ||
| 22 | #endif | ||
| 23 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 24 | unsigned int magic, owner_cpu; | ||
| 25 | void *owner; | ||
| 26 | #endif | ||
| 27 | } spinlock_t; | ||
| 28 | |||
| 29 | #define SPINLOCK_MAGIC 0xdead4ead | ||
| 30 | |||
| 31 | typedef struct { | ||
| 32 | raw_rwlock_t raw_lock; | ||
| 33 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) | ||
| 34 | unsigned int break_lock; | ||
| 35 | #endif | ||
| 36 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 37 | unsigned int magic, owner_cpu; | ||
| 38 | void *owner; | ||
| 39 | #endif | ||
| 40 | } rwlock_t; | ||
| 41 | |||
| 42 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
| 43 | |||
| 44 | #define SPINLOCK_OWNER_INIT ((void *)-1L) | ||
| 45 | |||
| 46 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 47 | # define SPIN_LOCK_UNLOCKED \ | ||
| 48 | (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ | ||
| 49 | .magic = SPINLOCK_MAGIC, \ | ||
| 50 | .owner = SPINLOCK_OWNER_INIT, \ | ||
| 51 | .owner_cpu = -1 } | ||
| 52 | #define RW_LOCK_UNLOCKED \ | ||
| 53 | (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ | ||
| 54 | .magic = RWLOCK_MAGIC, \ | ||
| 55 | .owner = SPINLOCK_OWNER_INIT, \ | ||
| 56 | .owner_cpu = -1 } | ||
| 57 | #else | ||
| 58 | # define SPIN_LOCK_UNLOCKED \ | ||
| 59 | (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED } | ||
| 60 | #define RW_LOCK_UNLOCKED \ | ||
| 61 | (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED } | ||
| 62 | #endif | ||
| 63 | |||
| 64 | #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED | ||
| 65 | #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED | ||
| 66 | |||
| 67 | #endif /* __LINUX_SPINLOCK_TYPES_H */ | ||
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h new file mode 100644 index 000000000000..def2d173a8db --- /dev/null +++ b/include/linux/spinlock_types_up.h | |||
| @@ -0,0 +1,51 @@ | |||
| 1 | #ifndef __LINUX_SPINLOCK_TYPES_UP_H | ||
| 2 | #define __LINUX_SPINLOCK_TYPES_UP_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | /* | ||
| 9 | * include/linux/spinlock_types_up.h - spinlock type definitions for UP | ||
| 10 | * | ||
| 11 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
| 12 | * Released under the General Public License (GPL). | ||
| 13 | */ | ||
| 14 | |||
| 15 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 16 | |||
| 17 | typedef struct { | ||
| 18 | volatile unsigned int slock; | ||
| 19 | } raw_spinlock_t; | ||
| 20 | |||
| 21 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
| 22 | |||
| 23 | #else | ||
| 24 | |||
| 25 | /* | ||
| 26 | * All gcc 2.95 versions and early versions of 2.96 have a nasty bug | ||
| 27 | * with empty initializers. | ||
| 28 | */ | ||
| 29 | #if (__GNUC__ > 2) | ||
| 30 | typedef struct { } raw_spinlock_t; | ||
| 31 | |||
| 32 | #define __RAW_SPIN_LOCK_UNLOCKED { } | ||
| 33 | #else | ||
| 34 | typedef struct { int gcc_is_buggy; } raw_spinlock_t; | ||
| 35 | #define __RAW_SPIN_LOCK_UNLOCKED (raw_spinlock_t) { 0 } | ||
| 36 | #endif | ||
| 37 | |||
| 38 | #endif | ||
| 39 | |||
| 40 | #if (__GNUC__ > 2) | ||
| 41 | typedef struct { | ||
| 42 | /* no debug version on UP */ | ||
| 43 | } raw_rwlock_t; | ||
| 44 | |||
| 45 | #define __RAW_RW_LOCK_UNLOCKED { } | ||
| 46 | #else | ||
| 47 | typedef struct { int gcc_is_buggy; } raw_rwlock_t; | ||
| 48 | #define __RAW_RW_LOCK_UNLOCKED (raw_rwlock_t) { 0 } | ||
| 49 | #endif | ||
| 50 | |||
| 51 | #endif /* __LINUX_SPINLOCK_TYPES_UP_H */ | ||
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h new file mode 100644 index 000000000000..31accf2f0b13 --- /dev/null +++ b/include/linux/spinlock_up.h | |||
| @@ -0,0 +1,74 @@ | |||
| 1 | #ifndef __LINUX_SPINLOCK_UP_H | ||
| 2 | #define __LINUX_SPINLOCK_UP_H | ||
| 3 | |||
| 4 | #ifndef __LINUX_SPINLOCK_H | ||
| 5 | # error "please don't include this file directly" | ||
| 6 | #endif | ||
| 7 | |||
| 8 | /* | ||
| 9 | * include/linux/spinlock_up.h - UP-debug version of spinlocks. | ||
| 10 | * | ||
| 11 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
| 12 | * Released under the General Public License (GPL). | ||
| 13 | * | ||
| 14 | * In the debug case, 1 means unlocked, 0 means locked. (the values | ||
| 15 | * are inverted, to catch initialization bugs) | ||
| 16 | * | ||
| 17 | * No atomicity anywhere, we are on UP. | ||
| 18 | */ | ||
| 19 | |||
| 20 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 21 | |||
| 22 | #define __raw_spin_is_locked(x) ((x)->slock == 0) | ||
| 23 | |||
| 24 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | ||
| 25 | { | ||
| 26 | lock->slock = 0; | ||
| 27 | } | ||
| 28 | |||
| 29 | static inline void | ||
| 30 | __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | ||
| 31 | { | ||
| 32 | local_irq_save(flags); | ||
| 33 | lock->slock = 0; | ||
| 34 | } | ||
| 35 | |||
| 36 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | ||
| 37 | { | ||
| 38 | char oldval = lock->slock; | ||
| 39 | |||
| 40 | lock->slock = 0; | ||
| 41 | |||
| 42 | return oldval > 0; | ||
| 43 | } | ||
| 44 | |||
| 45 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | ||
| 46 | { | ||
| 47 | lock->slock = 1; | ||
| 48 | } | ||
| 49 | |||
| 50 | /* | ||
| 51 | * Read-write spinlocks. No debug version. | ||
| 52 | */ | ||
| 53 | #define __raw_read_lock(lock) do { (void)(lock); } while (0) | ||
| 54 | #define __raw_write_lock(lock) do { (void)(lock); } while (0) | ||
| 55 | #define __raw_read_trylock(lock) ({ (void)(lock); 1; }) | ||
| 56 | #define __raw_write_trylock(lock) ({ (void)(lock); 1; }) | ||
| 57 | #define __raw_read_unlock(lock) do { (void)(lock); } while (0) | ||
| 58 | #define __raw_write_unlock(lock) do { (void)(lock); } while (0) | ||
| 59 | |||
| 60 | #else /* DEBUG_SPINLOCK */ | ||
| 61 | #define __raw_spin_is_locked(lock) ((void)(lock), 0) | ||
| 62 | /* for sched.c and kernel_lock.c: */ | ||
| 63 | # define __raw_spin_lock(lock) do { (void)(lock); } while (0) | ||
| 64 | # define __raw_spin_unlock(lock) do { (void)(lock); } while (0) | ||
| 65 | # define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) | ||
| 66 | #endif /* DEBUG_SPINLOCK */ | ||
| 67 | |||
| 68 | #define __raw_read_can_lock(lock) (((void)(lock), 1)) | ||
| 69 | #define __raw_write_can_lock(lock) (((void)(lock), 1)) | ||
| 70 | |||
| 71 | #define __raw_spin_unlock_wait(lock) \ | ||
| 72 | do { cpu_relax(); } while (__raw_spin_is_locked(lock)) | ||
| 73 | |||
| 74 | #endif /* __LINUX_SPINLOCK_UP_H */ | ||
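The inverted encoding (1 = unlocked, 0 = locked) pays off because static kernel data starts out zeroed: a lock that was never run through an initializer therefore reads as "locked", and the mistake surfaces on first use instead of being silently accepted. A hypothetical user-space illustration (names are made up):

    #include <assert.h>

    typedef struct { volatile unsigned int slock; } demo_spinlock_t;

    static demo_spinlock_t forgotten;        /* zeroed BSS => reads "locked" */
    static demo_spinlock_t ready = { 1 };    /* properly initialized */

    static int demo_is_locked(demo_spinlock_t *l) { return l->slock == 0; }

    int main(void)
    {
        assert(demo_is_locked(&forgotten));  /* missing init is now visible */
        assert(!demo_is_locked(&ready));
        return 0;
    }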
diff --git a/include/linux/time.h b/include/linux/time.h index c10d4c21c183..8e83f4e778bb 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
| @@ -28,17 +28,10 @@ struct timezone { | |||
| 28 | #ifdef __KERNEL__ | 28 | #ifdef __KERNEL__ |
| 29 | 29 | ||
| 30 | /* Parameters used to convert the timespec values */ | 30 | /* Parameters used to convert the timespec values */ |
| 31 | #ifndef USEC_PER_SEC | 31 | #define MSEC_PER_SEC (1000L) |
| 32 | #define USEC_PER_SEC (1000000L) | 32 | #define USEC_PER_SEC (1000000L) |
| 33 | #endif | ||
| 34 | |||
| 35 | #ifndef NSEC_PER_SEC | ||
| 36 | #define NSEC_PER_SEC (1000000000L) | 33 | #define NSEC_PER_SEC (1000000000L) |
| 37 | #endif | ||
| 38 | |||
| 39 | #ifndef NSEC_PER_USEC | ||
| 40 | #define NSEC_PER_USEC (1000L) | 34 | #define NSEC_PER_USEC (1000L) |
| 41 | #endif | ||
| 42 | 35 | ||
| 43 | static __inline__ int timespec_equal(struct timespec *a, struct timespec *b) | 36 | static __inline__ int timespec_equal(struct timespec *a, struct timespec *b) |
| 44 | { | 37 | { |
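With MSEC_PER_SEC now defined unconditionally alongside the other constants, conversions can be written in terms of the macros instead of bare literals. A demo-only helper under that assumption (not part of this patch); the nanosecond remainder is rounded up to the next millisecond:

    #include <time.h>

    #ifndef MSEC_PER_SEC                 /* values from include/linux/time.h */
    #define MSEC_PER_SEC (1000L)
    #define NSEC_PER_SEC (1000000000L)
    #endif

    /* 1000000L nanoseconds per millisecond, derived from the macros */
    #define DEMO_NSEC_PER_MSEC (NSEC_PER_SEC / MSEC_PER_SEC)

    static inline long demo_timespec_to_msecs(const struct timespec *ts)
    {
        return ts->tv_sec * MSEC_PER_SEC +
               (ts->tv_nsec + DEMO_NSEC_PER_MSEC - 1) / DEMO_NSEC_PER_MSEC;
    }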
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 542dbaee6512..343d883d69c5 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
| @@ -109,8 +109,6 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0); | |||
| 109 | int do_writepages(struct address_space *mapping, struct writeback_control *wbc); | 109 | int do_writepages(struct address_space *mapping, struct writeback_control *wbc); |
| 110 | int sync_page_range(struct inode *inode, struct address_space *mapping, | 110 | int sync_page_range(struct inode *inode, struct address_space *mapping, |
| 111 | loff_t pos, size_t count); | 111 | loff_t pos, size_t count); |
| 112 | int sync_page_range_nolock(struct inode *inode, struct address_space | ||
| 113 | *mapping, loff_t pos, size_t count); | ||
| 114 | 112 | ||
| 115 | /* pdflush.c */ | 113 | /* pdflush.c */ |
| 116 | extern int nr_pdflush_threads; /* Global so it can be exported to sysctl | 114 | extern int nr_pdflush_threads; /* Global so it can be exported to sysctl |
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 0acf245f441d..3a926011507b 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c | |||
| @@ -69,7 +69,7 @@ struct mqueue_inode_info { | |||
| 69 | 69 | ||
| 70 | struct sigevent notify; | 70 | struct sigevent notify; |
| 71 | pid_t notify_owner; | 71 | pid_t notify_owner; |
| 72 | struct user_struct *user; /* user who created, for accouting */ | 72 | struct user_struct *user; /* user who created, for accounting */ |
| 73 | struct sock *notify_sock; | 73 | struct sock *notify_sock; |
| 74 | struct sk_buff *notify_cookie; | 74 | struct sk_buff *notify_cookie; |
| 75 | 75 | ||
diff --git a/kernel/Makefile b/kernel/Makefile index 8d57a2f1226b..ff4dc02ce170 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
| @@ -12,6 +12,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ | |||
| 12 | obj-$(CONFIG_FUTEX) += futex.o | 12 | obj-$(CONFIG_FUTEX) += futex.o |
| 13 | obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o | 13 | obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o |
| 14 | obj-$(CONFIG_SMP) += cpu.o spinlock.o | 14 | obj-$(CONFIG_SMP) += cpu.o spinlock.o |
| 15 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o | ||
| 15 | obj-$(CONFIG_UID16) += uid16.o | 16 | obj-$(CONFIG_UID16) += uid16.o |
| 16 | obj-$(CONFIG_MODULES) += module.o | 17 | obj-$(CONFIG_MODULES) += module.o |
| 17 | obj-$(CONFIG_KALLSYMS) += kallsyms.o | 18 | obj-$(CONFIG_KALLSYMS) += kallsyms.o |
diff --git a/kernel/acct.c b/kernel/acct.c index f70e6027cca9..b756f527497e 100644 --- a/kernel/acct.c +++ b/kernel/acct.c | |||
| @@ -165,7 +165,7 @@ out: | |||
| 165 | } | 165 | } |
| 166 | 166 | ||
| 167 | /* | 167 | /* |
| 168 | * Close the old accouting file (if currently open) and then replace | 168 | * Close the old accounting file (if currently open) and then replace |
| 169 | * it with file (if non-NULL). | 169 | * it with file (if non-NULL). |
| 170 | * | 170 | * |
| 171 | * NOTE: acct_globals.lock MUST be held on entry and exit. | 171 | * NOTE: acct_globals.lock MUST be held on entry and exit. |
| @@ -199,11 +199,16 @@ static void acct_file_reopen(struct file *file) | |||
| 199 | } | 199 | } |
| 200 | } | 200 | } |
| 201 | 201 | ||
| 202 | /* | 202 | /** |
| 203 | * sys_acct() is the only system call needed to implement process | 203 | * sys_acct - enable/disable process accounting |
| 204 | * accounting. It takes the name of the file where accounting records | 204 | * @name: file name for accounting records or NULL to shut down accounting |
| 205 | * should be written. If the filename is NULL, accounting will be | 205 | * |
| 206 | * shutdown. | 206 | * Returns 0 for success or negative errno values for failure. |
| 207 | * | ||
| 208 | * sys_acct() is the only system call needed to implement process | ||
| 209 | * accounting. It takes the name of the file where accounting records | ||
| 210 | * should be written. If the filename is NULL, accounting will be | ||
| 211 | * shut down. | ||
| 207 | */ | 212 | */ |
| 208 | asmlinkage long sys_acct(const char __user *name) | 213 | asmlinkage long sys_acct(const char __user *name) |
| 209 | { | 214 | { |
| @@ -250,9 +255,12 @@ asmlinkage long sys_acct(const char __user *name) | |||
| 250 | return (0); | 255 | return (0); |
| 251 | } | 256 | } |
| 252 | 257 | ||
| 253 | /* | 258 | /** |
| 254 | * If the accouting is turned on for a file in the filesystem pointed | 259 | * acct_auto_close - turn off a filesystem's accounting if it is on |
| 255 | * to by sb, turn accouting off. | 260 | * @sb: super block for the filesystem |
| 261 | * | ||
| 262 | * If the accounting is turned on for a file in the filesystem pointed | ||
| 263 | * to by sb, turn accounting off. | ||
| 256 | */ | 264 | */ |
| 257 | void acct_auto_close(struct super_block *sb) | 265 | void acct_auto_close(struct super_block *sb) |
| 258 | { | 266 | { |
| @@ -503,8 +511,11 @@ static void do_acct_process(long exitcode, struct file *file) | |||
| 503 | set_fs(fs); | 511 | set_fs(fs); |
| 504 | } | 512 | } |
| 505 | 513 | ||
| 506 | /* | 514 | /** |
| 507 | * acct_process - now just a wrapper around do_acct_process | 515 | * acct_process - now just a wrapper around do_acct_process |
| 516 | * @exitcode: task exit code | ||
| 517 | * | ||
| 518 | * Handles process accounting for an exiting task. | ||
| 508 | */ | 519 | */ |
| 509 | void acct_process(long exitcode) | 520 | void acct_process(long exitcode) |
| 510 | { | 521 | { |
| @@ -530,9 +541,9 @@ void acct_process(long exitcode) | |||
| 530 | } | 541 | } |
| 531 | 542 | ||
| 532 | 543 | ||
| 533 | /* | 544 | /** |
| 534 | * acct_update_integrals | 545 | * acct_update_integrals - update mm integral fields in task_struct |
| 535 | * - update mm integral fields in task_struct | 546 | * @tsk: task_struct for accounting |
| 536 | */ | 547 | */ |
| 537 | void acct_update_integrals(struct task_struct *tsk) | 548 | void acct_update_integrals(struct task_struct *tsk) |
| 538 | { | 549 | { |
| @@ -547,9 +558,9 @@ void acct_update_integrals(struct task_struct *tsk) | |||
| 547 | } | 558 | } |
| 548 | } | 559 | } |
| 549 | 560 | ||
| 550 | /* | 561 | /** |
| 551 | * acct_clear_integrals | 562 | * acct_clear_integrals - clear the mm integral fields in task_struct |
| 552 | * - clear the mm integral fields in task_struct | 563 | * @tsk: task_struct whose accounting fields are cleared |
| 553 | */ | 564 | */ |
| 554 | void acct_clear_integrals(struct task_struct *tsk) | 565 | void acct_clear_integrals(struct task_struct *tsk) |
| 555 | { | 566 | { |
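The comment conversions above all follow the same kernel-doc layout: an opening /**, a "name - one-line purpose" header, one "@param:" line per argument, then free-form text including the return convention. A hypothetical stub showing the shape (demo_acct_write is not a real kernel function):

    /**
     * demo_acct_write - append one accounting record to @file
     * @file:     open accounting file, or NULL to do nothing
     * @exitcode: task exit code to record
     *
     * Free-form description goes here, after a blank line.
     *
     * Returns 0 for success or negative errno values for failure.
     */
    static int demo_acct_write(struct file *file, long exitcode)
    {
        return file ? 0 : -EBADF;       /* body is illustrative only */
    }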
diff --git a/kernel/compat.c b/kernel/compat.c index ddfcaaa86623..102296e21ea8 100644 --- a/kernel/compat.c +++ b/kernel/compat.c | |||
| @@ -48,8 +48,7 @@ static long compat_nanosleep_restart(struct restart_block *restart) | |||
| 48 | if (!time_after(expire, now)) | 48 | if (!time_after(expire, now)) |
| 49 | return 0; | 49 | return 0; |
| 50 | 50 | ||
| 51 | current->state = TASK_INTERRUPTIBLE; | 51 | expire = schedule_timeout_interruptible(expire - now); |
| 52 | expire = schedule_timeout(expire - now); | ||
| 53 | if (expire == 0) | 52 | if (expire == 0) |
| 54 | return 0; | 53 | return 0; |
| 55 | 54 | ||
| @@ -82,8 +81,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, | |||
| 82 | return -EINVAL; | 81 | return -EINVAL; |
| 83 | 82 | ||
| 84 | expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec); | 83 | expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec); |
| 85 | current->state = TASK_INTERRUPTIBLE; | 84 | expire = schedule_timeout_interruptible(expire); |
| 86 | expire = schedule_timeout(expire); | ||
| 87 | if (expire == 0) | 85 | if (expire == 0) |
| 88 | return 0; | 86 | return 0; |
| 89 | 87 | ||
| @@ -795,8 +793,7 @@ compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese, | |||
| 795 | recalc_sigpending(); | 793 | recalc_sigpending(); |
| 796 | spin_unlock_irq(¤t->sighand->siglock); | 794 | spin_unlock_irq(¤t->sighand->siglock); |
| 797 | 795 | ||
| 798 | current->state = TASK_INTERRUPTIBLE; | 796 | timeout = schedule_timeout_interruptible(timeout); |
| 799 | timeout = schedule_timeout(timeout); | ||
| 800 | 797 | ||
| 801 | spin_lock_irq(¤t->sighand->siglock); | 798 | spin_lock_irq(¤t->sighand->siglock); |
| 802 | sig = dequeue_signal(current, &s, &info); | 799 | sig = dequeue_signal(current, &s, &info); |
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 712d02029971..407b5f0a8c8e 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
| @@ -182,6 +182,37 @@ static struct super_block *cpuset_sb = NULL; | |||
| 182 | static DECLARE_MUTEX(cpuset_sem); | 182 | static DECLARE_MUTEX(cpuset_sem); |
| 183 | 183 | ||
| 184 | /* | 184 | /* |
| 185 | * The global cpuset semaphore cpuset_sem can be needed by the | ||
| 186 | * memory allocator to update a task's mems_allowed (see the calls | ||
| 187 | * to cpuset_update_current_mems_allowed()) or to walk up the | ||
| 188 | * cpuset hierarchy to find a mem_exclusive cpuset (see the calls | ||
| 189 | * to cpuset_excl_nodes_overlap()). | ||
| 190 | * | ||
| 191 | * But if the memory allocation is being done by cpuset.c code, it | ||
| 192 | * usually already holds cpuset_sem. Double tripping on a kernel | ||
| 193 | * semaphore deadlocks the current task, and any other task that | ||
| 194 | * subsequently tries to obtain the lock. | ||
| 195 | * | ||
| 196 | * Run all up's and down's on cpuset_sem through the following | ||
| 197 | * wrappers, which will detect this nested locking, and avoid | ||
| 198 | * deadlocking. | ||
| 199 | */ | ||
| 200 | |||
| 201 | static inline void cpuset_down(struct semaphore *psem) | ||
| 202 | { | ||
| 203 | if (current->cpuset_sem_nest_depth == 0) | ||
| 204 | down(psem); | ||
| 205 | current->cpuset_sem_nest_depth++; | ||
| 206 | } | ||
| 207 | |||
| 208 | static inline void cpuset_up(struct semaphore *psem) | ||
| 209 | { | ||
| 210 | current->cpuset_sem_nest_depth--; | ||
| 211 | if (current->cpuset_sem_nest_depth == 0) | ||
| 212 | up(psem); | ||
| 213 | } | ||
| 214 | |||
| 215 | /* | ||
| 185 | * A couple of forward declarations required, due to cyclic reference loop: | 216 | * A couple of forward declarations required, due to cyclic reference loop: |
| 186 | * cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file | 217 | * cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file |
| 187 | * -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir. | 218 | * -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir. |
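The wrapper pair needs no extra locking of its own because cpuset_sem_nest_depth is only ever read and written by current, so no other task can race on it. A user-space sketch of the same recursion-guard idea, with a thread-local counter standing in for the task field and a pthread mutex for the kernel semaphore (all names illustrative):

    #include <pthread.h>

    static pthread_mutex_t demo_sem = PTHREAD_MUTEX_INITIALIZER;
    static __thread int demo_nest_depth;    /* per-thread, like a task field */

    static void demo_down(void)
    {
        if (demo_nest_depth == 0)           /* only the outermost call locks */
            pthread_mutex_lock(&demo_sem);
        demo_nest_depth++;
    }

    static void demo_up(void)
    {
        demo_nest_depth--;
        if (demo_nest_depth == 0)           /* only the outermost call unlocks */
            pthread_mutex_unlock(&demo_sem);
    }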
| @@ -522,19 +553,10 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) | |||
| 522 | * Refresh current task's mems_allowed and mems_generation from | 553 | * Refresh current task's mems_allowed and mems_generation from |
| 523 | * current task's cpuset. Call with cpuset_sem held. | 554 | * current task's cpuset. Call with cpuset_sem held. |
| 524 | * | 555 | * |
| 525 | * Be sure to call refresh_mems() on any cpuset operation which | 556 | * This routine is needed to update the per-task mems_allowed |
| 526 | * (1) holds cpuset_sem, and (2) might possibly alloc memory. | 557 | * data, within the task's context, when it is trying to allocate |
| 527 | * Call after obtaining cpuset_sem lock, before any possible | 558 | * memory (in various mm/mempolicy.c routines) and notices |
| 528 | * allocation. Otherwise one risks trying to allocate memory | 559 | * that some other task has been modifying its cpuset. |
| 529 | * while the task cpuset_mems_generation is not the same as | ||
| 530 | * the mems_generation in its cpuset, which would deadlock on | ||
| 531 | * cpuset_sem in cpuset_update_current_mems_allowed(). | ||
| 532 | * | ||
| 533 | * Since we hold cpuset_sem, once refresh_mems() is called, the | ||
| 534 | * test (current->cpuset_mems_generation != cs->mems_generation) | ||
| 535 | * in cpuset_update_current_mems_allowed() will remain false, | ||
| 536 | * until we drop cpuset_sem. Anyone else who would change our | ||
| 537 | * cpusets mems_generation needs to lock cpuset_sem first. | ||
| 538 | */ | 560 | */ |
| 539 | 561 | ||
| 540 | static void refresh_mems(void) | 562 | static void refresh_mems(void) |
| @@ -840,7 +862,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us | |||
| 840 | } | 862 | } |
| 841 | buffer[nbytes] = 0; /* nul-terminate */ | 863 | buffer[nbytes] = 0; /* nul-terminate */ |
| 842 | 864 | ||
| 843 | down(&cpuset_sem); | 865 | cpuset_down(&cpuset_sem); |
| 844 | 866 | ||
| 845 | if (is_removed(cs)) { | 867 | if (is_removed(cs)) { |
| 846 | retval = -ENODEV; | 868 | retval = -ENODEV; |
| @@ -874,7 +896,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us | |||
| 874 | if (retval == 0) | 896 | if (retval == 0) |
| 875 | retval = nbytes; | 897 | retval = nbytes; |
| 876 | out2: | 898 | out2: |
| 877 | up(&cpuset_sem); | 899 | cpuset_up(&cpuset_sem); |
| 878 | cpuset_release_agent(pathbuf); | 900 | cpuset_release_agent(pathbuf); |
| 879 | out1: | 901 | out1: |
| 880 | kfree(buffer); | 902 | kfree(buffer); |
| @@ -914,9 +936,9 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs) | |||
| 914 | { | 936 | { |
| 915 | cpumask_t mask; | 937 | cpumask_t mask; |
| 916 | 938 | ||
| 917 | down(&cpuset_sem); | 939 | cpuset_down(&cpuset_sem); |
| 918 | mask = cs->cpus_allowed; | 940 | mask = cs->cpus_allowed; |
| 919 | up(&cpuset_sem); | 941 | cpuset_up(&cpuset_sem); |
| 920 | 942 | ||
| 921 | return cpulist_scnprintf(page, PAGE_SIZE, mask); | 943 | return cpulist_scnprintf(page, PAGE_SIZE, mask); |
| 922 | } | 944 | } |
| @@ -925,9 +947,9 @@ static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) | |||
| 925 | { | 947 | { |
| 926 | nodemask_t mask; | 948 | nodemask_t mask; |
| 927 | 949 | ||
| 928 | down(&cpuset_sem); | 950 | cpuset_down(&cpuset_sem); |
| 929 | mask = cs->mems_allowed; | 951 | mask = cs->mems_allowed; |
| 930 | up(&cpuset_sem); | 952 | cpuset_up(&cpuset_sem); |
| 931 | 953 | ||
| 932 | return nodelist_scnprintf(page, PAGE_SIZE, mask); | 954 | return nodelist_scnprintf(page, PAGE_SIZE, mask); |
| 933 | } | 955 | } |
| @@ -1334,8 +1356,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode) | |||
| 1334 | if (!cs) | 1356 | if (!cs) |
| 1335 | return -ENOMEM; | 1357 | return -ENOMEM; |
| 1336 | 1358 | ||
| 1337 | down(&cpuset_sem); | 1359 | cpuset_down(&cpuset_sem); |
| 1338 | refresh_mems(); | ||
| 1339 | cs->flags = 0; | 1360 | cs->flags = 0; |
| 1340 | if (notify_on_release(parent)) | 1361 | if (notify_on_release(parent)) |
| 1341 | set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags); | 1362 | set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags); |
| @@ -1360,14 +1381,14 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode) | |||
| 1360 | * will down() this new directory's i_sem and if we race with | 1381 | * will down() this new directory's i_sem and if we race with |
| 1361 | * another mkdir, we might deadlock. | 1382 | * another mkdir, we might deadlock. |
| 1362 | */ | 1383 | */ |
| 1363 | up(&cpuset_sem); | 1384 | cpuset_up(&cpuset_sem); |
| 1364 | 1385 | ||
| 1365 | err = cpuset_populate_dir(cs->dentry); | 1386 | err = cpuset_populate_dir(cs->dentry); |
| 1366 | /* If err < 0, we have a half-filled directory - oh well ;) */ | 1387 | /* If err < 0, we have a half-filled directory - oh well ;) */ |
| 1367 | return 0; | 1388 | return 0; |
| 1368 | err: | 1389 | err: |
| 1369 | list_del(&cs->sibling); | 1390 | list_del(&cs->sibling); |
| 1370 | up(&cpuset_sem); | 1391 | cpuset_up(&cpuset_sem); |
| 1371 | kfree(cs); | 1392 | kfree(cs); |
| 1372 | return err; | 1393 | return err; |
| 1373 | } | 1394 | } |
| @@ -1389,14 +1410,13 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) | |||
| 1389 | 1410 | ||
| 1390 | /* the vfs holds both inode->i_sem already */ | 1411 | /* the vfs holds both inode->i_sem already */ |
| 1391 | 1412 | ||
| 1392 | down(&cpuset_sem); | 1413 | cpuset_down(&cpuset_sem); |
| 1393 | refresh_mems(); | ||
| 1394 | if (atomic_read(&cs->count) > 0) { | 1414 | if (atomic_read(&cs->count) > 0) { |
| 1395 | up(&cpuset_sem); | 1415 | cpuset_up(&cpuset_sem); |
| 1396 | return -EBUSY; | 1416 | return -EBUSY; |
| 1397 | } | 1417 | } |
| 1398 | if (!list_empty(&cs->children)) { | 1418 | if (!list_empty(&cs->children)) { |
| 1399 | up(&cpuset_sem); | 1419 | cpuset_up(&cpuset_sem); |
| 1400 | return -EBUSY; | 1420 | return -EBUSY; |
| 1401 | } | 1421 | } |
| 1402 | parent = cs->parent; | 1422 | parent = cs->parent; |
| @@ -1412,7 +1432,7 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) | |||
| 1412 | spin_unlock(&d->d_lock); | 1432 | spin_unlock(&d->d_lock); |
| 1413 | cpuset_d_remove_dir(d); | 1433 | cpuset_d_remove_dir(d); |
| 1414 | dput(d); | 1434 | dput(d); |
| 1415 | up(&cpuset_sem); | 1435 | cpuset_up(&cpuset_sem); |
| 1416 | cpuset_release_agent(pathbuf); | 1436 | cpuset_release_agent(pathbuf); |
| 1417 | return 0; | 1437 | return 0; |
| 1418 | } | 1438 | } |
| @@ -1515,10 +1535,10 @@ void cpuset_exit(struct task_struct *tsk) | |||
| 1515 | if (notify_on_release(cs)) { | 1535 | if (notify_on_release(cs)) { |
| 1516 | char *pathbuf = NULL; | 1536 | char *pathbuf = NULL; |
| 1517 | 1537 | ||
| 1518 | down(&cpuset_sem); | 1538 | cpuset_down(&cpuset_sem); |
| 1519 | if (atomic_dec_and_test(&cs->count)) | 1539 | if (atomic_dec_and_test(&cs->count)) |
| 1520 | check_for_release(cs, &pathbuf); | 1540 | check_for_release(cs, &pathbuf); |
| 1521 | up(&cpuset_sem); | 1541 | cpuset_up(&cpuset_sem); |
| 1522 | cpuset_release_agent(pathbuf); | 1542 | cpuset_release_agent(pathbuf); |
| 1523 | } else { | 1543 | } else { |
| 1524 | atomic_dec(&cs->count); | 1544 | atomic_dec(&cs->count); |
| @@ -1539,11 +1559,11 @@ cpumask_t cpuset_cpus_allowed(const struct task_struct *tsk) | |||
| 1539 | { | 1559 | { |
| 1540 | cpumask_t mask; | 1560 | cpumask_t mask; |
| 1541 | 1561 | ||
| 1542 | down(&cpuset_sem); | 1562 | cpuset_down(&cpuset_sem); |
| 1543 | task_lock((struct task_struct *)tsk); | 1563 | task_lock((struct task_struct *)tsk); |
| 1544 | guarantee_online_cpus(tsk->cpuset, &mask); | 1564 | guarantee_online_cpus(tsk->cpuset, &mask); |
| 1545 | task_unlock((struct task_struct *)tsk); | 1565 | task_unlock((struct task_struct *)tsk); |
| 1546 | up(&cpuset_sem); | 1566 | cpuset_up(&cpuset_sem); |
| 1547 | 1567 | ||
| 1548 | return mask; | 1568 | return mask; |
| 1549 | } | 1569 | } |
| @@ -1568,9 +1588,9 @@ void cpuset_update_current_mems_allowed(void) | |||
| 1568 | if (!cs) | 1588 | if (!cs) |
| 1569 | return; /* task is exiting */ | 1589 | return; /* task is exiting */ |
| 1570 | if (current->cpuset_mems_generation != cs->mems_generation) { | 1590 | if (current->cpuset_mems_generation != cs->mems_generation) { |
| 1571 | down(&cpuset_sem); | 1591 | cpuset_down(&cpuset_sem); |
| 1572 | refresh_mems(); | 1592 | refresh_mems(); |
| 1573 | up(&cpuset_sem); | 1593 | cpuset_up(&cpuset_sem); |
| 1574 | } | 1594 | } |
| 1575 | } | 1595 | } |
| 1576 | 1596 | ||
| @@ -1669,14 +1689,14 @@ int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask) | |||
| 1669 | return 0; | 1689 | return 0; |
| 1670 | 1690 | ||
| 1671 | /* Not hardwall and node outside mems_allowed: scan up cpusets */ | 1691 | /* Not hardwall and node outside mems_allowed: scan up cpusets */ |
| 1672 | down(&cpuset_sem); | 1692 | cpuset_down(&cpuset_sem); |
| 1673 | cs = current->cpuset; | 1693 | cs = current->cpuset; |
| 1674 | if (!cs) | 1694 | if (!cs) |
| 1675 | goto done; /* current task exiting */ | 1695 | goto done; /* current task exiting */ |
| 1676 | cs = nearest_exclusive_ancestor(cs); | 1696 | cs = nearest_exclusive_ancestor(cs); |
| 1677 | allowed = node_isset(node, cs->mems_allowed); | 1697 | allowed = node_isset(node, cs->mems_allowed); |
| 1678 | done: | 1698 | done: |
| 1679 | up(&cpuset_sem); | 1699 | cpuset_up(&cpuset_sem); |
| 1680 | return allowed; | 1700 | return allowed; |
| 1681 | } | 1701 | } |
| 1682 | 1702 | ||
| @@ -1697,7 +1717,7 @@ int cpuset_excl_nodes_overlap(const struct task_struct *p) | |||
| 1697 | const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */ | 1717 | const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */ |
| 1698 | int overlap = 0; /* do cpusets overlap? */ | 1718 | int overlap = 0; /* do cpusets overlap? */ |
| 1699 | 1719 | ||
| 1700 | down(&cpuset_sem); | 1720 | cpuset_down(&cpuset_sem); |
| 1701 | cs1 = current->cpuset; | 1721 | cs1 = current->cpuset; |
| 1702 | if (!cs1) | 1722 | if (!cs1) |
| 1703 | goto done; /* current task exiting */ | 1723 | goto done; /* current task exiting */ |
| @@ -1708,7 +1728,7 @@ int cpuset_excl_nodes_overlap(const struct task_struct *p) | |||
| 1708 | cs2 = nearest_exclusive_ancestor(cs2); | 1728 | cs2 = nearest_exclusive_ancestor(cs2); |
| 1709 | overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed); | 1729 | overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed); |
| 1710 | done: | 1730 | done: |
| 1711 | up(&cpuset_sem); | 1731 | cpuset_up(&cpuset_sem); |
| 1712 | 1732 | ||
| 1713 | return overlap; | 1733 | return overlap; |
| 1714 | } | 1734 | } |
| @@ -1731,7 +1751,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v) | |||
| 1731 | return -ENOMEM; | 1751 | return -ENOMEM; |
| 1732 | 1752 | ||
| 1733 | tsk = m->private; | 1753 | tsk = m->private; |
| 1734 | down(&cpuset_sem); | 1754 | cpuset_down(&cpuset_sem); |
| 1735 | task_lock(tsk); | 1755 | task_lock(tsk); |
| 1736 | cs = tsk->cpuset; | 1756 | cs = tsk->cpuset; |
| 1737 | task_unlock(tsk); | 1757 | task_unlock(tsk); |
| @@ -1746,7 +1766,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v) | |||
| 1746 | seq_puts(m, buf); | 1766 | seq_puts(m, buf); |
| 1747 | seq_putc(m, '\n'); | 1767 | seq_putc(m, '\n'); |
| 1748 | out: | 1768 | out: |
| 1749 | up(&cpuset_sem); | 1769 | cpuset_up(&cpuset_sem); |
| 1750 | kfree(buf); | 1770 | kfree(buf); |
| 1751 | return retval; | 1771 | return retval; |
| 1752 | } | 1772 | } |
diff --git a/kernel/sched.c b/kernel/sched.c index 2632b812cf24..dbd4490afec1 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -875,7 +875,7 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req) | |||
| 875 | * smp_call_function() if an IPI is sent by the same process we are | 875 | * smp_call_function() if an IPI is sent by the same process we are |
| 876 | * waiting to become inactive. | 876 | * waiting to become inactive. |
| 877 | */ | 877 | */ |
| 878 | void wait_task_inactive(task_t * p) | 878 | void wait_task_inactive(task_t *p) |
| 879 | { | 879 | { |
| 880 | unsigned long flags; | 880 | unsigned long flags; |
| 881 | runqueue_t *rq; | 881 | runqueue_t *rq; |
| @@ -966,8 +966,11 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) | |||
| 966 | int local_group; | 966 | int local_group; |
| 967 | int i; | 967 | int i; |
| 968 | 968 | ||
| 969 | /* Skip over this group if it has no CPUs allowed */ | ||
| 970 | if (!cpus_intersects(group->cpumask, p->cpus_allowed)) | ||
| 971 | goto nextgroup; | ||
| 972 | |||
| 969 | local_group = cpu_isset(this_cpu, group->cpumask); | 973 | local_group = cpu_isset(this_cpu, group->cpumask); |
| 970 | /* XXX: put a cpus allowed check */ | ||
| 971 | 974 | ||
| 972 | /* Tally up the load of all CPUs in the group */ | 975 | /* Tally up the load of all CPUs in the group */ |
| 973 | avg_load = 0; | 976 | avg_load = 0; |
| @@ -992,6 +995,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) | |||
| 992 | min_load = avg_load; | 995 | min_load = avg_load; |
| 993 | idlest = group; | 996 | idlest = group; |
| 994 | } | 997 | } |
| 998 | nextgroup: | ||
| 995 | group = group->next; | 999 | group = group->next; |
| 996 | } while (group != sd->groups); | 1000 | } while (group != sd->groups); |
| 997 | 1001 | ||
| @@ -1003,13 +1007,18 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) | |||
| 1003 | /* | 1007 | /* |
| 1004 | * find_idlest_queue - find the idlest runqueue among the cpus in group. | 1008 | * find_idlest_queue - find the idlest runqueue among the cpus in group. |
| 1005 | */ | 1009 | */ |
| 1006 | static int find_idlest_cpu(struct sched_group *group, int this_cpu) | 1010 | static int |
| 1011 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) | ||
| 1007 | { | 1012 | { |
| 1013 | cpumask_t tmp; | ||
| 1008 | unsigned long load, min_load = ULONG_MAX; | 1014 | unsigned long load, min_load = ULONG_MAX; |
| 1009 | int idlest = -1; | 1015 | int idlest = -1; |
| 1010 | int i; | 1016 | int i; |
| 1011 | 1017 | ||
| 1012 | for_each_cpu_mask(i, group->cpumask) { | 1018 | /* Traverse only the allowed CPUs */ |
| 1019 | cpus_and(tmp, group->cpumask, p->cpus_allowed); | ||
| 1020 | |||
| 1021 | for_each_cpu_mask(i, tmp) { | ||
| 1013 | load = source_load(i, 0); | 1022 | load = source_load(i, 0); |
| 1014 | 1023 | ||
| 1015 | if (load < min_load || (load == min_load && i == this_cpu)) { | 1024 | if (load < min_load || (load == min_load && i == this_cpu)) { |
| @@ -1052,7 +1061,7 @@ static int sched_balance_self(int cpu, int flag) | |||
| 1052 | if (!group) | 1061 | if (!group) |
| 1053 | goto nextlevel; | 1062 | goto nextlevel; |
| 1054 | 1063 | ||
| 1055 | new_cpu = find_idlest_cpu(group, cpu); | 1064 | new_cpu = find_idlest_cpu(group, t, cpu); |
| 1056 | if (new_cpu == -1 || new_cpu == cpu) | 1065 | if (new_cpu == -1 || new_cpu == cpu) |
| 1057 | goto nextlevel; | 1066 | goto nextlevel; |
| 1058 | 1067 | ||
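Both fixes apply the same affinity filter at two levels: a whole group is skipped when its CPU mask has no overlap with p->cpus_allowed, and within a group only the intersection is scanned. A tiny user-space model of the two mask tests, with plain bitmaps standing in for cpumask_t:

    #include <stdio.h>

    int main(void)
    {
        unsigned long group_mask   = 0x0c;   /* CPUs 2,3 in the group */
        unsigned long cpus_allowed = 0x06;   /* task may run on CPUs 1,2 */

        if (!(group_mask & cpus_allowed))                /* cpus_intersects() */
            printf("skip the whole group\n");

        unsigned long scan = group_mask & cpus_allowed;  /* cpus_and() */
        printf("CPUs actually scanned: %#lx\n", scan);   /* 0x4: CPU 2 only */
        return 0;
    }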
| @@ -1127,7 +1136,7 @@ static inline int wake_idle(int cpu, task_t *p) | |||
| 1127 | * | 1136 | * |
| 1128 | * returns failure only if the task is already active. | 1137 | * returns failure only if the task is already active. |
| 1129 | */ | 1138 | */ |
| 1130 | static int try_to_wake_up(task_t * p, unsigned int state, int sync) | 1139 | static int try_to_wake_up(task_t *p, unsigned int state, int sync) |
| 1131 | { | 1140 | { |
| 1132 | int cpu, this_cpu, success = 0; | 1141 | int cpu, this_cpu, success = 0; |
| 1133 | unsigned long flags; | 1142 | unsigned long flags; |
| @@ -1252,6 +1261,16 @@ out_activate: | |||
| 1252 | } | 1261 | } |
| 1253 | 1262 | ||
| 1254 | /* | 1263 | /* |
| 1264 | * Tasks that have marked their sleep as noninteractive get | ||
| 1265 | * woken up without updating their sleep average. (i.e. their | ||
| 1266 | * sleep is handled in a priority-neutral manner, no priority | ||
| 1267 | * boost and no penalty.) | ||
| 1268 | */ | ||
| 1269 | if (old_state & TASK_NONINTERACTIVE) | ||
| 1270 | __activate_task(p, rq); | ||
| 1271 | else | ||
| 1272 | activate_task(p, rq, cpu == this_cpu); | ||
| 1273 | /* | ||
| 1255 | * Sync wakeups (i.e. those types of wakeups where the waker | 1274 | * Sync wakeups (i.e. those types of wakeups where the waker |
| 1256 | * has indicated that it will leave the CPU in short order) | 1275 | * has indicated that it will leave the CPU in short order) |
| 1257 | * don't trigger a preemption, if the woken up task will run on | 1276 | * don't trigger a preemption, if the woken up task will run on |
| @@ -1259,7 +1278,6 @@ out_activate: | |||
| 1259 | * the waker guarantees that the freshly woken up task is going | 1278 | * the waker guarantees that the freshly woken up task is going |
| 1260 | * to be considered on this CPU.) | 1279 | * to be considered on this CPU.) |
| 1261 | */ | 1280 | */ |
| 1262 | activate_task(p, rq, cpu == this_cpu); | ||
| 1263 | if (!sync || cpu != this_cpu) { | 1281 | if (!sync || cpu != this_cpu) { |
| 1264 | if (TASK_PREEMPTS_CURR(p, rq)) | 1282 | if (TASK_PREEMPTS_CURR(p, rq)) |
| 1265 | resched_task(rq->curr); | 1283 | resched_task(rq->curr); |
| @@ -1274,7 +1292,7 @@ out: | |||
| 1274 | return success; | 1292 | return success; |
| 1275 | } | 1293 | } |
| 1276 | 1294 | ||
| 1277 | int fastcall wake_up_process(task_t * p) | 1295 | int fastcall wake_up_process(task_t *p) |
| 1278 | { | 1296 | { |
| 1279 | return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | | 1297 | return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | |
| 1280 | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0); | 1298 | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0); |
| @@ -1353,7 +1371,7 @@ void fastcall sched_fork(task_t *p, int clone_flags) | |||
| 1353 | * that must be done for every newly created context, then puts the task | 1371 | * that must be done for every newly created context, then puts the task |
| 1354 | * on the runqueue and wakes it. | 1372 | * on the runqueue and wakes it. |
| 1355 | */ | 1373 | */ |
| 1356 | void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags) | 1374 | void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags) |
| 1357 | { | 1375 | { |
| 1358 | unsigned long flags; | 1376 | unsigned long flags; |
| 1359 | int this_cpu, cpu; | 1377 | int this_cpu, cpu; |
| @@ -1436,7 +1454,7 @@ void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags) | |||
| 1436 | * artificially, because any timeslice recovered here | 1454 | * artificially, because any timeslice recovered here |
| 1437 | * was given away by the parent in the first place.) | 1455 | * was given away by the parent in the first place.) |
| 1438 | */ | 1456 | */ |
| 1439 | void fastcall sched_exit(task_t * p) | 1457 | void fastcall sched_exit(task_t *p) |
| 1440 | { | 1458 | { |
| 1441 | unsigned long flags; | 1459 | unsigned long flags; |
| 1442 | runqueue_t *rq; | 1460 | runqueue_t *rq; |
| @@ -1511,6 +1529,10 @@ static inline void finish_task_switch(runqueue_t *rq, task_t *prev) | |||
| 1511 | * Manfred Spraul <manfred@colorfullife.com> | 1529 | * Manfred Spraul <manfred@colorfullife.com> |
| 1512 | */ | 1530 | */ |
| 1513 | prev_task_flags = prev->flags; | 1531 | prev_task_flags = prev->flags; |
| 1532 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
| 1533 | /* this is a valid case when another task releases the spinlock */ | ||
| 1534 | rq->lock.owner = current; | ||
| 1535 | #endif | ||
| 1514 | finish_arch_switch(prev); | 1536 | finish_arch_switch(prev); |
| 1515 | finish_lock_switch(rq, prev); | 1537 | finish_lock_switch(rq, prev); |
| 1516 | if (mm) | 1538 | if (mm) |
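The owner fix-up is needed because a runqueue lock is routinely taken in one task's context and released in another's across a context switch; a debug unlock that checks ownership would otherwise complain about a perfectly valid release. A user-space sketch of that kind of owner check (pthread-based, names illustrative, not the kernel's debug code):

    #include <pthread.h>
    #include <stdio.h>

    struct demo_lock {
        pthread_mutex_t m;
        pthread_t       owner;
        int             owned;
    };

    static void demo_acquire(struct demo_lock *l)
    {
        pthread_mutex_lock(&l->m);
        l->owner = pthread_self();          /* record who took the lock */
        l->owned = 1;
    }

    static void demo_release(struct demo_lock *l)
    {
        /* the debug check: would fire across a context switch unless
         * the "owner" field is re-pointed at the releasing thread */
        if (!l->owned || !pthread_equal(l->owner, pthread_self()))
            fprintf(stderr, "debug: unlock by non-owner\n");
        l->owned = 0;
        pthread_mutex_unlock(&l->m);
    }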
| @@ -1753,7 +1775,8 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, | |||
| 1753 | */ | 1775 | */ |
| 1754 | static inline | 1776 | static inline |
| 1755 | int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, | 1777 | int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, |
| 1756 | struct sched_domain *sd, enum idle_type idle, int *all_pinned) | 1778 | struct sched_domain *sd, enum idle_type idle, |
| 1779 | int *all_pinned) | ||
| 1757 | { | 1780 | { |
| 1758 | /* | 1781 | /* |
| 1759 | * We do not migrate tasks that are: | 1782 | * We do not migrate tasks that are: |
| @@ -1883,10 +1906,11 @@ out: | |||
| 1883 | */ | 1906 | */ |
| 1884 | static struct sched_group * | 1907 | static struct sched_group * |
| 1885 | find_busiest_group(struct sched_domain *sd, int this_cpu, | 1908 | find_busiest_group(struct sched_domain *sd, int this_cpu, |
| 1886 | unsigned long *imbalance, enum idle_type idle) | 1909 | unsigned long *imbalance, enum idle_type idle, int *sd_idle) |
| 1887 | { | 1910 | { |
| 1888 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; | 1911 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; |
| 1889 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; | 1912 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; |
| 1913 | unsigned long max_pull; | ||
| 1890 | int load_idx; | 1914 | int load_idx; |
| 1891 | 1915 | ||
| 1892 | max_load = this_load = total_load = total_pwr = 0; | 1916 | max_load = this_load = total_load = total_pwr = 0; |
| @@ -1908,6 +1932,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
| 1908 | avg_load = 0; | 1932 | avg_load = 0; |
| 1909 | 1933 | ||
| 1910 | for_each_cpu_mask(i, group->cpumask) { | 1934 | for_each_cpu_mask(i, group->cpumask) { |
| 1935 | if (*sd_idle && !idle_cpu(i)) | ||
| 1936 | *sd_idle = 0; | ||
| 1937 | |||
| 1911 | /* Bias balancing toward cpus of our domain */ | 1938 | /* Bias balancing toward cpus of our domain */ |
| 1912 | if (local_group) | 1939 | if (local_group) |
| 1913 | load = target_load(i, load_idx); | 1940 | load = target_load(i, load_idx); |
| @@ -1933,7 +1960,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
| 1933 | group = group->next; | 1960 | group = group->next; |
| 1934 | } while (group != sd->groups); | 1961 | } while (group != sd->groups); |
| 1935 | 1962 | ||
| 1936 | if (!busiest || this_load >= max_load) | 1963 | if (!busiest || this_load >= max_load || max_load <= SCHED_LOAD_SCALE) |
| 1937 | goto out_balanced; | 1964 | goto out_balanced; |
| 1938 | 1965 | ||
| 1939 | avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr; | 1966 | avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr; |
| @@ -1953,8 +1980,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
| 1953 | * by pulling tasks to us. Be careful of negative numbers as they'll | 1980 | * by pulling tasks to us. Be careful of negative numbers as they'll |
| 1954 | * appear as very large values with unsigned longs. | 1981 | * appear as very large values with unsigned longs. |
| 1955 | */ | 1982 | */ |
| 1983 | |||
| 1984 | /* Don't want to pull so many tasks that a group would go idle */ | ||
| 1985 | max_pull = min(max_load - avg_load, max_load - SCHED_LOAD_SCALE); | ||
| 1986 | |||
| 1956 | /* How much load to actually move to equalise the imbalance */ | 1987 | /* How much load to actually move to equalise the imbalance */ |
| 1957 | *imbalance = min((max_load - avg_load) * busiest->cpu_power, | 1988 | *imbalance = min(max_pull * busiest->cpu_power, |
| 1958 | (avg_load - this_load) * this->cpu_power) | 1989 | (avg_load - this_load) * this->cpu_power) |
| 1959 | / SCHED_LOAD_SCALE; | 1990 | / SCHED_LOAD_SCALE; |
| 1960 | 1991 | ||
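The max_pull clamp is easiest to see with numbers. A worked example with made-up loads, taking SCHED_LOAD_SCALE as 128 (one fully busy CPU's worth of load):

    #include <stdio.h>

    #define SCHED_LOAD_SCALE 128UL          /* illustrative value */

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned long max_load = 384, avg_load = 96;

        unsigned long naive    = max_load - avg_load;    /* 288 */
        unsigned long max_pull = min_ul(naive, max_load - SCHED_LOAD_SCALE);

        /* clamped to 256, so the busiest group keeps >= one CPU's load */
        printf("naive %lu, clamped %lu, left behind %lu\n",
               naive, max_pull, max_load - max_pull);
        return 0;
    }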
| @@ -2051,11 +2082,14 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, | |||
| 2051 | unsigned long imbalance; | 2082 | unsigned long imbalance; |
| 2052 | int nr_moved, all_pinned = 0; | 2083 | int nr_moved, all_pinned = 0; |
| 2053 | int active_balance = 0; | 2084 | int active_balance = 0; |
| 2085 | int sd_idle = 0; | ||
| 2086 | |||
| 2087 | if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER) | ||
| 2088 | sd_idle = 1; | ||
| 2054 | 2089 | ||
| 2055 | spin_lock(&this_rq->lock); | ||
| 2056 | schedstat_inc(sd, lb_cnt[idle]); | 2090 | schedstat_inc(sd, lb_cnt[idle]); |
| 2057 | 2091 | ||
| 2058 | group = find_busiest_group(sd, this_cpu, &imbalance, idle); | 2092 | group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle); |
| 2059 | if (!group) { | 2093 | if (!group) { |
| 2060 | schedstat_inc(sd, lb_nobusyg[idle]); | 2094 | schedstat_inc(sd, lb_nobusyg[idle]); |
| 2061 | goto out_balanced; | 2095 | goto out_balanced; |
| @@ -2079,19 +2113,16 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, | |||
| 2079 | * still unbalanced. nr_moved simply stays zero, so it is | 2113 | * still unbalanced. nr_moved simply stays zero, so it is |
| 2080 | * correctly treated as an imbalance. | 2114 | * correctly treated as an imbalance. |
| 2081 | */ | 2115 | */ |
| 2082 | double_lock_balance(this_rq, busiest); | 2116 | double_rq_lock(this_rq, busiest); |
| 2083 | nr_moved = move_tasks(this_rq, this_cpu, busiest, | 2117 | nr_moved = move_tasks(this_rq, this_cpu, busiest, |
| 2084 | imbalance, sd, idle, | 2118 | imbalance, sd, idle, &all_pinned); |
| 2085 | &all_pinned); | 2119 | double_rq_unlock(this_rq, busiest); |
| 2086 | spin_unlock(&busiest->lock); | ||
| 2087 | 2120 | ||
| 2088 | /* All tasks on this runqueue were pinned by CPU affinity */ | 2121 | /* All tasks on this runqueue were pinned by CPU affinity */ |
| 2089 | if (unlikely(all_pinned)) | 2122 | if (unlikely(all_pinned)) |
| 2090 | goto out_balanced; | 2123 | goto out_balanced; |
| 2091 | } | 2124 | } |
| 2092 | 2125 | ||
| 2093 | spin_unlock(&this_rq->lock); | ||
| 2094 | |||
| 2095 | if (!nr_moved) { | 2126 | if (!nr_moved) { |
| 2096 | schedstat_inc(sd, lb_failed[idle]); | 2127 | schedstat_inc(sd, lb_failed[idle]); |
| 2097 | sd->nr_balance_failed++; | 2128 | sd->nr_balance_failed++; |
| @@ -2099,6 +2130,16 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, | |||
| 2099 | if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { | 2130 | if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { |
| 2100 | 2131 | ||
| 2101 | spin_lock(&busiest->lock); | 2132 | spin_lock(&busiest->lock); |
| 2133 | |||
| 2134 | /* don't kick the migration_thread, if the curr | ||
| 2135 | * task on busiest cpu can't be moved to this_cpu | ||
| 2136 | */ | ||
| 2137 | if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { | ||
| 2138 | spin_unlock(&busiest->lock); | ||
| 2139 | all_pinned = 1; | ||
| 2140 | goto out_one_pinned; | ||
| 2141 | } | ||
| 2142 | |||
| 2102 | if (!busiest->active_balance) { | 2143 | if (!busiest->active_balance) { |
| 2103 | busiest->active_balance = 1; | 2144 | busiest->active_balance = 1; |
| 2104 | busiest->push_cpu = this_cpu; | 2145 | busiest->push_cpu = this_cpu; |
| @@ -2131,19 +2172,23 @@ static int load_balance(int this_cpu, runqueue_t *this_rq, | |||
| 2131 | sd->balance_interval *= 2; | 2172 | sd->balance_interval *= 2; |
| 2132 | } | 2173 | } |
| 2133 | 2174 | ||
| 2175 | if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER) | ||
| 2176 | return -1; | ||
| 2134 | return nr_moved; | 2177 | return nr_moved; |
| 2135 | 2178 | ||
| 2136 | out_balanced: | 2179 | out_balanced: |
| 2137 | spin_unlock(&this_rq->lock); | ||
| 2138 | |||
| 2139 | schedstat_inc(sd, lb_balanced[idle]); | 2180 | schedstat_inc(sd, lb_balanced[idle]); |
| 2140 | 2181 | ||
| 2141 | sd->nr_balance_failed = 0; | 2182 | sd->nr_balance_failed = 0; |
| 2183 | |||
| 2184 | out_one_pinned: | ||
| 2142 | /* tune up the balancing interval */ | 2185 | /* tune up the balancing interval */ |
| 2143 | if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) || | 2186 | if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) || |
| 2144 | (sd->balance_interval < sd->max_interval)) | 2187 | (sd->balance_interval < sd->max_interval)) |
| 2145 | sd->balance_interval *= 2; | 2188 | sd->balance_interval *= 2; |
| 2146 | 2189 | ||
| 2190 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER) | ||
| 2191 | return -1; | ||
| 2147 | return 0; | 2192 | return 0; |
| 2148 | } | 2193 | } |
| 2149 | 2194 | ||
| @@ -2161,9 +2206,13 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, | |||
| 2161 | runqueue_t *busiest = NULL; | 2206 | runqueue_t *busiest = NULL; |
| 2162 | unsigned long imbalance; | 2207 | unsigned long imbalance; |
| 2163 | int nr_moved = 0; | 2208 | int nr_moved = 0; |
| 2209 | int sd_idle = 0; | ||
| 2210 | |||
| 2211 | if (sd->flags & SD_SHARE_CPUPOWER) | ||
| 2212 | sd_idle = 1; | ||
| 2164 | 2213 | ||
| 2165 | schedstat_inc(sd, lb_cnt[NEWLY_IDLE]); | 2214 | schedstat_inc(sd, lb_cnt[NEWLY_IDLE]); |
| 2166 | group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE); | 2215 | group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE, &sd_idle); |
| 2167 | if (!group) { | 2216 | if (!group) { |
| 2168 | schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]); | 2217 | schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]); |
| 2169 | goto out_balanced; | 2218 | goto out_balanced; |
| @@ -2177,22 +2226,30 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, | |||
| 2177 | 2226 | ||
| 2178 | BUG_ON(busiest == this_rq); | 2227 | BUG_ON(busiest == this_rq); |
| 2179 | 2228 | ||
| 2180 | /* Attempt to move tasks */ | ||
| 2181 | double_lock_balance(this_rq, busiest); | ||
| 2182 | |||
| 2183 | schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance); | 2229 | schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance); |
| 2184 | nr_moved = move_tasks(this_rq, this_cpu, busiest, | 2230 | |
| 2231 | nr_moved = 0; | ||
| 2232 | if (busiest->nr_running > 1) { | ||
| 2233 | /* Attempt to move tasks */ | ||
| 2234 | double_lock_balance(this_rq, busiest); | ||
| 2235 | nr_moved = move_tasks(this_rq, this_cpu, busiest, | ||
| 2185 | imbalance, sd, NEWLY_IDLE, NULL); | 2236 | imbalance, sd, NEWLY_IDLE, NULL); |
| 2186 | if (!nr_moved) | 2237 | spin_unlock(&busiest->lock); |
| 2238 | } | ||
| 2239 | |||
| 2240 | if (!nr_moved) { | ||
| 2187 | schedstat_inc(sd, lb_failed[NEWLY_IDLE]); | 2241 | schedstat_inc(sd, lb_failed[NEWLY_IDLE]); |
| 2188 | else | 2242 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER) |
| 2243 | return -1; | ||
| 2244 | } else | ||
| 2189 | sd->nr_balance_failed = 0; | 2245 | sd->nr_balance_failed = 0; |
| 2190 | 2246 | ||
| 2191 | spin_unlock(&busiest->lock); | ||
| 2192 | return nr_moved; | 2247 | return nr_moved; |
| 2193 | 2248 | ||
| 2194 | out_balanced: | 2249 | out_balanced: |
| 2195 | schedstat_inc(sd, lb_balanced[NEWLY_IDLE]); | 2250 | schedstat_inc(sd, lb_balanced[NEWLY_IDLE]); |
| 2251 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER) | ||
| 2252 | return -1; | ||
| 2196 | sd->nr_balance_failed = 0; | 2253 | sd->nr_balance_failed = 0; |
| 2197 | return 0; | 2254 | return 0; |
| 2198 | } | 2255 | } |
| @@ -2317,7 +2374,11 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq, | |||
| 2317 | 2374 | ||
| 2318 | if (j - sd->last_balance >= interval) { | 2375 | if (j - sd->last_balance >= interval) { |
| 2319 | if (load_balance(this_cpu, this_rq, sd, idle)) { | 2376 | if (load_balance(this_cpu, this_rq, sd, idle)) { |
| 2320 | /* We've pulled tasks over so no longer idle */ | 2377 | /* |
| 2378 | * We've pulled tasks over so either we're no | ||
| 2379 | * longer idle, or one of our SMT siblings is | ||
| 2380 | * not idle. | ||
| 2381 | */ | ||
| 2321 | idle = NOT_IDLE; | 2382 | idle = NOT_IDLE; |
| 2322 | } | 2383 | } |
| 2323 | sd->last_balance += interval; | 2384 | sd->last_balance += interval; |
| @@ -2576,6 +2637,13 @@ out: | |||
| 2576 | } | 2637 | } |
| 2577 | 2638 | ||
| 2578 | #ifdef CONFIG_SCHED_SMT | 2639 | #ifdef CONFIG_SCHED_SMT |
| 2640 | static inline void wakeup_busy_runqueue(runqueue_t *rq) | ||
| 2641 | { | ||
| 2642 | /* If an SMT runqueue is sleeping due to priority reasons, wake it up */ | ||
| 2643 | if (rq->curr == rq->idle && rq->nr_running) | ||
| 2644 | resched_task(rq->idle); | ||
| 2645 | } | ||
| 2646 | |||
| 2579 | static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) | 2647 | static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) |
| 2580 | { | 2648 | { |
| 2581 | struct sched_domain *tmp, *sd = NULL; | 2649 | struct sched_domain *tmp, *sd = NULL; |
| @@ -2609,12 +2677,7 @@ static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) | |||
| 2609 | for_each_cpu_mask(i, sibling_map) { | 2677 | for_each_cpu_mask(i, sibling_map) { |
| 2610 | runqueue_t *smt_rq = cpu_rq(i); | 2678 | runqueue_t *smt_rq = cpu_rq(i); |
| 2611 | 2679 | ||
| 2612 | /* | 2680 | wakeup_busy_runqueue(smt_rq); |
| 2613 | * If an SMT sibling task is sleeping due to priority | ||
| 2614 | * reasons wake it up now. | ||
| 2615 | */ | ||
| 2616 | if (smt_rq->curr == smt_rq->idle && smt_rq->nr_running) | ||
| 2617 | resched_task(smt_rq->idle); | ||
| 2618 | } | 2681 | } |
| 2619 | 2682 | ||
| 2620 | for_each_cpu_mask(i, sibling_map) | 2683 | for_each_cpu_mask(i, sibling_map) |
| @@ -2625,6 +2688,16 @@ static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) | |||
| 2625 | */ | 2688 | */ |
| 2626 | } | 2689 | } |
| 2627 | 2690 | ||
| 2691 | /* | ||
| 2692 | * number of 'lost' timeslices this task won't be able to fully | ||
| 2693 | * utilize if another task runs on a sibling. This models the | ||
| 2694 | * slowdown effect of other tasks running on siblings: | ||
| 2695 | */ | ||
| 2696 | static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd) | ||
| 2697 | { | ||
| 2698 | return p->time_slice * (100 - sd->per_cpu_gain) / 100; | ||
| 2699 | } | ||
| 2700 | |||
| 2628 | static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq) | 2701 | static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq) |
| 2629 | { | 2702 | { |
| 2630 | struct sched_domain *tmp, *sd = NULL; | 2703 | struct sched_domain *tmp, *sd = NULL; |
| @@ -2668,6 +2741,10 @@ static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq) | |||
| 2668 | runqueue_t *smt_rq = cpu_rq(i); | 2741 | runqueue_t *smt_rq = cpu_rq(i); |
| 2669 | task_t *smt_curr = smt_rq->curr; | 2742 | task_t *smt_curr = smt_rq->curr; |
| 2670 | 2743 | ||
| 2744 | /* Kernel threads do not participate in dependent sleeping */ | ||
| 2745 | if (!p->mm || !smt_curr->mm || rt_task(p)) | ||
| 2746 | goto check_smt_task; | ||
| 2747 | |||
| 2671 | /* | 2748 | /* |
| 2672 | * If a user task with lower static priority than the | 2749 | * If a user task with lower static priority than the |
| 2673 | * running task on the SMT sibling is trying to schedule, | 2750 | * running task on the SMT sibling is trying to schedule, |
| @@ -2676,21 +2753,45 @@ static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq) | |||
| 2676 | * task from using an unfair proportion of the | 2753 | * task from using an unfair proportion of the |
| 2677 | * physical cpu's resources. -ck | 2754 | * physical cpu's resources. -ck |
| 2678 | */ | 2755 | */ |
| 2679 | if (((smt_curr->time_slice * (100 - sd->per_cpu_gain) / 100) > | 2756 | if (rt_task(smt_curr)) { |
| 2680 | task_timeslice(p) || rt_task(smt_curr)) && | 2757 | /* |
| 2681 | p->mm && smt_curr->mm && !rt_task(p)) | 2758 | * With real time tasks we run non-rt tasks only |
| 2682 | ret = 1; | 2759 | * per_cpu_gain% of the time. |
| 2760 | */ | ||
| 2761 | if ((jiffies % DEF_TIMESLICE) > | ||
| 2762 | (sd->per_cpu_gain * DEF_TIMESLICE / 100)) | ||
| 2763 | ret = 1; | ||
| 2764 | } else | ||
| 2765 | if (smt_curr->static_prio < p->static_prio && | ||
| 2766 | !TASK_PREEMPTS_CURR(p, smt_rq) && | ||
| 2767 | smt_slice(smt_curr, sd) > task_timeslice(p)) | ||
| 2768 | ret = 1; | ||
| 2769 | |||
| 2770 | check_smt_task: | ||
| 2771 | if ((!smt_curr->mm && smt_curr != smt_rq->idle) || | ||
| 2772 | rt_task(smt_curr)) | ||
| 2773 | continue; | ||
| 2774 | if (!p->mm) { | ||
| 2775 | wakeup_busy_runqueue(smt_rq); | ||
| 2776 | continue; | ||
| 2777 | } | ||
| 2683 | 2778 | ||
| 2684 | /* | 2779 | /* |
| 2685 | * Reschedule a lower priority task on the SMT sibling, | 2780 | * Reschedule a lower priority task on the SMT sibling for |
| 2686 | * or wake it up if it has been put to sleep for priority | 2781 | * it to be put to sleep, or wake it up if it has been put to |
| 2687 | * reasons. | 2782 | * sleep for priority reasons to see if it should run now. |
| 2688 | */ | 2783 | */ |
| 2689 | if ((((p->time_slice * (100 - sd->per_cpu_gain) / 100) > | 2784 | if (rt_task(p)) { |
| 2690 | task_timeslice(smt_curr) || rt_task(p)) && | 2785 | if ((jiffies % DEF_TIMESLICE) > |
| 2691 | smt_curr->mm && p->mm && !rt_task(smt_curr)) || | 2786 | (sd->per_cpu_gain * DEF_TIMESLICE / 100)) |
| 2692 | (smt_curr == smt_rq->idle && smt_rq->nr_running)) | 2787 | resched_task(smt_curr); |
| 2693 | resched_task(smt_curr); | 2788 | } else { |
| 2789 | if (TASK_PREEMPTS_CURR(p, smt_rq) && | ||
| 2790 | smt_slice(p, sd) > task_timeslice(smt_curr)) | ||
| 2791 | resched_task(smt_curr); | ||
| 2792 | else | ||
| 2793 | wakeup_busy_runqueue(smt_rq); | ||
| 2794 | } | ||
| 2694 | } | 2795 | } |
| 2695 | out_unlock: | 2796 | out_unlock: |
| 2696 | for_each_cpu_mask(i, sibling_map) | 2797 | for_each_cpu_mask(i, sibling_map) |
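The RT branches throttle a non-RT task next to a real-time sibling by the wall clock rather than by timeslices: within every DEF_TIMESLICE window of jiffies, the non-RT side may run only during the first per_cpu_gain percent. A sketch of the predicate with assumed values (DEF_TIMESLICE = 100, per_cpu_gain = 25):

    /* returns nonzero when the non-RT task should yield to the RT sibling */
    static int demo_throttled(unsigned long jiffies_now)
    {
        const unsigned long demo_def_timeslice = 100;   /* assumed */
        const unsigned long demo_per_cpu_gain  = 25;    /* assumed, % */

        return (jiffies_now % demo_def_timeslice) >
               (demo_per_cpu_gain * demo_def_timeslice / 100);
    }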
| @@ -3016,7 +3117,8 @@ need_resched: | |||
| 3016 | 3117 | ||
| 3017 | #endif /* CONFIG_PREEMPT */ | 3118 | #endif /* CONFIG_PREEMPT */ |
| 3018 | 3119 | ||
| 3019 | int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key) | 3120 | int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, |
| 3121 | void *key) | ||
| 3020 | { | 3122 | { |
| 3021 | task_t *p = curr->private; | 3123 | task_t *p = curr->private; |
| 3022 | return try_to_wake_up(p, mode, sync); | 3124 | return try_to_wake_up(p, mode, sync); |
| @@ -3058,7 +3160,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, | |||
| 3058 | * @key: is directly passed to the wakeup function | 3160 | * @key: is directly passed to the wakeup function |
| 3059 | */ | 3161 | */ |
| 3060 | void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode, | 3162 | void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode, |
| 3061 | int nr_exclusive, void *key) | 3163 | int nr_exclusive, void *key) |
| 3062 | { | 3164 | { |
| 3063 | unsigned long flags; | 3165 | unsigned long flags; |
| 3064 | 3166 | ||
| @@ -3090,7 +3192,8 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode) | |||
| 3090 | * | 3192 | * |
| 3091 | * On UP it can prevent extra preemption. | 3193 | * On UP it can prevent extra preemption. |
| 3092 | */ | 3194 | */ |
| 3093 | void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) | 3195 | void fastcall |
| 3196 | __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) | ||
| 3094 | { | 3197 | { |
| 3095 | unsigned long flags; | 3198 | unsigned long flags; |
| 3096 | int sync = 1; | 3199 | int sync = 1; |
| @@ -3281,7 +3384,8 @@ void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q) | |||
| 3281 | 3384 | ||
| 3282 | EXPORT_SYMBOL(interruptible_sleep_on); | 3385 | EXPORT_SYMBOL(interruptible_sleep_on); |
| 3283 | 3386 | ||
| 3284 | long fastcall __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) | 3387 | long fastcall __sched |
| 3388 | interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) | ||
| 3285 | { | 3389 | { |
| 3286 | SLEEP_ON_VAR | 3390 | SLEEP_ON_VAR |
| 3287 | 3391 | ||
| @@ -3500,7 +3604,8 @@ static void __setscheduler(struct task_struct *p, int policy, int prio) | |||
| 3500 | * @policy: new policy. | 3604 | * @policy: new policy. |
| 3501 | * @param: structure containing the new RT priority. | 3605 | * @param: structure containing the new RT priority. |
| 3502 | */ | 3606 | */ |
| 3503 | int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param) | 3607 | int sched_setscheduler(struct task_struct *p, int policy, |
| 3608 | struct sched_param *param) | ||
| 3504 | { | 3609 | { |
| 3505 | int retval; | 3610 | int retval; |
| 3506 | int oldprio, oldpolicy = -1; | 3611 | int oldprio, oldpolicy = -1; |
| @@ -3520,7 +3625,7 @@ recheck: | |||
| 3520 | * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0. | 3625 | * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0. |
| 3521 | */ | 3626 | */ |
| 3522 | if (param->sched_priority < 0 || | 3627 | if (param->sched_priority < 0 || |
| 3523 | (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) || | 3628 | (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) || |
| 3524 | (!p->mm && param->sched_priority > MAX_RT_PRIO-1)) | 3629 | (!p->mm && param->sched_priority > MAX_RT_PRIO-1)) |
| 3525 | return -EINVAL; | 3630 | return -EINVAL; |
| 3526 | if ((policy == SCHED_NORMAL) != (param->sched_priority == 0)) | 3631 | if ((policy == SCHED_NORMAL) != (param->sched_priority == 0)) |
| @@ -3583,7 +3688,8 @@ recheck: | |||
| 3583 | } | 3688 | } |
| 3584 | EXPORT_SYMBOL_GPL(sched_setscheduler); | 3689 | EXPORT_SYMBOL_GPL(sched_setscheduler); |
| 3585 | 3690 | ||
| 3586 | static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) | 3691 | static int |
| 3692 | do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) | ||
| 3587 | { | 3693 | { |
| 3588 | int retval; | 3694 | int retval; |
| 3589 | struct sched_param lparam; | 3695 | struct sched_param lparam; |
| @@ -3850,7 +3956,7 @@ asmlinkage long sys_sched_yield(void) | |||
| 3850 | if (rt_task(current)) | 3956 | if (rt_task(current)) |
| 3851 | target = rq->active; | 3957 | target = rq->active; |
| 3852 | 3958 | ||
| 3853 | if (current->array->nr_active == 1) { | 3959 | if (array->nr_active == 1) { |
| 3854 | schedstat_inc(rq, yld_act_empty); | 3960 | schedstat_inc(rq, yld_act_empty); |
| 3855 | if (!rq->expired->nr_active) | 3961 | if (!rq->expired->nr_active) |
| 3856 | schedstat_inc(rq, yld_both_empty); | 3962 | schedstat_inc(rq, yld_both_empty); |
| @@ -3914,7 +4020,7 @@ EXPORT_SYMBOL(cond_resched); | |||
| 3914 | * operations here to prevent schedule() from being called twice (once via | 4020 | * operations here to prevent schedule() from being called twice (once via |
| 3915 | * spin_unlock(), once by hand). | 4021 | * spin_unlock(), once by hand). |
| 3916 | */ | 4022 | */ |
| 3917 | int cond_resched_lock(spinlock_t * lock) | 4023 | int cond_resched_lock(spinlock_t *lock) |
| 3918 | { | 4024 | { |
| 3919 | int ret = 0; | 4025 | int ret = 0; |
| 3920 | 4026 | ||
| @@ -4097,7 +4203,7 @@ static inline struct task_struct *younger_sibling(struct task_struct *p) | |||
| 4097 | return list_entry(p->sibling.next,struct task_struct,sibling); | 4203 | return list_entry(p->sibling.next,struct task_struct,sibling); |
| 4098 | } | 4204 | } |
| 4099 | 4205 | ||
| 4100 | static void show_task(task_t * p) | 4206 | static void show_task(task_t *p) |
| 4101 | { | 4207 | { |
| 4102 | task_t *relative; | 4208 | task_t *relative; |
| 4103 | unsigned state; | 4209 | unsigned state; |
| @@ -4123,7 +4229,7 @@ static void show_task(task_t * p) | |||
| 4123 | #endif | 4229 | #endif |
| 4124 | #ifdef CONFIG_DEBUG_STACK_USAGE | 4230 | #ifdef CONFIG_DEBUG_STACK_USAGE |
| 4125 | { | 4231 | { |
| 4126 | unsigned long * n = (unsigned long *) (p->thread_info+1); | 4232 | unsigned long *n = (unsigned long *) (p->thread_info+1); |
| 4127 | while (!*n) | 4233 | while (!*n) |
| 4128 | n++; | 4234 | n++; |
| 4129 | free = (unsigned long) n - (unsigned long)(p->thread_info+1); | 4235 | free = (unsigned long) n - (unsigned long)(p->thread_info+1); |
| @@ -4332,7 +4438,7 @@ out: | |||
| 4332 | * thread migration by bumping thread off CPU then 'pushing' onto | 4438 | * thread migration by bumping thread off CPU then 'pushing' onto |
| 4333 | * another runqueue. | 4439 | * another runqueue. |
| 4334 | */ | 4440 | */ |
| 4335 | static int migration_thread(void * data) | 4441 | static int migration_thread(void *data) |
| 4336 | { | 4442 | { |
| 4337 | runqueue_t *rq; | 4443 | runqueue_t *rq; |
| 4338 | int cpu = (long)data; | 4444 | int cpu = (long)data; |
diff --git a/kernel/signal.c b/kernel/signal.c index 4980a073237f..b92c3c9f8b9a 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -2221,8 +2221,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese, | |||
| 2221 | recalc_sigpending(); | 2221 | recalc_sigpending(); |
| 2222 | spin_unlock_irq(&current->sighand->siglock); | 2222 | spin_unlock_irq(&current->sighand->siglock); |
| 2223 | 2223 | ||
| 2224 | current->state = TASK_INTERRUPTIBLE; | 2224 | timeout = schedule_timeout_interruptible(timeout); |
| 2225 | timeout = schedule_timeout(timeout); | ||
| 2226 | 2225 | ||
| 2227 | try_to_freeze(); | 2226 | try_to_freeze(); |
| 2228 | spin_lock_irq(&current->sighand->siglock); | 2227 | spin_lock_irq(&current->sighand->siglock); |
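This hunk shows the pattern the series applies throughout the tree: the hand-rolled pair of a task-state write plus schedule_timeout() collapses into the new helper. A side-by-side sketch of the idiom (illustrative, not a further hunk):

    /* before: two steps, easy to forget the state write */
    current->state = TASK_INTERRUPTIBLE;
    timeout = schedule_timeout(timeout);

    /* after: one call sets the state and sleeps */
    timeout = schedule_timeout_interruptible(timeout);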
diff --git a/kernel/spinlock.c b/kernel/spinlock.c index 0c3f9d8bbe17..0375fcd5921d 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c | |||
| @@ -3,7 +3,10 @@ | |||
| 3 | * | 3 | * |
| 4 | * Author: Zwane Mwaikambo <zwane@fsmlabs.com> | 4 | * Author: Zwane Mwaikambo <zwane@fsmlabs.com> |
| 5 | * | 5 | * |
| 6 | * Copyright (2004) Ingo Molnar | 6 | * Copyright (2004, 2005) Ingo Molnar |
| 7 | * | ||
| 8 | * This file contains the spinlock/rwlock implementations for the | ||
| 9 | * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them) | ||
| 7 | */ | 10 | */ |
| 8 | 11 | ||
| 9 | #include <linux/config.h> | 12 | #include <linux/config.h> |
| @@ -17,12 +20,12 @@ | |||
| 17 | * Generic declaration of the raw read_trylock() function, | 20 | * Generic declaration of the raw read_trylock() function, |
| 18 | * architectures are supposed to optimize this: | 21 | * architectures are supposed to optimize this: |
| 19 | */ | 22 | */ |
| 20 | int __lockfunc generic_raw_read_trylock(rwlock_t *lock) | 23 | int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock) |
| 21 | { | 24 | { |
| 22 | _raw_read_lock(lock); | 25 | __raw_read_lock(lock); |
| 23 | return 1; | 26 | return 1; |
| 24 | } | 27 | } |
| 25 | EXPORT_SYMBOL(generic_raw_read_trylock); | 28 | EXPORT_SYMBOL(generic__raw_read_trylock); |
| 26 | 29 | ||
| 27 | int __lockfunc _spin_trylock(spinlock_t *lock) | 30 | int __lockfunc _spin_trylock(spinlock_t *lock) |
| 28 | { | 31 | { |
| @@ -57,7 +60,7 @@ int __lockfunc _write_trylock(rwlock_t *lock) | |||
| 57 | } | 60 | } |
| 58 | EXPORT_SYMBOL(_write_trylock); | 61 | EXPORT_SYMBOL(_write_trylock); |
| 59 | 62 | ||
| 60 | #ifndef CONFIG_PREEMPT | 63 | #if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) |
| 61 | 64 | ||
| 62 | void __lockfunc _read_lock(rwlock_t *lock) | 65 | void __lockfunc _read_lock(rwlock_t *lock) |
| 63 | { | 66 | { |
| @@ -72,7 +75,7 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) | |||
| 72 | 75 | ||
| 73 | local_irq_save(flags); | 76 | local_irq_save(flags); |
| 74 | preempt_disable(); | 77 | preempt_disable(); |
| 75 | _raw_spin_lock_flags(lock, flags); | 78 | _raw_spin_lock_flags(lock, &flags); |
| 76 | return flags; | 79 | return flags; |
| 77 | } | 80 | } |
| 78 | EXPORT_SYMBOL(_spin_lock_irqsave); | 81 | EXPORT_SYMBOL(_spin_lock_irqsave); |
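_spin_lock_irqsave() saves the caller's interrupt state, disables local interrupts, then spins; the saved flags come back as the return value. The hunk also passes &flags down to _raw_spin_lock_flags() so an architecture can, for example, briefly re-enable interrupts while it waits for the lock. A hedged sketch of the caller side (the lock and counter names are made up):

    static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
    static int shared_count;

    static void bump_count(void)
    {
            unsigned long flags;

            /* IRQ-safe: an interrupt handler on this CPU cannot
             * deadlock against us while we hold the lock */
            spin_lock_irqsave(&my_lock, flags);
            shared_count++;
            spin_unlock_irqrestore(&my_lock, flags);
    }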
diff --git a/kernel/timer.c b/kernel/timer.c index 13e2b513be01..f4152fcd9f8e 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
| @@ -1154,6 +1154,20 @@ fastcall signed long __sched schedule_timeout(signed long timeout) | |||
| 1154 | 1154 | ||
| 1155 | EXPORT_SYMBOL(schedule_timeout); | 1155 | EXPORT_SYMBOL(schedule_timeout); |
| 1156 | 1156 | ||
| 1157 | signed long __sched schedule_timeout_interruptible(signed long timeout) | ||
| 1158 | { | ||
| 1159 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 1160 | return schedule_timeout(timeout); | ||
| 1161 | } | ||
| 1162 | EXPORT_SYMBOL(schedule_timeout_interruptible); | ||
| 1163 | |||
| 1164 | signed long __sched schedule_timeout_uninterruptible(signed long timeout) | ||
| 1165 | { | ||
| 1166 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
| 1167 | return schedule_timeout(timeout); | ||
| 1168 | } | ||
| 1169 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); | ||
| 1170 | |||
| 1157 | /* Thread ID - the internal kernel "pid" */ | 1171 | /* Thread ID - the internal kernel "pid" */ |
| 1158 | asmlinkage long sys_gettid(void) | 1172 | asmlinkage long sys_gettid(void) |
| 1159 | { | 1173 | { |
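The two helpers wrap the state write but keep schedule_timeout()'s contract: the return value is the number of jiffies remaining, nonzero only when the sleep ended early (for the interruptible variant, on a signal). A hedged usage sketch:

    /* sleep ~100ms, not wakeable by signals */
    schedule_timeout_uninterruptible(msecs_to_jiffies(100));

    /* sleep up to 1s; a pending signal ends the sleep early */
    remaining = schedule_timeout_interruptible(HZ);
    if (remaining)
            ;       /* woken by a signal, 'remaining' jiffies left */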
| @@ -1170,8 +1184,7 @@ static long __sched nanosleep_restart(struct restart_block *restart) | |||
| 1170 | if (!time_after(expire, now)) | 1184 | if (!time_after(expire, now)) |
| 1171 | return 0; | 1185 | return 0; |
| 1172 | 1186 | ||
| 1173 | current->state = TASK_INTERRUPTIBLE; | 1187 | expire = schedule_timeout_interruptible(expire - now); |
| 1174 | expire = schedule_timeout(expire - now); | ||
| 1175 | 1188 | ||
| 1176 | ret = 0; | 1189 | ret = 0; |
| 1177 | if (expire) { | 1190 | if (expire) { |
| @@ -1199,8 +1212,7 @@ asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __us | |||
| 1199 | return -EINVAL; | 1212 | return -EINVAL; |
| 1200 | 1213 | ||
| 1201 | expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec); | 1214 | expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec); |
| 1202 | current->state = TASK_INTERRUPTIBLE; | 1215 | expire = schedule_timeout_interruptible(expire); |
| 1203 | expire = schedule_timeout(expire); | ||
| 1204 | 1216 | ||
| 1205 | ret = 0; | 1217 | ret = 0; |
| 1206 | if (expire) { | 1218 | if (expire) { |
| @@ -1598,10 +1610,8 @@ void msleep(unsigned int msecs) | |||
| 1598 | { | 1610 | { |
| 1599 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | 1611 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; |
| 1600 | 1612 | ||
| 1601 | while (timeout) { | 1613 | while (timeout) |
| 1602 | set_current_state(TASK_UNINTERRUPTIBLE); | 1614 | timeout = schedule_timeout_uninterruptible(timeout); |
| 1603 | timeout = schedule_timeout(timeout); | ||
| 1604 | } | ||
| 1605 | } | 1615 | } |
| 1606 | 1616 | ||
| 1607 | EXPORT_SYMBOL(msleep); | 1617 | EXPORT_SYMBOL(msleep); |
| @@ -1614,10 +1624,8 @@ unsigned long msleep_interruptible(unsigned int msecs) | |||
| 1614 | { | 1624 | { |
| 1615 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | 1625 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; |
| 1616 | 1626 | ||
| 1617 | while (timeout && !signal_pending(current)) { | 1627 | while (timeout && !signal_pending(current)) |
| 1618 | set_current_state(TASK_INTERRUPTIBLE); | 1628 | timeout = schedule_timeout_interruptible(timeout); |
| 1619 | timeout = schedule_timeout(timeout); | ||
| 1620 | } | ||
| 1621 | return jiffies_to_msecs(timeout); | 1629 | return jiffies_to_msecs(timeout); |
| 1622 | } | 1630 | } |
| 1623 | 1631 | ||
diff --git a/lib/Makefile b/lib/Makefile index d9c38ba05e7b..44a46750690a 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
| @@ -16,6 +16,7 @@ CFLAGS_kobject.o += -DDEBUG | |||
| 16 | CFLAGS_kobject_uevent.o += -DDEBUG | 16 | CFLAGS_kobject_uevent.o += -DDEBUG |
| 17 | endif | 17 | endif |
| 18 | 18 | ||
| 19 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o | ||
| 19 | lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o | 20 | lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o |
| 20 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o | 21 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o |
| 21 | lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o | 22 | lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o |
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c index 6658d81e1836..2377af057d09 100644 --- a/lib/dec_and_lock.c +++ b/lib/dec_and_lock.c | |||
| @@ -25,8 +25,6 @@ | |||
| 25 | * this is trivially done efficiently using a load-locked | 25 | * this is trivially done efficiently using a load-locked |
| 26 | * store-conditional approach, for example. | 26 | * store-conditional approach, for example. |
| 27 | */ | 27 | */ |
| 28 | |||
| 29 | #ifndef ATOMIC_DEC_AND_LOCK | ||
| 30 | int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) | 28 | int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) |
| 31 | { | 29 | { |
| 32 | spin_lock(lock); | 30 | spin_lock(lock); |
| @@ -37,4 +35,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) | |||
| 37 | } | 35 | } |
| 38 | 36 | ||
| 39 | EXPORT_SYMBOL(_atomic_dec_and_lock); | 37 | EXPORT_SYMBOL(_atomic_dec_and_lock); |
| 40 | #endif | ||
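With the ATOMIC_DEC_AND_LOCK guard gone, the generic _atomic_dec_and_lock() is built whenever this file is compiled. Its use case: drop a refcount and, only on the 1->0 transition, return with the lock held so the object can be unlinked before anyone revives it. A hedged sketch with made-up structure and lock names:

    static void put_thing(struct thing *t)
    {
            if (atomic_dec_and_lock(&t->refcount, &thing_list_lock)) {
                    list_del(&t->node);
                    spin_unlock(&thing_list_lock);
                    kfree(t);
            }
    }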
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c index bd2bc5d887b8..cb5490ec00f2 100644 --- a/lib/kernel_lock.c +++ b/lib/kernel_lock.c | |||
| @@ -177,8 +177,7 @@ static inline void __lock_kernel(void) | |||
| 177 | 177 | ||
| 178 | static inline void __unlock_kernel(void) | 178 | static inline void __unlock_kernel(void) |
| 179 | { | 179 | { |
| 180 | _raw_spin_unlock(&kernel_flag); | 180 | spin_unlock(&kernel_flag); |
| 181 | preempt_enable(); | ||
| 182 | } | 181 | } |
| 183 | 182 | ||
| 184 | /* | 183 | /* |
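With the consolidated spinlock ops, spin_unlock() itself performs the preempt_enable(), so __unlock_kernel() no longer needs to open-code the raw unlock plus the preemption re-enable. The big kernel lock keeps its usual pairing at the call sites (a sketch, not from the patch):

    lock_kernel();
    /* ... touch state still protected by the BKL ... */
    unlock_kernel();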
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index b972dd29289d..6a8bc6e06431 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -110,7 +110,7 @@ radix_tree_node_free(struct radix_tree_node *node) | |||
| 110 | * success, return zero, with preemption disabled. On error, return -ENOMEM | 110 | * success, return zero, with preemption disabled. On error, return -ENOMEM |
| 111 | * with preemption not disabled. | 111 | * with preemption not disabled. |
| 112 | */ | 112 | */ |
| 113 | int radix_tree_preload(int gfp_mask) | 113 | int radix_tree_preload(unsigned int __nocast gfp_mask) |
| 114 | { | 114 | { |
| 115 | struct radix_tree_preload *rtp; | 115 | struct radix_tree_preload *rtp; |
| 116 | struct radix_tree_node *node; | 116 | struct radix_tree_node *node; |
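radix_tree_preload() returns with preemption disabled on success, so the caller can take a spinlock and insert without the allocator sleeping underneath. A hedged sketch of the protocol (the tree and lock names are made up):

    if (radix_tree_preload(GFP_KERNEL))
            return -ENOMEM;
    spin_lock(&my_tree_lock);
    error = radix_tree_insert(&my_tree, index, item);
    spin_unlock(&my_tree_lock);
    radix_tree_preload_end();       /* re-enables preemption */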
diff --git a/lib/sort.c b/lib/sort.c index b73dbb0e7c83..ddc4d35df289 100644 --- a/lib/sort.c +++ b/lib/sort.c | |||
| @@ -6,15 +6,16 @@ | |||
| 6 | 6 | ||
| 7 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
| 8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
| 9 | #include <linux/sort.h> | ||
| 9 | 10 | ||
| 10 | void u32_swap(void *a, void *b, int size) | 11 | static void u32_swap(void *a, void *b, int size) |
| 11 | { | 12 | { |
| 12 | u32 t = *(u32 *)a; | 13 | u32 t = *(u32 *)a; |
| 13 | *(u32 *)a = *(u32 *)b; | 14 | *(u32 *)a = *(u32 *)b; |
| 14 | *(u32 *)b = t; | 15 | *(u32 *)b = t; |
| 15 | } | 16 | } |
| 16 | 17 | ||
| 17 | void generic_swap(void *a, void *b, int size) | 18 | static void generic_swap(void *a, void *b, int size) |
| 18 | { | 19 | { |
| 19 | char t; | 20 | char t; |
| 20 | 21 | ||
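u32_swap() and generic_swap() are only the default helpers behind lib/sort's sort(), which picks u32_swap for 4-byte, 4-byte-aligned elements when the caller passes a NULL swap function; making them static matches that. A hedged usage sketch:

    static int cmp_int(const void *a, const void *b)
    {
            return *(const int *)a - *(const int *)b;
    }

    int nums[] = { 3, 1, 2 };
    sort(nums, ARRAY_SIZE(nums), sizeof(int), cmp_int, NULL);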
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c new file mode 100644 index 000000000000..906ad101eab3 --- /dev/null +++ b/lib/spinlock_debug.c | |||
| @@ -0,0 +1,257 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
| 3 | * Released under the General Public License (GPL). | ||
| 4 | * | ||
| 5 | * This file contains the spinlock/rwlock implementations for | ||
| 6 | * DEBUG_SPINLOCK. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/config.h> | ||
| 10 | #include <linux/spinlock.h> | ||
| 11 | #include <linux/interrupt.h> | ||
| 12 | #include <linux/delay.h> | ||
| 13 | |||
| 14 | static void spin_bug(spinlock_t *lock, const char *msg) | ||
| 15 | { | ||
| 16 | static long print_once = 1; | ||
| 17 | struct task_struct *owner = NULL; | ||
| 18 | |||
| 19 | if (xchg(&print_once, 0)) { | ||
| 20 | if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT) | ||
| 21 | owner = lock->owner; | ||
| 22 | printk("BUG: spinlock %s on CPU#%d, %s/%d\n", | ||
| 23 | msg, smp_processor_id(), current->comm, current->pid); | ||
| 24 | printk(" lock: %p, .magic: %08x, .owner: %s/%d, .owner_cpu: %d\n", | ||
| 25 | lock, lock->magic, | ||
| 26 | owner ? owner->comm : "<none>", | ||
| 27 | owner ? owner->pid : -1, | ||
| 28 | lock->owner_cpu); | ||
| 29 | dump_stack(); | ||
| 30 | #ifdef CONFIG_SMP | ||
| 31 | /* | ||
| 32 | * We cannot continue on SMP: | ||
| 33 | */ | ||
| 34 | // panic("bad locking"); | ||
| 35 | #endif | ||
| 36 | } | ||
| 37 | } | ||
| 38 | |||
| 39 | #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) | ||
| 40 | |||
| 41 | static inline void debug_spin_lock_before(spinlock_t *lock) | ||
| 42 | { | ||
| 43 | SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); | ||
| 44 | SPIN_BUG_ON(lock->owner == current, lock, "recursion"); | ||
| 45 | SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(), | ||
| 46 | lock, "cpu recursion"); | ||
| 47 | } | ||
| 48 | |||
| 49 | static inline void debug_spin_lock_after(spinlock_t *lock) | ||
| 50 | { | ||
| 51 | lock->owner_cpu = raw_smp_processor_id(); | ||
| 52 | lock->owner = current; | ||
| 53 | } | ||
| 54 | |||
| 55 | static inline void debug_spin_unlock(spinlock_t *lock) | ||
| 56 | { | ||
| 57 | SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); | ||
| 58 | SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked"); | ||
| 59 | SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); | ||
| 60 | SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), | ||
| 61 | lock, "wrong CPU"); | ||
| 62 | lock->owner = SPINLOCK_OWNER_INIT; | ||
| 63 | lock->owner_cpu = -1; | ||
| 64 | } | ||
| 65 | |||
| 66 | static void __spin_lock_debug(spinlock_t *lock) | ||
| 67 | { | ||
| 68 | int print_once = 1; | ||
| 69 | u64 i; | ||
| 70 | |||
| 71 | for (;;) { | ||
| 72 | for (i = 0; i < loops_per_jiffy * HZ; i++) { | ||
| 73 | cpu_relax(); | ||
| 74 | if (__raw_spin_trylock(&lock->raw_lock)) | ||
| 75 | return; | ||
| 76 | } | ||
| 77 | /* lockup suspected: */ | ||
| 78 | if (print_once) { | ||
| 79 | print_once = 0; | ||
| 80 | printk("BUG: spinlock lockup on CPU#%d, %s/%d, %p\n", | ||
| 81 | smp_processor_id(), current->comm, current->pid, | ||
| 82 | lock); | ||
| 83 | dump_stack(); | ||
| 84 | } | ||
| 85 | } | ||
| 86 | } | ||
| 87 | |||
| 88 | void _raw_spin_lock(spinlock_t *lock) | ||
| 89 | { | ||
| 90 | debug_spin_lock_before(lock); | ||
| 91 | if (unlikely(!__raw_spin_trylock(&lock->raw_lock))) | ||
| 92 | __spin_lock_debug(lock); | ||
| 93 | debug_spin_lock_after(lock); | ||
| 94 | } | ||
| 95 | |||
| 96 | int _raw_spin_trylock(spinlock_t *lock) | ||
| 97 | { | ||
| 98 | int ret = __raw_spin_trylock(&lock->raw_lock); | ||
| 99 | |||
| 100 | if (ret) | ||
| 101 | debug_spin_lock_after(lock); | ||
| 102 | #ifndef CONFIG_SMP | ||
| 103 | /* | ||
| 104 | * Must not happen on UP: | ||
| 105 | */ | ||
| 106 | SPIN_BUG_ON(!ret, lock, "trylock failure on UP"); | ||
| 107 | #endif | ||
| 108 | return ret; | ||
| 109 | } | ||
| 110 | |||
| 111 | void _raw_spin_unlock(spinlock_t *lock) | ||
| 112 | { | ||
| 113 | debug_spin_unlock(lock); | ||
| 114 | __raw_spin_unlock(&lock->raw_lock); | ||
| 115 | } | ||
| 116 | |||
| 117 | static void rwlock_bug(rwlock_t *lock, const char *msg) | ||
| 118 | { | ||
| 119 | static long print_once = 1; | ||
| 120 | |||
| 121 | if (xchg(&print_once, 0)) { | ||
| 122 | printk("BUG: rwlock %s on CPU#%d, %s/%d, %p\n", msg, | ||
| 123 | smp_processor_id(), current->comm, current->pid, lock); | ||
| 124 | dump_stack(); | ||
| 125 | #ifdef CONFIG_SMP | ||
| 126 | /* | ||
| 127 | * We cannot continue on SMP: | ||
| 128 | */ | ||
| 129 | panic("bad locking"); | ||
| 130 | #endif | ||
| 131 | } | ||
| 132 | } | ||
| 133 | |||
| 134 | #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg) | ||
| 135 | |||
| 136 | static void __read_lock_debug(rwlock_t *lock) | ||
| 137 | { | ||
| 138 | int print_once = 1; | ||
| 139 | u64 i; | ||
| 140 | |||
| 141 | for (;;) { | ||
| 142 | for (i = 0; i < loops_per_jiffy * HZ; i++) { | ||
| 143 | cpu_relax(); | ||
| 144 | if (__raw_read_trylock(&lock->raw_lock)) | ||
| 145 | return; | ||
| 146 | } | ||
| 147 | /* lockup suspected: */ | ||
| 148 | if (print_once) { | ||
| 149 | print_once = 0; | ||
| 150 | printk("BUG: read-lock lockup on CPU#%d, %s/%d, %p\n", | ||
| 151 | smp_processor_id(), current->comm, current->pid, | ||
| 152 | lock); | ||
| 153 | dump_stack(); | ||
| 154 | } | ||
| 155 | } | ||
| 156 | } | ||
| 157 | |||
| 158 | void _raw_read_lock(rwlock_t *lock) | ||
| 159 | { | ||
| 160 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); | ||
| 161 | if (unlikely(!__raw_read_trylock(&lock->raw_lock))) | ||
| 162 | __read_lock_debug(lock); | ||
| 163 | } | ||
| 164 | |||
| 165 | int _raw_read_trylock(rwlock_t *lock) | ||
| 166 | { | ||
| 167 | int ret = __raw_read_trylock(&lock->raw_lock); | ||
| 168 | |||
| 169 | #ifndef CONFIG_SMP | ||
| 170 | /* | ||
| 171 | * Must not happen on UP: | ||
| 172 | */ | ||
| 173 | RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP"); | ||
| 174 | #endif | ||
| 175 | return ret; | ||
| 176 | } | ||
| 177 | |||
| 178 | void _raw_read_unlock(rwlock_t *lock) | ||
| 179 | { | ||
| 180 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); | ||
| 181 | __raw_read_unlock(&lock->raw_lock); | ||
| 182 | } | ||
| 183 | |||
| 184 | static inline void debug_write_lock_before(rwlock_t *lock) | ||
| 185 | { | ||
| 186 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); | ||
| 187 | RWLOCK_BUG_ON(lock->owner == current, lock, "recursion"); | ||
| 188 | RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(), | ||
| 189 | lock, "cpu recursion"); | ||
| 190 | } | ||
| 191 | |||
| 192 | static inline void debug_write_lock_after(rwlock_t *lock) | ||
| 193 | { | ||
| 194 | lock->owner_cpu = raw_smp_processor_id(); | ||
| 195 | lock->owner = current; | ||
| 196 | } | ||
| 197 | |||
| 198 | static inline void debug_write_unlock(rwlock_t *lock) | ||
| 199 | { | ||
| 200 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); | ||
| 201 | RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner"); | ||
| 202 | RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), | ||
| 203 | lock, "wrong CPU"); | ||
| 204 | lock->owner = SPINLOCK_OWNER_INIT; | ||
| 205 | lock->owner_cpu = -1; | ||
| 206 | } | ||
| 207 | |||
| 208 | static void __write_lock_debug(rwlock_t *lock) | ||
| 209 | { | ||
| 210 | int print_once = 1; | ||
| 211 | u64 i; | ||
| 212 | |||
| 213 | for (;;) { | ||
| 214 | for (i = 0; i < loops_per_jiffy * HZ; i++) { | ||
| 215 | cpu_relax(); | ||
| 216 | if (__raw_write_trylock(&lock->raw_lock)) | ||
| 217 | return; | ||
| 218 | } | ||
| 219 | /* lockup suspected: */ | ||
| 220 | if (print_once) { | ||
| 221 | print_once = 0; | ||
| 222 | printk("BUG: write-lock lockup on CPU#%d, %s/%d, %p\n", | ||
| 223 | smp_processor_id(), current->comm, current->pid, | ||
| 224 | lock); | ||
| 225 | dump_stack(); | ||
| 226 | } | ||
| 227 | } | ||
| 228 | } | ||
| 229 | |||
| 230 | void _raw_write_lock(rwlock_t *lock) | ||
| 231 | { | ||
| 232 | debug_write_lock_before(lock); | ||
| 233 | if (unlikely(!__raw_write_trylock(&lock->raw_lock))) | ||
| 234 | __write_lock_debug(lock); | ||
| 235 | debug_write_lock_after(lock); | ||
| 236 | } | ||
| 237 | |||
| 238 | int _raw_write_trylock(rwlock_t *lock) | ||
| 239 | { | ||
| 240 | int ret = __raw_write_trylock(&lock->raw_lock); | ||
| 241 | |||
| 242 | if (ret) | ||
| 243 | debug_write_lock_after(lock); | ||
| 244 | #ifndef CONFIG_SMP | ||
| 245 | /* | ||
| 246 | * Must not happen on UP: | ||
| 247 | */ | ||
| 248 | RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP"); | ||
| 249 | #endif | ||
| 250 | return ret; | ||
| 251 | } | ||
| 252 | |||
| 253 | void _raw_write_unlock(rwlock_t *lock) | ||
| 254 | { | ||
| 255 | debug_write_unlock(lock); | ||
| 256 | __raw_write_unlock(&lock->raw_lock); | ||
| 257 | } | ||
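The debug variants wrap every lock operation in ownership and magic checks, and trade a silent deadlock for a loud report: if the raw trylock keeps failing for on the order of a second (loops_per_jiffy * HZ spins), a lockup message and stack dump are printed while the loop keeps trying. A hedged sketch of a bug this catches (the structure name is made up):

    static void buggy(struct mydev *dev)
    {
            spin_lock(&dev->lock);
            do_work(dev);
            spin_lock(&dev->lock);  /* same task re-locks: the checks in
                                     * debug_spin_lock_before() report
                                     * "BUG: spinlock recursion" with the
                                     * owner task and CPU, then spin */
    }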
diff --git a/mm/filemap.c b/mm/filemap.c index 88611928e71f..b5346576e58d 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
| @@ -37,6 +37,10 @@ | |||
| 37 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
| 38 | #include <asm/mman.h> | 38 | #include <asm/mman.h> |
| 39 | 39 | ||
| 40 | static ssize_t | ||
| 41 | generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, | ||
| 42 | loff_t offset, unsigned long nr_segs); | ||
| 43 | |||
| 40 | /* | 44 | /* |
| 41 | * Shared mappings implemented 30.11.1994. It's not fully working yet, | 45 | * Shared mappings implemented 30.11.1994. It's not fully working yet, |
| 42 | * though. | 46 | * though. |
| @@ -301,8 +305,9 @@ EXPORT_SYMBOL(sync_page_range); | |||
| 301 | * as it forces O_SYNC writers to different parts of the same file | 305 | * as it forces O_SYNC writers to different parts of the same file |
| 302 | * to be serialised right until io completion. | 306 | * to be serialised right until io completion. |
| 303 | */ | 307 | */ |
| 304 | int sync_page_range_nolock(struct inode *inode, struct address_space *mapping, | 308 | static int sync_page_range_nolock(struct inode *inode, |
| 305 | loff_t pos, size_t count) | 309 | struct address_space *mapping, |
| 310 | loff_t pos, size_t count) | ||
| 306 | { | 311 | { |
| 307 | pgoff_t start = pos >> PAGE_CACHE_SHIFT; | 312 | pgoff_t start = pos >> PAGE_CACHE_SHIFT; |
| 308 | pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT; | 313 | pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT; |
| @@ -317,7 +322,6 @@ int sync_page_range_nolock(struct inode *inode, struct address_space *mapping, | |||
| 317 | ret = wait_on_page_writeback_range(mapping, start, end); | 322 | ret = wait_on_page_writeback_range(mapping, start, end); |
| 318 | return ret; | 323 | return ret; |
| 319 | } | 324 | } |
| 320 | EXPORT_SYMBOL(sync_page_range_nolock); | ||
| 321 | 325 | ||
| 322 | /** | 326 | /** |
| 323 | * filemap_fdatawait - walk the list of under-writeback pages of the given | 327 | * filemap_fdatawait - walk the list of under-writeback pages of the given |
| @@ -2008,7 +2012,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, | |||
| 2008 | } | 2012 | } |
| 2009 | EXPORT_SYMBOL(generic_file_buffered_write); | 2013 | EXPORT_SYMBOL(generic_file_buffered_write); |
| 2010 | 2014 | ||
| 2011 | ssize_t | 2015 | static ssize_t |
| 2012 | __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, | 2016 | __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, |
| 2013 | unsigned long nr_segs, loff_t *ppos) | 2017 | unsigned long nr_segs, loff_t *ppos) |
| 2014 | { | 2018 | { |
| @@ -2108,7 +2112,7 @@ generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, | |||
| 2108 | return ret; | 2112 | return ret; |
| 2109 | } | 2113 | } |
| 2110 | 2114 | ||
| 2111 | ssize_t | 2115 | static ssize_t |
| 2112 | __generic_file_write_nolock(struct file *file, const struct iovec *iov, | 2116 | __generic_file_write_nolock(struct file *file, const struct iovec *iov, |
| 2113 | unsigned long nr_segs, loff_t *ppos) | 2117 | unsigned long nr_segs, loff_t *ppos) |
| 2114 | { | 2118 | { |
| @@ -2229,7 +2233,7 @@ EXPORT_SYMBOL(generic_file_writev); | |||
| 2229 | * Called under i_sem for writes to S_ISREG files. Returns -EIO if something | 2233 | * Called under i_sem for writes to S_ISREG files. Returns -EIO if something |
| 2230 | * went wrong during pagecache shootdown. | 2234 | * went wrong during pagecache shootdown. |
| 2231 | */ | 2235 | */ |
| 2232 | ssize_t | 2236 | static ssize_t |
| 2233 | generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, | 2237 | generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, |
| 2234 | loff_t offset, unsigned long nr_segs) | 2238 | loff_t offset, unsigned long nr_segs) |
| 2235 | { | 2239 | { |
| @@ -2264,4 +2268,3 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, | |||
| 2264 | } | 2268 | } |
| 2265 | return retval; | 2269 | return retval; |
| 2266 | } | 2270 | } |
| 2267 | EXPORT_SYMBOL_GPL(generic_file_direct_IO); | ||
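Because generic_file_direct_IO() becomes static but is called well before its definition, the patch adds a forward declaration at the top of the file; without it, the earlier callers would not compile against the static symbol. The general C pattern, as a standalone sketch:

    static int helper(int x);               /* forward declaration */

    int caller(void)
    {
            return helper(2);               /* used before it is defined */
    }

    static int helper(int x)
    {
            return x * 2;
    }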
diff --git a/mm/memory.c b/mm/memory.c index 788a62810340..ae8161f1f459 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -2225,7 +2225,7 @@ void update_mem_hiwater(struct task_struct *tsk) | |||
| 2225 | #if !defined(__HAVE_ARCH_GATE_AREA) | 2225 | #if !defined(__HAVE_ARCH_GATE_AREA) |
| 2226 | 2226 | ||
| 2227 | #if defined(AT_SYSINFO_EHDR) | 2227 | #if defined(AT_SYSINFO_EHDR) |
| 2228 | struct vm_area_struct gate_vma; | 2228 | static struct vm_area_struct gate_vma; |
| 2229 | 2229 | ||
| 2230 | static int __init gate_vma_init(void) | 2230 | static int __init gate_vma_init(void) |
| 2231 | { | 2231 | { |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 5ec8da12cfd9..ac3bf33e5370 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
| @@ -300,6 +300,5 @@ retry: | |||
| 300 | * Give "p" a good chance of killing itself before we | 300 | * Give "p" a good chance of killing itself before we |
| 301 | * retry to allocate memory. | 301 | * retry to allocate memory. |
| 302 | */ | 302 | */ |
| 303 | __set_current_state(TASK_INTERRUPTIBLE); | 303 | schedule_timeout_interruptible(1); |
| 304 | schedule_timeout(1); | ||
| 305 | } | 304 | } |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3974fd81d27c..c5823c395f71 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -335,7 +335,7 @@ static inline void free_pages_check(const char *function, struct page *page) | |||
| 335 | /* | 335 | /* |
| 336 | * Frees a list of pages. | 336 | * Frees a list of pages. |
| 337 | * Assumes all pages on list are in same zone, and of same order. | 337 | * Assumes all pages on list are in same zone, and of same order. |
| 338 | * count is the number of pages to free, or 0 for all on the list. | 338 | * count is the number of pages to free. |
| 339 | * | 339 | * |
| 340 | * If the zone was previously in an "all pages pinned" state then look to | 340 | * If the zone was previously in an "all pages pinned" state then look to |
| 341 | * see if this freeing clears that state. | 341 | * see if this freeing clears that state. |
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
| @@ -1720,7 +1720,7 @@ next: | |||
| 1720 | cachep->objsize = size; | 1720 | cachep->objsize = size; |
| 1721 | 1721 | ||
| 1722 | if (flags & CFLGS_OFF_SLAB) | 1722 | if (flags & CFLGS_OFF_SLAB) |
| 1723 | cachep->slabp_cache = kmem_find_general_cachep(slab_size,0); | 1723 | cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); |
| 1724 | cachep->ctor = ctor; | 1724 | cachep->ctor = ctor; |
| 1725 | cachep->dtor = dtor; | 1725 | cachep->dtor = dtor; |
| 1726 | cachep->name = name; | 1726 | cachep->name = name; |
| @@ -2839,7 +2839,7 @@ out: | |||
| 2839 | * New and improved: it will now make sure that the object gets | 2839 | * New and improved: it will now make sure that the object gets |
| 2840 | * put on the correct node list so that there is no false sharing. | 2840 | * put on the correct node list so that there is no false sharing. |
| 2841 | */ | 2841 | */ |
| 2842 | void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid) | 2842 | void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid) |
| 2843 | { | 2843 | { |
| 2844 | unsigned long save_flags; | 2844 | unsigned long save_flags; |
| 2845 | void *ptr; | 2845 | void *ptr; |
diff --git a/mm/swap_state.c b/mm/swap_state.c index 029e56eb5e77..adbc2b426c2f 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
| @@ -67,8 +67,8 @@ void show_swap_cache_info(void) | |||
| 67 | * __add_to_swap_cache resembles add_to_page_cache on swapper_space, | 67 | * __add_to_swap_cache resembles add_to_page_cache on swapper_space, |
| 68 | * but sets SwapCache flag and private instead of mapping and index. | 68 | * but sets SwapCache flag and private instead of mapping and index. |
| 69 | */ | 69 | */ |
| 70 | static int __add_to_swap_cache(struct page *page, | 70 | static int __add_to_swap_cache(struct page *page, swp_entry_t entry, |
| 71 | swp_entry_t entry, int gfp_mask) | 71 | unsigned int __nocast gfp_mask) |
| 72 | { | 72 | { |
| 73 | int error; | 73 | int error; |
| 74 | 74 | ||
diff --git a/mm/swapfile.c b/mm/swapfile.c index 4b6e8bf986bc..0184f510aace 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
| @@ -1153,8 +1153,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile) | |||
| 1153 | p->highest_bit = 0; /* cuts scans short */ | 1153 | p->highest_bit = 0; /* cuts scans short */ |
| 1154 | while (p->flags >= SWP_SCANNING) { | 1154 | while (p->flags >= SWP_SCANNING) { |
| 1155 | spin_unlock(&swap_lock); | 1155 | spin_unlock(&swap_lock); |
| 1156 | set_current_state(TASK_UNINTERRUPTIBLE); | 1156 | schedule_timeout_uninterruptible(1); |
| 1157 | schedule_timeout(1); | ||
| 1158 | spin_lock(&swap_lock); | 1157 | spin_lock(&swap_lock); |
| 1159 | } | 1158 | } |
| 1160 | 1159 | ||
diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c index b62920eead3d..d64790bcd831 100644 --- a/sound/isa/sb/sb16_csp.c +++ b/sound/isa/sb/sb16_csp.c | |||
| @@ -42,8 +42,6 @@ MODULE_LICENSE("GPL"); | |||
| 42 | #else | 42 | #else |
| 43 | #define CSP_HDR_VALUE(a,b,c,d) ((d) | ((c)<<8) | ((b)<<16) | ((a)<<24)) | 43 | #define CSP_HDR_VALUE(a,b,c,d) ((d) | ((c)<<8) | ((b)<<16) | ((a)<<24)) |
| 44 | #endif | 44 | #endif |
| 45 | #define LE_SHORT(v) le16_to_cpu(v) | ||
| 46 | #define LE_INT(v) le32_to_cpu(v) | ||
| 47 | 45 | ||
| 48 | #define RIFF_HEADER CSP_HDR_VALUE('R', 'I', 'F', 'F') | 46 | #define RIFF_HEADER CSP_HDR_VALUE('R', 'I', 'F', 'F') |
| 49 | #define CSP__HEADER CSP_HDR_VALUE('C', 'S', 'P', ' ') | 47 | #define CSP__HEADER CSP_HDR_VALUE('C', 'S', 'P', ' ') |
| @@ -56,20 +54,20 @@ MODULE_LICENSE("GPL"); | |||
| 56 | /* | 54 | /* |
| 57 | * RIFF data format | 55 | * RIFF data format |
| 58 | */ | 56 | */ |
| 59 | typedef struct riff_header { | 57 | struct riff_header { |
| 60 | __u32 name; | 58 | __u32 name; |
| 61 | __u32 len; | 59 | __u32 len; |
| 62 | } riff_header_t; | 60 | }; |
| 63 | 61 | ||
| 64 | typedef struct desc_header { | 62 | struct desc_header { |
| 65 | riff_header_t info; | 63 | struct riff_header info; |
| 66 | __u16 func_nr; | 64 | __u16 func_nr; |
| 67 | __u16 VOC_type; | 65 | __u16 VOC_type; |
| 68 | __u16 flags_play_rec; | 66 | __u16 flags_play_rec; |
| 69 | __u16 flags_16bit_8bit; | 67 | __u16 flags_16bit_8bit; |
| 70 | __u16 flags_stereo_mono; | 68 | __u16 flags_stereo_mono; |
| 71 | __u16 flags_rates; | 69 | __u16 flags_rates; |
| 72 | } desc_header_t; | 70 | }; |
| 73 | 71 | ||
| 74 | /* | 72 | /* |
| 75 | * prototypes | 73 | * prototypes |
| @@ -302,9 +300,9 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user | |||
| 302 | unsigned char __user *data_end; | 300 | unsigned char __user *data_end; |
| 303 | unsigned short func_nr = 0; | 301 | unsigned short func_nr = 0; |
| 304 | 302 | ||
| 305 | riff_header_t file_h, item_h, code_h; | 303 | struct riff_header file_h, item_h, code_h; |
| 306 | __u32 item_type; | 304 | __u32 item_type; |
| 307 | desc_header_t funcdesc_h; | 305 | struct desc_header funcdesc_h; |
| 308 | 306 | ||
| 309 | unsigned long flags; | 307 | unsigned long flags; |
| 310 | int err; | 308 | int err; |
| @@ -316,12 +314,12 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user | |||
| 316 | if (copy_from_user(&file_h, data_ptr, sizeof(file_h))) | 314 | if (copy_from_user(&file_h, data_ptr, sizeof(file_h))) |
| 317 | return -EFAULT; | 315 | return -EFAULT; |
| 318 | if ((file_h.name != RIFF_HEADER) || | 316 | if ((file_h.name != RIFF_HEADER) || |
| 319 | (LE_INT(file_h.len) >= SNDRV_SB_CSP_MAX_MICROCODE_FILE_SIZE - sizeof(file_h))) { | 317 | (le32_to_cpu(file_h.len) >= SNDRV_SB_CSP_MAX_MICROCODE_FILE_SIZE - sizeof(file_h))) { |
| 320 | snd_printd("%s: Invalid RIFF header\n", __FUNCTION__); | 318 | snd_printd("%s: Invalid RIFF header\n", __FUNCTION__); |
| 321 | return -EINVAL; | 319 | return -EINVAL; |
| 322 | } | 320 | } |
| 323 | data_ptr += sizeof(file_h); | 321 | data_ptr += sizeof(file_h); |
| 324 | data_end = data_ptr + LE_INT(file_h.len); | 322 | data_end = data_ptr + le32_to_cpu(file_h.len); |
| 325 | 323 | ||
| 326 | if (copy_from_user(&item_type, data_ptr, sizeof(item_type))) | 324 | if (copy_from_user(&item_type, data_ptr, sizeof(item_type))) |
| 327 | return -EFAULT; | 325 | return -EFAULT; |
| @@ -331,7 +329,7 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user | |||
| 331 | } | 329 | } |
| 332 | data_ptr += sizeof (item_type); | 330 | data_ptr += sizeof (item_type); |
| 333 | 331 | ||
| 334 | for (; data_ptr < data_end; data_ptr += LE_INT(item_h.len)) { | 332 | for (; data_ptr < data_end; data_ptr += le32_to_cpu(item_h.len)) { |
| 335 | if (copy_from_user(&item_h, data_ptr, sizeof(item_h))) | 333 | if (copy_from_user(&item_h, data_ptr, sizeof(item_h))) |
| 336 | return -EFAULT; | 334 | return -EFAULT; |
| 337 | data_ptr += sizeof(item_h); | 335 | data_ptr += sizeof(item_h); |
| @@ -344,7 +342,7 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user | |||
| 344 | case FUNC_HEADER: | 342 | case FUNC_HEADER: |
| 345 | if (copy_from_user(&funcdesc_h, data_ptr + sizeof(item_type), sizeof(funcdesc_h))) | 343 | if (copy_from_user(&funcdesc_h, data_ptr + sizeof(item_type), sizeof(funcdesc_h))) |
| 346 | return -EFAULT; | 344 | return -EFAULT; |
| 347 | func_nr = LE_SHORT(funcdesc_h.func_nr); | 345 | func_nr = le16_to_cpu(funcdesc_h.func_nr); |
| 348 | break; | 346 | break; |
| 349 | case CODE_HEADER: | 347 | case CODE_HEADER: |
| 350 | if (func_nr != info.func_req) | 348 | if (func_nr != info.func_req) |
| @@ -370,11 +368,11 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user | |||
| 370 | if (code_h.name != INIT_HEADER) | 368 | if (code_h.name != INIT_HEADER) |
| 371 | break; | 369 | break; |
| 372 | data_ptr += sizeof(code_h); | 370 | data_ptr += sizeof(code_h); |
| 373 | err = snd_sb_csp_load_user(p, data_ptr, LE_INT(code_h.len), | 371 | err = snd_sb_csp_load_user(p, data_ptr, le32_to_cpu(code_h.len), |
| 374 | SNDRV_SB_CSP_LOAD_INITBLOCK); | 372 | SNDRV_SB_CSP_LOAD_INITBLOCK); |
| 375 | if (err) | 373 | if (err) |
| 376 | return err; | 374 | return err; |
| 377 | data_ptr += LE_INT(code_h.len); | 375 | data_ptr += le32_to_cpu(code_h.len); |
| 378 | } | 376 | } |
| 379 | /* main microcode block */ | 377 | /* main microcode block */ |
| 380 | if (copy_from_user(&code_h, data_ptr, sizeof(code_h))) | 378 | if (copy_from_user(&code_h, data_ptr, sizeof(code_h))) |
| @@ -386,17 +384,17 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user | |||
| 386 | } | 384 | } |
| 387 | data_ptr += sizeof(code_h); | 385 | data_ptr += sizeof(code_h); |
| 388 | err = snd_sb_csp_load_user(p, data_ptr, | 386 | err = snd_sb_csp_load_user(p, data_ptr, |
| 389 | LE_INT(code_h.len), 0); | 387 | le32_to_cpu(code_h.len), 0); |
| 390 | if (err) | 388 | if (err) |
| 391 | return err; | 389 | return err; |
| 392 | 390 | ||
| 393 | /* fill in codec header */ | 391 | /* fill in codec header */ |
| 394 | strlcpy(p->codec_name, info.codec_name, sizeof(p->codec_name)); | 392 | strlcpy(p->codec_name, info.codec_name, sizeof(p->codec_name)); |
| 395 | p->func_nr = func_nr; | 393 | p->func_nr = func_nr; |
| 396 | p->mode = LE_SHORT(funcdesc_h.flags_play_rec); | 394 | p->mode = le16_to_cpu(funcdesc_h.flags_play_rec); |
| 397 | switch (LE_SHORT(funcdesc_h.VOC_type)) { | 395 | switch (le16_to_cpu(funcdesc_h.VOC_type)) { |
| 398 | case 0x0001: /* QSound decoder */ | 396 | case 0x0001: /* QSound decoder */ |
| 399 | if (LE_SHORT(funcdesc_h.flags_play_rec) == SNDRV_SB_CSP_MODE_DSP_WRITE) { | 397 | if (le16_to_cpu(funcdesc_h.flags_play_rec) == SNDRV_SB_CSP_MODE_DSP_WRITE) { |
| 400 | if (snd_sb_qsound_build(p) == 0) | 398 | if (snd_sb_qsound_build(p) == 0) |
| 401 | /* set QSound flag and clear all other mode flags */ | 399 | /* set QSound flag and clear all other mode flags */ |
| 402 | p->mode = SNDRV_SB_CSP_MODE_QSOUND; | 400 | p->mode = SNDRV_SB_CSP_MODE_QSOUND; |
| @@ -426,12 +424,12 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user | |||
| 426 | p->mode = 0; | 424 | p->mode = 0; |
| 427 | snd_printd("%s: Unsupported CSP codec type: 0x%04x\n", | 425 | snd_printd("%s: Unsupported CSP codec type: 0x%04x\n", |
| 428 | __FUNCTION__, | 426 | __FUNCTION__, |
| 429 | LE_SHORT(funcdesc_h.VOC_type)); | 427 | le16_to_cpu(funcdesc_h.VOC_type)); |
| 430 | return -EINVAL; | 428 | return -EINVAL; |
| 431 | } | 429 | } |
| 432 | p->acc_channels = LE_SHORT(funcdesc_h.flags_stereo_mono); | 430 | p->acc_channels = le16_to_cpu(funcdesc_h.flags_stereo_mono); |
| 433 | p->acc_width = LE_SHORT(funcdesc_h.flags_16bit_8bit); | 431 | p->acc_width = le16_to_cpu(funcdesc_h.flags_16bit_8bit); |
| 434 | p->acc_rates = LE_SHORT(funcdesc_h.flags_rates); | 432 | p->acc_rates = le16_to_cpu(funcdesc_h.flags_rates); |
| 435 | 433 | ||
| 436 | /* Decouple CSP from IRQ and DMAREQ lines */ | 434 | /* Decouple CSP from IRQ and DMAREQ lines */ |
| 437 | spin_lock_irqsave(&p->chip->reg_lock, flags); | 435 | spin_lock_irqsave(&p->chip->reg_lock, flags); |
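Dropping the LE_SHORT()/LE_INT() wrappers in favour of direct le16_to_cpu()/le32_to_cpu() makes the endianness handling explicit at each use: the RIFF fields are little-endian on disk, so they must be converted before any arithmetic, and the conversion compiles to a no-op on little-endian CPUs. A hedged sketch of the round trip:

    __u32 on_disk = cpu_to_le32(0x1234);    /* native -> little-endian */
    __u32 native  = le32_to_cpu(on_disk);   /* little-endian -> native */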
diff --git a/sound/oss/skeleton.c b/sound/oss/skeleton.c deleted file mode 100644 index 8fea783dd0cb..000000000000 --- a/sound/oss/skeleton.c +++ /dev/null | |||
| @@ -1,219 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * PCI sound skeleton example | ||
| 3 | * | ||
| 4 | * (c) 1998 Red Hat Software | ||
| 5 | * | ||
| 6 | * This software may be used and distributed according to the | ||
| 7 | * terms of the GNU General Public License, incorporated herein by | ||
| 8 | * reference. | ||
| 9 | * | ||
| 10 | * This example is designed to be built in the linux/drivers/sound | ||
| 11 | * directory as part of a kernel build. The example is modular-only; | ||
| 12 | * drop me a note once you have a working modular driver and want | ||
| 13 | * to integrate it with the main code. | ||
| 14 | * -- Alan <alan@redhat.com> | ||
| 15 | * | ||
| 16 | * This is a first draft. Please report any errors, corrections or | ||
| 17 | * improvements to me. | ||
| 18 | */ | ||
| 19 | |||
| 20 | #include <linux/module.h> | ||
| 21 | #include <linux/delay.h> | ||
| 22 | #include <linux/errno.h> | ||
| 23 | #include <linux/fs.h> | ||
| 24 | #include <linux/kernel.h> | ||
| 25 | #include <linux/pci.h> | ||
| 26 | |||
| 27 | #include <asm/io.h> | ||
| 28 | |||
| 29 | #include "sound_config.h" | ||
| 30 | |||
| 31 | /* | ||
| 32 | * Define our PCI vendor ID here | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef PCI_VENDOR_MYIDENT | ||
| 36 | #define PCI_VENDOR_MYIDENT 0x125D | ||
| 37 | |||
| 38 | /* | ||
| 39 | * PCI identity for the card. | ||
| 40 | */ | ||
| 41 | |||
| 42 | #define PCI_DEVICE_ID_MYIDENT_MYCARD1 0x1969 | ||
| 43 | #endif | ||
| 44 | |||
| 45 | #define CARD_NAME "ExampleWave 3D Pro Ultra ThingyWotsit" | ||
| 46 | |||
| 47 | #define MAX_CARDS 8 | ||
| 48 | |||
| 49 | /* | ||
| 50 | * Each address_info object holds the information about one of | ||
| 51 | * our card resources. In this case the MSS emulation of our | ||
| 52 | * fictitious card. It's used to manage and attach things. | ||
| 53 | */ | ||
| 54 | |||
| 55 | static struct address_info mss_data[MAX_CARDS]; | ||
| 56 | static int cards; | ||
| 57 | |||
| 58 | /* | ||
| 59 | * Install the actual card. This is an example | ||
| 60 | */ | ||
| 61 | |||
| 62 | static int mycard_install(struct pci_dev *pcidev) | ||
| 63 | { | ||
| 64 | int iobase; | ||
| 65 | int mssbase; | ||
| 66 | int mpubase; | ||
| 67 | u8 x; | ||
| 68 | u16 w; | ||
| 69 | u32 v; | ||
| 70 | int i; | ||
| 71 | int dma; | ||
| 72 | |||
| 73 | /* | ||
| 74 | * Our imaginary code has its I/O on PCI address 0, a | ||
| 75 | * MSS on PCI address 1 and an MPU on address 2 | ||
| 76 | * | ||
| 77 | * For the example we will only initialise the MSS | ||
| 78 | */ | ||
| 79 | |||
| 80 | iobase = pci_resource_start(pcidev, 0); | ||
| 81 | mssbase = pci_resource_start(pcidev, 1); | ||
| 82 | mpubase = pci_resource_start(pcidev, 2); | ||
| 83 | |||
| 84 | /* | ||
| 85 | * Reset the board | ||
| 86 | */ | ||
| 87 | |||
| 88 | /* | ||
| 89 | * Wait for completion. udelay() waits in microseconds | ||
| 90 | */ | ||
| 91 | |||
| 92 | udelay(100); | ||
| 93 | |||
| 94 | /* | ||
| 95 | * Ok card ready. Begin setup proper. You might for example | ||
| 96 | * load the firmware here | ||
| 97 | */ | ||
| 98 | |||
| 99 | dma = card_specific_magic(iobase); | ||
| 100 | |||
| 101 | /* | ||
| 102 | * Turn on legacy mode (example), There are also byte and | ||
| 103 | * dword (32bit) PCI configuration function calls | ||
| 104 | */ | ||
| 105 | |||
| 106 | pci_read_config_word(pcidev, 0x40, &w); | ||
| 107 | w&=~(1<<15); /* legacy decode on */ | ||
| 108 | w|=(1<<14); /* Reserved write as 1 in this case */ | ||
| 109 | w|=(1<<3)|(1<<1)|(1<<0); /* SB on , FM on, MPU on */ | ||
| 110 | pci_write_config_word(pcidev, 0x40, w); | ||
| 111 | |||
| 112 | /* | ||
| 113 | * Let the user know we found his toy. | ||
| 114 | */ | ||
| 115 | |||
| 116 | printk(KERN_INFO "Programmed "CARD_NAME" at 0x%X to legacy mode.\n", | ||
| 117 | iobase); | ||
| 118 | |||
| 119 | /* | ||
| 120 | * Now set up the description of the card | ||
| 121 | */ | ||
| 122 | |||
| 123 | mss_data[cards].io_base = mssbase; | ||
| 124 | mss_data[cards].irq = pcidev->irq; | ||
| 125 | mss_data[cards].dma = dma; | ||
| 126 | |||
| 127 | /* | ||
| 128 | * Check there is an MSS present | ||
| 129 | */ | ||
| 130 | |||
| 131 | if(ad1848_detect(mssbase, NULL, mss_data[cards].osp)==0) | ||
| 132 | return 0; | ||
| 133 | |||
| 134 | /* | ||
| 135 | * Initialize it | ||
| 136 | */ | ||
| 137 | |||
| 138 | mss_data[cards].slots[3] = ad1848_init("MyCard MSS 16bit", | ||
| 139 | mssbase, | ||
| 140 | mss_data[cards].irq, | ||
| 141 | mss_data[cards].dma, | ||
| 142 | mss_data[cards].dma, | ||
| 143 | 0, | ||
| 144 | 0, | ||
| 145 | THIS_MODULE); | ||
| 146 | |||
| 147 | cards++; | ||
| 148 | return 1; | ||
| 149 | } | ||
| 150 | |||
| 151 | |||
| 152 | /* | ||
| 153 | * This loop walks the PCI configuration database and finds where | ||
| 154 | * the sound cards are. | ||
| 155 | */ | ||
| 156 | |||
| 157 | int init_mycard(void) | ||
| 158 | { | ||
| 159 | struct pci_dev *pcidev=NULL; | ||
| 160 | int count=0; | ||
| 161 | |||
| 162 | while((pcidev = pci_find_device(PCI_VENDOR_MYIDENT, PCI_DEVICE_ID_MYIDENT_MYCARD1, pcidev))!=NULL) | ||
| 163 | { | ||
| 164 | if (pci_enable_device(pcidev)) | ||
| 165 | continue; | ||
| 166 | count+=mycard_install(pcidev); | ||
| 167 | if(count) | ||
| 168 | return 0; | ||
| 169 | if(count==MAX_CARDS) | ||
| 170 | break; | ||
| 171 | } | ||
| 172 | |||
| 173 | if(count==0) | ||
| 174 | return -ENODEV; | ||
| 175 | return 0; | ||
| 176 | } | ||
| 177 | |||
| 178 | /* | ||
| 179 | * This function is called when the user or kernel loads the | ||
| 180 | * module into memory. | ||
| 181 | */ | ||
| 182 | |||
| 183 | |||
| 184 | int init_module(void) | ||
| 185 | { | ||
| 186 | if(init_mycard()<0) | ||
| 187 | { | ||
| 188 | printk(KERN_ERR "No "CARD_NAME" cards found.\n"); | ||
| 189 | return -ENODEV; | ||
| 190 | } | ||
| 191 | |||
| 192 | return 0; | ||
| 193 | } | ||
| 194 | |||
| 195 | /* | ||
| 196 | * This is called when it is removed. It will only be removed | ||
| 197 | * when its use count is 0. | ||
| 198 | */ | ||
| 199 | |||
| 200 | void cleanup_module(void) | ||
| 201 | { | ||
| 202 | for (int i = 0; i < cards; i++) | ||
| 203 | { | ||
| 204 | /* | ||
| 205 | * Free attached resources | ||
| 206 | */ | ||
| 207 | |||
| 208 | ad1848_unload(mss_data[i].io_base, | ||
| 209 | mss_data[i].irq, | ||
| 210 | mss_data[i].dma, | ||
| 211 | mss_data[i].dma, | ||
| 212 | 0); | ||
| 213 | /* | ||
| 214 | * And disconnect the device from the kernel | ||
| 215 | */ | ||
| 216 | sound_unload_audiodevice(mss_data[i].slots[3]); | ||
| 217 | } | ||
| 218 | } | ||
| 219 | |||
