author | John W. Linville <linville@tuxdriver.com> | 2006-03-15 17:02:08 -0500 |
---|---|---|
committer | John W. Linville <linville@tuxdriver.com> | 2006-03-15 17:02:08 -0500 |
commit | dd288e7d75b9041f79fecae77d61cfa345da7266 (patch) | |
tree | 85ff1d1ea0fe1d6eae0b6819422d5c6c05f862cd | |
parent | 30dcbf29cc6d92d70fa262e79e84011fe6913bed (diff) | |
parent | 72df16f109b73be37977a26d342e9103e8851cb6 (diff) |
Merge branch 'upstream-fixes'
54 files changed, 310 insertions, 178 deletions
diff --git a/Documentation/cpusets.txt b/Documentation/cpusets.txt index 990998ee10b6..30c41459953c 100644 --- a/Documentation/cpusets.txt +++ b/Documentation/cpusets.txt | |||
@@ -4,8 +4,9 @@ | |||
4 | Copyright (C) 2004 BULL SA. | 4 | Copyright (C) 2004 BULL SA. |
5 | Written by Simon.Derr@bull.net | 5 | Written by Simon.Derr@bull.net |
6 | 6 | ||
7 | Portions Copyright (c) 2004 Silicon Graphics, Inc. | 7 | Portions Copyright (c) 2004-2006 Silicon Graphics, Inc. |
8 | Modified by Paul Jackson <pj@sgi.com> | 8 | Modified by Paul Jackson <pj@sgi.com> |
9 | Modified by Christoph Lameter <clameter@sgi.com> | ||
9 | 10 | ||
10 | CONTENTS: | 11 | CONTENTS: |
11 | ========= | 12 | ========= |
@@ -90,7 +91,8 @@ This can be especially valuable on: | |||
90 | 91 | ||
91 | These subsets, or "soft partitions" must be able to be dynamically | 92 | These subsets, or "soft partitions" must be able to be dynamically |
92 | adjusted, as the job mix changes, without impacting other concurrently | 93 | adjusted, as the job mix changes, without impacting other concurrently |
93 | executing jobs. | 94 | executing jobs. The location of the running jobs' pages may also be moved
95 | when the cpuset's memory placement is changed. ||
94 | 96 | ||
95 | The kernel cpuset patch provides the minimum essential kernel | 97 | The kernel cpuset patch provides the minimum essential kernel |
96 | mechanisms required to efficiently implement such subsets. It | 98 | mechanisms required to efficiently implement such subsets. It |
@@ -102,8 +104,8 @@ memory allocator code. | |||
102 | 1.3 How are cpusets implemented ? | 104 | 1.3 How are cpusets implemented ? |
103 | --------------------------------- | 105 | --------------------------------- |
104 | 106 | ||
105 | Cpusets provide a Linux kernel (2.6.7 and above) mechanism to constrain | 107 | Cpusets provide a Linux kernel mechanism to constrain which CPUs and |
106 | which CPUs and Memory Nodes are used by a process or set of processes. | 108 | Memory Nodes are used by a process or set of processes. |
107 | 109 | ||
108 | The Linux kernel already has a pair of mechanisms to specify on which | 110 | The Linux kernel already has a pair of mechanisms to specify on which |
109 | CPUs a task may be scheduled (sched_setaffinity) and on which Memory | 111 | CPUs a task may be scheduled (sched_setaffinity) and on which Memory |
@@ -371,22 +373,17 @@ cpusets memory placement policy 'mems' subsequently changes. | |||
371 | If the cpuset flag file 'memory_migrate' is set true, then when | 373 | If the cpuset flag file 'memory_migrate' is set true, then when |
372 | tasks are attached to that cpuset, any pages that task had | 374 | tasks are attached to that cpuset, any pages that task had |
373 | allocated to it on nodes in its previous cpuset are migrated | 375 | allocated to it on nodes in its previous cpuset are migrated |
374 | to the task's new cpuset. Depending on the implementation, | 376 | to the task's new cpuset. The relative placement of the page within
375 | this migration may either be done by swapping the page out, | 377 | the cpuset is preserved during these migration operations if possible. |
376 | so that the next time the page is referenced, it will be paged | 378 | For example if the page was on the second valid node of the prior cpuset |
377 | into the tasks new cpuset, usually on the node where it was | 379 | then the page will be placed on the second valid node of the new cpuset. |
378 | referenced, or this migration may be done by directly copying | 380 | |
379 | the pages from the tasks previous cpuset to the new cpuset, | ||
380 | where possible to the same node, relative to the new cpuset, | ||
381 | as the node that held the page, relative to the old cpuset. | ||
382 | Also if 'memory_migrate' is set true, then if that cpuset's | 381 | Also if 'memory_migrate' is set true, then if that cpuset's
383 | 'mems' file is modified, pages allocated to tasks in that | 382 | 'mems' file is modified, pages allocated to tasks in that |
384 | cpuset, that were on nodes in the previous setting of 'mems', | 383 | cpuset, that were on nodes in the previous setting of 'mems', |
385 | will be moved to nodes in the new setting of 'mems.' Again, | 384 | will be moved to nodes in the new setting of 'mems.' |
386 | depending on the implementation, this might be done by swapping, | 385 | Pages that were not in the task's prior cpuset, or in the cpuset's
387 | or by direct copying. In either case, pages that were not in | 386 | prior 'mems' setting, will not be moved. |
388 | the tasks prior cpuset, or in the cpusets prior 'mems' setting, | ||
389 | will not be moved. | ||
390 | 387 | ||
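The paragraph above describes the relative-placement rule for 'memory_migrate': a page that sat on the n-th valid node of the old cpuset (or of the old 'mems' setting) ends up on the n-th valid node of the new one. Below is a small user-space illustration of that rule only; it is not the kernel's implementation, and the single-word nodemask is a simplifying assumption.

    #include <stdio.h>

    /* Return the node a page should land on after migration, given that
     * it currently sits on 'node' (a set bit of old_mask) and that its
     * relative position within the cpuset is preserved. */
    static int remap_node(unsigned long old_mask, unsigned long new_mask, int node)
    {
            int rank = 0, i;

            for (i = 0; i < node; i++)              /* rank of 'node' in old_mask */
                    if (old_mask & (1UL << i))
                            rank++;
            for (i = 0; i < (int)(8 * sizeof(new_mask)); i++)
                    if ((new_mask & (1UL << i)) && rank-- == 0)
                            return i;               /* same rank in new_mask */
            return -1;                              /* new cpuset has fewer nodes */
    }

    int main(void)
    {
            /* old cpuset = {1,2}, new cpuset = {4,7}: node 2 is the second
             * valid node of the old cpuset, so the page moves to node 7,
             * the second valid node of the new cpuset. */
            printf("%d\n", remap_node(0x6UL, 0x90UL, 2));
            return 0;
    }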
391 | There is an exception to the above. If hotplug functionality is used | 388 | There is an exception to the above. If hotplug functionality is used |
392 | to remove all the CPUs that are currently assigned to a cpuset, | 389 | to remove all the CPUs that are currently assigned to a cpuset, |
@@ -434,16 +431,6 @@ and then start a subshell 'sh' in that cpuset: | |||
434 | # The next line should display '/Charlie' | 431 | # The next line should display '/Charlie' |
435 | cat /proc/self/cpuset | 432 | cat /proc/self/cpuset |
436 | 433 | ||
437 | In the case that a change of cpuset includes wanting to move already | ||
438 | allocated memory pages, consider further the work of IWAMOTO | ||
439 | Toshihiro <iwamoto@valinux.co.jp> for page remapping and memory | ||
440 | hotremoval, which can be found at: | ||
441 | |||
442 | http://people.valinux.co.jp/~iwamoto/mh.html | ||
443 | |||
444 | The integration of cpusets with such memory migration is not yet | ||
445 | available. | ||
446 | |||
447 | In the future, a C library interface to cpusets will likely be | 434 | In the future, a C library interface to cpusets will likely be |
448 | available. For now, the only way to query or modify cpusets is | 435 | available. For now, the only way to query or modify cpusets is |
449 | via the cpuset file system, using the various cd, mkdir, echo, cat, | 436 | via the cpuset file system, using the various cd, mkdir, echo, cat, |
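The '/Charlie' example above drives the cpuset file system from a shell. The same administration can be done from C with ordinary mkdir/open/write calls; the sketch below assumes the cpuset file system is mounted on /dev/cpuset, as in the documentation's examples, and that the 'cpus', 'mems', 'memory_migrate' and 'tasks' files behave as described above. Most error handling is omitted.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Write a short string into one of the cpuset control files. */
    static int write_str(const char *path, const char *val)
    {
            int fd = open(path, O_WRONLY);

            if (fd < 0)
                    return -1;
            if (write(fd, val, strlen(val)) < 0) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }

    int main(void)
    {
            char pid[32];

            mkdir("/dev/cpuset/Charlie", 0755);
            write_str("/dev/cpuset/Charlie/cpus", "2-3");
            write_str("/dev/cpuset/Charlie/mems", "1");
            /* migrate already-allocated pages when tasks are attached */
            write_str("/dev/cpuset/Charlie/memory_migrate", "1");
            snprintf(pid, sizeof(pid), "%d", (int)getpid());
            return write_str("/dev/cpuset/Charlie/tasks", pid) ? 1 : 0;
    }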
diff --git a/Documentation/vm/page_migration b/Documentation/vm/page_migration index c52820fcf500..0dd4ef30c361 100644 --- a/Documentation/vm/page_migration +++ b/Documentation/vm/page_migration | |||
@@ -12,12 +12,18 @@ is running. | |||
12 | 12 | ||
13 | Page migration allows a process to manually relocate the node on which its | 13 | Page migration allows a process to manually relocate the node on which its |
14 | pages are located through the MF_MOVE and MF_MOVE_ALL options while setting | 14 | pages are located through the MF_MOVE and MF_MOVE_ALL options while setting |
15 | a new memory policy. The pages of a process can also be relocated | 15 | a new memory policy via mbind(). The pages of a process can also be relocated
16 | from another process using the sys_migrate_pages() function call. The | 16 | from another process using the sys_migrate_pages() function call. The |
17 | migrate_pages function call takes two sets of nodes and moves pages of a | 17 | migrate_pages function call takes two sets of nodes and moves pages of a |
18 | process that are located on the from nodes to the destination nodes. | 18 | process that are located on the from nodes to the destination nodes. |
19 | 19 | Page migration functions are provided by the numactl package by Andi Kleen | |
20 | Manual migration is very useful if for example the scheduler has relocated | 20 | (a version later than 0.9.3 is required. Get it from |
21 | ftp://ftp.suse.com/pub/people/ak). numactl provides libnuma which ||
22 | provides an interface similar to other numa functionality for page migration. | ||
23 | cat /proc/<pid>/numa_maps allows an easy review of where the pages of | ||
24 | a process are located. See also the numa_maps manpage in the numactl package. | ||
25 | |||
26 | Manual migration is useful if for example the scheduler has relocated | ||
21 | a process to a processor on a distant node. A batch scheduler or an | 27 | a process to a processor on a distant node. A batch scheduler or an |
22 | administrator may detect the situation and move the pages of the process | 28 | administrator may detect the situation and move the pages of the process |
23 | nearer to the new processor. At some point in the future we may have | 29 | nearer to the new processor. At some point in the future we may have |
@@ -25,10 +31,12 @@ some mechanism in the scheduler that will automatically move the pages. | |||
25 | 31 | ||
26 | Larger installations usually partition the system using cpusets into | 32 | Larger installations usually partition the system using cpusets into |
27 | sections of nodes. Paul Jackson has equipped cpusets with the ability to | 33 | sections of nodes. Paul Jackson has equipped cpusets with the ability to |
28 | move pages when a task is moved to another cpuset. This allows automatic | 34 | move pages when a task is moved to another cpuset (See ../cpusets.txt). |
29 | control over locality of a process. If a task is moved to a new cpuset | 35 | Cpusets allow the automation of process locality. If a task is moved to
30 | then also all its pages are moved with it so that the performance of the | 36 | a new cpuset then all of its pages are moved with it as well so that the
31 | process does not sink dramatically (as is the case today). | 37 | performance of the process does not sink dramatically. Also the pages |
38 | of processes in a cpuset are moved if the allowed memory nodes of a | ||
39 | cpuset are changed. | ||
32 | 40 | ||
33 | Page migration allows the preservation of the relative location of pages | 41 | Page migration allows the preservation of the relative location of pages |
34 | within a group of nodes for all migration techniques which will preserve a | 42 | within a group of nodes for all migration techniques which will preserve a |
@@ -37,22 +45,26 @@ process. This is necessary in order to preserve the memory latencies. | |||
37 | Processes will run with similar performance after migration. | 45 | Processes will run with similar performance after migration. |
38 | 46 | ||
39 | Page migration occurs in several steps. First a high level | 47 | Page migration occurs in several steps. First a high level |
40 | description for those trying to use migrate_pages() and then | 48 | description for those trying to use migrate_pages() from the kernel |
41 | a low level description of how the low level details work. | 49 | (for userspace usage see the Andi Kleen's numactl package mentioned above) |
50 | and then a low level description of how the low level details work. | ||
42 | 51 | ||
43 | A. Use of migrate_pages() | 52 | A. In kernel use of migrate_pages() |
44 | ------------------------- | 53 | ----------------------------------- |
45 | 54 | ||
46 | 1. Remove pages from the LRU. | 55 | 1. Remove pages from the LRU. |
47 | 56 | ||
48 | Lists of pages to be migrated are generated by scanning over | 57 | Lists of pages to be migrated are generated by scanning over |
49 | pages and moving them into lists. This is done by | 58 | pages and moving them into lists. This is done by |
50 | calling isolate_lru_page() or __isolate_lru_page(). | 59 | calling isolate_lru_page(). |
51 | Calling isolate_lru_page increases the references to the page | 60 | Calling isolate_lru_page increases the references to the page |
52 | so that it cannot vanish under us. | 61 | so that it cannot vanish while the page migration occurs. |
62 | It also prevents the swapper or other scans from encountering ||
63 | the page. | ||
53 | 64 | ||
54 | 2. Generate a list of newly allocated pages to move the contents | 65 | 2. Generate a list of newly allocated pages. These pages will contain the
55 | of the first list to. | 66 | contents of the pages from the first list after page migration is |
67 | complete. | ||
56 | 68 | ||
57 | 3. The migrate_pages() function is called which attempts | 69 | 3. The migrate_pages() function is called which attempts |
58 | to do the migration. It returns the moved pages in the | 70 | to do the migration. It returns the moved pages in the |
@@ -63,13 +75,17 @@ A. Use of migrate_pages() | |||
63 | 4. The leftover pages of various types are returned | 75 | 4. The leftover pages of various types are returned |
64 | to the LRU using putback_to_lru_pages() or otherwise | 76 | to the LRU using putback_to_lru_pages() or otherwise |
65 | disposed of. The pages will still have the refcount as | 77 | disposed of. The pages will still have the refcount as |
66 | increased by isolate_lru_pages()! | 78 | increased by isolate_lru_pages() if putback_to_lru_pages() is not |
79 | used! The kernel may want to handle the various cases of failures in | ||
80 | different ways. | ||
67 | 81 | ||
68 | B. Operation of migrate_pages() | 82 | B. How migrate_pages() works |
69 | -------------------------------- | 83 | ---------------------------- |
70 | 84 | ||
71 | migrate_pages does several passes over its list of pages. A page is moved | 85 | migrate_pages() does several passes over its list of pages. A page is moved |
72 | if all references to a page are removable at the time. | 86 | if all references to a page are removable at the time. The page has |
87 | already been removed from the LRU via isolate_lru_page() and the refcount | ||
88 | is increased so that the page cannot be freed while page migration occurs. | ||
73 | 89 | ||
74 | Steps: | 90 | Steps: |
75 | 91 | ||
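Section A above gives the in-kernel calling pattern only in prose. A rough kernel-context sketch of that pattern follows; it is pieced together from the function names in the text (isolate_lru_page(), migrate_pages(), putback_to_lru_pages()), and the exact signatures, return conventions and allocation of the new pages are assumptions rather than code copied from mm/.

    #include <linux/mm.h>
    #include <linux/swap.h>

    /* 'candidates' is a caller-provided list of pages to move and
     * 'newpages' a matching list of freshly allocated target pages
     * (step 2); both are hypothetical here. */
    static void migrate_candidates(struct list_head *candidates,
                                   struct list_head *newpages)
    {
            LIST_HEAD(pagelist);    /* step 1: isolated pages            */
            LIST_HEAD(moved);       /* step 3: successfully moved pages  */
            LIST_HEAD(failed);      /* step 3: pages that did not move   */
            struct page *page, *next;

            /* 1. Take the pages off the LRU.  isolate_lru_page() grabs
             *    an extra reference so the page cannot vanish and keeps
             *    the swapper and other scans away from it.  (Return
             *    value handling is omitted in this sketch.) */
            list_for_each_entry_safe(page, next, candidates, lru) {
                    isolate_lru_page(page);
                    list_move(&page->lru, &pagelist);
            }

            /* 3. Attempt the migration; moved and failed pages come
             *    back on the two result lists. */
            migrate_pages(&pagelist, newpages, &moved, &failed);

            /* 4. Leftovers still carry the reference taken in step 1
             *    and must be put back on the LRU or otherwise disposed
             *    of. */
            putback_to_lru_pages(&failed);
    }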
@@ -79,36 +95,40 @@ Steps: | |||
79 | 95 | ||
80 | 3. Make sure that the page has an assigned swap cache entry if | 96 | 3. Make sure that the page has an assigned swap cache entry if
81 | it is an anonymous page. The swap cache reference is necessary | 97 | it is an anonymous page. The swap cache reference is necessary
82 | to preserve the information contained in the page table maps. | 98 | to preserve the information contained in the page table maps while
99 | page migration occurs. | ||
83 | 100 | ||
84 | 4. Prep the new page that we want to move to. It is locked | 101 | 4. Prep the new page that we want to move to. It is locked |
85 | and set to not being uptodate so that all accesses to the new | 102 | and set to not being uptodate so that all accesses to the new |
86 | page immediately lock while we are moving references. | 103 | page immediately lock while the move is in progress. |
87 | 104 | ||
88 | 5. All the page table references to the page are either dropped (file backed) | 105 | 5. All the page table references to the page are either dropped (file |
89 | or converted to swap references (anonymous pages). This should decrease the | 106 | backed pages) or converted to swap references (anonymous pages). |
90 | reference count. | 107 | This should decrease the reference count. |
91 | 108 | ||
92 | 6. The radix tree lock is taken | 109 | 6. The radix tree lock is taken. This will cause all processes trying |
110 | to reestablish a pte to block on the radix tree spinlock. | ||
93 | 111 | ||
94 | 7. The refcount of the page is examined and we back out if references remain | 112 | 7. The refcount of the page is examined and we back out if references remain |
95 | otherwise we know that we are the only one referencing this page. | 113 | otherwise we know that we are the only one referencing this page. |
96 | 114 | ||
97 | 8. The radix tree is checked and if it does not contain the pointer to this | 115 | 8. The radix tree is checked and if it does not contain the pointer to this |
98 | page then we back out. | 116 | page then we back out because someone else modified the mapping first. |
99 | 117 | ||
100 | 9. The mapping is checked. If the mapping is gone then a truncate action may | 118 | 9. The mapping is checked. If the mapping is gone then a truncate action may |
101 | be in progress and we back out. | 119 | be in progress and we back out. |
102 | 120 | ||
103 | 10. The new page is prepped with some settings from the old page so that accesses | 121 | 10. The new page is prepped with some settings from the old page so that |
104 | to the new page will be discovered to have the correct settings. | 122 | accesses to the new page will be discovered to have the correct settings. |
105 | 123 | ||
106 | 11. The radix tree is changed to point to the new page. | 124 | 11. The radix tree is changed to point to the new page. |
107 | 125 | ||
108 | 12. The reference count of the old page is dropped because the reference has now | 126 | 12. The reference count of the old page is dropped because the radix tree |
109 | been removed. | 127 | reference is gone. |
110 | 128 | ||
111 | 13. The radix tree lock is dropped. | 129 | 13. The radix tree lock is dropped. With that lookups become possible again |
130 | and other processes will move from spinning on the tree lock to sleeping on | ||
131 | the locked new page. | ||
112 | 132 | ||
113 | 14. The page contents are copied to the new page. | 133 | 14. The page contents are copied to the new page. |
114 | 134 | ||
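Steps 6-13 amount to swapping the radix tree slot over to the new page under the tree lock, using the elevated reference count to detect that nobody else is still using the old page. In heavily simplified pseudo-C (illustration only; the expected reference count, the error handling and some helper names are assumptions, not the kernel's code):

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>
    #include <linux/radix-tree.h>

    static int switch_mapping_to_newpage(struct page *page, struct page *newpage,
                                         int expected_refs)
    {
            struct address_space *mapping = page_mapping(page);
            void **slot;

            /* 6. Take the tree lock: anyone trying to re-establish a pte
             *    now spins on it until the switch is complete. */
            write_lock_irq(&mapping->tree_lock);

            /* 7. Only the references we know about (LRU isolation, radix
             *    tree, swap cache ...) may remain; otherwise back out. */
            if (page_count(page) != expected_refs)
                    goto fail_unlock;

            /* 8. The slot must still point at our page; if not, someone
             *    else modified the mapping first and we back out. */
            slot = radix_tree_lookup_slot(&mapping->page_tree, page_index(page));
            if (!slot || *slot != page)
                    goto fail_unlock;

            /* 10./11. Carry over the relevant state (not shown) and swing
             *    the slot from the old page to the new one. */
            *slot = newpage;

            /* 12./13. The old page loses its radix tree reference; once
             *    the lock is dropped, lookups find the (still locked)
             *    new page. */
            put_page(page);
            write_unlock_irq(&mapping->tree_lock);
            return 0;

    fail_unlock:
            write_unlock_irq(&mapping->tree_lock);
            return -EAGAIN;
    }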
@@ -119,11 +139,37 @@ Steps: | |||
119 | 139 | ||
120 | 17. Queued up writeback on the new page is triggered. | 140 | 17. Queued up writeback on the new page is triggered. |
121 | 141 | ||
122 | 18. If swap pte's were generated for the page then remove them again. | 142 | 18. If swap pte's were generated for the page then replace them with real |
143 | ptes. This will reenable access for processes not blocked by the page lock. | ||
144 | |||
145 | 19. The page locks are dropped from the old and new page. | ||
146 | Processes waiting on the page lock can continue. | ||
147 | |||
148 | 20. The new page is moved to the LRU and can be scanned by the swapper | ||
149 | etc again. | ||
150 | |||
151 | TODO list | ||
152 | --------- | ||
153 | |||
154 | - Page migration requires the use of swap handles to preserve the | ||
155 | information of the anonymous page table entries. This means that swap | ||
156 | space is reserved but never used. The maximum number of swap handles used | ||
157 | is determined by CHUNK_SIZE (see mm/mempolicy.c) per ongoing migration. | ||
158 | Reservation of pages could be avoided by having a special type of swap | ||
159 | handle that does not require swap space and that would only track the page | ||
160 | references. Something like that was proposed by Marcelo Tosatti in the | ||
161 | past (search for migration cache on lkml or linux-mm@kvack.org). | ||
123 | 162 | ||
124 | 19. The locks are dropped from the old and new page. | 163 | - Page migration unmaps ptes for file backed pages and requires page |
164 | faults to reestablish these ptes. This could be optimized by somehow | ||
165 | recording the references before migration and then reestablish them later. | ||
166 | However, there are several locking challenges that have to be overcome | ||
167 | before this is possible. | ||
125 | 168 | ||
126 | 20. The new page is moved to the LRU. | 169 | - Page migration generates read ptes for anonymous pages. Dirty page |
170 | faults are required to make the pages writable again. It may be possible | ||
171 | to generate a pte marked dirty if it is known that the page is dirty and | ||
172 | that this process has the only reference to that page. | ||
127 | 173 | ||
128 | Christoph Lameter, December 19, 2005. | 174 | Christoph Lameter, March 8, 2006. |
129 | 175 | ||
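For the user-space side referred to at the top of this file (sys_migrate_pages() and the numactl/libnuma wrappers), a minimal sketch using the raw system call might look as follows. The __NR_migrate_pages macro and the exact maxnode convention depend on the kernel headers in use, so treat those details as assumptions; with numactl newer than 0.9.3 the libnuma wrapper can be used instead.

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Move every page of process 'pid' that currently sits on node 0
     * over to node 1, via the system call behind sys_migrate_pages(). */
    static long migrate_all_to_node1(pid_t pid)
    {
            unsigned long old_nodes = 1UL << 0;     /* "from" set: node 0 */
            unsigned long new_nodes = 1UL << 1;     /* "to" set:   node 1 */

            /* Pass the full width of the masks as maxnode; the exact
             * counting convention has off-by-one subtleties, so being
             * generous is the safe choice in this sketch. */
            return syscall(__NR_migrate_pages, pid,
                           8 * sizeof(unsigned long), &old_nodes, &new_nodes);
    }

    int main(int argc, char **argv)
    {
            pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : getpid();

            if (migrate_all_to_node1(pid) < 0)
                    perror("migrate_pages");
            return 0;
    }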
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 9f80fa502f8f..32ba00bd0a2f 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -799,6 +799,8 @@ source "drivers/i2c/Kconfig" | |||
799 | 799 | ||
800 | source "drivers/spi/Kconfig" | 800 | source "drivers/spi/Kconfig" |
801 | 801 | ||
802 | source "drivers/w1/Kconfig" | ||
803 | |||
802 | source "drivers/hwmon/Kconfig" | 804 | source "drivers/hwmon/Kconfig" |
803 | 805 | ||
804 | #source "drivers/l3/Kconfig" | 806 | #source "drivers/l3/Kconfig" |
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 0abbce8c70bc..b324dcac1c56 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c | |||
@@ -57,7 +57,9 @@ int main(void) | |||
57 | DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value)); | 57 | DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value)); |
58 | DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate)); | 58 | DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate)); |
59 | DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate)); | 59 | DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate)); |
60 | DEFINE(TI_IWMMXT_STATE, (offsetof(struct thread_info, fpstate)+4)&~7); | 60 | #ifdef CONFIG_IWMMXT |
61 | DEFINE(TI_IWMMXT_STATE, offsetof(struct thread_info, fpstate.iwmmxt)); | ||
62 | #endif | ||
61 | BLANK(); | 63 | BLANK(); |
62 | DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0)); | 64 | DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0)); |
63 | DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1)); | 65 | DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1)); |
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 7b6256bb590e..bc9e2f8ae326 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c | |||
@@ -610,15 +610,12 @@ static int ptrace_setfpregs(struct task_struct *tsk, void __user *ufp) | |||
610 | static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp) | 610 | static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp) |
611 | { | 611 | { |
612 | struct thread_info *thread = task_thread_info(tsk); | 612 | struct thread_info *thread = task_thread_info(tsk); |
613 | void *ptr = &thread->fpstate; | ||
614 | 613 | ||
615 | if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) | 614 | if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) |
616 | return -ENODATA; | 615 | return -ENODATA; |
617 | iwmmxt_task_disable(thread); /* force it to ram */ | 616 | iwmmxt_task_disable(thread); /* force it to ram */ |
618 | /* The iWMMXt state is stored doubleword-aligned. */ | 617 | return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE) |
619 | if (((long) ptr) & 4) | 618 | ? -EFAULT : 0; |
620 | ptr += 4; | ||
621 | return copy_to_user(ufp, ptr, 0x98) ? -EFAULT : 0; | ||
622 | } | 619 | } |
623 | 620 | ||
624 | /* | 621 | /* |
@@ -627,15 +624,12 @@ static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp) | |||
627 | static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp) | 624 | static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp) |
628 | { | 625 | { |
629 | struct thread_info *thread = task_thread_info(tsk); | 626 | struct thread_info *thread = task_thread_info(tsk); |
630 | void *ptr = &thread->fpstate; | ||
631 | 627 | ||
632 | if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) | 628 | if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) |
633 | return -EACCES; | 629 | return -EACCES; |
634 | iwmmxt_task_release(thread); /* force a reload */ | 630 | iwmmxt_task_release(thread); /* force a reload */ |
635 | /* The iWMMXt state is stored doubleword-aligned. */ | 631 | return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
636 | if (((long) ptr) & 4) | 632 | ? -EFAULT : 0; |
637 | ptr += 4; | ||
638 | return copy_from_user(ptr, ufp, 0x98) ? -EFAULT : 0; | ||
639 | } | 633 | } |
640 | 634 | ||
641 | #endif | 635 | #endif |
diff --git a/arch/arm/lib/muldi3.S b/arch/arm/lib/muldi3.S index 72d594184b8a..d89c60615794 100644 --- a/arch/arm/lib/muldi3.S +++ b/arch/arm/lib/muldi3.S | |||
@@ -29,8 +29,8 @@ ENTRY(__aeabi_lmul) | |||
29 | 29 | ||
30 | mul xh, yl, xh | 30 | mul xh, yl, xh |
31 | mla xh, xl, yh, xh | 31 | mla xh, xl, yh, xh |
32 | mov ip, xl, asr #16 | 32 | mov ip, xl, lsr #16 |
33 | mov yh, yl, asr #16 | 33 | mov yh, yl, lsr #16 |
34 | bic xl, xl, ip, lsl #16 | 34 | bic xl, xl, ip, lsl #16 |
35 | bic yl, yl, yh, lsl #16 | 35 | bic yl, yl, yh, lsl #16 |
36 | mla xh, yh, ip, xh | 36 | mla xh, yh, ip, xh |
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig index daadc78e271b..5bf50a2a737d 100644 --- a/arch/arm/mach-ixp4xx/Kconfig +++ b/arch/arm/mach-ixp4xx/Kconfig | |||
@@ -8,11 +8,9 @@ menu "Intel IXP4xx Implementation Options" | |||
8 | 8 | ||
9 | comment "IXP4xx Platforms" | 9 | comment "IXP4xx Platforms" |
10 | 10 | ||
11 | # This entry is placed on top because otherwise it would have | ||
12 | # been shown as a submenu. | ||
13 | config MACH_NSLU2 | 11 | config MACH_NSLU2 |
14 | bool | 12 | bool |
15 | prompt "NSLU2" if !(MACH_IXDP465 || MACH_IXDPG425 || ARCH_IXDP425 || ARCH_ADI_COYOTE || ARCH_AVILA || ARCH_IXCDP1100 || ARCH_PRPMC1100 || MACH_GTWX5715) | 13 | prompt "Linksys NSLU2" |
16 | help | 14 | help |
17 | Say 'Y' here if you want your kernel to support Linksys's | 15 | Say 'Y' here if you want your kernel to support Linksys's |
18 | NSLU2 NAS device. For more information on this platform, | 16 | NSLU2 NAS device. For more information on this platform, |
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c index 856d56f3b2ae..a3b4c6ac5708 100644 --- a/arch/arm/mach-ixp4xx/nas100d-setup.c +++ b/arch/arm/mach-ixp4xx/nas100d-setup.c | |||
@@ -113,6 +113,9 @@ static void __init nas100d_init(void) | |||
113 | { | 113 | { |
114 | ixp4xx_sys_init(); | 114 | ixp4xx_sys_init(); |
115 | 115 | ||
116 | /* gpio 14 and 15 are _not_ clocks */ | ||
117 | *IXP4XX_GPIO_GPCLKR = 0; | ||
118 | |||
116 | nas100d_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); | 119 | nas100d_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); |
117 | nas100d_flash_resource.end = | 120 | nas100d_flash_resource.end = |
118 | IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1; | 121 | IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1; |
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index d921c1024ae0..2c6c2a7c05a0 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S | |||
@@ -96,15 +96,16 @@ ENTRY(v6_coherent_user_range) | |||
96 | #ifdef HARVARD_CACHE | 96 | #ifdef HARVARD_CACHE |
97 | bic r0, r0, #CACHE_LINE_SIZE - 1 | 97 | bic r0, r0, #CACHE_LINE_SIZE - 1 |
98 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D line | 98 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D line |
99 | mcr p15, 0, r0, c7, c5, 1 @ invalidate I line | ||
100 | add r0, r0, #CACHE_LINE_SIZE | 99 | add r0, r0, #CACHE_LINE_SIZE |
101 | cmp r0, r1 | 100 | cmp r0, r1 |
102 | blo 1b | 101 | blo 1b |
103 | #endif | 102 | #endif |
104 | mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB | ||
105 | #ifdef HARVARD_CACHE | ||
106 | mov r0, #0 | 103 | mov r0, #0 |
104 | #ifdef HARVARD_CACHE | ||
107 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | 105 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer |
106 | mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate | ||
107 | #else | ||
108 | mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB | ||
108 | #endif | 109 | #endif |
109 | mov pc, lr | 110 | mov pc, lr |
110 | 111 | ||
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 330695b6b19d..b103e56806bd 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
@@ -24,14 +24,16 @@ | |||
24 | static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) | 24 | static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) |
25 | { | 25 | { |
26 | unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); | 26 | unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); |
27 | const int zero = 0; | ||
27 | 28 | ||
28 | set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL)); | 29 | set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL)); |
29 | flush_tlb_kernel_page(to); | 30 | flush_tlb_kernel_page(to); |
30 | 31 | ||
31 | asm( "mcrr p15, 0, %1, %0, c14\n" | 32 | asm( "mcrr p15, 0, %1, %0, c14\n" |
32 | " mcrr p15, 0, %1, %0, c5\n" | 33 | " mcr p15, 0, %2, c7, c10, 4\n" |
34 | " mcr p15, 0, %2, c7, c5, 0\n" | ||
33 | : | 35 | : |
34 | : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES) | 36 | : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero) |
35 | : "cc"); | 37 | : "cc"); |
36 | } | 38 | } |
37 | 39 | ||
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c index f39e09ef64ec..776c90989e06 100644 --- a/arch/i386/kernel/apic.c +++ b/arch/i386/kernel/apic.c | |||
@@ -570,16 +570,18 @@ void __devinit setup_local_APIC(void) | |||
570 | */ | 570 | */ |
571 | void lapic_shutdown(void) | 571 | void lapic_shutdown(void) |
572 | { | 572 | { |
573 | unsigned long flags; | ||
574 | |||
573 | if (!cpu_has_apic) | 575 | if (!cpu_has_apic) |
574 | return; | 576 | return; |
575 | 577 | ||
576 | local_irq_disable(); | 578 | local_irq_save(flags); |
577 | clear_local_APIC(); | 579 | clear_local_APIC(); |
578 | 580 | ||
579 | if (enabled_via_apicbase) | 581 | if (enabled_via_apicbase) |
580 | disable_local_APIC(); | 582 | disable_local_APIC(); |
581 | 583 | ||
582 | local_irq_enable(); | 584 | local_irq_restore(flags); |
583 | } | 585 | } |
584 | 586 | ||
585 | #ifdef CONFIG_PM | 587 | #ifdef CONFIG_PM |
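The lapic_shutdown() change above replaces local_irq_disable()/local_irq_enable() with local_irq_save()/local_irq_restore(). The point of the pattern is that the function may be reached with interrupts already disabled (for example on reboot or suspend paths), and a blind local_irq_enable() would switch them back on behind the caller's back. A generic kernel-context sketch of the idiom, not code from this driver:

    static void quiesce_some_device(void)
    {
            unsigned long flags;

            local_irq_save(flags);          /* remember whether IRQs were on */

            /* ... touch hardware that must not be interrupted ... */

            local_irq_restore(flags);       /* put the IRQ state back exactly as
                                             * the caller had it; never force it
                                             * on with local_irq_enable() here */
    }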
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 262e44544dc8..9c205274c1cb 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
@@ -38,6 +38,12 @@ | |||
38 | 38 | ||
39 | #define EDAC_MC_VERSION "edac_mc Ver: 2.0.0 " __DATE__ | 39 | #define EDAC_MC_VERSION "edac_mc Ver: 2.0.0 " __DATE__ |
40 | 40 | ||
41 | /* For now, disable the EDAC sysfs code. The sysfs interface that EDAC | ||
42 | * presents to user space needs more thought, and is likely to change | ||
43 | * substantially. | ||
44 | */ | ||
45 | #define DISABLE_EDAC_SYSFS | ||
46 | |||
41 | #ifdef CONFIG_EDAC_DEBUG | 47 | #ifdef CONFIG_EDAC_DEBUG |
42 | /* Values of 0 to 4 will generate output */ | 48 | /* Values of 0 to 4 will generate output */ |
43 | int edac_debug_level = 1; | 49 | int edac_debug_level = 1; |
@@ -47,7 +53,7 @@ EXPORT_SYMBOL(edac_debug_level); | |||
47 | /* EDAC Controls, setable by module parameter, and sysfs */ | 53 | /* EDAC Controls, setable by module parameter, and sysfs */ |
48 | static int log_ue = 1; | 54 | static int log_ue = 1; |
49 | static int log_ce = 1; | 55 | static int log_ce = 1; |
50 | static int panic_on_ue = 1; | 56 | static int panic_on_ue; |
51 | static int poll_msec = 1000; | 57 | static int poll_msec = 1000; |
52 | 58 | ||
53 | static int check_pci_parity = 0; /* default YES check PCI parity */ | 59 | static int check_pci_parity = 0; /* default YES check PCI parity */ |
@@ -77,6 +83,8 @@ static int pci_whitelist_count ; | |||
77 | 83 | ||
78 | /* START sysfs data and methods */ | 84 | /* START sysfs data and methods */ |
79 | 85 | ||
86 | #ifndef DISABLE_EDAC_SYSFS | ||
87 | |||
80 | static const char *mem_types[] = { | 88 | static const char *mem_types[] = { |
81 | [MEM_EMPTY] = "Empty", | 89 | [MEM_EMPTY] = "Empty", |
82 | [MEM_RESERVED] = "Reserved", | 90 | [MEM_RESERVED] = "Reserved", |
@@ -241,6 +249,7 @@ static struct kobj_type ktype_memctrl = { | |||
241 | .default_attrs = (struct attribute **) memctrl_attr, | 249 | .default_attrs = (struct attribute **) memctrl_attr, |
242 | }; | 250 | }; |
243 | 251 | ||
252 | #endif /* DISABLE_EDAC_SYSFS */ | ||
244 | 253 | ||
245 | /* Initialize the main sysfs entries for edac: | 254 | /* Initialize the main sysfs entries for edac: |
246 | * /sys/devices/system/edac | 255 | * /sys/devices/system/edac |
@@ -251,6 +260,11 @@ static struct kobj_type ktype_memctrl = { | |||
251 | * !0 FAILURE | 260 | * !0 FAILURE |
252 | */ | 261 | */ |
253 | static int edac_sysfs_memctrl_setup(void) | 262 | static int edac_sysfs_memctrl_setup(void) |
263 | #ifdef DISABLE_EDAC_SYSFS | ||
264 | { | ||
265 | return 0; | ||
266 | } | ||
267 | #else | ||
254 | { | 268 | { |
255 | int err=0; | 269 | int err=0; |
256 | 270 | ||
@@ -283,6 +297,7 @@ static int edac_sysfs_memctrl_setup(void) | |||
283 | 297 | ||
284 | return err; | 298 | return err; |
285 | } | 299 | } |
300 | #endif /* DISABLE_EDAC_SYSFS */ | ||
286 | 301 | ||
287 | /* | 302 | /* |
288 | * MC teardown: | 303 | * MC teardown: |
@@ -290,6 +305,7 @@ static int edac_sysfs_memctrl_setup(void) | |||
290 | */ | 305 | */ |
291 | static void edac_sysfs_memctrl_teardown(void) | 306 | static void edac_sysfs_memctrl_teardown(void) |
292 | { | 307 | { |
308 | #ifndef DISABLE_EDAC_SYSFS | ||
293 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | 309 | debugf0("MC: " __FILE__ ": %s()\n", __func__); |
294 | 310 | ||
295 | /* Unregister the MC's kobject */ | 311 | /* Unregister the MC's kobject */ |
@@ -300,8 +316,11 @@ static void edac_sysfs_memctrl_teardown(void) | |||
300 | 316 | ||
301 | /* Unregister the 'edac' object */ | 317 | /* Unregister the 'edac' object */ |
302 | sysdev_class_unregister(&edac_class); | 318 | sysdev_class_unregister(&edac_class); |
319 | #endif /* DISABLE_EDAC_SYSFS */ | ||
303 | } | 320 | } |
304 | 321 | ||
322 | #ifndef DISABLE_EDAC_SYSFS | ||
323 | |||
305 | /* | 324 | /* |
306 | * /sys/devices/system/edac/pci; | 325 | * /sys/devices/system/edac/pci; |
307 | * data structures and methods | 326 | * data structures and methods |
@@ -554,11 +573,18 @@ static struct kobj_type ktype_edac_pci = { | |||
554 | .default_attrs = (struct attribute **) edac_pci_attr, | 573 | .default_attrs = (struct attribute **) edac_pci_attr, |
555 | }; | 574 | }; |
556 | 575 | ||
576 | #endif /* DISABLE_EDAC_SYSFS */ | ||
577 | |||
557 | /** | 578 | /** |
558 | * edac_sysfs_pci_setup() | 579 | * edac_sysfs_pci_setup() |
559 | * | 580 | * |
560 | */ | 581 | */ |
561 | static int edac_sysfs_pci_setup(void) | 582 | static int edac_sysfs_pci_setup(void) |
583 | #ifdef DISABLE_EDAC_SYSFS | ||
584 | { | ||
585 | return 0; | ||
586 | } | ||
587 | #else | ||
562 | { | 588 | { |
563 | int err; | 589 | int err; |
564 | 590 | ||
@@ -582,16 +608,20 @@ static int edac_sysfs_pci_setup(void) | |||
582 | } | 608 | } |
583 | return err; | 609 | return err; |
584 | } | 610 | } |
585 | 611 | #endif /* DISABLE_EDAC_SYSFS */ | |
586 | 612 | ||
587 | static void edac_sysfs_pci_teardown(void) | 613 | static void edac_sysfs_pci_teardown(void) |
588 | { | 614 | { |
615 | #ifndef DISABLE_EDAC_SYSFS | ||
589 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | 616 | debugf0("MC: " __FILE__ ": %s()\n", __func__); |
590 | 617 | ||
591 | kobject_unregister(&edac_pci_kobj); | 618 | kobject_unregister(&edac_pci_kobj); |
592 | kobject_put(&edac_pci_kobj); | 619 | kobject_put(&edac_pci_kobj); |
620 | #endif | ||
593 | } | 621 | } |
594 | 622 | ||
623 | #ifndef DISABLE_EDAC_SYSFS | ||
624 | |||
595 | /* EDAC sysfs CSROW data structures and methods */ | 625 | /* EDAC sysfs CSROW data structures and methods */ |
596 | 626 | ||
597 | /* Set of more detailed csrow<id> attribute show/store functions */ | 627 | /* Set of more detailed csrow<id> attribute show/store functions */ |
@@ -1045,6 +1075,8 @@ static struct kobj_type ktype_mci = { | |||
1045 | .default_attrs = (struct attribute **) mci_attr, | 1075 | .default_attrs = (struct attribute **) mci_attr, |
1046 | }; | 1076 | }; |
1047 | 1077 | ||
1078 | #endif /* DISABLE_EDAC_SYSFS */ | ||
1079 | |||
1048 | #define EDAC_DEVICE_SYMLINK "device" | 1080 | #define EDAC_DEVICE_SYMLINK "device" |
1049 | 1081 | ||
1050 | /* | 1082 | /* |
@@ -1056,6 +1088,11 @@ static struct kobj_type ktype_mci = { | |||
1056 | * !0 Failure | 1088 | * !0 Failure |
1057 | */ | 1089 | */ |
1058 | static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) | 1090 | static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) |
1091 | #ifdef DISABLE_EDAC_SYSFS | ||
1092 | { | ||
1093 | return 0; | ||
1094 | } | ||
1095 | #else | ||
1059 | { | 1096 | { |
1060 | int i; | 1097 | int i; |
1061 | int err; | 1098 | int err; |
@@ -1124,12 +1161,14 @@ fail: | |||
1124 | 1161 | ||
1125 | return err; | 1162 | return err; |
1126 | } | 1163 | } |
1164 | #endif /* DISABLE_EDAC_SYSFS */ | ||
1127 | 1165 | ||
1128 | /* | 1166 | /* |
1129 | * remove a Memory Controller instance | 1167 | * remove a Memory Controller instance |
1130 | */ | 1168 | */ |
1131 | static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) | 1169 | static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) |
1132 | { | 1170 | { |
1171 | #ifndef DISABLE_EDAC_SYSFS | ||
1133 | int i; | 1172 | int i; |
1134 | 1173 | ||
1135 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | 1174 | debugf0("MC: " __FILE__ ": %s()\n", __func__); |
@@ -1146,6 +1185,7 @@ static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) | |||
1146 | 1185 | ||
1147 | kobject_unregister(&mci->edac_mci_kobj); | 1186 | kobject_unregister(&mci->edac_mci_kobj); |
1148 | kobject_put(&mci->edac_mci_kobj); | 1187 | kobject_put(&mci->edac_mci_kobj); |
1188 | #endif /* DISABLE_EDAC_SYSFS */ | ||
1149 | } | 1189 | } |
1150 | 1190 | ||
1151 | /* END OF sysfs data and methods */ | 1191 | /* END OF sysfs data and methods */ |
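The edac_mc.c hunks above use a compact idiom to compile the sysfs code down to stubs: the #ifdef sits between the function header and its body, so one declaration gets either the stub body or the real one, and callers never need to change. A generic sketch of the same idiom (the names here are hypothetical, mirroring DISABLE_EDAC_SYSFS):

    #define DISABLE_FOO_SYSFS               /* hypothetical compile-time switch */

    static int foo_sysfs_setup(void)
    #ifdef DISABLE_FOO_SYSFS
    {
            return 0;                       /* sysfs interface compiled out */
    }
    #else
    {
            /* ... register kobjects and attribute groups here ... */
            return 0;
    }
    #endif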
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 6eb93e45fcd3..4a478eb0e27d 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c | |||
@@ -825,7 +825,7 @@ proc_get_info(char *page, char **start, off_t off, | |||
825 | p += sprintf(p, "PMU driver version : %d\n", PMU_DRIVER_VERSION); | 825 | p += sprintf(p, "PMU driver version : %d\n", PMU_DRIVER_VERSION); |
826 | p += sprintf(p, "PMU firmware version : %02x\n", pmu_version); | 826 | p += sprintf(p, "PMU firmware version : %02x\n", pmu_version); |
827 | p += sprintf(p, "AC Power : %d\n", | 827 | p += sprintf(p, "AC Power : %d\n", |
828 | ((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0)); | 828 | ((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0) || pmu_battery_count == 0); |
829 | p += sprintf(p, "Battery count : %d\n", pmu_battery_count); | 829 | p += sprintf(p, "Battery count : %d\n", pmu_battery_count); |
830 | 830 | ||
831 | return p - page; | 831 | return p - page; |
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c index 8416ceff524b..41715cacf926 100644 --- a/drivers/media/video/mxb.c +++ b/drivers/media/video/mxb.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | mxb - v4l2 driver for the Multimedia eXtension Board | 2 | mxb - v4l2 driver for the Multimedia eXtension Board |
3 | 3 | ||
4 | Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de> | 4 | Copyright (C) 1998-2006 Michael Hunold <michael@mihu.de> |
5 | 5 | ||
6 | Visit http://www.mihu.de/linux/saa7146/mxb/ | 6 | Visit http://www.mihu.de/linux/saa7146/mxb/ |
7 | for further details about this card. | 7 | for further details about this card. |
@@ -327,6 +327,7 @@ static int mxb_init_done(struct saa7146_dev* dev) | |||
327 | struct video_decoder_init init; | 327 | struct video_decoder_init init; |
328 | struct i2c_msg msg; | 328 | struct i2c_msg msg; |
329 | struct tuner_setup tun_setup; | 329 | struct tuner_setup tun_setup; |
330 | v4l2_std_id std = V4L2_STD_PAL_BG; | ||
330 | 331 | ||
331 | int i = 0, err = 0; | 332 | int i = 0, err = 0; |
332 | struct tea6415c_multiplex vm; | 333 | struct tea6415c_multiplex vm; |
@@ -361,6 +362,9 @@ static int mxb_init_done(struct saa7146_dev* dev) | |||
361 | mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_FREQUENCY, | 362 | mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_FREQUENCY, |
362 | &mxb->cur_freq); | 363 | &mxb->cur_freq); |
363 | 364 | ||
365 | /* set a default video standard */ | ||
366 | mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_STD, &std); | ||
367 | |||
364 | /* mute audio on tea6420s */ | 368 | /* mute audio on tea6420s */ |
365 | mxb->tea6420_1->driver->command(mxb->tea6420_1,TEA6420_SWITCH, &TEA6420_line[6][0]); | 369 | mxb->tea6420_1->driver->command(mxb->tea6420_1,TEA6420_SWITCH, &TEA6420_line[6][0]); |
366 | mxb->tea6420_2->driver->command(mxb->tea6420_2,TEA6420_SWITCH, &TEA6420_line[6][1]); | 370 | mxb->tea6420_2->driver->command(mxb->tea6420_2,TEA6420_SWITCH, &TEA6420_line[6][1]); |
@@ -921,17 +925,21 @@ static int std_callback(struct saa7146_dev* dev, struct saa7146_standard *std) | |||
921 | int one = 1; | 925 | int one = 1; |
922 | 926 | ||
923 | if(V4L2_STD_PAL_I == std->id ) { | 927 | if(V4L2_STD_PAL_I == std->id ) { |
928 | v4l2_std_id std = V4L2_STD_PAL_I; | ||
924 | DEB_D(("VIDIOC_S_STD: setting mxb for PAL_I.\n")); | 929 | DEB_D(("VIDIOC_S_STD: setting mxb for PAL_I.\n")); |
925 | /* set the 7146 gpio register -- I don't know what this does exactly */ | 930 | /* set the 7146 gpio register -- I don't know what this does exactly */ |
926 | saa7146_write(dev, GPIO_CTRL, 0x00404050); | 931 | saa7146_write(dev, GPIO_CTRL, 0x00404050); |
927 | /* unset the 7111 gpio register -- I don't know what this does exactly */ | 932 | /* unset the 7111 gpio register -- I don't know what this does exactly */ |
928 | mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_GPIO, &zero); | 933 | mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_GPIO, &zero); |
934 | mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_STD, &std); | ||
929 | } else { | 935 | } else { |
936 | v4l2_std_id std = V4L2_STD_PAL_BG; | ||
930 | DEB_D(("VIDIOC_S_STD: setting mxb for PAL/NTSC/SECAM.\n")); | 937 | DEB_D(("VIDIOC_S_STD: setting mxb for PAL/NTSC/SECAM.\n")); |
931 | /* set the 7146 gpio register -- I don't know what this does exactly */ | 938 | /* set the 7146 gpio register -- I don't know what this does exactly */ |
932 | saa7146_write(dev, GPIO_CTRL, 0x00404050); | 939 | saa7146_write(dev, GPIO_CTRL, 0x00404050); |
933 | /* set the 7111 gpio register -- I don't know what this does exactly */ | 940 | /* set the 7111 gpio register -- I don't know what this does exactly */ |
934 | mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_GPIO, &one); | 941 | mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_GPIO, &one); |
942 | mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_STD, &std); | ||
935 | } | 943 | } |
936 | return 0; | 944 | return 0; |
937 | } | 945 | } |
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 155737e7483f..a19480d07888 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c | |||
@@ -178,7 +178,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
178 | * we're at a block boundary and need to erase the whole block. | 178 | * we're at a block boundary and need to erase the whole block. |
179 | */ | 179 | */ |
180 | pageaddr = instr->addr / priv->page_size; | 180 | pageaddr = instr->addr / priv->page_size; |
181 | do_block = (pageaddr & 0x7) == 0 && instr->len <= blocksize; | 181 | do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize; |
182 | pageaddr = pageaddr << priv->page_offset; | 182 | pageaddr = pageaddr << priv->page_offset; |
183 | 183 | ||
184 | command[0] = do_block ? OP_ERASE_BLOCK : OP_ERASE_PAGE; | 184 | command[0] = do_block ? OP_ERASE_BLOCK : OP_ERASE_PAGE; |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 0f3798f81883..c0878f304e54 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -10543,8 +10543,6 @@ static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) | |||
10543 | strcat(str, "66MHz"); | 10543 | strcat(str, "66MHz"); |
10544 | else if (clock_ctrl == 6) | 10544 | else if (clock_ctrl == 6) |
10545 | strcat(str, "100MHz"); | 10545 | strcat(str, "100MHz"); |
10546 | else if (clock_ctrl == 7) | ||
10547 | strcat(str, "133MHz"); | ||
10548 | } else { | 10546 | } else { |
10549 | strcpy(str, "PCI:"); | 10547 | strcpy(str, "PCI:"); |
10550 | if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) | 10548 | if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 50b8c6754b1e..a1ed2d983740 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -249,8 +249,11 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, | |||
249 | 249 | ||
250 | if (align) | 250 | if (align) |
251 | skb_reserve(skb, align); | 251 | skb_reserve(skb, align); |
252 | if (memcpy_fromiovec(skb_put(skb, len), iv, len)) | 252 | if (memcpy_fromiovec(skb_put(skb, len), iv, len)) { |
253 | tun->stats.rx_dropped++; | ||
254 | kfree_skb(skb); | ||
253 | return -EFAULT; | 255 | return -EFAULT; |
256 | } | ||
254 | 257 | ||
255 | skb->dev = tun->dev; | 258 | skb->dev = tun->dev; |
256 | switch (tun->flags & TUN_TYPE_MASK) { | 259 | switch (tun->flags & TUN_TYPE_MASK) { |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 1bbf231f8aaf..3c77d65960db 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -409,6 +409,9 @@ __init_channel_subsystem(struct subchannel_id schid, void *data) | |||
409 | /* -ENXIO: no more subchannels. */ | 409 | /* -ENXIO: no more subchannels. */ |
410 | case -ENXIO: | 410 | case -ENXIO: |
411 | return ret; | 411 | return ret; |
412 | /* -EIO: this subchannel set not supported. */ | ||
413 | case -EIO: | ||
414 | return ret; | ||
412 | default: | 415 | default: |
413 | return 0; | 416 | return 0; |
414 | } | 417 | } |
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 7f551d66f47f..6eba56cd89ba 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h | |||
@@ -664,6 +664,7 @@ do { \ | |||
664 | #define ZFCP_STATUS_UNIT_TEMPORARY 0x00000002 | 664 | #define ZFCP_STATUS_UNIT_TEMPORARY 0x00000002 |
665 | #define ZFCP_STATUS_UNIT_SHARED 0x00000004 | 665 | #define ZFCP_STATUS_UNIT_SHARED 0x00000004 |
666 | #define ZFCP_STATUS_UNIT_READONLY 0x00000008 | 666 | #define ZFCP_STATUS_UNIT_READONLY 0x00000008 |
667 | #define ZFCP_STATUS_UNIT_REGISTERED 0x00000010 | ||
667 | 668 | ||
668 | /* FSF request status (this does not have a common part) */ | 669 | /* FSF request status (this does not have a common part) */ |
669 | #define ZFCP_STATUS_FSFREQ_NOT_INIT 0x00000000 | 670 | #define ZFCP_STATUS_FSFREQ_NOT_INIT 0x00000000 |
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index e3c4bdd29a60..57cb628a05aa 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -3391,10 +3391,13 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter, | |||
3391 | && (!atomic_test_mask(ZFCP_STATUS_UNIT_TEMPORARY, | 3391 | && (!atomic_test_mask(ZFCP_STATUS_UNIT_TEMPORARY, |
3392 | &unit->status)) | 3392 | &unit->status)) |
3393 | && !unit->device | 3393 | && !unit->device |
3394 | && port->rport) | 3394 | && port->rport) { |
3395 | scsi_add_device(port->adapter->scsi_host, 0, | 3395 | atomic_set_mask(ZFCP_STATUS_UNIT_REGISTERED, |
3396 | port->rport->scsi_target_id, | 3396 | &unit->status); |
3397 | unit->scsi_lun); | 3397 | scsi_scan_target(&port->rport->dev, 0, |
3398 | port->rport->scsi_target_id, | ||
3399 | unit->scsi_lun, 0); | ||
3400 | } | ||
3398 | zfcp_unit_put(unit); | 3401 | zfcp_unit_put(unit); |
3399 | break; | 3402 | break; |
3400 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: | 3403 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: |
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 9f6b4d7a46f3..9e6d07d7b3c8 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -68,7 +68,7 @@ struct zfcp_data zfcp_data = { | |||
68 | eh_host_reset_handler: zfcp_scsi_eh_host_reset_handler, | 68 | eh_host_reset_handler: zfcp_scsi_eh_host_reset_handler, |
69 | /* FIXME(openfcp): Tune */ | 69 | /* FIXME(openfcp): Tune */ |
70 | can_queue: 4096, | 70 | can_queue: 4096, |
71 | this_id: 0, | 71 | this_id: -1, |
72 | /* | 72 | /* |
73 | * FIXME: | 73 | * FIXME: |
74 | * one less? can zfcp_create_sbale cope with it? | 74 | * one less? can zfcp_create_sbale cope with it? |
@@ -183,7 +183,8 @@ zfcp_scsi_slave_alloc(struct scsi_device *sdp) | |||
183 | 183 | ||
184 | read_lock_irqsave(&zfcp_data.config_lock, flags); | 184 | read_lock_irqsave(&zfcp_data.config_lock, flags); |
185 | unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun); | 185 | unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun); |
186 | if (unit) { | 186 | if (unit && atomic_test_mask(ZFCP_STATUS_UNIT_REGISTERED, |
187 | &unit->status)) { | ||
187 | sdp->hostdata = unit; | 188 | sdp->hostdata = unit; |
188 | unit->device = sdp; | 189 | unit->device = sdp; |
189 | zfcp_unit_get(unit); | 190 | zfcp_unit_get(unit); |
@@ -208,6 +209,7 @@ zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) | |||
208 | struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; | 209 | struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; |
209 | 210 | ||
210 | if (unit) { | 211 | if (unit) { |
212 | atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); | ||
211 | sdpnt->hostdata = NULL; | 213 | sdpnt->hostdata = NULL; |
212 | unit->device = NULL; | 214 | unit->device = NULL; |
213 | zfcp_unit_put(unit); | 215 | zfcp_unit_put(unit); |
@@ -291,7 +293,7 @@ zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit, | |||
291 | "on port 0x%016Lx in recovery\n", | 293 | "on port 0x%016Lx in recovery\n", |
292 | zfcp_get_busid_by_unit(unit), | 294 | zfcp_get_busid_by_unit(unit), |
293 | unit->fcp_lun, unit->port->wwpn); | 295 | unit->fcp_lun, unit->port->wwpn); |
294 | retval = SCSI_MLQUEUE_DEVICE_BUSY; | 296 | zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT); |
295 | goto out; | 297 | goto out; |
296 | } | 298 | } |
297 | 299 | ||
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c index a800fb51168b..559ff7aae3f1 100644 --- a/drivers/scsi/ahci.c +++ b/drivers/scsi/ahci.c | |||
@@ -742,23 +742,17 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs * | |||
742 | struct ata_queued_cmd *qc; | 742 | struct ata_queued_cmd *qc; |
743 | qc = ata_qc_from_tag(ap, ap->active_tag); | 743 | qc = ata_qc_from_tag(ap, ap->active_tag); |
744 | if (!ahci_host_intr(ap, qc)) | 744 | if (!ahci_host_intr(ap, qc)) |
745 | if (ata_ratelimit()) { | 745 | if (ata_ratelimit()) |
746 | struct pci_dev *pdev = | 746 | dev_printk(KERN_WARNING, host_set->dev, |
747 | to_pci_dev(ap->host_set->dev); | ||
748 | dev_printk(KERN_WARNING, &pdev->dev, | ||
749 | "unhandled interrupt on port %u\n", | 747 | "unhandled interrupt on port %u\n", |
750 | i); | 748 | i); |
751 | } | ||
752 | 749 | ||
753 | VPRINTK("port %u\n", i); | 750 | VPRINTK("port %u\n", i); |
754 | } else { | 751 | } else { |
755 | VPRINTK("port %u (no irq)\n", i); | 752 | VPRINTK("port %u (no irq)\n", i); |
756 | if (ata_ratelimit()) { | 753 | if (ata_ratelimit()) |
757 | struct pci_dev *pdev = | 754 | dev_printk(KERN_WARNING, host_set->dev, |
758 | to_pci_dev(ap->host_set->dev); | ||
759 | dev_printk(KERN_WARNING, &pdev->dev, | ||
760 | "interrupt on disabled port %u\n", i); | 755 | "interrupt on disabled port %u\n", i); |
761 | } | ||
762 | } | 756 | } |
763 | 757 | ||
764 | irq_ack |= (1 << i); | 758 | irq_ack |= (1 << i); |
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig index df52190f4d94..eac8e179cfff 100644 --- a/drivers/scsi/pcmcia/Kconfig +++ b/drivers/scsi/pcmcia/Kconfig | |||
@@ -8,6 +8,7 @@ menu "PCMCIA SCSI adapter support" | |||
8 | config PCMCIA_AHA152X | 8 | config PCMCIA_AHA152X |
9 | tristate "Adaptec AHA152X PCMCIA support" | 9 | tristate "Adaptec AHA152X PCMCIA support" |
10 | depends on m && !64BIT | 10 | depends on m && !64BIT |
11 | select SCSI_SPI_ATTRS | ||
11 | help | 12 | help |
12 | Say Y here if you intend to attach this type of PCMCIA SCSI host | 13 | Say Y here if you intend to attach this type of PCMCIA SCSI host |
13 | adapter to your computer. | 14 | adapter to your computer. |
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index f01ec0a7c506..84c3937ae8fb 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
@@ -126,6 +126,7 @@ static struct { | |||
126 | {"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN}, | 126 | {"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN}, |
127 | {"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN}, | 127 | {"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN}, |
128 | {"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36}, | 128 | {"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36}, |
129 | {"BROWNIE", "1600U3P", NULL, BLIST_NOREPORTLUN}, | ||
129 | {"CANON", "IPUBJD", NULL, BLIST_SPARSELUN}, | 130 | {"CANON", "IPUBJD", NULL, BLIST_SPARSELUN}, |
130 | {"CBOX3", "USB Storage-SMC", "300A", BLIST_FORCELUN | BLIST_INQUIRY_36}, | 131 | {"CBOX3", "USB Storage-SMC", "300A", BLIST_FORCELUN | BLIST_INQUIRY_36}, |
131 | {"CMD", "CRA-7280", NULL, BLIST_SPARSELUN}, /* CMD RAID Controller */ | 132 | {"CMD", "CRA-7280", NULL, BLIST_SPARSELUN}, /* CMD RAID Controller */ |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 929032e370db..13ea64119b73 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -223,7 +223,7 @@ static void fc_rport_terminate(struct fc_rport *rport); | |||
223 | */ | 223 | */ |
224 | #define FC_STARGET_NUM_ATTRS 3 | 224 | #define FC_STARGET_NUM_ATTRS 3 |
225 | #define FC_RPORT_NUM_ATTRS 9 | 225 | #define FC_RPORT_NUM_ATTRS 9 |
226 | #define FC_HOST_NUM_ATTRS 16 | 226 | #define FC_HOST_NUM_ATTRS 17 |
227 | 227 | ||
228 | struct fc_internal { | 228 | struct fc_internal { |
229 | struct scsi_transport_template t; | 229 | struct scsi_transport_template t; |
diff --git a/fs/buffer.c b/fs/buffer.c index 62cfd17dc5fe..a9b399402007 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -3060,6 +3060,7 @@ int buffer_migrate_page(struct page *newpage, struct page *page) | |||
3060 | { | 3060 | { |
3061 | struct address_space *mapping = page->mapping; | 3061 | struct address_space *mapping = page->mapping; |
3062 | struct buffer_head *bh, *head; | 3062 | struct buffer_head *bh, *head; |
3063 | int rc; | ||
3063 | 3064 | ||
3064 | if (!mapping) | 3065 | if (!mapping) |
3065 | return -EAGAIN; | 3066 | return -EAGAIN; |
@@ -3069,8 +3070,9 @@ int buffer_migrate_page(struct page *newpage, struct page *page) | |||
3069 | 3070 | ||
3070 | head = page_buffers(page); | 3071 | head = page_buffers(page); |
3071 | 3072 | ||
3072 | if (migrate_page_remove_references(newpage, page, 3)) | 3073 | rc = migrate_page_remove_references(newpage, page, 3); |
3073 | return -EAGAIN; | 3074 | if (rc) |
3075 | return rc; | ||
3074 | 3076 | ||
3075 | bh = head; | 3077 | bh = head; |
3076 | do { | 3078 | do { |
diff --git a/fs/direct-io.c b/fs/direct-io.c index 848044af7e16..27f3e787faca 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -1155,15 +1155,16 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, | |||
1155 | * For writes, i_mutex is not held on entry; it is never taken. | 1155 | * For writes, i_mutex is not held on entry; it is never taken. |
1156 | * | 1156 | * |
1157 | * DIO_LOCKING (simple locking for regular files) | 1157 | * DIO_LOCKING (simple locking for regular files) |
1158 | * For writes we are called under i_mutex and return with i_mutex held, even though | 1158 | * For writes we are called under i_mutex and return with i_mutex held, even |
1159 | * it is internally dropped. | 1159 | * though it is internally dropped. |
1160 | * For reads, i_mutex is not held on entry, but it is taken and dropped before | 1160 | * For reads, i_mutex is not held on entry, but it is taken and dropped before |
1161 | * returning. | 1161 | * returning. |
1162 | * | 1162 | * |
1163 | * DIO_OWN_LOCKING (filesystem provides synchronisation and handling of | 1163 | * DIO_OWN_LOCKING (filesystem provides synchronisation and handling of |
1164 | * uninitialised data, allowing parallel direct readers and writers) | 1164 | * uninitialised data, allowing parallel direct readers and writers) |
1165 | * For writes we are called without i_mutex, return without it, never touch it. | 1165 | * For writes we are called without i_mutex, return without it, never touch it. |
1166 | * For reads, i_mutex is held on entry and will be released before returning. | 1166 | * For reads we are called under i_mutex and return with i_mutex held, even |
1167 | * though it may be internally dropped. | ||
1167 | * | 1168 | * |
1168 | * Additional i_alloc_sem locking requirements described inline below. | 1169 | * Additional i_alloc_sem locking requirements described inline below. |
1169 | */ | 1170 | */ |
@@ -1182,7 +1183,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | |||
1182 | ssize_t retval = -EINVAL; | 1183 | ssize_t retval = -EINVAL; |
1183 | loff_t end = offset; | 1184 | loff_t end = offset; |
1184 | struct dio *dio; | 1185 | struct dio *dio; |
1185 | int reader_with_isem = (rw == READ && dio_lock_type == DIO_OWN_LOCKING); | 1186 | int release_i_mutex = 0; |
1187 | int acquire_i_mutex = 0; | ||
1186 | 1188 | ||
1187 | if (rw & WRITE) | 1189 | if (rw & WRITE) |
1188 | current->flags |= PF_SYNCWRITE; | 1190 | current->flags |= PF_SYNCWRITE; |
@@ -1225,7 +1227,6 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | |||
1225 | * writers need to grab i_alloc_sem only (i_mutex is already held) | 1227 | * writers need to grab i_alloc_sem only (i_mutex is already held) |
1226 | * For regular files using DIO_OWN_LOCKING, | 1228 | * For regular files using DIO_OWN_LOCKING, |
1227 | * neither readers nor writers take any locks here | 1229 | * neither readers nor writers take any locks here |
1228 | * (i_mutex is already held and release for writers here) | ||
1229 | */ | 1230 | */ |
1230 | dio->lock_type = dio_lock_type; | 1231 | dio->lock_type = dio_lock_type; |
1231 | if (dio_lock_type != DIO_NO_LOCKING) { | 1232 | if (dio_lock_type != DIO_NO_LOCKING) { |
@@ -1236,7 +1237,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | |||
1236 | mapping = iocb->ki_filp->f_mapping; | 1237 | mapping = iocb->ki_filp->f_mapping; |
1237 | if (dio_lock_type != DIO_OWN_LOCKING) { | 1238 | if (dio_lock_type != DIO_OWN_LOCKING) { |
1238 | mutex_lock(&inode->i_mutex); | 1239 | mutex_lock(&inode->i_mutex); |
1239 | reader_with_isem = 1; | 1240 | release_i_mutex = 1; |
1240 | } | 1241 | } |
1241 | 1242 | ||
1242 | retval = filemap_write_and_wait_range(mapping, offset, | 1243 | retval = filemap_write_and_wait_range(mapping, offset, |
@@ -1248,7 +1249,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | |||
1248 | 1249 | ||
1249 | if (dio_lock_type == DIO_OWN_LOCKING) { | 1250 | if (dio_lock_type == DIO_OWN_LOCKING) { |
1250 | mutex_unlock(&inode->i_mutex); | 1251 | mutex_unlock(&inode->i_mutex); |
1251 | reader_with_isem = 0; | 1252 | acquire_i_mutex = 1; |
1252 | } | 1253 | } |
1253 | } | 1254 | } |
1254 | 1255 | ||
@@ -1269,11 +1270,13 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | |||
1269 | nr_segs, blkbits, get_blocks, end_io, dio); | 1270 | nr_segs, blkbits, get_blocks, end_io, dio); |
1270 | 1271 | ||
1271 | if (rw == READ && dio_lock_type == DIO_LOCKING) | 1272 | if (rw == READ && dio_lock_type == DIO_LOCKING) |
1272 | reader_with_isem = 0; | 1273 | release_i_mutex = 0; |
1273 | 1274 | ||
1274 | out: | 1275 | out: |
1275 | if (reader_with_isem) | 1276 | if (release_i_mutex) |
1276 | mutex_unlock(&inode->i_mutex); | 1277 | mutex_unlock(&inode->i_mutex); |
1278 | else if (acquire_i_mutex) | ||
1279 | mutex_lock(&inode->i_mutex); | ||
1277 | if (rw & WRITE) | 1280 | if (rw & WRITE) |
1278 | current->flags &= ~PF_SYNCWRITE; | 1281 | current->flags &= ~PF_SYNCWRITE; |
1279 | return retval; | 1282 | return retval; |
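The direct-io hunk replaces the single reader_with_isem flag with two flags, release_i_mutex and acquire_i_mutex, so the common exit path can either drop a mutex the function still holds or re-take one it dropped on behalf of a DIO_OWN_LOCKING reader. A hedged sketch of that bookkeeping, using a pthread mutex in place of i_mutex; this mirrors only the exit logic, not __blockdev_direct_IO() itself.

    #include <pthread.h>

    static pthread_mutex_t inode_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int do_direct_io(int own_locking, int fail_early)
    {
            int release_mutex = 0;  /* we took the lock and must drop it      */
            int acquire_mutex = 0;  /* we dropped the caller's lock, re-take it */
            int retval = 0;

            if (!own_locking) {
                    pthread_mutex_lock(&inode_mutex);
                    release_mutex = 1;
            } else {
                    /* caller holds the lock; drop it for the duration of the I/O */
                    pthread_mutex_unlock(&inode_mutex);
                    acquire_mutex = 1;
            }

            if (fail_early) {
                    retval = -1;
                    goto out;
            }
            /* ... submit I/O here ... */
    out:
            if (release_mutex)
                    pthread_mutex_unlock(&inode_mutex);
            else if (acquire_mutex)
                    pthread_mutex_lock(&inode_mutex);   /* restore caller's state */
            return retval;
    }

    int main(void)
    {
            pthread_mutex_lock(&inode_mutex);   /* caller-held, as for own-locking reads */
            do_direct_io(1, 0);
            pthread_mutex_unlock(&inode_mutex);
            return 0;
    }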
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index 2967b7393415..79b5404db100 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c | |||
@@ -532,10 +532,10 @@ dbUpdatePMap(struct inode *ipbmap, | |||
532 | 532 | ||
533 | lastlblkno = lblkno; | 533 | lastlblkno = lblkno; |
534 | 534 | ||
535 | LOGSYNC_LOCK(log, flags); | ||
535 | if (mp->lsn != 0) { | 536 | if (mp->lsn != 0) { |
536 | /* inherit older/smaller lsn */ | 537 | /* inherit older/smaller lsn */ |
537 | logdiff(diffp, mp->lsn, log); | 538 | logdiff(diffp, mp->lsn, log); |
538 | LOGSYNC_LOCK(log, flags); | ||
539 | if (difft < diffp) { | 539 | if (difft < diffp) { |
540 | mp->lsn = lsn; | 540 | mp->lsn = lsn; |
541 | 541 | ||
@@ -548,20 +548,17 @@ dbUpdatePMap(struct inode *ipbmap, | |||
548 | logdiff(diffp, mp->clsn, log); | 548 | logdiff(diffp, mp->clsn, log); |
549 | if (difft > diffp) | 549 | if (difft > diffp) |
550 | mp->clsn = tblk->clsn; | 550 | mp->clsn = tblk->clsn; |
551 | LOGSYNC_UNLOCK(log, flags); | ||
552 | } else { | 551 | } else { |
553 | mp->log = log; | 552 | mp->log = log; |
554 | mp->lsn = lsn; | 553 | mp->lsn = lsn; |
555 | 554 | ||
556 | /* insert bp after tblock in logsync list */ | 555 | /* insert bp after tblock in logsync list */ |
557 | LOGSYNC_LOCK(log, flags); | ||
558 | |||
559 | log->count++; | 556 | log->count++; |
560 | list_add(&mp->synclist, &tblk->synclist); | 557 | list_add(&mp->synclist, &tblk->synclist); |
561 | 558 | ||
562 | mp->clsn = tblk->clsn; | 559 | mp->clsn = tblk->clsn; |
563 | LOGSYNC_UNLOCK(log, flags); | ||
564 | } | 560 | } |
561 | LOGSYNC_UNLOCK(log, flags); | ||
565 | } | 562 | } |
566 | 563 | ||
567 | /* write the last buffer. */ | 564 | /* write the last buffer. */ |
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c index 31b4aa13dd4b..4efa0d0eec39 100644 --- a/fs/jfs/jfs_imap.c +++ b/fs/jfs/jfs_imap.c | |||
@@ -2844,11 +2844,11 @@ diUpdatePMap(struct inode *ipimap, | |||
2844 | */ | 2844 | */ |
2845 | lsn = tblk->lsn; | 2845 | lsn = tblk->lsn; |
2846 | log = JFS_SBI(tblk->sb)->log; | 2846 | log = JFS_SBI(tblk->sb)->log; |
2847 | LOGSYNC_LOCK(log, flags); | ||
2847 | if (mp->lsn != 0) { | 2848 | if (mp->lsn != 0) { |
2848 | /* inherit older/smaller lsn */ | 2849 | /* inherit older/smaller lsn */ |
2849 | logdiff(difft, lsn, log); | 2850 | logdiff(difft, lsn, log); |
2850 | logdiff(diffp, mp->lsn, log); | 2851 | logdiff(diffp, mp->lsn, log); |
2851 | LOGSYNC_LOCK(log, flags); | ||
2852 | if (difft < diffp) { | 2852 | if (difft < diffp) { |
2853 | mp->lsn = lsn; | 2853 | mp->lsn = lsn; |
2854 | /* move mp after tblock in logsync list */ | 2854 | /* move mp after tblock in logsync list */ |
@@ -2860,17 +2860,15 @@ diUpdatePMap(struct inode *ipimap, | |||
2860 | logdiff(diffp, mp->clsn, log); | 2860 | logdiff(diffp, mp->clsn, log); |
2861 | if (difft > diffp) | 2861 | if (difft > diffp) |
2862 | mp->clsn = tblk->clsn; | 2862 | mp->clsn = tblk->clsn; |
2863 | LOGSYNC_UNLOCK(log, flags); | ||
2864 | } else { | 2863 | } else { |
2865 | mp->log = log; | 2864 | mp->log = log; |
2866 | mp->lsn = lsn; | 2865 | mp->lsn = lsn; |
2867 | /* insert mp after tblock in logsync list */ | 2866 | /* insert mp after tblock in logsync list */ |
2868 | LOGSYNC_LOCK(log, flags); | ||
2869 | log->count++; | 2867 | log->count++; |
2870 | list_add(&mp->synclist, &tblk->synclist); | 2868 | list_add(&mp->synclist, &tblk->synclist); |
2871 | mp->clsn = tblk->clsn; | 2869 | mp->clsn = tblk->clsn; |
2872 | LOGSYNC_UNLOCK(log, flags); | ||
2873 | } | 2870 | } |
2871 | LOGSYNC_UNLOCK(log, flags); | ||
2874 | write_metapage(mp); | 2872 | write_metapage(mp); |
2875 | return (0); | 2873 | return (0); |
2876 | } | 2874 | } |
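Both JFS hunks (jfs_dmap.c and jfs_imap.c) move LOGSYNC_LOCK above the `if (mp->lsn != 0)` test and take LOGSYNC_UNLOCK once after the if/else, so the lsn is examined and updated under a single lock acquisition instead of being checked unlocked first. A small sketch of widening a critical section this way, with a hypothetical metapage-like struct:

    #include <pthread.h>

    struct mp { unsigned long lsn; };

    static pthread_mutex_t logsync_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Racy shape: the lsn test happens outside the lock, so another thread
     * may change mp->lsn between the check and the update. */
    static void update_racy(struct mp *mp, unsigned long lsn)
    {
            if (mp->lsn != 0) {
                    pthread_mutex_lock(&logsync_lock);
                    if (lsn < mp->lsn)
                            mp->lsn = lsn;
                    pthread_mutex_unlock(&logsync_lock);
            } else {
                    pthread_mutex_lock(&logsync_lock);
                    mp->lsn = lsn;
                    pthread_mutex_unlock(&logsync_lock);
            }
    }

    /* Fixed shape: one lock acquisition covers both the test and the update. */
    static void update_fixed(struct mp *mp, unsigned long lsn)
    {
            pthread_mutex_lock(&logsync_lock);
            if (mp->lsn != 0) {
                    if (lsn < mp->lsn)
                            mp->lsn = lsn;
            } else {
                    mp->lsn = lsn;
            }
            pthread_mutex_unlock(&logsync_lock);
    }

    int main(void)
    {
            struct mp m = { 0 };

            update_racy(&m, 5);
            update_fixed(&m, 3);
            return m.lsn == 3 ? 0 : 1;
    }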
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 220058d8616d..970b6a6aa337 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c | |||
@@ -662,12 +662,18 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl) | |||
662 | * reclaimed while we're stuck in the unlock call. */ | 662 | * reclaimed while we're stuck in the unlock call. */ |
663 | fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED; | 663 | fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED; |
664 | 664 | ||
665 | /* | ||
666 | * Note: the server is supposed to either grant us the unlock | ||
667 | * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either | ||
668 | * case, we want to unlock. | ||
669 | */ | ||
670 | do_vfs_lock(fl); | ||
671 | |||
665 | if (req->a_flags & RPC_TASK_ASYNC) { | 672 | if (req->a_flags & RPC_TASK_ASYNC) { |
666 | status = nlmclnt_async_call(req, NLMPROC_UNLOCK, | 673 | status = nlmclnt_async_call(req, NLMPROC_UNLOCK, |
667 | &nlmclnt_unlock_ops); | 674 | &nlmclnt_unlock_ops); |
668 | /* Hrmf... Do the unlock early since locks_remove_posix() | 675 | /* Hrmf... Do the unlock early since locks_remove_posix() |
669 | * really expects us to free the lock synchronously */ | 676 | * really expects us to free the lock synchronously */ |
670 | do_vfs_lock(fl); | ||
671 | if (status < 0) { | 677 | if (status < 0) { |
672 | nlmclnt_release_lockargs(req); | 678 | nlmclnt_release_lockargs(req); |
673 | kfree(req); | 679 | kfree(req); |
@@ -680,7 +686,6 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl) | |||
680 | if (status < 0) | 686 | if (status < 0) |
681 | return status; | 687 | return status; |
682 | 688 | ||
683 | do_vfs_lock(fl); | ||
684 | if (resp->status == NLM_LCK_GRANTED) | 689 | if (resp->status == NLM_LCK_GRANTED) |
685 | return 0; | 690 | return 0; |
686 | 691 | ||
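The lockd hunk releases the local VFS lock unconditionally before issuing the NLM UNLOCK call, on the reasoning in the new comment that the server will either grant the unlock or deny it only because of a grace period, and the client wants the local lock gone in either case. A tiny sketch of that ordering with hypothetical helpers, not the lockd client code:

    #include <stdio.h>

    static void drop_local_lock(void)  { puts("local lock released"); }
    static int  send_unlock_rpc(void)  { return 0; /* 0 stands for "granted" */ }

    static int client_unlock(void)
    {
            /* Release the local lock first: whatever the server answers
             * (granted, or denied only due to its grace period), the local
             * lock must not outlive this call. */
            drop_local_lock();

            return send_unlock_rpc();
    }

    int main(void)
    {
            return client_unlock();
    }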
diff --git a/fs/namespace.c b/fs/namespace.c index 058a44865beb..39c81a8d6316 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -1338,7 +1338,7 @@ struct namespace *dup_namespace(struct task_struct *tsk, struct fs_struct *fs) | |||
1338 | 1338 | ||
1339 | new_ns = kmalloc(sizeof(struct namespace), GFP_KERNEL); | 1339 | new_ns = kmalloc(sizeof(struct namespace), GFP_KERNEL); |
1340 | if (!new_ns) | 1340 | if (!new_ns) |
1341 | goto out; | 1341 | return NULL; |
1342 | 1342 | ||
1343 | atomic_set(&new_ns->count, 1); | 1343 | atomic_set(&new_ns->count, 1); |
1344 | INIT_LIST_HEAD(&new_ns->list); | 1344 | INIT_LIST_HEAD(&new_ns->list); |
@@ -1352,7 +1352,7 @@ struct namespace *dup_namespace(struct task_struct *tsk, struct fs_struct *fs) | |||
1352 | if (!new_ns->root) { | 1352 | if (!new_ns->root) { |
1353 | up_write(&namespace_sem); | 1353 | up_write(&namespace_sem); |
1354 | kfree(new_ns); | 1354 | kfree(new_ns); |
1355 | goto out; | 1355 | return NULL; |
1356 | } | 1356 | } |
1357 | spin_lock(&vfsmount_lock); | 1357 | spin_lock(&vfsmount_lock); |
1358 | list_add_tail(&new_ns->list, &new_ns->root->mnt_list); | 1358 | list_add_tail(&new_ns->list, &new_ns->root->mnt_list); |
@@ -1393,7 +1393,6 @@ struct namespace *dup_namespace(struct task_struct *tsk, struct fs_struct *fs) | |||
1393 | if (altrootmnt) | 1393 | if (altrootmnt) |
1394 | mntput(altrootmnt); | 1394 | mntput(altrootmnt); |
1395 | 1395 | ||
1396 | out: | ||
1397 | return new_ns; | 1396 | return new_ns; |
1398 | } | 1397 | } |
1399 | 1398 | ||
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 04ab2fc360e7..4e9b3a1b36c5 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c | |||
@@ -57,6 +57,7 @@ | |||
57 | #define NFSDBG_FACILITY NFSDBG_VFS | 57 | #define NFSDBG_FACILITY NFSDBG_VFS |
58 | #define MAX_DIRECTIO_SIZE (4096UL << PAGE_SHIFT) | 58 | #define MAX_DIRECTIO_SIZE (4096UL << PAGE_SHIFT) |
59 | 59 | ||
60 | static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty); | ||
60 | static kmem_cache_t *nfs_direct_cachep; | 61 | static kmem_cache_t *nfs_direct_cachep; |
61 | 62 | ||
62 | /* | 63 | /* |
@@ -107,6 +108,15 @@ nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, | |||
107 | page_count, (rw == READ), 0, | 108 | page_count, (rw == READ), 0, |
108 | *pages, NULL); | 109 | *pages, NULL); |
109 | up_read(¤t->mm->mmap_sem); | 110 | up_read(¤t->mm->mmap_sem); |
111 | /* | ||
112 | * If we got fewer pages than expected from get_user_pages(), | ||
113 | * the user buffer runs off the end of a mapping; return EFAULT. | ||
114 | */ | ||
115 | if (result >= 0 && result < page_count) { | ||
116 | nfs_free_user_pages(*pages, result, 0); | ||
117 | *pages = NULL; | ||
118 | result = -EFAULT; | ||
119 | } | ||
110 | } | 120 | } |
111 | return result; | 121 | return result; |
112 | } | 122 | } |
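The NFS direct-I/O hunk handles a short result from get_user_pages(): if fewer pages than requested came back, the user buffer runs off the end of a mapping, so the pages already pinned are released and -EFAULT is returned rather than continuing with a partial array. An illustrative userspace analogue of that all-or-nothing check, with malloc standing in for page pinning:

    #include <errno.h>
    #include <stdlib.h>

    /* Hypothetical pinning helper: tries to "pin" n items, may stop short. */
    static int pin_items(void **items, int n, int available)
    {
            int i, want = (available < n) ? available : n;

            for (i = 0; i < want; i++) {
                    items[i] = malloc(16);
                    if (!items[i])
                            break;
            }
            return i;       /* number actually pinned */
    }

    static void release_items(void **items, int count)
    {
            int i;

            for (i = 0; i < count; i++)
                    free(items[i]);
    }

    static int pin_all_or_fail(void **items, int n, int available)
    {
            int got = pin_items(items, n, available);

            if (got >= 0 && got < n) {
                    /* Partial success: undo what was done and fail cleanly. */
                    release_items(items, got);
                    return -EFAULT;
            }
            return got;
    }

    int main(void)
    {
            void *items[8];

            return pin_all_or_fail(items, 8, 5) == -EFAULT ? 0 : 1;
    }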
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 984ca3454d04..f8c0066e02e1 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -1430,7 +1430,7 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, | |||
1430 | if (status == 0) | 1430 | if (status == 0) |
1431 | status = nfs4_do_fsinfo(server, fhandle, info); | 1431 | status = nfs4_do_fsinfo(server, fhandle, info); |
1432 | out: | 1432 | out: |
1433 | return status; | 1433 | return nfs4_map_errors(status); |
1434 | } | 1434 | } |
1435 | 1435 | ||
1436 | static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr) | 1436 | static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr) |
diff --git a/include/asm-arm/fpstate.h b/include/asm-arm/fpstate.h index f7430e3aa55d..6246bf83627d 100644 --- a/include/asm-arm/fpstate.h +++ b/include/asm-arm/fpstate.h | |||
@@ -55,8 +55,10 @@ struct fp_soft_struct { | |||
55 | unsigned int save[FP_SOFT_SIZE]; /* undefined information */ | 55 | unsigned int save[FP_SOFT_SIZE]; /* undefined information */ |
56 | }; | 56 | }; |
57 | 57 | ||
58 | #define IWMMXT_SIZE 0x98 | ||
59 | |||
58 | struct iwmmxt_struct { | 60 | struct iwmmxt_struct { |
59 | unsigned int save[0x98/sizeof(int) + 1]; | 61 | unsigned int save[IWMMXT_SIZE / sizeof(unsigned int)]; |
60 | }; | 62 | }; |
61 | 63 | ||
62 | union fp_state { | 64 | union fp_state { |
diff --git a/include/asm-arm/thread_info.h b/include/asm-arm/thread_info.h index 33a33cbb6329..cfbccb63c67b 100644 --- a/include/asm-arm/thread_info.h +++ b/include/asm-arm/thread_info.h | |||
@@ -59,7 +59,7 @@ struct thread_info { | |||
59 | struct cpu_context_save cpu_context; /* cpu context */ | 59 | struct cpu_context_save cpu_context; /* cpu context */ |
60 | __u8 used_cp[16]; /* thread used copro */ | 60 | __u8 used_cp[16]; /* thread used copro */ |
61 | unsigned long tp_value; | 61 | unsigned long tp_value; |
62 | union fp_state fpstate; | 62 | union fp_state fpstate __attribute__((aligned(8))); |
63 | union vfp_state vfpstate; | 63 | union vfp_state vfpstate; |
64 | struct restart_block restart_block; | 64 | struct restart_block restart_block; |
65 | }; | 65 | }; |
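The ARM hunks name the iWMMXt save-area size as IWMMXT_SIZE and force 8-byte alignment on the fpstate union in thread_info, since the coprocessor save/restore path stores 64-bit quantities. A small sketch of expressing such a layout constraint with a compile-time check; the struct and size below are illustrative, not the kernel layout:

    #include <stdio.h>

    #define SAVE_AREA_SIZE 0x98     /* illustrative size, in bytes */

    struct coproc_state {
            unsigned int save[SAVE_AREA_SIZE / sizeof(unsigned int)];
    } __attribute__((aligned(8)));  /* 64-bit stores need 8-byte alignment */

    /* Fail the build if the size or alignment assumption is ever violated. */
    _Static_assert(sizeof(struct coproc_state) % 8 == 0, "size not a multiple of 8");
    _Static_assert(_Alignof(struct coproc_state) == 8, "must be 8-byte aligned");

    int main(void)
    {
            printf("size=%zu align=%zu\n",
                   sizeof(struct coproc_state), _Alignof(struct coproc_state));
            return 0;
    }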
diff --git a/kernel/fork.c b/kernel/fork.c index a8eab86de7f1..ccdfbb16c86d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1062,6 +1062,12 @@ static task_t *copy_process(unsigned long clone_flags, | |||
1062 | p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL; | 1062 | p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL; |
1063 | 1063 | ||
1064 | /* | 1064 | /* |
1065 | * sigaltstack should be cleared when sharing the same VM | ||
1066 | */ | ||
1067 | if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) | ||
1068 | p->sas_ss_sp = p->sas_ss_size = 0; | ||
1069 | |||
1070 | /* | ||
1065 | * Syscall tracing should be turned off in the child regardless | 1071 | * Syscall tracing should be turned off in the child regardless |
1066 | * of CLONE_PTRACE. | 1072 | * of CLONE_PTRACE. |
1067 | */ | 1073 | */ |
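The fork.c hunk clears the alternate signal stack fields when the child shares the parent's VM but is not a vfork child: `(clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM` is the usual bitmask idiom for "first flag set, second flag clear". A tiny sketch of the idiom with hypothetical flag values, not the CLONE_* constants:

    #include <stdio.h>

    #define FLAG_SHARED_VM  0x1
    #define FLAG_VFORK      0x2

    /* True only when SHARED_VM is set and VFORK is not. */
    static int shares_vm_without_vfork(unsigned long flags)
    {
            return (flags & (FLAG_SHARED_VM | FLAG_VFORK)) == FLAG_SHARED_VM;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   shares_vm_without_vfork(FLAG_SHARED_VM),               /* 1 */
                   shares_vm_without_vfork(FLAG_SHARED_VM | FLAG_VFORK),  /* 0 */
                   shares_vm_without_vfork(0));                           /* 0 */
            return 0;
    }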
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 954981b14303..2a8206009422 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -748,7 +748,7 @@ long do_mbind(unsigned long start, unsigned long len, | |||
748 | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) | 748 | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) |
749 | || mode > MPOL_MAX) | 749 | || mode > MPOL_MAX) |
750 | return -EINVAL; | 750 | return -EINVAL; |
751 | if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_RESOURCE)) | 751 | if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) |
752 | return -EPERM; | 752 | return -EPERM; |
753 | 753 | ||
754 | if (start & ~PAGE_MASK) | 754 | if (start & ~PAGE_MASK) |
@@ -942,20 +942,20 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode, | |||
942 | */ | 942 | */ |
943 | if ((current->euid != task->suid) && (current->euid != task->uid) && | 943 | if ((current->euid != task->suid) && (current->euid != task->uid) && |
944 | (current->uid != task->suid) && (current->uid != task->uid) && | 944 | (current->uid != task->suid) && (current->uid != task->uid) && |
945 | !capable(CAP_SYS_ADMIN)) { | 945 | !capable(CAP_SYS_NICE)) { |
946 | err = -EPERM; | 946 | err = -EPERM; |
947 | goto out; | 947 | goto out; |
948 | } | 948 | } |
949 | 949 | ||
950 | task_nodes = cpuset_mems_allowed(task); | 950 | task_nodes = cpuset_mems_allowed(task); |
951 | /* Is the user allowed to access the target nodes? */ | 951 | /* Is the user allowed to access the target nodes? */ |
952 | if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_ADMIN)) { | 952 | if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) { |
953 | err = -EPERM; | 953 | err = -EPERM; |
954 | goto out; | 954 | goto out; |
955 | } | 955 | } |
956 | 956 | ||
957 | err = do_migrate_pages(mm, &old, &new, | 957 | err = do_migrate_pages(mm, &old, &new, |
958 | capable(CAP_SYS_ADMIN) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); | 958 | capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); |
959 | out: | 959 | out: |
960 | mmput(mm); | 960 | mmput(mm); |
961 | return err; | 961 | return err; |
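The mempolicy hunk relaxes the privilege needed to move another task's pages from CAP_SYS_ADMIN / CAP_SYS_RESOURCE to CAP_SYS_NICE, and the same predicate decides whether MPOL_MF_MOVE_ALL or plain MPOL_MF_MOVE is used. A hedged sketch of gating a "move everything" mode on a single privilege check; the predicate below is a stand-in, not capable():

    #include <errno.h>
    #include <stdio.h>

    enum move_mode { MOVE_OWN_PAGES, MOVE_ALL_PAGES };

    static int do_migrate(enum move_mode mode)
    {
            return mode == MOVE_ALL_PAGES ? 2 : 1;  /* dummy result */
    }

    /* One privilege predicate gates both the permission check and whether
     * shared pages may be moved as well. */
    static int migrate_request(int privileged, int target_nodes_allowed)
    {
            if (!target_nodes_allowed && !privileged)
                    return -EPERM;
            return do_migrate(privileged ? MOVE_ALL_PAGES : MOVE_OWN_PAGES);
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   migrate_request(0, 0),   /* -EPERM                 */
                   migrate_request(0, 1),   /* own pages only         */
                   migrate_request(1, 0));  /* everything, privileged */
            return 0;
    }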
diff --git a/mm/vmscan.c b/mm/vmscan.c index 7ccf763bb30b..4fe7e3aa02e2 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -700,7 +700,7 @@ int migrate_page_remove_references(struct page *newpage, | |||
700 | * the page. | 700 | * the page. |
701 | */ | 701 | */ |
702 | if (!mapping || page_mapcount(page) + nr_refs != page_count(page)) | 702 | if (!mapping || page_mapcount(page) + nr_refs != page_count(page)) |
703 | return 1; | 703 | return -EAGAIN; |
704 | 704 | ||
705 | /* | 705 | /* |
706 | * Establish swap ptes for anonymous pages or destroy pte | 706 | * Establish swap ptes for anonymous pages or destroy pte |
@@ -721,13 +721,15 @@ int migrate_page_remove_references(struct page *newpage, | |||
721 | * If the page was not migrated then the PageSwapCache bit | 721 | * If the page was not migrated then the PageSwapCache bit |
722 | * is still set and the operation may continue. | 722 | * is still set and the operation may continue. |
723 | */ | 723 | */ |
724 | try_to_unmap(page, 1); | 724 | if (try_to_unmap(page, 1) == SWAP_FAIL) |
725 | /* A vma has VM_LOCKED set -> Permanent failure */ | ||
726 | return -EPERM; | ||
725 | 727 | ||
726 | /* | 728 | /* |
727 | * Give up if we were unable to remove all mappings. | 729 | * Give up if we were unable to remove all mappings. |
728 | */ | 730 | */ |
729 | if (page_mapcount(page)) | 731 | if (page_mapcount(page)) |
730 | return 1; | 732 | return -EAGAIN; |
731 | 733 | ||
732 | write_lock_irq(&mapping->tree_lock); | 734 | write_lock_irq(&mapping->tree_lock); |
733 | 735 | ||
@@ -738,7 +740,7 @@ int migrate_page_remove_references(struct page *newpage, | |||
738 | if (!page_mapping(page) || page_count(page) != nr_refs || | 740 | if (!page_mapping(page) || page_count(page) != nr_refs || |
739 | *radix_pointer != page) { | 741 | *radix_pointer != page) { |
740 | write_unlock_irq(&mapping->tree_lock); | 742 | write_unlock_irq(&mapping->tree_lock); |
741 | return 1; | 743 | return -EAGAIN; |
742 | } | 744 | } |
743 | 745 | ||
744 | /* | 746 | /* |
@@ -813,10 +815,14 @@ EXPORT_SYMBOL(migrate_page_copy); | |||
813 | */ | 815 | */ |
814 | int migrate_page(struct page *newpage, struct page *page) | 816 | int migrate_page(struct page *newpage, struct page *page) |
815 | { | 817 | { |
818 | int rc; | ||
819 | |||
816 | BUG_ON(PageWriteback(page)); /* Writeback must be complete */ | 820 | BUG_ON(PageWriteback(page)); /* Writeback must be complete */ |
817 | 821 | ||
818 | if (migrate_page_remove_references(newpage, page, 2)) | 822 | rc = migrate_page_remove_references(newpage, page, 2); |
819 | return -EAGAIN; | 823 | |
824 | if (rc) | ||
825 | return rc; | ||
820 | 826 | ||
821 | migrate_page_copy(newpage, page); | 827 | migrate_page_copy(newpage, page); |
822 | 828 | ||
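The vmscan hunk converts migrate_page_remove_references() from returning a bare 1 on failure to returning distinct errnos: -EAGAIN for transient conditions and -EPERM when try_to_unmap() hits a VM_LOCKED vma, so callers such as migrate_page() can relay the precise reason. A compact sketch of the distinct-errno style, where only the retryable code is retried; the helpers are hypothetical:

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical migration step: 0 = done, -EAGAIN = retry later,
     * -EPERM = permanent failure (e.g. an mlock()ed mapping). */
    static int try_migrate(int attempt, int locked)
    {
            if (locked)
                    return -EPERM;
            return (attempt < 2) ? -EAGAIN : 0;
    }

    static int migrate_with_retries(int locked)
    {
            int attempt, rc = -EAGAIN;

            for (attempt = 0; attempt < 5 && rc == -EAGAIN; attempt++)
                    rc = try_migrate(attempt, locked);
            return rc;      /* -EPERM is never retried */
    }

    int main(void)
    {
            printf("unlocked: %d, locked: %d\n",
                   migrate_with_retries(0), migrate_with_retries(1));
            return 0;
    }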
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile index 8bf6d9f6e9d3..905087e0d485 100644 --- a/net/bridge/netfilter/Makefile +++ b/net/bridge/netfilter/Makefile | |||
@@ -29,4 +29,4 @@ obj-$(CONFIG_BRIDGE_EBT_SNAT) += ebt_snat.o | |||
29 | 29 | ||
30 | # watchers | 30 | # watchers |
31 | obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o | 31 | obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o |
32 | obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_ulog.o | 32 | obj-$(CONFIG_BRIDGE_EBT_ULOG) += ebt_ulog.o |
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index 097bcea2129f..78b2d13e80e3 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c | |||
@@ -131,7 +131,7 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm, | |||
131 | a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == | 131 | a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == |
132 | (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)); | 132 | (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)); |
133 | qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) && | 133 | qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) && |
134 | (WLAN_FC_GET_STYPE(fc) & 0x08)); | 134 | (WLAN_FC_GET_STYPE(fc) & IEEE80211_STYPE_QOS_DATA)); |
135 | aad_len = 22; | 135 | aad_len = 22; |
136 | if (a4_included) | 136 | if (a4_included) |
137 | aad_len += 6; | 137 | aad_len += 6; |
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 6b8469da29b1..785d5a170a7f 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c | |||
@@ -1515,10 +1515,10 @@ static void ieee80211_process_probe_response(struct ieee80211_device | |||
1515 | 1515 | ||
1516 | if (is_beacon(beacon->header.frame_ctl)) { | 1516 | if (is_beacon(beacon->header.frame_ctl)) { |
1517 | if (ieee->handle_beacon != NULL) | 1517 | if (ieee->handle_beacon != NULL) |
1518 | ieee->handle_beacon(dev, beacon, &network); | 1518 | ieee->handle_beacon(dev, beacon, target); |
1519 | } else { | 1519 | } else { |
1520 | if (ieee->handle_probe_response != NULL) | 1520 | if (ieee->handle_probe_response != NULL) |
1521 | ieee->handle_probe_response(dev, beacon, &network); | 1521 | ieee->handle_probe_response(dev, beacon, target); |
1522 | } | 1522 | } |
1523 | } | 1523 | } |
1524 | 1524 | ||
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 57d290d89ec2..8ee4d016740d 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -847,10 +847,11 @@ int ip_append_data(struct sock *sk, | |||
847 | if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) && | 847 | if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) && |
848 | (rt->u.dst.dev->features & NETIF_F_UFO)) { | 848 | (rt->u.dst.dev->features & NETIF_F_UFO)) { |
849 | 849 | ||
850 | if(ip_ufo_append_data(sk, getfrag, from, length, hh_len, | 850 | err = ip_ufo_append_data(sk, getfrag, from, length, hh_len, |
851 | fragheaderlen, transhdrlen, mtu, flags)) | 851 | fragheaderlen, transhdrlen, mtu, |
852 | flags); | ||
853 | if (err) | ||
852 | goto error; | 854 | goto error; |
853 | |||
854 | return 0; | 855 | return 0; |
855 | } | 856 | } |
856 | 857 | ||
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index dd1048be8a01..7d7ab94a7a2e 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -771,7 +771,7 @@ static int get_entries(const struct arpt_get_entries *entries, | |||
771 | struct arpt_table *t; | 771 | struct arpt_table *t; |
772 | 772 | ||
773 | t = xt_find_table_lock(NF_ARP, entries->name); | 773 | t = xt_find_table_lock(NF_ARP, entries->name); |
774 | if (t || !IS_ERR(t)) { | 774 | if (t && !IS_ERR(t)) { |
775 | struct xt_table_info *private = t->private; | 775 | struct xt_table_info *private = t->private; |
776 | duprintf("t->private->number = %u\n", | 776 | duprintf("t->private->number = %u\n", |
777 | private->number); | 777 | private->number); |
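The arp_tables hunk fixes `if (t || !IS_ERR(t))`, which is true even for a NULL table, to `if (t && !IS_ERR(t))`. In the kernel's ERR_PTR convention a lookup can yield NULL, a valid pointer, or an encoded errno, and only the last two are non-NULL, so both tests are needed with &&. A small userspace imitation of the convention; IS_ERR/ERR_PTR are re-implemented here for illustration, not taken from kernel headers:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Minimal imitation of the kernel's ERR_PTR encoding. */
    static void *ERR_PTR(long err)      { return (void *)(uintptr_t)err; }
    static int   IS_ERR(const void *p)  { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

    static void *lookup(int which)
    {
            static int table = 42;

            switch (which) {
            case 0:  return NULL;           /* not found     */
            case 1:  return ERR_PTR(-2);    /* lookup failed */
            default: return &table;         /* valid table   */
            }
    }

    int main(void)
    {
            int which;

            for (which = 0; which < 3; which++) {
                    void *t = lookup(which);

                    /* Buggy: "t || !IS_ERR(t)" also accepts NULL, because
                     * NULL is not an ERR_PTR value and !IS_ERR(NULL) is true. */
                    int buggy = (t || !IS_ERR(t));
                    int fixed = (t && !IS_ERR(t));

                    printf("case %d: buggy=%d fixed=%d\n", which, buggy, fixed);
            }
            return 0;
    }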
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c index 63cf7e540847..e0e9d1383c7c 100644 --- a/net/ipv4/tcp_highspeed.c +++ b/net/ipv4/tcp_highspeed.c | |||
@@ -125,7 +125,7 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt, | |||
125 | /* Update AIMD parameters */ | 125 | /* Update AIMD parameters */ |
126 | if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) { | 126 | if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) { |
127 | while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && | 127 | while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && |
128 | ca->ai < HSTCP_AIMD_MAX) | 128 | ca->ai < HSTCP_AIMD_MAX - 1) |
129 | ca->ai++; | 129 | ca->ai++; |
130 | } else if (tp->snd_cwnd < hstcp_aimd_vals[ca->ai].cwnd) { | 130 | } else if (tp->snd_cwnd < hstcp_aimd_vals[ca->ai].cwnd) { |
131 | while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && | 131 | while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && |
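The tcp_highspeed hunk fixes an off-by-one: ca->ai indexes hstcp_aimd_vals[], so it may only be incremented while it is strictly below the last valid index, i.e. `< HSTCP_AIMD_MAX - 1`, not `< HSTCP_AIMD_MAX`. A minimal sketch of the bound with a hypothetical table:

    #include <stdio.h>

    #define TABLE_MAX 4

    static const unsigned int table[TABLE_MAX] = { 38, 118, 221, 347 };

    /* Advance idx while the value is still too small, never past the last slot. */
    static unsigned int advance(unsigned int idx, unsigned int value)
    {
            while (value > table[idx] && idx < TABLE_MAX - 1)
                    idx++;
            return idx;     /* always a valid index into table[] */
    }

    int main(void)
    {
            printf("%u\n", advance(0, 1000));   /* clamps at TABLE_MAX - 1 == 3 */
            return 0;
    }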
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a7623ead39a8..9f498a6c8895 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -1036,6 +1036,10 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_ | |||
1036 | 1036 | ||
1037 | limit = min(send_win, cong_win); | 1037 | limit = min(send_win, cong_win); |
1038 | 1038 | ||
1039 | /* If a full-sized TSO skb can be sent, do it. */ | ||
1040 | if (limit >= 65536) | ||
1041 | return 0; | ||
1042 | |||
1039 | if (sysctl_tcp_tso_win_divisor) { | 1043 | if (sysctl_tcp_tso_win_divisor) { |
1040 | u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); | 1044 | u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); |
1041 | 1045 | ||
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index b7d8822c1be4..19727d941962 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -822,7 +822,7 @@ struct ipv6_saddr_score { | |||
822 | int addr_type; | 822 | int addr_type; |
823 | unsigned int attrs; | 823 | unsigned int attrs; |
824 | int matchlen; | 824 | int matchlen; |
825 | unsigned int scope; | 825 | int scope; |
826 | unsigned int rule; | 826 | unsigned int rule; |
827 | }; | 827 | }; |
828 | 828 | ||
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index c7932cb420a5..84963749ab77 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c | |||
@@ -279,7 +279,7 @@ static int ah6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc | |||
279 | goto out; | 279 | goto out; |
280 | memcpy(tmp_hdr, skb->nh.raw, hdr_len); | 280 | memcpy(tmp_hdr, skb->nh.raw, hdr_len); |
281 | if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len)) | 281 | if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len)) |
282 | goto out; | 282 | goto free_out; |
283 | skb->nh.ipv6h->priority = 0; | 283 | skb->nh.ipv6h->priority = 0; |
284 | skb->nh.ipv6h->flow_lbl[0] = 0; | 284 | skb->nh.ipv6h->flow_lbl[0] = 0; |
285 | skb->nh.ipv6h->flow_lbl[1] = 0; | 285 | skb->nh.ipv6h->flow_lbl[1] = 0; |
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 4154f3a8b6cf..bb8ffb8a14c5 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c | |||
@@ -87,7 +87,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, | |||
87 | struct inet_timewait_sock **twp) | 87 | struct inet_timewait_sock **twp) |
88 | { | 88 | { |
89 | struct inet_hashinfo *hinfo = death_row->hashinfo; | 89 | struct inet_hashinfo *hinfo = death_row->hashinfo; |
90 | const struct inet_sock *inet = inet_sk(sk); | 90 | struct inet_sock *inet = inet_sk(sk); |
91 | const struct ipv6_pinfo *np = inet6_sk(sk); | 91 | const struct ipv6_pinfo *np = inet6_sk(sk); |
92 | const struct in6_addr *daddr = &np->rcv_saddr; | 92 | const struct in6_addr *daddr = &np->rcv_saddr; |
93 | const struct in6_addr *saddr = &np->daddr; | 93 | const struct in6_addr *saddr = &np->daddr; |
@@ -129,6 +129,10 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, | |||
129 | } | 129 | } |
130 | 130 | ||
131 | unique: | 131 | unique: |
132 | /* Must record num and sport now. Otherwise we will see | ||
133 | * in hash table socket with a funny identity. */ | ||
134 | inet->num = lport; | ||
135 | inet->sport = htons(lport); | ||
132 | BUG_TRAP(sk_unhashed(sk)); | 136 | BUG_TRAP(sk_unhashed(sk)); |
133 | __sk_add_node(sk, &head->chain); | 137 | __sk_add_node(sk, &head->chain); |
134 | sk->sk_hash = hash; | 138 | sk->sk_hash = hash; |
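The inet6_hashtables hunk records inet->num and inet->sport before the socket is linked into the hash chain, so other CPUs walking the chain never observe an entry with an unset local port. A hedged sketch of the "initialize fully, then publish" ordering with a mutex-protected list; the struct below is hypothetical, not struct inet_sock:

    #include <pthread.h>
    #include <stdio.h>

    struct entry {
            unsigned short port;    /* must be valid before the entry is visible */
            struct entry *next;
    };

    static struct entry *hash_head;
    static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;

    static void publish(struct entry *e, unsigned short port)
    {
            /* Fill in every field other threads will look at... */
            e->port = port;

            /* ...and only then make the entry reachable. */
            pthread_mutex_lock(&hash_lock);
            e->next = hash_head;
            hash_head = e;
            pthread_mutex_unlock(&hash_lock);
    }

    int main(void)
    {
            struct entry e;

            publish(&e, 4711);
            printf("%u\n", hash_head->port);
            return 0;
    }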
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index f999edd846a9..5bf70b1442ea 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -944,10 +944,11 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
944 | if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) && | 944 | if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) && |
945 | (rt->u.dst.dev->features & NETIF_F_UFO)) { | 945 | (rt->u.dst.dev->features & NETIF_F_UFO)) { |
946 | 946 | ||
947 | if(ip6_ufo_append_data(sk, getfrag, from, length, hh_len, | 947 | err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len, |
948 | fragheaderlen, transhdrlen, mtu, flags)) | 948 | fragheaderlen, transhdrlen, mtu, |
949 | flags); | ||
950 | if (err) | ||
949 | goto error; | 951 | goto error; |
950 | |||
951 | return 0; | 952 | return 0; |
952 | } | 953 | } |
953 | 954 | ||
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index cac38b2e147a..2cf5fb8322c4 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -928,8 +928,12 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, | |||
928 | 928 | ||
929 | if (nfqa[NFQA_CFG_PARAMS-1]) { | 929 | if (nfqa[NFQA_CFG_PARAMS-1]) { |
930 | struct nfqnl_msg_config_params *params; | 930 | struct nfqnl_msg_config_params *params; |
931 | params = NFA_DATA(nfqa[NFQA_CFG_PARAMS-1]); | ||
932 | 931 | ||
932 | if (!queue) { | ||
933 | ret = -ENOENT; | ||
934 | goto out_put; | ||
935 | } | ||
936 | params = NFA_DATA(nfqa[NFQA_CFG_PARAMS-1]); | ||
933 | nfqnl_set_mode(queue, params->copy_mode, | 937 | nfqnl_set_mode(queue, params->copy_mode, |
934 | ntohl(params->copy_range)); | 938 | ntohl(params->copy_range)); |
935 | } | 939 | } |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 6b9772d95872..59dc7d140600 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -1194,6 +1194,9 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1194 | msg->msg_namelen = sizeof(*addr); | 1194 | msg->msg_namelen = sizeof(*addr); |
1195 | } | 1195 | } |
1196 | 1196 | ||
1197 | if (nlk->flags & NETLINK_RECV_PKTINFO) | ||
1198 | netlink_cmsg_recv_pktinfo(msg, skb); | ||
1199 | |||
1197 | if (NULL == siocb->scm) { | 1200 | if (NULL == siocb->scm) { |
1198 | memset(&scm, 0, sizeof(scm)); | 1201 | memset(&scm, 0, sizeof(scm)); |
1199 | siocb->scm = &scm; | 1202 | siocb->scm = &scm; |
@@ -1205,8 +1208,6 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1205 | netlink_dump(sk); | 1208 | netlink_dump(sk); |
1206 | 1209 | ||
1207 | scm_recv(sock, msg, siocb->scm, flags); | 1210 | scm_recv(sock, msg, siocb->scm, flags); |
1208 | if (nlk->flags & NETLINK_RECV_PKTINFO) | ||
1209 | netlink_cmsg_recv_pktinfo(msg, skb); | ||
1210 | 1211 | ||
1211 | out: | 1212 | out: |
1212 | netlink_rcv_wake(sk); | 1213 | netlink_rcv_wake(sk); |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 792ce59940ec..2ffa11c6e8de 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -707,7 +707,7 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event, | |||
707 | 707 | ||
708 | rtattr_failure: | 708 | rtattr_failure: |
709 | nlmsg_failure: | 709 | nlmsg_failure: |
710 | skb_trim(skb, b - skb->data); | 710 | kfree_skb(skb); |
711 | return -1; | 711 | return -1; |
712 | } | 712 | } |
713 | 713 | ||
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index d2f0550c4ba0..d78479782045 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -113,7 +113,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname, | |||
113 | 113 | ||
114 | err = -EINVAL; | 114 | err = -EINVAL; |
115 | if (!xprt) | 115 | if (!xprt) |
116 | goto out_err; | 116 | goto out_no_xprt; |
117 | if (vers >= program->nrvers || !(version = program->version[vers])) | 117 | if (vers >= program->nrvers || !(version = program->version[vers])) |
118 | goto out_err; | 118 | goto out_err; |
119 | 119 | ||
@@ -182,6 +182,7 @@ out_no_path: | |||
182 | kfree(clnt); | 182 | kfree(clnt); |
183 | out_err: | 183 | out_err: |
184 | xprt_destroy(xprt); | 184 | xprt_destroy(xprt); |
185 | out_no_xprt: | ||
185 | return ERR_PTR(err); | 186 | return ERR_PTR(err); |
186 | } | 187 | } |
187 | 188 | ||
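The clnt.c hunk adds an out_no_xprt label so a NULL transport skips xprt_destroy() on the way out; each error label should undo only what was actually acquired before the jump. A compact userspace version of such a goto-cleanup ladder with hypothetical resources, mirroring the label ordering rather than rpc_new_client() itself:

    #include <stdlib.h>

    struct transport { int dummy; };
    struct client    { struct transport *xprt; };

    static struct client *new_client(struct transport *xprt, int bad_version)
    {
            struct client *clnt;

            if (!xprt)
                    goto out_no_xprt;       /* nothing acquired yet           */
            if (bad_version)
                    goto out_err;           /* must still release the transport */

            clnt = calloc(1, sizeof(*clnt));
            if (!clnt)
                    goto out_err;
            clnt->xprt = xprt;
            return clnt;

    out_err:
            free(xprt);                     /* stands in for xprt_destroy()   */
    out_no_xprt:
            return NULL;                    /* the kernel version returns ERR_PTR(err) */
    }

    int main(void)
    {
            struct client *c = new_client(calloc(1, sizeof(struct transport)), 0);

            if (c) {
                    free(c->xprt);
                    free(c);
            }
            return 0;
    }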
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 802d4fe0f55c..e838d042f7f5 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -515,16 +515,14 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue) | |||
515 | */ | 515 | */ |
516 | void rpc_wake_up(struct rpc_wait_queue *queue) | 516 | void rpc_wake_up(struct rpc_wait_queue *queue) |
517 | { | 517 | { |
518 | struct rpc_task *task; | 518 | struct rpc_task *task, *next; |
519 | |||
520 | struct list_head *head; | 519 | struct list_head *head; |
520 | |||
521 | spin_lock_bh(&queue->lock); | 521 | spin_lock_bh(&queue->lock); |
522 | head = &queue->tasks[queue->maxpriority]; | 522 | head = &queue->tasks[queue->maxpriority]; |
523 | for (;;) { | 523 | for (;;) { |
524 | while (!list_empty(head)) { | 524 | list_for_each_entry_safe(task, next, head, u.tk_wait.list) |
525 | task = list_entry(head->next, struct rpc_task, u.tk_wait.list); | ||
526 | __rpc_wake_up_task(task); | 525 | __rpc_wake_up_task(task); |
527 | } | ||
528 | if (head == &queue->tasks[0]) | 526 | if (head == &queue->tasks[0]) |
529 | break; | 527 | break; |
530 | head--; | 528 | head--; |
@@ -541,14 +539,13 @@ void rpc_wake_up(struct rpc_wait_queue *queue) | |||
541 | */ | 539 | */ |
542 | void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) | 540 | void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) |
543 | { | 541 | { |
542 | struct rpc_task *task, *next; | ||
544 | struct list_head *head; | 543 | struct list_head *head; |
545 | struct rpc_task *task; | ||
546 | 544 | ||
547 | spin_lock_bh(&queue->lock); | 545 | spin_lock_bh(&queue->lock); |
548 | head = &queue->tasks[queue->maxpriority]; | 546 | head = &queue->tasks[queue->maxpriority]; |
549 | for (;;) { | 547 | for (;;) { |
550 | while (!list_empty(head)) { | 548 | list_for_each_entry_safe(task, next, head, u.tk_wait.list) { |
551 | task = list_entry(head->next, struct rpc_task, u.tk_wait.list); | ||
552 | task->tk_status = status; | 549 | task->tk_status = status; |
553 | __rpc_wake_up_task(task); | 550 | __rpc_wake_up_task(task); |
554 | } | 551 | } |
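The sched.c hunks replace the "while the list is not empty, take the first entry" loops with list_for_each_entry_safe(), which caches the next element before __rpc_wake_up_task() potentially removes the current one from the queue. A self-contained sketch of safe iteration over a singly linked list whose visitor frees each node; list_for_each_entry_safe itself is a kernel macro, so this is only an analogue of the pattern:

    #include <stdio.h>
    #include <stdlib.h>

    struct task {
            int id;
            struct task *next;
    };

    /* The visitor removes/frees the node, so the caller must not touch it again. */
    static void wake_and_free(struct task *t)
    {
            printf("waking task %d\n", t->id);
            free(t);
    }

    static void wake_all(struct task *head)
    {
            struct task *t, *next;

            for (t = head; t; t = next) {
                    next = t->next;         /* save the successor first        */
                    wake_and_free(t);       /* now t may safely disappear      */
            }
    }

    int main(void)
    {
            struct task *head = NULL;
            int i;

            for (i = 0; i < 3; i++) {
                    struct task *t = malloc(sizeof(*t));

                    t->id = i;
                    t->next = head;
                    head = t;
            }
            wake_all(head);
            return 0;
    }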