78 files changed, 945 insertions, 453 deletions
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c index 24c5aade8998..cbee3a27f768 100644 --- a/Documentation/accounting/getdelays.c +++ b/Documentation/accounting/getdelays.c | |||
@@ -196,7 +196,7 @@ void print_delayacct(struct taskstats *t) | |||
196 | "IO %15s%15s\n" | 196 | "IO %15s%15s\n" |
197 | " %15llu%15llu\n" | 197 | " %15llu%15llu\n" |
198 | "MEM %15s%15s\n" | 198 | "MEM %15s%15s\n" |
199 | " %15llu%15llu\n" | 199 | " %15llu%15llu\n", |
200 | "count", "real total", "virtual total", "delay total", | 200 | "count", "real total", "virtual total", "delay total", |
201 | t->cpu_count, t->cpu_run_real_total, t->cpu_run_virtual_total, | 201 | t->cpu_count, t->cpu_run_real_total, t->cpu_run_virtual_total, |
202 | t->cpu_delay_total, | 202 | t->cpu_delay_total, |
diff --git a/Documentation/vm/numa_memory_policy.txt b/Documentation/vm/numa_memory_policy.txt new file mode 100644 index 000000000000..8242f52d0f22 --- /dev/null +++ b/Documentation/vm/numa_memory_policy.txt | |||
@@ -0,0 +1,332 @@ | |||
1 | |||
2 | What is Linux Memory Policy? | ||
3 | |||
4 | In the Linux kernel, "memory policy" determines from which node the kernel will | ||
5 | allocate memory in a NUMA system or in an emulated NUMA system. Linux has | ||
6 | supported platforms with Non-Uniform Memory Access architectures since 2.4.?. | ||
7 | The current memory policy support was added to Linux 2.6 around May 2004. This | ||
8 | document attempts to describe the concepts and APIs of the 2.6 memory policy | ||
9 | support. | ||
10 | |||
11 | Memory policies should not be confused with cpusets (Documentation/cpusets.txt) | ||
12 | which is an administrative mechanism for restricting the nodes from which | ||
13 | memory may be allocated by a set of processes. Memory policies are a | ||
14 | programming interface that a NUMA-aware application can take advantage of. When | ||
15 | both cpusets and policies are applied to a task, the restrictions of the cpuset | ||
16 | take priority. See "MEMORY POLICIES AND CPUSETS" below for more details. | ||
17 | |||
18 | MEMORY POLICY CONCEPTS | ||
19 | |||
20 | Scope of Memory Policies | ||
21 | |||
22 | The Linux kernel supports _scopes_ of memory policy, described here from | ||
23 | most general to most specific: | ||
24 | |||
25 | System Default Policy: this policy is "hard coded" into the kernel. It | ||
26 | is the policy that governs all page allocations that aren't controlled | ||
27 | by one of the more specific policy scopes discussed below. When the | ||
28 | system is "up and running", the system default policy will use "local | ||
29 | allocation" described below. However, during boot up, the system | ||
30 | default policy will be set to interleave allocations across all nodes | ||
31 | with "sufficient" memory, so as not to overload the initial boot node | ||
32 | with boot-time allocations. | ||
33 | |||
34 | Task/Process Policy: this is an optional, per-task policy. When defined | ||
35 | for a specific task, this policy controls all page allocations made by or | ||
36 | on behalf of the task that aren't controlled by a more specific scope. | ||
37 | If a task does not define a task policy, then all page allocations that | ||
38 | would have been controlled by the task policy "fall back" to the System | ||
39 | Default Policy. | ||
40 | |||
41 | The task policy applies to the entire address space of a task. Thus, | ||
42 | it is inheritable, and indeed is inherited, across both fork() | ||
43 | [clone() w/o the CLONE_VM flag] and exec*(). This allows a parent task | ||
44 | to establish the task policy for a child task exec()'d from an | ||
45 | executable image that has no awareness of memory policy. See the | ||
46 | MEMORY POLICY APIs section, below, for an overview of the system call | ||
47 | that a task may use to set/change its task/process policy. | ||
48 | |||
49 | In a multi-threaded task, task policies apply only to the thread | ||
50 | [Linux kernel task] that installs the policy and any threads | ||
51 | subsequently created by that thread. Any sibling threads existing | ||
52 | at the time a new task policy is installed retain their current | ||
53 | policy. | ||
54 | |||
55 | A task policy applies only to pages allocated after the policy is | ||
56 | installed. Any pages already faulted in by the task when the task | ||
57 | changes its task policy remain where they were allocated based on | ||
58 | the policy at the time they were allocated. | ||
59 | |||
60 | VMA Policy: A "VMA" or "Virtual Memory Area" refers to a range of a task's | ||
61 | virtual address space. A task may define a specific policy for a range | ||
62 | of its virtual address space. See the MEMORY POLICY APIs section, | ||
63 | below, for an overview of the mbind() system call used to set a VMA | ||
64 | policy. | ||
65 | |||
66 | A VMA policy will govern the allocation of pages that back this region of | ||
67 | the address space. Any regions of the task's address space that don't | ||
68 | have an explicit VMA policy will fall back to the task policy, which may | ||
69 | itself fall back to the System Default Policy. | ||
70 | |||
71 | VMA policies have a few complicating details: | ||
72 | |||
73 | VMA policy applies ONLY to anonymous pages. These include pages | ||
74 | allocated for anonymous segments, such as the task stack and heap, and | ||
75 | any regions of the address space mmap()ed with the MAP_ANONYMOUS flag. | ||
76 | If a VMA policy is applied to a file mapping, it will be ignored if | ||
77 | the mapping used the MAP_SHARED flag. If the file mapping used the | ||
78 | MAP_PRIVATE flag, the VMA policy will only be applied when an | ||
79 | anonymous page is allocated on an attempt to write to the mapping-- | ||
80 | i.e., at Copy-On-Write. | ||
81 | |||
82 | VMA policies are shared between all tasks that share a virtual address | ||
83 | space--a.k.a. threads--independent of when the policy is installed; and | ||
84 | they are inherited across fork(). However, because VMA policies refer | ||
85 | to a specific region of a task's address space, and because the address | ||
86 | space is discarded and recreated on exec*(), VMA policies are NOT | ||
87 | inheritable across exec(). Thus, only NUMA-aware applications may | ||
88 | use VMA policies. | ||
89 | |||
90 | A task may install a new VMA policy on a sub-range of a previously | ||
91 | mmap()ed region. When this happens, Linux splits the existing virtual | ||
92 | memory area into 2 or 3 VMAs, each with its own policy. | ||
93 | |||
94 | By default, VMA policy applies only to pages allocated after the policy | ||
95 | is installed. Any pages already faulted into the VMA range remain | ||
96 | where they were allocated based on the policy at the time they were | ||
97 | allocated. However, since 2.6.16, Linux supports page migration via | ||
98 | the mbind() system call, so that page contents can be moved to match | ||
99 | a newly installed policy. | ||
100 | |||
101 | Shared Policy: Conceptually, shared policies apply to "memory objects" | ||
102 | mapped shared into one or more tasks' distinct address spaces. An | ||
103 | application installs shared policies the same way as VMA policies--using | ||
104 | the mbind() system call specifying a range of virtual addresses that map | ||
105 | the shared object. However, unlike VMA policies, which can be considered | ||
106 | to be an attribute of a range of a task's address space, shared policies | ||
107 | apply directly to the shared object. Thus, all tasks that attach to the | ||
108 | object share the policy, and all pages allocated for the shared object, | ||
109 | by any task, will obey the shared policy. | ||
110 | |||
111 | As of 2.6.22, only shared memory segments, created by shmget() or | ||
112 | mmap(MAP_ANONYMOUS|MAP_SHARED), support shared policy. When shared | ||
113 | policy support was added to Linux, the associated data structures were | ||
114 | added to hugetlbfs shmem segments. At the time, hugetlbfs did not | ||
115 | support allocation at fault time--a.k.a. lazy allocation--so hugetlbfs | ||
116 | shmem segments were never "hooked up" to the shared policy support. | ||
117 | Although hugetlbfs segments now support lazy allocation, their support | ||
118 | for shared policy has not been completed. | ||
119 | |||
120 | As mentioned above [re: VMA policies], allocations of page cache | ||
121 | pages for regular files mmap()ed with MAP_SHARED ignore any VMA | ||
122 | policy installed on the virtual address range backed by the shared | ||
123 | file mapping. Rather, shared page cache pages, including pages backing | ||
124 | private mappings that have not yet been written by the task, follow | ||
125 | task policy, if any, else System Default Policy. | ||
126 | |||
127 | The shared policy infrastructure supports different policies on subset | ||
128 | ranges of the shared object. However, Linux still splits the VMA of | ||
129 | the task that installs the policy for each range of distinct policy. | ||
130 | Thus, different tasks that attach to a shared memory segment can have | ||
131 | different VMA configurations mapping that one shared object. This | ||
132 | can be seen by examining the /proc/<pid>/numa_maps of tasks sharing | ||
133 | a shared memory region, when one task has installed shared policy on | ||
134 | one or more ranges of the region. | ||
135 | |||
136 | Components of Memory Policies | ||
137 | |||
138 | A Linux memory policy is a tuple consisting of a "mode" and an optional set | ||
139 | of nodes. The mode determines the behavior of the policy, while the | ||
140 | optional set of nodes can be viewed as the arguments to the behavior. | ||
141 | |||
142 | Internally, memory policies are implemented by a reference counted | ||
143 | structure, struct mempolicy. Details of this structure will be discussed | ||
144 | in context, below, as required to explain the behavior. | ||
145 | |||
146 | Note: in some functions AND in the struct mempolicy itself, the mode | ||
147 | is called "policy". However, to avoid confusion with the policy tuple, | ||
148 | this document will continue to use the term "mode". | ||
149 | |||
150 | Linux memory policy supports the following 4 behavioral modes: | ||
151 | |||
152 | Default Mode--MPOL_DEFAULT: The behavior specified by this mode is | ||
153 | context or scope dependent. | ||
154 | |||
155 | As mentioned in the Policy Scope section above, during normal | ||
156 | system operation, the System Default Policy is hard coded to | ||
157 | contain the Default mode. | ||
158 | |||
159 | In this context, default mode means "local" allocation--that is, | ||
160 | attempt to allocate the page from the node associated with the cpu | ||
161 | where the fault occurs. If the "local" node has no memory, or the | ||
162 | node's memory is exhausted [no free pages available], local | ||
163 | allocation will "fallback to"--attempt to allocate pages from-- | ||
164 | "nearby" nodes, in order of increasing "distance". | ||
165 | |||
166 | Implementation detail -- subject to change: "Fallback" uses | ||
167 | a per node list of sibling nodes--called zonelists--built at | ||
168 | boot time, or when nodes or memory are added or removed from | ||
169 | the system [memory hotplug]. These per node zonelists are | ||
170 | constructed with nodes in order of increasing distance based | ||
171 | on information provided by the platform firmware. | ||
172 | |||
173 | When a task/process policy or a shared policy contains the Default | ||
174 | mode, this also means "local allocation", as described above. | ||
175 | |||
176 | In the context of a VMA, Default mode means "fall back to task | ||
177 | policy"--which may or may not specify Default mode. Thus, Default | ||
178 | mode can not be counted on to mean local allocation when used | ||
179 | on a non-shared region of the address space. However, see | ||
180 | MPOL_PREFERRED below. | ||
181 | |||
182 | The Default mode does not use the optional set of nodes. | ||
183 | |||
184 | MPOL_BIND: This mode specifies that memory must come from the | ||
185 | set of nodes specified by the policy. | ||
186 | |||
187 | The memory policy APIs do not specify an order in which the nodes | ||
188 | will be searched. However, unlike "local allocation", the Bind | ||
189 | policy does not consider the distance between the nodes. Rather, | ||
190 | allocations will fall back to the nodes specified by the policy in | ||
191 | order of numeric node id. Like everything in Linux, this is subject | ||
192 | to change. | ||
193 | |||
194 | MPOL_PREFERRED: This mode specifies that the allocation should be | ||
195 | attempted from the single node specified in the policy. If that | ||
196 | allocation fails, the kernel will search other nodes, exactly as | ||
197 | it would for a local allocation that started at the preferred node | ||
198 | in increasing distance from the preferred node. "Local" allocation | ||
199 | policy can be viewed as a Preferred policy that starts at the node | ||
200 | containing the cpu where the allocation takes place. | ||
201 | |||
202 | Internally, the Preferred policy uses a single node--the | ||
203 | preferred_node member of struct mempolicy. A "distinguished" | ||
204 | value of this preferred_node, currently '-1', is interpreted | ||
205 | as "the node containing the cpu where the allocation takes | ||
206 | place"--local allocation. This is the way to specify | ||
207 | local allocation for a specific range of addresses--i.e. for | ||
208 | VMA policies. | ||
209 | |||
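For illustration, a minimal userspace sketch of requesting local allocation
for a specific address range. It assumes the numaif.h header and syscall
wrappers from the numactl package (link with -lnuma), and that 'start' is
page aligned; passing MPOL_PREFERRED with an empty node mask is the
conventional way, per the mbind(2)/set_mempolicy(2) man pages, to ask for
the distinguished "local" value:

        #include <numaif.h>     /* from the numactl package, not the kernel tree */
        #include <stdio.h>

        /* Ask for local allocation on [start, start + len).
         * 'start' must be page aligned; error handling is minimal. */
        static void prefer_local(void *start, unsigned long len)
        {
                unsigned long empty_mask = 0;

                if (mbind(start, len, MPOL_PREFERRED, &empty_mask,
                          sizeof(empty_mask) * 8, 0) != 0)
                        perror("mbind(MPOL_PREFERRED)");
        }
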
210 | MPOL_INTERLEAVED: This mode specifies that page allocations be | ||
211 | interleaved, on a page granularity, across the nodes specified in | ||
212 | the policy. This mode also behaves slightly differently, based on | ||
213 | the context where it is used: | ||
214 | |||
215 | For allocation of anonymous pages and shared memory pages, | ||
216 | Interleave mode indexes the set of nodes specified by the policy | ||
217 | using the page offset of the faulting address into the segment | ||
218 | [VMA] containing the address modulo the number of nodes specified | ||
219 | by the policy. It then attempts to allocate a page, starting at | ||
220 | the selected node, as if the node had been specified by a Preferred | ||
221 | policy or had been selected by a local allocation. That is, | ||
222 | allocation will follow the per node zonelist. | ||
223 | |||
224 | For allocation of page cache pages, Interleave mode indexes the set | ||
225 | of nodes specified by the policy using a node counter maintained | ||
226 | per task. This counter wraps around to the lowest specified node | ||
227 | after it reaches the highest specified node. This will tend to | ||
228 | spread the pages out over the nodes specified by the policy based | ||
229 | on the order in which they are allocated, rather than based on any | ||
230 | page offset into an address range or file. During system boot up, | ||
231 | the temporary interleaved system default policy works in this | ||
232 | mode. | ||
233 | |||
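As a rough sketch of the index calculation described above for anonymous and
shared memory pages (illustrative pseudocode only, not the kernel's actual
implementation; all names here are invented):

        /* Illustrative only: choose an interleave node for a fault at
         * 'addr' inside a VMA starting at 'vma_start'.  'nodes' is the
         * ordered array of node ids in the policy, 'nnodes' its length. */
        static int interleave_node(const int *nodes, int nnodes,
                                   unsigned long vma_start, unsigned long addr,
                                   unsigned long page_size)
        {
                unsigned long page_offset = (addr - vma_start) / page_size;

                return nodes[page_offset % nnodes];
        }
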
234 | MEMORY POLICY APIs | ||
235 | |||
236 | Linux supports 3 system calls for controlling memory policy. These APIs | ||
237 | always affect only the calling task, the calling task's address space, or | ||
238 | some shared object mapped into the calling task's address space. | ||
239 | |||
240 | Note: the headers that define these APIs and the parameter data types | ||
241 | for user space applications reside in a package that is not part of | ||
242 | the Linux kernel. The kernel system call interfaces, with the 'sys_' | ||
243 | prefix, are defined in <linux/syscalls.h>; the mode and flag | ||
244 | definitions are defined in <linux/mempolicy.h>. | ||
245 | |||
246 | Set [Task] Memory Policy: | ||
247 | |||
248 | long set_mempolicy(int mode, const unsigned long *nmask, | ||
249 | unsigned long maxnode); | ||
250 | |||
251 | Sets the calling task's "task/process memory policy" to the mode | ||
252 | specified by the 'mode' argument and the set of nodes defined | ||
253 | by 'nmask'. 'nmask' points to a bit mask of node ids containing | ||
254 | at least 'maxnode' ids. | ||
255 | |||
256 | See the set_mempolicy(2) man page for more details | ||
257 | |||
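A minimal userspace sketch of binding all subsequent allocations of the
calling task to nodes 0 and 1. It assumes the numaif.h header and wrappers
from the numactl package (link with -lnuma) and that both nodes exist and
are allowed by the caller's cpuset:

        #include <numaif.h>     /* from the numactl package, not the kernel tree */
        #include <stdio.h>
        #include <stdlib.h>

        int main(void)
        {
                /* Bit mask of node ids: bits 0 and 1 set. */
                unsigned long nodemask = (1UL << 0) | (1UL << 1);

                if (set_mempolicy(MPOL_BIND, &nodemask,
                                  sizeof(nodemask) * 8) != 0) {
                        perror("set_mempolicy");
                        exit(1);
                }
                /* All further page allocations for this task now come
                 * from nodes 0 and 1 (subject to cpuset constraints). */
                return 0;
        }
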
258 | |||
259 | Get [Task] Memory Policy or Related Information | ||
260 | |||
261 | long get_mempolicy(int *mode, | ||
262 | const unsigned long *nmask, unsigned long maxnode, | ||
263 | void *addr, int flags); | ||
264 | |||
265 | Queries the "task/process memory policy" of the calling task, or | ||
266 | the policy or location of a specified virtual address, depending | ||
267 | on the 'flags' argument. | ||
268 | |||
269 | See the get_mempolicy(2) man page for more details | ||
270 | |||
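A sketch of querying where a particular page resides, using the
MPOL_F_NODE|MPOL_F_ADDR flags documented in get_mempolicy(2) (same numactl
header assumption as above):

        #include <numaif.h>
        #include <stdio.h>

        /* Print the node backing the page at 'addr'.
         * Sketch only: errors simply fall through to perror(). */
        static void show_node_of(void *addr)
        {
                int node;

                if (get_mempolicy(&node, NULL, 0, addr,
                                  MPOL_F_NODE | MPOL_F_ADDR) == 0)
                        printf("%p is on node %d\n", addr, node);
                else
                        perror("get_mempolicy");
        }
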
271 | |||
272 | Install VMA/Shared Policy for a Range of Task's Address Space | ||
273 | |||
274 | long mbind(void *start, unsigned long len, int mode, | ||
275 | const unsigned long *nmask, unsigned long maxnode, | ||
276 | unsigned flags); | ||
277 | |||
278 | mbind() installs the policy specified by (mode, nmask, maxnode) as | ||
279 | a VMA policy for the range of the calling task's address space | ||
280 | specified by the 'start' and 'len' arguments. Additional actions | ||
281 | may be requested via the 'flags' argument. | ||
282 | |||
283 | See the mbind(2) man page for more details. | ||
284 | |||
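A sketch of the VMA/shared-policy case: interleave an anonymous shared
mapping across nodes 0-3 before touching it (again assuming numaif.h from
the numactl package, that those nodes exist and are allowed by the caller's
cpuset, and that 'len' is a multiple of the page size):

        #include <numaif.h>
        #include <sys/mman.h>
        #include <stdio.h>

        /* Map 'len' bytes of anonymous shared memory and interleave its
         * pages across nodes 0-3.  Sketch only. */
        static void *map_interleaved(size_t len)
        {
                unsigned long nodemask = 0xf;   /* nodes 0,1,2,3 */
                void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                               MAP_ANONYMOUS | MAP_SHARED, -1, 0);

                if (p == MAP_FAILED)
                        return NULL;
                if (mbind(p, len, MPOL_INTERLEAVE, &nodemask,
                          sizeof(nodemask) * 8, 0) != 0)
                        perror("mbind(MPOL_INTERLEAVE)");
                return p;
        }
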
285 | MEMORY POLICY COMMAND LINE INTERFACE | ||
286 | |||
287 | Although not strictly part of the Linux implementation of memory policy, | ||
288 | a command line tool, numactl(8), exists that allows one to: | ||
289 | |||
290 | + set the task policy for a specified program via set_mempolicy(2), fork(2) and | ||
291 | exec(2) | ||
292 | |||
293 | + set the shared policy for a shared memory segment via mbind(2) | ||
294 | |||
295 | The numactl(8) tool is packaged with the run-time version of the library | ||
296 | containing the memory policy system call wrappers. Some distributions | ||
297 | package the headers and compile-time libraries in a separate development | ||
298 | package. | ||
299 | |||
300 | |||
301 | MEMORY POLICIES AND CPUSETS | ||
302 | |||
303 | Memory policies work within cpusets as described above. For memory policies | ||
304 | that require a node or set of nodes, the nodes are restricted to the set of | ||
305 | nodes whose memories are allowed by the cpuset constraints. If the | ||
306 | intersection of the set of nodes specified for the policy and the set of nodes | ||
307 | allowed by the cpuset is the empty set, the policy is considered invalid and | ||
308 | cannot be installed. | ||
309 | |||
310 | The interaction of memory policies and cpusets can be problematic for a | ||
311 | couple of reasons: | ||
312 | |||
313 | 1) the memory policy APIs take physical node ids as arguments. However, the | ||
314 | memory policy APIs do not provide a way to determine what nodes are valid | ||
315 | in the context where the application is running. An application MAY consult | ||
316 | the cpuset file system [directly or via an out of tree, and not generally | ||
317 | available, libcpuset API] to obtain this information, but then the | ||
318 | application must be aware that it is running in a cpuset and use what are | ||
319 | intended primarily as administrative APIs. | ||
320 | |||
321 | However, as long as the policy specifies at least one node that is valid | ||
322 | in the controlling cpuset, the policy can be used. | ||
323 | |||
324 | 2) when tasks in two cpusets share access to a memory region, such as shared | ||
325 | memory segments created by shmget() or mmap() with the MAP_ANONYMOUS and | ||
326 | MAP_SHARED flags, and any of the tasks install shared policy on the region, | ||
327 | only nodes whose memories are allowed in both cpusets may be used in the | ||
328 | policies. Again, obtaining this information requires "stepping outside" | ||
329 | the memory policy APIs, as well as knowing in what cpusets other tasks might | ||
330 | be attaching to the shared region, to use the cpuset information. | ||
331 | Furthermore, if the cpusets' allowed memory sets are disjoint, "local" | ||
332 | allocation is the only valid policy. | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 371fe67a4eef..abe5fa7f9c33 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -3452,7 +3452,7 @@ S: Maintained | |||
3452 | 3452 | ||
3453 | TPM DEVICE DRIVER | 3453 | TPM DEVICE DRIVER |
3454 | P: Kylene Hall | 3454 | P: Kylene Hall |
3455 | M: kjhall@us.ibm.com | 3455 | M: tpmdd-devel@lists.sourceforge.net |
3456 | W: http://tpmdd.sourceforge.net | 3456 | W: http://tpmdd.sourceforge.net |
3457 | P: Marcel Selhorst | 3457 | P: Marcel Selhorst |
3458 | M: tpm@selhorst.net | 3458 | M: tpm@selhorst.net |
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c index e061b63a0038..dfbe7ab9ffe2 100644 --- a/arch/h8300/kernel/process.c +++ b/arch/h8300/kernel/process.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/a.out.h> | 37 | #include <linux/a.out.h> |
38 | #include <linux/interrupt.h> | 38 | #include <linux/interrupt.h> |
39 | #include <linux/reboot.h> | 39 | #include <linux/reboot.h> |
40 | #include <linux/fs.h> | ||
40 | 41 | ||
41 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
42 | #include <asm/system.h> | 43 | #include <asm/system.h> |
diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c index de7688cfd573..ddc62727dc9f 100644 --- a/arch/h8300/kernel/sys_h8300.c +++ b/arch/h8300/kernel/sys_h8300.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/mman.h> | 18 | #include <linux/mman.h> |
19 | #include <linux/file.h> | 19 | #include <linux/file.h> |
20 | #include <linux/utsname.h> | 20 | #include <linux/utsname.h> |
21 | #include <linux/fs.h> | ||
21 | 22 | ||
22 | #include <asm/setup.h> | 23 | #include <asm/setup.h> |
23 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
diff --git a/arch/i386/xen/xen-head.S b/arch/i386/xen/xen-head.S index bc71f3bc4014..f8d6937db2ec 100644 --- a/arch/i386/xen/xen-head.S +++ b/arch/i386/xen/xen-head.S | |||
@@ -7,20 +7,20 @@ | |||
7 | #include <asm/boot.h> | 7 | #include <asm/boot.h> |
8 | #include <xen/interface/elfnote.h> | 8 | #include <xen/interface/elfnote.h> |
9 | 9 | ||
10 | .section .init.text | 10 | .pushsection .init.text |
11 | ENTRY(startup_xen) | 11 | ENTRY(startup_xen) |
12 | movl %esi,xen_start_info | 12 | movl %esi,xen_start_info |
13 | cld | 13 | cld |
14 | movl $(init_thread_union+THREAD_SIZE),%esp | 14 | movl $(init_thread_union+THREAD_SIZE),%esp |
15 | jmp xen_start_kernel | 15 | jmp xen_start_kernel |
16 | .popsection | ||
16 | 17 | ||
17 | .pushsection ".bss.page_aligned" | 18 | .pushsection .bss.page_aligned |
18 | .align PAGE_SIZE_asm | 19 | .align PAGE_SIZE_asm |
19 | ENTRY(hypercall_page) | 20 | ENTRY(hypercall_page) |
20 | .skip 0x1000 | 21 | .skip 0x1000 |
21 | .popsection | 22 | .popsection |
22 | 23 | ||
23 | .section .text | ||
24 | ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") | 24 | ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") |
25 | ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6") | 25 | ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6") |
26 | ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0") | 26 | ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0") |
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c index 7e6d5fb75390..ed3a4caec620 100644 --- a/arch/m68k/kernel/setup.c +++ b/arch/m68k/kernel/setup.c | |||
@@ -62,7 +62,6 @@ EXPORT_SYMBOL(m68k_num_memory); | |||
62 | int m68k_realnum_memory; | 62 | int m68k_realnum_memory; |
63 | EXPORT_SYMBOL(m68k_realnum_memory); | 63 | EXPORT_SYMBOL(m68k_realnum_memory); |
64 | unsigned long m68k_memoffset; | 64 | unsigned long m68k_memoffset; |
65 | EXPORT_SYMBOL(m68k_memoffset); | ||
66 | struct mem_info m68k_memory[NUM_MEMINFO]; | 65 | struct mem_info m68k_memory[NUM_MEMINFO]; |
67 | EXPORT_SYMBOL(m68k_memory); | 66 | EXPORT_SYMBOL(m68k_memory); |
68 | 67 | ||
@@ -200,7 +199,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record) | |||
200 | (m68k_num_memory - 1)); | 199 | (m68k_num_memory - 1)); |
201 | m68k_num_memory = 1; | 200 | m68k_num_memory = 1; |
202 | } | 201 | } |
203 | m68k_memoffset = m68k_memory[0].addr-PAGE_OFFSET; | ||
204 | #endif | 202 | #endif |
205 | } | 203 | } |
206 | 204 | ||
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds index c42245775a4d..59fe285865ec 100644 --- a/arch/m68k/kernel/vmlinux-std.lds +++ b/arch/m68k/kernel/vmlinux-std.lds | |||
@@ -19,6 +19,8 @@ SECTIONS | |||
19 | *(.gnu.warning) | 19 | *(.gnu.warning) |
20 | } :text = 0x4e75 | 20 | } :text = 0x4e75 |
21 | 21 | ||
22 | _etext = .; /* End of text section */ | ||
23 | |||
22 | . = ALIGN(16); /* Exception table */ | 24 | . = ALIGN(16); /* Exception table */ |
23 | __start___ex_table = .; | 25 | __start___ex_table = .; |
24 | __ex_table : { *(__ex_table) } | 26 | __ex_table : { *(__ex_table) } |
@@ -26,8 +28,6 @@ SECTIONS | |||
26 | 28 | ||
27 | RODATA | 29 | RODATA |
28 | 30 | ||
29 | _etext = .; /* End of text section */ | ||
30 | |||
31 | .data : { /* Data */ | 31 | .data : { /* Data */ |
32 | DATA_DATA | 32 | DATA_DATA |
33 | CONSTRUCTORS | 33 | CONSTRUCTORS |
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 7d571a2b44dd..30d34f285024 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c | |||
@@ -210,11 +210,7 @@ void __init paging_init(void) | |||
210 | int i; | 210 | int i; |
211 | 211 | ||
212 | #ifdef DEBUG | 212 | #ifdef DEBUG |
213 | { | 213 | printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem); |
214 | extern unsigned long availmem; | ||
215 | printk ("start of paging_init (%p, %lx)\n", | ||
216 | kernel_pg_dir, availmem); | ||
217 | } | ||
218 | #endif | 214 | #endif |
219 | 215 | ||
220 | /* Fix the cache mode in the page descriptors for the 680[46]0. */ | 216 | /* Fix the cache mode in the page descriptors for the 680[46]0. */ |
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c index 846f97534685..47502d5ec19f 100644 --- a/arch/m68knommu/kernel/process.c +++ b/arch/m68knommu/kernel/process.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/a.out.h> | 28 | #include <linux/a.out.h> |
29 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
30 | #include <linux/reboot.h> | 30 | #include <linux/reboot.h> |
31 | #include <linux/fs.h> | ||
31 | 32 | ||
32 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
33 | #include <asm/system.h> | 34 | #include <asm/system.h> |
diff --git a/arch/m68knommu/kernel/sys_m68k.c b/arch/m68knommu/kernel/sys_m68k.c index 48e6b33e8b44..15d62c5279a9 100644 --- a/arch/m68knommu/kernel/sys_m68k.c +++ b/arch/m68knommu/kernel/sys_m68k.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/mman.h> | 18 | #include <linux/mman.h> |
19 | #include <linux/file.h> | 19 | #include <linux/file.h> |
20 | #include <linux/utsname.h> | 20 | #include <linux/utsname.h> |
21 | #include <linux/fs.h> | ||
21 | 22 | ||
22 | #include <asm/setup.h> | 23 | #include <asm/setup.h> |
23 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index aff661fe2ee1..0eabe73c964d 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c | |||
@@ -612,6 +612,8 @@ static int ubd_open_dev(struct ubd *ubd_dev) | |||
612 | ubd_dev->fd = fd; | 612 | ubd_dev->fd = fd; |
613 | 613 | ||
614 | if(ubd_dev->cow.file != NULL){ | 614 | if(ubd_dev->cow.file != NULL){ |
615 | blk_queue_max_sectors(ubd_dev->queue, 8 * sizeof(long)); | ||
616 | |||
615 | err = -ENOMEM; | 617 | err = -ENOMEM; |
616 | ubd_dev->cow.bitmap = (void *) vmalloc(ubd_dev->cow.bitmap_len); | 618 | ubd_dev->cow.bitmap = (void *) vmalloc(ubd_dev->cow.bitmap_len); |
617 | if(ubd_dev->cow.bitmap == NULL){ | 619 | if(ubd_dev->cow.bitmap == NULL){ |
@@ -712,8 +714,6 @@ static int ubd_add(int n, char **error_out) | |||
712 | ubd_dev->queue->queuedata = ubd_dev; | 714 | ubd_dev->queue->queuedata = ubd_dev; |
713 | 715 | ||
714 | blk_queue_max_hw_segments(ubd_dev->queue, MAX_SG); | 716 | blk_queue_max_hw_segments(ubd_dev->queue, MAX_SG); |
715 | if(ubd_dev->cow.file != NULL) | ||
716 | blk_queue_max_sectors(ubd_dev->queue, 8 * sizeof(long)); | ||
717 | err = ubd_disk_register(MAJOR_NR, ubd_dev->size, n, &ubd_gendisk[n]); | 717 | err = ubd_disk_register(MAJOR_NR, ubd_dev->size, n, &ubd_gendisk[n]); |
718 | if(err){ | 718 | if(err){ |
719 | *error_out = "Failed to register device"; | 719 | *error_out = "Failed to register device"; |
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c index 430673be1df7..7225124d96c2 100644 --- a/drivers/ata/pata_it821x.c +++ b/drivers/ata/pata_it821x.c | |||
@@ -587,7 +587,7 @@ static int it821x_port_start(struct ata_port *ap) | |||
587 | itdev->want[1][1] = ATA_ANY; | 587 | itdev->want[1][1] = ATA_ANY; |
588 | itdev->last_device = -1; | 588 | itdev->last_device = -1; |
589 | 589 | ||
590 | if (pdev->revision == 0x11) { | 590 | if (pdev->revision == 0x10) { |
591 | itdev->timing10 = 1; | 591 | itdev->timing10 = 1; |
592 | /* Need to disable ATAPI DMA for this case */ | 592 | /* Need to disable ATAPI DMA for this case */ |
593 | if (!itdev->smart) | 593 | if (!itdev->smart) |
diff --git a/drivers/auxdisplay/cfag12864b.c b/drivers/auxdisplay/cfag12864b.c index cb44cb4f6a47..80bb06105387 100644 --- a/drivers/auxdisplay/cfag12864b.c +++ b/drivers/auxdisplay/cfag12864b.c | |||
@@ -355,7 +355,7 @@ static int __init cfag12864b_init(void) | |||
355 | 355 | ||
356 | cfag12864b_cache = kmalloc(sizeof(unsigned char) * | 356 | cfag12864b_cache = kmalloc(sizeof(unsigned char) * |
357 | CFAG12864B_SIZE, GFP_KERNEL); | 357 | CFAG12864B_SIZE, GFP_KERNEL); |
358 | if (cfag12864b_buffer == NULL) { | 358 | if (cfag12864b_cache == NULL) { |
359 | printk(KERN_ERR CFAG12864B_NAME ": ERROR: " | 359 | printk(KERN_ERR CFAG12864B_NAME ": ERROR: " |
360 | "can't alloc cache buffer (%i bytes)\n", | 360 | "can't alloc cache buffer (%i bytes)\n", |
361 | CFAG12864B_SIZE); | 361 | CFAG12864B_SIZE); |
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index ef32e977d307..4245b7f80a49 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig | |||
@@ -68,6 +68,7 @@ config AMIGA_Z2RAM | |||
68 | config BLK_DEV_XD | 68 | config BLK_DEV_XD |
69 | tristate "XT hard disk support" | 69 | tristate "XT hard disk support" |
70 | depends on ISA && ISA_DMA_API | 70 | depends on ISA && ISA_DMA_API |
71 | select CHECK_SIGNATURE | ||
71 | help | 72 | help |
72 | Very old 8 bit hard disk controllers used in the IBM XT computer | 73 | Very old 8 bit hard disk controllers used in the IBM XT computer |
73 | will be supported if you say Y here. | 74 | will be supported if you say Y here. |
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 96d2f9ee42d6..9b07f7851061 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -2292,7 +2292,7 @@ static int __devinit ipmi_of_probe(struct of_device *dev, | |||
2292 | info->irq = irq_of_parse_and_map(dev->node, 0); | 2292 | info->irq = irq_of_parse_and_map(dev->node, 0); |
2293 | info->dev = &dev->dev; | 2293 | info->dev = &dev->dev; |
2294 | 2294 | ||
2295 | dev_dbg(&dev->dev, "addr 0x%lx regsize %ld spacing %ld irq %x\n", | 2295 | dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n", |
2296 | info->io.addr_data, info->io.regsize, info->io.regspacing, | 2296 | info->io.addr_data, info->io.regsize, info->io.regspacing, |
2297 | info->irq); | 2297 | info->irq); |
2298 | 2298 | ||
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c index 2ce0af1bd588..d95f316afb5a 100644 --- a/drivers/char/keyboard.c +++ b/drivers/char/keyboard.c | |||
@@ -1022,10 +1022,6 @@ static const unsigned short x86_keycodes[256] = | |||
1022 | 308,310,313,314,315,317,318,319,320,357,322,323,324,325,276,330, | 1022 | 308,310,313,314,315,317,318,319,320,357,322,323,324,325,276,330, |
1023 | 332,340,365,342,343,344,345,346,356,270,341,368,369,370,371,372 }; | 1023 | 332,340,365,342,343,344,345,346,356,270,341,368,369,370,371,372 }; |
1024 | 1024 | ||
1025 | #ifdef CONFIG_MAC_EMUMOUSEBTN | ||
1026 | extern int mac_hid_mouse_emulate_buttons(int, int, int); | ||
1027 | #endif /* CONFIG_MAC_EMUMOUSEBTN */ | ||
1028 | |||
1029 | #ifdef CONFIG_SPARC | 1025 | #ifdef CONFIG_SPARC |
1030 | static int sparc_l1_a_state = 0; | 1026 | static int sparc_l1_a_state = 0; |
1031 | extern void sun_do_break(void); | 1027 | extern void sun_do_break(void); |
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index bbb7f1292665..2f97d2f8f916 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c | |||
@@ -1565,6 +1565,9 @@ static int hdlcdev_open(struct net_device *dev) | |||
1565 | int rc; | 1565 | int rc; |
1566 | unsigned long flags; | 1566 | unsigned long flags; |
1567 | 1567 | ||
1568 | if (!try_module_get(THIS_MODULE)) | ||
1569 | return -EBUSY; | ||
1570 | |||
1568 | DBGINFO(("%s hdlcdev_open\n", dev->name)); | 1571 | DBGINFO(("%s hdlcdev_open\n", dev->name)); |
1569 | 1572 | ||
1570 | /* generic HDLC layer open processing */ | 1573 | /* generic HDLC layer open processing */ |
@@ -1634,6 +1637,7 @@ static int hdlcdev_close(struct net_device *dev) | |||
1634 | info->netcount=0; | 1637 | info->netcount=0; |
1635 | spin_unlock_irqrestore(&info->netlock, flags); | 1638 | spin_unlock_irqrestore(&info->netlock, flags); |
1636 | 1639 | ||
1640 | module_put(THIS_MODULE); | ||
1637 | return 0; | 1641 | return 0; |
1638 | } | 1642 | } |
1639 | 1643 | ||
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 9bb542913b86..39564b76d4a3 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * Reiner Sailer <sailer@watson.ibm.com> | 7 | * Reiner Sailer <sailer@watson.ibm.com> |
8 | * Kylene Hall <kjhall@us.ibm.com> | 8 | * Kylene Hall <kjhall@us.ibm.com> |
9 | * | 9 | * |
10 | * Maintained by: <tpmdd_devel@lists.sourceforge.net> | 10 | * Maintained by: <tpmdd-devel@lists.sourceforge.net> |
11 | * | 11 | * |
12 | * Device driver for TCG/TCPA TPM (trusted platform module). | 12 | * Device driver for TCG/TCPA TPM (trusted platform module). |
13 | * Specifications at www.trustedcomputinggroup.org | 13 | * Specifications at www.trustedcomputinggroup.org |
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index b2e2b002a1bb..d15ccddc92eb 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h | |||
@@ -7,7 +7,7 @@ | |||
7 | * Reiner Sailer <sailer@watson.ibm.com> | 7 | * Reiner Sailer <sailer@watson.ibm.com> |
8 | * Kylene Hall <kjhall@us.ibm.com> | 8 | * Kylene Hall <kjhall@us.ibm.com> |
9 | * | 9 | * |
10 | * Maintained by: <tpmdd_devel@lists.sourceforge.net> | 10 | * Maintained by: <tpmdd-devel@lists.sourceforge.net> |
11 | * | 11 | * |
12 | * Device driver for TCG/TCPA TPM (trusted platform module). | 12 | * Device driver for TCG/TCPA TPM (trusted platform module). |
13 | * Specifications at www.trustedcomputinggroup.org | 13 | * Specifications at www.trustedcomputinggroup.org |
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c index 1ab0896070be..d0e7926eb486 100644 --- a/drivers/char/tpm/tpm_atmel.c +++ b/drivers/char/tpm/tpm_atmel.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * Reiner Sailer <sailer@watson.ibm.com> | 7 | * Reiner Sailer <sailer@watson.ibm.com> |
8 | * Kylene Hall <kjhall@us.ibm.com> | 8 | * Kylene Hall <kjhall@us.ibm.com> |
9 | * | 9 | * |
10 | * Maintained by: <tpmdd_devel@lists.sourceforge.net> | 10 | * Maintained by: <tpmdd-devel@lists.sourceforge.net> |
11 | * | 11 | * |
12 | * Device driver for TCG/TCPA TPM (trusted platform module). | 12 | * Device driver for TCG/TCPA TPM (trusted platform module). |
13 | * Specifications at www.trustedcomputinggroup.org | 13 | * Specifications at www.trustedcomputinggroup.org |
diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h index 9363bcf0a402..6c831f9466b7 100644 --- a/drivers/char/tpm/tpm_atmel.h +++ b/drivers/char/tpm/tpm_atmel.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * Authors: | 4 | * Authors: |
5 | * Kylene Hall <kjhall@us.ibm.com> | 5 | * Kylene Hall <kjhall@us.ibm.com> |
6 | * | 6 | * |
7 | * Maintained by: <tpmdd_devel@lists.sourceforge.net> | 7 | * Maintained by: <tpmdd-devel@lists.sourceforge.net> |
8 | * | 8 | * |
9 | * Device driver for TCG/TCPA TPM (trusted platform module). | 9 | * Device driver for TCG/TCPA TPM (trusted platform module). |
10 | * Specifications at www.trustedcomputinggroup.org | 10 | * Specifications at www.trustedcomputinggroup.org |
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c index 8677fc6a545e..60a2d2630e36 100644 --- a/drivers/char/tpm/tpm_bios.c +++ b/drivers/char/tpm/tpm_bios.c | |||
@@ -7,6 +7,8 @@ | |||
7 | * Reiner Sailer <sailer@watson.ibm.com> | 7 | * Reiner Sailer <sailer@watson.ibm.com> |
8 | * Kylene Hall <kjhall@us.ibm.com> | 8 | * Kylene Hall <kjhall@us.ibm.com> |
9 | * | 9 | * |
10 | * Maintained by: <tpmdd-devel@lists.sourceforge.net> | ||
11 | * | ||
10 | * Access to the eventlog extended by the TCG BIOS of PC platform | 12 | * Access to the eventlog extended by the TCG BIOS of PC platform |
11 | * | 13 | * |
12 | * This program is free software; you can redistribute it and/or | 14 | * This program is free software; you can redistribute it and/or |
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index 608f73071bef..6313326bc41f 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * Reiner Sailer <sailer@watson.ibm.com> | 7 | * Reiner Sailer <sailer@watson.ibm.com> |
8 | * Kylene Hall <kjhall@us.ibm.com> | 8 | * Kylene Hall <kjhall@us.ibm.com> |
9 | * | 9 | * |
10 | * Maintained by: <tpmdd_devel@lists.sourceforge.net> | 10 | * Maintained by: <tpmdd-devel@lists.sourceforge.net> |
11 | * | 11 | * |
12 | * Device driver for TCG/TCPA TPM (trusted platform module). | 12 | * Device driver for TCG/TCPA TPM (trusted platform module). |
13 | * Specifications at www.trustedcomputinggroup.org | 13 | * Specifications at www.trustedcomputinggroup.org |
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 483f3f60013c..23fa18a6654c 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c | |||
@@ -5,6 +5,8 @@ | |||
5 | * Leendert van Doorn <leendert@watson.ibm.com> | 5 | * Leendert van Doorn <leendert@watson.ibm.com> |
6 | * Kylene Hall <kjhall@us.ibm.com> | 6 | * Kylene Hall <kjhall@us.ibm.com> |
7 | * | 7 | * |
8 | * Maintained by: <tpmdd-devel@lists.sourceforge.net> | ||
9 | * | ||
8 | * Device driver for TCG/TCPA TPM (trusted platform module). | 10 | * Device driver for TCG/TCPA TPM (trusted platform module). |
9 | * Specifications at www.trustedcomputinggroup.org | 11 | * Specifications at www.trustedcomputinggroup.org |
10 | * | 12 | * |
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 9b26574f1466..d602b8fa7d46 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig | |||
@@ -68,6 +68,7 @@ config INPUT_WISTRON_BTNS | |||
68 | select INPUT_POLLDEV | 68 | select INPUT_POLLDEV |
69 | select NEW_LEDS | 69 | select NEW_LEDS |
70 | select LEDS_CLASS | 70 | select LEDS_CLASS |
71 | select CHECK_SIGNATURE | ||
71 | help | 72 | help |
72 | Say Y here for support of Winstron laptop button interface, used on | 73 | Say Y here for support of Winstron laptop button interface, used on |
73 | laptops of various brands, including Acer and Fujitsu-Siemens. If | 74 | laptops of various brands, including Acer and Fujitsu-Siemens. If |
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig index fd6925f41647..41e2250613a1 100644 --- a/drivers/lguest/Kconfig +++ b/drivers/lguest/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config LGUEST | 1 | config LGUEST |
2 | tristate "Linux hypervisor example code" | 2 | tristate "Linux hypervisor example code" |
3 | depends on X86 && PARAVIRT && EXPERIMENTAL && !X86_PAE | 3 | depends on X86 && PARAVIRT && EXPERIMENTAL && !X86_PAE && FUTEX |
4 | select LGUEST_GUEST | 4 | select LGUEST_GUEST |
5 | select HVC_DRIVER | 5 | select HVC_DRIVER |
6 | ---help--- | 6 | ---help--- |
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c index 76c1e8e4a487..33dee3a773ed 100644 --- a/drivers/macintosh/mac_hid.c +++ b/drivers/macintosh/mac_hid.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/sysctl.h> | 13 | #include <linux/sysctl.h> |
14 | #include <linux/input.h> | 14 | #include <linux/input.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/kbd_kern.h> | ||
16 | 17 | ||
17 | 18 | ||
18 | static struct input_dev *emumousebtn; | 19 | static struct input_dev *emumousebtn; |
diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c index dfdf11c1eec4..e2f84da09e7c 100644 --- a/drivers/macintosh/via-pmu68k.c +++ b/drivers/macintosh/via-pmu68k.c | |||
@@ -818,243 +818,3 @@ pmu_present(void) | |||
818 | { | 818 | { |
819 | return (pmu_kind != PMU_UNKNOWN); | 819 | return (pmu_kind != PMU_UNKNOWN); |
820 | } | 820 | } |
821 | |||
822 | #if 0 /* needs some work for 68K */ | ||
823 | |||
824 | /* | ||
825 | * This struct is used to store config register values for | ||
826 | * PCI devices which may get powered off when we sleep. | ||
827 | */ | ||
828 | static struct pci_save { | ||
829 | u16 command; | ||
830 | u16 cache_lat; | ||
831 | u16 intr; | ||
832 | } *pbook_pci_saves; | ||
833 | static int n_pbook_pci_saves; | ||
834 | |||
835 | static inline void | ||
836 | pbook_pci_save(void) | ||
837 | { | ||
838 | int npci; | ||
839 | struct pci_dev *pd = NULL; | ||
840 | struct pci_save *ps; | ||
841 | |||
842 | npci = 0; | ||
843 | while ((pd = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pd)) != NULL) | ||
844 | ++npci; | ||
845 | n_pbook_pci_saves = npci; | ||
846 | if (npci == 0) | ||
847 | return; | ||
848 | ps = kmalloc(npci * sizeof(*ps), GFP_KERNEL); | ||
849 | pbook_pci_saves = ps; | ||
850 | if (ps == NULL) | ||
851 | return; | ||
852 | |||
853 | pd = NULL; | ||
854 | while ((pd = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pd)) != NULL) { | ||
855 | pci_read_config_word(pd, PCI_COMMAND, &ps->command); | ||
856 | pci_read_config_word(pd, PCI_CACHE_LINE_SIZE, &ps->cache_lat); | ||
857 | pci_read_config_word(pd, PCI_INTERRUPT_LINE, &ps->intr); | ||
858 | ++ps; | ||
859 | --npci; | ||
860 | } | ||
861 | } | ||
862 | |||
863 | static inline void | ||
864 | pbook_pci_restore(void) | ||
865 | { | ||
866 | u16 cmd; | ||
867 | struct pci_save *ps = pbook_pci_saves; | ||
868 | struct pci_dev *pd = NULL; | ||
869 | int j; | ||
870 | |||
871 | while ((pd = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pd)) != NULL) { | ||
872 | if (ps->command == 0) | ||
873 | continue; | ||
874 | pci_read_config_word(pd, PCI_COMMAND, &cmd); | ||
875 | if ((ps->command & ~cmd) == 0) | ||
876 | continue; | ||
877 | switch (pd->hdr_type) { | ||
878 | case PCI_HEADER_TYPE_NORMAL: | ||
879 | for (j = 0; j < 6; ++j) | ||
880 | pci_write_config_dword(pd, | ||
881 | PCI_BASE_ADDRESS_0 + j*4, | ||
882 | pd->resource[j].start); | ||
883 | pci_write_config_dword(pd, PCI_ROM_ADDRESS, | ||
884 | pd->resource[PCI_ROM_RESOURCE].start); | ||
885 | pci_write_config_word(pd, PCI_CACHE_LINE_SIZE, | ||
886 | ps->cache_lat); | ||
887 | pci_write_config_word(pd, PCI_INTERRUPT_LINE, | ||
888 | ps->intr); | ||
889 | pci_write_config_word(pd, PCI_COMMAND, ps->command); | ||
890 | break; | ||
891 | /* other header types not restored at present */ | ||
892 | } | ||
893 | } | ||
894 | } | ||
895 | |||
896 | /* | ||
897 | * Put the powerbook to sleep. | ||
898 | */ | ||
899 | #define IRQ_ENABLE ((unsigned int *)0xf3000024) | ||
900 | #define MEM_CTRL ((unsigned int *)0xf8000070) | ||
901 | |||
902 | int powerbook_sleep(void) | ||
903 | { | ||
904 | int ret, i, x; | ||
905 | static int save_backlight; | ||
906 | static unsigned int save_irqen; | ||
907 | unsigned long msr; | ||
908 | unsigned int hid0; | ||
909 | unsigned long p, wait; | ||
910 | struct adb_request sleep_req; | ||
911 | |||
912 | /* Notify device drivers */ | ||
913 | ret = blocking_notifier_call_chain(&sleep_notifier_list, | ||
914 | PBOOK_SLEEP, NULL); | ||
915 | if (ret & NOTIFY_STOP_MASK) | ||
916 | return -EBUSY; | ||
917 | |||
918 | /* Sync the disks. */ | ||
919 | /* XXX It would be nice to have some way to ensure that | ||
920 | * nobody is dirtying any new buffers while we wait. */ | ||
921 | sys_sync(); | ||
922 | |||
923 | /* Turn off the display backlight */ | ||
924 | save_backlight = backlight_enabled; | ||
925 | if (save_backlight) | ||
926 | pmu_enable_backlight(0); | ||
927 | |||
928 | /* Give the disks a little time to actually finish writing */ | ||
929 | for (wait = jiffies + (HZ/4); time_before(jiffies, wait); ) | ||
930 | mb(); | ||
931 | |||
932 | /* Disable all interrupts except pmu */ | ||
933 | save_irqen = in_le32(IRQ_ENABLE); | ||
934 | for (i = 0; i < 32; ++i) | ||
935 | if (i != vias->intrs[0].line && (save_irqen & (1 << i))) | ||
936 | disable_irq(i); | ||
937 | asm volatile("mtdec %0" : : "r" (0x7fffffff)); | ||
938 | |||
939 | /* Save the state of PCI config space for some slots */ | ||
940 | pbook_pci_save(); | ||
941 | |||
942 | /* Set the memory controller to keep the memory refreshed | ||
943 | while we're asleep */ | ||
944 | for (i = 0x403f; i >= 0x4000; --i) { | ||
945 | out_be32(MEM_CTRL, i); | ||
946 | do { | ||
947 | x = (in_be32(MEM_CTRL) >> 16) & 0x3ff; | ||
948 | } while (x == 0); | ||
949 | if (x >= 0x100) | ||
950 | break; | ||
951 | } | ||
952 | |||
953 | /* Ask the PMU to put us to sleep */ | ||
954 | pmu_request(&sleep_req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T'); | ||
955 | while (!sleep_req.complete) | ||
956 | mb(); | ||
957 | /* displacement-flush the L2 cache - necessary? */ | ||
958 | for (p = KERNELBASE; p < KERNELBASE + 0x100000; p += 0x1000) | ||
959 | i = *(volatile int *)p; | ||
960 | asleep = 1; | ||
961 | |||
962 | /* Put the CPU into sleep mode */ | ||
963 | asm volatile("mfspr %0,1008" : "=r" (hid0) :); | ||
964 | hid0 = (hid0 & ~(HID0_NAP | HID0_DOZE)) | HID0_SLEEP; | ||
965 | asm volatile("mtspr 1008,%0" : : "r" (hid0)); | ||
966 | local_save_flags(msr); | ||
967 | msr |= MSR_POW | MSR_EE; | ||
968 | local_irq_restore(msr); | ||
969 | udelay(10); | ||
970 | |||
971 | /* OK, we're awake again, start restoring things */ | ||
972 | out_be32(MEM_CTRL, 0x3f); | ||
973 | pbook_pci_restore(); | ||
974 | |||
975 | /* wait for the PMU interrupt sequence to complete */ | ||
976 | while (asleep) | ||
977 | mb(); | ||
978 | |||
979 | /* reenable interrupts */ | ||
980 | for (i = 0; i < 32; ++i) | ||
981 | if (i != vias->intrs[0].line && (save_irqen & (1 << i))) | ||
982 | enable_irq(i); | ||
983 | |||
984 | /* Notify drivers */ | ||
985 | blocking_notifier_call_chain(&sleep_notifier_list, PBOOK_WAKE, NULL); | ||
986 | |||
987 | /* reenable ADB autopoll */ | ||
988 | pmu_adb_autopoll(adb_dev_map); | ||
989 | |||
990 | /* Turn on the screen backlight, if it was on before */ | ||
991 | if (save_backlight) | ||
992 | pmu_enable_backlight(1); | ||
993 | |||
994 | /* Wait for the hard disk to spin up */ | ||
995 | |||
996 | return 0; | ||
997 | } | ||
998 | |||
999 | /* | ||
1000 | * Support for /dev/pmu device | ||
1001 | */ | ||
1002 | static int pmu_open(struct inode *inode, struct file *file) | ||
1003 | { | ||
1004 | return 0; | ||
1005 | } | ||
1006 | |||
1007 | static ssize_t pmu_read(struct file *file, char *buf, | ||
1008 | size_t count, loff_t *ppos) | ||
1009 | { | ||
1010 | return 0; | ||
1011 | } | ||
1012 | |||
1013 | static ssize_t pmu_write(struct file *file, const char *buf, | ||
1014 | size_t count, loff_t *ppos) | ||
1015 | { | ||
1016 | return 0; | ||
1017 | } | ||
1018 | |||
1019 | static int pmu_ioctl(struct inode * inode, struct file *filp, | ||
1020 | u_int cmd, u_long arg) | ||
1021 | { | ||
1022 | int error; | ||
1023 | __u32 value; | ||
1024 | |||
1025 | switch (cmd) { | ||
1026 | case PMU_IOC_SLEEP: | ||
1027 | return -ENOSYS; | ||
1028 | case PMU_IOC_GET_BACKLIGHT: | ||
1029 | return put_user(backlight_level, (__u32 *)arg); | ||
1030 | case PMU_IOC_SET_BACKLIGHT: | ||
1031 | error = get_user(value, (__u32 *)arg); | ||
1032 | if (!error) | ||
1033 | pmu_set_brightness(value); | ||
1034 | return error; | ||
1035 | case PMU_IOC_GET_MODEL: | ||
1036 | return put_user(pmu_kind, (__u32 *)arg); | ||
1037 | } | ||
1038 | return -EINVAL; | ||
1039 | } | ||
1040 | |||
1041 | static const struct file_operations pmu_device_fops = { | ||
1042 | .read = pmu_read, | ||
1043 | .write = pmu_write, | ||
1044 | .ioctl = pmu_ioctl, | ||
1045 | .open = pmu_open, | ||
1046 | }; | ||
1047 | |||
1048 | static struct miscdevice pmu_device = { | ||
1049 | PMU_MINOR, "pmu", &pmu_device_fops | ||
1050 | }; | ||
1051 | |||
1052 | void pmu_device_init(void) | ||
1053 | { | ||
1054 | if (!via) | ||
1055 | return; | ||
1056 | if (misc_register(&pmu_device) < 0) | ||
1057 | printk(KERN_ERR "via-pmu68k: cannot register misc device.\n"); | ||
1058 | } | ||
1059 | #endif /* CONFIG_PMAC_PBOOK */ | ||
1060 | |||
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 650991bddd8e..f33a729960ca 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -1972,7 +1972,8 @@ static int run(mddev_t *mddev) | |||
1972 | !test_bit(In_sync, &disk->rdev->flags)) { | 1972 | !test_bit(In_sync, &disk->rdev->flags)) { |
1973 | disk->head_position = 0; | 1973 | disk->head_position = 0; |
1974 | mddev->degraded++; | 1974 | mddev->degraded++; |
1975 | conf->fullsync = 1; | 1975 | if (disk->rdev) |
1976 | conf->fullsync = 1; | ||
1976 | } | 1977 | } |
1977 | } | 1978 | } |
1978 | if (mddev->degraded == conf->raid_disks) { | 1979 | if (mddev->degraded == conf->raid_disks) { |
@@ -2153,11 +2154,25 @@ static int raid1_reshape(mddev_t *mddev) | |||
2153 | oldpool = conf->r1bio_pool; | 2154 | oldpool = conf->r1bio_pool; |
2154 | conf->r1bio_pool = newpool; | 2155 | conf->r1bio_pool = newpool; |
2155 | 2156 | ||
2156 | for (d=d2=0; d < conf->raid_disks; d++) | 2157 | for (d = d2 = 0; d < conf->raid_disks; d++) { |
2157 | if (conf->mirrors[d].rdev) { | 2158 | mdk_rdev_t *rdev = conf->mirrors[d].rdev; |
2158 | conf->mirrors[d].rdev->raid_disk = d2; | 2159 | if (rdev && rdev->raid_disk != d2) { |
2159 | newmirrors[d2++].rdev = conf->mirrors[d].rdev; | 2160 | char nm[20]; |
2161 | sprintf(nm, "rd%d", rdev->raid_disk); | ||
2162 | sysfs_remove_link(&mddev->kobj, nm); | ||
2163 | rdev->raid_disk = d2; | ||
2164 | sprintf(nm, "rd%d", rdev->raid_disk); | ||
2165 | sysfs_remove_link(&mddev->kobj, nm); | ||
2166 | if (sysfs_create_link(&mddev->kobj, | ||
2167 | &rdev->kobj, nm)) | ||
2168 | printk(KERN_WARNING | ||
2169 | "md/raid1: cannot register " | ||
2170 | "%s for %s\n", | ||
2171 | nm, mdname(mddev)); | ||
2160 | } | 2172 | } |
2173 | if (rdev) | ||
2174 | newmirrors[d2++].rdev = rdev; | ||
2175 | } | ||
2161 | kfree(conf->mirrors); | 2176 | kfree(conf->mirrors); |
2162 | conf->mirrors = newmirrors; | 2177 | conf->mirrors = newmirrors; |
2163 | kfree(conf->poolinfo); | 2178 | kfree(conf->poolinfo); |
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 80c4a8463065..1cb33cac1237 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c | |||
@@ -892,7 +892,7 @@ static int m41t80_remove(struct i2c_client *client) | |||
892 | 892 | ||
893 | static struct i2c_driver m41t80_driver = { | 893 | static struct i2c_driver m41t80_driver = { |
894 | .driver = { | 894 | .driver = { |
895 | .name = "m41t80", | 895 | .name = "rtc-m41t80", |
896 | }, | 896 | }, |
897 | .probe = m41t80_probe, | 897 | .probe = m41t80_probe, |
898 | .remove = m41t80_remove, | 898 | .remove = m41t80_remove, |
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c index d94170728075..3e183cfee10f 100644 --- a/drivers/rtc/rtc-max6902.c +++ b/drivers/rtc/rtc-max6902.c | |||
@@ -13,7 +13,7 @@ | |||
13 | * | 13 | * |
14 | * 24-May-2006: Raphael Assenat <raph@8d.com> | 14 | * 24-May-2006: Raphael Assenat <raph@8d.com> |
15 | * - Major rework | 15 | * - Major rework |
16 | * Converted to rtc_device and uses the SPI layer. | 16 | * Converted to rtc_device and uses the SPI layer. |
17 | * | 17 | * |
18 | * ??-???-2005: Someone at Compulab | 18 | * ??-???-2005: Someone at Compulab |
19 | * - Initial driver creation. | 19 | * - Initial driver creation. |
@@ -259,11 +259,11 @@ static int __devexit max6902_remove(struct spi_device *spi) | |||
259 | 259 | ||
260 | static struct spi_driver max6902_driver = { | 260 | static struct spi_driver max6902_driver = { |
261 | .driver = { | 261 | .driver = { |
262 | .name = "max6902", | 262 | .name = "rtc-max6902", |
263 | .bus = &spi_bus_type, | 263 | .bus = &spi_bus_type, |
264 | .owner = THIS_MODULE, | 264 | .owner = THIS_MODULE, |
265 | }, | 265 | }, |
266 | .probe = max6902_probe, | 266 | .probe = max6902_probe, |
267 | .remove = __devexit_p(max6902_remove), | 267 | .remove = __devexit_p(max6902_remove), |
268 | }; | 268 | }; |
269 | 269 | ||
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index d2b3898b750a..6f2c71ef47ee 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -367,6 +367,7 @@ config SCSI_3W_9XXX | |||
367 | config SCSI_7000FASST | 367 | config SCSI_7000FASST |
368 | tristate "7000FASST SCSI support" | 368 | tristate "7000FASST SCSI support" |
369 | depends on ISA && SCSI && ISA_DMA_API | 369 | depends on ISA && SCSI && ISA_DMA_API |
370 | select CHECK_SIGNATURE | ||
370 | help | 371 | help |
371 | This driver supports the Western Digital 7000 SCSI host adapter | 372 | This driver supports the Western Digital 7000 SCSI host adapter |
372 | family. Some information is in the source: | 373 | family. Some information is in the source: |
@@ -388,6 +389,7 @@ config SCSI_AHA152X | |||
388 | tristate "Adaptec AHA152X/2825 support" | 389 | tristate "Adaptec AHA152X/2825 support" |
389 | depends on ISA && SCSI && !64BIT | 390 | depends on ISA && SCSI && !64BIT |
390 | select SCSI_SPI_ATTRS | 391 | select SCSI_SPI_ATTRS |
392 | select CHECK_SIGNATURE | ||
391 | ---help--- | 393 | ---help--- |
392 | This is a driver for the AHA-1510, AHA-1520, AHA-1522, and AHA-2825 | 394 | This is a driver for the AHA-1510, AHA-1520, AHA-1522, and AHA-2825 |
393 | SCSI host adapters. It also works for the AVA-1505, but the IRQ etc. | 395 | SCSI host adapters. It also works for the AVA-1505, but the IRQ etc. |
@@ -583,6 +585,7 @@ config SCSI_DTC3280 | |||
583 | tristate "DTC3180/3280 SCSI support" | 585 | tristate "DTC3180/3280 SCSI support" |
584 | depends on ISA && SCSI | 586 | depends on ISA && SCSI |
585 | select SCSI_SPI_ATTRS | 587 | select SCSI_SPI_ATTRS |
588 | select CHECK_SIGNATURE | ||
586 | help | 589 | help |
587 | This is support for DTC 3180/3280 SCSI Host Adapters. Please read | 590 | This is support for DTC 3180/3280 SCSI Host Adapters. Please read |
588 | the SCSI-HOWTO, available from | 591 | the SCSI-HOWTO, available from |
@@ -657,6 +660,7 @@ config SCSI_EATA_PIO | |||
657 | config SCSI_FUTURE_DOMAIN | 660 | config SCSI_FUTURE_DOMAIN |
658 | tristate "Future Domain 16xx SCSI/AHA-2920A support" | 661 | tristate "Future Domain 16xx SCSI/AHA-2920A support" |
659 | depends on (ISA || PCI) && SCSI | 662 | depends on (ISA || PCI) && SCSI |
663 | select CHECK_SIGNATURE | ||
660 | ---help--- | 664 | ---help--- |
661 | This is support for Future Domain's 16-bit SCSI host adapters | 665 | This is support for Future Domain's 16-bit SCSI host adapters |
662 | (TMC-1660/1680, TMC-1650/1670, TMC-3260, TMC-1610M/MER/MEX) and | 666 | (TMC-1660/1680, TMC-1650/1670, TMC-3260, TMC-1610M/MER/MEX) and |
@@ -1324,6 +1328,7 @@ config SCSI_LPFC | |||
1324 | config SCSI_SEAGATE | 1328 | config SCSI_SEAGATE |
1325 | tristate "Seagate ST-02 and Future Domain TMC-8xx SCSI support" | 1329 | tristate "Seagate ST-02 and Future Domain TMC-8xx SCSI support" |
1326 | depends on X86 && ISA && SCSI | 1330 | depends on X86 && ISA && SCSI |
1331 | select CHECK_SIGNATURE | ||
1327 | ---help--- | 1332 | ---help--- |
1328 | These are 8-bit SCSI controllers; the ST-01 is also supported by | 1333 | These are 8-bit SCSI controllers; the ST-01 is also supported by |
1329 | this driver. It is explained in section 3.9 of the SCSI-HOWTO, | 1334 | this driver. It is explained in section 3.9 of the SCSI-HOWTO, |
@@ -1397,6 +1402,7 @@ config SCSI_T128 | |||
1397 | tristate "Trantor T128/T128F/T228 SCSI support" | 1402 | tristate "Trantor T128/T128F/T228 SCSI support" |
1398 | depends on ISA && SCSI | 1403 | depends on ISA && SCSI |
1399 | select SCSI_SPI_ATTRS | 1404 | select SCSI_SPI_ATTRS |
1405 | select CHECK_SIGNATURE | ||
1400 | ---help--- | 1406 | ---help--- |
1401 | This is support for a SCSI host adapter. It is explained in section | 1407 | This is support for a SCSI host adapter. It is explained in section |
1402 | 3.11 of the SCSI-HOWTO, available from | 1408 | 3.11 of the SCSI-HOWTO, available from |
@@ -1561,7 +1567,7 @@ config A3000_SCSI | |||
1561 | built-in SCSI controller, say Y. Otherwise, say N. | 1567 | built-in SCSI controller, say Y. Otherwise, say N. |
1562 | 1568 | ||
1563 | To compile this driver as a module, choose M here: the | 1569 | To compile this driver as a module, choose M here: the |
1564 | module will be called wd33c93. | 1570 | module will be called a3000. |
1565 | 1571 | ||
1566 | config A2091_SCSI | 1572 | config A2091_SCSI |
1567 | tristate "A2091/A590 WD33C93A support" | 1573 | tristate "A2091/A590 WD33C93A support" |
@@ -1571,7 +1577,7 @@ config A2091_SCSI | |||
1571 | say N. | 1577 | say N. |
1572 | 1578 | ||
1573 | To compile this driver as a module, choose M here: the | 1579 | To compile this driver as a module, choose M here: the |
1574 | module will be called wd33c93. | 1580 | module will be called a2091. |
1575 | 1581 | ||
1576 | config GVP11_SCSI | 1582 | config GVP11_SCSI |
1577 | tristate "GVP Series II WD33C93A support" | 1583 | tristate "GVP Series II WD33C93A support" |
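The new "select CHECK_SIGNATURE" lines above pull in the generic check_signature() helper from lib/, which these ISA drivers rely on to probe for an adapter by matching a byte string in I/O memory. A minimal sketch of such a probe, with an invented BIOS window, offset and signature string purely for illustration (the declaration is assumed to come via <linux/io.h>):

#include <linux/init.h>
#include <linux/io.h>		/* check_signature() declaration assumed here */
#include <linux/types.h>

/* Hypothetical probe: look for an adapter BIOS signature in a memory
 * window.  The address, offset and signature are illustrative only. */
static bool __init example_probe_bios(void)
{
	static const unsigned char sig[] = "FAKE-SCSI-BIOS";
	void __iomem *base = ioremap(0xdc000, 0x4000);
	bool found = false;

	if (!base)
		return false;
	/* check_signature() readb()s length bytes and compares them
	 * against the expected signature, returning non-zero on a match. */
	if (check_signature(base + 0x10, sig, sizeof(sig) - 1))
		found = true;
	iounmap(base);
	return found;
}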
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 301313002f6b..f94109cbb46e 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c | |||
@@ -129,7 +129,16 @@ struct uart_8250_port { | |||
129 | unsigned char mcr; | 129 | unsigned char mcr; |
130 | unsigned char mcr_mask; /* mask of user bits */ | 130 | unsigned char mcr_mask; /* mask of user bits */ |
131 | unsigned char mcr_force; /* mask of forced bits */ | 131 | unsigned char mcr_force; /* mask of forced bits */ |
132 | unsigned char lsr_break_flag; | 132 | |
133 | /* | ||
134 | * Some bits in registers are cleared on a read, so they must | ||
135 | * be saved whenever the register is read but the bits will not | ||
136 | * be immediately processed. | ||
137 | */ | ||
138 | #define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS | ||
139 | unsigned char lsr_saved_flags; | ||
140 | #define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA | ||
141 | unsigned char msr_saved_flags; | ||
133 | 142 | ||
134 | /* | 143 | /* |
135 | * We provide a per-port pm hook. | 144 | * We provide a per-port pm hook. |
@@ -1238,6 +1247,7 @@ static void serial8250_start_tx(struct uart_port *port) | |||
1238 | if (up->bugs & UART_BUG_TXEN) { | 1247 | if (up->bugs & UART_BUG_TXEN) { |
1239 | unsigned char lsr, iir; | 1248 | unsigned char lsr, iir; |
1240 | lsr = serial_in(up, UART_LSR); | 1249 | lsr = serial_in(up, UART_LSR); |
1250 | up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; | ||
1241 | iir = serial_in(up, UART_IIR) & 0x0f; | 1251 | iir = serial_in(up, UART_IIR) & 0x0f; |
1242 | if ((up->port.type == PORT_RM9000) ? | 1252 | if ((up->port.type == PORT_RM9000) ? |
1243 | (lsr & UART_LSR_THRE && | 1253 | (lsr & UART_LSR_THRE && |
@@ -1290,18 +1300,10 @@ receive_chars(struct uart_8250_port *up, unsigned int *status) | |||
1290 | flag = TTY_NORMAL; | 1300 | flag = TTY_NORMAL; |
1291 | up->port.icount.rx++; | 1301 | up->port.icount.rx++; |
1292 | 1302 | ||
1293 | #ifdef CONFIG_SERIAL_8250_CONSOLE | 1303 | lsr |= up->lsr_saved_flags; |
1294 | /* | 1304 | up->lsr_saved_flags = 0; |
1295 | * Recover the break flag from console xmit | ||
1296 | */ | ||
1297 | if (up->port.line == up->port.cons->index) { | ||
1298 | lsr |= up->lsr_break_flag; | ||
1299 | up->lsr_break_flag = 0; | ||
1300 | } | ||
1301 | #endif | ||
1302 | 1305 | ||
1303 | if (unlikely(lsr & (UART_LSR_BI | UART_LSR_PE | | 1306 | if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) { |
1304 | UART_LSR_FE | UART_LSR_OE))) { | ||
1305 | /* | 1307 | /* |
1306 | * For statistics only | 1308 | * For statistics only |
1307 | */ | 1309 | */ |
@@ -1392,6 +1394,8 @@ static unsigned int check_modem_status(struct uart_8250_port *up) | |||
1392 | { | 1394 | { |
1393 | unsigned int status = serial_in(up, UART_MSR); | 1395 | unsigned int status = serial_in(up, UART_MSR); |
1394 | 1396 | ||
1397 | status |= up->msr_saved_flags; | ||
1398 | up->msr_saved_flags = 0; | ||
1395 | if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI && | 1399 | if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI && |
1396 | up->port.info != NULL) { | 1400 | up->port.info != NULL) { |
1397 | if (status & UART_MSR_TERI) | 1401 | if (status & UART_MSR_TERI) |
@@ -1591,7 +1595,8 @@ static void serial8250_timeout(unsigned long data) | |||
1591 | static void serial8250_backup_timeout(unsigned long data) | 1595 | static void serial8250_backup_timeout(unsigned long data) |
1592 | { | 1596 | { |
1593 | struct uart_8250_port *up = (struct uart_8250_port *)data; | 1597 | struct uart_8250_port *up = (struct uart_8250_port *)data; |
1594 | unsigned int iir, ier = 0; | 1598 | unsigned int iir, ier = 0, lsr; |
1599 | unsigned long flags; | ||
1595 | 1600 | ||
1596 | /* | 1601 | /* |
1597 | * Must disable interrupts or else we risk racing with the interrupt | 1602 | * Must disable interrupts or else we risk racing with the interrupt |
@@ -1610,9 +1615,13 @@ static void serial8250_backup_timeout(unsigned long data) | |||
1610 | * the "Diva" UART used on the management processor on many HP | 1615 | * the "Diva" UART used on the management processor on many HP |
1611 | * ia64 and parisc boxes. | 1616 | * ia64 and parisc boxes. |
1612 | */ | 1617 | */ |
1618 | spin_lock_irqsave(&up->port.lock, flags); | ||
1619 | lsr = serial_in(up, UART_LSR); | ||
1620 | up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; | ||
1621 | spin_unlock_irqrestore(&up->port.lock, flags); | ||
1613 | if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) && | 1622 | if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) && |
1614 | (!uart_circ_empty(&up->port.info->xmit) || up->port.x_char) && | 1623 | (!uart_circ_empty(&up->port.info->xmit) || up->port.x_char) && |
1615 | (serial_in(up, UART_LSR) & UART_LSR_THRE)) { | 1624 | (lsr & UART_LSR_THRE)) { |
1616 | iir &= ~(UART_IIR_ID | UART_IIR_NO_INT); | 1625 | iir &= ~(UART_IIR_ID | UART_IIR_NO_INT); |
1617 | iir |= UART_IIR_THRI; | 1626 | iir |= UART_IIR_THRI; |
1618 | } | 1627 | } |
@@ -1631,13 +1640,14 @@ static unsigned int serial8250_tx_empty(struct uart_port *port) | |||
1631 | { | 1640 | { |
1632 | struct uart_8250_port *up = (struct uart_8250_port *)port; | 1641 | struct uart_8250_port *up = (struct uart_8250_port *)port; |
1633 | unsigned long flags; | 1642 | unsigned long flags; |
1634 | unsigned int ret; | 1643 | unsigned int lsr; |
1635 | 1644 | ||
1636 | spin_lock_irqsave(&up->port.lock, flags); | 1645 | spin_lock_irqsave(&up->port.lock, flags); |
1637 | ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0; | 1646 | lsr = serial_in(up, UART_LSR); |
1647 | up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; | ||
1638 | spin_unlock_irqrestore(&up->port.lock, flags); | 1648 | spin_unlock_irqrestore(&up->port.lock, flags); |
1639 | 1649 | ||
1640 | return ret; | 1650 | return lsr & UART_LSR_TEMT ? TIOCSER_TEMT : 0; |
1641 | } | 1651 | } |
1642 | 1652 | ||
1643 | static unsigned int serial8250_get_mctrl(struct uart_port *port) | 1653 | static unsigned int serial8250_get_mctrl(struct uart_port *port) |
@@ -1708,8 +1718,7 @@ static inline void wait_for_xmitr(struct uart_8250_port *up, int bits) | |||
1708 | do { | 1718 | do { |
1709 | status = serial_in(up, UART_LSR); | 1719 | status = serial_in(up, UART_LSR); |
1710 | 1720 | ||
1711 | if (status & UART_LSR_BI) | 1721 | up->lsr_saved_flags |= status & LSR_SAVE_FLAGS; |
1712 | up->lsr_break_flag = UART_LSR_BI; | ||
1713 | 1722 | ||
1714 | if (--tmout == 0) | 1723 | if (--tmout == 0) |
1715 | break; | 1724 | break; |
@@ -1718,8 +1727,12 @@ static inline void wait_for_xmitr(struct uart_8250_port *up, int bits) | |||
1718 | 1727 | ||
1719 | /* Wait up to 1s for flow control if necessary */ | 1728 | /* Wait up to 1s for flow control if necessary */ |
1720 | if (up->port.flags & UPF_CONS_FLOW) { | 1729 | if (up->port.flags & UPF_CONS_FLOW) { |
1721 | tmout = 1000000; | 1730 | unsigned int tmout; |
1722 | while (!(serial_in(up, UART_MSR) & UART_MSR_CTS) && --tmout) { | 1731 | for (tmout = 1000000; tmout; tmout--) { |
1732 | unsigned int msr = serial_in(up, UART_MSR); | ||
1733 | up->msr_saved_flags |= msr & MSR_SAVE_FLAGS; | ||
1734 | if (msr & UART_MSR_CTS) | ||
1735 | break; | ||
1723 | udelay(1); | 1736 | udelay(1); |
1724 | touch_nmi_watchdog(); | 1737 | touch_nmi_watchdog(); |
1725 | } | 1738 | } |
@@ -1889,6 +1902,18 @@ static int serial8250_startup(struct uart_port *port) | |||
1889 | spin_unlock_irqrestore(&up->port.lock, flags); | 1902 | spin_unlock_irqrestore(&up->port.lock, flags); |
1890 | 1903 | ||
1891 | /* | 1904 | /* |
1905 | * Clear the interrupt registers again for luck, and clear the | ||
1906 | * saved flags to avoid getting false values from polling | ||
1907 | * routines or the previous session. | ||
1908 | */ | ||
1909 | serial_inp(up, UART_LSR); | ||
1910 | serial_inp(up, UART_RX); | ||
1911 | serial_inp(up, UART_IIR); | ||
1912 | serial_inp(up, UART_MSR); | ||
1913 | up->lsr_saved_flags = 0; | ||
1914 | up->msr_saved_flags = 0; | ||
1915 | |||
1916 | /* | ||
1892 | * Finally, enable interrupts. Note: Modem status interrupts | 1917 | * Finally, enable interrupts. Note: Modem status interrupts |
1893 | * are set via set_termios(), which will be occurring imminently | 1918 | * are set via set_termios(), which will be occurring imminently |
1894 | * anyway, so we don't enable them here. | 1919 | * anyway, so we don't enable them here. |
@@ -1906,14 +1931,6 @@ static int serial8250_startup(struct uart_port *port) | |||
1906 | (void) inb_p(icp); | 1931 | (void) inb_p(icp); |
1907 | } | 1932 | } |
1908 | 1933 | ||
1909 | /* | ||
1910 | * And clear the interrupt registers again for luck. | ||
1911 | */ | ||
1912 | (void) serial_inp(up, UART_LSR); | ||
1913 | (void) serial_inp(up, UART_RX); | ||
1914 | (void) serial_inp(up, UART_IIR); | ||
1915 | (void) serial_inp(up, UART_MSR); | ||
1916 | |||
1917 | return 0; | 1934 | return 0; |
1918 | } | 1935 | } |
1919 | 1936 | ||
@@ -2484,6 +2501,16 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count) | |||
2484 | wait_for_xmitr(up, BOTH_EMPTY); | 2501 | wait_for_xmitr(up, BOTH_EMPTY); |
2485 | serial_out(up, UART_IER, ier); | 2502 | serial_out(up, UART_IER, ier); |
2486 | 2503 | ||
2504 | /* | ||
2505 | * The receive handling will happen properly because the | ||
2506 | * receive ready bit will still be set; it is not cleared | ||
2507 | * on read. However, modem status handling will not, so we | ||
2508 | * must call it if we have saved something in the saved flags | ||
2509 | * while processing with interrupts off. | ||
2510 | */ | ||
2511 | if (up->msr_saved_flags) | ||
2512 | check_modem_status(up); | ||
2513 | |||
2487 | if (locked) | 2514 | if (locked) |
2488 | spin_unlock(&up->port.lock); | 2515 | spin_unlock(&up->port.lock); |
2489 | local_irq_restore(flags); | 2516 | local_irq_restore(flags); |
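The common thread in the 8250 changes above is that the LSR error bits and MSR delta bits are destroyed by the very act of reading the register, so every read outside the receive/modem-status paths (tx-empty, polling, console, backup timer) must accumulate them into lsr_saved_flags/msr_saved_flags, and the consuming path ORs them back in exactly once. A self-contained sketch of that clear-on-read pattern, deliberately written as plain userspace C rather than driver code:

#include <stdint.h>
#include <stdio.h>

#define LSR_ERROR_BITS 0x1e /* BI | FE | PE | OE: cleared by the act of reading LSR */

struct port_state {
	uint8_t hw_lsr;    /* simulated hardware register */
	uint8_t saved_lsr; /* error bits seen but not yet processed */
};

/* Simulated clear-on-read register: reading it wipes the error bits. */
static uint8_t read_lsr(struct port_state *p)
{
	uint8_t v = p->hw_lsr;
	p->hw_lsr &= ~LSR_ERROR_BITS;
	return v;
}

/* Any out-of-band read (polling, tx-empty check, console) stashes the bits. */
static uint8_t peek_lsr(struct port_state *p)
{
	uint8_t lsr = read_lsr(p);
	p->saved_lsr |= lsr & LSR_ERROR_BITS;
	return lsr;
}

/* The receive path folds the stashed bits back in exactly once. */
static uint8_t consume_lsr(struct port_state *p)
{
	uint8_t lsr = read_lsr(p) | p->saved_lsr;
	p->saved_lsr = 0;
	return lsr;
}

int main(void)
{
	struct port_state p = { .hw_lsr = 0x10 /* pretend a break (BI) occurred */ };

	peek_lsr(&p);                     /* e.g. a tx-empty poll reads LSR first */
	printf("%#x\n", consume_lsr(&p)); /* receive path still sees 0x10 */
	return 0;
}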
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index 5e485876f54c..bd66339f7a3f 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c | |||
@@ -580,6 +580,138 @@ static int pci_netmos_init(struct pci_dev *dev) | |||
580 | return num_serial; | 580 | return num_serial; |
581 | } | 581 | } |
582 | 582 | ||
583 | /* | ||
584 | * ITE support by Niels de Vos <niels.devos@wincor-nixdorf.com> | ||
585 | * | ||
586 | * These chips are available with optionally one parallel port and up to | ||
587 | * two serial ports. Unfortunately they all have the same product id. | ||
588 | * | ||
589 | * Basic configuration is done over a region of 32 I/O ports. The base | ||
590 | * ioport is called INTA or INTC, depending on docs/other drivers. | ||
591 | * | ||
592 | * The region of the 32 I/O ports is configured in POSIO0R... | ||
593 | */ | ||
594 | |||
595 | /* registers */ | ||
596 | #define ITE_887x_MISCR 0x9c | ||
597 | #define ITE_887x_INTCBAR 0x78 | ||
598 | #define ITE_887x_UARTBAR 0x7c | ||
599 | #define ITE_887x_PS0BAR 0x10 | ||
600 | #define ITE_887x_POSIO0 0x60 | ||
601 | |||
602 | /* I/O space size */ | ||
603 | #define ITE_887x_IOSIZE 32 | ||
604 | /* I/O space size (bits 26-24; 8 bytes = 011b) */ | ||
605 | #define ITE_887x_POSIO_IOSIZE_8 (3 << 24) | ||
606 | /* I/O space size (bits 26-24; 32 bytes = 101b) */ | ||
607 | #define ITE_887x_POSIO_IOSIZE_32 (5 << 24) | ||
608 | /* Decoding speed (1 = slow, 2 = medium, 3 = fast) */ | ||
609 | #define ITE_887x_POSIO_SPEED (3 << 29) | ||
610 | /* enable IO_Space bit */ | ||
611 | #define ITE_887x_POSIO_ENABLE (1 << 31) | ||
612 | |||
613 | static int __devinit pci_ite887x_init(struct pci_dev *dev) | ||
614 | { | ||
615 | /* inta_addr are the configuration addresses of the ITE */ | ||
616 | static const short inta_addr[] = { 0x2a0, 0x2c0, 0x220, 0x240, 0x1e0, | ||
617 | 0x200, 0x280, 0 }; | ||
618 | int ret, i, type; | ||
619 | struct resource *iobase = NULL; | ||
620 | u32 miscr, uartbar, ioport; | ||
621 | |||
622 | /* search for the base-ioport */ | ||
623 | i = 0; | ||
624 | while (inta_addr[i] && iobase == NULL) { | ||
625 | iobase = request_region(inta_addr[i], ITE_887x_IOSIZE, | ||
626 | "ite887x"); | ||
627 | if (iobase != NULL) { | ||
628 | /* write POSIO0R - speed | size | ioport */ | ||
629 | pci_write_config_dword(dev, ITE_887x_POSIO0, | ||
630 | ITE_887x_POSIO_ENABLE | ITE_887x_POSIO_SPEED | | ||
631 | ITE_887x_POSIO_IOSIZE_32 | inta_addr[i]); | ||
632 | /* write INTCBAR - ioport */ | ||
633 | pci_write_config_dword(dev, ITE_887x_INTCBAR, inta_addr[i]); | ||
634 | ret = inb(inta_addr[i]); | ||
635 | if (ret != 0xff) { | ||
636 | /* ioport connected */ | ||
637 | break; | ||
638 | } | ||
639 | release_region(iobase->start, ITE_887x_IOSIZE); | ||
640 | iobase = NULL; | ||
641 | } | ||
642 | i++; | ||
643 | } | ||
644 | |||
645 | if (!inta_addr[i]) { | ||
646 | printk(KERN_ERR "ite887x: could not find iobase\n"); | ||
647 | return -ENODEV; | ||
648 | } | ||
649 | |||
650 | /* start of undocumented type checking (see parport_pc.c) */ | ||
651 | type = inb(iobase->start + 0x18) & 0x0f; | ||
652 | |||
653 | switch (type) { | ||
654 | case 0x2: /* ITE8871 (1P) */ | ||
655 | case 0xa: /* ITE8875 (1P) */ | ||
656 | ret = 0; | ||
657 | break; | ||
658 | case 0xe: /* ITE8872 (2S1P) */ | ||
659 | ret = 2; | ||
660 | break; | ||
661 | case 0x6: /* ITE8873 (1S) */ | ||
662 | ret = 1; | ||
663 | break; | ||
664 | case 0x8: /* ITE8874 (2S) */ | ||
665 | ret = 2; | ||
666 | break; | ||
667 | default: | ||
668 | moan_device("Unknown ITE887x", dev); | ||
669 | ret = -ENODEV; | ||
670 | } | ||
671 | |||
672 | /* configure all serial ports */ | ||
673 | for (i = 0; i < ret; i++) { | ||
674 | /* read the I/O port from the device */ | ||
675 | pci_read_config_dword(dev, ITE_887x_PS0BAR + (0x4 * (i + 1)), | ||
676 | &ioport); | ||
677 | ioport &= 0x0000FF00; /* the actual base address */ | ||
678 | pci_write_config_dword(dev, ITE_887x_POSIO0 + (0x4 * (i + 1)), | ||
679 | ITE_887x_POSIO_ENABLE | ITE_887x_POSIO_SPEED | | ||
680 | ITE_887x_POSIO_IOSIZE_8 | ioport); | ||
681 | |||
682 | /* write the ioport to the UARTBAR */ | ||
683 | pci_read_config_dword(dev, ITE_887x_UARTBAR, &uartbar); | ||
684 | uartbar &= ~(0xffff << (16 * i)); /* clear half the reg */ | ||
685 | uartbar |= (ioport << (16 * i)); /* set the ioport */ | ||
686 | pci_write_config_dword(dev, ITE_887x_UARTBAR, uartbar); | ||
687 | |||
688 | /* get current config */ | ||
689 | pci_read_config_dword(dev, ITE_887x_MISCR, &miscr); | ||
690 | /* disable interrupts (UARTx_Routing[3:0]) */ | ||
691 | miscr &= ~(0xf << (12 - 4 * i)); | ||
692 | /* activate the UART (UARTx_En) */ | ||
693 | miscr |= 1 << (23 - i); | ||
694 | /* write new config with activated UART */ | ||
695 | pci_write_config_dword(dev, ITE_887x_MISCR, miscr); | ||
696 | } | ||
697 | |||
698 | if (ret <= 0) { | ||
699 | /* the device has no UARTs if we get here */ | ||
700 | release_region(iobase->start, ITE_887x_IOSIZE); | ||
701 | } | ||
702 | |||
703 | return ret; | ||
704 | } | ||
705 | |||
706 | static void __devexit pci_ite887x_exit(struct pci_dev *dev) | ||
707 | { | ||
708 | u32 ioport; | ||
709 | /* the ioport is bit 0-15 in POSIO0R */ | ||
710 | pci_read_config_dword(dev, ITE_887x_POSIO0, &ioport); | ||
711 | ioport &= 0xffff; | ||
712 | release_region(ioport, ITE_887x_IOSIZE); | ||
713 | } | ||
714 | |||
583 | static int | 715 | static int |
584 | pci_default_setup(struct serial_private *priv, struct pciserial_board *board, | 716 | pci_default_setup(struct serial_private *priv, struct pciserial_board *board, |
585 | struct uart_port *port, int idx) | 717 | struct uart_port *port, int idx) |
@@ -653,6 +785,18 @@ static struct pci_serial_quirk pci_serial_quirks[] = { | |||
653 | .setup = pci_default_setup, | 785 | .setup = pci_default_setup, |
654 | }, | 786 | }, |
655 | /* | 787 | /* |
788 | * ITE | ||
789 | */ | ||
790 | { | ||
791 | .vendor = PCI_VENDOR_ID_ITE, | ||
792 | .device = PCI_DEVICE_ID_ITE_8872, | ||
793 | .subvendor = PCI_ANY_ID, | ||
794 | .subdevice = PCI_ANY_ID, | ||
795 | .init = pci_ite887x_init, | ||
796 | .setup = pci_default_setup, | ||
797 | .exit = __devexit_p(pci_ite887x_exit), | ||
798 | }, | ||
799 | /* | ||
656 | * Panacom | 800 | * Panacom |
657 | */ | 801 | */ |
658 | { | 802 | { |
@@ -933,6 +1077,7 @@ enum pci_board_num_t { | |||
933 | 1077 | ||
934 | pbn_b1_2_1250000, | 1078 | pbn_b1_2_1250000, |
935 | 1079 | ||
1080 | pbn_b1_bt_1_115200, | ||
936 | pbn_b1_bt_2_921600, | 1081 | pbn_b1_bt_2_921600, |
937 | 1082 | ||
938 | pbn_b1_1_1382400, | 1083 | pbn_b1_1_1382400, |
@@ -983,6 +1128,7 @@ enum pci_board_num_t { | |||
983 | pbn_exar_XR17C152, | 1128 | pbn_exar_XR17C152, |
984 | pbn_exar_XR17C154, | 1129 | pbn_exar_XR17C154, |
985 | pbn_exar_XR17C158, | 1130 | pbn_exar_XR17C158, |
1131 | pbn_pasemi_1682M, | ||
986 | }; | 1132 | }; |
987 | 1133 | ||
988 | /* | 1134 | /* |
@@ -1211,6 +1357,13 @@ static struct pciserial_board pci_boards[] __devinitdata = { | |||
1211 | .uart_offset = 8, | 1357 | .uart_offset = 8, |
1212 | }, | 1358 | }, |
1213 | 1359 | ||
1360 | [pbn_b1_bt_1_115200] = { | ||
1361 | .flags = FL_BASE1|FL_BASE_BARS, | ||
1362 | .num_ports = 1, | ||
1363 | .base_baud = 115200, | ||
1364 | .uart_offset = 8, | ||
1365 | }, | ||
1366 | |||
1214 | [pbn_b1_bt_2_921600] = { | 1367 | [pbn_b1_bt_2_921600] = { |
1215 | .flags = FL_BASE1|FL_BASE_BARS, | 1368 | .flags = FL_BASE1|FL_BASE_BARS, |
1216 | .num_ports = 2, | 1369 | .num_ports = 2, |
@@ -1498,6 +1651,18 @@ static struct pciserial_board pci_boards[] __devinitdata = { | |||
1498 | .base_baud = 921600, | 1651 | .base_baud = 921600, |
1499 | .uart_offset = 0x200, | 1652 | .uart_offset = 0x200, |
1500 | }, | 1653 | }, |
1654 | /* | ||
1655 | * PA Semi PWRficient PA6T-1682M on-chip UART | ||
1656 | */ | ||
1657 | [pbn_pasemi_1682M] = { | ||
1658 | .flags = FL_BASE0, | ||
1659 | .num_ports = 1, | ||
1660 | .base_baud = 8333333, | ||
1661 | }, | ||
1662 | }; | ||
1663 | |||
1664 | static const struct pci_device_id softmodem_blacklist[] = { | ||
1665 | { PCI_VDEVICE ( AL, 0x5457 ), }, /* ALi Corporation M5457 AC'97 Modem */ | ||
1501 | }; | 1666 | }; |
1502 | 1667 | ||
1503 | /* | 1668 | /* |
@@ -1508,6 +1673,7 @@ static struct pciserial_board pci_boards[] __devinitdata = { | |||
1508 | static int __devinit | 1673 | static int __devinit |
1509 | serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) | 1674 | serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) |
1510 | { | 1675 | { |
1676 | const struct pci_device_id *blacklist; | ||
1511 | int num_iomem, num_port, first_port = -1, i; | 1677 | int num_iomem, num_port, first_port = -1, i; |
1512 | 1678 | ||
1513 | /* | 1679 | /* |
@@ -1522,6 +1688,18 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) | |||
1522 | (dev->class & 0xff) > 6) | 1688 | (dev->class & 0xff) > 6) |
1523 | return -ENODEV; | 1689 | return -ENODEV; |
1524 | 1690 | ||
1691 | /* | ||
1692 | * Do not access blacklisted devices that are known not to | ||
1693 | * feature serial ports. | ||
1694 | */ | ||
1695 | for (blacklist = softmodem_blacklist; | ||
1696 | blacklist < softmodem_blacklist + ARRAY_SIZE(softmodem_blacklist); | ||
1697 | blacklist++) { | ||
1698 | if (dev->vendor == blacklist->vendor && | ||
1699 | dev->device == blacklist->device) | ||
1700 | return -ENODEV; | ||
1701 | } | ||
1702 | |||
1525 | num_iomem = num_port = 0; | 1703 | num_iomem = num_port = 0; |
1526 | for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) { | 1704 | for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) { |
1527 | if (pci_resource_flags(dev, i) & IORESOURCE_IO) { | 1705 | if (pci_resource_flags(dev, i) & IORESOURCE_IO) { |
@@ -2364,6 +2542,13 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
2364 | { PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560, | 2542 | { PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560, |
2365 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2543 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2366 | pbn_b0_1_115200 }, | 2544 | pbn_b0_1_115200 }, |
2545 | /* | ||
2546 | * ITE | ||
2547 | */ | ||
2548 | { PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8872, | ||
2549 | PCI_ANY_ID, PCI_ANY_ID, | ||
2550 | 0, 0, | ||
2551 | pbn_b1_bt_1_115200 }, | ||
2367 | 2552 | ||
2368 | /* | 2553 | /* |
2369 | * IntaShield IS-200 | 2554 | * IntaShield IS-200 |
@@ -2382,6 +2567,13 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
2382 | PCI_SUBVENDOR_ID_PERLE, PCI_SUBDEVICE_ID_PCI_RAS8, | 2567 | PCI_SUBVENDOR_ID_PERLE, PCI_SUBDEVICE_ID_PCI_RAS8, |
2383 | 0, 0, pbn_b2_8_921600 }, | 2568 | 0, 0, pbn_b2_8_921600 }, |
2384 | /* | 2569 | /* |
2570 | * PA Semi PA6T-1682M on-chip UART | ||
2571 | */ | ||
2572 | { PCI_VENDOR_ID_PASEMI, 0xa004, | ||
2573 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2574 | pbn_pasemi_1682M }, | ||
2575 | |||
2576 | /* | ||
2385 | * These entries match devices with class COMMUNICATION_SERIAL, | 2577 | * These entries match devices with class COMMUNICATION_SERIAL, |
2386 | * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL | 2578 | * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL |
2387 | */ | 2579 | */ |
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index 030a6063541d..a055f58f342f 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c | |||
@@ -1146,11 +1146,14 @@ static void uart_set_termios(struct tty_struct *tty, struct ktermios *old_termio | |||
1146 | 1146 | ||
1147 | /* | 1147 | /* |
1148 | * These are the bits that are used to setup various | 1148 | * These are the bits that are used to setup various |
1149 | * flags in the low level driver. | 1149 | * flags in the low level driver. We can ignore the Bfoo |
1150 | * bits in c_cflag; c_[io]speed will always be set | ||
1151 | * appropriately by set_termios() in tty_ioctl.c | ||
1150 | */ | 1152 | */ |
1151 | #define RELEVANT_IFLAG(iflag) ((iflag) & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) | 1153 | #define RELEVANT_IFLAG(iflag) ((iflag) & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) |
1152 | |||
1153 | if ((cflag ^ old_termios->c_cflag) == 0 && | 1154 | if ((cflag ^ old_termios->c_cflag) == 0 && |
1155 | tty->termios->c_ospeed == old_termios->c_ospeed && | ||
1156 | tty->termios->c_ispeed == old_termios->c_ispeed && | ||
1154 | RELEVANT_IFLAG(tty->termios->c_iflag ^ old_termios->c_iflag) == 0) | 1157 | RELEVANT_IFLAG(tty->termios->c_iflag ^ old_termios->c_iflag) == 0) |
1155 | return; | 1158 | return; |
1156 | 1159 | ||
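Once BOTHER-style rates exist, two termios that differ only in c_ispeed/c_ospeed can be bit-identical everywhere else, which is exactly the case the extra comparisons above catch. A userspace-flavoured sketch of the same test, using the portable cfget*speed() accessors instead of the kernel's direct field access (names are illustrative, not the driver's):

#include <stdbool.h>
#include <termios.h>

#define RELEVANT_IFLAG(iflag) ((iflag) & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))

/* Return true only if something the low-level driver cares about changed.
 * Comparing the numeric speeds is what catches BOTHER-style changes that
 * leave c_cflag identical. */
static bool termios_changed(const struct termios *old, const struct termios *cur)
{
	if ((old->c_cflag ^ cur->c_cflag) != 0)
		return true;
	if (cfgetospeed(old) != cfgetospeed(cur) ||
	    cfgetispeed(old) != cfgetispeed(cur))
		return true;
	return RELEVANT_IFLAG(old->c_iflag ^ cur->c_iflag) != 0;
}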
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c index b8f91e018b21..0930e2a85514 100644 --- a/drivers/serial/serial_txx9.c +++ b/drivers/serial/serial_txx9.c | |||
@@ -37,7 +37,7 @@ | |||
37 | 37 | ||
38 | #include <asm/io.h> | 38 | #include <asm/io.h> |
39 | 39 | ||
40 | static char *serial_version = "1.09"; | 40 | static char *serial_version = "1.10"; |
41 | static char *serial_name = "TX39/49 Serial driver"; | 41 | static char *serial_name = "TX39/49 Serial driver"; |
42 | 42 | ||
43 | #define PASS_LIMIT 256 | 43 | #define PASS_LIMIT 256 |
@@ -436,8 +436,10 @@ static unsigned int serial_txx9_get_mctrl(struct uart_port *port) | |||
436 | struct uart_txx9_port *up = (struct uart_txx9_port *)port; | 436 | struct uart_txx9_port *up = (struct uart_txx9_port *)port; |
437 | unsigned int ret; | 437 | unsigned int ret; |
438 | 438 | ||
439 | ret = ((sio_in(up, TXX9_SIFLCR) & TXX9_SIFLCR_RTSSC) ? 0 : TIOCM_RTS) | 439 | /* no modem control lines */ |
440 | | ((sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS) ? 0 : TIOCM_CTS); | 440 | ret = TIOCM_CAR | TIOCM_DSR; |
441 | ret |= (sio_in(up, TXX9_SIFLCR) & TXX9_SIFLCR_RTSSC) ? 0 : TIOCM_RTS; | ||
442 | ret |= (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS) ? 0 : TIOCM_CTS; | ||
441 | 443 | ||
442 | return ret; | 444 | return ret; |
443 | } | 445 | } |
@@ -557,6 +559,12 @@ serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios, | |||
557 | unsigned long flags; | 559 | unsigned long flags; |
558 | unsigned int baud, quot; | 560 | unsigned int baud, quot; |
559 | 561 | ||
562 | /* | ||
563 | * We don't support modem control lines. | ||
564 | */ | ||
565 | termios->c_cflag &= ~(HUPCL | CMSPAR); | ||
566 | termios->c_cflag |= CLOCAL; | ||
567 | |||
560 | cval = sio_in(up, TXX9_SILCR); | 568 | cval = sio_in(up, TXX9_SILCR); |
561 | /* byte size and parity */ | 569 | /* byte size and parity */ |
562 | cval &= ~TXX9_SILCR_UMODE_MASK; | 570 | cval &= ~TXX9_SILCR_UMODE_MASK; |
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c index 80a81eccad36..832e4613673a 100644 --- a/drivers/video/au1100fb.c +++ b/drivers/video/au1100fb.c | |||
@@ -115,6 +115,52 @@ static int nocursor = 0; | |||
115 | module_param(nocursor, int, 0644); | 115 | module_param(nocursor, int, 0644); |
116 | MODULE_PARM_DESC(nocursor, "cursor enable/disable"); | 116 | MODULE_PARM_DESC(nocursor, "cursor enable/disable"); |
117 | 117 | ||
118 | /* fb_blank | ||
119 | * Blank the screen. Depending on the mode, the screen will be | ||
120 | * activated with the backlight color, or desactivated | ||
121 | */ | ||
122 | static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi) | ||
123 | { | ||
124 | struct au1100fb_device *fbdev = to_au1100fb_device(fbi); | ||
125 | |||
126 | print_dbg("fb_blank %d %p", blank_mode, fbi); | ||
127 | |||
128 | switch (blank_mode) { | ||
129 | |||
130 | case VESA_NO_BLANKING: | ||
131 | /* Turn on panel */ | ||
132 | fbdev->regs->lcd_control |= LCD_CONTROL_GO; | ||
133 | #ifdef CONFIG_MIPS_PB1100 | ||
134 | if (drv_info.panel_idx == 1) { | ||
135 | au_writew(au_readw(PB1100_G_CONTROL) | ||
136 | | (PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD), | ||
137 | PB1100_G_CONTROL); | ||
138 | } | ||
139 | #endif | ||
140 | au_sync(); | ||
141 | break; | ||
142 | |||
143 | case VESA_VSYNC_SUSPEND: | ||
144 | case VESA_HSYNC_SUSPEND: | ||
145 | case VESA_POWERDOWN: | ||
146 | /* Turn off panel */ | ||
147 | fbdev->regs->lcd_control &= ~LCD_CONTROL_GO; | ||
148 | #ifdef CONFIG_MIPS_PB1100 | ||
149 | if (drv_info.panel_idx == 1) { | ||
150 | au_writew(au_readw(PB1100_G_CONTROL) | ||
151 | & ~(PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD), | ||
152 | PB1100_G_CONTROL); | ||
153 | } | ||
154 | #endif | ||
155 | au_sync(); | ||
156 | break; | ||
157 | default: | ||
158 | break; | ||
159 | |||
160 | } | ||
161 | return 0; | ||
162 | } | ||
163 | |||
118 | /* | 164 | /* |
119 | * Set hardware with var settings. This will enable the controller with a specific | 165 | * Set hardware with var settings. This will enable the controller with a specific |
120 | * mode, normally validated with the fb_check_var method | 166 | * mode, normally validated with the fb_check_var method |
@@ -272,52 +318,6 @@ int au1100fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned | |||
272 | return 0; | 318 | return 0; |
273 | } | 319 | } |
274 | 320 | ||
275 | /* fb_blank | ||
276 | * Blank the screen. Depending on the mode, the screen will be | ||
277 | * activated with the backlight color, or desactivated | ||
278 | */ | ||
279 | int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi) | ||
280 | { | ||
281 | struct au1100fb_device *fbdev = to_au1100fb_device(fbi); | ||
282 | |||
283 | print_dbg("fb_blank %d %p", blank_mode, fbi); | ||
284 | |||
285 | switch (blank_mode) { | ||
286 | |||
287 | case VESA_NO_BLANKING: | ||
288 | /* Turn on panel */ | ||
289 | fbdev->regs->lcd_control |= LCD_CONTROL_GO; | ||
290 | #ifdef CONFIG_MIPS_PB1100 | ||
291 | if (drv_info.panel_idx == 1) { | ||
292 | au_writew(au_readw(PB1100_G_CONTROL) | ||
293 | | (PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD), | ||
294 | PB1100_G_CONTROL); | ||
295 | } | ||
296 | #endif | ||
297 | au_sync(); | ||
298 | break; | ||
299 | |||
300 | case VESA_VSYNC_SUSPEND: | ||
301 | case VESA_HSYNC_SUSPEND: | ||
302 | case VESA_POWERDOWN: | ||
303 | /* Turn off panel */ | ||
304 | fbdev->regs->lcd_control &= ~LCD_CONTROL_GO; | ||
305 | #ifdef CONFIG_MIPS_PB1100 | ||
306 | if (drv_info.panel_idx == 1) { | ||
307 | au_writew(au_readw(PB1100_G_CONTROL) | ||
308 | & ~(PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD), | ||
309 | PB1100_G_CONTROL); | ||
310 | } | ||
311 | #endif | ||
312 | au_sync(); | ||
313 | break; | ||
314 | default: | ||
315 | break; | ||
316 | |||
317 | } | ||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | /* fb_pan_display | 321 | /* fb_pan_display |
322 | * Pan display in x and/or y as specified | 322 | * Pan display in x and/or y as specified |
323 | */ | 323 | */ |
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c index 7fa1afeae8dc..dda0586ab3f3 100644 --- a/drivers/video/console/newport_con.c +++ b/drivers/video/console/newport_con.c | |||
@@ -738,9 +738,8 @@ const struct consw newport_con = { | |||
738 | #ifdef MODULE | 738 | #ifdef MODULE |
739 | static int __init newport_console_init(void) | 739 | static int __init newport_console_init(void) |
740 | { | 740 | { |
741 | |||
742 | if (!sgi_gfxaddr) | 741 | if (!sgi_gfxaddr) |
743 | return NULL; | 742 | return 0; |
744 | 743 | ||
745 | if (!npregs) | 744 | if (!npregs) |
746 | npregs = (struct newport_regs *)/* ioremap cannot fail */ | 745 | npregs = (struct newport_regs *)/* ioremap cannot fail */ |
diff --git a/drivers/video/imsttfb.c b/drivers/video/imsttfb.c index 5715b8ad0ddc..94f4511023d8 100644 --- a/drivers/video/imsttfb.c +++ b/drivers/video/imsttfb.c | |||
@@ -1391,7 +1391,7 @@ init_imstt(struct fb_info *info) | |||
1391 | } | 1391 | } |
1392 | } | 1392 | } |
1393 | 1393 | ||
1394 | #if USE_NV_MODES && defined(CONFIG_PPC) | 1394 | #if USE_NV_MODES && defined(CONFIG_PPC32) |
1395 | { | 1395 | { |
1396 | int vmode = init_vmode, cmode = init_cmode; | 1396 | int vmode = init_vmode, cmode = init_cmode; |
1397 | 1397 | ||
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c index 2fbd8dd16df5..6840dfebe4d4 100644 --- a/drivers/w1/w1_int.c +++ b/drivers/w1/w1_int.c | |||
@@ -170,22 +170,24 @@ void __w1_remove_master_device(struct w1_master *dev) | |||
170 | 170 | ||
171 | void w1_remove_master_device(struct w1_bus_master *bm) | 171 | void w1_remove_master_device(struct w1_bus_master *bm) |
172 | { | 172 | { |
173 | struct w1_master *dev = NULL; | 173 | struct w1_master *dev, *found = NULL; |
174 | 174 | ||
175 | list_for_each_entry(dev, &w1_masters, w1_master_entry) { | 175 | list_for_each_entry(dev, &w1_masters, w1_master_entry) { |
176 | if (!dev->initialized) | 176 | if (!dev->initialized) |
177 | continue; | 177 | continue; |
178 | 178 | ||
179 | if (dev->bus_master->data == bm->data) | 179 | if (dev->bus_master->data == bm->data) { |
180 | found = dev; | ||
180 | break; | 181 | break; |
182 | } | ||
181 | } | 183 | } |
182 | 184 | ||
183 | if (!dev) { | 185 | if (!found) { |
184 | printk(KERN_ERR "Device doesn't exist.\n"); | 186 | printk(KERN_ERR "Device doesn't exist.\n"); |
185 | return; | 187 | return; |
186 | } | 188 | } |
187 | 189 | ||
188 | __w1_remove_master_device(dev); | 190 | __w1_remove_master_device(found); |
189 | } | 191 | } |
190 | 192 | ||
191 | EXPORT_SYMBOL(w1_add_master_device); | 193 | EXPORT_SYMBOL(w1_add_master_device); |
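The w1 change above fixes a classic list_for_each_entry() pitfall: after the loop the cursor is never NULL (on a miss it points at memory computed from the list head itself), so a separate "found" variable is the only reliable way to tell a hit from a miss. A small standalone illustration of the same keep-the-result-separate pattern, over an array instead of a kernel list:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct master {
	const char *name;
	int initialized;
};

/* Search the way the fixed w1_remove_master_device() does: keep the loop
 * cursor separate from the "found" result so falling off the end cannot
 * be mistaken for a match. */
static struct master *find_master(struct master *tbl, size_t n, const char *name)
{
	struct master *found = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].initialized)
			continue;
		if (strcmp(tbl[i].name, name) == 0) {
			found = &tbl[i];
			break;
		}
	}
	return found; /* NULL means "no such device", never a stale cursor */
}

int main(void)
{
	struct master tbl[] = {
		{ "w1-a", 1 },
		{ "w1-b", 0 },
	};

	printf("%s\n", find_master(tbl, 2, "w1-c") ? "found" : "not found");
	return 0;
}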
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c index 9130f1c12c26..808b4f8675c5 100644 --- a/drivers/zorro/zorro-sysfs.c +++ b/drivers/zorro/zorro-sysfs.c | |||
@@ -78,7 +78,7 @@ static ssize_t zorro_read_config(struct kobject *kobj, | |||
78 | static struct bin_attribute zorro_config_attr = { | 78 | static struct bin_attribute zorro_config_attr = { |
79 | .attr = { | 79 | .attr = { |
80 | .name = "config", | 80 | .name = "config", |
81 | .mode = S_IRUGO | S_IWUSR, | 81 | .mode = S_IRUGO, |
82 | }, | 82 | }, |
83 | .size = sizeof(struct ConfigDev), | 83 | .size = sizeof(struct ConfigDev), |
84 | .read = zorro_read_config, | 84 | .read = zorro_read_config, |
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 2d4c8a3e604e..45ff3d63b758 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c | |||
@@ -587,19 +587,20 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s | |||
587 | unhashed = autofs4_lookup_unhashed(sbi, dentry->d_parent, &dentry->d_name); | 587 | unhashed = autofs4_lookup_unhashed(sbi, dentry->d_parent, &dentry->d_name); |
588 | if (!unhashed) { | 588 | if (!unhashed) { |
589 | /* | 589 | /* |
590 | * Mark the dentry incomplete, but add it. This is needed so | 590 | * Mark the dentry incomplete but don't hash it. We do this |
591 | * that the VFS layer knows about the dentry, and we can count | 591 | * to serialize our inode creation operations (symlink and |
592 | * on catching any lookups through the revalidate. | 592 | * mkdir) which prevents deadlock during the callback to |
593 | * | 593 | * the daemon. Subsequent user space lookups for the same |
594 | * Let all the hard work be done by the revalidate function that | 594 | * dentry are placed on the wait queue while the daemon |
595 | * needs to be able to do this anyway.. | 595 | * itself is allowed passage unrestricted so the create |
596 | * | 596 | * operation itself can then hash the dentry. Finally, |
597 | * We need to do this before we release the directory semaphore. | 597 | * we check for the hashed dentry and return the newly |
598 | * hashed dentry. | ||
598 | */ | 599 | */ |
599 | dentry->d_op = &autofs4_root_dentry_operations; | 600 | dentry->d_op = &autofs4_root_dentry_operations; |
600 | 601 | ||
601 | dentry->d_fsdata = NULL; | 602 | dentry->d_fsdata = NULL; |
602 | d_add(dentry, NULL); | 603 | d_instantiate(dentry, NULL); |
603 | } else { | 604 | } else { |
604 | struct autofs_info *ino = autofs4_dentry_ino(unhashed); | 605 | struct autofs_info *ino = autofs4_dentry_ino(unhashed); |
605 | DPRINTK("rehash %p with %p", dentry, unhashed); | 606 | DPRINTK("rehash %p with %p", dentry, unhashed); |
@@ -607,15 +608,17 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s | |||
607 | * If we are racing with expire the request might not | 608 | * If we are racing with expire the request might not |
608 | * be quite complete but the directory has been removed | 609 | * be quite complete but the directory has been removed |
609 | * so it must have been successful, so just wait for it. | 610 | * so it must have been successful, so just wait for it. |
611 | * We need to ensure the AUTOFS_INF_EXPIRING flag is clear | ||
612 | * before continuing as revalidate may fail when calling | ||
613 | * try_to_fill_dentry (returning EAGAIN) if we don't. | ||
610 | */ | 614 | */ |
611 | if (ino && (ino->flags & AUTOFS_INF_EXPIRING)) { | 615 | while (ino && (ino->flags & AUTOFS_INF_EXPIRING)) { |
612 | DPRINTK("wait for incomplete expire %p name=%.*s", | 616 | DPRINTK("wait for incomplete expire %p name=%.*s", |
613 | unhashed, unhashed->d_name.len, | 617 | unhashed, unhashed->d_name.len, |
614 | unhashed->d_name.name); | 618 | unhashed->d_name.name); |
615 | autofs4_wait(sbi, unhashed, NFY_NONE); | 619 | autofs4_wait(sbi, unhashed, NFY_NONE); |
616 | DPRINTK("request completed"); | 620 | DPRINTK("request completed"); |
617 | } | 621 | } |
618 | d_rehash(unhashed); | ||
619 | dentry = unhashed; | 622 | dentry = unhashed; |
620 | } | 623 | } |
621 | 624 | ||
@@ -658,7 +661,7 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s | |||
658 | * for all system calls, but it should be OK for the operations | 661 | * for all system calls, but it should be OK for the operations |
659 | * we permit from an autofs. | 662 | * we permit from an autofs. |
660 | */ | 663 | */ |
661 | if (dentry->d_inode && d_unhashed(dentry)) { | 664 | if (!oz_mode && d_unhashed(dentry)) { |
662 | /* | 665 | /* |
663 | * A user space application can (and has done in the past) | 666 | * A user space application can (and has done in the past) |
664 | * remove and re-create this directory during the callback. | 667 | * remove and re-create this directory during the callback. |
@@ -716,7 +719,7 @@ static int autofs4_dir_symlink(struct inode *dir, | |||
716 | strcpy(cp, symname); | 719 | strcpy(cp, symname); |
717 | 720 | ||
718 | inode = autofs4_get_inode(dir->i_sb, ino); | 721 | inode = autofs4_get_inode(dir->i_sb, ino); |
719 | d_instantiate(dentry, inode); | 722 | d_add(dentry, inode); |
720 | 723 | ||
721 | if (dir == dir->i_sb->s_root->d_inode) | 724 | if (dir == dir->i_sb->s_root->d_inode) |
722 | dentry->d_op = &autofs4_root_dentry_operations; | 725 | dentry->d_op = &autofs4_root_dentry_operations; |
@@ -844,7 +847,7 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
844 | return -ENOSPC; | 847 | return -ENOSPC; |
845 | 848 | ||
846 | inode = autofs4_get_inode(dir->i_sb, ino); | 849 | inode = autofs4_get_inode(dir->i_sb, ino); |
847 | d_instantiate(dentry, inode); | 850 | d_add(dentry, inode); |
848 | 851 | ||
849 | if (dir == dir->i_sb->s_root->d_inode) | 852 | if (dir == dir->i_sb->s_root->d_inode) |
850 | dentry->d_op = &autofs4_root_dentry_operations; | 853 | dentry->d_op = &autofs4_root_dentry_operations; |
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 131954b3fb98..5d40ad13ab5c 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
@@ -357,6 +357,10 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry, | |||
357 | ecryptfs_printk(KERN_DEBUG, "Is a special file; returning\n"); | 357 | ecryptfs_printk(KERN_DEBUG, "Is a special file; returning\n"); |
358 | goto out; | 358 | goto out; |
359 | } | 359 | } |
360 | if (special_file(lower_inode->i_mode)) { | ||
361 | ecryptfs_printk(KERN_DEBUG, "Is a special file; returning\n"); | ||
362 | goto out; | ||
363 | } | ||
360 | if (!nd) { | 364 | if (!nd) { |
361 | ecryptfs_printk(KERN_DEBUG, "We have a NULL nd, just leave" | 365 | ecryptfs_printk(KERN_DEBUG, "We have a NULL nd, just leave" |
362 | "as we *think* we are about to unlink\n"); | 366 | "as we *think* we are about to unlink\n"); |
@@ -780,18 +780,11 @@ static int de_thread(struct task_struct *tsk) | |||
780 | int count; | 780 | int count; |
781 | 781 | ||
782 | /* | 782 | /* |
783 | * Tell all the sighand listeners that this sighand has | ||
784 | * been detached. The signalfd_detach() function grabs the | ||
785 | * sighand lock, if signal listeners are present on the sighand. | ||
786 | */ | ||
787 | signalfd_detach(tsk); | ||
788 | |||
789 | /* | ||
790 | * If we don't share sighandlers, then we aren't sharing anything | 783 | * If we don't share sighandlers, then we aren't sharing anything |
791 | * and we can just re-use it all. | 784 | * and we can just re-use it all. |
792 | */ | 785 | */ |
793 | if (atomic_read(&oldsighand->count) <= 1) { | 786 | if (atomic_read(&oldsighand->count) <= 1) { |
794 | BUG_ON(atomic_read(&sig->count) != 1); | 787 | signalfd_detach(tsk); |
795 | exit_itimers(sig); | 788 | exit_itimers(sig); |
796 | return 0; | 789 | return 0; |
797 | } | 790 | } |
@@ -930,12 +923,11 @@ static int de_thread(struct task_struct *tsk) | |||
930 | sig->flags = 0; | 923 | sig->flags = 0; |
931 | 924 | ||
932 | no_thread_group: | 925 | no_thread_group: |
926 | signalfd_detach(tsk); | ||
933 | exit_itimers(sig); | 927 | exit_itimers(sig); |
934 | if (leader) | 928 | if (leader) |
935 | release_task(leader); | 929 | release_task(leader); |
936 | 930 | ||
937 | BUG_ON(atomic_read(&sig->count) != 1); | ||
938 | |||
939 | if (atomic_read(&oldsighand->count) == 1) { | 931 | if (atomic_read(&oldsighand->count) == 1) { |
940 | /* | 932 | /* |
941 | * Now that we nuked the rest of the thread group, | 933 | * Now that we nuked the rest of the thread group, |
diff --git a/fs/signalfd.c b/fs/signalfd.c index 7b941abbcde0..a8e293d30034 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c | |||
@@ -56,12 +56,18 @@ static int signalfd_lock(struct signalfd_ctx *ctx, struct signalfd_lockctx *lk) | |||
56 | sighand = lock_task_sighand(lk->tsk, &lk->flags); | 56 | sighand = lock_task_sighand(lk->tsk, &lk->flags); |
57 | rcu_read_unlock(); | 57 | rcu_read_unlock(); |
58 | 58 | ||
59 | if (sighand && !ctx->tsk) { | 59 | if (!sighand) |
60 | return 0; | ||
61 | |||
62 | if (!ctx->tsk) { | ||
60 | unlock_task_sighand(lk->tsk, &lk->flags); | 63 | unlock_task_sighand(lk->tsk, &lk->flags); |
61 | sighand = NULL; | 64 | return 0; |
62 | } | 65 | } |
63 | 66 | ||
64 | return sighand != NULL; | 67 | if (lk->tsk->tgid == current->tgid) |
68 | lk->tsk = current; | ||
69 | |||
70 | return 1; | ||
65 | } | 71 | } |
66 | 72 | ||
67 | static void signalfd_unlock(struct signalfd_lockctx *lk) | 73 | static void signalfd_unlock(struct signalfd_lockctx *lk) |
@@ -331,7 +337,7 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas | |||
331 | 337 | ||
332 | init_waitqueue_head(&ctx->wqh); | 338 | init_waitqueue_head(&ctx->wqh); |
333 | ctx->sigmask = sigmask; | 339 | ctx->sigmask = sigmask; |
334 | ctx->tsk = current; | 340 | ctx->tsk = current->group_leader; |
335 | 341 | ||
336 | sighand = current->sighand; | 342 | sighand = current->sighand; |
337 | /* | 343 | /* |
diff --git a/include/asm-m68k/ioctls.h b/include/asm-m68k/ioctls.h index 0c48929ab444..b8d2f4be7fd7 100644 --- a/include/asm-m68k/ioctls.h +++ b/include/asm-m68k/ioctls.h | |||
@@ -46,6 +46,10 @@ | |||
46 | #define TIOCSBRK 0x5427 /* BSD compatibility */ | 46 | #define TIOCSBRK 0x5427 /* BSD compatibility */ |
47 | #define TIOCCBRK 0x5428 /* BSD compatibility */ | 47 | #define TIOCCBRK 0x5428 /* BSD compatibility */ |
48 | #define TIOCGSID 0x5429 /* Return the session ID of FD */ | 48 | #define TIOCGSID 0x5429 /* Return the session ID of FD */ |
49 | #define TCGETS2 _IOR('T',0x2A, struct termios2) | ||
50 | #define TCSETS2 _IOW('T',0x2B, struct termios2) | ||
51 | #define TCSETSW2 _IOW('T',0x2C, struct termios2) | ||
52 | #define TCSETSF2 _IOW('T',0x2D, struct termios2) | ||
49 | #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ | 53 | #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ |
50 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ | 54 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ |
51 | 55 | ||
diff --git a/include/asm-m68k/page.h b/include/asm-m68k/page.h index 9e6d0d6debdb..1431ea0b59e0 100644 --- a/include/asm-m68k/page.h +++ b/include/asm-m68k/page.h | |||
@@ -4,17 +4,15 @@ | |||
4 | 4 | ||
5 | #ifdef __KERNEL__ | 5 | #ifdef __KERNEL__ |
6 | 6 | ||
7 | #include <linux/const.h> | ||
8 | |||
7 | /* PAGE_SHIFT determines the page size */ | 9 | /* PAGE_SHIFT determines the page size */ |
8 | #ifndef CONFIG_SUN3 | 10 | #ifndef CONFIG_SUN3 |
9 | #define PAGE_SHIFT (12) | 11 | #define PAGE_SHIFT (12) |
10 | #else | 12 | #else |
11 | #define PAGE_SHIFT (13) | 13 | #define PAGE_SHIFT (13) |
12 | #endif | 14 | #endif |
13 | #ifdef __ASSEMBLY__ | 15 | #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) |
14 | #define PAGE_SIZE (1 << PAGE_SHIFT) | ||
15 | #else | ||
16 | #define PAGE_SIZE (1UL << PAGE_SHIFT) | ||
17 | #endif | ||
18 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 16 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
19 | 17 | ||
20 | #include <asm/setup.h> | 18 | #include <asm/setup.h> |
@@ -27,6 +25,8 @@ | |||
27 | 25 | ||
28 | #ifndef __ASSEMBLY__ | 26 | #ifndef __ASSEMBLY__ |
29 | 27 | ||
28 | #include <linux/compiler.h> | ||
29 | |||
30 | #include <asm/module.h> | 30 | #include <asm/module.h> |
31 | 31 | ||
32 | #define get_user_page(vaddr) __get_free_page(GFP_KERNEL) | 32 | #define get_user_page(vaddr) __get_free_page(GFP_KERNEL) |
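The page.h change swaps the __ASSEMBLY__ conditional for _AC() from <linux/const.h>, which attaches the integer-constant suffix only when the header is seen by the C compiler, so one definition serves both C and assembler. A simplified reconstruction of how that macro behaves (not the verbatim header):

/* Simplified reconstruction of <linux/const.h> */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: bare constant */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C: suffixed constant, e.g. 1UL */
#endif

#define PAGE_SHIFT	12
#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)	/* 4096UL in C, 4096 in asm */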
diff --git a/include/asm-m68k/processor.h b/include/asm-m68k/processor.h index 8455f778b601..4453ec379c5d 100644 --- a/include/asm-m68k/processor.h +++ b/include/asm-m68k/processor.h | |||
@@ -38,12 +38,8 @@ static inline void wrusp(unsigned long usp) | |||
38 | #ifndef CONFIG_SUN3 | 38 | #ifndef CONFIG_SUN3 |
39 | #define TASK_SIZE (0xF0000000UL) | 39 | #define TASK_SIZE (0xF0000000UL) |
40 | #else | 40 | #else |
41 | #ifdef __ASSEMBLY__ | ||
42 | #define TASK_SIZE (0x0E000000) | ||
43 | #else | ||
44 | #define TASK_SIZE (0x0E000000UL) | 41 | #define TASK_SIZE (0x0E000000UL) |
45 | #endif | 42 | #endif |
46 | #endif | ||
47 | 43 | ||
48 | /* This decides where the kernel will search for a free chunk of vm | 44 | /* This decides where the kernel will search for a free chunk of vm |
49 | * space during mmap's. | 45 | * space during mmap's. |
diff --git a/include/asm-m68k/termbits.h b/include/asm-m68k/termbits.h index 0e520f328f53..8c14170996bb 100644 --- a/include/asm-m68k/termbits.h +++ b/include/asm-m68k/termbits.h | |||
@@ -141,6 +141,7 @@ struct ktermios { | |||
141 | #define HUPCL 0002000 | 141 | #define HUPCL 0002000 |
142 | #define CLOCAL 0004000 | 142 | #define CLOCAL 0004000 |
143 | #define CBAUDEX 0010000 | 143 | #define CBAUDEX 0010000 |
144 | #define BOTHER 0010000 | ||
144 | #define B57600 0010001 | 145 | #define B57600 0010001 |
145 | #define B115200 0010002 | 146 | #define B115200 0010002 |
146 | #define B230400 0010003 | 147 | #define B230400 0010003 |
@@ -156,10 +157,12 @@ struct ktermios { | |||
156 | #define B3000000 0010015 | 157 | #define B3000000 0010015 |
157 | #define B3500000 0010016 | 158 | #define B3500000 0010016 |
158 | #define B4000000 0010017 | 159 | #define B4000000 0010017 |
159 | #define CIBAUD 002003600000 /* input baud rate (not used) */ | 160 | #define CIBAUD 002003600000 /* input baud rate */ |
160 | #define CMSPAR 010000000000 /* mark or space (stick) parity */ | 161 | #define CMSPAR 010000000000 /* mark or space (stick) parity */ |
161 | #define CRTSCTS 020000000000 /* flow control */ | 162 | #define CRTSCTS 020000000000 /* flow control */ |
162 | 163 | ||
164 | #define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ | ||
165 | |||
163 | /* c_lflag bits */ | 166 | /* c_lflag bits */ |
164 | #define ISIG 0000001 | 167 | #define ISIG 0000001 |
165 | #define ICANON 0000002 | 168 | #define ICANON 0000002 |
diff --git a/include/asm-m68k/termios.h b/include/asm-m68k/termios.h index 00edabd76168..0823032e4045 100644 --- a/include/asm-m68k/termios.h +++ b/include/asm-m68k/termios.h | |||
@@ -82,8 +82,10 @@ struct termio { | |||
82 | copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ | 82 | copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ |
83 | }) | 83 | }) |
84 | 84 | ||
85 | #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) | 85 | #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) |
86 | #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) | 86 | #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) |
87 | #define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) | ||
88 | #define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) | ||
87 | 89 | ||
88 | #endif /* __KERNEL__ */ | 90 | #endif /* __KERNEL__ */ |
89 | 91 | ||
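Taken together, the TCGETS2/TCSETS2 ioctls, BOTHER/IBSHIFT and the termios2 copy helpers give m68k the arbitrary-baud-rate interface other architectures already had. A sketch of how userspace would request a non-standard rate through it; the device path and rate are just examples:

#include <asm/termbits.h>   /* struct termios2, BOTHER, CBAUD */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Request an arbitrary baud rate via the termios2 interface. */
static int set_custom_baud(int fd, int baud)
{
	struct termios2 tio;

	if (ioctl(fd, TCGETS2, &tio) < 0)
		return -1;
	tio.c_cflag &= ~CBAUD;
	tio.c_cflag |= BOTHER;      /* speed comes from c_ispeed/c_ospeed */
	tio.c_ispeed = baud;
	tio.c_ospeed = baud;
	return ioctl(fd, TCSETS2, &tio);
}

int main(void)
{
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY); /* example device */

	if (fd < 0 || set_custom_baud(fd, 250000) < 0)
		perror("custom baud");
	if (fd >= 0)
		close(fd);
	return 0;
}

With the CIBAUD bits left at zero the input speed follows the output speed, so setting c_ospeed is usually enough.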
diff --git a/include/asm-m68knommu/pgtable.h b/include/asm-m68knommu/pgtable.h index e1e6a1d2333a..46251016e821 100644 --- a/include/asm-m68knommu/pgtable.h +++ b/include/asm-m68knommu/pgtable.h | |||
@@ -65,4 +65,6 @@ extern unsigned int kobjsize(const void *objp); | |||
65 | #define VMALLOC_START 0 | 65 | #define VMALLOC_START 0 |
66 | #define VMALLOC_END 0xffffffff | 66 | #define VMALLOC_END 0xffffffff |
67 | 67 | ||
68 | #include <asm-generic/pgtable.h> | ||
69 | |||
68 | #endif /* _M68KNOMMU_PGTABLE_H */ | 70 | #endif /* _M68KNOMMU_PGTABLE_H */ |
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h index 506ad20c18f8..8bdb16bfe5fb 100644 --- a/include/linux/kbd_kern.h +++ b/include/linux/kbd_kern.h | |||
@@ -161,4 +161,7 @@ static inline void con_schedule_flip(struct tty_struct *t) | |||
161 | schedule_delayed_work(&t->buf.work, 0); | 161 | schedule_delayed_work(&t->buf.work, 0); |
162 | } | 162 | } |
163 | 163 | ||
164 | /* mac_hid.c */ | ||
165 | extern int mac_hid_mouse_emulate_buttons(int, unsigned int, int); | ||
166 | |||
164 | #endif | 167 | #endif |
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index e147cf50529f..5bdd656e88cf 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h | |||
@@ -166,7 +166,7 @@ extern enum zone_type policy_zone; | |||
166 | 166 | ||
167 | static inline void check_highest_zone(enum zone_type k) | 167 | static inline void check_highest_zone(enum zone_type k) |
168 | { | 168 | { |
169 | if (k > policy_zone) | 169 | if (k > policy_zone && k != ZONE_MOVABLE) |
170 | policy_zone = k; | 170 | policy_zone = k; |
171 | } | 171 | } |
172 | 172 | ||
diff --git a/include/linux/mm.h b/include/linux/mm.h index 655094dc9440..1692dd6cb915 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1042,7 +1042,7 @@ static inline void vma_nonlinear_insert(struct vm_area_struct *vma, | |||
1042 | } | 1042 | } |
1043 | 1043 | ||
1044 | /* mmap.c */ | 1044 | /* mmap.c */ |
1045 | extern int __vm_enough_memory(long pages, int cap_sys_admin); | 1045 | extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); |
1046 | extern void vma_adjust(struct vm_area_struct *vma, unsigned long start, | 1046 | extern void vma_adjust(struct vm_area_struct *vma, unsigned long start, |
1047 | unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert); | 1047 | unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert); |
1048 | extern struct vm_area_struct *vma_merge(struct mm_struct *, | 1048 | extern struct vm_area_struct *vma_merge(struct mm_struct *, |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 3ea68cd3b61f..4e5627379b09 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -410,6 +410,24 @@ struct zonelist { | |||
410 | #endif | 410 | #endif |
411 | }; | 411 | }; |
412 | 412 | ||
413 | #ifdef CONFIG_NUMA | ||
414 | /* | ||
415 | * Only custom zonelists like MPOL_BIND need to be filtered as part of | ||
416 | * policies. As described in the comment for struct zonelist_cache, these | ||
417 | * zonelists will not have a zlcache so zlcache_ptr will not be set. Use | ||
418 | * that to determine if the zonelist needs to be filtered or not. | ||
419 | */ | ||
420 | static inline int alloc_should_filter_zonelist(struct zonelist *zonelist) | ||
421 | { | ||
422 | return !zonelist->zlcache_ptr; | ||
423 | } | ||
424 | #else | ||
425 | static inline int alloc_should_filter_zonelist(struct zonelist *zonelist) | ||
426 | { | ||
427 | return 0; | ||
428 | } | ||
429 | #endif /* CONFIG_NUMA */ | ||
430 | |||
413 | #ifdef CONFIG_ARCH_POPULATES_NODE_MAP | 431 | #ifdef CONFIG_ARCH_POPULATES_NODE_MAP |
414 | struct node_active_region { | 432 | struct node_active_region { |
415 | unsigned long start_pfn; | 433 | unsigned long start_pfn; |
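alloc_should_filter_zonelist() above lets the allocator skip nodemask filtering entirely for the standard per-node zonelists and apply it only to the custom MPOL_BIND ones, which carry no zlcache. A rough sketch of a caller, assuming this kernel generation's NULL-terminated zones[] array; zone_allowed() and try_zone() are illustrative stand-ins, not real kernel functions:

#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mmzone.h>

/* Sketch only: filter each candidate zone against the policy nodemask
 * when, and only when, the zonelist is a custom (MPOL_BIND style) one. */
static struct page *sketch_alloc(struct zonelist *zonelist, gfp_t gfp, int order)
{
	int filter = alloc_should_filter_zonelist(zonelist);
	struct zone **z;
	struct page *page;

	for (z = zonelist->zones; *z; z++) {
		if (filter && !zone_allowed(*z))	/* zone_allowed(): illustrative */
			continue;			/* node not in the policy */
		page = try_zone(*z, gfp, order);	/* try_zone(): illustrative */
		if (page)
			return page;
	}
	return NULL;
}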
diff --git a/include/linux/security.h b/include/linux/security.h index c11dc8aa0351..1a15526e9f67 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -54,7 +54,7 @@ extern int cap_inode_removexattr(struct dentry *dentry, char *name); | |||
54 | extern int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags); | 54 | extern int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags); |
55 | extern void cap_task_reparent_to_init (struct task_struct *p); | 55 | extern void cap_task_reparent_to_init (struct task_struct *p); |
56 | extern int cap_syslog (int type); | 56 | extern int cap_syslog (int type); |
57 | extern int cap_vm_enough_memory (long pages); | 57 | extern int cap_vm_enough_memory (struct mm_struct *mm, long pages); |
58 | 58 | ||
59 | struct msghdr; | 59 | struct msghdr; |
60 | struct sk_buff; | 60 | struct sk_buff; |
@@ -1125,6 +1125,7 @@ struct request_sock; | |||
1125 | * Return 0 if permission is granted. | 1125 | * Return 0 if permission is granted. |
1126 | * @vm_enough_memory: | 1126 | * @vm_enough_memory: |
1127 | * Check permissions for allocating a new virtual mapping. | 1127 | * Check permissions for allocating a new virtual mapping. |
1128 | * @mm contains the mm struct it is being added to. | ||
1128 | * @pages contains the number of pages. | 1129 | * @pages contains the number of pages. |
1129 | * Return 0 if permission is granted. | 1130 | * Return 0 if permission is granted. |
1130 | * | 1131 | * |
@@ -1169,7 +1170,7 @@ struct security_operations { | |||
1169 | int (*quota_on) (struct dentry * dentry); | 1170 | int (*quota_on) (struct dentry * dentry); |
1170 | int (*syslog) (int type); | 1171 | int (*syslog) (int type); |
1171 | int (*settime) (struct timespec *ts, struct timezone *tz); | 1172 | int (*settime) (struct timespec *ts, struct timezone *tz); |
1172 | int (*vm_enough_memory) (long pages); | 1173 | int (*vm_enough_memory) (struct mm_struct *mm, long pages); |
1173 | 1174 | ||
1174 | int (*bprm_alloc_security) (struct linux_binprm * bprm); | 1175 | int (*bprm_alloc_security) (struct linux_binprm * bprm); |
1175 | void (*bprm_free_security) (struct linux_binprm * bprm); | 1176 | void (*bprm_free_security) (struct linux_binprm * bprm); |
@@ -1469,10 +1470,14 @@ static inline int security_settime(struct timespec *ts, struct timezone *tz) | |||
1469 | return security_ops->settime(ts, tz); | 1470 | return security_ops->settime(ts, tz); |
1470 | } | 1471 | } |
1471 | 1472 | ||
1472 | |||
1473 | static inline int security_vm_enough_memory(long pages) | 1473 | static inline int security_vm_enough_memory(long pages) |
1474 | { | 1474 | { |
1475 | return security_ops->vm_enough_memory(pages); | 1475 | return security_ops->vm_enough_memory(current->mm, pages); |
1476 | } | ||
1477 | |||
1478 | static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) | ||
1479 | { | ||
1480 | return security_ops->vm_enough_memory(mm, pages); | ||
1476 | } | 1481 | } |
1477 | 1482 | ||
1478 | static inline int security_bprm_alloc (struct linux_binprm *bprm) | 1483 | static inline int security_bprm_alloc (struct linux_binprm *bprm) |
@@ -2219,7 +2224,12 @@ static inline int security_settime(struct timespec *ts, struct timezone *tz) | |||
2219 | 2224 | ||
2220 | static inline int security_vm_enough_memory(long pages) | 2225 | static inline int security_vm_enough_memory(long pages) |
2221 | { | 2226 | { |
2222 | return cap_vm_enough_memory(pages); | 2227 | return cap_vm_enough_memory(current->mm, pages); |
2228 | } | ||
2229 | |||
2230 | static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) | ||
2231 | { | ||
2232 | return cap_vm_enough_memory(mm, pages); | ||
2223 | } | 2233 | } |
2224 | 2234 | ||
2225 | static inline int security_bprm_alloc (struct linux_binprm *bprm) | 2235 | static inline int security_bprm_alloc (struct linux_binprm *bprm) |
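The hook now names the mm being charged, and the new security_vm_enough_memory_mm() wrapper lets code that builds or manipulates another task's address space charge that mm explicitly instead of silently charging current->mm. A hedged sketch of a caller; the surrounding function is invented for illustration:

#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/security.h>

/* Illustrative only: charge a mapping of "pages" pages against the mm we
 * are building, which may not be current->mm. */
static int sketch_charge_mapping(struct mm_struct *mm, long pages)
{
	if (security_vm_enough_memory_mm(mm, pages))
		return -ENOMEM;		/* overcommit policy refused the charge */
	mm->total_vm += pages;
	return 0;
}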
diff --git a/include/linux/selection.h b/include/linux/selection.h index ed3408b400f1..f9457861937c 100644 --- a/include/linux/selection.h +++ b/include/linux/selection.h | |||
@@ -10,6 +10,8 @@ | |||
10 | #include <linux/tiocl.h> | 10 | #include <linux/tiocl.h> |
11 | #include <linux/vt_buffer.h> | 11 | #include <linux/vt_buffer.h> |
12 | 12 | ||
13 | struct tty_struct; | ||
14 | |||
13 | extern struct vc_data *sel_cons; | 15 | extern struct vc_data *sel_cons; |
14 | 16 | ||
15 | extern void clear_selection(void); | 17 | extern void clear_selection(void); |
diff --git a/include/linux/serial_reg.h b/include/linux/serial_reg.h index 1c5ed7d92b0f..96c0d93fc2ca 100644 --- a/include/linux/serial_reg.h +++ b/include/linux/serial_reg.h | |||
@@ -118,6 +118,7 @@ | |||
118 | #define UART_LSR_PE 0x04 /* Parity error indicator */ | 118 | #define UART_LSR_PE 0x04 /* Parity error indicator */ |
119 | #define UART_LSR_OE 0x02 /* Overrun error indicator */ | 119 | #define UART_LSR_OE 0x02 /* Overrun error indicator */ |
120 | #define UART_LSR_DR 0x01 /* Receiver data ready */ | 120 | #define UART_LSR_DR 0x01 /* Receiver data ready */ |
121 | #define UART_LSR_BRK_ERROR_BITS 0x1E /* BI, FE, PE, OE bits */ | ||
121 | 122 | ||
122 | #define UART_MSR 6 /* In: Modem Status Register */ | 123 | #define UART_MSR 6 /* In: Modem Status Register */ |
123 | #define UART_MSR_DCD 0x80 /* Data Carrier Detect */ | 124 | #define UART_MSR_DCD 0x80 /* Data Carrier Detect */ |
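UART_LSR_BRK_ERROR_BITS collects the break, framing, parity and overrun bits of
the line status register into one mask (0x10 | 0x08 | 0x04 | 0x02 = 0x1E), so a
driver can ask "did this character arrive with any line error?" in a single
test. A hedged fragment that takes an already-read LSR value:

	#include <linux/serial_reg.h>

	/* True if the LSR value reports a break, framing, parity or
	 * overrun condition for the received character. */
	static int lsr_has_line_error(unsigned char lsr)
	{
		return (lsr & UART_LSR_BRK_ERROR_BITS) != 0;
	}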
diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 3401293359e8..04f3ffb8d9d4 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c | |||
@@ -2023,7 +2023,7 @@ int __audit_signal_info(int sig, struct task_struct *t) | |||
2023 | axp->d.next = ctx->aux_pids; | 2023 | axp->d.next = ctx->aux_pids; |
2024 | ctx->aux_pids = (void *)axp; | 2024 | ctx->aux_pids = (void *)axp; |
2025 | } | 2025 | } |
2026 | BUG_ON(axp->pid_count > AUDIT_AUX_PIDS); | 2026 | BUG_ON(axp->pid_count >= AUDIT_AUX_PIDS); |
2027 | 2027 | ||
2028 | axp->target_pid[axp->pid_count] = t->tgid; | 2028 | axp->target_pid[axp->pid_count] = t->tgid; |
2029 | selinux_get_task_sid(t, &axp->target_sid[axp->pid_count]); | 2029 | selinux_get_task_sid(t, &axp->target_sid[axp->pid_count]); |
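The auditsc.c change tightens an off-by-one: axp->pid_count is used as an index
into the fixed-size target_pid[]/target_sid[] arrays on the very next lines, so
the assertion has to trip once pid_count has already reached AUDIT_AUX_PIDS, not
one element later. The general shape of the check, with hypothetical names:

	#include <linux/kernel.h>

	#define NSLOTS 8			/* illustrative array size */

	static int slots[NSLOTS];

	static void record_value(int *count, int value)
	{
		/* '>' here would permit one out-of-bounds write before firing */
		BUG_ON(*count >= NSLOTS);
		slots[(*count)++] = value;
	}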
diff --git a/kernel/futex.c b/kernel/futex.c index 3415e9ad1391..e8935b195e88 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -1670,6 +1670,7 @@ pi_faulted: | |||
1670 | attempt); | 1670 | attempt); |
1671 | if (ret) | 1671 | if (ret) |
1672 | goto out; | 1672 | goto out; |
1673 | uval = 0; | ||
1673 | goto retry_unlocked; | 1674 | goto retry_unlocked; |
1674 | } | 1675 | } |
1675 | 1676 | ||
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 203a518b6f14..853aefbd184b 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -462,7 +462,9 @@ void free_irq(unsigned int irq, void *dev_id) | |||
462 | * We do this after actually deregistering it, to make sure that | 462 | * We do this after actually deregistering it, to make sure that |
463 | * a 'real' IRQ doesn't run in parallel with our fake | 463 | * a 'real' IRQ doesn't run in parallel with our fake |
464 | */ | 464 | */ |
465 | local_irq_save(flags); | ||
465 | handler(irq, dev_id); | 466 | handler(irq, dev_id); |
467 | local_irq_restore(flags); | ||
466 | } | 468 | } |
467 | #endif | 469 | #endif |
468 | } | 470 | } |
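The DEBUG_SHIRQ code in free_irq() fires one last, fake call into the handler to
make sure shared-IRQ drivers survive a late interrupt. Real handlers run with
local interrupts disabled, so the fake invocation now disables them too;
otherwise a handler that, for instance, takes a lock it elsewhere takes with
spin_lock_irqsave() would see an environment it was never written for. The
pattern, as a small sketch (the handler and dev_id are whatever was registered):

	#include <linux/interrupt.h>
	#include <linux/irqflags.h>

	/* Call an irq handler from process context while reproducing the
	 * environment it normally runs in: IRQs off on this CPU. */
	static void call_handler_as_if_irq(irq_handler_t handler,
					   unsigned int irq, void *dev_id)
	{
		unsigned long flags;

		local_irq_save(flags);
		handler(irq, dev_id);
		local_irq_restore(flags);
	}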
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 55b3761edaa9..7a15afb73ed0 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c | |||
@@ -547,9 +547,9 @@ sys_timer_create(const clockid_t which_clock, | |||
547 | new_timer->it_process = process; | 547 | new_timer->it_process = process; |
548 | list_add(&new_timer->list, | 548 | list_add(&new_timer->list, |
549 | &process->signal->posix_timers); | 549 | &process->signal->posix_timers); |
550 | spin_unlock_irqrestore(&process->sighand->siglock, flags); | ||
551 | if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID)) | 550 | if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID)) |
552 | get_task_struct(process); | 551 | get_task_struct(process); |
552 | spin_unlock_irqrestore(&process->sighand->siglock, flags); | ||
553 | } else { | 553 | } else { |
554 | spin_unlock_irqrestore(&process->sighand->siglock, flags); | 554 | spin_unlock_irqrestore(&process->sighand->siglock, flags); |
555 | process = NULL; | 555 | process = NULL; |
@@ -605,13 +605,14 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags) | |||
605 | timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id); | 605 | timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id); |
606 | if (timr) { | 606 | if (timr) { |
607 | spin_lock(&timr->it_lock); | 607 | spin_lock(&timr->it_lock); |
608 | spin_unlock(&idr_lock); | ||
609 | 608 | ||
610 | if ((timr->it_id != timer_id) || !(timr->it_process) || | 609 | if ((timr->it_id != timer_id) || !(timr->it_process) || |
611 | timr->it_process->tgid != current->tgid) { | 610 | timr->it_process->tgid != current->tgid) { |
612 | unlock_timer(timr, *flags); | 611 | spin_unlock(&timr->it_lock); |
612 | spin_unlock_irqrestore(&idr_lock, *flags); | ||
613 | timr = NULL; | 613 | timr = NULL; |
614 | } | 614 | } else |
615 | spin_unlock(&idr_lock); | ||
615 | } else | 616 | } else |
616 | spin_unlock_irqrestore(&idr_lock, *flags); | 617 | spin_unlock_irqrestore(&idr_lock, *flags); |
617 | 618 | ||
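Both posix-timers.c hunks are about ordering against ->siglock and idr_lock. In
sys_timer_create(), the reference on the target task is now taken while
->sighand->siglock is still held, closing the window in which the task could be
released between dropping the lock and calling get_task_struct(). In
lock_timer(), idr_lock is now held across the validity check and released
explicitly on both branches, instead of being dropped before the timer has been
verified. The underlying pattern, sketched with hypothetical names:

	#include <linux/spinlock.h>
	#include <asm/atomic.h>

	struct pinned_obj {
		atomic_t refcnt;
		/* ... */
	};

	/* Pin an object found under 'table_lock' before dropping that lock;
	 * once the lock is gone, nothing guarantees the object stays around
	 * long enough for the reference to be taken. */
	static struct pinned_obj *lookup_and_pin(spinlock_t *table_lock,
						 struct pinned_obj *(*lookup)(void))
	{
		struct pinned_obj *obj;

		spin_lock(table_lock);
		obj = lookup();			/* only stable while locked */
		if (obj)
			atomic_inc(&obj->refcnt);	/* pin before unlocking */
		spin_unlock(table_lock);
		return obj;
	}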
diff --git a/kernel/signal.c b/kernel/signal.c index b27c01a66448..ad63109e413c 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -378,7 +378,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | |||
378 | /* We only dequeue private signals from ourselves, we don't let | 378 | /* We only dequeue private signals from ourselves, we don't let |
379 | * signalfd steal them | 379 | * signalfd steal them |
380 | */ | 380 | */ |
381 | if (tsk == current) | 381 | if (likely(tsk == current)) |
382 | signr = __dequeue_signal(&tsk->pending, mask, info); | 382 | signr = __dequeue_signal(&tsk->pending, mask, info); |
383 | if (!signr) { | 383 | if (!signr) { |
384 | signr = __dequeue_signal(&tsk->signal->shared_pending, | 384 | signr = __dequeue_signal(&tsk->signal->shared_pending, |
@@ -425,7 +425,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | |||
425 | if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) | 425 | if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) |
426 | tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; | 426 | tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; |
427 | } | 427 | } |
428 | if ( signr && | 428 | if (signr && likely(tsk == current) && |
429 | ((info->si_code & __SI_MASK) == __SI_TIMER) && | 429 | ((info->si_code & __SI_MASK) == __SI_TIMER) && |
430 | info->si_sys_private){ | 430 | info->si_sys_private){ |
431 | /* | 431 | /* |
diff --git a/lib/Kconfig b/lib/Kconfig index e5c2c514174a..ba3d104994d9 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -138,4 +138,7 @@ config HAS_DMA | |||
138 | depends on !NO_DMA | 138 | depends on !NO_DMA |
139 | default y | 139 | default y |
140 | 140 | ||
141 | config CHECK_SIGNATURE | ||
142 | bool | ||
143 | |||
141 | endmenu | 144 | endmenu |
diff --git a/lib/Makefile b/lib/Makefile index d9e5f1cd0bfb..6b0ba8cf4e5f 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -21,7 +21,8 @@ CFLAGS_kobject_uevent.o += -DDEBUG | |||
21 | endif | 21 | endif |
22 | 22 | ||
23 | obj-$(CONFIG_GENERIC_IOMAP) += iomap.o | 23 | obj-$(CONFIG_GENERIC_IOMAP) += iomap.o |
24 | obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o check_signature.o | 24 | obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o |
25 | obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o | ||
25 | obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o | 26 | obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o |
26 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o | 27 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o |
27 | lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o | 28 | lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o |
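check_signature() used to be built for every configuration with HAS_IOMEM,
although only a handful of legacy probe routines call it. The new bool
CHECK_SIGNATURE symbol has no prompt, so it only becomes 'y' when a driver's
Kconfig entry selects it, and lib/check_signature.o is compiled only then. A
hedged example of a caller (the driver and signature are made up; such a driver
would also add "select CHECK_SIGNATURE" to its Kconfig entry):

	#include <linux/io.h>

	/* Look for an option-ROM style signature in already-mapped I/O
	 * memory; check_signature() returns 1 on a byte-for-byte match. */
	static int foo_card_present(void __iomem *base)
	{
		static const unsigned char sig[] = "FOOCARD";

		return check_signature(base, sig, sizeof(sig) - 1);
	}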
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index d7ca59d66c59..de4cf458d6e1 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -643,7 +643,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
643 | spin_unlock(&mm->page_table_lock); | 643 | spin_unlock(&mm->page_table_lock); |
644 | ret = hugetlb_fault(mm, vma, vaddr, 0); | 644 | ret = hugetlb_fault(mm, vma, vaddr, 0); |
645 | spin_lock(&mm->page_table_lock); | 645 | spin_lock(&mm->page_table_lock); |
646 | if (!(ret & VM_FAULT_MAJOR)) | 646 | if (!(ret & VM_FAULT_ERROR)) |
647 | continue; | 647 | continue; |
648 | 648 | ||
649 | remainder = 0; | 649 | remainder = 0; |
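Since the fault paths now return a bit mask, VM_FAULT_MAJOR only means "a major
fault was serviced", while genuine failures are reported through the bits
grouped under VM_FAULT_ERROR. follow_hugetlb_page() should therefore keep
looping on anything that is not an error. A small sketch of reading such a
return value:

	#include <linux/mm.h>

	/* True only when the fault actually failed; a successful major
	 * fault sets VM_FAULT_MAJOR but none of the error bits. */
	static inline int fault_failed(unsigned int fault_ret)
	{
		return fault_ret & VM_FAULT_ERROR;
	}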
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 71b84b45154a..172abffeb2e3 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -149,7 +149,7 @@ static struct zonelist *bind_zonelist(nodemask_t *nodes) | |||
149 | lower zones etc. Avoid empty zones because the memory allocator | 149 | lower zones etc. Avoid empty zones because the memory allocator |
150 | doesn't like them. If you implement node hot removal you | 150 | doesn't like them. If you implement node hot removal you |
151 | have to fix that. */ | 151 | have to fix that. */ |
152 | k = policy_zone; | 152 | k = MAX_NR_ZONES - 1; |
153 | while (1) { | 153 | while (1) { |
154 | for_each_node_mask(nd, *nodes) { | 154 | for_each_node_mask(nd, *nodes) { |
155 | struct zone *z = &NODE_DATA(nd)->node_zones[k]; | 155 | struct zone *z = &NODE_DATA(nd)->node_zones[k]; |
@@ -93,7 +93,7 @@ atomic_t vm_committed_space = ATOMIC_INIT(0); | |||
93 | * Note this is a helper function intended to be used by LSMs which | 93 | * Note this is a helper function intended to be used by LSMs which |
94 | * wish to use this logic. | 94 | * wish to use this logic. |
95 | */ | 95 | */ |
96 | int __vm_enough_memory(long pages, int cap_sys_admin) | 96 | int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) |
97 | { | 97 | { |
98 | unsigned long free, allowed; | 98 | unsigned long free, allowed; |
99 | 99 | ||
@@ -166,7 +166,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin) | |||
166 | 166 | ||
167 | /* Don't let a single process grow too big: | 167 | /* Don't let a single process grow too big: |
168 | leave 3% of the size of this process for other processes */ | 168 | leave 3% of the size of this process for other processes */ |
169 | allowed -= current->mm->total_vm / 32; | 169 | allowed -= mm->total_vm / 32; |
170 | 170 | ||
171 | /* | 171 | /* |
172 | * cast `allowed' as a signed long because vm_committed_space | 172 | * cast `allowed' as a signed long because vm_committed_space |
@@ -2077,7 +2077,7 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) | |||
2077 | if (__vma && __vma->vm_start < vma->vm_end) | 2077 | if (__vma && __vma->vm_start < vma->vm_end) |
2078 | return -ENOMEM; | 2078 | return -ENOMEM; |
2079 | if ((vma->vm_flags & VM_ACCOUNT) && | 2079 | if ((vma->vm_flags & VM_ACCOUNT) && |
2080 | security_vm_enough_memory(vma_pages(vma))) | 2080 | security_vm_enough_memory_mm(mm, vma_pages(vma))) |
2081 | return -ENOMEM; | 2081 | return -ENOMEM; |
2082 | vma_link(mm, vma, prev, rb_link, rb_parent); | 2082 | vma_link(mm, vma, prev, rb_link, rb_parent); |
2083 | return 0; | 2083 | return 0; |
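The __vm_enough_memory()/insert_vm_struct() hunks here live in mm/mmap.c: the
overcommit helper now charges its "leave about 3% for other processes"
reservation against the mm it was handed instead of hard-coding current->mm,
and insert_vm_struct() switches to security_vm_enough_memory_mm() because the
mm it inserts into is not necessarily the caller's. A rough illustration of
that reservation (purely arithmetic, helper name hypothetical):

	#include <linux/sched.h>

	/* 1/32 of this mm's mapped pages (about 3%) is withheld from the
	 * 'allowed' commit budget. E.g. total_vm = 262144 pages (1 GiB at
	 * 4 KiB pages) withholds 8192 pages, i.e. 32 MiB. */
	static unsigned long reserved_for_others(struct mm_struct *mm)
	{
		return mm->total_vm / 32;
	}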
diff --git a/mm/nommu.c b/mm/nommu.c index 9eef6a398555..8ed0cb43118a 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -1270,7 +1270,7 @@ EXPORT_SYMBOL(get_unmapped_area); | |||
1270 | * Note this is a helper function intended to be used by LSMs which | 1270 | * Note this is a helper function intended to be used by LSMs which |
1271 | * wish to use this logic. | 1271 | * wish to use this logic. |
1272 | */ | 1272 | */ |
1273 | int __vm_enough_memory(long pages, int cap_sys_admin) | 1273 | int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) |
1274 | { | 1274 | { |
1275 | unsigned long free, allowed; | 1275 | unsigned long free, allowed; |
1276 | 1276 | ||
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3da85b81dabb..6427653023aa 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1157,6 +1157,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, | |||
1157 | nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ | 1157 | nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ |
1158 | int zlc_active = 0; /* set if using zonelist_cache */ | 1158 | int zlc_active = 0; /* set if using zonelist_cache */ |
1159 | int did_zlc_setup = 0; /* just call zlc_setup() one time */ | 1159 | int did_zlc_setup = 0; /* just call zlc_setup() one time */ |
1160 | enum zone_type highest_zoneidx = -1; /* Gets set for policy zonelists */ | ||
1160 | 1161 | ||
1161 | zonelist_scan: | 1162 | zonelist_scan: |
1162 | /* | 1163 | /* |
@@ -1166,6 +1167,18 @@ zonelist_scan: | |||
1166 | z = zonelist->zones; | 1167 | z = zonelist->zones; |
1167 | 1168 | ||
1168 | do { | 1169 | do { |
1170 | /* | ||
1171 | * In NUMA, this could be a policy zonelist which contains | ||
1172 | * zones that may not be allowed by the current gfp_mask. | ||
1173 | * Check the zone is allowed by the current flags | ||
1174 | */ | ||
1175 | if (unlikely(alloc_should_filter_zonelist(zonelist))) { | ||
1176 | if (highest_zoneidx == -1) | ||
1177 | highest_zoneidx = gfp_zone(gfp_mask); | ||
1178 | if (zone_idx(*z) > highest_zoneidx) | ||
1179 | continue; | ||
1180 | } | ||
1181 | |||
1169 | if (NUMA_BUILD && zlc_active && | 1182 | if (NUMA_BUILD && zlc_active && |
1170 | !zlc_zone_worth_trying(zonelist, z, allowednodes)) | 1183 | !zlc_zone_worth_trying(zonelist, z, allowednodes)) |
1171 | continue; | 1184 | continue; |
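A policy zonelist built for MPOL_BIND is constructed once per policy and can
therefore contain zones higher than a particular allocation's gfp_mask allows.
The new check computes gfp_zone(gfp_mask) lazily the first time a filtered
zonelist is seen and then skips any zone whose index exceeds it. The rule being
applied, as a standalone sketch:

	#include <linux/gfp.h>
	#include <linux/mmzone.h>

	/* A zone may be used for this allocation only if its index does not
	 * exceed the highest zone the gfp flags permit (e.g. GFP_KERNEL maps
	 * to ZONE_NORMAL, so ZONE_HIGHMEM zones are skipped). */
	static int zone_allowed_by_gfp(struct zone *z, gfp_t gfp_mask)
	{
		return zone_idx(z) <= gfp_zone(gfp_mask);
	}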
@@ -883,6 +883,7 @@ static void __slab_error(const char *function, struct kmem_cache *cachep, | |||
883 | */ | 883 | */ |
884 | 884 | ||
885 | static int use_alien_caches __read_mostly = 1; | 885 | static int use_alien_caches __read_mostly = 1; |
886 | static int numa_platform __read_mostly = 1; | ||
886 | static int __init noaliencache_setup(char *s) | 887 | static int __init noaliencache_setup(char *s) |
887 | { | 888 | { |
888 | use_alien_caches = 0; | 889 | use_alien_caches = 0; |
@@ -1399,8 +1400,10 @@ void __init kmem_cache_init(void) | |||
1399 | int order; | 1400 | int order; |
1400 | int node; | 1401 | int node; |
1401 | 1402 | ||
1402 | if (num_possible_nodes() == 1) | 1403 | if (num_possible_nodes() == 1) { |
1403 | use_alien_caches = 0; | 1404 | use_alien_caches = 0; |
1405 | numa_platform = 0; | ||
1406 | } | ||
1404 | 1407 | ||
1405 | for (i = 0; i < NUM_INIT_LISTS; i++) { | 1408 | for (i = 0; i < NUM_INIT_LISTS; i++) { |
1406 | kmem_list3_init(&initkmem_list3[i]); | 1409 | kmem_list3_init(&initkmem_list3[i]); |
@@ -3558,7 +3561,14 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp) | |||
3558 | check_irq_off(); | 3561 | check_irq_off(); |
3559 | objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); | 3562 | objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); |
3560 | 3563 | ||
3561 | if (cache_free_alien(cachep, objp)) | 3564 | /* |
3565 | * Skip calling cache_free_alien() when the platform is not numa. | ||
3566 | * This will avoid cache misses that happen while accessing slabp (which | ||
3567 | * is per page memory reference) to get nodeid. Instead use a global | ||
3568 | * variable to skip the call, which is mostly likely to be present in | ||
3569 | * the cache. | ||
3570 | */ | ||
3571 | if (numa_platform && cache_free_alien(cachep, objp)) | ||
3562 | return; | 3572 | return; |
3563 | 3573 | ||
3564 | if (likely(ac->avail < ac->limit)) { | 3574 | if (likely(ac->avail < ac->limit)) { |
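These hunks are in mm/slab.c: on systems with a single possible node,
kmem_cache_init() clears a numa_platform flag, and __cache_free() tests that
__read_mostly global before calling cache_free_alien(), so the common free path
on non-NUMA machines no longer has to touch the per-page slab metadata just to
learn the object's node. The same idea in a stripped-down, hypothetical form:

	#include <linux/cache.h>

	/* Set once during init, read on every free: __read_mostly keeps the
	 * flag on read-mostly cache lines, so the hot-path test is a cheap
	 * load that usually hits the cache. */
	static int numa_platform __read_mostly = 1;

	/* Hypothetical stand-ins for the per-object lookup and remote-node
	 * handling that the fast path wants to avoid. */
	static int object_is_remote(void *objp) { return 0; }
	static void free_remote(void *objp) { }

	static void free_object(void *objp)
	{
		if (numa_platform && object_is_remote(objp)) {
			free_remote(objp);
			return;
		}
		/* ... local fast path ... */
	}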
@@ -1877,9 +1877,16 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag | |||
1877 | 1877 | ||
1878 | BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node)); | 1878 | BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node)); |
1879 | 1879 | ||
1880 | page = new_slab(kmalloc_caches, gfpflags | GFP_THISNODE, node); | 1880 | page = new_slab(kmalloc_caches, gfpflags, node); |
1881 | 1881 | ||
1882 | BUG_ON(!page); | 1882 | BUG_ON(!page); |
1883 | if (page_to_nid(page) != node) { | ||
1884 | printk(KERN_ERR "SLUB: Unable to allocate memory from " | ||
1885 | "node %d\n", node); | ||
1886 | printk(KERN_ERR "SLUB: Allocating a useless per node structure " | ||
1887 | "in order to be able to continue\n"); | ||
1888 | } | ||
1889 | |||
1883 | n = page->freelist; | 1890 | n = page->freelist; |
1884 | BUG_ON(!n); | 1891 | BUG_ON(!n); |
1885 | page->freelist = get_freepointer(kmalloc_caches, n); | 1892 | page->freelist = get_freepointer(kmalloc_caches, n); |
@@ -3112,7 +3119,7 @@ static int list_locations(struct kmem_cache *s, char *buf, | |||
3112 | unsigned long flags; | 3119 | unsigned long flags; |
3113 | struct page *page; | 3120 | struct page *page; |
3114 | 3121 | ||
3115 | if (!atomic_read(&n->nr_slabs)) | 3122 | if (!atomic_long_read(&n->nr_slabs)) |
3116 | continue; | 3123 | continue; |
3117 | 3124 | ||
3118 | spin_lock_irqsave(&n->list_lock, flags); | 3125 | spin_lock_irqsave(&n->list_lock, flags); |
@@ -3247,7 +3254,7 @@ static unsigned long slab_objects(struct kmem_cache *s, | |||
3247 | } | 3254 | } |
3248 | 3255 | ||
3249 | if (flags & SO_FULL) { | 3256 | if (flags & SO_FULL) { |
3250 | int full_slabs = atomic_read(&n->nr_slabs) | 3257 | int full_slabs = atomic_long_read(&n->nr_slabs) |
3251 | - per_cpu[node] | 3258 | - per_cpu[node] |
3252 | - n->nr_partial; | 3259 | - n->nr_partial; |
3253 | 3260 | ||
@@ -3283,7 +3290,7 @@ static int any_slab_objects(struct kmem_cache *s) | |||
3283 | for_each_node(node) { | 3290 | for_each_node(node) { |
3284 | struct kmem_cache_node *n = get_node(s, node); | 3291 | struct kmem_cache_node *n = get_node(s, node); |
3285 | 3292 | ||
3286 | if (n->nr_partial || atomic_read(&n->nr_slabs)) | 3293 | if (n->nr_partial || atomic_long_read(&n->nr_slabs)) |
3287 | return 1; | 3294 | return 1; |
3288 | } | 3295 | } |
3289 | return 0; | 3296 | return 0; |
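The preceding hunks belong to mm/slub.c. Besides letting
early_kmem_cache_node_alloc() fall back (with a warning) when the page did not
come from the requested node, they switch every reader of nr_slabs over to
atomic_long_read(): the counter is declared atomic_long_t, so it must use the
matching _long accessors; mixing in the plain atomic_t API is a type mismatch
and, depending on the architecture, may read only part of the value on 64-bit.
A tiny illustration with a hypothetical counter:

	#include <asm/atomic.h>

	/* Declared as atomic_long_t, so every access goes through the
	 * atomic_long_* family, never atomic_read()/atomic_inc(). */
	static atomic_long_t nr_widgets = ATOMIC_LONG_INIT(0);

	static void widget_created(void)
	{
		atomic_long_inc(&nr_widgets);
	}

	static long widgets_in_use(void)
	{
		return atomic_long_read(&nr_widgets);
	}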
diff --git a/mm/sparse.c b/mm/sparse.c index 3047bf06c1f3..239f5a720d38 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
@@ -41,6 +41,15 @@ int page_to_nid(struct page *page) | |||
41 | return section_to_node_table[page_to_section(page)]; | 41 | return section_to_node_table[page_to_section(page)]; |
42 | } | 42 | } |
43 | EXPORT_SYMBOL(page_to_nid); | 43 | EXPORT_SYMBOL(page_to_nid); |
44 | |||
45 | static void set_section_nid(unsigned long section_nr, int nid) | ||
46 | { | ||
47 | section_to_node_table[section_nr] = nid; | ||
48 | } | ||
49 | #else /* !NODE_NOT_IN_PAGE_FLAGS */ | ||
50 | static inline void set_section_nid(unsigned long section_nr, int nid) | ||
51 | { | ||
52 | } | ||
44 | #endif | 53 | #endif |
45 | 54 | ||
46 | #ifdef CONFIG_SPARSEMEM_EXTREME | 55 | #ifdef CONFIG_SPARSEMEM_EXTREME |
@@ -68,10 +77,6 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid) | |||
68 | struct mem_section *section; | 77 | struct mem_section *section; |
69 | int ret = 0; | 78 | int ret = 0; |
70 | 79 | ||
71 | #ifdef NODE_NOT_IN_PAGE_FLAGS | ||
72 | section_to_node_table[section_nr] = nid; | ||
73 | #endif | ||
74 | |||
75 | if (mem_section[root]) | 80 | if (mem_section[root]) |
76 | return -EEXIST; | 81 | return -EEXIST; |
77 | 82 | ||
@@ -148,6 +153,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end) | |||
148 | struct mem_section *ms; | 153 | struct mem_section *ms; |
149 | 154 | ||
150 | sparse_index_init(section, nid); | 155 | sparse_index_init(section, nid); |
156 | set_section_nid(section, nid); | ||
151 | 157 | ||
152 | ms = __nr_to_section(section); | 158 | ms = __nr_to_section(section); |
153 | if (!ms->section_mem_map) | 159 | if (!ms->section_mem_map) |
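The sparse.c rework replaces an #ifdef NODE_NOT_IN_PAGE_FLAGS block inside
sparse_index_init() with a set_section_nid() helper that has an empty inline
stub in the other configuration, and memory_present() now records the node id
by calling it unconditionally right after sparse_index_init(). The stub idiom,
shown with a made-up config option:

	/* Give the optional helper an empty inline stub so callers need no
	 * #ifdef of their own. */
	#ifdef CONFIG_FROB_TRACKING		/* hypothetical option */
	void frob_record_section(unsigned long section_nr, int nid);
	#else
	static inline void frob_record_section(unsigned long section_nr, int nid)
	{
	}
	#endif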
diff --git a/mm/vmscan.c b/mm/vmscan.c index d419e10e3daa..a6e65d024995 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -271,6 +271,12 @@ static void handle_write_error(struct address_space *mapping, | |||
271 | unlock_page(page); | 271 | unlock_page(page); |
272 | } | 272 | } |
273 | 273 | ||
274 | /* Request for sync pageout. */ | ||
275 | enum pageout_io { | ||
276 | PAGEOUT_IO_ASYNC, | ||
277 | PAGEOUT_IO_SYNC, | ||
278 | }; | ||
279 | |||
274 | /* possible outcome of pageout() */ | 280 | /* possible outcome of pageout() */ |
275 | typedef enum { | 281 | typedef enum { |
276 | /* failed to write page out, page is locked */ | 282 | /* failed to write page out, page is locked */ |
@@ -287,7 +293,8 @@ typedef enum { | |||
287 | * pageout is called by shrink_page_list() for each dirty page. | 293 | * pageout is called by shrink_page_list() for each dirty page. |
288 | * Calls ->writepage(). | 294 | * Calls ->writepage(). |
289 | */ | 295 | */ |
290 | static pageout_t pageout(struct page *page, struct address_space *mapping) | 296 | static pageout_t pageout(struct page *page, struct address_space *mapping, |
297 | enum pageout_io sync_writeback) | ||
291 | { | 298 | { |
292 | /* | 299 | /* |
293 | * If the page is dirty, only perform writeback if that write | 300 | * If the page is dirty, only perform writeback if that write |
@@ -346,6 +353,15 @@ static pageout_t pageout(struct page *page, struct address_space *mapping) | |||
346 | ClearPageReclaim(page); | 353 | ClearPageReclaim(page); |
347 | return PAGE_ACTIVATE; | 354 | return PAGE_ACTIVATE; |
348 | } | 355 | } |
356 | |||
357 | /* | ||
358 | * Wait on writeback if requested to. This happens when | ||
359 | * direct reclaiming a large contiguous area and the | ||
360 | * first attempt to free a range of pages fails. | ||
361 | */ | ||
362 | if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC) | ||
363 | wait_on_page_writeback(page); | ||
364 | |||
349 | if (!PageWriteback(page)) { | 365 | if (!PageWriteback(page)) { |
350 | /* synchronous write or broken a_ops? */ | 366 | /* synchronous write or broken a_ops? */ |
351 | ClearPageReclaim(page); | 367 | ClearPageReclaim(page); |
@@ -423,7 +439,8 @@ cannot_free: | |||
423 | * shrink_page_list() returns the number of reclaimed pages | 439 | * shrink_page_list() returns the number of reclaimed pages |
424 | */ | 440 | */ |
425 | static unsigned long shrink_page_list(struct list_head *page_list, | 441 | static unsigned long shrink_page_list(struct list_head *page_list, |
426 | struct scan_control *sc) | 442 | struct scan_control *sc, |
443 | enum pageout_io sync_writeback) | ||
427 | { | 444 | { |
428 | LIST_HEAD(ret_pages); | 445 | LIST_HEAD(ret_pages); |
429 | struct pagevec freed_pvec; | 446 | struct pagevec freed_pvec; |
@@ -458,8 +475,23 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
458 | if (page_mapped(page) || PageSwapCache(page)) | 475 | if (page_mapped(page) || PageSwapCache(page)) |
459 | sc->nr_scanned++; | 476 | sc->nr_scanned++; |
460 | 477 | ||
461 | if (PageWriteback(page)) | 478 | may_enter_fs = (sc->gfp_mask & __GFP_FS) || |
462 | goto keep_locked; | 479 | (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); |
480 | |||
481 | if (PageWriteback(page)) { | ||
482 | /* | ||
483 | * Synchronous reclaim is performed in two passes, | ||
484 | * first an asynchronous pass over the list to | ||
485 | * start parallel writeback, and a second synchronous | ||
486 | * pass to wait for the IO to complete. Wait here | ||
487 | * for any page for which writeback has already | ||
488 | * started. | ||
489 | */ | ||
490 | if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs) | ||
491 | wait_on_page_writeback(page); | ||
492 | else | ||
493 | goto keep_locked; | ||
494 | } | ||
463 | 495 | ||
464 | referenced = page_referenced(page, 1); | 496 | referenced = page_referenced(page, 1); |
465 | /* In active use or really unfreeable? Activate it. */ | 497 | /* In active use or really unfreeable? Activate it. */ |
@@ -478,8 +510,6 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
478 | #endif /* CONFIG_SWAP */ | 510 | #endif /* CONFIG_SWAP */ |
479 | 511 | ||
480 | mapping = page_mapping(page); | 512 | mapping = page_mapping(page); |
481 | may_enter_fs = (sc->gfp_mask & __GFP_FS) || | ||
482 | (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); | ||
483 | 513 | ||
484 | /* | 514 | /* |
485 | * The page is mapped into the page tables of one or more | 515 | * The page is mapped into the page tables of one or more |
@@ -505,7 +535,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
505 | goto keep_locked; | 535 | goto keep_locked; |
506 | 536 | ||
507 | /* Page is dirty, try to write it out here */ | 537 | /* Page is dirty, try to write it out here */ |
508 | switch(pageout(page, mapping)) { | 538 | switch (pageout(page, mapping, sync_writeback)) { |
509 | case PAGE_KEEP: | 539 | case PAGE_KEEP: |
510 | goto keep_locked; | 540 | goto keep_locked; |
511 | case PAGE_ACTIVATE: | 541 | case PAGE_ACTIVATE: |
@@ -777,6 +807,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, | |||
777 | (sc->order > PAGE_ALLOC_COSTLY_ORDER)? | 807 | (sc->order > PAGE_ALLOC_COSTLY_ORDER)? |
778 | ISOLATE_BOTH : ISOLATE_INACTIVE); | 808 | ISOLATE_BOTH : ISOLATE_INACTIVE); |
779 | nr_active = clear_active_flags(&page_list); | 809 | nr_active = clear_active_flags(&page_list); |
810 | __count_vm_events(PGDEACTIVATE, nr_active); | ||
780 | 811 | ||
781 | __mod_zone_page_state(zone, NR_ACTIVE, -nr_active); | 812 | __mod_zone_page_state(zone, NR_ACTIVE, -nr_active); |
782 | __mod_zone_page_state(zone, NR_INACTIVE, | 813 | __mod_zone_page_state(zone, NR_INACTIVE, |
@@ -785,7 +816,29 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, | |||
785 | spin_unlock_irq(&zone->lru_lock); | 816 | spin_unlock_irq(&zone->lru_lock); |
786 | 817 | ||
787 | nr_scanned += nr_scan; | 818 | nr_scanned += nr_scan; |
788 | nr_freed = shrink_page_list(&page_list, sc); | 819 | nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC); |
820 | |||
821 | /* | ||
822 | * If we are direct reclaiming for contiguous pages and we do | ||
823 | * not reclaim everything in the list, try again and wait | ||
824 | * for IO to complete. This will stall high-order allocations | ||
825 | * but that should be acceptable to the caller | ||
826 | */ | ||
827 | if (nr_freed < nr_taken && !current_is_kswapd() && | ||
828 | sc->order > PAGE_ALLOC_COSTLY_ORDER) { | ||
829 | congestion_wait(WRITE, HZ/10); | ||
830 | |||
831 | /* | ||
832 | * The attempt at page out may have made some | ||
833 | * of the pages active, mark them inactive again. | ||
834 | */ | ||
835 | nr_active = clear_active_flags(&page_list); | ||
836 | count_vm_events(PGDEACTIVATE, nr_active); | ||
837 | |||
838 | nr_freed += shrink_page_list(&page_list, sc, | ||
839 | PAGEOUT_IO_SYNC); | ||
840 | } | ||
841 | |||
789 | nr_reclaimed += nr_freed; | 842 | nr_reclaimed += nr_freed; |
790 | local_irq_disable(); | 843 | local_irq_disable(); |
791 | if (current_is_kswapd()) { | 844 | if (current_is_kswapd()) { |
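The vmscan.c changes teach direct reclaim of large contiguous areas to make two
passes over the isolated pages: a first asynchronous pass (PAGEOUT_IO_ASYNC)
that starts writeback as before, and, only when that pass freed fewer pages
than were taken and the caller is a direct reclaimer above
PAGE_ALLOC_COSTLY_ORDER, a congestion_wait() followed by a synchronous pass
(PAGEOUT_IO_SYNC) that waits on writeback so the contiguous range can actually
be freed. kswapd never takes the stall, and pages whose active flag is cleared
are now accounted as PGDEACTIVATE events. The retry condition, pulled out as a
sketch:

	#include <linux/sched.h>
	#include <linux/swap.h>
	#include <linux/mmzone.h>

	/* Only stalling, high-order direct reclaim is worth a synchronous
	 * second pass; kswapd must keep making forward progress. */
	static int worth_synchronous_retry(unsigned long nr_freed,
					   unsigned long nr_taken, int order)
	{
		return nr_freed < nr_taken &&
		       !current_is_kswapd() &&
		       order > PAGE_ALLOC_COSTLY_ORDER;
	}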
diff --git a/security/commoncap.c b/security/commoncap.c index 338606eb7238..7520361663e8 100644 --- a/security/commoncap.c +++ b/security/commoncap.c | |||
@@ -315,13 +315,13 @@ int cap_syslog (int type) | |||
315 | return 0; | 315 | return 0; |
316 | } | 316 | } |
317 | 317 | ||
318 | int cap_vm_enough_memory(long pages) | 318 | int cap_vm_enough_memory(struct mm_struct *mm, long pages) |
319 | { | 319 | { |
320 | int cap_sys_admin = 0; | 320 | int cap_sys_admin = 0; |
321 | 321 | ||
322 | if (cap_capable(current, CAP_SYS_ADMIN) == 0) | 322 | if (cap_capable(current, CAP_SYS_ADMIN) == 0) |
323 | cap_sys_admin = 1; | 323 | cap_sys_admin = 1; |
324 | return __vm_enough_memory(pages, cap_sys_admin); | 324 | return __vm_enough_memory(mm, pages, cap_sys_admin); |
325 | } | 325 | } |
326 | 326 | ||
327 | EXPORT_SYMBOL(cap_capable); | 327 | EXPORT_SYMBOL(cap_capable); |
diff --git a/security/dummy.c b/security/dummy.c index 19d813d5e083..853ec2292798 100644 --- a/security/dummy.c +++ b/security/dummy.c | |||
@@ -108,13 +108,13 @@ static int dummy_settime(struct timespec *ts, struct timezone *tz) | |||
108 | return 0; | 108 | return 0; |
109 | } | 109 | } |
110 | 110 | ||
111 | static int dummy_vm_enough_memory(long pages) | 111 | static int dummy_vm_enough_memory(struct mm_struct *mm, long pages) |
112 | { | 112 | { |
113 | int cap_sys_admin = 0; | 113 | int cap_sys_admin = 0; |
114 | 114 | ||
115 | if (dummy_capable(current, CAP_SYS_ADMIN) == 0) | 115 | if (dummy_capable(current, CAP_SYS_ADMIN) == 0) |
116 | cap_sys_admin = 1; | 116 | cap_sys_admin = 1; |
117 | return __vm_enough_memory(pages, cap_sys_admin); | 117 | return __vm_enough_memory(mm, pages, cap_sys_admin); |
118 | } | 118 | } |
119 | 119 | ||
120 | static int dummy_bprm_alloc_security (struct linux_binprm *bprm) | 120 | static int dummy_bprm_alloc_security (struct linux_binprm *bprm) |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 6237933f7d82..d8bc4172819c 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -1584,7 +1584,7 @@ static int selinux_syslog(int type) | |||
1584 | * Do not audit the selinux permission check, as this is applied to all | 1584 | * Do not audit the selinux permission check, as this is applied to all |
1585 | * processes that allocate mappings. | 1585 | * processes that allocate mappings. |
1586 | */ | 1586 | */ |
1587 | static int selinux_vm_enough_memory(long pages) | 1587 | static int selinux_vm_enough_memory(struct mm_struct *mm, long pages) |
1588 | { | 1588 | { |
1589 | int rc, cap_sys_admin = 0; | 1589 | int rc, cap_sys_admin = 0; |
1590 | struct task_security_struct *tsec = current->security; | 1590 | struct task_security_struct *tsec = current->security; |
@@ -1600,7 +1600,7 @@ static int selinux_vm_enough_memory(long pages) | |||
1600 | if (rc == 0) | 1600 | if (rc == 0) |
1601 | cap_sys_admin = 1; | 1601 | cap_sys_admin = 1; |
1602 | 1602 | ||
1603 | return __vm_enough_memory(pages, cap_sys_admin); | 1603 | return __vm_enough_memory(mm, pages, cap_sys_admin); |
1604 | } | 1604 | } |
1605 | 1605 | ||
1606 | /* binprm security operations */ | 1606 | /* binprm security operations */ |