author     Paul Mundt <lethal@linux-sh.org>   2012-04-12 06:52:34 -0400
committer  Paul Mundt <lethal@linux-sh.org>   2012-04-12 06:52:34 -0400
commit     33cd5cffd50a789f4e2f0f8bcd7ed9cf8a07da16
tree       f142384aa88b5d25391084b1ef2d192d4e7556ce /arch
parent     a4e02f6d83d4fcdb13bcaba76878fc5ea0da9911
parent     104fa61a7dd83197160d5cafedc0e94ad9cd7fcc
Merge branch 'common/clkfwk' into sh-latest
Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc/kernel/leon_pci.c     |  13
-rw-r--r--  arch/sparc/mm/fault_32.c         |  37
-rw-r--r--  arch/sparc/mm/fault_64.c         |  37
-rw-r--r--  arch/um/drivers/cow.h            |  35
-rw-r--r--  arch/um/drivers/cow_user.c       |  43
-rw-r--r--  arch/um/drivers/mconsole_kern.c  |   1
-rw-r--r--  arch/um/include/asm/Kbuild       |   3
-rw-r--r--  arch/um/kernel/Makefile          |   7
-rw-r--r--  arch/um/kernel/process.c         |   6
-rw-r--r--  arch/um/kernel/skas/mmu.c        |   1
-rw-r--r--  arch/x86/Makefile.um             |   3
-rw-r--r--  arch/x86/um/asm/barrier.h        |  75
-rw-r--r--  arch/x86/um/asm/system.h         | 135
13 files changed, 167 insertions, 229 deletions
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
index aba6b958b2a5..19f56058742b 100644
--- a/arch/sparc/kernel/leon_pci.c
+++ b/arch/sparc/kernel/leon_pci.c
@@ -45,7 +45,6 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
 
 void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
 {
-	struct leon_pci_info *info = pbus->sysdata;
 	struct pci_dev *dev;
 	int i, has_io, has_mem;
 	u16 cmd;
@@ -111,18 +110,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 	return pci_enable_resources(dev, mask);
 }
 
-struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
-{
-	/*
-	 * Currently the OpenBoot nodes are not connected with the PCI device,
-	 * this is because the LEON PROM does not create PCI nodes. Eventually
-	 * this will change and the same approach as pcic.c can be used to
-	 * match PROM nodes with pci devices.
-	 */
-	return NULL;
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
 void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
 {
 #ifdef CONFIG_PCI_DEBUG
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 7705c6731e28..df3155a17991 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -225,6 +225,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	unsigned long g2;
 	int from_user = !(regs->psr & PSR_PS);
 	int fault, code;
+	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+			      (write ? FAULT_FLAG_WRITE : 0));
 
 	if(text_fault)
 		address = regs->pc;
@@ -251,6 +253,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
+retry:
 	down_read(&mm->mmap_sem);
 
 	/*
@@ -289,7 +292,11 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -297,13 +304,29 @@ good_area:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR) {
-		current->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	} else {
-		current->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			current->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+				      1, regs, address);
+		} else {
+			current->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+				      1, regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
 	}
+
 	up_read(&mm->mmap_sem);
 	return;
 
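Both sparc fault handlers in this merge adopt the same mmap_sem retry pattern: the first pass sets FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE, and when handle_mm_fault() returns VM_FAULT_RETRY the flag is cleared and the lookup restarts from the retry: label. A condensed, illustrative sketch of that control flow follows; sketch_do_fault() is a made-up name rather than a kernel function, and the calls mirror the 3.4-era API used in the hunks above.

/*
 * Condensed sketch of the retry pattern shown in the diffs above
 * (not the literal kernel code; find_vma() and access checks elided).
 */
static void sketch_do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long address, int write)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			     (write ? FAULT_FLAG_WRITE : 0);
	int fault;

retry:
	down_read(&mm->mmap_sem);
	/* ... vma lookup and permission checks would go here ... */
	fault = handle_mm_fault(mm, vma, address, flags);

	/* A fatal signal interrupted the fault: __lock_page_or_retry()
	 * already dropped mmap_sem in that case, so just bail out. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (fault & VM_FAULT_RETRY) {
		/* mmap_sem was released for us; retry exactly once,
		 * this time without FAULT_FLAG_ALLOW_RETRY. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}

	up_read(&mm->mmap_sem);
}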
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 504c0622f729..1fe0429b6314 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -279,6 +279,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	unsigned int insn = 0;
 	int si_code, fault_code, fault;
 	unsigned long address, mm_rss;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	fault_code = get_thread_fault_code();
 
@@ -333,6 +334,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 			insn = get_fault_insn(regs, insn);
 			goto handle_kernel_fault;
 		}
+
+retry:
 		down_read(&mm->mmap_sem);
 	}
 
@@ -423,7 +426,12 @@ good_area:
 			goto bad_area;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
+	flags |= ((fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -431,12 +439,27 @@ good_area:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR) {
-		current->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	} else {
-		current->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			current->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+				      1, regs, address);
+		} else {
+			current->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+				      1, regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
 	}
 	up_read(&mm->mmap_sem);
 
diff --git a/arch/um/drivers/cow.h b/arch/um/drivers/cow.h
index dc36b222100b..6673508f3426 100644
--- a/arch/um/drivers/cow.h
+++ b/arch/um/drivers/cow.h
@@ -3,41 +3,6 @@
 
 #include <asm/types.h>
 
-#if defined(__KERNEL__)
-
-# include <asm/byteorder.h>
-
-# if defined(__BIG_ENDIAN)
-# define ntohll(x) (x)
-# define htonll(x) (x)
-# elif defined(__LITTLE_ENDIAN)
-# define ntohll(x) be64_to_cpu(x)
-# define htonll(x) cpu_to_be64(x)
-# else
-# error "Could not determine byte order"
-# endif
-
-#else
-/* For the definition of ntohl, htonl and __BYTE_ORDER */
-#include <endian.h>
-#include <netinet/in.h>
-#if defined(__BYTE_ORDER)
-
-# if __BYTE_ORDER == __BIG_ENDIAN
-# define ntohll(x) (x)
-# define htonll(x) (x)
-# elif __BYTE_ORDER == __LITTLE_ENDIAN
-# define ntohll(x) bswap_64(x)
-# define htonll(x) bswap_64(x)
-# else
-# error "Could not determine byte order: __BYTE_ORDER uncorrectly defined"
-# endif
-
-#else /* ! defined(__BYTE_ORDER) */
-# error "Could not determine byte order: __BYTE_ORDER not defined"
-#endif
-#endif /* ! defined(__KERNEL__) */
-
 extern int init_cow_file(int fd, char *cow_file, char *backing_file,
 			 int sectorsize, int alignment, int *bitmap_offset_out,
 			 unsigned long *bitmap_len_out, int *data_offset_out);
diff --git a/arch/um/drivers/cow_user.c b/arch/um/drivers/cow_user.c
index 9cbb426c0b91..0ee9cc6cc4c7 100644
--- a/arch/um/drivers/cow_user.c
+++ b/arch/um/drivers/cow_user.c
@@ -8,11 +8,10 @@
  * that.
  */
 #include <unistd.h>
-#include <byteswap.h>
 #include <errno.h>
 #include <string.h>
 #include <arpa/inet.h>
-#include <asm/types.h>
+#include <endian.h>
 #include "cow.h"
 #include "cow_sys.h"
 
@@ -214,8 +213,8 @@ int write_cow_header(char *cow_file, int fd, char *backing_file,
 			   "header\n");
 		goto out;
 	}
-	header->magic = htonl(COW_MAGIC);
-	header->version = htonl(COW_VERSION);
+	header->magic = htobe32(COW_MAGIC);
+	header->version = htobe32(COW_VERSION);
 
 	err = -EINVAL;
 	if (strlen(backing_file) > sizeof(header->backing_file) - 1) {
@@ -246,10 +245,10 @@ int write_cow_header(char *cow_file, int fd, char *backing_file,
 		goto out_free;
 	}
 
-	header->mtime = htonl(modtime);
-	header->size = htonll(*size);
-	header->sectorsize = htonl(sectorsize);
-	header->alignment = htonl(alignment);
+	header->mtime = htobe32(modtime);
+	header->size = htobe64(*size);
+	header->sectorsize = htobe32(sectorsize);
+	header->alignment = htobe32(alignment);
 	header->cow_format = COW_BITMAP;
 
 	err = cow_write_file(fd, header, sizeof(*header));
@@ -301,8 +300,8 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
 	magic = header->v1.magic;
 	if (magic == COW_MAGIC)
 		version = header->v1.version;
-	else if (magic == ntohl(COW_MAGIC))
-		version = ntohl(header->v1.version);
+	else if (magic == be32toh(COW_MAGIC))
+		version = be32toh(header->v1.version);
 	/* No error printed because the non-COW case comes through here */
 	else goto out;
 
@@ -327,9 +326,9 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
 				   "header\n");
 			goto out;
 		}
-		*mtime_out = ntohl(header->v2.mtime);
-		*size_out = ntohll(header->v2.size);
-		*sectorsize_out = ntohl(header->v2.sectorsize);
+		*mtime_out = be32toh(header->v2.mtime);
+		*size_out = be64toh(header->v2.size);
+		*sectorsize_out = be32toh(header->v2.sectorsize);
 		*bitmap_offset_out = sizeof(header->v2);
 		*align_out = *sectorsize_out;
 		file = header->v2.backing_file;
@@ -341,10 +340,10 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
 				   "header\n");
 			goto out;
 		}
-		*mtime_out = ntohl(header->v3.mtime);
-		*size_out = ntohll(header->v3.size);
-		*sectorsize_out = ntohl(header->v3.sectorsize);
-		*align_out = ntohl(header->v3.alignment);
+		*mtime_out = be32toh(header->v3.mtime);
+		*size_out = be64toh(header->v3.size);
+		*sectorsize_out = be32toh(header->v3.sectorsize);
+		*align_out = be32toh(header->v3.alignment);
 		if (*align_out == 0) {
 			cow_printf("read_cow_header - invalid COW header, "
 				   "align == 0\n");
@@ -366,16 +365,16 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
 		 * this was used until Dec2005 - 64bits are needed to represent
 		 * 2038+. I.e. we can safely do this truncating cast.
 		 *
-		 * Additionally, we must use ntohl() instead of ntohll(), since
+		 * Additionally, we must use be32toh() instead of be64toh(), since
 		 * the program used to use the former (tested - I got mtime
 		 * mismatch "0 vs whatever").
 		 *
 		 * Ever heard about bug-to-bug-compatibility ? ;-) */
-		*mtime_out = (time32_t) ntohl(header->v3_b.mtime);
+		*mtime_out = (time32_t) be32toh(header->v3_b.mtime);
 
-		*size_out = ntohll(header->v3_b.size);
-		*sectorsize_out = ntohl(header->v3_b.sectorsize);
-		*align_out = ntohl(header->v3_b.alignment);
+		*size_out = be64toh(header->v3_b.size);
+		*sectorsize_out = be32toh(header->v3_b.sectorsize);
+		*align_out = be32toh(header->v3_b.alignment);
 		if (*align_out == 0) {
 			cow_printf("read_cow_header - invalid COW header, "
 				   "align == 0\n");
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index e672bd6d43e3..43b39d61b538 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -22,6 +22,7 @@
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
 #include <asm/uaccess.h>
+#include <asm/switch_to.h>
 
 #include "init.h"
 #include "irq_kern.h"
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 8419f5cf2ac7..fff24352255d 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -1,3 +1,4 @@
 generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
 generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
-generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h
+generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
+generic-y += switch_to.h
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 492bc4c1b62b..65a1c3d690ea 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -3,9 +3,10 @@
 # Licensed under the GPL
 #
 
 CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START) \
                         -DELF_ARCH=$(LDS_ELF_ARCH) \
-                        -DELF_FORMAT=$(LDS_ELF_FORMAT)
+                        -DELF_FORMAT=$(LDS_ELF_FORMAT) \
+                        $(LDS_EXTRA)
 extra-y := vmlinux.lds
 clean-files :=
 
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index f386d04a84a5..2b73dedb44ca 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -88,11 +88,8 @@ static inline void set_current(struct task_struct *task)
 
 extern void arch_switch_to(struct task_struct *to);
 
-void *_switch_to(void *prev, void *next, void *last)
+void *__switch_to(struct task_struct *from, struct task_struct *to)
 {
-	struct task_struct *from = prev;
-	struct task_struct *to = next;
-
 	to->thread.prev_sched = from;
 	set_current(to);
 
@@ -111,7 +108,6 @@ void *_switch_to(void *prev, void *next, void *last)
 	} while (current->thread.saved_task);
 
 	return current->thread.prev_sched;
-
 }
 
 void interrupt_end(void)
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 4947b319f53a..0a49ef0c2bf4 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -103,7 +103,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 
 void uml_setup_stubs(struct mm_struct *mm)
 {
-	struct page **pages;
 	int err, ret;
 
 	if (!skas_needs_stub)
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index 4be406abeefd..36b62bc52638 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -14,6 +14,9 @@ LINK-y += $(call cc-option,-m32)
 
 export LDFLAGS
 
+LDS_EXTRA := -Ui386
+export LDS_EXTRA
+
 # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
 include $(srctree)/arch/x86/Makefile_32.cpu
 
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
new file mode 100644
index 000000000000..7d01b8c56c00
--- /dev/null
+++ b/arch/x86/um/asm/barrier.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_UM_BARRIER_H_
+#define _ASM_UM_BARRIER_H_
+
+#include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/cmpxchg.h>
+#include <asm/nops.h>
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#ifdef CONFIG_X86_32
+
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+
+#else /* CONFIG_X86_32 */
+
+#define mb() asm volatile("mfence" : : : "memory")
+#define rmb() asm volatile("lfence" : : : "memory")
+#define wmb() asm volatile("sfence" : : : "memory")
+
+#endif /* CONFIG_X86_32 */
+
+#define read_barrier_depends() do { } while (0)
+
+#ifdef CONFIG_SMP
+
+#define smp_mb() mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+#define smp_rmb() rmb()
+#else /* CONFIG_X86_PPRO_FENCE */
+#define smp_rmb() barrier()
+#endif /* CONFIG_X86_PPRO_FENCE */
+
+#ifdef CONFIG_X86_OOSTORE
+#define smp_wmb() wmb()
+#else /* CONFIG_X86_OOSTORE */
+#define smp_wmb() barrier()
+#endif /* CONFIG_X86_OOSTORE */
+
+#define smp_read_barrier_depends() read_barrier_depends()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+
+#else /* CONFIG_SMP */
+
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use an alternative three way for this if there was one.)
+ */
+static inline void rdtsc_barrier(void)
+{
+	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif
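On SMP builds the smp_* macros added above expand to real ordering primitives, while on UP builds they collapse to plain compiler barriers. A hypothetical usage sketch, not part of this commit, showing the usual smp_wmb()/smp_rmb() pairing around a publish flag (producer()/consumer() and the two variables are made-up names for illustration):

/*
 * Hypothetical sketch: the classic publish/consume pairing built
 * from the barrier macros defined in the new header above.
 */
static int payload;
static int ready;

static void producer(void)
{
	payload = 42;	/* store the data first */
	smp_wmb();	/* order the data store before the flag store */
	ready = 1;	/* then publish the flag */
}

static void consumer(void)
{
	if (ready) {
		smp_rmb();	/* order the flag load before the data load */
		/* payload is guaranteed to read as 42 here */
	}
}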
diff --git a/arch/x86/um/asm/system.h b/arch/x86/um/asm/system.h
deleted file mode 100644
index a459fd9b7598..000000000000
--- a/arch/x86/um/asm/system.h
+++ /dev/null
@@ -1,135 +0,0 @@
-#ifndef _ASM_X86_SYSTEM_H_
-#define _ASM_X86_SYSTEM_H_
-
-#include <asm/asm.h>
-#include <asm/segment.h>
-#include <asm/cpufeature.h>
-#include <asm/cmpxchg.h>
-#include <asm/nops.h>
-
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-
-/* entries in ARCH_DLINFO: */
-#ifdef CONFIG_IA32_EMULATION
-# define AT_VECTOR_SIZE_ARCH 2
-#else
-# define AT_VECTOR_SIZE_ARCH 1
-#endif
-
-extern unsigned long arch_align_stack(unsigned long sp);
-
-void default_idle(void);
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- */
-#ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-#else
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence" ::: "memory")
-#endif
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends() do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#ifdef CONFIG_X86_PPRO_FENCE
-# define smp_rmb() rmb()
-#else
-# define smp_rmb() barrier()
-#endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb() wmb()
-#else
-# define smp_wmb() barrier()
-#endif
-#define smp_read_barrier_depends() read_barrier_depends()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
-/*
- * Stop RDTSC speculation. This is needed when you need to use RDTSC
- * (or get_cycles or vread that possibly accesses the TSC) in a defined
- * code region.
- *
- * (Could use an alternative three way for this if there was one.)
- */
-static inline void rdtsc_barrier(void)
-{
-	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
-	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
-}
-
-extern void *_switch_to(void *prev, void *next, void *last);
-#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
-
-#endif