477 files changed, 8021 insertions, 5212 deletions
diff --git a/.gitignore b/.gitignore
index 8363e48cdcdc..fdcce40226d7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -53,3 +53,5 @@ cscope.*
 
 *.orig
 *.rej
+*~
+\#*#
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index f31601e8bd89..dc0f30c3e571 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -361,12 +361,14 @@ X!Edrivers/pnp/system.c
 <chapter id="blkdev">
 <title>Block Devices</title>
 !Eblock/blk-core.c
+!Iblock/blk-core.c
 !Eblock/blk-map.c
 !Iblock/blk-sysfs.c
 !Eblock/blk-settings.c
 !Eblock/blk-exec.c
 !Eblock/blk-barrier.c
 !Eblock/blk-tag.c
+!Iblock/blk-tag.c
 </chapter>
 
 <chapter id="chrdev">
diff --git a/Documentation/controllers/memory.txt b/Documentation/controllers/memory.txt
index 6015347b41e2..866b9cd9a959 100644
--- a/Documentation/controllers/memory.txt
+++ b/Documentation/controllers/memory.txt
@@ -1,4 +1,8 @@
-Memory Controller
+Memory Resource Controller
+
+NOTE: The Memory Resource Controller has been generically been referred
+to as the memory controller in this document. Do not confuse memory controller
+used here with the memory controller that is used in hardware.
 
 Salient features
 
@@ -152,7 +156,7 @@ The memory controller uses the following hierarchy
 
 a. Enable CONFIG_CGROUPS
 b. Enable CONFIG_RESOURCE_COUNTERS
-c. Enable CONFIG_CGROUP_MEM_CONT
+c. Enable CONFIG_CGROUP_MEM_RES_CTLR
 
 1. Prepare the cgroups
 # mkdir -p /cgroups
@@ -164,7 +168,7 @@ c. Enable CONFIG_CGROUP_MEM_CONT
 
 Since now we're in the 0 cgroup,
 We can alter the memory limit:
-# echo -n 4M > /cgroups/0/memory.limit_in_bytes
+# echo 4M > /cgroups/0/memory.limit_in_bytes
 
 NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
 mega or gigabytes.
@@ -185,7 +189,7 @@ number of factors, such as rounding up to page boundaries or the total
 availability of memory on the system. The user is required to re-read
 this file after a write to guarantee the value committed by the kernel.
 
-# echo -n 1 > memory.limit_in_bytes
+# echo 1 > memory.limit_in_bytes
 # cat memory.limit_in_bytes
 4096
 
@@ -197,7 +201,7 @@ caches, RSS and Active pages/Inactive pages are shown.
 
 The memory.force_empty gives an interface to drop *all* charges by force.
 
-# echo -n 1 > memory.force_empty
+# echo 1 > memory.force_empty
 
 will drop all charges in cgroup. Currently, this is maintained for test.
 
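The memory.txt hunks above change the documented usage from "echo -n" to plain "echo" because the kernel-side parser now tolerates a trailing newline. The same control files can also be driven from a program rather than a shell; a minimal userspace sketch, assuming the /cgroups mount point and group 0 used in the documentation example:

#include <stdio.h>

/* Write a value such as "4M" into a cgroup control file.
 * The path below follows the documentation example; adjust for your setup. */
static int cgroup_write(const char *path, const char *value)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", value);	/* trailing newline is accepted, per the doc change */
	return fclose(f);
}

int main(void)
{
	if (cgroup_write("/cgroups/0/memory.limit_in_bytes", "4M") != 0)
		perror("memory.limit_in_bytes");
	return 0;
}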
diff --git a/Documentation/debugging-via-ohci1394.txt b/Documentation/debugging-via-ohci1394.txt
index de4804e8b396..c360d4e91b48 100644
--- a/Documentation/debugging-via-ohci1394.txt
+++ b/Documentation/debugging-via-ohci1394.txt
@@ -36,14 +36,15 @@ available (notebooks) or too slow for extensive debug information (like ACPI).
 Drivers
 -------
 
-The OHCI-1394 drivers in drivers/firewire and drivers/ieee1394 initialize
-the OHCI-1394 controllers to a working state and can be used to enable
-physical DMA. By default you only have to load the driver, and physical
-DMA access will be granted to all remote nodes, but it can be turned off
-when using the ohci1394 driver.
-
-Because these drivers depend on the PCI enumeration to be completed, an
-initialization routine which can runs pretty early (long before console_init(),
+The ohci1394 driver in drivers/ieee1394 initializes the OHCI-1394 controllers
+to a working state and enables physical DMA by default for all remote nodes.
+This can be turned off by ohci1394's module parameter phys_dma=0.
+
+The alternative firewire-ohci driver in drivers/firewire uses filtered physical
+DMA, hence is not yet suitable for remote debugging.
+
+Because ohci1394 depends on the PCI enumeration to be completed, an
+initialization routine which runs pretty early (long before console_init()
 which makes the printk buffer appear on the console can be called) was written.
 
 To activate it, enable CONFIG_PROVIDE_OHCI1394_DMA_INIT (Kernel hacking menu:
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 4d3aa519eadf..c1d1fd0c299b 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -172,6 +172,16 @@ Who: Len Brown <len.brown@intel.com>
 
 ---------------------------
 
+What: ide-tape driver
+When: July 2008
+Files: drivers/ide/ide-tape.c
+Why: This driver might not have any users anymore and maintaining it for no
+reason is an effort no one wants to make.
+Who: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>, Borislav Petkov
+<petkovbb@googlemail.com>
+
+---------------------------
+
 What: libata spindown skipping and warning
 When: Dec 2008
 Why: Some halt(8) implementations synchronize caches for and spin
@@ -306,3 +316,15 @@ Why: Largely unmaintained and almost entirely unused. File system
 is largely pointless as without a lot of work only the most
 trivial of Solaris binaries can work with the emulation code.
 Who: David S. Miller <davem@davemloft.net>
+
+---------------------------
+
+What: init_mm export
+When: 2.6.26
+Why: Not used in-tree. The current out-of-tree users used it to
+work around problems in the CPA code which should be resolved
+by now. One usecase was described to provide verification code
+of the CPA operation. That's a good idea in general, but such
+code / infrastructure should be in the kernel and not in some
+out-of-tree driver.
+Who: Thomas Gleixner <tglx@linutronix.de>
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index 8da724e2a0ff..54630095aa3c 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -2,6 +2,9 @@ GPIO Interfaces
 
 This provides an overview of GPIO access conventions on Linux.
 
+These calls use the gpio_* naming prefix. No other calls should use that
+prefix, or the related __gpio_* prefix.
+
 
 What is a GPIO?
 ===============
@@ -69,11 +72,13 @@ in this document, but drivers acting as clients to the GPIO interface must
 not care how it's implemented.)
 
 That said, if the convention is supported on their platform, drivers should
-use it when possible. Platforms should declare GENERIC_GPIO support in
-Kconfig (boolean true), which multi-platform drivers can depend on when
-using the include file:
+use it when possible. Platforms must declare GENERIC_GPIO support in their
+Kconfig (boolean true), and provide an <asm/gpio.h> file. Drivers that can't
+work without standard GPIO calls should have Kconfig entries which depend
+on GENERIC_GPIO. The GPIO calls are available, either as "real code" or as
+optimized-away stubs, when drivers use the include file:
 
-#include <asm/gpio.h>
+#include <linux/gpio.h>
 
 If you stick to this convention then it'll be easier for other developers to
 see what your code is doing, and help maintain it.
@@ -316,6 +321,9 @@ pulldowns integrated on some platforms. Not all platforms support them,
 or support them in the same way; and any given board might use external
 pullups (or pulldowns) so that the on-chip ones should not be used.
 (When a circuit needs 5 kOhm, on-chip 100 kOhm resistors won't do.)
+Likewise drive strength (2 mA vs 20 mA) and voltage (1.8V vs 3.3V) is a
+platform-specific issue, as are models like (not) having a one-to-one
+correspondence between configurable pins and GPIOs.
 
 There are other system-specific mechanisms that are not specified here,
 like the aforementioned options for input de-glitching and wire-OR output.
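As a reference for the convention the gpio.txt changes describe, a driver-side sketch follows; the GPIO number is a made-up placeholder, and real code would normally sit behind a Kconfig entry that depends on GENERIC_GPIO:

#include <linux/gpio.h>		/* preferred over <asm/gpio.h>, per the hunk above */

#define DEMO_GPIO	42	/* hypothetical line; usually comes from platform data */

static int demo_gpio_setup(void)
{
	int err;

	/* Reserve the line, then configure it as an output driven low. */
	err = gpio_request(DEMO_GPIO, "demo");
	if (err)
		return err;

	err = gpio_direction_output(DEMO_GPIO, 0);
	if (err) {
		gpio_free(DEMO_GPIO);
		return err;
	}

	gpio_set_value(DEMO_GPIO, 1);	/* later: drive it high */
	return 0;
}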
diff --git a/Documentation/ide.txt b/Documentation/ide.txt
index 94e2e3b9e77f..bcd7cd1278ef 100644
--- a/Documentation/ide.txt
+++ b/Documentation/ide.txt
@@ -258,8 +258,6 @@ Summary of ide driver parameters for kernel command line
 As for VLB, it is safest to not specify it.
 Bigger values are safer than smaller ones.
 
-"idex=noprobe" : do not attempt to access/use this interface
-
 "idex=base" : probe for an interface at the addr specified,
 where "base" is usually 0x1f0 or 0x170
 and "ctl" is assumed to be "base"+0x206
@@ -309,53 +307,6 @@ are detected automatically).
 
 ================================================================================
 
-IDE ATAPI streaming tape driver
--------------------------------
-
-This driver is a part of the Linux ide driver and works in co-operation
-with linux/drivers/block/ide.c.
-
-The driver, in co-operation with ide.c, basically traverses the
-request-list for the block device interface. The character device
-interface, on the other hand, creates new requests, adds them
-to the request-list of the block device, and waits for their completion.
-
-Pipelined operation mode is now supported on both reads and writes.
-
-The block device major and minor numbers are determined from the
-tape's relative position in the ide interfaces, as explained in ide.c.
-
-The character device interface consists of the following devices:
-
-ht0 major 37, minor 0 first IDE tape, rewind on close.
-ht1 major 37, minor 1 second IDE tape, rewind on close.
-...
-nht0 major 37, minor 128 first IDE tape, no rewind on close.
-nht1 major 37, minor 129 second IDE tape, no rewind on close.
-...
-
-Run /dev/MAKEDEV to create the above entries.
-
-The general magnetic tape commands compatible interface, as defined by
-include/linux/mtio.h, is accessible through the character device.
-
-General ide driver configuration options, such as the interrupt-unmask
-flag, can be configured by issuing an ioctl to the block device interface,
-as any other ide device.
-
-Our own ide-tape ioctl's can be issued to either the block device or
-the character device interface.
-
-Maximal throughput with minimal bus load will usually be achieved in the
-following scenario:
-
-1. ide-tape is operating in the pipelined operation mode.
-2. No buffering is performed by the user backup program.
-
-
-
-================================================================================
-
 Some Terminology
 ----------------
 IDE = Integrated Drive Electronics, meaning that each drive has a built-in
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 83f515c2905a..be89f393274f 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -192,7 +192,8 @@ code mapping.
 The Kprobes API includes a "register" function and an "unregister"
 function for each type of probe. Here are terse, mini-man-page
 specifications for these functions and the associated probe handlers
-that you'll write. See the latter half of this document for examples.
+that you'll write. See the files in the samples/kprobes/ sub-directory
+for examples.
 
 4.1 register_kprobe
 
@@ -420,249 +421,15 @@ e. Watchpoint probes (which fire on data references).
 
 8. Kprobes Example
 
-Here's a sample kernel module showing the use of kprobes to dump a
-stack trace and selected i386 registers when do_fork() is called.
------ cut here -----
-/*kprobe_example.c*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/kprobes.h>
-#include <linux/sched.h>
-
-/*For each probe you need to allocate a kprobe structure*/
-static struct kprobe kp;
-
-/*kprobe pre_handler: called just before the probed instruction is executed*/
-int handler_pre(struct kprobe *p, struct pt_regs *regs)
-{
-printk("pre_handler: p->addr=0x%p, eip=%lx, eflags=0x%lx\n",
-p->addr, regs->eip, regs->eflags);
-dump_stack();
-return 0;
-}
-
-/*kprobe post_handler: called after the probed instruction is executed*/
-void handler_post(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
-{
-printk("post_handler: p->addr=0x%p, eflags=0x%lx\n",
-p->addr, regs->eflags);
-}
-
-/* fault_handler: this is called if an exception is generated for any
- * instruction within the pre- or post-handler, or when Kprobes
- * single-steps the probed instruction.
- */
-int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
-{
-printk("fault_handler: p->addr=0x%p, trap #%dn",
-p->addr, trapnr);
-/* Return 0 because we don't handle the fault. */
-return 0;
-}
-
-static int __init kprobe_init(void)
-{
-int ret;
-kp.pre_handler = handler_pre;
-kp.post_handler = handler_post;
-kp.fault_handler = handler_fault;
-kp.symbol_name = "do_fork";
-
-ret = register_kprobe(&kp);
-if (ret < 0) {
-printk("register_kprobe failed, returned %d\n", ret);
-return ret;
-}
-printk("kprobe registered\n");
-return 0;
-}
-
-static void __exit kprobe_exit(void)
-{
-unregister_kprobe(&kp);
-printk("kprobe unregistered\n");
-}
-
-module_init(kprobe_init)
-module_exit(kprobe_exit)
-MODULE_LICENSE("GPL");
------ cut here -----
-
-You can build the kernel module, kprobe-example.ko, using the following
-Makefile:
------ cut here -----
-obj-m := kprobe-example.o
-KDIR := /lib/modules/$(shell uname -r)/build
-PWD := $(shell pwd)
-default:
-$(MAKE) -C $(KDIR) SUBDIRS=$(PWD) modules
-clean:
-rm -f *.mod.c *.ko *.o
------ cut here -----
-
-$ make
-$ su -
-...
-# insmod kprobe-example.ko
-
-You will see the trace data in /var/log/messages and on the console
-whenever do_fork() is invoked to create a new process.
+See samples/kprobes/kprobe_example.c
 
 9. Jprobes Example
 
-Here's a sample kernel module showing the use of jprobes to dump
-the arguments of do_fork().
------ cut here -----
-/*jprobe-example.c */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/uio.h>
-#include <linux/kprobes.h>
-
-/*
- * Jumper probe for do_fork.
- * Mirror principle enables access to arguments of the probed routine
- * from the probe handler.
- */
-
-/* Proxy routine having the same arguments as actual do_fork() routine */
-long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
-struct pt_regs *regs, unsigned long stack_size,
-int __user * parent_tidptr, int __user * child_tidptr)
-{
-printk("jprobe: clone_flags=0x%lx, stack_size=0x%lx, regs=0x%p\n",
-clone_flags, stack_size, regs);
-/* Always end with a call to jprobe_return(). */
-jprobe_return();
-/*NOTREACHED*/
-return 0;
-}
-
-static struct jprobe my_jprobe = {
-.entry = jdo_fork
-};
-
-static int __init jprobe_init(void)
-{
-int ret;
-my_jprobe.kp.symbol_name = "do_fork";
-
-if ((ret = register_jprobe(&my_jprobe)) <0) {
-printk("register_jprobe failed, returned %d\n", ret);
-return -1;
-}
-printk("Planted jprobe at %p, handler addr %p\n",
-my_jprobe.kp.addr, my_jprobe.entry);
-return 0;
-}
-
-static void __exit jprobe_exit(void)
-{
-unregister_jprobe(&my_jprobe);
-printk("jprobe unregistered\n");
-}
-
-module_init(jprobe_init)
-module_exit(jprobe_exit)
-MODULE_LICENSE("GPL");
------ cut here -----
-
-Build and insert the kernel module as shown in the above kprobe
-example. You will see the trace data in /var/log/messages and on
-the console whenever do_fork() is invoked to create a new process.
-(Some messages may be suppressed if syslogd is configured to
-eliminate duplicate messages.)
+See samples/kprobes/jprobe_example.c
 
 10. Kretprobes Example
 
-Here's a sample kernel module showing the use of return probes to
-report failed calls to sys_open().
------ cut here -----
-/*kretprobe-example.c*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/kprobes.h>
-#include <linux/ktime.h>
-
-/* per-instance private data */
-struct my_data {
-ktime_t entry_stamp;
-};
-
-static const char *probed_func = "sys_open";
-
-/* Timestamp function entry. */
-static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
-{
-struct my_data *data;
-
-if(!current->mm)
-return 1; /* skip kernel threads */
-
-data = (struct my_data *)ri->data;
-data->entry_stamp = ktime_get();
-return 0;
-}
-
-/* If the probed function failed, log the return value and duration.
- * Duration may turn out to be zero consistently, depending upon the
- * granularity of time accounting on the platform. */
-static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
-{
-int retval = regs_return_value(regs);
-struct my_data *data = (struct my_data *)ri->data;
-s64 delta;
-ktime_t now;
-
-if (retval < 0) {
-now = ktime_get();
-delta = ktime_to_ns(ktime_sub(now, data->entry_stamp));
-printk("%s: return val = %d (duration = %lld ns)\n",
-probed_func, retval, delta);
-}
-return 0;
-}
-
-static struct kretprobe my_kretprobe = {
-.handler = return_handler,
-.entry_handler = entry_handler,
-.data_size = sizeof(struct my_data),
-.maxactive = 20, /* probe up to 20 instances concurrently */
-};
-
-static int __init kretprobe_init(void)
-{
-int ret;
-my_kretprobe.kp.symbol_name = (char *)probed_func;
-
-if ((ret = register_kretprobe(&my_kretprobe)) < 0) {
-printk("register_kretprobe failed, returned %d\n", ret);
-return -1;
-}
-printk("Kretprobe active on %s\n", my_kretprobe.kp.symbol_name);
-return 0;
-}
-
-static void __exit kretprobe_exit(void)
-{
-unregister_kretprobe(&my_kretprobe);
-printk("kretprobe unregistered\n");
-/* nmissed > 0 suggests that maxactive was set too low. */
-printk("Missed probing %d instances of %s\n",
-my_kretprobe.nmissed, probed_func);
-}
-
-module_init(kretprobe_init)
-module_exit(kretprobe_exit)
-MODULE_LICENSE("GPL");
------ cut here -----
-
-Build and insert the kernel module as shown in the above kprobe
-example. You will see the trace data in /var/log/messages and on the
-console whenever sys_open() returns a negative value. (Some messages
-may be suppressed if syslogd is configured to eliminate duplicate
-messages.)
+See samples/kprobes/kretprobe_example.c
 
 For additional information on Kprobes, refer to the following URLs:
 http://www-106.ibm.com/developerworks/library/l-kprobes.html?ca=dgr-lnxw42Kprobe
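The three kprobes.txt hunks above drop the inline example modules in favour of pointers into samples/kprobes/. The registration flow they demonstrated is still visible in the removed lines; condensed into a sketch (not the literal contents of samples/kprobes/kprobe_example.c), it looks like this:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

static struct kprobe kp = {
	.symbol_name = "do_fork",	/* probe point used by the removed example */
};

/* pre_handler: runs just before the probed instruction executes. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk("pre_handler: p->addr = 0x%p\n", p->addr);
	dump_stack();
	return 0;
}

static int __init kprobe_init(void)
{
	int ret;

	kp.pre_handler = handler_pre;
	ret = register_kprobe(&kp);
	if (ret < 0)
		printk("register_kprobe failed, returned %d\n", ret);
	return ret;
}

static void __exit kprobe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_init);
module_exit(kprobe_exit);
MODULE_LICENSE("GPL");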
diff --git a/Documentation/pci.txt b/Documentation/pci.txt
index 72b20c639596..bb7bd27d4682 100644
--- a/Documentation/pci.txt
+++ b/Documentation/pci.txt
@@ -123,7 +123,8 @@ initialization with a pointer to a structure describing the driver
 
 
 The ID table is an array of struct pci_device_id entries ending with an
-all-zero entry. Each entry consists of:
+all-zero entry; use of the macro DECLARE_PCI_DEVICE_TABLE is the preferred
+method of declaring the table. Each entry consists of:
 
 vendor,device Vendor and device ID to match (or PCI_ANY_ID)
 
@@ -191,7 +192,8 @@ Tips on when/where to use the above attributes:
 
 o Do not mark the struct pci_driver.
 
-o The ID table array should be marked __devinitdata.
+o The ID table array should be marked __devinitconst; this is done
+automatically if the table is declared with DECLARE_PCI_DEVICE_TABLE().
 
 o The probe() and remove() functions should be marked __devinit
 and __devexit respectively. All initialization functions
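For reference, an ID table declared the way the updated pci.txt recommends looks roughly like the sketch below; DECLARE_PCI_DEVICE_TABLE comes from <linux/pci.h> in kernels of this vintage, and the vendor/device IDs are placeholders, not real hardware:

#include <linux/module.h>
#include <linux/pci.h>

#define DEMO_VENDOR_ID	0x1234	/* placeholder */
#define DEMO_DEVICE_ID	0x5678	/* placeholder */

/* Expands to a const struct pci_device_id[] marked __devinitconst,
 * which is the annotation the hunk above asks for. */
static DECLARE_PCI_DEVICE_TABLE(demo_pci_ids) = {
	{ PCI_DEVICE(DEMO_VENDOR_ID, DEMO_DEVICE_ID) },
	{ 0, }			/* all-zero terminator */
};
MODULE_DEVICE_TABLE(pci, demo_pci_ids);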
diff --git a/MAINTAINERS b/MAINTAINERS
index 33d99dcac691..558636e3a954 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -767,14 +767,14 @@ S: Maintained
 
 BLACKFIN ARCHITECTURE
 P: Bryan Wu
-M: bryan.wu@analog.com
+M: cooloney@kernel.org
 L: uclinux-dist-devel@blackfin.uclinux.org (subscribers-only)
 W: http://blackfin.uclinux.org
 S: Supported
 
 BLACKFIN EMAC DRIVER
 P: Bryan Wu
-M: bryan.wu@analog.com
+M: cooloney@kernel.org
 L: uclinux-dist-devel@blackfin.uclinux.org (subscribers-only)
 W: http://blackfin.uclinux.org
 S: Supported
@@ -1138,6 +1138,12 @@ L: accessrunner-general@lists.sourceforge.net
 W: http://accessrunner.sourceforge.net/
 S: Maintained
 
+CONTROL GROUPS (CGROUPS)
+P: Paul Menage
+M: menage@google.com
+L: containers@lists.linux-foundation.org
+S: Maintained
+
 CORETEMP HARDWARE MONITORING DRIVER
 P: Rudolf Marek
 M: r.marek@assembler.cz
@@ -1589,6 +1595,13 @@ L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
 W: http://linux-fbdev.sourceforge.net/
 S: Maintained
 
+FREESCALE DMA DRIVER
+P; Zhang Wei
+M: wei.zhang@freescale.com
+L: linuxppc-embedded@ozlabs.org
+L: linux-kernel@vger.kernel.org
+S: Maintained
+
 FREESCALE SOC FS_ENET DRIVER
 P: Pantelis Antoniou
 M: pantelis.antoniou@gmail.com
@@ -2626,6 +2639,17 @@ L: linux-kernel@vger.kernel.org
 W: http://www.linux-mm.org
 S: Maintained
 
+MEMORY RESOURCE CONTROLLER
+P: Balbir Singh
+M: balbir@linux.vnet.ibm.com
+P: Pavel Emelyanov
+M: xemul@openvz.org
+P: KAMEZAWA Hiroyuki
+M: kamezawa.hiroyu@jp.fujitsu.com
+L: linux-mm@kvack.org
+L: linux-kernel@vger.kernel.org
+S: Maintained
+
 MEI MN10300/AM33 PORT
 P: David Howells
 M: dhowells@redhat.com
@@ -2750,6 +2774,8 @@ S: Maintained
 NETEFFECT IWARP RNIC DRIVER (IW_NES)
 P: Faisal Latif
 M: flatif@neteffect.com
+P: Nishi Gupta
+M: ngupta@neteffect.com
 P: Glenn Streiff
 M: gstreiff@neteffect.com
 L: general@lists.openfabrics.org
diff --git a/arch/Kconfig b/arch/Kconfig
index 3d72dc3fc8f5..694c9af520bb 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -27,5 +27,12 @@ config KPROBES
 for kernel debugging, non-intrusive instrumentation and testing.
 If in doubt, say "N".
 
+config KRETPROBES
+def_bool y
+depends on KPROBES && HAVE_KRETPROBES
+
 config HAVE_KPROBES
 def_bool n
+
+config HAVE_KRETPROBES
+def_bool n
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 26d3789dfdd0..be6fa105cd34 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -31,7 +31,6 @@
 #endif
 
 #define DEBUG_NODIRECT 0
-#define DEBUG_FORCEDAC 0
 
 #define ISA_DMA_MASK 0x00ffffff
 
@@ -126,39 +125,67 @@ iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
 return iommu_arena_new_node(0, hose, base, window_size, align);
 }
 
+static inline int is_span_boundary(unsigned int index, unsigned int nr,
+unsigned long shift,
+unsigned long boundary_size)
+{
+shift = (shift + index) & (boundary_size - 1);
+return shift + nr > boundary_size;
+}
+
 /* Must be called with the arena lock held */
 static long
-iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
+iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
+long n, long mask)
 {
 unsigned long *ptes;
 long i, p, nent;
+int pass = 0;
+unsigned long base;
+unsigned long boundary_size;
+
+BUG_ON(arena->dma_base & ~PAGE_MASK);
+base = arena->dma_base >> PAGE_SHIFT;
+if (dev)
+boundary_size = ALIGN(dma_get_max_seg_size(dev) + 1, PAGE_SIZE)
+>> PAGE_SHIFT;
+else
+boundary_size = ALIGN(1UL << 32, PAGE_SIZE) >> PAGE_SHIFT;
+
+BUG_ON(!is_power_of_2(boundary_size));
 
 /* Search forward for the first mask-aligned sequence of N free ptes */
 ptes = arena->ptes;
 nent = arena->size >> PAGE_SHIFT;
-p = (arena->next_entry + mask) & ~mask;
+p = ALIGN(arena->next_entry, mask + 1);
 i = 0;
+
+again:
 while (i < n && p+i < nent) {
+if (!i && is_span_boundary(p, n, base, boundary_size)) {
+p = ALIGN(p + 1, mask + 1);
+goto again;
+}
+
 if (ptes[p+i])
-p = (p + i + 1 + mask) & ~mask, i = 0;
+p = ALIGN(p + i + 1, mask + 1), i = 0;
 else
 i = i + 1;
 }
 
 if (i < n) {
-/* Reached the end. Flush the TLB and restart the
-search from the beginning. */
-alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
-
-p = 0, i = 0;
-while (i < n && p+i < nent) {
-if (ptes[p+i])
-p = (p + i + 1 + mask) & ~mask, i = 0;
-else
-i = i + 1;
-}
-
-if (i < n)
+if (pass < 1) {
+/*
+* Reached the end. Flush the TLB and restart
+* the search from the beginning.
+*/
+alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
+
+pass++;
+p = 0;
+i = 0;
+goto again;
+} else
 return -1;
 }
 
@@ -168,7 +195,8 @@ iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
 }
 
 static long
-iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
+iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
+unsigned int align)
 {
 unsigned long flags;
 unsigned long *ptes;
@@ -179,7 +207,7 @@ iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
 /* Search for N empty ptes */
 ptes = arena->ptes;
 mask = max(align, arena->align_entry) - 1;
-p = iommu_arena_find_pages(arena, n, mask);
+p = iommu_arena_find_pages(dev, arena, n, mask);
 if (p < 0) {
 spin_unlock_irqrestore(&arena->lock, flags);
 return -1;
@@ -229,6 +257,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 unsigned long paddr;
 dma_addr_t ret;
 unsigned int align = 0;
+struct device *dev = pdev ? &pdev->dev : NULL;
 
 paddr = __pa(cpu_addr);
 
@@ -276,7 +305,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 /* Force allocation to 64KB boundary for ISA bridges. */
 if (pdev && pdev == isa_bridge)
 align = 8;
-dma_ofs = iommu_arena_alloc(arena, npages, align);
+dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
 if (dma_ofs < 0) {
 printk(KERN_WARNING "pci_map_single failed: "
 "could not allocate dma page tables\n");
@@ -563,7 +592,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
 
 paddr &= ~PAGE_MASK;
 npages = calc_npages(paddr + size);
-dma_ofs = iommu_arena_alloc(arena, npages, 0);
+dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
 if (dma_ofs < 0) {
 /* If we attempted a direct map above but failed, die. */
 if (leader->dma_address == 0)
@@ -830,7 +859,7 @@ iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
 
 /* Search for N empty ptes. */
 ptes = arena->ptes;
-p = iommu_arena_find_pages(arena, pg_count, align_mask);
+p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
 if (p < 0) {
 spin_unlock_irqrestore(&arena->lock, flags);
 return -1;
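The is_span_boundary() helper added above is plain arithmetic, so its behaviour can be checked outside the kernel. A small userspace harness, with made-up numbers chosen only to show one crossing and one non-crossing allocation:

#include <stdio.h>

/* Same logic as the helper in the hunk: returns non-zero when nr pages
 * starting at index (relative to an arena whose base page is shift)
 * would straddle a boundary of boundary_size pages (a power of two). */
static int is_span_boundary(unsigned int index, unsigned int nr,
			    unsigned long shift, unsigned long boundary_size)
{
	shift = (shift + index) & (boundary_size - 1);
	return shift + nr > boundary_size;
}

int main(void)
{
	unsigned long base = 0, boundary = 64;	/* arena at page 0, 64-page boundary */

	printf("%d\n", is_span_boundary(60, 8, base, boundary));	/* 1: crosses */
	printf("%d\n", is_span_boundary(32, 8, base, boundary));	/* 0: fits */
	return 0;
}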
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9619c43783ff..955fc53c1c01 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -12,6 +12,7 @@ config ARM
 select SYS_SUPPORTS_APM_EMULATION
 select HAVE_OPROFILE
 select HAVE_KPROBES if (!XIP_KERNEL)
+select HAVE_KRETPROBES if (HAVE_KPROBES)
 help
 The ARM series is a line of low-power-consumption RISC chip designs
 licensed by ARM Ltd and targeted at embedded applications and
@@ -939,7 +940,8 @@ config KEXEC
 
 config ATAGS_PROC
 bool "Export atags in procfs"
-default n
+depends on KEXEC
+default y
 help
 Should the atags used to boot the kernel be exported in an "atags"
 file in procfs. Useful with kexec.
diff --git a/arch/arm/mach-pxa/cpu-pxa.c b/arch/arm/mach-pxa/cpu-pxa.c
index 939a3867f77c..4b21479332ae 100644
--- a/arch/arm/mach-pxa/cpu-pxa.c
+++ b/arch/arm/mach-pxa/cpu-pxa.c
@@ -43,7 +43,7 @@
 
 #ifdef DEBUG
 static unsigned int freq_debug;
-MODULE_PARM(freq_debug, "i");
+module_param(freq_debug, uint, 0);
 MODULE_PARM_DESC(freq_debug, "Set the debug messages to on=1/off=0");
 #else
 #define freq_debug 0
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 7cd9ef8deb02..35f25fdaeba3 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -129,28 +129,20 @@ static void clk_pxa3xx_cken_enable(struct clk *clk)
 {
 unsigned long mask = 1ul << (clk->cken & 0x1f);
 
-local_irq_disable();
-
 if (clk->cken < 32)
 CKENA |= mask;
 else
 CKENB |= mask;
-
-local_irq_enable();
 }
 
 static void clk_pxa3xx_cken_disable(struct clk *clk)
 {
 unsigned long mask = 1ul << (clk->cken & 0x1f);
 
-local_irq_disable();
-
 if (clk->cken < 32)
 CKENA &= ~mask;
 else
 CKENB &= ~mask;
-
-local_irq_enable();
 }
 
 static const struct clkops clk_pxa3xx_cken_ops = {
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index 7731d50dd86c..afd2cbfca0d9 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -58,7 +58,7 @@ static struct platform_device smc91x_device = {
 .resource = smc91x_resources,
 };
 
-#if defined(CONFIG_FB_PXA) || (CONFIG_FB_PXA_MODULES)
+#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
 static void zylonite_backlight_power(int on)
 {
 gpio_set_value(gpio_backlight, on);
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 2728b0e7d2bb..3f6dc40b8353 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -120,6 +120,8 @@ full_search:
 */
 int valid_phys_addr_range(unsigned long addr, size_t size)
 {
+if (addr < PHYS_OFFSET)
+return 0;
 if (addr + size > __pa(high_memory))
 return 0;
 
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 500c9610ab30..e0f19ab91163 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -75,7 +75,7 @@ no_pgd:
 void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
 {
 pmd_t *pmd;
-struct page *pte;
+pgtable_t pte;
 
 if (!pgd)
 return;
@@ -90,10 +90,8 @@ void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
 goto free;
 }
 
-pte = pmd_page(*pmd);
+pte = pmd_pgtable(*pmd);
 pmd_clear(pmd);
-dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
-pte_lock_deinit(pte);
 pte_free(mm, pte);
 pmd_free(mm, pmd);
 free:
diff --git a/arch/avr32/boards/atstk1000/atstk1004.c b/arch/avr32/boards/atstk1000/atstk1004.c
index 5a77030e07a0..e765a8652b3e 100644
--- a/arch/avr32/boards/atstk1000/atstk1004.c
+++ b/arch/avr32/boards/atstk1000/atstk1004.c
@@ -129,7 +129,7 @@ static int __init atstk1004_init(void)
 #ifdef CONFIG_BOARD_ATSTK100X_SPI1
 at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info));
 #endif
-#ifndef CONFIG_BOARD_ATSTK1002_SW2_CUSTOM
+#ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM
 at32_add_device_mci(0);
 #endif
 at32_add_device_lcdc(0, &atstk1000_lcdc_data,
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index eaaa69bbdc38..7f4af0b1e111 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -11,6 +11,7 @@
 #include <linux/fs.h>
 #include <linux/ptrace.h>
 #include <linux/reboot.h>
+#include <linux/tick.h>
 #include <linux/uaccess.h>
 #include <linux/unistd.h>
 
@@ -30,8 +31,10 @@ void cpu_idle(void)
 {
 /* endless idle loop with no priority at all */
 while (1) {
+tick_nohz_stop_sched_tick();
 while (!need_resched())
 cpu_idle_sleep();
+tick_nohz_restart_sched_tick();
 preempt_enable_no_resched();
 schedule();
 preempt_disable();
@@ -345,6 +348,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 p->thread.cpu_context.ksp = (unsigned long)childregs;
 p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
 
+clear_tsk_thread_flag(p, TIF_DEBUG);
 if ((clone_flags & CLONE_PTRACE) && test_thread_flag(TIF_DEBUG))
 ocd_enable(p);
 
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 6560cb18b4e3..ce4e4296b954 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -189,6 +189,8 @@ no_context:
 
 page = sysreg_read(PTBR);
 printk(KERN_ALERT "ptbr = %08lx", page);
+if (address >= TASK_SIZE)
+page = (unsigned long)swapper_pg_dir;
 if (page) {
 page = ((unsigned long *)page)[address >> 22];
 printk(" pgd = %08lx", page);
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index fe254f886a6e..75eba2ca7881 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -98,8 +98,11 @@ drivers-$(CONFIG_OPROFILE) += arch/$(ARCH)/oprofile/
 # them changed. We use .mach to indicate when they were updated
 # last, otherwise make uses the target directory mtime.
 
+show_mach_symlink = :
+quiet_show_mach_symlink = echo ' SYMLINK include/asm-$(ARCH)/mach-$(MACHINE) -> include/asm-$(ARCH)/mach'
+silent_show_mach_symlink = :
 include/asm-blackfin/.mach: $(wildcard include/config/arch/*.h) include/config/auto.conf
-@echo ' SYMLINK include/asm-$(ARCH)/mach-$(MACHINE) -> include/asm-$(ARCH)/mach'
+@$($(quiet)show_mach_symlink)
 ifneq ($(KBUILD_SRC),)
 $(Q)mkdir -p include/asm-$(ARCH)
 $(Q)ln -fsn $(srctree)/include/asm-$(ARCH)/mach-$(MACHINE) include/asm-$(ARCH)/mach
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index d59ee1530bd4..ae320dcfedef 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -1,7 +1,6 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.22.14
-# Thu Nov 29 17:32:47 2007
+# Linux kernel version: 2.6.22.16
 #
 # CONFIG_MMU is not set
 # CONFIG_FPU is not set
@@ -116,7 +115,10 @@ CONFIG_PREEMPT_VOLUNTARY=y
 # Processor and Board Settings
 #
 # CONFIG_BF522 is not set
+# CONFIG_BF523 is not set
+# CONFIG_BF524 is not set
 # CONFIG_BF525 is not set
+# CONFIG_BF526 is not set
 CONFIG_BF527=y
 # CONFIG_BF531 is not set
 # CONFIG_BF532 is not set
@@ -306,6 +308,7 @@ CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_WB is not set
 CONFIG_BFIN_WT=y
 CONFIG_L1_MAX_PIECE=16
+# CONFIG_MPU is not set
 
 #
 # Asynchonous Memory Configuration
@@ -354,6 +357,7 @@ CONFIG_BINFMT_ZFLAT=y
 # Power management options
 #
 # CONFIG_PM is not set
+# CONFIG_PM_WAKEUP_BY_GPIO is not set
 
 #
 # Networking
@@ -496,7 +500,6 @@ CONFIG_MTD_CFI_I2=y
 # CONFIG_MTD_CFI_INTELEXT is not set
 # CONFIG_MTD_CFI_AMDSTD is not set
 # CONFIG_MTD_CFI_STAA is not set
-CONFIG_MTD_MW320D=m
 CONFIG_MTD_RAM=y
 CONFIG_MTD_ROM=m
 # CONFIG_MTD_ABSENT is not set
@@ -506,9 +509,6 @@ CONFIG_MTD_ROM=m
 #
 CONFIG_MTD_COMPLEX_MAPPINGS=y
 # CONFIG_MTD_PHYSMAP is not set
-CONFIG_MTD_BF5xx=m
-CONFIG_BFIN_FLASH_SIZE=0x400000
-CONFIG_EBIU_FLASH_BASE=0x20000000
 # CONFIG_MTD_UCLINUX is not set
 # CONFIG_MTD_PLATRAM is not set
 
@@ -684,7 +684,6 @@ CONFIG_INPUT_MISC=y
 # CONFIG_INPUT_POWERMATE is not set
 # CONFIG_INPUT_YEALINK is not set
 # CONFIG_INPUT_UINPUT is not set
-# CONFIG_BF53X_PFBUTTONS is not set
 # CONFIG_TWI_KEYPAD is not set
 
 #
@@ -702,12 +701,12 @@ CONFIG_INPUT_MISC=y
 # CONFIG_BF5xx_PPIFCD is not set
 # CONFIG_BFIN_SIMPLE_TIMER is not set
 # CONFIG_BF5xx_PPI is not set
+CONFIG_BFIN_OTP=y
+# CONFIG_BFIN_OTP_WRITE_ENABLE is not set
 # CONFIG_BFIN_SPORT is not set
 # CONFIG_BFIN_TIMER_LATENCY is not set
 # CONFIG_TWI_LCD is not set
 # CONFIG_AD5304 is not set
-# CONFIG_BF5xx_TEA5764 is not set
-# CONFIG_BF5xx_FBDMA is not set
 # CONFIG_VT is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
@@ -772,7 +771,6 @@ CONFIG_I2C_CHARDEV=m
 #
 # I2C Hardware Bus support
 #
-# CONFIG_I2C_BLACKFIN_GPIO is not set
 CONFIG_I2C_BLACKFIN_TWI=m
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=50
 # CONFIG_I2C_GPIO is not set
diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
index 811711f59a25..9621caa60b5f 100644
--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
@@ -322,10 +322,9 @@ CONFIG_PM=y
 # CONFIG_PM_LEGACY is not set
 # CONFIG_PM_DEBUG is not set
 # CONFIG_PM_SYSFS_DEPRECATED is not set
-CONFIG_PM_WAKEUP_GPIO_BY_SIC_IWR=y
+CONFIG_PM_BFIN_SLEEP_DEEPER=y
+# CONFIG_PM_BFIN_SLEEP is not set
 # CONFIG_PM_WAKEUP_BY_GPIO is not set
-# CONFIG_PM_WAKEUP_GPIO_API is not set
-CONFIG_PM_WAKEUP_SIC_IWR=0x80
 
 #
 # CPU Frequency scaling
@@ -697,7 +696,6 @@ CONFIG_SERIAL_BFIN_DMA=y
 # CONFIG_SERIAL_BFIN_PIO is not set
 CONFIG_SERIAL_BFIN_UART0=y
 # CONFIG_BFIN_UART0_CTSRTS is not set
-# CONFIG_SERIAL_BFIN_UART1 is not set
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 # CONFIG_SERIAL_BFIN_SPORT is not set
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index 198f4123af4b..b51e76ce7f4f 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -323,10 +323,9 @@ CONFIG_PM=y
 # CONFIG_PM_LEGACY is not set
 # CONFIG_PM_DEBUG is not set
 # CONFIG_PM_SYSFS_DEPRECATED is not set
-CONFIG_PM_WAKEUP_GPIO_BY_SIC_IWR=y
+CONFIG_PM_BFIN_SLEEP_DEEPER=y
+# CONFIG_PM_BFIN_SLEEP is not set
 # CONFIG_PM_WAKEUP_BY_GPIO is not set
-# CONFIG_PM_WAKEUP_GPIO_API is not set
-CONFIG_PM_WAKEUP_SIC_IWR=0x80
 
 #
 # CPU Frequency scaling
@@ -714,7 +713,6 @@ CONFIG_SERIAL_BFIN_DMA=y
 # CONFIG_SERIAL_BFIN_PIO is not set
 CONFIG_SERIAL_BFIN_UART0=y
 # CONFIG_BFIN_UART0_CTSRTS is not set
-# CONFIG_SERIAL_BFIN_UART1 is not set
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 # CONFIG_SERIAL_BFIN_SPORT is not set
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index b37ccc681e7a..d45fa535dad7 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -330,10 +330,9 @@ CONFIG_PM=y
 # CONFIG_PM_LEGACY is not set
 # CONFIG_PM_DEBUG is not set
 # CONFIG_PM_SYSFS_DEPRECATED is not set
-CONFIG_PM_WAKEUP_GPIO_BY_SIC_IWR=y
+CONFIG_PM_BFIN_SLEEP_DEEPER=y
+# CONFIG_PM_BFIN_SLEEP is not set
 # CONFIG_PM_WAKEUP_BY_GPIO is not set
-# CONFIG_PM_WAKEUP_GPIO_API is not set
-CONFIG_PM_WAKEUP_SIC_IWR=0x8
 
 #
 # CPU Frequency scaling
@@ -1013,6 +1012,7 @@ CONFIG_SND_BFIN_AD73311_SE=4
 CONFIG_SND_SOC_AC97_BUS=y
 CONFIG_SND_SOC=m
 CONFIG_SND_BF5XX_SOC=m
+CONFIG_SND_MMAP_SUPPORT=y
 CONFIG_SND_BF5XX_SOC_AC97=m
 # CONFIG_SND_BF5XX_SOC_WM8750 is not set
 # CONFIG_SND_BF5XX_SOC_WM8731 is not set
diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
index fd702161ef59..c9707f7665ad 100644
--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
@@ -396,6 +396,7 @@ CONFIG_BINFMT_ZFLAT=y | |||
396 | # Power management options | 396 | # Power management options |
397 | # | 397 | # |
398 | # CONFIG_PM is not set | 398 | # CONFIG_PM is not set |
399 | # CONFIG_PM_WAKEUP_BY_GPIO is not set | ||
399 | 400 | ||
400 | # | 401 | # |
401 | # CPU Frequency scaling | 402 | # CPU Frequency scaling |
@@ -1075,6 +1076,7 @@ CONFIG_SND_VERBOSE_PROCFS=y | |||
1075 | CONFIG_SND_SOC_AC97_BUS=y | 1076 | CONFIG_SND_SOC_AC97_BUS=y |
1076 | CONFIG_SND_SOC=y | 1077 | CONFIG_SND_SOC=y |
1077 | CONFIG_SND_BF5XX_SOC=y | 1078 | CONFIG_SND_BF5XX_SOC=y |
1079 | CONFIG_SND_MMAP_SUPPORT=y | ||
1078 | CONFIG_SND_BF5XX_SOC_AC97=y | 1080 | CONFIG_SND_BF5XX_SOC_AC97=y |
1079 | CONFIG_SND_BF5XX_SOC_BF548_EZKIT=y | 1081 | CONFIG_SND_BF5XX_SOC_BF548_EZKIT=y |
1080 | # CONFIG_SND_BF5XX_SOC_WM8750 is not set | 1082 | # CONFIG_SND_BF5XX_SOC_WM8750 is not set |
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index 8546994939fb..4d8a63331309 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -367,6 +367,7 @@ CONFIG_BINFMT_ZFLAT=y | |||
367 | # Power management options | 367 | # Power management options |
368 | # | 368 | # |
369 | # CONFIG_PM is not set | 369 | # CONFIG_PM is not set |
370 | # CONFIG_PM_WAKEUP_BY_GPIO is not set | ||
370 | 371 | ||
371 | # | 372 | # |
372 | # Networking | 373 | # Networking |
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 5453bc3664fc..8fd5d22cec34 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -105,13 +105,14 @@ int request_dma(unsigned int channel, char *device_id) | |||
105 | mutex_unlock(&(dma_ch[channel].dmalock)); | 105 | mutex_unlock(&(dma_ch[channel].dmalock)); |
106 | 106 | ||
107 | #ifdef CONFIG_BF54x | 107 | #ifdef CONFIG_BF54x |
108 | if (channel >= CH_UART2_RX && channel <= CH_UART3_TX && | 108 | if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) { |
109 | strncmp(device_id, "BFIN_UART", 9) == 0) | 109 | if (strncmp(device_id, "BFIN_UART", 9) == 0) |
110 | dma_ch[channel].regs->peripheral_map |= | 110 | dma_ch[channel].regs->peripheral_map |= |
111 | (channel - CH_UART2_RX + 0xC); | 111 | (channel - CH_UART2_RX + 0xC); |
112 | else | 112 | else |
113 | dma_ch[channel].regs->peripheral_map |= | 113 | dma_ch[channel].regs->peripheral_map |= |
114 | (channel - CH_UART2_RX + 0x6); | 114 | (channel - CH_UART2_RX + 0x6); |
115 | } | ||
115 | #endif | 116 | #endif |
116 | 117 | ||
117 | dma_ch[channel].device_id = device_id; | 118 | dma_ch[channel].device_id = device_id; |
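Note on the request_dma() hunk above: this is a control-flow fix, not a cosmetic one. In the old code the UART2/UART3 channel-range test and the strncmp() were one combined condition, so the else branch also ran for channels outside that range and ORed a bogus (channel - CH_UART2_RX + 0x6) value into their peripheral_map. Nesting the strncmp() inside the range check confines both writes to the UART2/UART3 channels. A standalone sketch of the same before/after behaviour (plain C with made-up channel numbers, not the kernel code itself):

    #include <stdio.h>
    #include <string.h>

    /* Old shape: the else is attached to the whole combined condition, so it
     * also fires when ch is outside [lo, hi]. */
    static void before(int ch, const char *id, int lo, int hi, int *map)
    {
            if (ch >= lo && ch <= hi && strncmp(id, "BFIN_UART", 9) == 0)
                    *map |= 0xC;
            else
                    *map |= 0x6;    /* runs even for channels outside the range */
    }

    /* New shape: the range check gates both branches. */
    static void after(int ch, const char *id, int lo, int hi, int *map)
    {
            if (ch >= lo && ch <= hi) {
                    if (strncmp(id, "BFIN_UART", 9) == 0)
                            *map |= 0xC;
                    else
                            *map |= 0x6;
            }
    }

    int main(void)
    {
            int m_before = 0, m_after = 0;

            /* Channel 0 with a non-UART client: not a UART2/3 DMA channel. */
            before(0, "SPORT", 5, 8, &m_before);
            after(0, "SPORT", 5, 8, &m_after);
            printf("old logic: %#x, new logic: %#x\n", m_before, m_after);
            /* prints 0x6 for the old logic, 0 for the new one */
            return 0;
    }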
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index 5cf4bdb1df3b..1904d8b53328 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -1,9 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * bfin_gptimers.c - derived from bf53x_timers.c | 2 | * gptimers.c - Blackfin General Purpose Timer core API |
3 | * Driver for General Purpose Timer functions on the Blackfin processor | ||
4 | * | 3 | * |
5 | * Copyright (C) 2005 John DeHority | 4 | * Copyright (c) 2005-2008 Analog Devices Inc. |
6 | * Copyright (C) 2006 Hella Aglaia GmbH (awe@aglaia-gmbh.de) | 5 | * Copyright (C) 2005 John DeHority |
6 | * Copyright (C) 2006 Hella Aglaia GmbH (awe@aglaia-gmbh.de) | ||
7 | * | 7 | * |
8 | * Licensed under the GPLv2. | 8 | * Licensed under the GPLv2. |
9 | */ | 9 | */ |
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 8229b1090eb9..2255c289a714 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -32,6 +32,7 @@ | |||
32 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | 32 | static DEFINE_PER_CPU(struct cpu, cpu_devices); |
33 | 33 | ||
34 | u16 _bfin_swrst; | 34 | u16 _bfin_swrst; |
35 | EXPORT_SYMBOL(_bfin_swrst); | ||
35 | 36 | ||
36 | unsigned long memory_start, memory_end, physical_mem_end; | 37 | unsigned long memory_start, memory_end, physical_mem_end; |
37 | unsigned long reserved_mem_dcache_on; | 38 | unsigned long reserved_mem_dcache_on; |
@@ -514,6 +515,7 @@ static __init void memory_setup(void) | |||
514 | printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20); | 515 | printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20); |
515 | 516 | ||
516 | printk(KERN_INFO "Memory map:\n" | 517 | printk(KERN_INFO "Memory map:\n" |
518 | KERN_INFO " fixedcode = 0x%p-0x%p\n" | ||
517 | KERN_INFO " text = 0x%p-0x%p\n" | 519 | KERN_INFO " text = 0x%p-0x%p\n" |
518 | KERN_INFO " rodata = 0x%p-0x%p\n" | 520 | KERN_INFO " rodata = 0x%p-0x%p\n" |
519 | KERN_INFO " bss = 0x%p-0x%p\n" | 521 | KERN_INFO " bss = 0x%p-0x%p\n" |
@@ -527,7 +529,8 @@ static __init void memory_setup(void) | |||
527 | #if DMA_UNCACHED_REGION > 0 | 529 | #if DMA_UNCACHED_REGION > 0 |
528 | KERN_INFO " DMA Zone = 0x%p-0x%p\n" | 530 | KERN_INFO " DMA Zone = 0x%p-0x%p\n" |
529 | #endif | 531 | #endif |
530 | , _stext, _etext, | 532 | , (void *)FIXED_CODE_START, (void *)FIXED_CODE_END, |
533 | _stext, _etext, | ||
531 | __start_rodata, __end_rodata, | 534 | __start_rodata, __end_rodata, |
532 | __bss_start, __bss_stop, | 535 | __bss_start, __bss_stop, |
533 | _sdata, _edata, | 536 | _sdata, _edata, |
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index aed832540b3b..cb01a9de2680 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -147,44 +147,64 @@ SECTIONS | |||
147 | 147 | ||
148 | __l1_lma_start = .; | 148 | __l1_lma_start = .; |
149 | 149 | ||
150 | #if L1_CODE_LENGTH | ||
151 | # define LDS_L1_CODE *(.l1.text) | ||
152 | #else | ||
153 | # define LDS_L1_CODE | ||
154 | #endif | ||
150 | .text_l1 L1_CODE_START : AT(LOADADDR(.init.ramfs) + SIZEOF(.init.ramfs)) | 155 | .text_l1 L1_CODE_START : AT(LOADADDR(.init.ramfs) + SIZEOF(.init.ramfs)) |
151 | { | 156 | { |
152 | . = ALIGN(4); | 157 | . = ALIGN(4); |
153 | __stext_l1 = .; | 158 | __stext_l1 = .; |
154 | *(.l1.text) | 159 | LDS_L1_CODE |
155 | |||
156 | . = ALIGN(4); | 160 | . = ALIGN(4); |
157 | __etext_l1 = .; | 161 | __etext_l1 = .; |
158 | } | 162 | } |
159 | 163 | ||
164 | #if L1_DATA_A_LENGTH | ||
165 | # define LDS_L1_A_DATA *(.l1.data) | ||
166 | # define LDS_L1_A_BSS *(.l1.bss) | ||
167 | # define LDS_L1_A_CACHE *(.data_l1.cacheline_aligned) | ||
168 | #else | ||
169 | # define LDS_L1_A_DATA | ||
170 | # define LDS_L1_A_BSS | ||
171 | # define LDS_L1_A_CACHE | ||
172 | #endif | ||
160 | .data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1)) | 173 | .data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1)) |
161 | { | 174 | { |
162 | . = ALIGN(4); | 175 | . = ALIGN(4); |
163 | __sdata_l1 = .; | 176 | __sdata_l1 = .; |
164 | *(.l1.data) | 177 | LDS_L1_A_DATA |
165 | __edata_l1 = .; | 178 | __edata_l1 = .; |
166 | 179 | ||
167 | . = ALIGN(4); | 180 | . = ALIGN(4); |
168 | __sbss_l1 = .; | 181 | __sbss_l1 = .; |
169 | *(.l1.bss) | 182 | LDS_L1_A_BSS |
170 | 183 | ||
171 | . = ALIGN(32); | 184 | . = ALIGN(32); |
172 | *(.data_l1.cacheline_aligned) | 185 | LDS_L1_A_CACHE |
173 | 186 | ||
174 | . = ALIGN(4); | 187 | . = ALIGN(4); |
175 | __ebss_l1 = .; | 188 | __ebss_l1 = .; |
176 | } | 189 | } |
177 | 190 | ||
191 | #if L1_DATA_B_LENGTH | ||
192 | # define LDS_L1_B_DATA *(.l1.data.B) | ||
193 | # define LDS_L1_B_BSS *(.l1.bss.B) | ||
194 | #else | ||
195 | # define LDS_L1_B_DATA | ||
196 | # define LDS_L1_B_BSS | ||
197 | #endif | ||
178 | .data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1)) | 198 | .data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1)) |
179 | { | 199 | { |
180 | . = ALIGN(4); | 200 | . = ALIGN(4); |
181 | __sdata_b_l1 = .; | 201 | __sdata_b_l1 = .; |
182 | *(.l1.data.B) | 202 | LDS_L1_B_DATA |
183 | __edata_b_l1 = .; | 203 | __edata_b_l1 = .; |
184 | 204 | ||
185 | . = ALIGN(4); | 205 | . = ALIGN(4); |
186 | __sbss_b_l1 = .; | 206 | __sbss_b_l1 = .; |
187 | *(.l1.bss.B) | 207 | LDS_L1_B_BSS |
188 | 208 | ||
189 | . = ALIGN(4); | 209 | . = ALIGN(4); |
190 | __ebss_b_l1 = .; | 210 | __ebss_b_l1 = .; |
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 337515fba612..cf4bc0d83355 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -180,8 +180,8 @@ static struct mtd_partition partition_info[] = { | |||
180 | }, | 180 | }, |
181 | { | 181 | { |
182 | .name = "File System", | 182 | .name = "File System", |
183 | .offset = 4 * SIZE_1M, | 183 | .offset = MTDPART_OFS_APPEND, |
184 | .size = (256 - 4) * SIZE_1M, | 184 | .size = MTDPART_SIZ_FULL, |
185 | }, | 185 | }, |
186 | }; | 186 | }; |
187 | 187 | ||
@@ -422,11 +422,11 @@ static struct mtd_partition bfin_spi_flash_partitions[] = { | |||
422 | }, { | 422 | }, { |
423 | .name = "kernel", | 423 | .name = "kernel", |
424 | .size = 0xe0000, | 424 | .size = 0xe0000, |
425 | .offset = 0x20000 | 425 | .offset = MTDPART_OFS_APPEND, |
426 | }, { | 426 | }, { |
427 | .name = "file system", | 427 | .name = "file system", |
428 | .size = 0x700000, | 428 | .size = MTDPART_SIZ_FULL, |
429 | .offset = 0x00100000, | 429 | .offset = MTDPART_OFS_APPEND, |
430 | } | 430 | } |
431 | }; | 431 | }; |
432 | 432 | ||
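Several board files in this series make the same MTD conversion as the two hunks above: instead of hard-coding each partition's offset and size, they use the MTD core's MTDPART_OFS_APPEND (start this partition right after the previous one) and MTDPART_SIZ_FULL (extend it to the end of the device), so resizing one partition no longer requires recomputing the others by hand. A minimal kernel-context sketch of the pattern; the names and the fixed sizes below are placeholders, not taken from the patch:

    #include <linux/mtd/partitions.h>

    /* Hypothetical layout: only the bootloader size is pinned; "kernel"
     * starts wherever the bootloader ends, "file system" takes the rest. */
    static struct mtd_partition example_partitions[] = {
            {
                    .name   = "bootloader",
                    .size   = 0x40000,
                    .offset = 0,
            }, {
                    .name   = "kernel",
                    .size   = 0xe0000,
                    .offset = MTDPART_OFS_APPEND,  /* right after the bootloader */
            }, {
                    .name   = "file system",
                    .size   = MTDPART_SIZ_FULL,    /* whatever is left on the chip */
                    .offset = MTDPART_OFS_APPEND,
            },
    };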
@@ -484,13 +484,6 @@ static struct bfin5xx_spi_chip spi_si3xxx_chip_info = { | |||
484 | }; | 484 | }; |
485 | #endif | 485 | #endif |
486 | 486 | ||
487 | #if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE) | ||
488 | static struct bfin5xx_spi_chip ad5304_chip_info = { | ||
489 | .enable_dma = 0, | ||
490 | .bits_per_word = 16, | ||
491 | }; | ||
492 | #endif | ||
493 | |||
494 | #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) | 487 | #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) |
495 | static struct bfin5xx_spi_chip spi_ad7877_chip_info = { | 488 | static struct bfin5xx_spi_chip spi_ad7877_chip_info = { |
496 | .enable_dma = 0, | 489 | .enable_dma = 0, |
@@ -611,17 +604,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { | |||
611 | .mode = SPI_MODE_3, | 604 | .mode = SPI_MODE_3, |
612 | }, | 605 | }, |
613 | #endif | 606 | #endif |
614 | #if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE) | ||
615 | { | ||
616 | .modalias = "ad5304_spi", | ||
617 | .max_speed_hz = 1250000, /* max spi clock (SCK) speed in HZ */ | ||
618 | .bus_num = 0, | ||
619 | .chip_select = 2, | ||
620 | .platform_data = NULL, | ||
621 | .controller_data = &ad5304_chip_info, | ||
622 | .mode = SPI_MODE_2, | ||
623 | }, | ||
624 | #endif | ||
625 | #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) | 607 | #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) |
626 | { | 608 | { |
627 | .modalias = "ad7877", | 609 | .modalias = "ad7877", |
@@ -818,6 +800,19 @@ static struct platform_device bfin_device_gpiokeys = { | |||
818 | }; | 800 | }; |
819 | #endif | 801 | #endif |
820 | 802 | ||
803 | static struct resource bfin_gpios_resources = { | ||
804 | .start = 0, | ||
805 | .end = MAX_BLACKFIN_GPIOS - 1, | ||
806 | .flags = IORESOURCE_IRQ, | ||
807 | }; | ||
808 | |||
809 | static struct platform_device bfin_gpios_device = { | ||
810 | .name = "simple-gpio", | ||
811 | .id = -1, | ||
812 | .num_resources = 1, | ||
813 | .resource = &bfin_gpios_resources, | ||
814 | }; | ||
815 | |||
821 | static struct platform_device *stamp_devices[] __initdata = { | 816 | static struct platform_device *stamp_devices[] __initdata = { |
822 | #if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) | 817 | #if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) |
823 | &bf5xx_nand_device, | 818 | &bf5xx_nand_device, |
@@ -895,6 +890,8 @@ static struct platform_device *stamp_devices[] __initdata = { | |||
895 | #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) | 890 | #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) |
896 | &bfin_device_gpiokeys, | 891 | &bfin_device_gpiokeys, |
897 | #endif | 892 | #endif |
893 | |||
894 | &bfin_gpios_device, | ||
898 | }; | 895 | }; |
899 | 896 | ||
900 | static int __init stamp_init(void) | 897 | static int __init stamp_init(void) |
@@ -921,13 +918,18 @@ void native_machine_restart(char *cmd) | |||
921 | bfin_gpio_reset_spi0_ssel1(); | 918 | bfin_gpio_reset_spi0_ssel1(); |
922 | } | 919 | } |
923 | 920 | ||
924 | /* | ||
925 | * Currently the MAC address is saved in Flash by U-Boot | ||
926 | */ | ||
927 | #define FLASH_MAC 0x203f0000 | ||
928 | void bfin_get_ether_addr(char *addr) | 921 | void bfin_get_ether_addr(char *addr) |
929 | { | 922 | { |
930 | *(u32 *)(&(addr[0])) = bfin_read32(FLASH_MAC); | 923 | /* the MAC is stored in OTP memory page 0xDF */ |
931 | *(u16 *)(&(addr[4])) = bfin_read16(FLASH_MAC + 4); | 924 | u32 ret; |
925 | u64 otp_mac; | ||
926 | u32 (*otp_read)(u32 page, u32 flags, u64 *page_content) = (void *)0xEF00001A; | ||
927 | |||
928 | ret = otp_read(0xDF, 0x00, &otp_mac); | ||
929 | if (!(ret & 0x1)) { | ||
930 | char *otp_mac_p = (char *)&otp_mac; | ||
931 | for (ret = 0; ret < 6; ++ret) | ||
932 | addr[ret] = otp_mac_p[5 - ret]; | ||
933 | } | ||
932 | } | 934 | } |
933 | EXPORT_SYMBOL(bfin_get_ether_addr); | 935 | EXPORT_SYMBOL(bfin_get_ether_addr); |
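The new bfin_get_ether_addr() above stops reading the MAC from a fixed flash offset and instead fetches it from OTP page 0xDF through a routine in the on-chip boot ROM (the function pointer at 0xEF00001A, as given in the patch), then copies the six bytes out of the returned 64-bit word in reverse order. A standalone sketch of just that byte-reversal step; the OTP value is a made-up example, and the sketch assumes a little-endian host, as the Blackfin is:

    #include <stdio.h>

    int main(void)
    {
            /* Pretend the OTP read returned this word (example value only). */
            unsigned long long otp_mac = 0x0000aabbccddeeffULL;
            const unsigned char *p = (const unsigned char *)&otp_mac;
            unsigned char addr[6];
            int i;

            /* Same copy as the board code: the MAC sits in the low six bytes
             * of the word and is taken in reverse order. */
            for (i = 0; i < 6; i++)
                    addr[i] = p[5 - i];

            for (i = 0; i < 6; i++)
                    printf("%02x%c", addr[i], i == 5 ? '\n' : ':');
            return 0;
    }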
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index 2b09aa39f565..241b5a20a36a 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -99,11 +99,11 @@ static struct mtd_partition bfin_spi_flash_partitions[] = { | |||
99 | }, { | 99 | }, { |
100 | .name = "kernel", | 100 | .name = "kernel", |
101 | .size = 0xe0000, | 101 | .size = 0xe0000, |
102 | .offset = 0x20000 | 102 | .offset = MTDPART_OFS_APPEND, |
103 | }, { | 103 | }, { |
104 | .name = "file system", | 104 | .name = "file system", |
105 | .size = 0x700000, | 105 | .size = MTDPART_SIZ_FULL, |
106 | .offset = 0x00100000, | 106 | .offset = MTDPART_OFS_APPEND, |
107 | } | 107 | } |
108 | }; | 108 | }; |
109 | 109 | ||
@@ -298,6 +298,19 @@ static struct platform_device bfin_device_gpiokeys = { | |||
298 | }; | 298 | }; |
299 | #endif | 299 | #endif |
300 | 300 | ||
301 | static struct resource bfin_gpios_resources = { | ||
302 | .start = 0, | ||
303 | .end = MAX_BLACKFIN_GPIOS - 1, | ||
304 | .flags = IORESOURCE_IRQ, | ||
305 | }; | ||
306 | |||
307 | static struct platform_device bfin_gpios_device = { | ||
308 | .name = "simple-gpio", | ||
309 | .id = -1, | ||
310 | .num_resources = 1, | ||
311 | .resource = &bfin_gpios_resources, | ||
312 | }; | ||
313 | |||
301 | #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) | 314 | #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) |
302 | #include <linux/i2c-gpio.h> | 315 | #include <linux/i2c-gpio.h> |
303 | 316 | ||
@@ -350,6 +363,8 @@ static struct platform_device *ezkit_devices[] __initdata = { | |||
350 | #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) | 363 | #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) |
351 | &i2c_gpio_device, | 364 | &i2c_gpio_device, |
352 | #endif | 365 | #endif |
366 | |||
367 | &bfin_gpios_device, | ||
353 | }; | 368 | }; |
354 | 369 | ||
355 | static int __init ezkit_init(void) | 370 | static int __init ezkit_init(void) |
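Each board file touched by this series registers the same "simple-gpio" platform device: a single IORESOURCE_IRQ resource whose start/end span GPIO numbers 0 through MAX_BLACKFIN_GPIOS - 1. The matching driver is expected to read that range back via platform_get_resource(). A kernel-context sketch of how such a driver might consume it (this is illustrative only, not the actual simple-gpio driver; the message text is made up):

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/platform_device.h>

    static int simple_gpio_probe(struct platform_device *pdev)
    {
            /* The board code published the GPIO range as an IRQ resource. */
            struct resource *res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

            if (!res)
                    return -ENODEV;

            dev_info(&pdev->dev, "handling GPIOs %lu to %lu\n",
                     (unsigned long)res->start, (unsigned long)res->end);
            return 0;
    }

    static struct platform_driver simple_gpio_driver = {
            .probe  = simple_gpio_probe,
            .driver = {
                    .name = "simple-gpio",  /* matches the device name above */
            },
    };

    static int __init simple_gpio_init(void)
    {
            return platform_driver_register(&simple_gpio_driver);
    }
    module_init(simple_gpio_init);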
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index a645f6fd091b..b2ac4816ae62 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -112,7 +112,7 @@ static struct platform_device net2272_bfin_device = { | |||
112 | static struct mtd_partition stamp_partitions[] = { | 112 | static struct mtd_partition stamp_partitions[] = { |
113 | { | 113 | { |
114 | .name = "Bootloader", | 114 | .name = "Bootloader", |
115 | .size = 0x20000, | 115 | .size = 0x40000, |
116 | .offset = 0, | 116 | .offset = 0, |
117 | }, { | 117 | }, { |
118 | .name = "Kernel", | 118 | .name = "Kernel", |
@@ -160,17 +160,17 @@ static struct platform_device stamp_flash_device = { | |||
160 | static struct mtd_partition bfin_spi_flash_partitions[] = { | 160 | static struct mtd_partition bfin_spi_flash_partitions[] = { |
161 | { | 161 | { |
162 | .name = "bootloader", | 162 | .name = "bootloader", |
163 | .size = 0x00020000, | 163 | .size = 0x00040000, |
164 | .offset = 0, | 164 | .offset = 0, |
165 | .mask_flags = MTD_CAP_ROM | 165 | .mask_flags = MTD_CAP_ROM |
166 | }, { | 166 | }, { |
167 | .name = "kernel", | 167 | .name = "kernel", |
168 | .size = 0xe0000, | 168 | .size = 0xe0000, |
169 | .offset = 0x20000 | 169 | .offset = MTDPART_OFS_APPEND, |
170 | }, { | 170 | }, { |
171 | .name = "file system", | 171 | .name = "file system", |
172 | .size = 0x700000, | 172 | .size = MTDPART_SIZ_FULL, |
173 | .offset = 0x00100000, | 173 | .offset = MTDPART_OFS_APPEND, |
174 | } | 174 | } |
175 | }; | 175 | }; |
176 | 176 | ||
@@ -212,13 +212,6 @@ static struct bfin5xx_spi_chip spi_si3xxx_chip_info = { | |||
212 | }; | 212 | }; |
213 | #endif | 213 | #endif |
214 | 214 | ||
215 | #if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE) | ||
216 | static struct bfin5xx_spi_chip ad5304_chip_info = { | ||
217 | .enable_dma = 0, | ||
218 | .bits_per_word = 16, | ||
219 | }; | ||
220 | #endif | ||
221 | |||
222 | #if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) | 215 | #if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) |
223 | static struct bfin5xx_spi_chip spi_mmc_chip_info = { | 216 | static struct bfin5xx_spi_chip spi_mmc_chip_info = { |
224 | .enable_dma = 1, | 217 | .enable_dma = 1, |
@@ -308,17 +301,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { | |||
308 | }, | 301 | }, |
309 | #endif | 302 | #endif |
310 | 303 | ||
311 | #if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE) | ||
312 | { | ||
313 | .modalias = "ad5304_spi", | ||
314 | .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */ | ||
315 | .bus_num = 0, | ||
316 | .chip_select = 2, | ||
317 | .platform_data = NULL, | ||
318 | .controller_data = &ad5304_chip_info, | ||
319 | .mode = SPI_MODE_2, | ||
320 | }, | ||
321 | #endif | ||
322 | #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) | 304 | #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) |
323 | { | 305 | { |
324 | .modalias = "spidev", | 306 | .modalias = "spidev", |
@@ -457,6 +439,19 @@ static struct platform_device bfin_device_gpiokeys = { | |||
457 | }; | 439 | }; |
458 | #endif | 440 | #endif |
459 | 441 | ||
442 | static struct resource bfin_gpios_resources = { | ||
443 | .start = 0, | ||
444 | .end = MAX_BLACKFIN_GPIOS - 1, | ||
445 | .flags = IORESOURCE_IRQ, | ||
446 | }; | ||
447 | |||
448 | static struct platform_device bfin_gpios_device = { | ||
449 | .name = "simple-gpio", | ||
450 | .id = -1, | ||
451 | .num_resources = 1, | ||
452 | .resource = &bfin_gpios_resources, | ||
453 | }; | ||
454 | |||
460 | #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) | 455 | #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) |
461 | #include <linux/i2c-gpio.h> | 456 | #include <linux/i2c-gpio.h> |
462 | 457 | ||
@@ -518,6 +513,8 @@ static struct platform_device *stamp_devices[] __initdata = { | |||
518 | #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) | 513 | #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) |
519 | &i2c_gpio_device, | 514 | &i2c_gpio_device, |
520 | #endif | 515 | #endif |
516 | |||
517 | &bfin_gpios_device, | ||
521 | &stamp_flash_device, | 518 | &stamp_flash_device, |
522 | }; | 519 | }; |
523 | 520 | ||
diff --git a/arch/blackfin/mach-bf537/boards/generic_board.c b/arch/blackfin/mach-bf537/boards/generic_board.c
index 8a3397db1d21..c95395ba7bfa 100644
--- a/arch/blackfin/mach-bf537/boards/generic_board.c
+++ b/arch/blackfin/mach-bf537/boards/generic_board.c
@@ -371,13 +371,6 @@ static struct bfin5xx_spi_chip spi_si3xxx_chip_info = { | |||
371 | }; | 371 | }; |
372 | #endif | 372 | #endif |
373 | 373 | ||
374 | #if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE) | ||
375 | static struct bfin5xx_spi_chip ad5304_chip_info = { | ||
376 | .enable_dma = 0, | ||
377 | .bits_per_word = 16, | ||
378 | }; | ||
379 | #endif | ||
380 | |||
381 | #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) | 374 | #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) |
382 | static struct bfin5xx_spi_chip spi_ad7877_chip_info = { | 375 | static struct bfin5xx_spi_chip spi_ad7877_chip_info = { |
383 | .enable_dma = 0, | 376 | .enable_dma = 0, |
@@ -483,17 +476,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { | |||
483 | .mode = SPI_MODE_3, | 476 | .mode = SPI_MODE_3, |
484 | }, | 477 | }, |
485 | #endif | 478 | #endif |
486 | #if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE) | ||
487 | { | ||
488 | .modalias = "ad5304_spi", | ||
489 | .max_speed_hz = 1250000, /* max spi clock (SCK) speed in HZ */ | ||
490 | .bus_num = 0, | ||
491 | .chip_select = 2, | ||
492 | .platform_data = NULL, | ||
493 | .controller_data = &ad5304_chip_info, | ||
494 | .mode = SPI_MODE_2, | ||
495 | }, | ||
496 | #endif | ||
497 | #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) | 479 | #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) |
498 | { | 480 | { |
499 | .modalias = "ad7877", | 481 | .modalias = "ad7877", |
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 9e2277e0d25c..ea83148993da 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -128,6 +128,19 @@ static struct platform_device bfin_device_gpiokeys = { | |||
128 | }; | 128 | }; |
129 | #endif | 129 | #endif |
130 | 130 | ||
131 | static struct resource bfin_gpios_resources = { | ||
132 | .start = 0, | ||
133 | .end = MAX_BLACKFIN_GPIOS - 1, | ||
134 | .flags = IORESOURCE_IRQ, | ||
135 | }; | ||
136 | |||
137 | static struct platform_device bfin_gpios_device = { | ||
138 | .name = "simple-gpio", | ||
139 | .id = -1, | ||
140 | .num_resources = 1, | ||
141 | .resource = &bfin_gpios_resources, | ||
142 | }; | ||
143 | |||
131 | #if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) | 144 | #if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) |
132 | static struct resource bfin_pcmcia_cf_resources[] = { | 145 | static struct resource bfin_pcmcia_cf_resources[] = { |
133 | { | 146 | { |
@@ -343,7 +356,7 @@ static struct platform_device net2272_bfin_device = { | |||
343 | static struct mtd_partition stamp_partitions[] = { | 356 | static struct mtd_partition stamp_partitions[] = { |
344 | { | 357 | { |
345 | .name = "Bootloader", | 358 | .name = "Bootloader", |
346 | .size = 0x20000, | 359 | .size = 0x40000, |
347 | .offset = 0, | 360 | .offset = 0, |
348 | }, { | 361 | }, { |
349 | .name = "Kernel", | 362 | .name = "Kernel", |
@@ -351,7 +364,7 @@ static struct mtd_partition stamp_partitions[] = { | |||
351 | .offset = MTDPART_OFS_APPEND, | 364 | .offset = MTDPART_OFS_APPEND, |
352 | }, { | 365 | }, { |
353 | .name = "RootFS", | 366 | .name = "RootFS", |
354 | .size = 0x400000 - 0x20000 - 0xE0000 - 0x10000, | 367 | .size = 0x400000 - 0x40000 - 0xE0000 - 0x10000, |
355 | .offset = MTDPART_OFS_APPEND, | 368 | .offset = MTDPART_OFS_APPEND, |
356 | }, { | 369 | }, { |
357 | .name = "MAC Address", | 370 | .name = "MAC Address", |
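Unlike the SPI flash partitions converted to MTDPART_OFS_APPEND/MTDPART_SIZ_FULL elsewhere in this series, the RootFS size in the hunk above is still spelled out by hand, so it has to be re-derived when the Bootloader slot doubles from 0x20000 to 0x40000. Spelled out:

    0x400000 - 0x40000 - 0xE0000 - 0x10000
      = 4096 KiB - 256 KiB - 896 KiB - 64 KiB
      = 2880 KiB (0x2D0000 bytes) left for the RootFS partition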
@@ -391,17 +404,17 @@ static struct platform_device stamp_flash_device = { | |||
391 | static struct mtd_partition bfin_spi_flash_partitions[] = { | 404 | static struct mtd_partition bfin_spi_flash_partitions[] = { |
392 | { | 405 | { |
393 | .name = "bootloader", | 406 | .name = "bootloader", |
394 | .size = 0x00020000, | 407 | .size = 0x00040000, |
395 | .offset = 0, | 408 | .offset = 0, |
396 | .mask_flags = MTD_CAP_ROM | 409 | .mask_flags = MTD_CAP_ROM |
397 | }, { | 410 | }, { |
398 | .name = "kernel", | 411 | .name = "kernel", |
399 | .size = 0xe0000, | 412 | .size = 0xe0000, |
400 | .offset = 0x20000 | 413 | .offset = MTDPART_OFS_APPEND, |
401 | }, { | 414 | }, { |
402 | .name = "file system", | 415 | .name = "file system", |
403 | .size = 0x700000, | 416 | .size = MTDPART_SIZ_FULL, |
404 | .offset = 0x00100000, | 417 | .offset = MTDPART_OFS_APPEND, |
405 | } | 418 | } |
406 | }; | 419 | }; |
407 | 420 | ||
@@ -459,13 +472,6 @@ static struct bfin5xx_spi_chip spi_si3xxx_chip_info = { | |||
459 | }; | 472 | }; |
460 | #endif | 473 | #endif |
461 | 474 | ||
462 | #if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE) | ||
463 | static struct bfin5xx_spi_chip ad5304_chip_info = { | ||
464 | .enable_dma = 0, | ||
465 | .bits_per_word = 16, | ||
466 | }; | ||
467 | #endif | ||
468 | |||
469 | #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) | 475 | #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) |
470 | static struct bfin5xx_spi_chip spi_ad7877_chip_info = { | 476 | static struct bfin5xx_spi_chip spi_ad7877_chip_info = { |
471 | .enable_dma = 0, | 477 | .enable_dma = 0, |
@@ -578,17 +584,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { | |||
578 | .mode = SPI_MODE_3, | 584 | .mode = SPI_MODE_3, |
579 | }, | 585 | }, |
580 | #endif | 586 | #endif |
581 | #if defined(CONFIG_AD5304) || defined(CONFIG_AD5304_MODULE) | ||
582 | { | ||
583 | .modalias = "ad5304_spi", | ||
584 | .max_speed_hz = 1250000, /* max spi clock (SCK) speed in HZ */ | ||
585 | .bus_num = 0, | ||
586 | .chip_select = 2, | ||
587 | .platform_data = NULL, | ||
588 | .controller_data = &ad5304_chip_info, | ||
589 | .mode = SPI_MODE_2, | ||
590 | }, | ||
591 | #endif | ||
592 | #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) | 587 | #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) |
593 | { | 588 | { |
594 | .modalias = "ad7877", | 589 | .modalias = "ad7877", |
@@ -821,6 +816,8 @@ static struct platform_device *stamp_devices[] __initdata = { | |||
821 | #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) | 816 | #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) |
822 | &bfin_device_gpiokeys, | 817 | &bfin_device_gpiokeys, |
823 | #endif | 818 | #endif |
819 | |||
820 | &bfin_gpios_device, | ||
824 | &stamp_flash_device, | 821 | &stamp_flash_device, |
825 | }; | 822 | }; |
826 | 823 | ||
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 916e963e83ba..a0950c1fd800 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -285,8 +285,8 @@ static struct mtd_partition partition_info[] = { | |||
285 | }, | 285 | }, |
286 | { | 286 | { |
287 | .name = "File System", | 287 | .name = "File System", |
288 | .offset = 4 * SIZE_1M, | 288 | .offset = MTDPART_OFS_APPEND, |
289 | .size = (256 - 4) * SIZE_1M, | 289 | .size = MTDPART_SIZ_FULL, |
290 | }, | 290 | }, |
291 | }; | 291 | }; |
292 | 292 | ||
@@ -333,7 +333,7 @@ static struct platform_device bf54x_sdh_device = { | |||
333 | static struct mtd_partition ezkit_partitions[] = { | 333 | static struct mtd_partition ezkit_partitions[] = { |
334 | { | 334 | { |
335 | .name = "Bootloader", | 335 | .name = "Bootloader", |
336 | .size = 0x20000, | 336 | .size = 0x40000, |
337 | .offset = 0, | 337 | .offset = 0, |
338 | }, { | 338 | }, { |
339 | .name = "Kernel", | 339 | .name = "Kernel", |
@@ -381,8 +381,8 @@ static struct mtd_partition bfin_spi_flash_partitions[] = { | |||
381 | .mask_flags = MTD_CAP_ROM | 381 | .mask_flags = MTD_CAP_ROM |
382 | }, { | 382 | }, { |
383 | .name = "linux kernel", | 383 | .name = "linux kernel", |
384 | .size = 0x1c0000, | 384 | .size = MTDPART_SIZ_FULL, |
385 | .offset = 0x40000 | 385 | .offset = MTDPART_OFS_APPEND, |
386 | } | 386 | } |
387 | }; | 387 | }; |
388 | 388 | ||
@@ -594,6 +594,19 @@ static struct platform_device bfin_device_gpiokeys = { | |||
594 | }; | 594 | }; |
595 | #endif | 595 | #endif |
596 | 596 | ||
597 | static struct resource bfin_gpios_resources = { | ||
598 | .start = 0, | ||
599 | .end = MAX_BLACKFIN_GPIOS - 1, | ||
600 | .flags = IORESOURCE_IRQ, | ||
601 | }; | ||
602 | |||
603 | static struct platform_device bfin_gpios_device = { | ||
604 | .name = "simple-gpio", | ||
605 | .id = -1, | ||
606 | .num_resources = 1, | ||
607 | .resource = &bfin_gpios_resources, | ||
608 | }; | ||
609 | |||
597 | static struct platform_device *ezkit_devices[] __initdata = { | 610 | static struct platform_device *ezkit_devices[] __initdata = { |
598 | #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) | 611 | #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) |
599 | &rtc_device, | 612 | &rtc_device, |
@@ -646,6 +659,8 @@ static struct platform_device *ezkit_devices[] __initdata = { | |||
646 | #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) | 659 | #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) |
647 | &bfin_device_gpiokeys, | 660 | &bfin_device_gpiokeys, |
648 | #endif | 661 | #endif |
662 | |||
663 | &bfin_gpios_device, | ||
649 | &ezkit_flash_device, | 664 | &ezkit_flash_device, |
650 | }; | 665 | }; |
651 | 666 | ||
diff --git a/arch/blackfin/mach-bf548/dma.c b/arch/blackfin/mach-bf548/dma.c
index 374803a8d2e8..f5479298bb79 100644
--- a/arch/blackfin/mach-bf548/dma.c
+++ b/arch/blackfin/mach-bf548/dma.c
@@ -27,6 +27,8 @@ | |||
27 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 27 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <linux/module.h> | ||
31 | |||
30 | #include <asm/blackfin.h> | 32 | #include <asm/blackfin.h> |
31 | #include <asm/dma.h> | 33 | #include <asm/dma.h> |
32 | 34 | ||
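The bf548 DMA file only gains an include here, presumably because it uses facilities declared in <linux/module.h> (such as EXPORT_SYMBOL) that previously arrived through another header. A minimal sketch of the pattern, with a hypothetical variable name:

    #include <linux/module.h>

    /* Hypothetical example: EXPORT_SYMBOL() is declared in <linux/module.h>,
     * so a file exporting symbols should include it explicitly rather than
     * relying on an indirect include. */
    int example_dma_channel_count;
    EXPORT_SYMBOL(example_dma_channel_count);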
diff --git a/arch/blackfin/mach-bf548/head.S b/arch/blackfin/mach-bf548/head.S
index 74fe258421a5..46222a75321a 100644
--- a/arch/blackfin/mach-bf548/head.S
+++ b/arch/blackfin/mach-bf548/head.S
@@ -28,6 +28,7 @@ | |||
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <linux/linkage.h> | 30 | #include <linux/linkage.h> |
31 | #include <linux/init.h> | ||
31 | #include <asm/blackfin.h> | 32 | #include <asm/blackfin.h> |
32 | #include <asm/trace.h> | 33 | #include <asm/trace.h> |
33 | #if CONFIG_BFIN_KERNEL_CLOCK | 34 | #if CONFIG_BFIN_KERNEL_CLOCK |
@@ -44,10 +45,9 @@ | |||
44 | 45 | ||
45 | #define INITIAL_STACK 0xFFB01000 | 46 | #define INITIAL_STACK 0xFFB01000 |
46 | 47 | ||
47 | .text | 48 | __INIT |
48 | 49 | ||
49 | ENTRY(__start) | 50 | ENTRY(__start) |
50 | ENTRY(__stext) | ||
51 | /* R0: argument of command line string, passed from uboot, save it */ | 51 | /* R0: argument of command line string, passed from uboot, save it */ |
52 | R7 = R0; | 52 | R7 = R0; |
53 | /* Enable Cycle Counter and Nesting Of Interrupts */ | 53 | /* Enable Cycle Counter and Nesting Of Interrupts */ |
@@ -213,6 +213,7 @@ ENTRY(__stext) | |||
213 | 213 | ||
214 | .LWAIT_HERE: | 214 | .LWAIT_HERE: |
215 | jump .LWAIT_HERE; | 215 | jump .LWAIT_HERE; |
216 | ENDPROC(__start) | ||
216 | 217 | ||
217 | ENTRY(_real_start) | 218 | ENTRY(_real_start) |
218 | [ -- sp ] = reti; | 219 | [ -- sp ] = reti; |
@@ -285,6 +286,9 @@ ENTRY(_real_start) | |||
285 | call _start_kernel; | 286 | call _start_kernel; |
286 | .L_exit: | 287 | .L_exit: |
287 | jump.s .L_exit; | 288 | jump.s .L_exit; |
289 | ENDPROC(_real_start) | ||
290 | |||
291 | __FINIT | ||
288 | 292 | ||
289 | .section .l1.text | 293 | .section .l1.text |
290 | #if CONFIG_BFIN_KERNEL_CLOCK | 294 | #if CONFIG_BFIN_KERNEL_CLOCK |
@@ -450,6 +454,7 @@ ENTRY(_start_dma_code) | |||
450 | SSYNC; | 454 | SSYNC; |
451 | 455 | ||
452 | RTS; | 456 | RTS; |
457 | ENDPROC(_start_dma_code) | ||
453 | #endif /* CONFIG_BFIN_KERNEL_CLOCK */ | 458 | #endif /* CONFIG_BFIN_KERNEL_CLOCK */ |
454 | 459 | ||
455 | .data | 460 | .data |
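Replacing the plain .text directive with the __INIT/__FINIT pair from <linux/init.h> moves the head.S boot path into .init.text, the same section that __init C code lands in, so the memory is handed back once the kernel frees init memory after boot (compare the free_initmem() change later in this series). A minimal C-side sketch of the same idea, with a made-up function name:

    #include <linux/init.h>
    #include <linux/kernel.h>

    /* Hypothetical example: __init places this function in .init.text, so the
     * code is discarded by free_initmem() once boot has finished. */
    static int __init example_boot_only_setup(void)
    {
            printk(KERN_INFO "runs once during boot, then its code is freed\n");
            return 0;
    }
    arch_initcall(example_boot_only_setup);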
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 43c1b0982819..d357f648d963 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -223,7 +223,7 @@ static struct platform_device bfin_uart_device = { | |||
223 | static struct mtd_partition ezkit_partitions[] = { | 223 | static struct mtd_partition ezkit_partitions[] = { |
224 | { | 224 | { |
225 | .name = "Bootloader", | 225 | .name = "Bootloader", |
226 | .size = 0x20000, | 226 | .size = 0x40000, |
227 | .offset = 0, | 227 | .offset = 0, |
228 | }, { | 228 | }, { |
229 | .name = "Kernel", | 229 | .name = "Kernel", |
@@ -389,6 +389,19 @@ static struct platform_device bfin_device_gpiokeys = { | |||
389 | }; | 389 | }; |
390 | #endif | 390 | #endif |
391 | 391 | ||
392 | static struct resource bfin_gpios_resources = { | ||
393 | .start = 0, | ||
394 | .end = MAX_BLACKFIN_GPIOS - 1, | ||
395 | .flags = IORESOURCE_IRQ, | ||
396 | }; | ||
397 | |||
398 | static struct platform_device bfin_gpios_device = { | ||
399 | .name = "simple-gpio", | ||
400 | .id = -1, | ||
401 | .num_resources = 1, | ||
402 | .resource = &bfin_gpios_resources, | ||
403 | }; | ||
404 | |||
392 | #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) | 405 | #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) |
393 | #include <linux/i2c-gpio.h> | 406 | #include <linux/i2c-gpio.h> |
394 | 407 | ||
@@ -446,6 +459,7 @@ static struct platform_device *ezkit_devices[] __initdata = { | |||
446 | &isp1362_hcd_device, | 459 | &isp1362_hcd_device, |
447 | #endif | 460 | #endif |
448 | 461 | ||
462 | &bfin_gpios_device, | ||
449 | &ezkit_flash_device, | 463 | &ezkit_flash_device, |
450 | }; | 464 | }; |
451 | 465 | ||
diff --git a/arch/blackfin/mach-common/dpmc.S b/arch/blackfin/mach-common/dpmc.S
index b80ddd8b232d..9d45aa3265b1 100644
--- a/arch/blackfin/mach-common/dpmc.S
+++ b/arch/blackfin/mach-common/dpmc.S
@@ -31,140 +31,6 @@ | |||
31 | #include <asm/blackfin.h> | 31 | #include <asm/blackfin.h> |
32 | #include <asm/mach/irq.h> | 32 | #include <asm/mach/irq.h> |
33 | 33 | ||
34 | .text | ||
35 | |||
36 | ENTRY(_unmask_wdog_wakeup_evt) | ||
37 | [--SP] = ( R7:0, P5:0 ); | ||
38 | #if defined(CONFIG_BF561) | ||
39 | P0.H = hi(SICA_IWR1); | ||
40 | P0.L = lo(SICA_IWR1); | ||
41 | #elif defined(CONFIG_BF54x) || defined(CONFIG_BF52x) | ||
42 | P0.h = HI(SIC_IWR0); | ||
43 | P0.l = LO(SIC_IWR0); | ||
44 | #else | ||
45 | P0.h = HI(SIC_IWR); | ||
46 | P0.l = LO(SIC_IWR); | ||
47 | #endif | ||
48 | R7 = [P0]; | ||
49 | #if defined(CONFIG_BF561) | ||
50 | BITSET(R7, 27); | ||
51 | #else | ||
52 | BITSET(R7,(IRQ_WATCH - IVG7)); | ||
53 | #endif | ||
54 | [P0] = R7; | ||
55 | SSYNC; | ||
56 | |||
57 | ( R7:0, P5:0 ) = [SP++]; | ||
58 | RTS; | ||
59 | |||
60 | .LWRITE_TO_STAT: | ||
61 | /* When watch dog timer is enabled, a write to STAT will load the | ||
62 | * contents of CNT to STAT | ||
63 | */ | ||
64 | R7 = 0x0000(z); | ||
65 | #if defined(CONFIG_BF561) | ||
66 | P0.h = HI(WDOGA_STAT); | ||
67 | P0.l = LO(WDOGA_STAT); | ||
68 | #else | ||
69 | P0.h = HI(WDOG_STAT); | ||
70 | P0.l = LO(WDOG_STAT); | ||
71 | #endif | ||
72 | [P0] = R7; | ||
73 | SSYNC; | ||
74 | JUMP .LSKIP_WRITE_TO_STAT; | ||
75 | |||
76 | ENTRY(_program_wdog_timer) | ||
77 | [--SP] = ( R7:0, P5:0 ); | ||
78 | #if defined(CONFIG_BF561) | ||
79 | P0.h = HI(WDOGA_CNT); | ||
80 | P0.l = LO(WDOGA_CNT); | ||
81 | #else | ||
82 | P0.h = HI(WDOG_CNT); | ||
83 | P0.l = LO(WDOG_CNT); | ||
84 | #endif | ||
85 | [P0] = R0; | ||
86 | SSYNC; | ||
87 | |||
88 | #if defined(CONFIG_BF561) | ||
89 | P0.h = HI(WDOGA_CTL); | ||
90 | P0.l = LO(WDOGA_CTL); | ||
91 | #else | ||
92 | P0.h = HI(WDOG_CTL); | ||
93 | P0.l = LO(WDOG_CTL); | ||
94 | #endif | ||
95 | R7 = W[P0](Z); | ||
96 | CC = BITTST(R7,1); | ||
97 | if !CC JUMP .LWRITE_TO_STAT; | ||
98 | CC = BITTST(R7,2); | ||
99 | if !CC JUMP .LWRITE_TO_STAT; | ||
100 | |||
101 | .LSKIP_WRITE_TO_STAT: | ||
102 | #if defined(CONFIG_BF561) | ||
103 | P0.h = HI(WDOGA_CTL); | ||
104 | P0.l = LO(WDOGA_CTL); | ||
105 | #else | ||
106 | P0.h = HI(WDOG_CTL); | ||
107 | P0.l = LO(WDOG_CTL); | ||
108 | #endif | ||
109 | R7 = W[P0](Z); | ||
110 | BITCLR(R7,1); /* Enable GP event */ | ||
111 | BITSET(R7,2); | ||
112 | W[P0] = R7.L; | ||
113 | SSYNC; | ||
114 | NOP; | ||
115 | |||
116 | R7 = W[P0](Z); | ||
117 | BITCLR(R7,4); /* Enable the wdog counter */ | ||
118 | W[P0] = R7.L; | ||
119 | SSYNC; | ||
120 | |||
121 | ( R7:0, P5:0 ) = [SP++]; | ||
122 | RTS; | ||
123 | |||
124 | ENTRY(_clear_wdog_wakeup_evt) | ||
125 | [--SP] = ( R7:0, P5:0 ); | ||
126 | |||
127 | #if defined(CONFIG_BF561) | ||
128 | P0.h = HI(WDOGA_CTL); | ||
129 | P0.l = LO(WDOGA_CTL); | ||
130 | #else | ||
131 | P0.h = HI(WDOG_CTL); | ||
132 | P0.l = LO(WDOG_CTL); | ||
133 | #endif | ||
134 | R7 = 0x0AD6(Z); | ||
135 | W[P0] = R7.L; | ||
136 | SSYNC; | ||
137 | |||
138 | R7 = W[P0](Z); | ||
139 | BITSET(R7,15); | ||
140 | W[P0] = R7.L; | ||
141 | SSYNC; | ||
142 | |||
143 | R7 = W[P0](Z); | ||
144 | BITSET(R7,1); | ||
145 | BITSET(R7,2); | ||
146 | W[P0] = R7.L; | ||
147 | SSYNC; | ||
148 | |||
149 | ( R7:0, P5:0 ) = [SP++]; | ||
150 | RTS; | ||
151 | |||
152 | ENTRY(_disable_wdog_timer) | ||
153 | [--SP] = ( R7:0, P5:0 ); | ||
154 | #if defined(CONFIG_BF561) | ||
155 | P0.h = HI(WDOGA_CTL); | ||
156 | P0.l = LO(WDOGA_CTL); | ||
157 | #else | ||
158 | P0.h = HI(WDOG_CTL); | ||
159 | P0.l = LO(WDOG_CTL); | ||
160 | #endif | ||
161 | R7 = 0xAD6(Z); | ||
162 | W[P0] = R7.L; | ||
163 | SSYNC; | ||
164 | ( R7:0, P5:0 ) = [SP++]; | ||
165 | RTS; | ||
166 | |||
167 | #if !defined(CONFIG_BF561) | ||
168 | 34 | ||
169 | .section .l1.text | 35 | .section .l1.text |
170 | 36 | ||
@@ -459,10 +325,12 @@ ENTRY(_set_sic_iwr) | |||
459 | RTS; | 325 | RTS; |
460 | 326 | ||
461 | ENTRY(_set_rtc_istat) | 327 | ENTRY(_set_rtc_istat) |
328 | #ifndef CONFIG_BF561 | ||
462 | P0.H = hi(RTC_ISTAT); | 329 | P0.H = hi(RTC_ISTAT); |
463 | P0.L = lo(RTC_ISTAT); | 330 | P0.L = lo(RTC_ISTAT); |
464 | w[P0] = R0.L; | 331 | w[P0] = R0.L; |
465 | SSYNC; | 332 | SSYNC; |
333 | #endif | ||
466 | RTS; | 334 | RTS; |
467 | 335 | ||
468 | ENTRY(_test_pll_locked) | 336 | ENTRY(_test_pll_locked) |
@@ -473,4 +341,3 @@ ENTRY(_test_pll_locked) | |||
473 | CC = BITTST(R0,5); | 341 | CC = BITTST(R0,5); |
474 | IF !CC JUMP 1b; | 342 | IF !CC JUMP 1b; |
475 | RTS; | 343 | RTS; |
476 | #endif | ||
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 880595afe98d..225ef14af75e 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -74,7 +74,7 @@ unsigned long bfin_sic_iwr[3]; /* Up to 3 SIC_IWRx registers */ | |||
74 | #endif | 74 | #endif |
75 | 75 | ||
76 | struct ivgx { | 76 | struct ivgx { |
77 | /* irq number for request_irq, available in mach-bf533/irq.h */ | 77 | /* irq number for request_irq, available in mach-bf5xx/irq.h */ |
78 | unsigned int irqno; | 78 | unsigned int irqno; |
79 | /* corresponding bit in the SIC_ISR register */ | 79 | /* corresponding bit in the SIC_ISR register */ |
80 | unsigned int isrflag; | 80 | unsigned int isrflag; |
@@ -86,7 +86,6 @@ struct ivg_slice { | |||
86 | struct ivgx *istop; | 86 | struct ivgx *istop; |
87 | } ivg7_13[IVG13 - IVG7 + 1]; | 87 | } ivg7_13[IVG13 - IVG7 + 1]; |
88 | 88 | ||
89 | static void search_IAR(void); | ||
90 | 89 | ||
91 | /* | 90 | /* |
92 | * Search SIC_IAR and fill tables with the irqvalues | 91 | * Search SIC_IAR and fill tables with the irqvalues |
@@ -120,10 +119,10 @@ static void __init search_IAR(void) | |||
120 | } | 119 | } |
121 | 120 | ||
122 | /* | 121 | /* |
123 | * This is for BF533 internal IRQs | 122 | * This is for core internal IRQs |
124 | */ | 123 | */ |
125 | 124 | ||
126 | static void ack_noop(unsigned int irq) | 125 | static void bfin_ack_noop(unsigned int irq) |
127 | { | 126 | { |
128 | /* Dummy function. */ | 127 | /* Dummy function. */ |
129 | } | 128 | } |
@@ -156,11 +155,11 @@ static void bfin_internal_mask_irq(unsigned int irq) | |||
156 | { | 155 | { |
157 | #ifdef CONFIG_BF53x | 156 | #ifdef CONFIG_BF53x |
158 | bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() & | 157 | bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() & |
159 | ~(1 << (irq - (IRQ_CORETMR + 1)))); | 158 | ~(1 << SIC_SYSIRQ(irq))); |
160 | #else | 159 | #else |
161 | unsigned mask_bank, mask_bit; | 160 | unsigned mask_bank, mask_bit; |
162 | mask_bank = (irq - (IRQ_CORETMR + 1)) / 32; | 161 | mask_bank = SIC_SYSIRQ(irq) / 32; |
163 | mask_bit = (irq - (IRQ_CORETMR + 1)) % 32; | 162 | mask_bit = SIC_SYSIRQ(irq) % 32; |
164 | bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) & | 163 | bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) & |
165 | ~(1 << mask_bit)); | 164 | ~(1 << mask_bit)); |
166 | #endif | 165 | #endif |
@@ -171,11 +170,11 @@ static void bfin_internal_unmask_irq(unsigned int irq) | |||
171 | { | 170 | { |
172 | #ifdef CONFIG_BF53x | 171 | #ifdef CONFIG_BF53x |
173 | bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() | | 172 | bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() | |
174 | (1 << (irq - (IRQ_CORETMR + 1)))); | 173 | (1 << SIC_SYSIRQ(irq))); |
175 | #else | 174 | #else |
176 | unsigned mask_bank, mask_bit; | 175 | unsigned mask_bank, mask_bit; |
177 | mask_bank = (irq - (IRQ_CORETMR + 1)) / 32; | 176 | mask_bank = SIC_SYSIRQ(irq) / 32; |
178 | mask_bit = (irq - (IRQ_CORETMR + 1)) % 32; | 177 | mask_bit = SIC_SYSIRQ(irq) % 32; |
179 | bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) | | 178 | bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) | |
180 | (1 << mask_bit)); | 179 | (1 << mask_bit)); |
181 | #endif | 180 | #endif |
@@ -187,8 +186,8 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state) | |||
187 | { | 186 | { |
188 | unsigned bank, bit; | 187 | unsigned bank, bit; |
189 | unsigned long flags; | 188 | unsigned long flags; |
190 | bank = (irq - (IRQ_CORETMR + 1)) / 32; | 189 | bank = SIC_SYSIRQ(irq) / 32; |
191 | bit = (irq - (IRQ_CORETMR + 1)) % 32; | 190 | bit = SIC_SYSIRQ(irq) % 32; |
192 | 191 | ||
193 | local_irq_save(flags); | 192 | local_irq_save(flags); |
194 | 193 | ||
@@ -204,15 +203,18 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state) | |||
204 | #endif | 203 | #endif |
205 | 204 | ||
206 | static struct irq_chip bfin_core_irqchip = { | 205 | static struct irq_chip bfin_core_irqchip = { |
207 | .ack = ack_noop, | 206 | .ack = bfin_ack_noop, |
208 | .mask = bfin_core_mask_irq, | 207 | .mask = bfin_core_mask_irq, |
209 | .unmask = bfin_core_unmask_irq, | 208 | .unmask = bfin_core_unmask_irq, |
210 | }; | 209 | }; |
211 | 210 | ||
212 | static struct irq_chip bfin_internal_irqchip = { | 211 | static struct irq_chip bfin_internal_irqchip = { |
213 | .ack = ack_noop, | 212 | .ack = bfin_ack_noop, |
214 | .mask = bfin_internal_mask_irq, | 213 | .mask = bfin_internal_mask_irq, |
215 | .unmask = bfin_internal_unmask_irq, | 214 | .unmask = bfin_internal_unmask_irq, |
215 | .mask_ack = bfin_internal_mask_irq, | ||
216 | .disable = bfin_internal_mask_irq, | ||
217 | .enable = bfin_internal_unmask_irq, | ||
216 | #ifdef CONFIG_PM | 218 | #ifdef CONFIG_PM |
217 | .set_wake = bfin_internal_set_wake, | 219 | .set_wake = bfin_internal_set_wake, |
218 | #endif | 220 | #endif |
@@ -221,38 +223,23 @@ static struct irq_chip bfin_internal_irqchip = { | |||
221 | #ifdef BF537_GENERIC_ERROR_INT_DEMUX | 223 | #ifdef BF537_GENERIC_ERROR_INT_DEMUX |
222 | static int error_int_mask; | 224 | static int error_int_mask; |
223 | 225 | ||
224 | static void bfin_generic_error_ack_irq(unsigned int irq) | ||
225 | { | ||
226 | |||
227 | } | ||
228 | |||
229 | static void bfin_generic_error_mask_irq(unsigned int irq) | 226 | static void bfin_generic_error_mask_irq(unsigned int irq) |
230 | { | 227 | { |
231 | error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR)); | 228 | error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR)); |
232 | 229 | ||
233 | if (!error_int_mask) { | 230 | if (!error_int_mask) |
234 | local_irq_disable(); | 231 | bfin_internal_mask_irq(IRQ_GENERIC_ERROR); |
235 | bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() & | ||
236 | ~(1 << (IRQ_GENERIC_ERROR - | ||
237 | (IRQ_CORETMR + 1)))); | ||
238 | SSYNC(); | ||
239 | local_irq_enable(); | ||
240 | } | ||
241 | } | 232 | } |
242 | 233 | ||
243 | static void bfin_generic_error_unmask_irq(unsigned int irq) | 234 | static void bfin_generic_error_unmask_irq(unsigned int irq) |
244 | { | 235 | { |
245 | local_irq_disable(); | 236 | bfin_internal_unmask_irq(IRQ_GENERIC_ERROR); |
246 | bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() | 1 << | ||
247 | (IRQ_GENERIC_ERROR - (IRQ_CORETMR + 1))); | ||
248 | SSYNC(); | ||
249 | local_irq_enable(); | ||
250 | |||
251 | error_int_mask |= 1L << (irq - IRQ_PPI_ERROR); | 237 | error_int_mask |= 1L << (irq - IRQ_PPI_ERROR); |
252 | } | 238 | } |
253 | 239 | ||
254 | static struct irq_chip bfin_generic_error_irqchip = { | 240 | static struct irq_chip bfin_generic_error_irqchip = { |
255 | .ack = bfin_generic_error_ack_irq, | 241 | .ack = bfin_ack_noop, |
242 | .mask_ack = bfin_generic_error_mask_irq, | ||
256 | .mask = bfin_generic_error_mask_irq, | 243 | .mask = bfin_generic_error_mask_irq, |
257 | .unmask = bfin_generic_error_unmask_irq, | 244 | .unmask = bfin_generic_error_unmask_irq, |
258 | }; | 245 | }; |
@@ -608,7 +595,7 @@ static struct pin_int_t *pint[NR_PINT_SYS_IRQS] = { | |||
608 | (struct pin_int_t *)PINT3_MASK_SET, | 595 | (struct pin_int_t *)PINT3_MASK_SET, |
609 | }; | 596 | }; |
610 | 597 | ||
611 | unsigned short get_irq_base(u8 bank, u8 bmap) | 598 | inline unsigned short get_irq_base(u8 bank, u8 bmap) |
612 | { | 599 | { |
613 | 600 | ||
614 | u16 irq_base; | 601 | u16 irq_base; |
@@ -969,17 +956,12 @@ int __init init_arch_irq(void) | |||
969 | #if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) | 956 | #if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) |
970 | bfin_write_SIC_IMASK0(SIC_UNMASK_ALL); | 957 | bfin_write_SIC_IMASK0(SIC_UNMASK_ALL); |
971 | bfin_write_SIC_IMASK1(SIC_UNMASK_ALL); | 958 | bfin_write_SIC_IMASK1(SIC_UNMASK_ALL); |
972 | bfin_write_SIC_IWR0(IWR_ENABLE_ALL); | ||
973 | bfin_write_SIC_IWR1(IWR_ENABLE_ALL); | ||
974 | # ifdef CONFIG_BF54x | 959 | # ifdef CONFIG_BF54x |
975 | bfin_write_SIC_IMASK2(SIC_UNMASK_ALL); | 960 | bfin_write_SIC_IMASK2(SIC_UNMASK_ALL); |
976 | bfin_write_SIC_IWR2(IWR_ENABLE_ALL); | ||
977 | # endif | 961 | # endif |
978 | #else | 962 | #else |
979 | bfin_write_SIC_IMASK(SIC_UNMASK_ALL); | 963 | bfin_write_SIC_IMASK(SIC_UNMASK_ALL); |
980 | bfin_write_SIC_IWR(IWR_ENABLE_ALL); | ||
981 | #endif | 964 | #endif |
982 | SSYNC(); | ||
983 | 965 | ||
984 | local_irq_disable(); | 966 | local_irq_disable(); |
985 | 967 | ||
@@ -1001,90 +983,53 @@ int __init init_arch_irq(void) | |||
1001 | set_irq_chip(irq, &bfin_core_irqchip); | 983 | set_irq_chip(irq, &bfin_core_irqchip); |
1002 | else | 984 | else |
1003 | set_irq_chip(irq, &bfin_internal_irqchip); | 985 | set_irq_chip(irq, &bfin_internal_irqchip); |
1004 | #ifdef BF537_GENERIC_ERROR_INT_DEMUX | ||
1005 | if (irq != IRQ_GENERIC_ERROR) { | ||
1006 | #endif | ||
1007 | 986 | ||
1008 | switch (irq) { | 987 | switch (irq) { |
1009 | #if defined(CONFIG_BF53x) | 988 | #if defined(CONFIG_BF53x) |
1010 | case IRQ_PROG_INTA: | 989 | case IRQ_PROG_INTA: |
1011 | set_irq_chained_handler(irq, | ||
1012 | bfin_demux_gpio_irq); | ||
1013 | break; | ||
1014 | # if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) | 990 | # if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) |
1015 | case IRQ_MAC_RX: | 991 | case IRQ_MAC_RX: |
1016 | set_irq_chained_handler(irq, | ||
1017 | bfin_demux_gpio_irq); | ||
1018 | break; | ||
1019 | # endif | 992 | # endif |
1020 | #elif defined(CONFIG_BF54x) | 993 | #elif defined(CONFIG_BF54x) |
1021 | case IRQ_PINT0: | 994 | case IRQ_PINT0: |
1022 | set_irq_chained_handler(irq, | 995 | case IRQ_PINT1: |
1023 | bfin_demux_gpio_irq); | 996 | case IRQ_PINT2: |
1024 | break; | 997 | case IRQ_PINT3: |
1025 | case IRQ_PINT1: | ||
1026 | set_irq_chained_handler(irq, | ||
1027 | bfin_demux_gpio_irq); | ||
1028 | break; | ||
1029 | case IRQ_PINT2: | ||
1030 | set_irq_chained_handler(irq, | ||
1031 | bfin_demux_gpio_irq); | ||
1032 | break; | ||
1033 | case IRQ_PINT3: | ||
1034 | set_irq_chained_handler(irq, | ||
1035 | bfin_demux_gpio_irq); | ||
1036 | break; | ||
1037 | #elif defined(CONFIG_BF52x) | 998 | #elif defined(CONFIG_BF52x) |
1038 | case IRQ_PORTF_INTA: | 999 | case IRQ_PORTF_INTA: |
1039 | set_irq_chained_handler(irq, | 1000 | case IRQ_PORTG_INTA: |
1040 | bfin_demux_gpio_irq); | 1001 | case IRQ_PORTH_INTA: |
1041 | break; | ||
1042 | case IRQ_PORTG_INTA: | ||
1043 | set_irq_chained_handler(irq, | ||
1044 | bfin_demux_gpio_irq); | ||
1045 | break; | ||
1046 | case IRQ_PORTH_INTA: | ||
1047 | set_irq_chained_handler(irq, | ||
1048 | bfin_demux_gpio_irq); | ||
1049 | break; | ||
1050 | #elif defined(CONFIG_BF561) | 1002 | #elif defined(CONFIG_BF561) |
1051 | case IRQ_PROG0_INTA: | 1003 | case IRQ_PROG0_INTA: |
1052 | set_irq_chained_handler(irq, | 1004 | case IRQ_PROG1_INTA: |
1053 | bfin_demux_gpio_irq); | 1005 | case IRQ_PROG2_INTA: |
1054 | break; | ||
1055 | case IRQ_PROG1_INTA: | ||
1056 | set_irq_chained_handler(irq, | ||
1057 | bfin_demux_gpio_irq); | ||
1058 | break; | ||
1059 | case IRQ_PROG2_INTA: | ||
1060 | set_irq_chained_handler(irq, | ||
1061 | bfin_demux_gpio_irq); | ||
1062 | break; | ||
1063 | #endif | 1006 | #endif |
1064 | default: | 1007 | set_irq_chained_handler(irq, |
1065 | set_irq_handler(irq, handle_simple_irq); | 1008 | bfin_demux_gpio_irq); |
1066 | break; | 1009 | break; |
1067 | } | ||
1068 | |||
1069 | #ifdef BF537_GENERIC_ERROR_INT_DEMUX | 1010 | #ifdef BF537_GENERIC_ERROR_INT_DEMUX |
1070 | } else { | 1011 | case IRQ_GENERIC_ERROR: |
1071 | set_irq_handler(irq, bfin_demux_error_irq); | 1012 | set_irq_handler(irq, bfin_demux_error_irq); |
1072 | } | 1013 | |
1014 | break; | ||
1073 | #endif | 1015 | #endif |
1016 | default: | ||
1017 | set_irq_handler(irq, handle_simple_irq); | ||
1018 | break; | ||
1019 | } | ||
1074 | } | 1020 | } |
1021 | |||
1075 | #ifdef BF537_GENERIC_ERROR_INT_DEMUX | 1022 | #ifdef BF537_GENERIC_ERROR_INT_DEMUX |
1076 | for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++) { | 1023 | for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++) |
1077 | set_irq_chip(irq, &bfin_generic_error_irqchip); | 1024 | set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip, |
1078 | set_irq_handler(irq, handle_level_irq); | 1025 | handle_level_irq); |
1079 | } | ||
1080 | #endif | 1026 | #endif |
1081 | 1027 | ||
1082 | for (irq = GPIO_IRQ_BASE; irq < NR_IRQS; irq++) { | 1028 | /* if configured as edge, then will be changed to do_edge_IRQ */ |
1029 | for (irq = GPIO_IRQ_BASE; irq < NR_IRQS; irq++) | ||
1030 | set_irq_chip_and_handler(irq, &bfin_gpio_irqchip, | ||
1031 | handle_level_irq); | ||
1083 | 1032 | ||
1084 | set_irq_chip(irq, &bfin_gpio_irqchip); | ||
1085 | /* if configured as edge, then will be changed to do_edge_IRQ */ | ||
1086 | set_irq_handler(irq, handle_level_irq); | ||
1087 | } | ||
1088 | 1033 | ||
1089 | bfin_write_IMASK(0); | 1034 | bfin_write_IMASK(0); |
1090 | CSYNC(); | 1035 | CSYNC(); |
@@ -1106,6 +1051,16 @@ int __init init_arch_irq(void) | |||
1106 | IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | | 1051 | IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | |
1107 | IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; | 1052 | IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; |
1108 | 1053 | ||
1054 | #if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) | ||
1055 | bfin_write_SIC_IWR0(IWR_ENABLE_ALL); | ||
1056 | bfin_write_SIC_IWR1(IWR_ENABLE_ALL); | ||
1057 | # ifdef CONFIG_BF54x | ||
1058 | bfin_write_SIC_IWR2(IWR_ENABLE_ALL); | ||
1059 | # endif | ||
1060 | #else | ||
1061 | bfin_write_SIC_IWR(IWR_ENABLE_ALL); | ||
1062 | #endif | ||
1063 | |||
1109 | return 0; | 1064 | return 0; |
1110 | } | 1065 | } |
1111 | 1066 | ||
@@ -1122,7 +1077,6 @@ void do_irq(int vec, struct pt_regs *fp) | |||
1122 | #if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) | 1077 | #if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) |
1123 | unsigned long sic_status[3]; | 1078 | unsigned long sic_status[3]; |
1124 | 1079 | ||
1125 | SSYNC(); | ||
1126 | sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0(); | 1080 | sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0(); |
1127 | sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1(); | 1081 | sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1(); |
1128 | #ifdef CONFIG_BF54x | 1082 | #ifdef CONFIG_BF54x |
@@ -1138,7 +1092,7 @@ void do_irq(int vec, struct pt_regs *fp) | |||
1138 | } | 1092 | } |
1139 | #else | 1093 | #else |
1140 | unsigned long sic_status; | 1094 | unsigned long sic_status; |
1141 | SSYNC(); | 1095 | |
1142 | sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR(); | 1096 | sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR(); |
1143 | 1097 | ||
1144 | for (;; ivg++) { | 1098 | for (;; ivg++) { |
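The ints-priority rework above does three related things: it funnels every "Linux IRQ number to SIC bit" conversion through a SIC_SYSIRQ() macro instead of open-coding irq - (IRQ_CORETMR + 1), it collapses the per-IRQ set_irq_chained_handler() cases into switch fall-throughs and set_irq_chip_and_handler() calls, and it defers the SIC_IWR wakeup-enable writes to the end of init_arch_irq(). The bank/bit arithmetic now shared by the mask, unmask, and set_wake helpers on the multi-bank parts looks like the sketch below; the SIC_SYSIRQ() definition shown is an assumption that simply mirrors the expression it replaces (the real one lives in the machine irq headers), and this is a kernel-context fragment, not a complete file:

    /* Assumed definition, mirroring the open-coded expression it replaces: */
    #define SIC_SYSIRQ(irq)   ((irq) - (IRQ_CORETMR + 1))

    static void example_mask(unsigned int irq)
    {
            unsigned int sys  = SIC_SYSIRQ(irq);
            unsigned int bank = sys / 32;   /* which SIC_IMASKx register */
            unsigned int bit  = sys % 32;   /* bit inside that register  */

            bfin_write_SIC_IMASK(bank,
                                 bfin_read_SIC_IMASK(bank) & ~(1 << bit));
    }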
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index 1f516c55bde6..ec3141fefd20 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -181,7 +181,7 @@ void __init mem_init(void) | |||
181 | } | 181 | } |
182 | } | 182 | } |
183 | 183 | ||
184 | static __init void free_init_pages(const char *what, unsigned long begin, unsigned long end) | 184 | static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end) |
185 | { | 185 | { |
186 | unsigned long addr; | 186 | unsigned long addr; |
187 | /* next to check that the page we free is not a partial page */ | 187 | /* next to check that the page we free is not a partial page */ |
@@ -203,7 +203,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end) | |||
203 | } | 203 | } |
204 | #endif | 204 | #endif |
205 | 205 | ||
206 | void __init free_initmem(void) | 206 | void __init_refok free_initmem(void) |
207 | { | 207 | { |
208 | #if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU | 208 | #if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU |
209 | free_init_pages("unused kernel memory", | 209 | free_init_pages("unused kernel memory", |
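free_initmem() calls free_init_pages(), which is marked __init; a reference from non-init code to init code normally trips the modpost section-mismatch warning. The __init_refok annotation of this kernel generation (later renamed __ref) marks the reference as intentional, since free_initmem() runs while .init.text is still mapped. A minimal sketch of the pattern with hypothetical names:

    #include <linux/init.h>

    static void __init helper_used_late_in_boot(void)
    {
            /* lives in .init.text */
    }

    /* Deliberate reference from non-init code to init code; __init_refok keeps
     * modpost from flagging it, because the call happens before init memory is
     * released. */
    void __init_refok shutdown_boot_stuff(void)
    {
            helper_used_late_in_boot();
    }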
diff --git a/arch/cris/arch-v10/kernel/time.c b/arch/cris/arch-v10/kernel/time.c
index 9310a7b476e9..525483f0ddf8 100644
--- a/arch/cris/arch-v10/kernel/time.c
+++ b/arch/cris/arch-v10/kernel/time.c
@@ -13,7 +13,7 @@ | |||
13 | #include <linux/swap.h> | 13 | #include <linux/swap.h> |
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/vmstat.h> | 16 | #include <linux/mm.h> |
17 | #include <asm/arch/svinto.h> | 17 | #include <asm/arch/svinto.h> |
18 | #include <asm/types.h> | 18 | #include <asm/types.h> |
19 | #include <asm/signal.h> | 19 | #include <asm/signal.h> |
diff --git a/arch/cris/arch-v10/lib/string.c b/arch/cris/arch-v10/lib/string.c
index 7161a2bef4fe..c7bd6ebdc93c 100644
--- a/arch/cris/arch-v10/lib/string.c
+++ b/arch/cris/arch-v10/lib/string.c
@@ -1,55 +1,59 @@ | |||
1 | /*#************************************************************************#*/ | 1 | /* A memcpy for CRIS. |
2 | /*#-------------------------------------------------------------------------*/ | 2 | Copyright (C) 1994-2005 Axis Communications. |
3 | /*# */ | 3 | All rights reserved. |
4 | /*# FUNCTION NAME: memcpy() */ | 4 | |
5 | /*# */ | 5 | Redistribution and use in source and binary forms, with or without |
6 | /*# PARAMETERS: void* dst; Destination address. */ | 6 | modification, are permitted provided that the following conditions |
7 | /*# void* src; Source address. */ | 7 | are met: |
8 | /*# int len; Number of bytes to copy. */ | 8 | |
9 | /*# */ | 9 | 1. Redistributions of source code must retain the above copyright |
10 | /*# RETURNS: dst. */ | 10 | notice, this list of conditions and the following disclaimer. |
11 | /*# */ | 11 | |
12 | /*# DESCRIPTION: Copies len bytes of memory from src to dst. No guarantees */ | 12 | 2. Neither the name of Axis Communications nor the names of its |
13 | /*# about copying of overlapping memory areas. This routine is */ | 13 | contributors may be used to endorse or promote products derived |
14 | /*# very sensitive to compiler changes in register allocation. */ | 14 | from this software without specific prior written permission. |
15 | /*# Should really be rewritten to avoid this problem. */ | 15 | |
16 | /*# */ | 16 | THIS SOFTWARE IS PROVIDED BY AXIS COMMUNICATIONS AND ITS CONTRIBUTORS |
17 | /*#-------------------------------------------------------------------------*/ | 17 | ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
18 | /*# */ | 18 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
19 | /*# HISTORY */ | 19 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AXIS |
20 | /*# */ | 20 | COMMUNICATIONS OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, |
21 | /*# DATE NAME CHANGES */ | 21 | INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
22 | /*# ---- ---- ------- */ | 22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
23 | /*# 941007 Kenny R Creation */ | 23 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
24 | /*# 941011 Kenny R Lots of optimizations and inlining. */ | 24 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
25 | /*# 941129 Ulf A Adapted for use in libc. */ | 25 | STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING |
26 | /*# 950216 HP N==0 forgotten if non-aligned src/dst. */ | 26 | IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
27 | /*# Added some optimizations. */ | 27 | POSSIBILITY OF SUCH DAMAGE. */ |
28 | /*# 001025 HP Make src and dst char *. Align dst to */ | 28 | |
29 | /*# dword, not just word-if-both-src-and-dst- */ | 29 | /* FIXME: This file should really only be used for reference, as the |
30 | /*# are-misaligned. */ | 30 | result is somewhat depending on gcc generating what we expect rather |
31 | /*# */ | 31 | than what we describe. An assembly file should be used instead. */ |
32 | /*#-------------------------------------------------------------------------*/ | 32 | |
33 | 33 | #include <stddef.h> | |
34 | #include <linux/types.h> | 34 | |
35 | 35 | /* Break even between movem and move16 is really at 38.7 * 2, but | |
36 | void *memcpy(void *pdst, | 36 | modulo 44, so up to the next multiple of 44, we use ordinary code. */ |
37 | const void *psrc, | 37 | #define MEMCPY_BY_BLOCK_THRESHOLD (44 * 2) |
38 | size_t pn) | 38 | |
39 | /* No name ambiguities in this file. */ | ||
40 | __asm__ (".syntax no_register_prefix"); | ||
41 | |||
42 | void * | ||
43 | memcpy(void *pdst, const void *psrc, size_t pn) | ||
39 | { | 44 | { |
40 | /* Ok. Now we want the parameters put in special registers. | 45 | /* Now we want the parameters put in special registers. |
41 | Make sure the compiler is able to make something useful of this. | 46 | Make sure the compiler is able to make something useful of this. |
42 | As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop). | 47 | As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop). |
43 | 48 | ||
44 | If gcc was alright, it really would need no temporaries, and no | 49 | If gcc was allright, it really would need no temporaries, and no |
45 | stack space to save stuff on. */ | 50 | stack space to save stuff on. */ |
46 | 51 | ||
47 | register void *return_dst __asm__ ("r10") = pdst; | 52 | register void *return_dst __asm__ ("r10") = pdst; |
48 | register char *dst __asm__ ("r13") = pdst; | 53 | register unsigned char *dst __asm__ ("r13") = pdst; |
49 | register const char *src __asm__ ("r11") = psrc; | 54 | register unsigned const char *src __asm__ ("r11") = psrc; |
50 | register int n __asm__ ("r12") = pn; | 55 | register int n __asm__ ("r12") = pn; |
51 | 56 | ||
52 | |||
53 | /* When src is aligned but not dst, this makes a few extra needless | 57 | /* When src is aligned but not dst, this makes a few extra needless |
54 | cycles. I believe it would take as many to check that the | 58 | cycles. I believe it would take as many to check that the |
55 | re-alignment was unnecessary. */ | 59 | re-alignment was unnecessary. */ |
@@ -59,167 +63,174 @@ void *memcpy(void *pdst, | |||
59 | && n >= 3) | 63 | && n >= 3) |
60 | { | 64 | { |
61 | if ((unsigned long) dst & 1) | 65 | if ((unsigned long) dst & 1) |
62 | { | 66 | { |
63 | n--; | 67 | n--; |
64 | *(char*)dst = *(char*)src; | 68 | *dst = *src; |
65 | src++; | 69 | src++; |
66 | dst++; | 70 | dst++; |
67 | } | 71 | } |
68 | 72 | ||
69 | if ((unsigned long) dst & 2) | 73 | if ((unsigned long) dst & 2) |
70 | { | 74 | { |
71 | n -= 2; | 75 | n -= 2; |
72 | *(short*)dst = *(short*)src; | 76 | *(short *) dst = *(short *) src; |
73 | src += 2; | 77 | src += 2; |
74 | dst += 2; | 78 | dst += 2; |
75 | } | 79 | } |
76 | } | 80 | } |
77 | 81 | ||
78 | /* Decide which copying method to use. */ | 82 | /* Decide which copying method to use. */ |
79 | if (n >= 44*2) /* Break even between movem and | 83 | if (n >= MEMCPY_BY_BLOCK_THRESHOLD) |
80 | move16 is at 38.7*2, but modulo 44. */ | 84 | { |
81 | { | 85 | /* It is not optimal to tell the compiler about clobbering any |
82 | /* For large copies we use 'movem' */ | 86 | registers; that will move the saving/restoring of those registers |
83 | 87 | to the function prologue/epilogue, and make non-movem sizes | |
84 | /* It is not optimal to tell the compiler about clobbering any | 88 | suboptimal. */ |
85 | registers; that will move the saving/restoring of those registers | 89 | __asm__ volatile |
86 | to the function prologue/epilogue, and make non-movem sizes | 90 | ("\ |
87 | suboptimal. | 91 | ;; GCC does promise correct register allocations, but let's \n\ |
88 | 92 | ;; make sure it keeps its promises. \n\ | |
89 | This method is not foolproof; it assumes that the "asm reg" | 93 | .ifnc %0-%1-%2,$r13-$r11-$r12 \n\ |
90 | declarations at the beginning of the function really are used | 94 | .error \"GCC reg alloc bug: %0-%1-%4 != $r13-$r12-$r11\" \n\ |
91 | here (beware: they may be moved to temporary registers). | 95 | .endif \n\ |
92 | This way, we do not have to save/move the registers around into | 96 | \n\ |
93 | temporaries; we can safely use them straight away. | 97 | ;; Save the registers we'll use in the movem process \n\ |
94 | 98 | ;; on the stack. \n\ | |
95 | If you want to check that the allocation was right; then | 99 | subq 11*4,sp \n\ |
96 | check the equalities in the first comment. It should say | 100 | movem r10,[sp] \n\ |
97 | "r13=r13, r11=r11, r12=r12" */ | ||
98 | __asm__ volatile ("\n\ | ||
99 | ;; Check that the following is true (same register names on \n\ | ||
100 | ;; both sides of equal sign, as in r8=r8): \n\ | ||
101 | ;; %0=r13, %1=r11, %2=r12 \n\ | ||
102 | ;; \n\ | ||
103 | ;; Save the registers we'll use in the movem process \n\ | ||
104 | ;; on the stack. \n\ | ||
105 | subq 11*4,$sp \n\ | ||
106 | movem $r10,[$sp] \n\ | ||
107 | \n\ | 101 | \n\ |
108 | ;; Now we've got this: \n\ | 102 | ;; Now we've got this: \n\ |
109 | ;; r11 - src \n\ | 103 | ;; r11 - src \n\ |
110 | ;; r13 - dst \n\ | 104 | ;; r13 - dst \n\ |
111 | ;; r12 - n \n\ | 105 | ;; r12 - n \n\ |
112 | \n\ | 106 | \n\ |
113 | ;; Update n for the first loop \n\ | 107 | ;; Update n for the first loop. \n\ |
114 | subq 44,$r12 \n\ | 108 | subq 44,r12 \n\ |
115 | 0: \n\ | 109 | 0: \n\ |
116 | movem [$r11+],$r10 \n\ | 110 | " |
117 | subq 44,$r12 \n\ | 111 | #ifdef __arch_common_v10_v32 |
118 | bge 0b \n\ | 112 | /* Cater to branch offset difference between v32 and v10. We |
119 | movem $r10,[$r13+] \n\ | 113 | assume the branch below has an 8-bit offset. */ |
114 | " setf\n" | ||
115 | #endif | ||
116 | " movem [r11+],r10 \n\ | ||
117 | subq 44,r12 \n\ | ||
118 | bge 0b \n\ | ||
119 | movem r10,[r13+] \n\ | ||
120 | \n\ | 120 | \n\ |
121 | addq 44,$r12 ;; compensate for last loop underflowing n \n\ | 121 | ;; Compensate for last loop underflowing n. \n\ |
122 | addq 44,r12 \n\ | ||
122 | \n\ | 123 | \n\ |
123 | ;; Restore registers from stack \n\ | 124 | ;; Restore registers from stack. \n\ |
124 | movem [$sp+],$r10" | 125 | movem [sp+],r10" |
125 | 126 | ||
126 | /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n) | 127 | /* Outputs. */ |
127 | /* Inputs */ : "0" (dst), "1" (src), "2" (n)); | 128 | : "=r" (dst), "=r" (src), "=r" (n) |
128 | |||
129 | } | ||
130 | 129 | ||
131 | /* Either we directly starts copying, using dword copying | 130 | /* Inputs. */ |
132 | in a loop, or we copy as much as possible with 'movem' | 131 | : "0" (dst), "1" (src), "2" (n)); |
133 | and then the last block (<44 bytes) is copied here. | 132 | } |
134 | This will work since 'movem' will have updated src,dst,n. */ | ||
135 | 133 | ||
136 | while ( n >= 16 ) | 134 | while (n >= 16) |
137 | { | 135 | { |
138 | *((long*)dst)++ = *((long*)src)++; | 136 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
139 | *((long*)dst)++ = *((long*)src)++; | 137 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
140 | *((long*)dst)++ = *((long*)src)++; | 138 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
141 | *((long*)dst)++ = *((long*)src)++; | 139 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
142 | n -= 16; | 140 | |
143 | } | 141 | n -= 16; |
142 | } | ||
144 | 143 | ||
145 | /* A switch() is definitely the fastest although it takes a LOT of code. | ||
146 | * Particularly if you inline code this. | ||
147 | */ | ||
148 | switch (n) | 144 | switch (n) |
149 | { | 145 | { |
150 | case 0: | 146 | case 0: |
151 | break; | 147 | break; |
148 | |||
152 | case 1: | 149 | case 1: |
153 | *(char*)dst = *(char*)src; | 150 | *dst = *src; |
154 | break; | 151 | break; |
152 | |||
155 | case 2: | 153 | case 2: |
156 | *(short*)dst = *(short*)src; | 154 | *(short *) dst = *(short *) src; |
157 | break; | 155 | break; |
156 | |||
158 | case 3: | 157 | case 3: |
159 | *((short*)dst)++ = *((short*)src)++; | 158 | *(short *) dst = *(short *) src; dst += 2; src += 2; |
160 | *(char*)dst = *(char*)src; | 159 | *dst = *src; |
161 | break; | 160 | break; |
161 | |||
162 | case 4: | 162 | case 4: |
163 | *((long*)dst)++ = *((long*)src)++; | 163 | *(long *) dst = *(long *) src; |
164 | break; | 164 | break; |
165 | |||
165 | case 5: | 166 | case 5: |
166 | *((long*)dst)++ = *((long*)src)++; | 167 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
167 | *(char*)dst = *(char*)src; | 168 | *dst = *src; |
168 | break; | 169 | break; |
170 | |||
169 | case 6: | 171 | case 6: |
170 | *((long*)dst)++ = *((long*)src)++; | 172 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
171 | *(short*)dst = *(short*)src; | 173 | *(short *) dst = *(short *) src; |
172 | break; | 174 | break; |
175 | |||
173 | case 7: | 176 | case 7: |
174 | *((long*)dst)++ = *((long*)src)++; | 177 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
175 | *((short*)dst)++ = *((short*)src)++; | 178 | *(short *) dst = *(short *) src; dst += 2; src += 2; |
176 | *(char*)dst = *(char*)src; | 179 | *dst = *src; |
177 | break; | 180 | break; |
181 | |||
178 | case 8: | 182 | case 8: |
179 | *((long*)dst)++ = *((long*)src)++; | 183 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
180 | *((long*)dst)++ = *((long*)src)++; | 184 | *(long *) dst = *(long *) src; |
181 | break; | 185 | break; |
186 | |||
182 | case 9: | 187 | case 9: |
183 | *((long*)dst)++ = *((long*)src)++; | 188 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
184 | *((long*)dst)++ = *((long*)src)++; | 189 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
185 | *(char*)dst = *(char*)src; | 190 | *dst = *src; |
186 | break; | 191 | break; |
192 | |||
187 | case 10: | 193 | case 10: |
188 | *((long*)dst)++ = *((long*)src)++; | 194 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
189 | *((long*)dst)++ = *((long*)src)++; | 195 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
190 | *(short*)dst = *(short*)src; | 196 | *(short *) dst = *(short *) src; |
191 | break; | 197 | break; |
198 | |||
192 | case 11: | 199 | case 11: |
193 | *((long*)dst)++ = *((long*)src)++; | 200 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
194 | *((long*)dst)++ = *((long*)src)++; | 201 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
195 | *((short*)dst)++ = *((short*)src)++; | 202 | *(short *) dst = *(short *) src; dst += 2; src += 2; |
196 | *(char*)dst = *(char*)src; | 203 | *dst = *src; |
197 | break; | 204 | break; |
205 | |||
198 | case 12: | 206 | case 12: |
199 | *((long*)dst)++ = *((long*)src)++; | 207 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
200 | *((long*)dst)++ = *((long*)src)++; | 208 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
201 | *((long*)dst)++ = *((long*)src)++; | 209 | *(long *) dst = *(long *) src; |
202 | break; | 210 | break; |
211 | |||
203 | case 13: | 212 | case 13: |
204 | *((long*)dst)++ = *((long*)src)++; | 213 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
205 | *((long*)dst)++ = *((long*)src)++; | 214 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
206 | *((long*)dst)++ = *((long*)src)++; | 215 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
207 | *(char*)dst = *(char*)src; | 216 | *dst = *src; |
208 | break; | 217 | break; |
218 | |||
209 | case 14: | 219 | case 14: |
210 | *((long*)dst)++ = *((long*)src)++; | 220 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
211 | *((long*)dst)++ = *((long*)src)++; | 221 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
212 | *((long*)dst)++ = *((long*)src)++; | 222 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
213 | *(short*)dst = *(short*)src; | 223 | *(short *) dst = *(short *) src; |
214 | break; | 224 | break; |
225 | |||
215 | case 15: | 226 | case 15: |
216 | *((long*)dst)++ = *((long*)src)++; | 227 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
217 | *((long*)dst)++ = *((long*)src)++; | 228 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
218 | *((long*)dst)++ = *((long*)src)++; | 229 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
219 | *((short*)dst)++ = *((short*)src)++; | 230 | *(short *) dst = *(short *) src; dst += 2; src += 2; |
220 | *(char*)dst = *(char*)src; | 231 | *dst = *src; |
221 | break; | 232 | break; |
222 | } | 233 | } |
223 | 234 | ||
224 | return return_dst; /* destination pointer. */ | 235 | return return_dst; |
225 | } /* memcpy() */ | 236 | } |
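
The rewritten memcpy above keeps the same overall strategy as the old one (align dst, bulk copy, then finish the tail), just expressed without the lvalue-cast increments that newer gcc rejects. The following is a hypothetical plain-C sketch of that shape, not part of the patch: the movem block copy is elided because it is CRIS inline asm, uint32_t/uint16_t stand in for CRIS long/short (4 and 2 bytes there), unaligned src loads are assumed tolerable as on CRIS, and memcpy_sketch is an illustrative name.

#include <stdint.h>
#include <stddef.h>

/* Illustrative sketch only: align dst to 4 bytes, (optionally) bulk-copy,
 * move 16 bytes per loop iteration, then finish the 0..15 byte tail with
 * the widest stores possible -- which is what the case 0..15 ladder in the
 * real code unrolls by hand. */
static void *memcpy_sketch(void *pdst, const void *psrc, size_t n)
{
	unsigned char *dst = pdst;
	const unsigned char *src = psrc;

	if (((uintptr_t)dst & 3) && n >= 3) {	/* align dst, not src */
		if ((uintptr_t)dst & 1) {
			*dst++ = *src++;
			n--;
		}
		if ((uintptr_t)dst & 2) {
			*(uint16_t *)dst = *(const uint16_t *)src;
			dst += 2; src += 2; n -= 2;
		}
	}

	/* The real code switches to the movem block copy here when
	 * n >= MEMCPY_BY_BLOCK_THRESHOLD (44 * 2). */

	while (n >= 16) {			/* four 4-byte moves per pass */
		*(uint32_t *)dst = *(const uint32_t *)src; dst += 4; src += 4;
		*(uint32_t *)dst = *(const uint32_t *)src; dst += 4; src += 4;
		*(uint32_t *)dst = *(const uint32_t *)src; dst += 4; src += 4;
		*(uint32_t *)dst = *(const uint32_t *)src; dst += 4; src += 4;
		n -= 16;
	}

	while (n >= 4) {	/* switch cases 4..15 reduce to this */
		*(uint32_t *)dst = *(const uint32_t *)src;
		dst += 4; src += 4; n -= 4;
	}
	if (n & 2) {
		*(uint16_t *)dst = *(const uint16_t *)src;
		dst += 2; src += 2;
	}
	if (n & 1)
		*dst = *src;

	return pdst;
}
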
diff --git a/arch/cris/arch-v10/lib/usercopy.c b/arch/cris/arch-v10/lib/usercopy.c index b8e6c0430e5b..b0a608da7bd1 100644 --- a/arch/cris/arch-v10/lib/usercopy.c +++ b/arch/cris/arch-v10/lib/usercopy.c | |||
@@ -193,7 +193,7 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn) | |||
193 | inaccessible. */ | 193 | inaccessible. */ |
194 | 194 | ||
195 | unsigned long | 195 | unsigned long |
196 | __copy_user_zeroing (void __user *pdst, const void *psrc, unsigned long pn) | 196 | __copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn) |
197 | { | 197 | { |
198 | /* We want the parameters put in special registers. | 198 | /* We want the parameters put in special registers. |
199 | Make sure the compiler is able to make something useful of this. | 199 | Make sure the compiler is able to make something useful of this. |
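
The __copy_user_zeroing() hunk only moves the __user annotation: the routine copies from user space into a kernel buffer and zero-fills the destination tail if the source faults, so the annotation belongs on psrc, not pdst. A hypothetical usage sketch, not part of the patch, follows; read_from_user, kbuf, ubuf and len are illustrative names, the headers are assumed to pull in the declaration on CRIS, and the return value is assumed to follow the usual usercopy convention of reporting bytes left uncopied.

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical caller: copy len bytes from a user pointer into a kernel
 * buffer, treating any shortfall as -EFAULT. */
static int read_from_user(void *kbuf, const void __user *ubuf, unsigned long len)
{
	return __copy_user_zeroing(kbuf, ubuf, len) ? -EFAULT : 0;
}
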
diff --git a/arch/cris/arch-v32/lib/string.c b/arch/cris/arch-v32/lib/string.c index 6740b2cebae5..c7bd6ebdc93c 100644 --- a/arch/cris/arch-v32/lib/string.c +++ b/arch/cris/arch-v32/lib/string.c | |||
@@ -1,55 +1,59 @@ | |||
1 | /*#************************************************************************#*/ | 1 | /* A memcpy for CRIS. |
2 | /*#-------------------------------------------------------------------------*/ | 2 | Copyright (C) 1994-2005 Axis Communications. |
3 | /*# */ | 3 | All rights reserved. |
4 | /*# FUNCTION NAME: memcpy() */ | 4 | |
5 | /*# */ | 5 | Redistribution and use in source and binary forms, with or without |
6 | /*# PARAMETERS: void* dst; Destination address. */ | 6 | modification, are permitted provided that the following conditions |
7 | /*# void* src; Source address. */ | 7 | are met: |
8 | /*# int len; Number of bytes to copy. */ | 8 | |
9 | /*# */ | 9 | 1. Redistributions of source code must retain the above copyright |
10 | /*# RETURNS: dst. */ | 10 | notice, this list of conditions and the following disclaimer. |
11 | /*# */ | 11 | |
12 | /*# DESCRIPTION: Copies len bytes of memory from src to dst. No guarantees */ | 12 | 2. Neither the name of Axis Communications nor the names of its |
13 | /*# about copying of overlapping memory areas. This routine is */ | 13 | contributors may be used to endorse or promote products derived |
14 | /*# very sensitive to compiler changes in register allocation. */ | 14 | from this software without specific prior written permission. |
15 | /*# Should really be rewritten to avoid this problem. */ | 15 | |
16 | /*# */ | 16 | THIS SOFTWARE IS PROVIDED BY AXIS COMMUNICATIONS AND ITS CONTRIBUTORS |
17 | /*#-------------------------------------------------------------------------*/ | 17 | ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
18 | /*# */ | 18 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
19 | /*# HISTORY */ | 19 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AXIS |
20 | /*# */ | 20 | COMMUNICATIONS OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, |
21 | /*# DATE NAME CHANGES */ | 21 | INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
22 | /*# ---- ---- ------- */ | 22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
23 | /*# 941007 Kenny R Creation */ | 23 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
24 | /*# 941011 Kenny R Lots of optimizations and inlining. */ | 24 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
25 | /*# 941129 Ulf A Adapted for use in libc. */ | 25 | STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING |
26 | /*# 950216 HP N==0 forgotten if non-aligned src/dst. */ | 26 | IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
27 | /*# Added some optimizations. */ | 27 | POSSIBILITY OF SUCH DAMAGE. */ |
28 | /*# 001025 HP Make src and dst char *. Align dst to */ | 28 | |
29 | /*# dword, not just word-if-both-src-and-dst- */ | 29 | /* FIXME: This file should really only be used for reference, as the |
30 | /*# are-misaligned. */ | 30 | result is somewhat depending on gcc generating what we expect rather |
31 | /*# */ | 31 | than what we describe. An assembly file should be used instead. */ |
32 | /*#-------------------------------------------------------------------------*/ | 32 | |
33 | 33 | #include <stddef.h> | |
34 | #include <linux/types.h> | 34 | |
35 | 35 | /* Break even between movem and move16 is really at 38.7 * 2, but | |
36 | void *memcpy(void *pdst, | 36 | modulo 44, so up to the next multiple of 44, we use ordinary code. */ |
37 | const void *psrc, | 37 | #define MEMCPY_BY_BLOCK_THRESHOLD (44 * 2) |
38 | size_t pn) | 38 | |
39 | /* No name ambiguities in this file. */ | ||
40 | __asm__ (".syntax no_register_prefix"); | ||
41 | |||
42 | void * | ||
43 | memcpy(void *pdst, const void *psrc, size_t pn) | ||
39 | { | 44 | { |
40 | /* Ok. Now we want the parameters put in special registers. | 45 | /* Now we want the parameters put in special registers. |
41 | Make sure the compiler is able to make something useful of this. | 46 | Make sure the compiler is able to make something useful of this. |
42 | As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop). | 47 | As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop). |
43 | 48 | ||
44 | If gcc was alright, it really would need no temporaries, and no | 49 | If gcc was allright, it really would need no temporaries, and no |
45 | stack space to save stuff on. */ | 50 | stack space to save stuff on. */ |
46 | 51 | ||
47 | register void *return_dst __asm__ ("r10") = pdst; | 52 | register void *return_dst __asm__ ("r10") = pdst; |
48 | register char *dst __asm__ ("r13") = pdst; | 53 | register unsigned char *dst __asm__ ("r13") = pdst; |
49 | register const char *src __asm__ ("r11") = psrc; | 54 | register unsigned const char *src __asm__ ("r11") = psrc; |
50 | register int n __asm__ ("r12") = pn; | 55 | register int n __asm__ ("r12") = pn; |
51 | 56 | ||
52 | |||
53 | /* When src is aligned but not dst, this makes a few extra needless | 57 | /* When src is aligned but not dst, this makes a few extra needless |
54 | cycles. I believe it would take as many to check that the | 58 | cycles. I believe it would take as many to check that the |
55 | re-alignment was unnecessary. */ | 59 | re-alignment was unnecessary. */ |
@@ -59,161 +63,174 @@ void *memcpy(void *pdst, | |||
59 | && n >= 3) | 63 | && n >= 3) |
60 | { | 64 | { |
61 | if ((unsigned long) dst & 1) | 65 | if ((unsigned long) dst & 1) |
62 | { | 66 | { |
63 | n--; | 67 | n--; |
64 | *(char*)dst = *(char*)src; | 68 | *dst = *src; |
65 | src++; | 69 | src++; |
66 | dst++; | 70 | dst++; |
67 | } | 71 | } |
68 | 72 | ||
69 | if ((unsigned long) dst & 2) | 73 | if ((unsigned long) dst & 2) |
70 | { | 74 | { |
71 | n -= 2; | 75 | n -= 2; |
72 | *(short*)dst = *(short*)src; | 76 | *(short *) dst = *(short *) src; |
73 | src += 2; | 77 | src += 2; |
74 | dst += 2; | 78 | dst += 2; |
75 | } | 79 | } |
76 | } | 80 | } |
77 | 81 | ||
78 | /* Decide which copying method to use. Movem is dirt cheap, so the | 82 | /* Decide which copying method to use. */ |
79 | overheap is low enough to always use the minimum block size as the | 83 | if (n >= MEMCPY_BY_BLOCK_THRESHOLD) |
80 | threshold. */ | 84 | { |
81 | if (n >= 44) | 85 | /* It is not optimal to tell the compiler about clobbering any |
82 | { | 86 | registers; that will move the saving/restoring of those registers |
83 | /* For large copies we use 'movem' */ | 87 | to the function prologue/epilogue, and make non-movem sizes |
84 | 88 | suboptimal. */ | |
85 | /* It is not optimal to tell the compiler about clobbering any | 89 | __asm__ volatile |
86 | registers; that will move the saving/restoring of those registers | 90 | ("\ |
87 | to the function prologue/epilogue, and make non-movem sizes | 91 | ;; GCC does promise correct register allocations, but let's \n\ |
88 | suboptimal. */ | 92 | ;; make sure it keeps its promises. \n\ |
89 | __asm__ volatile (" \n\ | 93 | .ifnc %0-%1-%2,$r13-$r11-$r12 \n\ |
90 | ;; Check that the register asm declaration got right. \n\ | 94 | .error \"GCC reg alloc bug: %0-%1-%4 != $r13-$r12-$r11\" \n\ |
91 | ;; The GCC manual explicitly says TRT will happen. \n\ | 95 | .endif \n\ |
92 | .ifnc %0-%1-%2,$r13-$r11-$r12 \n\ | ||
93 | .err \n\ | ||
94 | .endif \n\ | ||
95 | \n\ | ||
96 | ;; Save the registers we'll use in the movem process \n\ | ||
97 | \n\ | 96 | \n\ |
98 | ;; on the stack. \n\ | 97 | ;; Save the registers we'll use in the movem process \n\ |
99 | subq 11*4,$sp \n\ | 98 | ;; on the stack. \n\ |
100 | movem $r10,[$sp] \n\ | 99 | subq 11*4,sp \n\ |
100 | movem r10,[sp] \n\ | ||
101 | \n\ | 101 | \n\ |
102 | ;; Now we've got this: \n\ | 102 | ;; Now we've got this: \n\ |
103 | ;; r11 - src \n\ | 103 | ;; r11 - src \n\ |
104 | ;; r13 - dst \n\ | 104 | ;; r13 - dst \n\ |
105 | ;; r12 - n \n\ | 105 | ;; r12 - n \n\ |
106 | \n\ | 106 | \n\ |
107 | ;; Update n for the first loop \n\ | 107 | ;; Update n for the first loop. \n\ |
108 | subq 44,$r12 \n\ | 108 | subq 44,r12 \n\ |
109 | 0: \n\ | 109 | 0: \n\ |
110 | movem [$r11+],$r10 \n\ | 110 | " |
111 | subq 44,$r12 \n\ | 111 | #ifdef __arch_common_v10_v32 |
112 | bge 0b \n\ | 112 | /* Cater to branch offset difference between v32 and v10. We |
113 | movem $r10,[$r13+] \n\ | 113 | assume the branch below has an 8-bit offset. */ |
114 | " setf\n" | ||
115 | #endif | ||
116 | " movem [r11+],r10 \n\ | ||
117 | subq 44,r12 \n\ | ||
118 | bge 0b \n\ | ||
119 | movem r10,[r13+] \n\ | ||
114 | \n\ | 120 | \n\ |
115 | addq 44,$r12 ;; compensate for last loop underflowing n \n\ | 121 | ;; Compensate for last loop underflowing n. \n\ |
122 | addq 44,r12 \n\ | ||
116 | \n\ | 123 | \n\ |
117 | ;; Restore registers from stack \n\ | 124 | ;; Restore registers from stack. \n\ |
118 | movem [$sp+],$r10" | 125 | movem [sp+],r10" |
119 | 126 | ||
120 | /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n) | 127 | /* Outputs. */ |
121 | /* Inputs */ : "0" (dst), "1" (src), "2" (n)); | 128 | : "=r" (dst), "=r" (src), "=r" (n) |
122 | 129 | ||
123 | } | 130 | /* Inputs. */ |
131 | : "0" (dst), "1" (src), "2" (n)); | ||
132 | } | ||
124 | 133 | ||
125 | /* Either we directly starts copying, using dword copying | 134 | while (n >= 16) |
126 | in a loop, or we copy as much as possible with 'movem' | 135 | { |
127 | and then the last block (<44 bytes) is copied here. | 136 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
128 | This will work since 'movem' will have updated src,dst,n. */ | 137 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
138 | *(long *) dst = *(long *) src; dst += 4; src += 4; | ||
139 | *(long *) dst = *(long *) src; dst += 4; src += 4; | ||
129 | 140 | ||
130 | while ( n >= 16 ) | 141 | n -= 16; |
131 | { | 142 | } |
132 | *((long*)dst)++ = *((long*)src)++; | ||
133 | *((long*)dst)++ = *((long*)src)++; | ||
134 | *((long*)dst)++ = *((long*)src)++; | ||
135 | *((long*)dst)++ = *((long*)src)++; | ||
136 | n -= 16; | ||
137 | } | ||
138 | 143 | ||
139 | /* A switch() is definitely the fastest although it takes a LOT of code. | ||
140 | * Particularly if you inline code this. | ||
141 | */ | ||
142 | switch (n) | 144 | switch (n) |
143 | { | 145 | { |
144 | case 0: | 146 | case 0: |
145 | break; | 147 | break; |
148 | |||
146 | case 1: | 149 | case 1: |
147 | *(char*)dst = *(char*)src; | 150 | *dst = *src; |
148 | break; | 151 | break; |
152 | |||
149 | case 2: | 153 | case 2: |
150 | *(short*)dst = *(short*)src; | 154 | *(short *) dst = *(short *) src; |
151 | break; | 155 | break; |
156 | |||
152 | case 3: | 157 | case 3: |
153 | *((short*)dst)++ = *((short*)src)++; | 158 | *(short *) dst = *(short *) src; dst += 2; src += 2; |
154 | *(char*)dst = *(char*)src; | 159 | *dst = *src; |
155 | break; | 160 | break; |
161 | |||
156 | case 4: | 162 | case 4: |
157 | *((long*)dst)++ = *((long*)src)++; | 163 | *(long *) dst = *(long *) src; |
158 | break; | 164 | break; |
165 | |||
159 | case 5: | 166 | case 5: |
160 | *((long*)dst)++ = *((long*)src)++; | 167 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
161 | *(char*)dst = *(char*)src; | 168 | *dst = *src; |
162 | break; | 169 | break; |
170 | |||
163 | case 6: | 171 | case 6: |
164 | *((long*)dst)++ = *((long*)src)++; | 172 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
165 | *(short*)dst = *(short*)src; | 173 | *(short *) dst = *(short *) src; |
166 | break; | 174 | break; |
175 | |||
167 | case 7: | 176 | case 7: |
168 | *((long*)dst)++ = *((long*)src)++; | 177 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
169 | *((short*)dst)++ = *((short*)src)++; | 178 | *(short *) dst = *(short *) src; dst += 2; src += 2; |
170 | *(char*)dst = *(char*)src; | 179 | *dst = *src; |
171 | break; | 180 | break; |
181 | |||
172 | case 8: | 182 | case 8: |
173 | *((long*)dst)++ = *((long*)src)++; | 183 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
174 | *((long*)dst)++ = *((long*)src)++; | 184 | *(long *) dst = *(long *) src; |
175 | break; | 185 | break; |
186 | |||
176 | case 9: | 187 | case 9: |
177 | *((long*)dst)++ = *((long*)src)++; | 188 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
178 | *((long*)dst)++ = *((long*)src)++; | 189 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
179 | *(char*)dst = *(char*)src; | 190 | *dst = *src; |
180 | break; | 191 | break; |
192 | |||
181 | case 10: | 193 | case 10: |
182 | *((long*)dst)++ = *((long*)src)++; | 194 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
183 | *((long*)dst)++ = *((long*)src)++; | 195 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
184 | *(short*)dst = *(short*)src; | 196 | *(short *) dst = *(short *) src; |
185 | break; | 197 | break; |
198 | |||
186 | case 11: | 199 | case 11: |
187 | *((long*)dst)++ = *((long*)src)++; | 200 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
188 | *((long*)dst)++ = *((long*)src)++; | 201 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
189 | *((short*)dst)++ = *((short*)src)++; | 202 | *(short *) dst = *(short *) src; dst += 2; src += 2; |
190 | *(char*)dst = *(char*)src; | 203 | *dst = *src; |
191 | break; | 204 | break; |
205 | |||
192 | case 12: | 206 | case 12: |
193 | *((long*)dst)++ = *((long*)src)++; | 207 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
194 | *((long*)dst)++ = *((long*)src)++; | 208 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
195 | *((long*)dst)++ = *((long*)src)++; | 209 | *(long *) dst = *(long *) src; |
196 | break; | 210 | break; |
211 | |||
197 | case 13: | 212 | case 13: |
198 | *((long*)dst)++ = *((long*)src)++; | 213 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
199 | *((long*)dst)++ = *((long*)src)++; | 214 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
200 | *((long*)dst)++ = *((long*)src)++; | 215 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
201 | *(char*)dst = *(char*)src; | 216 | *dst = *src; |
202 | break; | 217 | break; |
218 | |||
203 | case 14: | 219 | case 14: |
204 | *((long*)dst)++ = *((long*)src)++; | 220 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
205 | *((long*)dst)++ = *((long*)src)++; | 221 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
206 | *((long*)dst)++ = *((long*)src)++; | 222 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
207 | *(short*)dst = *(short*)src; | 223 | *(short *) dst = *(short *) src; |
208 | break; | 224 | break; |
225 | |||
209 | case 15: | 226 | case 15: |
210 | *((long*)dst)++ = *((long*)src)++; | 227 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
211 | *((long*)dst)++ = *((long*)src)++; | 228 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
212 | *((long*)dst)++ = *((long*)src)++; | 229 | *(long *) dst = *(long *) src; dst += 4; src += 4; |
213 | *((short*)dst)++ = *((short*)src)++; | 230 | *(short *) dst = *(short *) src; dst += 2; src += 2; |
214 | *(char*)dst = *(char*)src; | 231 | *dst = *src; |
215 | break; | 232 | break; |
216 | } | 233 | } |
217 | 234 | ||
218 | return return_dst; /* destination pointer. */ | 235 | return return_dst; |
219 | } /* memcpy() */ | 236 | } |
diff --git a/arch/cris/arch-v32/lib/usercopy.c b/arch/cris/arch-v32/lib/usercopy.c index 04d0cf35a276..0b5b70d5f58a 100644 --- a/arch/cris/arch-v32/lib/usercopy.c +++ b/arch/cris/arch-v32/lib/usercopy.c | |||
@@ -161,7 +161,7 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn) | |||
161 | inaccessible. */ | 161 | inaccessible. */ |
162 | 162 | ||
163 | unsigned long | 163 | unsigned long |
164 | __copy_user_zeroing (void __user *pdst, const void *psrc, unsigned long pn) | 164 | __copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn) |
165 | { | 165 | { |
166 | /* We want the parameters put in special registers. | 166 | /* We want the parameters put in special registers. |
167 | Make sure the compiler is able to make something useful of this. | 167 | Make sure the compiler is able to make something useful of this. |
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index dff9edfc7465..8fa3faf5ef1b 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -18,6 +18,7 @@ config IA64 | |||
18 | select HAVE_IDE | 18 | select HAVE_IDE |
19 | select HAVE_OPROFILE | 19 | select HAVE_OPROFILE |
20 | select HAVE_KPROBES | 20 | select HAVE_KPROBES |
21 | select HAVE_KRETPROBES | ||
21 | default y | 22 | default y |
22 | help | 23 | help |
23 | The Itanium Processor Family is Intel's 64-bit successor to | 24 | The Itanium Processor Family is Intel's 64-bit successor to |
@@ -155,6 +156,8 @@ config IA64_HP_ZX1_SWIOTLB | |||
155 | 156 | ||
156 | config IA64_SGI_SN2 | 157 | config IA64_SGI_SN2 |
157 | bool "SGI-SN2" | 158 | bool "SGI-SN2" |
159 | select NUMA | ||
160 | select ACPI_NUMA | ||
158 | help | 161 | help |
159 | Selecting this option will optimize the kernel for use on sn2 based | 162 | Selecting this option will optimize the kernel for use on sn2 based |
160 | systems, but the resulting kernel binary will not run on other | 163 | systems, but the resulting kernel binary will not run on other |
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index b916ccfdef84..f1645c4f7039 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile | |||
@@ -11,6 +11,8 @@ | |||
11 | # Copyright (C) 1998-2004 by David Mosberger-Tang <davidm@hpl.hp.com> | 11 | # Copyright (C) 1998-2004 by David Mosberger-Tang <davidm@hpl.hp.com> |
12 | # | 12 | # |
13 | 13 | ||
14 | KBUILD_DEFCONFIG := generic_defconfig | ||
15 | |||
14 | NM := $(CROSS_COMPILE)nm -B | 16 | NM := $(CROSS_COMPILE)nm -B |
15 | READELF := $(CROSS_COMPILE)readelf | 17 | READELF := $(CROSS_COMPILE)readelf |
16 | 18 | ||
diff --git a/arch/ia64/defconfig b/arch/ia64/configs/generic_defconfig index 0210545e7f61..0210545e7f61 100644 --- a/arch/ia64/defconfig +++ b/arch/ia64/configs/generic_defconfig | |||
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c index 85e82f32e480..256a7faeda07 100644 --- a/arch/ia64/ia32/ia32_signal.c +++ b/arch/ia64/ia32/ia32_signal.c | |||
@@ -766,8 +766,19 @@ get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) | |||
766 | 766 | ||
767 | /* This is the X/Open sanctioned signal stack switching. */ | 767 | /* This is the X/Open sanctioned signal stack switching. */ |
768 | if (ka->sa.sa_flags & SA_ONSTACK) { | 768 | if (ka->sa.sa_flags & SA_ONSTACK) { |
769 | if (!on_sig_stack(esp)) | 769 | int onstack = sas_ss_flags(esp); |
770 | |||
771 | if (onstack == 0) | ||
770 | esp = current->sas_ss_sp + current->sas_ss_size; | 772 | esp = current->sas_ss_sp + current->sas_ss_size; |
773 | else if (onstack == SS_ONSTACK) { | ||
774 | /* | ||
775 | * If we are on the alternate signal stack and would | ||
776 | * overflow it, don't. Return an always-bogus address | ||
777 | * instead so we will die with SIGSEGV. | ||
778 | */ | ||
779 | if (!likely(on_sig_stack(esp - frame_size))) | ||
780 | return (void __user *) -1L; | ||
781 | } | ||
771 | } | 782 | } |
772 | /* Legacy stack switching not supported */ | 783 | /* Legacy stack switching not supported */ |
773 | 784 | ||
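
The get_sigframe() change above distinguishes three sas_ss_flags() results: 0 (alternate stack enabled but not in use) switches to it, SS_ONSTACK (already on it) checks that the new frame still fits and otherwise returns a bogus address so the task dies with SIGSEGV, and SS_DISABLE leaves the stack pointer alone. A hypothetical sketch of that decision, not kernel code: the sas_ss_flags() result and sigaltstack bounds are passed in explicitly, pick_signal_sp is an illustrative name, and the bounds test only approximates the in-kernel on_sig_stack() check at the boundaries.

#include <signal.h>	/* SS_ONSTACK */
#include <stddef.h>

static unsigned long pick_signal_sp(unsigned long sp, int ss_flags,
				    unsigned long ss_sp, unsigned long ss_size,
				    size_t frame_size)
{
	if (ss_flags == 0)			/* alt stack enabled, not on it */
		return ss_sp + ss_size;		/* switch to its top */

	if (ss_flags == SS_ONSTACK &&		/* already on it: would the */
	    (sp - frame_size < ss_sp ||		/* new frame still fit?     */
	     sp - frame_size >= ss_sp + ss_size))
		return (unsigned long)-1L;	/* no: force SIGSEGV */

	return sp;				/* SS_DISABLE, or frame fits */
}
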
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index 398e2fd1cd25..7b3292282dea 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c | |||
@@ -345,7 +345,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask) | |||
345 | if (cpus_empty(mask)) | 345 | if (cpus_empty(mask)) |
346 | return; | 346 | return; |
347 | 347 | ||
348 | if (reassign_irq_vector(irq, first_cpu(mask))) | 348 | if (irq_prepare_move(irq, first_cpu(mask))) |
349 | return; | 349 | return; |
350 | 350 | ||
351 | dest = cpu_physical_id(first_cpu(mask)); | 351 | dest = cpu_physical_id(first_cpu(mask)); |
@@ -397,6 +397,7 @@ iosapic_end_level_irq (unsigned int irq) | |||
397 | struct iosapic_rte_info *rte; | 397 | struct iosapic_rte_info *rte; |
398 | int do_unmask_irq = 0; | 398 | int do_unmask_irq = 0; |
399 | 399 | ||
400 | irq_complete_move(irq); | ||
400 | if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) { | 401 | if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) { |
401 | do_unmask_irq = 1; | 402 | do_unmask_irq = 1; |
402 | mask_irq(irq); | 403 | mask_irq(irq); |
@@ -450,6 +451,7 @@ iosapic_ack_edge_irq (unsigned int irq) | |||
450 | { | 451 | { |
451 | irq_desc_t *idesc = irq_desc + irq; | 452 | irq_desc_t *idesc = irq_desc + irq; |
452 | 453 | ||
454 | irq_complete_move(irq); | ||
453 | move_native_irq(irq); | 455 | move_native_irq(irq); |
454 | /* | 456 | /* |
455 | * Once we have recorded IRQ_PENDING already, we can mask the | 457 | * Once we have recorded IRQ_PENDING already, we can mask the |
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 0b52f19ed046..2b8cf6e85af4 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c | |||
@@ -260,6 +260,8 @@ void __setup_vector_irq(int cpu) | |||
260 | } | 260 | } |
261 | 261 | ||
262 | #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)) | 262 | #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)) |
263 | #define IA64_IRQ_MOVE_VECTOR IA64_DEF_FIRST_DEVICE_VECTOR | ||
264 | |||
263 | static enum vector_domain_type { | 265 | static enum vector_domain_type { |
264 | VECTOR_DOMAIN_NONE, | 266 | VECTOR_DOMAIN_NONE, |
265 | VECTOR_DOMAIN_PERCPU | 267 | VECTOR_DOMAIN_PERCPU |
@@ -272,6 +274,101 @@ static cpumask_t vector_allocation_domain(int cpu) | |||
272 | return CPU_MASK_ALL; | 274 | return CPU_MASK_ALL; |
273 | } | 275 | } |
274 | 276 | ||
277 | static int __irq_prepare_move(int irq, int cpu) | ||
278 | { | ||
279 | struct irq_cfg *cfg = &irq_cfg[irq]; | ||
280 | int vector; | ||
281 | cpumask_t domain; | ||
282 | |||
283 | if (cfg->move_in_progress || cfg->move_cleanup_count) | ||
284 | return -EBUSY; | ||
285 | if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu)) | ||
286 | return -EINVAL; | ||
287 | if (cpu_isset(cpu, cfg->domain)) | ||
288 | return 0; | ||
289 | domain = vector_allocation_domain(cpu); | ||
290 | vector = find_unassigned_vector(domain); | ||
291 | if (vector < 0) | ||
292 | return -ENOSPC; | ||
293 | cfg->move_in_progress = 1; | ||
294 | cfg->old_domain = cfg->domain; | ||
295 | cfg->vector = IRQ_VECTOR_UNASSIGNED; | ||
296 | cfg->domain = CPU_MASK_NONE; | ||
297 | BUG_ON(__bind_irq_vector(irq, vector, domain)); | ||
298 | return 0; | ||
299 | } | ||
300 | |||
301 | int irq_prepare_move(int irq, int cpu) | ||
302 | { | ||
303 | unsigned long flags; | ||
304 | int ret; | ||
305 | |||
306 | spin_lock_irqsave(&vector_lock, flags); | ||
307 | ret = __irq_prepare_move(irq, cpu); | ||
308 | spin_unlock_irqrestore(&vector_lock, flags); | ||
309 | return ret; | ||
310 | } | ||
311 | |||
312 | void irq_complete_move(unsigned irq) | ||
313 | { | ||
314 | struct irq_cfg *cfg = &irq_cfg[irq]; | ||
315 | cpumask_t cleanup_mask; | ||
316 | int i; | ||
317 | |||
318 | if (likely(!cfg->move_in_progress)) | ||
319 | return; | ||
320 | |||
321 | if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain))) | ||
322 | return; | ||
323 | |||
324 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); | ||
325 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); | ||
326 | for_each_cpu_mask(i, cleanup_mask) | ||
327 | platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0); | ||
328 | cfg->move_in_progress = 0; | ||
329 | } | ||
330 | |||
331 | static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) | ||
332 | { | ||
333 | int me = smp_processor_id(); | ||
334 | ia64_vector vector; | ||
335 | unsigned long flags; | ||
336 | |||
337 | for (vector = IA64_FIRST_DEVICE_VECTOR; | ||
338 | vector < IA64_LAST_DEVICE_VECTOR; vector++) { | ||
339 | int irq; | ||
340 | struct irq_desc *desc; | ||
341 | struct irq_cfg *cfg; | ||
342 | irq = __get_cpu_var(vector_irq)[vector]; | ||
343 | if (irq < 0) | ||
344 | continue; | ||
345 | |||
346 | desc = irq_desc + irq; | ||
347 | cfg = irq_cfg + irq; | ||
348 | spin_lock(&desc->lock); | ||
349 | if (!cfg->move_cleanup_count) | ||
350 | goto unlock; | ||
351 | |||
352 | if (!cpu_isset(me, cfg->old_domain)) | ||
353 | goto unlock; | ||
354 | |||
355 | spin_lock_irqsave(&vector_lock, flags); | ||
356 | __get_cpu_var(vector_irq)[vector] = -1; | ||
357 | cpu_clear(me, vector_table[vector]); | ||
358 | spin_unlock_irqrestore(&vector_lock, flags); | ||
359 | cfg->move_cleanup_count--; | ||
360 | unlock: | ||
361 | spin_unlock(&desc->lock); | ||
362 | } | ||
363 | return IRQ_HANDLED; | ||
364 | } | ||
365 | |||
366 | static struct irqaction irq_move_irqaction = { | ||
367 | .handler = smp_irq_move_cleanup_interrupt, | ||
368 | .flags = IRQF_DISABLED, | ||
369 | .name = "irq_move" | ||
370 | }; | ||
371 | |||
275 | static int __init parse_vector_domain(char *arg) | 372 | static int __init parse_vector_domain(char *arg) |
276 | { | 373 | { |
277 | if (!arg) | 374 | if (!arg) |
@@ -303,36 +400,6 @@ void destroy_and_reserve_irq(unsigned int irq) | |||
303 | spin_unlock_irqrestore(&vector_lock, flags); | 400 | spin_unlock_irqrestore(&vector_lock, flags); |
304 | } | 401 | } |
305 | 402 | ||
306 | static int __reassign_irq_vector(int irq, int cpu) | ||
307 | { | ||
308 | struct irq_cfg *cfg = &irq_cfg[irq]; | ||
309 | int vector; | ||
310 | cpumask_t domain; | ||
311 | |||
312 | if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu)) | ||
313 | return -EINVAL; | ||
314 | if (cpu_isset(cpu, cfg->domain)) | ||
315 | return 0; | ||
316 | domain = vector_allocation_domain(cpu); | ||
317 | vector = find_unassigned_vector(domain); | ||
318 | if (vector < 0) | ||
319 | return -ENOSPC; | ||
320 | __clear_irq_vector(irq); | ||
321 | BUG_ON(__bind_irq_vector(irq, vector, domain)); | ||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | int reassign_irq_vector(int irq, int cpu) | ||
326 | { | ||
327 | unsigned long flags; | ||
328 | int ret; | ||
329 | |||
330 | spin_lock_irqsave(&vector_lock, flags); | ||
331 | ret = __reassign_irq_vector(irq, cpu); | ||
332 | spin_unlock_irqrestore(&vector_lock, flags); | ||
333 | return ret; | ||
334 | } | ||
335 | |||
336 | /* | 403 | /* |
337 | * Dynamic irq allocate and deallocation for MSI | 404 | * Dynamic irq allocate and deallocation for MSI |
338 | */ | 405 | */ |
@@ -578,6 +645,13 @@ init_IRQ (void) | |||
578 | register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction); | 645 | register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction); |
579 | register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction); | 646 | register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction); |
580 | register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction); | 647 | register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction); |
648 | #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG) | ||
649 | if (vector_domain_type != VECTOR_DOMAIN_NONE) { | ||
650 | BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR); | ||
651 | IA64_FIRST_DEVICE_VECTOR++; | ||
652 | register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction); | ||
653 | } | ||
654 | #endif | ||
581 | #endif | 655 | #endif |
582 | #ifdef CONFIG_PERFMON | 656 | #ifdef CONFIG_PERFMON |
583 | pfm_init_percpu(); | 657 | pfm_init_percpu(); |
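
The irq_ia64.c hunks replace the immediate reassign_irq_vector() with a two-phase move: irq_prepare_move() reserves a vector in the new CPU's domain while the old vector stays bound, and irq_complete_move(), once the interrupt is handled outside the old domain, sends the IA64_IRQ_MOVE_VECTOR cleanup IPI so the old-domain CPUs release the stale vector. The sketch below, not kernel code, shows how an interrupt chip drives that pair -- the same pattern the iosapic and MSI hunks in this patch follow. irq_prepare_move() and irq_complete_move() are the functions added here; my_set_affinity(), my_ack() and program_route() are stand-ins.

/* Prototypes as added by the patch in arch/ia64/kernel/irq_ia64.c. */
int irq_prepare_move(int irq, int cpu);
void irq_complete_move(unsigned int irq);

/* Stand-in for chip-specific retargeting (RTE or MSI message update). */
static void program_route(unsigned int irq, int cpu)
{
	/* hypothetical: point the hardware at the new CPU/vector */
}

static void my_set_affinity(unsigned int irq, int target_cpu)
{
	/* Phase 1: reserve a vector in the new domain.  The old vector
	 * stays valid, so an interrupt already in flight toward the old
	 * CPU is not lost. */
	if (irq_prepare_move(irq, target_cpu))
		return;				/* move in progress or no free vector */

	program_route(irq, target_cpu);
}

static void my_ack(unsigned int irq)
{
	/* Phase 2: once this irq is handled on a CPU outside the old
	 * domain, the cleanup IPI is sent and the old vector is freed. */
	irq_complete_move(irq);
	/* ...normal acknowledge/EOI handling follows... */
}
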
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index b618487cdc85..615c3d2b6348 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c | |||
@@ -1001,6 +1001,11 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | |||
1001 | return 1; | 1001 | return 1; |
1002 | } | 1002 | } |
1003 | 1003 | ||
1004 | /* ia64 does not need this */ | ||
1005 | void __kprobes jprobe_return(void) | ||
1006 | { | ||
1007 | } | ||
1008 | |||
1004 | int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | 1009 | int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) |
1005 | { | 1010 | { |
1006 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 1011 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c index e86d02959794..60c6ef67ebb2 100644 --- a/arch/ia64/kernel/msi_ia64.c +++ b/arch/ia64/kernel/msi_ia64.c | |||
@@ -57,7 +57,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) | |||
57 | if (!cpu_online(cpu)) | 57 | if (!cpu_online(cpu)) |
58 | return; | 58 | return; |
59 | 59 | ||
60 | if (reassign_irq_vector(irq, cpu)) | 60 | if (irq_prepare_move(irq, cpu)) |
61 | return; | 61 | return; |
62 | 62 | ||
63 | read_msi_msg(irq, &msg); | 63 | read_msi_msg(irq, &msg); |
@@ -119,6 +119,7 @@ void ia64_teardown_msi_irq(unsigned int irq) | |||
119 | 119 | ||
120 | static void ia64_ack_msi_irq(unsigned int irq) | 120 | static void ia64_ack_msi_irq(unsigned int irq) |
121 | { | 121 | { |
122 | irq_complete_move(irq); | ||
122 | move_native_irq(irq); | 123 | move_native_irq(irq); |
123 | ia64_eoi(); | 124 | ia64_eoi(); |
124 | } | 125 | } |
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c index f44fe8412162..a3022dc48ef8 100644 --- a/arch/ia64/kernel/sal.c +++ b/arch/ia64/kernel/sal.c | |||
@@ -109,6 +109,13 @@ check_versions (struct ia64_sal_systab *systab) | |||
109 | sal_revision = SAL_VERSION_CODE(2, 8); | 109 | sal_revision = SAL_VERSION_CODE(2, 8); |
110 | sal_version = SAL_VERSION_CODE(0, 0); | 110 | sal_version = SAL_VERSION_CODE(0, 0); |
111 | } | 111 | } |
112 | |||
113 | if (ia64_platform_is("sn2") && (sal_revision == SAL_VERSION_CODE(2, 9))) | ||
114 | /* | ||
115 | * SGI Altix has hard-coded version 2.9 in their prom | ||
116 | * but they actually implement 3.2, so let's fix it here. | ||
117 | */ | ||
118 | sal_revision = SAL_VERSION_CODE(3, 2); | ||
112 | } | 119 | } |
113 | 120 | ||
114 | static void __init | 121 | static void __init |
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 309da3567bc8..5740296c35af 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c | |||
@@ -342,15 +342,33 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, | |||
342 | 342 | ||
343 | new_sp = scr->pt.r12; | 343 | new_sp = scr->pt.r12; |
344 | tramp_addr = (unsigned long) __kernel_sigtramp; | 344 | tramp_addr = (unsigned long) __kernel_sigtramp; |
345 | if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(new_sp) == 0) { | 345 | if (ka->sa.sa_flags & SA_ONSTACK) { |
346 | new_sp = current->sas_ss_sp + current->sas_ss_size; | 346 | int onstack = sas_ss_flags(new_sp); |
347 | /* | 347 | |
348 | * We need to check for the register stack being on the signal stack | 348 | if (onstack == 0) { |
349 | * separately, because it's switched separately (memory stack is switched | 349 | new_sp = current->sas_ss_sp + current->sas_ss_size; |
350 | * in the kernel, register stack is switched in the signal trampoline). | 350 | /* |
351 | */ | 351 | * We need to check for the register stack being on the |
352 | if (!rbs_on_sig_stack(scr->pt.ar_bspstore)) | 352 | * signal stack separately, because it's switched |
353 | new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1); | 353 | * separately (memory stack is switched in the kernel, |
354 | * register stack is switched in the signal trampoline). | ||
355 | */ | ||
356 | if (!rbs_on_sig_stack(scr->pt.ar_bspstore)) | ||
357 | new_rbs = ALIGN(current->sas_ss_sp, | ||
358 | sizeof(long)); | ||
359 | } else if (onstack == SS_ONSTACK) { | ||
360 | unsigned long check_sp; | ||
361 | |||
362 | /* | ||
363 | * If we are on the alternate signal stack and would | ||
364 | * overflow it, don't. Return an always-bogus address | ||
365 | * instead so we will die with SIGSEGV. | ||
366 | */ | ||
367 | check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN; | ||
368 | if (!likely(on_sig_stack(check_sp))) | ||
369 | return force_sigsegv_info(sig, (void __user *) | ||
370 | check_sp); | ||
371 | } | ||
354 | } | 372 | } |
355 | frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN); | 373 | frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN); |
356 | 374 | ||
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S index 6dfa3b3c0e2a..18a9c5f4b00d 100644 --- a/arch/m68k/kernel/entry.S +++ b/arch/m68k/kernel/entry.S | |||
@@ -742,7 +742,9 @@ sys_call_table: | |||
742 | .long sys_epoll_pwait /* 315 */ | 742 | .long sys_epoll_pwait /* 315 */ |
743 | .long sys_utimensat | 743 | .long sys_utimensat |
744 | .long sys_signalfd | 744 | .long sys_signalfd |
745 | .long sys_ni_syscall | 745 | .long sys_timerfd_create |
746 | .long sys_eventfd | 746 | .long sys_eventfd |
747 | .long sys_fallocate /* 320 */ | 747 | .long sys_fallocate /* 320 */ |
748 | .long sys_timerfd_settime | ||
749 | .long sys_timerfd_gettime | ||
748 | 750 | ||
diff --git a/arch/m68knommu/defconfig b/arch/m68knommu/defconfig index 648113075f97..670b0a99cfa0 100644 --- a/arch/m68knommu/defconfig +++ b/arch/m68knommu/defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.23 | 3 | # Linux kernel version: 2.6.25-rc3 |
4 | # Thu Oct 18 13:17:38 2007 | 4 | # Mon Feb 25 15:03:00 2008 |
5 | # | 5 | # |
6 | CONFIG_M68K=y | 6 | CONFIG_M68K=y |
7 | # CONFIG_MMU is not set | 7 | # CONFIG_MMU is not set |
@@ -15,8 +15,10 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y | |||
15 | CONFIG_GENERIC_HWEIGHT=y | 15 | CONFIG_GENERIC_HWEIGHT=y |
16 | CONFIG_GENERIC_HARDIRQS=y | 16 | CONFIG_GENERIC_HARDIRQS=y |
17 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 17 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
18 | CONFIG_GENERIC_TIME=y | ||
18 | CONFIG_TIME_LOW_RES=y | 19 | CONFIG_TIME_LOW_RES=y |
19 | CONFIG_NO_IOPORT=y | 20 | CONFIG_NO_IOPORT=y |
21 | CONFIG_ARCH_SUPPORTS_AOUT=y | ||
20 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | 22 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" |
21 | 23 | ||
22 | # | 24 | # |
@@ -31,12 +33,14 @@ CONFIG_LOCALVERSION_AUTO=y | |||
31 | # CONFIG_POSIX_MQUEUE is not set | 33 | # CONFIG_POSIX_MQUEUE is not set |
32 | # CONFIG_BSD_PROCESS_ACCT is not set | 34 | # CONFIG_BSD_PROCESS_ACCT is not set |
33 | # CONFIG_TASKSTATS is not set | 35 | # CONFIG_TASKSTATS is not set |
34 | # CONFIG_USER_NS is not set | ||
35 | # CONFIG_AUDIT is not set | 36 | # CONFIG_AUDIT is not set |
36 | # CONFIG_IKCONFIG is not set | 37 | # CONFIG_IKCONFIG is not set |
37 | CONFIG_LOG_BUF_SHIFT=14 | 38 | CONFIG_LOG_BUF_SHIFT=14 |
39 | # CONFIG_CGROUPS is not set | ||
40 | # CONFIG_GROUP_SCHED is not set | ||
38 | # CONFIG_SYSFS_DEPRECATED is not set | 41 | # CONFIG_SYSFS_DEPRECATED is not set |
39 | # CONFIG_RELAY is not set | 42 | # CONFIG_RELAY is not set |
43 | # CONFIG_NAMESPACES is not set | ||
40 | # CONFIG_BLK_DEV_INITRD is not set | 44 | # CONFIG_BLK_DEV_INITRD is not set |
41 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 45 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
42 | CONFIG_SYSCTL=y | 46 | CONFIG_SYSCTL=y |
@@ -48,15 +52,22 @@ CONFIG_SYSCTL_SYSCALL=y | |||
48 | CONFIG_PRINTK=y | 52 | CONFIG_PRINTK=y |
49 | CONFIG_BUG=y | 53 | CONFIG_BUG=y |
50 | CONFIG_ELF_CORE=y | 54 | CONFIG_ELF_CORE=y |
55 | CONFIG_COMPAT_BRK=y | ||
51 | CONFIG_BASE_FULL=y | 56 | CONFIG_BASE_FULL=y |
52 | # CONFIG_FUTEX is not set | 57 | # CONFIG_FUTEX is not set |
53 | # CONFIG_EPOLL is not set | 58 | # CONFIG_EPOLL is not set |
54 | # CONFIG_SIGNALFD is not set | 59 | # CONFIG_SIGNALFD is not set |
60 | # CONFIG_TIMERFD is not set | ||
55 | # CONFIG_EVENTFD is not set | 61 | # CONFIG_EVENTFD is not set |
56 | # CONFIG_VM_EVENT_COUNTERS is not set | 62 | # CONFIG_VM_EVENT_COUNTERS is not set |
57 | CONFIG_SLAB=y | 63 | CONFIG_SLAB=y |
58 | # CONFIG_SLUB is not set | 64 | # CONFIG_SLUB is not set |
59 | # CONFIG_SLOB is not set | 65 | # CONFIG_SLOB is not set |
66 | # CONFIG_PROFILING is not set | ||
67 | # CONFIG_MARKERS is not set | ||
68 | # CONFIG_HAVE_OPROFILE is not set | ||
69 | # CONFIG_HAVE_KPROBES is not set | ||
70 | CONFIG_SLABINFO=y | ||
60 | CONFIG_TINY_SHMEM=y | 71 | CONFIG_TINY_SHMEM=y |
61 | CONFIG_BASE_SMALL=0 | 72 | CONFIG_BASE_SMALL=0 |
62 | CONFIG_MODULES=y | 73 | CONFIG_MODULES=y |
@@ -83,6 +94,8 @@ CONFIG_IOSCHED_NOOP=y | |||
83 | # CONFIG_DEFAULT_CFQ is not set | 94 | # CONFIG_DEFAULT_CFQ is not set |
84 | CONFIG_DEFAULT_NOOP=y | 95 | CONFIG_DEFAULT_NOOP=y |
85 | CONFIG_DEFAULT_IOSCHED="noop" | 96 | CONFIG_DEFAULT_IOSCHED="noop" |
97 | CONFIG_CLASSIC_RCU=y | ||
98 | # CONFIG_PREEMPT_RCU is not set | ||
86 | 99 | ||
87 | # | 100 | # |
88 | # Processor type and features | 101 | # Processor type and features |
@@ -121,6 +134,7 @@ CONFIG_M5272C3=y | |||
121 | # CONFIG_MOD5272 is not set | 134 | # CONFIG_MOD5272 is not set |
122 | CONFIG_FREESCALE=y | 135 | CONFIG_FREESCALE=y |
123 | CONFIG_4KSTACKS=y | 136 | CONFIG_4KSTACKS=y |
137 | CONFIG_HZ=100 | ||
124 | 138 | ||
125 | # | 139 | # |
126 | # RAM configuration | 140 | # RAM configuration |
@@ -147,6 +161,7 @@ CONFIG_FLATMEM_MANUAL=y | |||
147 | CONFIG_FLATMEM=y | 161 | CONFIG_FLATMEM=y |
148 | CONFIG_FLAT_NODE_MEM_MAP=y | 162 | CONFIG_FLAT_NODE_MEM_MAP=y |
149 | # CONFIG_SPARSEMEM_STATIC is not set | 163 | # CONFIG_SPARSEMEM_STATIC is not set |
164 | # CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set | ||
150 | CONFIG_SPLIT_PTLOCK_CPUS=4 | 165 | CONFIG_SPLIT_PTLOCK_CPUS=4 |
151 | # CONFIG_RESOURCES_64BIT is not set | 166 | # CONFIG_RESOURCES_64BIT is not set |
152 | CONFIG_ZONE_DMA_FLAG=1 | 167 | CONFIG_ZONE_DMA_FLAG=1 |
@@ -159,10 +174,6 @@ CONFIG_VIRT_TO_BUS=y | |||
159 | # CONFIG_ARCH_SUPPORTS_MSI is not set | 174 | # CONFIG_ARCH_SUPPORTS_MSI is not set |
160 | 175 | ||
161 | # | 176 | # |
162 | # PCCARD (PCMCIA/CardBus) support | ||
163 | # | ||
164 | |||
165 | # | ||
166 | # Executable file formats | 177 | # Executable file formats |
167 | # | 178 | # |
168 | CONFIG_BINFMT_FLAT=y | 179 | CONFIG_BINFMT_FLAT=y |
@@ -205,6 +216,7 @@ CONFIG_IP_FIB_HASH=y | |||
205 | # CONFIG_INET_XFRM_MODE_TRANSPORT is not set | 216 | # CONFIG_INET_XFRM_MODE_TRANSPORT is not set |
206 | # CONFIG_INET_XFRM_MODE_TUNNEL is not set | 217 | # CONFIG_INET_XFRM_MODE_TUNNEL is not set |
207 | # CONFIG_INET_XFRM_MODE_BEET is not set | 218 | # CONFIG_INET_XFRM_MODE_BEET is not set |
219 | # CONFIG_INET_LRO is not set | ||
208 | # CONFIG_INET_DIAG is not set | 220 | # CONFIG_INET_DIAG is not set |
209 | # CONFIG_TCP_CONG_ADVANCED is not set | 221 | # CONFIG_TCP_CONG_ADVANCED is not set |
210 | CONFIG_TCP_CONG_CUBIC=y | 222 | CONFIG_TCP_CONG_CUBIC=y |
@@ -229,10 +241,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic" | |||
229 | # CONFIG_LAPB is not set | 241 | # CONFIG_LAPB is not set |
230 | # CONFIG_ECONET is not set | 242 | # CONFIG_ECONET is not set |
231 | # CONFIG_WAN_ROUTER is not set | 243 | # CONFIG_WAN_ROUTER is not set |
232 | |||
233 | # | ||
234 | # QoS and/or fair queueing | ||
235 | # | ||
236 | # CONFIG_NET_SCHED is not set | 244 | # CONFIG_NET_SCHED is not set |
237 | 245 | ||
238 | # | 246 | # |
@@ -240,6 +248,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic" | |||
240 | # | 248 | # |
241 | # CONFIG_NET_PKTGEN is not set | 249 | # CONFIG_NET_PKTGEN is not set |
242 | # CONFIG_HAMRADIO is not set | 250 | # CONFIG_HAMRADIO is not set |
251 | # CONFIG_CAN is not set | ||
243 | # CONFIG_IRDA is not set | 252 | # CONFIG_IRDA is not set |
244 | # CONFIG_BT is not set | 253 | # CONFIG_BT is not set |
245 | # CONFIG_AF_RXRPC is not set | 254 | # CONFIG_AF_RXRPC is not set |
@@ -283,6 +292,7 @@ CONFIG_MTD_BLOCK=y | |||
283 | # CONFIG_INFTL is not set | 292 | # CONFIG_INFTL is not set |
284 | # CONFIG_RFD_FTL is not set | 293 | # CONFIG_RFD_FTL is not set |
285 | # CONFIG_SSFDC is not set | 294 | # CONFIG_SSFDC is not set |
295 | # CONFIG_MTD_OOPS is not set | ||
286 | 296 | ||
287 | # | 297 | # |
288 | # RAM/ROM/Flash chip drivers | 298 | # RAM/ROM/Flash chip drivers |
@@ -339,10 +349,11 @@ CONFIG_BLK_DEV=y | |||
339 | CONFIG_BLK_DEV_RAM=y | 349 | CONFIG_BLK_DEV_RAM=y |
340 | CONFIG_BLK_DEV_RAM_COUNT=16 | 350 | CONFIG_BLK_DEV_RAM_COUNT=16 |
341 | CONFIG_BLK_DEV_RAM_SIZE=4096 | 351 | CONFIG_BLK_DEV_RAM_SIZE=4096 |
342 | CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 | 352 | # CONFIG_BLK_DEV_XIP is not set |
343 | # CONFIG_CDROM_PKTCDVD is not set | 353 | # CONFIG_CDROM_PKTCDVD is not set |
344 | # CONFIG_ATA_OVER_ETH is not set | 354 | # CONFIG_ATA_OVER_ETH is not set |
345 | # CONFIG_MISC_DEVICES is not set | 355 | # CONFIG_MISC_DEVICES is not set |
356 | CONFIG_HAVE_IDE=y | ||
346 | # CONFIG_IDE is not set | 357 | # CONFIG_IDE is not set |
347 | 358 | ||
348 | # | 359 | # |
@@ -360,9 +371,15 @@ CONFIG_NETDEVICES=y | |||
360 | # CONFIG_MACVLAN is not set | 371 | # CONFIG_MACVLAN is not set |
361 | # CONFIG_EQUALIZER is not set | 372 | # CONFIG_EQUALIZER is not set |
362 | # CONFIG_TUN is not set | 373 | # CONFIG_TUN is not set |
374 | # CONFIG_VETH is not set | ||
363 | # CONFIG_PHYLIB is not set | 375 | # CONFIG_PHYLIB is not set |
364 | CONFIG_NET_ETHERNET=y | 376 | CONFIG_NET_ETHERNET=y |
365 | # CONFIG_MII is not set | 377 | # CONFIG_MII is not set |
378 | # CONFIG_IBM_NEW_EMAC_ZMII is not set | ||
379 | # CONFIG_IBM_NEW_EMAC_RGMII is not set | ||
380 | # CONFIG_IBM_NEW_EMAC_TAH is not set | ||
381 | # CONFIG_IBM_NEW_EMAC_EMAC4 is not set | ||
382 | # CONFIG_B44 is not set | ||
366 | CONFIG_FEC=y | 383 | CONFIG_FEC=y |
367 | # CONFIG_FEC2 is not set | 384 | # CONFIG_FEC2 is not set |
368 | # CONFIG_NETDEV_1000 is not set | 385 | # CONFIG_NETDEV_1000 is not set |
@@ -377,7 +394,7 @@ CONFIG_FEC=y | |||
377 | CONFIG_PPP=y | 394 | CONFIG_PPP=y |
378 | # CONFIG_PPP_MULTILINK is not set | 395 | # CONFIG_PPP_MULTILINK is not set |
379 | # CONFIG_PPP_FILTER is not set | 396 | # CONFIG_PPP_FILTER is not set |
380 | # CONFIG_PPP_ASYNC is not set | 397 | CONFIG_PPP_ASYNC=y |
381 | # CONFIG_PPP_SYNC_TTY is not set | 398 | # CONFIG_PPP_SYNC_TTY is not set |
382 | # CONFIG_PPP_DEFLATE is not set | 399 | # CONFIG_PPP_DEFLATE is not set |
383 | # CONFIG_PPP_BSDCOMP is not set | 400 | # CONFIG_PPP_BSDCOMP is not set |
@@ -386,7 +403,6 @@ CONFIG_PPP=y | |||
386 | # CONFIG_PPPOL2TP is not set | 403 | # CONFIG_PPPOL2TP is not set |
387 | # CONFIG_SLIP is not set | 404 | # CONFIG_SLIP is not set |
388 | CONFIG_SLHC=y | 405 | CONFIG_SLHC=y |
389 | # CONFIG_SHAPER is not set | ||
390 | # CONFIG_NETCONSOLE is not set | 406 | # CONFIG_NETCONSOLE is not set |
391 | # CONFIG_NETPOLL is not set | 407 | # CONFIG_NETPOLL is not set |
392 | # CONFIG_NET_POLL_CONTROLLER is not set | 408 | # CONFIG_NET_POLL_CONTROLLER is not set |
@@ -418,12 +434,16 @@ CONFIG_SLHC=y | |||
418 | # | 434 | # |
419 | # Non-8250 serial port support | 435 | # Non-8250 serial port support |
420 | # | 436 | # |
421 | CONFIG_SERIAL_COLDFIRE=y | 437 | CONFIG_SERIAL_CORE=y |
438 | CONFIG_SERIAL_CORE_CONSOLE=y | ||
439 | # CONFIG_SERIAL_COLDFIRE is not set | ||
440 | CONFIG_SERIAL_MCF=y | ||
441 | CONFIG_SERIAL_MCF_BAUDRATE=19200 | ||
442 | CONFIG_SERIAL_MCF_CONSOLE=y | ||
422 | # CONFIG_UNIX98_PTYS is not set | 443 | # CONFIG_UNIX98_PTYS is not set |
423 | CONFIG_LEGACY_PTYS=y | 444 | CONFIG_LEGACY_PTYS=y |
424 | CONFIG_LEGACY_PTY_COUNT=256 | 445 | CONFIG_LEGACY_PTY_COUNT=256 |
425 | # CONFIG_IPMI_HANDLER is not set | 446 | # CONFIG_IPMI_HANDLER is not set |
426 | # CONFIG_WATCHDOG is not set | ||
427 | # CONFIG_HW_RANDOM is not set | 447 | # CONFIG_HW_RANDOM is not set |
428 | # CONFIG_GEN_RTC is not set | 448 | # CONFIG_GEN_RTC is not set |
429 | # CONFIG_R3964 is not set | 449 | # CONFIG_R3964 is not set |
@@ -439,6 +459,14 @@ CONFIG_LEGACY_PTY_COUNT=256 | |||
439 | # CONFIG_W1 is not set | 459 | # CONFIG_W1 is not set |
440 | # CONFIG_POWER_SUPPLY is not set | 460 | # CONFIG_POWER_SUPPLY is not set |
441 | # CONFIG_HWMON is not set | 461 | # CONFIG_HWMON is not set |
462 | # CONFIG_THERMAL is not set | ||
463 | # CONFIG_WATCHDOG is not set | ||
464 | |||
465 | # | ||
466 | # Sonics Silicon Backplane | ||
467 | # | ||
468 | CONFIG_SSB_POSSIBLE=y | ||
469 | # CONFIG_SSB is not set | ||
442 | 470 | ||
443 | # | 471 | # |
444 | # Multifunction device drivers | 472 | # Multifunction device drivers |
@@ -450,20 +478,20 @@ CONFIG_LEGACY_PTY_COUNT=256 | |||
450 | # | 478 | # |
451 | # CONFIG_VIDEO_DEV is not set | 479 | # CONFIG_VIDEO_DEV is not set |
452 | # CONFIG_DVB_CORE is not set | 480 | # CONFIG_DVB_CORE is not set |
453 | CONFIG_DAB=y | 481 | # CONFIG_DAB is not set |
454 | 482 | ||
455 | # | 483 | # |
456 | # Graphics support | 484 | # Graphics support |
457 | # | 485 | # |
486 | # CONFIG_VGASTATE is not set | ||
487 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set | ||
488 | # CONFIG_FB is not set | ||
458 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | 489 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set |
459 | 490 | ||
460 | # | 491 | # |
461 | # Display device support | 492 | # Display device support |
462 | # | 493 | # |
463 | # CONFIG_DISPLAY_SUPPORT is not set | 494 | # CONFIG_DISPLAY_SUPPORT is not set |
464 | # CONFIG_VGASTATE is not set | ||
465 | CONFIG_VIDEO_OUTPUT_CONTROL=y | ||
466 | # CONFIG_FB is not set | ||
467 | 495 | ||
468 | # | 496 | # |
469 | # Sound | 497 | # Sound |
@@ -471,23 +499,11 @@ CONFIG_VIDEO_OUTPUT_CONTROL=y | |||
471 | # CONFIG_SOUND is not set | 499 | # CONFIG_SOUND is not set |
472 | # CONFIG_USB_SUPPORT is not set | 500 | # CONFIG_USB_SUPPORT is not set |
473 | # CONFIG_MMC is not set | 501 | # CONFIG_MMC is not set |
502 | # CONFIG_MEMSTICK is not set | ||
474 | # CONFIG_NEW_LEDS is not set | 503 | # CONFIG_NEW_LEDS is not set |
475 | # CONFIG_RTC_CLASS is not set | 504 | # CONFIG_RTC_CLASS is not set |
476 | 505 | ||
477 | # | 506 | # |
478 | # DMA Engine support | ||
479 | # | ||
480 | # CONFIG_DMA_ENGINE is not set | ||
481 | |||
482 | # | ||
483 | # DMA Clients | ||
484 | # | ||
485 | |||
486 | # | ||
487 | # DMA Devices | ||
488 | # | ||
489 | |||
490 | # | ||
491 | # Userspace I/O | 507 | # Userspace I/O |
492 | # | 508 | # |
493 | # CONFIG_UIO is not set | 509 | # CONFIG_UIO is not set |
@@ -505,11 +521,9 @@ CONFIG_EXT2_FS=y | |||
505 | # CONFIG_XFS_FS is not set | 521 | # CONFIG_XFS_FS is not set |
506 | # CONFIG_GFS2_FS is not set | 522 | # CONFIG_GFS2_FS is not set |
507 | # CONFIG_OCFS2_FS is not set | 523 | # CONFIG_OCFS2_FS is not set |
508 | # CONFIG_MINIX_FS is not set | 524 | # CONFIG_DNOTIFY is not set |
509 | CONFIG_ROMFS_FS=y | ||
510 | # CONFIG_INOTIFY is not set | 525 | # CONFIG_INOTIFY is not set |
511 | # CONFIG_QUOTA is not set | 526 | # CONFIG_QUOTA is not set |
512 | # CONFIG_DNOTIFY is not set | ||
513 | # CONFIG_AUTOFS_FS is not set | 527 | # CONFIG_AUTOFS_FS is not set |
514 | # CONFIG_AUTOFS4_FS is not set | 528 | # CONFIG_AUTOFS4_FS is not set |
515 | # CONFIG_FUSE_FS is not set | 529 | # CONFIG_FUSE_FS is not set |
@@ -535,7 +549,6 @@ CONFIG_PROC_SYSCTL=y | |||
535 | CONFIG_SYSFS=y | 549 | CONFIG_SYSFS=y |
536 | # CONFIG_TMPFS is not set | 550 | # CONFIG_TMPFS is not set |
537 | # CONFIG_HUGETLB_PAGE is not set | 551 | # CONFIG_HUGETLB_PAGE is not set |
538 | CONFIG_RAMFS=y | ||
539 | # CONFIG_CONFIGFS_FS is not set | 552 | # CONFIG_CONFIGFS_FS is not set |
540 | 553 | ||
541 | # | 554 | # |
@@ -551,42 +564,27 @@ CONFIG_RAMFS=y | |||
551 | # CONFIG_JFFS2_FS is not set | 564 | # CONFIG_JFFS2_FS is not set |
552 | # CONFIG_CRAMFS is not set | 565 | # CONFIG_CRAMFS is not set |
553 | # CONFIG_VXFS_FS is not set | 566 | # CONFIG_VXFS_FS is not set |
567 | # CONFIG_MINIX_FS is not set | ||
554 | # CONFIG_HPFS_FS is not set | 568 | # CONFIG_HPFS_FS is not set |
555 | # CONFIG_QNX4FS_FS is not set | 569 | # CONFIG_QNX4FS_FS is not set |
570 | CONFIG_ROMFS_FS=y | ||
556 | # CONFIG_SYSV_FS is not set | 571 | # CONFIG_SYSV_FS is not set |
557 | # CONFIG_UFS_FS is not set | 572 | # CONFIG_UFS_FS is not set |
558 | 573 | # CONFIG_NETWORK_FILESYSTEMS is not set | |
559 | # | ||
560 | # Network File Systems | ||
561 | # | ||
562 | # CONFIG_NFS_FS is not set | ||
563 | # CONFIG_NFSD is not set | ||
564 | # CONFIG_SMB_FS is not set | ||
565 | # CONFIG_CIFS is not set | ||
566 | # CONFIG_NCP_FS is not set | ||
567 | # CONFIG_CODA_FS is not set | ||
568 | # CONFIG_AFS_FS is not set | ||
569 | 574 | ||
570 | # | 575 | # |
571 | # Partition Types | 576 | # Partition Types |
572 | # | 577 | # |
573 | # CONFIG_PARTITION_ADVANCED is not set | 578 | # CONFIG_PARTITION_ADVANCED is not set |
574 | CONFIG_MSDOS_PARTITION=y | 579 | CONFIG_MSDOS_PARTITION=y |
575 | |||
576 | # | ||
577 | # Native Language Support | ||
578 | # | ||
579 | # CONFIG_NLS is not set | 580 | # CONFIG_NLS is not set |
580 | |||
581 | # | ||
582 | # Distributed Lock Manager | ||
583 | # | ||
584 | # CONFIG_DLM is not set | 581 | # CONFIG_DLM is not set |
585 | 582 | ||
586 | # | 583 | # |
587 | # Kernel hacking | 584 | # Kernel hacking |
588 | # | 585 | # |
589 | # CONFIG_PRINTK_TIME is not set | 586 | # CONFIG_PRINTK_TIME is not set |
587 | CONFIG_ENABLE_WARN_DEPRECATED=y | ||
590 | # CONFIG_ENABLE_MUST_CHECK is not set | 588 | # CONFIG_ENABLE_MUST_CHECK is not set |
591 | # CONFIG_MAGIC_SYSRQ is not set | 589 | # CONFIG_MAGIC_SYSRQ is not set |
592 | # CONFIG_UNUSED_SYMBOLS is not set | 590 | # CONFIG_UNUSED_SYMBOLS is not set |
@@ -594,6 +592,7 @@ CONFIG_MSDOS_PARTITION=y | |||
594 | # CONFIG_HEADERS_CHECK is not set | 592 | # CONFIG_HEADERS_CHECK is not set |
595 | # CONFIG_DEBUG_KERNEL is not set | 593 | # CONFIG_DEBUG_KERNEL is not set |
596 | # CONFIG_DEBUG_BUGVERBOSE is not set | 594 | # CONFIG_DEBUG_BUGVERBOSE is not set |
595 | # CONFIG_SAMPLES is not set | ||
597 | # CONFIG_FULLDEBUG is not set | 596 | # CONFIG_FULLDEBUG is not set |
598 | # CONFIG_HIGHPROFILE is not set | 597 | # CONFIG_HIGHPROFILE is not set |
599 | # CONFIG_BOOTPARAM is not set | 598 | # CONFIG_BOOTPARAM is not set |
@@ -605,6 +604,7 @@ CONFIG_MSDOS_PARTITION=y | |||
605 | # | 604 | # |
606 | # CONFIG_KEYS is not set | 605 | # CONFIG_KEYS is not set |
607 | # CONFIG_SECURITY is not set | 606 | # CONFIG_SECURITY is not set |
607 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set | ||
608 | # CONFIG_CRYPTO is not set | 608 | # CONFIG_CRYPTO is not set |
609 | 609 | ||
610 | # | 610 | # |
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S index 1b02b8820068..fca2e49917a3 100644 --- a/arch/m68knommu/kernel/syscalltable.S +++ b/arch/m68knommu/kernel/syscalltable.S | |||
@@ -336,9 +336,11 @@ ENTRY(sys_call_table) | |||
336 | .long sys_epoll_pwait /* 315 */ | 336 | .long sys_epoll_pwait /* 315 */ |
337 | .long sys_utimensat | 337 | .long sys_utimensat |
338 | .long sys_signalfd | 338 | .long sys_signalfd |
339 | .long sys_ni_syscall | 339 | .long sys_timerfd_create |
340 | .long sys_eventfd | 340 | .long sys_eventfd |
341 | .long sys_fallocate /* 320 */ | 341 | .long sys_fallocate /* 320 */ |
342 | .long sys_timerfd_settime | ||
343 | .long sys_timerfd_gettime | ||
342 | 344 | ||
343 | .rept NR_syscalls-(.-sys_call_table)/4 | 345 | .rept NR_syscalls-(.-sys_call_table)/4 |
344 | .long sys_ni_syscall | 346 | .long sys_ni_syscall |
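
The syscall table hunk above wires up the timerfd entries (sys_timerfd_create in slot 318, plus sys_timerfd_settime and sys_timerfd_gettime at 321/322). As a rough illustration of what those slots serve, a minimal userspace sketch using the glibc wrappers (not the raw m68knommu syscall numbers) might look like:

    #include <sys/timerfd.h>
    #include <time.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            struct itimerspec its = {
                    .it_value    = { .tv_sec = 1 },   /* first expiry after 1s */
                    .it_interval = { .tv_sec = 1 },   /* then every 1s */
            };
            uint64_t expirations;
            int fd = timerfd_create(CLOCK_MONOTONIC, 0);

            if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0)
                    return 1;
            /* read() blocks until the timer fires and returns the expiry count */
            if (read(fd, &expirations, sizeof(expirations)) == sizeof(expirations))
                    printf("expired %llu time(s)\n", (unsigned long long)expirations);
            close(fd);
            return 0;
    }
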
diff --git a/arch/m68knommu/platform/68328/timers.c b/arch/m68knommu/platform/68328/timers.c index 9159fd05c9ac..6bafefa546e5 100644 --- a/arch/m68knommu/platform/68328/timers.c +++ b/arch/m68knommu/platform/68328/timers.c | |||
@@ -67,16 +67,6 @@ static irqreturn_t hw_tick(int irq, void *dummy) | |||
67 | 67 | ||
68 | /***************************************************************************/ | 68 | /***************************************************************************/ |
69 | 69 | ||
70 | static irqreturn_t hw_tick(int irq, void *dummy) | ||
71 | { | ||
72 | /* Reset Timer1 */ | ||
73 | TSTAT &= 0; | ||
74 | |||
75 | return arch_timer_interrupt(irq, dummy); | ||
76 | } | ||
77 | |||
78 | /***************************************************************************/ | ||
79 | |||
80 | static struct irqaction m68328_timer_irq = { | 70 | static struct irqaction m68328_timer_irq = { |
81 | .name = "timer", | 71 | .name = "timer", |
82 | .flags = IRQF_DISABLED | IRQF_TIMER, | 72 | .flags = IRQF_DISABLED | IRQF_TIMER, |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 5b8d8382b762..1189d8d6170d 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -90,6 +90,7 @@ config PPC | |||
90 | select HAVE_IDE | 90 | select HAVE_IDE |
91 | select HAVE_OPROFILE | 91 | select HAVE_OPROFILE |
92 | select HAVE_KPROBES | 92 | select HAVE_KPROBES |
93 | select HAVE_KRETPROBES | ||
93 | 94 | ||
94 | config EARLY_PRINTK | 95 | config EARLY_PRINTK |
95 | bool | 96 | bool |
diff --git a/arch/powerpc/boot/cuboot-bamboo.c b/arch/powerpc/boot/cuboot-bamboo.c index 900c7ff2b7e9..b5c30f766c40 100644 --- a/arch/powerpc/boot/cuboot-bamboo.c +++ b/arch/powerpc/boot/cuboot-bamboo.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include "44x.h" | 17 | #include "44x.h" |
18 | #include "cuboot.h" | 18 | #include "cuboot.h" |
19 | 19 | ||
20 | #define TARGET_4xx | ||
20 | #define TARGET_44x | 21 | #define TARGET_44x |
21 | #include "ppcboot.h" | 22 | #include "ppcboot.h" |
22 | 23 | ||
diff --git a/arch/powerpc/boot/cuboot-ebony.c b/arch/powerpc/boot/cuboot-ebony.c index c5f37ce172ea..56564ba37f62 100644 --- a/arch/powerpc/boot/cuboot-ebony.c +++ b/arch/powerpc/boot/cuboot-ebony.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include "44x.h" | 17 | #include "44x.h" |
18 | #include "cuboot.h" | 18 | #include "cuboot.h" |
19 | 19 | ||
20 | #define TARGET_4xx | ||
20 | #define TARGET_44x | 21 | #define TARGET_44x |
21 | #include "ppcboot.h" | 22 | #include "ppcboot.h" |
22 | 23 | ||
diff --git a/arch/powerpc/boot/cuboot-katmai.c b/arch/powerpc/boot/cuboot-katmai.c index c021167f9381..5434d70b5660 100644 --- a/arch/powerpc/boot/cuboot-katmai.c +++ b/arch/powerpc/boot/cuboot-katmai.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include "44x.h" | 22 | #include "44x.h" |
23 | #include "cuboot.h" | 23 | #include "cuboot.h" |
24 | 24 | ||
25 | #define TARGET_4xx | ||
25 | #define TARGET_44x | 26 | #define TARGET_44x |
26 | #include "ppcboot.h" | 27 | #include "ppcboot.h" |
27 | 28 | ||
diff --git a/arch/powerpc/boot/cuboot-taishan.c b/arch/powerpc/boot/cuboot-taishan.c index f66455a45ab1..b55b80467eed 100644 --- a/arch/powerpc/boot/cuboot-taishan.c +++ b/arch/powerpc/boot/cuboot-taishan.c | |||
@@ -21,7 +21,9 @@ | |||
21 | #include "dcr.h" | 21 | #include "dcr.h" |
22 | #include "4xx.h" | 22 | #include "4xx.h" |
23 | 23 | ||
24 | #define TARGET_4xx | ||
24 | #define TARGET_44x | 25 | #define TARGET_44x |
26 | #define TARGET_440GX | ||
25 | #include "ppcboot.h" | 27 | #include "ppcboot.h" |
26 | 28 | ||
27 | static bd_t bd; | 29 | static bd_t bd; |
diff --git a/arch/powerpc/boot/cuboot-warp.c b/arch/powerpc/boot/cuboot-warp.c index bdedebe1bc14..3db93e85e9ea 100644 --- a/arch/powerpc/boot/cuboot-warp.c +++ b/arch/powerpc/boot/cuboot-warp.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include "4xx.h" | 11 | #include "4xx.h" |
12 | #include "cuboot.h" | 12 | #include "cuboot.h" |
13 | 13 | ||
14 | #define TARGET_4xx | ||
14 | #define TARGET_44x | 15 | #define TARGET_44x |
15 | #include "ppcboot.h" | 16 | #include "ppcboot.h" |
16 | 17 | ||
diff --git a/arch/powerpc/boot/dts/haleakala.dts b/arch/powerpc/boot/dts/haleakala.dts index 5dd3d15f0feb..ae68fefc01b6 100644 --- a/arch/powerpc/boot/dts/haleakala.dts +++ b/arch/powerpc/boot/dts/haleakala.dts | |||
@@ -235,7 +235,7 @@ | |||
235 | #interrupt-cells = <1>; | 235 | #interrupt-cells = <1>; |
236 | #size-cells = <2>; | 236 | #size-cells = <2>; |
237 | #address-cells = <3>; | 237 | #address-cells = <3>; |
238 | compatible = "ibm,plb-pciex-405exr", "ibm,plb-pciex"; | 238 | compatible = "ibm,plb-pciex-405ex", "ibm,plb-pciex"; |
239 | primary; | 239 | primary; |
240 | port = <0>; /* port number */ | 240 | port = <0>; /* port number */ |
241 | reg = <a0000000 20000000 /* Config space access */ | 241 | reg = <a0000000 20000000 /* Config space access */ |
diff --git a/arch/powerpc/boot/dts/katmai.dts b/arch/powerpc/boot/dts/katmai.dts index bc32ac7250ec..fc86e5a3afc4 100644 --- a/arch/powerpc/boot/dts/katmai.dts +++ b/arch/powerpc/boot/dts/katmai.dts | |||
@@ -38,8 +38,8 @@ | |||
38 | timebase-frequency = <0>; /* Filled in by zImage */ | 38 | timebase-frequency = <0>; /* Filled in by zImage */ |
39 | i-cache-line-size = <20>; | 39 | i-cache-line-size = <20>; |
40 | d-cache-line-size = <20>; | 40 | d-cache-line-size = <20>; |
41 | i-cache-size = <20000>; | 41 | i-cache-size = <8000>; |
42 | d-cache-size = <20000>; | 42 | d-cache-size = <8000>; |
43 | dcr-controller; | 43 | dcr-controller; |
44 | dcr-access-method = "native"; | 44 | dcr-access-method = "native"; |
45 | }; | 45 | }; |
@@ -136,11 +136,11 @@ | |||
136 | }; | 136 | }; |
137 | 137 | ||
138 | POB0: opb { | 138 | POB0: opb { |
139 | compatible = "ibm,opb-440spe", "ibm,opb-440gp", "ibm,opb"; | 139 | compatible = "ibm,opb-440spe", "ibm,opb-440gp", "ibm,opb"; |
140 | #address-cells = <1>; | 140 | #address-cells = <1>; |
141 | #size-cells = <1>; | 141 | #size-cells = <1>; |
142 | ranges = <00000000 4 e0000000 20000000>; | 142 | ranges = <00000000 4 e0000000 20000000>; |
143 | clock-frequency = <0>; /* Filled in by zImage */ | 143 | clock-frequency = <0>; /* Filled in by zImage */ |
144 | 144 | ||
145 | EBC0: ebc { | 145 | EBC0: ebc { |
146 | compatible = "ibm,ebc-440spe", "ibm,ebc-440gp", "ibm,ebc"; | 146 | compatible = "ibm,ebc-440spe", "ibm,ebc-440gp", "ibm,ebc"; |
@@ -153,38 +153,38 @@ | |||
153 | }; | 153 | }; |
154 | 154 | ||
155 | UART0: serial@10000200 { | 155 | UART0: serial@10000200 { |
156 | device_type = "serial"; | 156 | device_type = "serial"; |
157 | compatible = "ns16550"; | 157 | compatible = "ns16550"; |
158 | reg = <10000200 8>; | 158 | reg = <10000200 8>; |
159 | virtual-reg = <a0000200>; | 159 | virtual-reg = <a0000200>; |
160 | clock-frequency = <0>; /* Filled in by zImage */ | 160 | clock-frequency = <0>; /* Filled in by zImage */ |
161 | current-speed = <1c200>; | 161 | current-speed = <1c200>; |
162 | interrupt-parent = <&UIC0>; | 162 | interrupt-parent = <&UIC0>; |
163 | interrupts = <0 4>; | 163 | interrupts = <0 4>; |
164 | }; | 164 | }; |
165 | 165 | ||
166 | UART1: serial@10000300 { | 166 | UART1: serial@10000300 { |
167 | device_type = "serial"; | 167 | device_type = "serial"; |
168 | compatible = "ns16550"; | 168 | compatible = "ns16550"; |
169 | reg = <10000300 8>; | 169 | reg = <10000300 8>; |
170 | virtual-reg = <a0000300>; | 170 | virtual-reg = <a0000300>; |
171 | clock-frequency = <0>; | 171 | clock-frequency = <0>; |
172 | current-speed = <0>; | 172 | current-speed = <0>; |
173 | interrupt-parent = <&UIC0>; | 173 | interrupt-parent = <&UIC0>; |
174 | interrupts = <1 4>; | 174 | interrupts = <1 4>; |
175 | }; | 175 | }; |
176 | 176 | ||
177 | 177 | ||
178 | UART2: serial@10000600 { | 178 | UART2: serial@10000600 { |
179 | device_type = "serial"; | 179 | device_type = "serial"; |
180 | compatible = "ns16550"; | 180 | compatible = "ns16550"; |
181 | reg = <10000600 8>; | 181 | reg = <10000600 8>; |
182 | virtual-reg = <a0000600>; | 182 | virtual-reg = <a0000600>; |
183 | clock-frequency = <0>; | 183 | clock-frequency = <0>; |
184 | current-speed = <0>; | 184 | current-speed = <0>; |
185 | interrupt-parent = <&UIC1>; | 185 | interrupt-parent = <&UIC1>; |
186 | interrupts = <5 4>; | 186 | interrupts = <5 4>; |
187 | }; | 187 | }; |
188 | 188 | ||
189 | IIC0: i2c@10000400 { | 189 | IIC0: i2c@10000400 { |
190 | compatible = "ibm,iic-440spe", "ibm,iic-440gp", "ibm,iic"; | 190 | compatible = "ibm,iic-440spe", "ibm,iic-440gp", "ibm,iic"; |
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c index 13929771bee7..9eed1f68fcab 100644 --- a/arch/powerpc/oprofile/op_model_cell.c +++ b/arch/powerpc/oprofile/op_model_cell.c | |||
@@ -1151,7 +1151,7 @@ static void cell_handle_interrupt(struct pt_regs *regs, | |||
1151 | for (i = 0; i < num_counters; ++i) { | 1151 | for (i = 0; i < num_counters; ++i) { |
1152 | if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i)) | 1152 | if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i)) |
1153 | && ctr[i].enabled) { | 1153 | && ctr[i].enabled) { |
1154 | oprofile_add_pc(pc, is_kernel, i); | 1154 | oprofile_add_ext_sample(pc, regs, i, is_kernel); |
1155 | cbe_write_ctr(cpu, i, reset_value[i]); | 1155 | cbe_write_ctr(cpu, i, reset_value[i]); |
1156 | } | 1156 | } |
1157 | } | 1157 | } |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c index 9aa4425d80b2..4d5fd1dbd400 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_common.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c | |||
@@ -199,6 +199,7 @@ int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv) | |||
199 | 199 | ||
200 | return 0; | 200 | return 0; |
201 | } | 201 | } |
202 | EXPORT_SYMBOL(mpc52xx_set_psc_clkdiv); | ||
202 | 203 | ||
203 | /** | 204 | /** |
204 | * mpc52xx_restart: ppc_md->restart hook for mpc5200 using the watchdog timer | 205 | * mpc52xx_restart: ppc_md->restart hook for mpc5200 using the watchdog timer |
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index edab631a8dcb..20ea0e118f24 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c | |||
@@ -113,7 +113,7 @@ | |||
113 | 113 | ||
114 | /* IOMMU sizing */ | 114 | /* IOMMU sizing */ |
115 | #define IO_SEGMENT_SHIFT 28 | 115 | #define IO_SEGMENT_SHIFT 28 |
116 | #define IO_PAGENO_BITS (IO_SEGMENT_SHIFT - IOMMU_PAGE_SHIFT) | 116 | #define IO_PAGENO_BITS(shift) (IO_SEGMENT_SHIFT - (shift)) |
117 | 117 | ||
118 | /* The high bit needs to be set on every DMA address */ | 118 | /* The high bit needs to be set on every DMA address */ |
119 | #define SPIDER_DMA_OFFSET 0x80000000ul | 119 | #define SPIDER_DMA_OFFSET 0x80000000ul |
@@ -123,7 +123,6 @@ struct iommu_window { | |||
123 | struct cbe_iommu *iommu; | 123 | struct cbe_iommu *iommu; |
124 | unsigned long offset; | 124 | unsigned long offset; |
125 | unsigned long size; | 125 | unsigned long size; |
126 | unsigned long pte_offset; | ||
127 | unsigned int ioid; | 126 | unsigned int ioid; |
128 | struct iommu_table table; | 127 | struct iommu_table table; |
129 | }; | 128 | }; |
@@ -200,7 +199,7 @@ static void tce_build_cell(struct iommu_table *tbl, long index, long npages, | |||
200 | (window->ioid & IOPTE_IOID_Mask); | 199 | (window->ioid & IOPTE_IOID_Mask); |
201 | #endif | 200 | #endif |
202 | 201 | ||
203 | io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset); | 202 | io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); |
204 | 203 | ||
205 | for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE) | 204 | for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE) |
206 | io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask); | 205 | io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask); |
@@ -232,7 +231,7 @@ static void tce_free_cell(struct iommu_table *tbl, long index, long npages) | |||
232 | | (window->ioid & IOPTE_IOID_Mask); | 231 | | (window->ioid & IOPTE_IOID_Mask); |
233 | #endif | 232 | #endif |
234 | 233 | ||
235 | io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset); | 234 | io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); |
236 | 235 | ||
237 | for (i = 0; i < npages; i++) | 236 | for (i = 0; i < npages; i++) |
238 | io_pte[i] = pte; | 237 | io_pte[i] = pte; |
@@ -307,76 +306,84 @@ static int cell_iommu_find_ioc(int nid, unsigned long *base) | |||
307 | return -ENODEV; | 306 | return -ENODEV; |
308 | } | 307 | } |
309 | 308 | ||
310 | static void cell_iommu_setup_page_tables(struct cbe_iommu *iommu, | 309 | static void cell_iommu_setup_stab(struct cbe_iommu *iommu, |
311 | unsigned long dbase, unsigned long dsize, | 310 | unsigned long dbase, unsigned long dsize, |
312 | unsigned long fbase, unsigned long fsize) | 311 | unsigned long fbase, unsigned long fsize) |
313 | { | 312 | { |
314 | struct page *page; | 313 | struct page *page; |
315 | int i; | 314 | unsigned long segments, stab_size; |
316 | unsigned long reg, segments, pages_per_segment, ptab_size, stab_size, | ||
317 | n_pte_pages, base; | ||
318 | |||
319 | base = dbase; | ||
320 | if (fsize != 0) | ||
321 | base = min(fbase, dbase); | ||
322 | 315 | ||
323 | segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT; | 316 | segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT; |
324 | pages_per_segment = 1ull << IO_PAGENO_BITS; | ||
325 | 317 | ||
326 | pr_debug("%s: iommu[%d]: segments: %lu, pages per segment: %lu\n", | 318 | pr_debug("%s: iommu[%d]: segments: %lu\n", |
327 | __FUNCTION__, iommu->nid, segments, pages_per_segment); | 319 | __FUNCTION__, iommu->nid, segments); |
328 | 320 | ||
329 | /* set up the segment table */ | 321 | /* set up the segment table */ |
330 | stab_size = segments * sizeof(unsigned long); | 322 | stab_size = segments * sizeof(unsigned long); |
331 | page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size)); | 323 | page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size)); |
332 | BUG_ON(!page); | 324 | BUG_ON(!page); |
333 | iommu->stab = page_address(page); | 325 | iommu->stab = page_address(page); |
334 | clear_page(iommu->stab); | 326 | memset(iommu->stab, 0, stab_size); |
327 | } | ||
328 | |||
329 | static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu, | ||
330 | unsigned long base, unsigned long size, unsigned long gap_base, | ||
331 | unsigned long gap_size, unsigned long page_shift) | ||
332 | { | ||
333 | struct page *page; | ||
334 | int i; | ||
335 | unsigned long reg, segments, pages_per_segment, ptab_size, | ||
336 | n_pte_pages, start_seg, *ptab; | ||
337 | |||
338 | start_seg = base >> IO_SEGMENT_SHIFT; | ||
339 | segments = size >> IO_SEGMENT_SHIFT; | ||
340 | pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift); | ||
341 | /* PTEs for each segment must start on a 4K boundary */ | ||
342 | pages_per_segment = max(pages_per_segment, | ||
343 | (1 << 12) / sizeof(unsigned long)); | ||
335 | 344 | ||
336 | /* ... and the page tables. Since these are contiguous, we can treat | ||
337 | * the page tables as one array of ptes, like pSeries does. | ||
338 | */ | ||
339 | ptab_size = segments * pages_per_segment * sizeof(unsigned long); | 345 | ptab_size = segments * pages_per_segment * sizeof(unsigned long); |
340 | pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__, | 346 | pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__, |
341 | iommu->nid, ptab_size, get_order(ptab_size)); | 347 | iommu->nid, ptab_size, get_order(ptab_size)); |
342 | page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size)); | 348 | page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size)); |
343 | BUG_ON(!page); | 349 | BUG_ON(!page); |
344 | 350 | ||
345 | iommu->ptab = page_address(page); | 351 | ptab = page_address(page); |
346 | memset(iommu->ptab, 0, ptab_size); | 352 | memset(ptab, 0, ptab_size); |
347 | 353 | ||
348 | /* allocate a bogus page for the end of each mapping */ | 354 | /* number of 4K pages needed for a page table */ |
349 | page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0); | 355 | n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12; |
350 | BUG_ON(!page); | ||
351 | iommu->pad_page = page_address(page); | ||
352 | clear_page(iommu->pad_page); | ||
353 | |||
354 | /* number of pages needed for a page table */ | ||
355 | n_pte_pages = (pages_per_segment * | ||
356 | sizeof(unsigned long)) >> IOMMU_PAGE_SHIFT; | ||
357 | 356 | ||
358 | pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n", | 357 | pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n", |
359 | __FUNCTION__, iommu->nid, iommu->stab, iommu->ptab, | 358 | __FUNCTION__, iommu->nid, iommu->stab, ptab, |
360 | n_pte_pages); | 359 | n_pte_pages); |
361 | 360 | ||
362 | /* initialise the STEs */ | 361 | /* initialise the STEs */ |
363 | reg = IOSTE_V | ((n_pte_pages - 1) << 5); | 362 | reg = IOSTE_V | ((n_pte_pages - 1) << 5); |
364 | 363 | ||
365 | if (IOMMU_PAGE_SIZE == 0x1000) | 364 | switch (page_shift) { |
366 | reg |= IOSTE_PS_4K; | 365 | case 12: reg |= IOSTE_PS_4K; break; |
367 | else if (IOMMU_PAGE_SIZE == 0x10000) | 366 | case 16: reg |= IOSTE_PS_64K; break; |
368 | reg |= IOSTE_PS_64K; | 367 | case 20: reg |= IOSTE_PS_1M; break; |
369 | else { | 368 | case 24: reg |= IOSTE_PS_16M; break; |
370 | extern void __unknown_page_size_error(void); | 369 | default: BUG(); |
371 | __unknown_page_size_error(); | ||
372 | } | 370 | } |
373 | 371 | ||
372 | gap_base = gap_base >> IO_SEGMENT_SHIFT; | ||
373 | gap_size = gap_size >> IO_SEGMENT_SHIFT; | ||
374 | |||
374 | pr_debug("Setting up IOMMU stab:\n"); | 375 | pr_debug("Setting up IOMMU stab:\n"); |
375 | for (i = base >> IO_SEGMENT_SHIFT; i < segments; i++) { | 376 | for (i = start_seg; i < (start_seg + segments); i++) { |
376 | iommu->stab[i] = reg | | 377 | if (i >= gap_base && i < (gap_base + gap_size)) { |
377 | (__pa(iommu->ptab) + n_pte_pages * IOMMU_PAGE_SIZE * i); | 378 | pr_debug("\toverlap at %d, skipping\n", i); |
379 | continue; | ||
380 | } | ||
381 | iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) * | ||
382 | (i - start_seg)); | ||
378 | pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]); | 383 | pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]); |
379 | } | 384 | } |
385 | |||
386 | return ptab; | ||
380 | } | 387 | } |
381 | 388 | ||
382 | static void cell_iommu_enable_hardware(struct cbe_iommu *iommu) | 389 | static void cell_iommu_enable_hardware(struct cbe_iommu *iommu) |
@@ -423,7 +430,9 @@ static void cell_iommu_enable_hardware(struct cbe_iommu *iommu) | |||
423 | static void cell_iommu_setup_hardware(struct cbe_iommu *iommu, | 430 | static void cell_iommu_setup_hardware(struct cbe_iommu *iommu, |
424 | unsigned long base, unsigned long size) | 431 | unsigned long base, unsigned long size) |
425 | { | 432 | { |
426 | cell_iommu_setup_page_tables(iommu, base, size, 0, 0); | 433 | cell_iommu_setup_stab(iommu, base, size, 0, 0); |
434 | iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0, | ||
435 | IOMMU_PAGE_SHIFT); | ||
427 | cell_iommu_enable_hardware(iommu); | 436 | cell_iommu_enable_hardware(iommu); |
428 | } | 437 | } |
429 | 438 | ||
@@ -464,6 +473,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, | |||
464 | unsigned long pte_offset) | 473 | unsigned long pte_offset) |
465 | { | 474 | { |
466 | struct iommu_window *window; | 475 | struct iommu_window *window; |
476 | struct page *page; | ||
467 | u32 ioid; | 477 | u32 ioid; |
468 | 478 | ||
469 | ioid = cell_iommu_get_ioid(np); | 479 | ioid = cell_iommu_get_ioid(np); |
@@ -475,13 +485,11 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, | |||
475 | window->size = size; | 485 | window->size = size; |
476 | window->ioid = ioid; | 486 | window->ioid = ioid; |
477 | window->iommu = iommu; | 487 | window->iommu = iommu; |
478 | window->pte_offset = pte_offset; | ||
479 | 488 | ||
480 | window->table.it_blocksize = 16; | 489 | window->table.it_blocksize = 16; |
481 | window->table.it_base = (unsigned long)iommu->ptab; | 490 | window->table.it_base = (unsigned long)iommu->ptab; |
482 | window->table.it_index = iommu->nid; | 491 | window->table.it_index = iommu->nid; |
483 | window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + | 492 | window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset; |
484 | window->pte_offset; | ||
485 | window->table.it_size = size >> IOMMU_PAGE_SHIFT; | 493 | window->table.it_size = size >> IOMMU_PAGE_SHIFT; |
486 | 494 | ||
487 | iommu_init_table(&window->table, iommu->nid); | 495 | iommu_init_table(&window->table, iommu->nid); |
@@ -504,6 +512,11 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, | |||
504 | * This code also assumes that we have a window that starts at 0, | 512 | * This code also assumes that we have a window that starts at 0, |
505 | * which is the case on all spider based blades. | 513 | * which is the case on all spider based blades. |
506 | */ | 514 | */ |
515 | page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0); | ||
516 | BUG_ON(!page); | ||
517 | iommu->pad_page = page_address(page); | ||
518 | clear_page(iommu->pad_page); | ||
519 | |||
507 | __set_bit(0, window->table.it_map); | 520 | __set_bit(0, window->table.it_map); |
508 | tce_build_cell(&window->table, window->table.it_offset, 1, | 521 | tce_build_cell(&window->table, window->table.it_offset, 1, |
509 | (unsigned long)iommu->pad_page, DMA_TO_DEVICE); | 522 | (unsigned long)iommu->pad_page, DMA_TO_DEVICE); |
@@ -549,7 +562,7 @@ static void cell_dma_dev_setup_iommu(struct device *dev) | |||
549 | archdata->dma_data = &window->table; | 562 | archdata->dma_data = &window->table; |
550 | } | 563 | } |
551 | 564 | ||
552 | static void cell_dma_dev_setup_static(struct device *dev); | 565 | static void cell_dma_dev_setup_fixed(struct device *dev); |
553 | 566 | ||
554 | static void cell_dma_dev_setup(struct device *dev) | 567 | static void cell_dma_dev_setup(struct device *dev) |
555 | { | 568 | { |
@@ -557,7 +570,7 @@ static void cell_dma_dev_setup(struct device *dev) | |||
557 | 570 | ||
558 | /* Order is important here, these are not mutually exclusive */ | 571 | /* Order is important here, these are not mutually exclusive */ |
559 | if (get_dma_ops(dev) == &dma_iommu_fixed_ops) | 572 | if (get_dma_ops(dev) == &dma_iommu_fixed_ops) |
560 | cell_dma_dev_setup_static(dev); | 573 | cell_dma_dev_setup_fixed(dev); |
561 | else if (get_pci_dma_ops() == &dma_iommu_ops) | 574 | else if (get_pci_dma_ops() == &dma_iommu_ops) |
562 | cell_dma_dev_setup_iommu(dev); | 575 | cell_dma_dev_setup_iommu(dev); |
563 | else if (get_pci_dma_ops() == &dma_direct_ops) | 576 | else if (get_pci_dma_ops() == &dma_direct_ops) |
@@ -858,7 +871,7 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask) | |||
858 | return 0; | 871 | return 0; |
859 | } | 872 | } |
860 | 873 | ||
861 | static void cell_dma_dev_setup_static(struct device *dev) | 874 | static void cell_dma_dev_setup_fixed(struct device *dev) |
862 | { | 875 | { |
863 | struct dev_archdata *archdata = &dev->archdata; | 876 | struct dev_archdata *archdata = &dev->archdata; |
864 | u64 addr; | 877 | u64 addr; |
@@ -869,35 +882,45 @@ static void cell_dma_dev_setup_static(struct device *dev) | |||
869 | dev_dbg(dev, "iommu: fixed addr = %lx\n", addr); | 882 | dev_dbg(dev, "iommu: fixed addr = %lx\n", addr); |
870 | } | 883 | } |
871 | 884 | ||
885 | static void insert_16M_pte(unsigned long addr, unsigned long *ptab, | ||
886 | unsigned long base_pte) | ||
887 | { | ||
888 | unsigned long segment, offset; | ||
889 | |||
890 | segment = addr >> IO_SEGMENT_SHIFT; | ||
891 | offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24)); | ||
892 | ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long)); | ||
893 | |||
894 | pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n", | ||
895 | addr, ptab, segment, offset); | ||
896 | |||
897 | ptab[offset] = base_pte | (__pa(addr) & IOPTE_RPN_Mask); | ||
898 | } | ||
899 | |||
872 | static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu, | 900 | static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu, |
873 | struct device_node *np, unsigned long dbase, unsigned long dsize, | 901 | struct device_node *np, unsigned long dbase, unsigned long dsize, |
874 | unsigned long fbase, unsigned long fsize) | 902 | unsigned long fbase, unsigned long fsize) |
875 | { | 903 | { |
876 | unsigned long base_pte, uaddr, *io_pte; | 904 | unsigned long base_pte, uaddr, ioaddr, *ptab; |
877 | int i; | ||
878 | 905 | ||
879 | dma_iommu_fixed_base = fbase; | 906 | ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24); |
880 | 907 | ||
881 | /* convert from bytes into page table indices */ | 908 | dma_iommu_fixed_base = fbase; |
882 | dbase = dbase >> IOMMU_PAGE_SHIFT; | ||
883 | dsize = dsize >> IOMMU_PAGE_SHIFT; | ||
884 | fbase = fbase >> IOMMU_PAGE_SHIFT; | ||
885 | fsize = fsize >> IOMMU_PAGE_SHIFT; | ||
886 | 909 | ||
887 | pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase); | 910 | pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase); |
888 | 911 | ||
889 | io_pte = iommu->ptab; | ||
890 | base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW | 912 | base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW |
891 | | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask); | 913 | | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask); |
892 | 914 | ||
893 | uaddr = 0; | 915 | for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) { |
894 | for (i = fbase; i < fbase + fsize; i++, uaddr += IOMMU_PAGE_SIZE) { | ||
895 | /* Don't touch the dynamic region */ | 916 | /* Don't touch the dynamic region */ |
896 | if (i >= dbase && i < (dbase + dsize)) { | 917 | ioaddr = uaddr + fbase; |
897 | pr_debug("iommu: static/dynamic overlap, skipping\n"); | 918 | if (ioaddr >= dbase && ioaddr < (dbase + dsize)) { |
919 | pr_debug("iommu: fixed/dynamic overlap, skipping\n"); | ||
898 | continue; | 920 | continue; |
899 | } | 921 | } |
900 | io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask); | 922 | |
923 | insert_16M_pte(uaddr, ptab, base_pte); | ||
901 | } | 924 | } |
902 | 925 | ||
903 | mb(); | 926 | mb(); |
@@ -995,7 +1018,9 @@ static int __init cell_iommu_fixed_mapping_init(void) | |||
995 | "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase, | 1018 | "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase, |
996 | dbase + dsize, fbase, fbase + fsize); | 1019 | dbase + dsize, fbase, fbase + fsize); |
997 | 1020 | ||
998 | cell_iommu_setup_page_tables(iommu, dbase, dsize, fbase, fsize); | 1021 | cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize); |
1022 | iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0, | ||
1023 | IOMMU_PAGE_SHIFT); | ||
999 | cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize, | 1024 | cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize, |
1000 | fbase, fsize); | 1025 | fbase, fsize); |
1001 | cell_iommu_enable_hardware(iommu); | 1026 | cell_iommu_enable_hardware(iommu); |
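
For the new page-table sizing above, a small worked example may help (constants read straight from the hunk: IO_SEGMENT_SHIFT is 28 and the fixed mapping uses 16M pages, i.e. page_shift 24; the arithmetic assumes an 8-byte unsigned long, as on ppc64):

    #include <stdio.h>

    /* Sketch of the sizing done in cell_iommu_alloc_ptab(); not kernel code. */
    int main(void)
    {
            unsigned long io_segment_shift = 28;
            unsigned long page_shift = 24;          /* 16M fixed-mapping pages */
            unsigned long pages_per_segment = 1ul << (io_segment_shift - page_shift);

            printf("raw pages per 256M segment: %lu\n", pages_per_segment);  /* 16 */

            /* PTEs for each segment must start on a 4K boundary, so round up */
            if (pages_per_segment < (1ul << 12) / sizeof(unsigned long))
                    pages_per_segment = (1ul << 12) / sizeof(unsigned long);

            printf("rounded pages per segment: %lu (ptab bytes: %lu)\n",
                   pages_per_segment, pages_per_segment * sizeof(unsigned long));
            /* on an LP64 host: 512 pages, 4096 bytes, matching the 4K alignment */
            return 0;
    }
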
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index a7f609b3b876..dda34650cb07 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c | |||
@@ -149,6 +149,11 @@ static void __init cell_init_irq(void) | |||
149 | mpic_init_IRQ(); | 149 | mpic_init_IRQ(); |
150 | } | 150 | } |
151 | 151 | ||
152 | static void __init cell_set_dabrx(void) | ||
153 | { | ||
154 | mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); | ||
155 | } | ||
156 | |||
152 | static void __init cell_setup_arch(void) | 157 | static void __init cell_setup_arch(void) |
153 | { | 158 | { |
154 | #ifdef CONFIG_SPU_BASE | 159 | #ifdef CONFIG_SPU_BASE |
@@ -158,6 +163,8 @@ static void __init cell_setup_arch(void) | |||
158 | 163 | ||
159 | cbe_regs_init(); | 164 | cbe_regs_init(); |
160 | 165 | ||
166 | cell_set_dabrx(); | ||
167 | |||
161 | #ifdef CONFIG_CBE_RAS | 168 | #ifdef CONFIG_CBE_RAS |
162 | cbe_ras_init(); | 169 | cbe_ras_init(); |
163 | #endif | 170 | #endif |
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 87eb07f94c5f..712001f6b7da 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c | |||
@@ -81,9 +81,12 @@ struct spu_slb { | |||
81 | void spu_invalidate_slbs(struct spu *spu) | 81 | void spu_invalidate_slbs(struct spu *spu) |
82 | { | 82 | { |
83 | struct spu_priv2 __iomem *priv2 = spu->priv2; | 83 | struct spu_priv2 __iomem *priv2 = spu->priv2; |
84 | unsigned long flags; | ||
84 | 85 | ||
86 | spin_lock_irqsave(&spu->register_lock, flags); | ||
85 | if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) | 87 | if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) |
86 | out_be64(&priv2->slb_invalidate_all_W, 0UL); | 88 | out_be64(&priv2->slb_invalidate_all_W, 0UL); |
89 | spin_unlock_irqrestore(&spu->register_lock, flags); | ||
87 | } | 90 | } |
88 | EXPORT_SYMBOL_GPL(spu_invalidate_slbs); | 91 | EXPORT_SYMBOL_GPL(spu_invalidate_slbs); |
89 | 92 | ||
@@ -148,7 +151,11 @@ static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb) | |||
148 | __func__, slbe, slb->vsid, slb->esid); | 151 | __func__, slbe, slb->vsid, slb->esid); |
149 | 152 | ||
150 | out_be64(&priv2->slb_index_W, slbe); | 153 | out_be64(&priv2->slb_index_W, slbe); |
154 | /* set invalid before writing vsid */ | ||
155 | out_be64(&priv2->slb_esid_RW, 0); | ||
156 | /* now it's safe to write the vsid */ | ||
151 | out_be64(&priv2->slb_vsid_RW, slb->vsid); | 157 | out_be64(&priv2->slb_vsid_RW, slb->vsid); |
158 | /* setting the new esid makes the entry valid again */ | ||
152 | out_be64(&priv2->slb_esid_RW, slb->esid); | 159 | out_be64(&priv2->slb_esid_RW, slb->esid); |
153 | } | 160 | } |
154 | 161 | ||
@@ -290,9 +297,11 @@ void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa, | |||
290 | nr_slbs++; | 297 | nr_slbs++; |
291 | } | 298 | } |
292 | 299 | ||
300 | spin_lock_irq(&spu->register_lock); | ||
293 | /* Add the set of SLBs */ | 301 | /* Add the set of SLBs */ |
294 | for (i = 0; i < nr_slbs; i++) | 302 | for (i = 0; i < nr_slbs; i++) |
295 | spu_load_slb(spu, i, &slbs[i]); | 303 | spu_load_slb(spu, i, &slbs[i]); |
304 | spin_unlock_irq(&spu->register_lock); | ||
296 | } | 305 | } |
297 | EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs); | 306 | EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs); |
298 | 307 | ||
@@ -337,13 +346,14 @@ spu_irq_class_1(int irq, void *data) | |||
337 | if (stat & CLASS1_STORAGE_FAULT_INTR) | 346 | if (stat & CLASS1_STORAGE_FAULT_INTR) |
338 | spu_mfc_dsisr_set(spu, 0ul); | 347 | spu_mfc_dsisr_set(spu, 0ul); |
339 | spu_int_stat_clear(spu, 1, stat); | 348 | spu_int_stat_clear(spu, 1, stat); |
340 | spin_unlock(&spu->register_lock); | ||
341 | pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat, | ||
342 | dar, dsisr); | ||
343 | 349 | ||
344 | if (stat & CLASS1_SEGMENT_FAULT_INTR) | 350 | if (stat & CLASS1_SEGMENT_FAULT_INTR) |
345 | __spu_trap_data_seg(spu, dar); | 351 | __spu_trap_data_seg(spu, dar); |
346 | 352 | ||
353 | spin_unlock(&spu->register_lock); | ||
354 | pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat, | ||
355 | dar, dsisr); | ||
356 | |||
347 | if (stat & CLASS1_STORAGE_FAULT_INTR) | 357 | if (stat & CLASS1_STORAGE_FAULT_INTR) |
348 | __spu_trap_data_map(spu, dar, dsisr); | 358 | __spu_trap_data_map(spu, dar, dsisr); |
349 | 359 | ||
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index 133995ed5cc7..cf6c2c89211d 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c | |||
@@ -109,13 +109,12 @@ void spu_forget(struct spu_context *ctx) | |||
109 | 109 | ||
110 | /* | 110 | /* |
111 | * This is basically an open-coded spu_acquire_saved, except that | 111 | * This is basically an open-coded spu_acquire_saved, except that |
112 | * we don't acquire the state mutex interruptible. | 112 | * we don't acquire the state mutex interruptible, and we don't |
113 | * want this context to be rescheduled on release. | ||
113 | */ | 114 | */ |
114 | mutex_lock(&ctx->state_mutex); | 115 | mutex_lock(&ctx->state_mutex); |
115 | if (ctx->state != SPU_STATE_SAVED) { | 116 | if (ctx->state != SPU_STATE_SAVED) |
116 | set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags); | ||
117 | spu_deactivate(ctx); | 117 | spu_deactivate(ctx); |
118 | } | ||
119 | 118 | ||
120 | mm = ctx->owner; | 119 | mm = ctx->owner; |
121 | ctx->owner = NULL; | 120 | ctx->owner = NULL; |
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index c66c3756970d..f7a7e8635fb6 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c | |||
@@ -367,6 +367,13 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma, | |||
367 | return NOPFN_SIGBUS; | 367 | return NOPFN_SIGBUS; |
368 | 368 | ||
369 | /* | 369 | /* |
370 | * Because we release the mmap_sem, the context may be destroyed while | ||
371 | * we're in spu_wait. Grab an extra reference so it isn't destroyed | ||
372 | * in the meantime. | ||
373 | */ | ||
374 | get_spu_context(ctx); | ||
375 | |||
376 | /* | ||
370 | * We have to wait for context to be loaded before we have | 377 | * We have to wait for context to be loaded before we have |
371 | * pages to hand out to the user, but we don't want to wait | 378 | * pages to hand out to the user, but we don't want to wait |
372 | * with the mmap_sem held. | 379 | * with the mmap_sem held. |
@@ -375,7 +382,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma, | |||
375 | * hanged. | 382 | * hanged. |
376 | */ | 383 | */ |
377 | if (spu_acquire(ctx)) | 384 | if (spu_acquire(ctx)) |
378 | return NOPFN_REFAULT; | 385 | goto refault; |
379 | 386 | ||
380 | if (ctx->state == SPU_STATE_SAVED) { | 387 | if (ctx->state == SPU_STATE_SAVED) { |
381 | up_read(¤t->mm->mmap_sem); | 388 | up_read(¤t->mm->mmap_sem); |
@@ -391,6 +398,9 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma, | |||
391 | 398 | ||
392 | if (!ret) | 399 | if (!ret) |
393 | spu_release(ctx); | 400 | spu_release(ctx); |
401 | |||
402 | refault: | ||
403 | put_spu_context(ctx); | ||
394 | return NOPFN_REFAULT; | 404 | return NOPFN_REFAULT; |
395 | } | 405 | } |
396 | 406 | ||
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 3a5972117de7..5d5f680cd0b8 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c | |||
@@ -246,7 +246,7 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx) | |||
246 | spu_switch_notify(spu, ctx); | 246 | spu_switch_notify(spu, ctx); |
247 | ctx->state = SPU_STATE_RUNNABLE; | 247 | ctx->state = SPU_STATE_RUNNABLE; |
248 | 248 | ||
249 | spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED); | 249 | spuctx_switch_state(ctx, SPU_UTIL_USER); |
250 | } | 250 | } |
251 | 251 | ||
252 | /* | 252 | /* |
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c index 01974f7776e1..79aa773f3c99 100644 --- a/arch/powerpc/platforms/cell/spufs/sputrace.c +++ b/arch/powerpc/platforms/cell/spufs/sputrace.c | |||
@@ -58,12 +58,12 @@ static int sputrace_sprint(char *tbuf, int n) | |||
58 | ktime_to_timespec(ktime_sub(t->tstamp, sputrace_start)); | 58 | ktime_to_timespec(ktime_sub(t->tstamp, sputrace_start)); |
59 | 59 | ||
60 | return snprintf(tbuf, n, | 60 | return snprintf(tbuf, n, |
61 | "[%lu.%09lu] %d: %s (thread = %d, spu = %d)\n", | 61 | "[%lu.%09lu] %d: %s (ctxthread = %d, spu = %d)\n", |
62 | (unsigned long) tv.tv_sec, | 62 | (unsigned long) tv.tv_sec, |
63 | (unsigned long) tv.tv_nsec, | 63 | (unsigned long) tv.tv_nsec, |
64 | t->owner_tid, | ||
65 | t->name, | ||
66 | t->curr_tid, | 64 | t->curr_tid, |
65 | t->name, | ||
66 | t->owner_tid, | ||
67 | t->number); | 67 | t->number); |
68 | } | 68 | } |
69 | 69 | ||
@@ -188,6 +188,7 @@ struct spu_probe spu_probes[] = { | |||
188 | { "spufs_ps_nopfn__insert", "%p %p", spu_context_event }, | 188 | { "spufs_ps_nopfn__insert", "%p %p", spu_context_event }, |
189 | { "spu_acquire_saved__enter", "%p", spu_context_nospu_event }, | 189 | { "spu_acquire_saved__enter", "%p", spu_context_nospu_event }, |
190 | { "destroy_spu_context__enter", "%p", spu_context_nospu_event }, | 190 | { "destroy_spu_context__enter", "%p", spu_context_nospu_event }, |
191 | { "spufs_stop_callback__enter", "%p %p", spu_context_event }, | ||
191 | }; | 192 | }; |
192 | 193 | ||
193 | static int __init sputrace_init(void) | 194 | static int __init sputrace_init(void) |
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c index 6f5886c7b1f9..e9dc7a55d1b9 100644 --- a/arch/powerpc/platforms/cell/spufs/switch.c +++ b/arch/powerpc/platforms/cell/spufs/switch.c | |||
@@ -34,6 +34,7 @@ | |||
34 | 34 | ||
35 | #include <linux/module.h> | 35 | #include <linux/module.h> |
36 | #include <linux/errno.h> | 36 | #include <linux/errno.h> |
37 | #include <linux/hardirq.h> | ||
37 | #include <linux/sched.h> | 38 | #include <linux/sched.h> |
38 | #include <linux/kernel.h> | 39 | #include <linux/kernel.h> |
39 | #include <linux/mm.h> | 40 | #include <linux/mm.h> |
@@ -117,6 +118,8 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu) | |||
117 | * Write INT_MASK_class1 with value of 0. | 118 | * Write INT_MASK_class1 with value of 0. |
118 | * Save INT_Mask_class2 in CSA. | 119 | * Save INT_Mask_class2 in CSA. |
119 | * Write INT_MASK_class2 with value of 0. | 120 | * Write INT_MASK_class2 with value of 0. |
121 | * Synchronize all three interrupts to be sure | ||
122 | * we no longer execute a handler on another CPU. | ||
120 | */ | 123 | */ |
121 | spin_lock_irq(&spu->register_lock); | 124 | spin_lock_irq(&spu->register_lock); |
122 | if (csa) { | 125 | if (csa) { |
@@ -129,6 +132,9 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu) | |||
129 | spu_int_mask_set(spu, 2, 0ul); | 132 | spu_int_mask_set(spu, 2, 0ul); |
130 | eieio(); | 133 | eieio(); |
131 | spin_unlock_irq(&spu->register_lock); | 134 | spin_unlock_irq(&spu->register_lock); |
135 | synchronize_irq(spu->irqs[0]); | ||
136 | synchronize_irq(spu->irqs[1]); | ||
137 | synchronize_irq(spu->irqs[2]); | ||
132 | } | 138 | } |
133 | 139 | ||
134 | static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu) | 140 | static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu) |
diff --git a/arch/powerpc/platforms/celleb/beat.h b/arch/powerpc/platforms/celleb/beat.h index b2e292df13ca..ac82ac35b991 100644 --- a/arch/powerpc/platforms/celleb/beat.h +++ b/arch/powerpc/platforms/celleb/beat.h | |||
@@ -21,9 +21,6 @@ | |||
21 | #ifndef _CELLEB_BEAT_H | 21 | #ifndef _CELLEB_BEAT_H |
22 | #define _CELLEB_BEAT_H | 22 | #define _CELLEB_BEAT_H |
23 | 23 | ||
24 | #define DABRX_KERNEL (1UL<<1) | ||
25 | #define DABRX_USER (1UL<<0) | ||
26 | |||
27 | int64_t beat_get_term_char(uint64_t,uint64_t*,uint64_t*,uint64_t*); | 24 | int64_t beat_get_term_char(uint64_t,uint64_t*,uint64_t*,uint64_t*); |
28 | int64_t beat_put_term_char(uint64_t,uint64_t,uint64_t,uint64_t); | 25 | int64_t beat_put_term_char(uint64_t,uint64_t,uint64_t,uint64_t); |
29 | int64_t beat_repository_encode(int, const char *, uint64_t[4]); | 26 | int64_t beat_repository_encode(int, const char *, uint64_t[4]); |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index b21444b681b6..9892827b6176 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -61,6 +61,7 @@ config S390 | |||
61 | def_bool y | 61 | def_bool y |
62 | select HAVE_OPROFILE | 62 | select HAVE_OPROFILE |
63 | select HAVE_KPROBES | 63 | select HAVE_KPROBES |
64 | select HAVE_KRETPROBES | ||
64 | 65 | ||
65 | source "init/Kconfig" | 66 | source "init/Kconfig" |
66 | 67 | ||
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index b3400b5ad5c6..783cfbbf87ca 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -330,6 +330,7 @@ config CPU_SUBTYPE_SH5_101 | |||
330 | 330 | ||
331 | config CPU_SUBTYPE_SH5_103 | 331 | config CPU_SUBTYPE_SH5_103 |
332 | bool "Support SH5-103 processor" | 332 | bool "Support SH5-103 processor" |
333 | select CPU_SH5 | ||
333 | 334 | ||
334 | endchoice | 335 | endchoice |
335 | 336 | ||
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c index 5c3359756a92..71ff3d6f26e2 100644 --- a/arch/sh/drivers/dma/dma-sh.c +++ b/arch/sh/drivers/dma/dma-sh.c | |||
@@ -90,7 +90,7 @@ static irqreturn_t dma_tei(int irq, void *dev_id) | |||
90 | 90 | ||
91 | static int sh_dmac_request_dma(struct dma_channel *chan) | 91 | static int sh_dmac_request_dma(struct dma_channel *chan) |
92 | { | 92 | { |
93 | if (unlikely(!chan->flags & DMA_TEI_CAPABLE)) | 93 | if (unlikely(!(chan->flags & DMA_TEI_CAPABLE))) |
94 | return 0; | 94 | return 0; |
95 | 95 | ||
96 | return request_irq(get_dmte_irq(chan->chan), dma_tei, | 96 | return request_irq(get_dmte_irq(chan->chan), dma_tei, |
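
The dma-sh.c change above is a pure operator-precedence fix: `!` binds tighter than `&`, so the old test computed `(!chan->flags) & DMA_TEI_CAPABLE` instead of testing the capability bit, and the early return for non-TEI-capable channels was effectively never taken. A standalone sketch of the pitfall (the 0x02 bit value is hypothetical, chosen only for illustration; it is not the real DMA_TEI_CAPABLE definition):

    #include <stdio.h>

    #define DMA_TEI_CAPABLE 0x02            /* hypothetical bit, for illustration only */

    int main(void)
    {
            unsigned int flags = 0;         /* channel lacks the capability */

            /* Old form: parsed as (!flags) & 0x02, i.e. 1 & 2, which is 0,
             * so the "not capable" branch is skipped even though it should run. */
            if (!flags & DMA_TEI_CAPABLE)
                    printf("old test: not capable\n");      /* never printed */

            /* Fixed form: tests the intended bit and reports correctly. */
            if (!(flags & DMA_TEI_CAPABLE))
                    printf("fixed test: not capable\n");    /* printed */

            return 0;
    }
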
diff --git a/arch/sh/drivers/heartbeat.c b/arch/sh/drivers/heartbeat.c index b76a14f12ce2..ab77b0e0fa0e 100644 --- a/arch/sh/drivers/heartbeat.c +++ b/arch/sh/drivers/heartbeat.c | |||
@@ -93,7 +93,7 @@ static int heartbeat_drv_probe(struct platform_device *pdev) | |||
93 | } | 93 | } |
94 | 94 | ||
95 | hd->base = ioremap_nocache(res->start, res->end - res->start + 1); | 95 | hd->base = ioremap_nocache(res->start, res->end - res->start + 1); |
96 | if (!unlikely(hd->base)) { | 96 | if (unlikely(!hd->base)) { |
97 | dev_err(&pdev->dev, "ioremap failed\n"); | 97 | dev_err(&pdev->dev, "ioremap failed\n"); |
98 | 98 | ||
99 | if (!pdev->dev.platform_data) | 99 | if (!pdev->dev.platform_data) |
diff --git a/arch/sh/drivers/pci/ops-dreamcast.c b/arch/sh/drivers/pci/ops-dreamcast.c index 0dac87b19624..e1284fc69361 100644 --- a/arch/sh/drivers/pci/ops-dreamcast.c +++ b/arch/sh/drivers/pci/ops-dreamcast.c | |||
@@ -83,9 +83,9 @@ static int gapspci_read(struct pci_bus *bus, unsigned int devfn, int where, int | |||
83 | return PCIBIOS_DEVICE_NOT_FOUND; | 83 | return PCIBIOS_DEVICE_NOT_FOUND; |
84 | 84 | ||
85 | switch (size) { | 85 | switch (size) { |
86 | case 1: *val = ctrl_inb(GAPSPCI_BBA_CONFIG+where); break; | 86 | case 1: *val = inb(GAPSPCI_BBA_CONFIG+where); break; |
87 | case 2: *val = ctrl_inw(GAPSPCI_BBA_CONFIG+where); break; | 87 | case 2: *val = inw(GAPSPCI_BBA_CONFIG+where); break; |
88 | case 4: *val = ctrl_inl(GAPSPCI_BBA_CONFIG+where); break; | 88 | case 4: *val = inl(GAPSPCI_BBA_CONFIG+where); break; |
89 | } | 89 | } |
90 | 90 | ||
91 | return PCIBIOS_SUCCESSFUL; | 91 | return PCIBIOS_SUCCESSFUL; |
@@ -97,9 +97,9 @@ static int gapspci_write(struct pci_bus *bus, unsigned int devfn, int where, int | |||
97 | return PCIBIOS_DEVICE_NOT_FOUND; | 97 | return PCIBIOS_DEVICE_NOT_FOUND; |
98 | 98 | ||
99 | switch (size) { | 99 | switch (size) { |
100 | case 1: ctrl_outb(( u8)val, GAPSPCI_BBA_CONFIG+where); break; | 100 | case 1: outb(( u8)val, GAPSPCI_BBA_CONFIG+where); break; |
101 | case 2: ctrl_outw((u16)val, GAPSPCI_BBA_CONFIG+where); break; | 101 | case 2: outw((u16)val, GAPSPCI_BBA_CONFIG+where); break; |
102 | case 4: ctrl_outl((u32)val, GAPSPCI_BBA_CONFIG+where); break; | 102 | case 4: outl((u32)val, GAPSPCI_BBA_CONFIG+where); break; |
103 | } | 103 | } |
104 | 104 | ||
105 | return PCIBIOS_SUCCESSFUL; | 105 | return PCIBIOS_SUCCESSFUL; |
@@ -127,36 +127,36 @@ int __init gapspci_init(void) | |||
127 | */ | 127 | */ |
128 | 128 | ||
129 | for (i=0; i<16; i++) | 129 | for (i=0; i<16; i++) |
130 | idbuf[i] = ctrl_inb(GAPSPCI_REGS+i); | 130 | idbuf[i] = inb(GAPSPCI_REGS+i); |
131 | 131 | ||
132 | if (strncmp(idbuf, "GAPSPCI_BRIDGE_2", 16)) | 132 | if (strncmp(idbuf, "GAPSPCI_BRIDGE_2", 16)) |
133 | return -ENODEV; | 133 | return -ENODEV; |
134 | 134 | ||
135 | ctrl_outl(0x5a14a501, GAPSPCI_REGS+0x18); | 135 | outl(0x5a14a501, GAPSPCI_REGS+0x18); |
136 | 136 | ||
137 | for (i=0; i<1000000; i++) | 137 | for (i=0; i<1000000; i++) |
138 | ; | 138 | ; |
139 | 139 | ||
140 | if (ctrl_inl(GAPSPCI_REGS+0x18) != 1) | 140 | if (inl(GAPSPCI_REGS+0x18) != 1) |
141 | return -EINVAL; | 141 | return -EINVAL; |
142 | 142 | ||
143 | ctrl_outl(0x01000000, GAPSPCI_REGS+0x20); | 143 | outl(0x01000000, GAPSPCI_REGS+0x20); |
144 | ctrl_outl(0x01000000, GAPSPCI_REGS+0x24); | 144 | outl(0x01000000, GAPSPCI_REGS+0x24); |
145 | 145 | ||
146 | ctrl_outl(GAPSPCI_DMA_BASE, GAPSPCI_REGS+0x28); | 146 | outl(GAPSPCI_DMA_BASE, GAPSPCI_REGS+0x28); |
147 | ctrl_outl(GAPSPCI_DMA_BASE+GAPSPCI_DMA_SIZE, GAPSPCI_REGS+0x2c); | 147 | outl(GAPSPCI_DMA_BASE+GAPSPCI_DMA_SIZE, GAPSPCI_REGS+0x2c); |
148 | 148 | ||
149 | ctrl_outl(1, GAPSPCI_REGS+0x14); | 149 | outl(1, GAPSPCI_REGS+0x14); |
150 | ctrl_outl(1, GAPSPCI_REGS+0x34); | 150 | outl(1, GAPSPCI_REGS+0x34); |
151 | 151 | ||
152 | /* Setting Broadband Adapter */ | 152 | /* Setting Broadband Adapter */ |
153 | ctrl_outw(0xf900, GAPSPCI_BBA_CONFIG+0x06); | 153 | outw(0xf900, GAPSPCI_BBA_CONFIG+0x06); |
154 | ctrl_outl(0x00000000, GAPSPCI_BBA_CONFIG+0x30); | 154 | outl(0x00000000, GAPSPCI_BBA_CONFIG+0x30); |
155 | ctrl_outb(0x00, GAPSPCI_BBA_CONFIG+0x3c); | 155 | outb(0x00, GAPSPCI_BBA_CONFIG+0x3c); |
156 | ctrl_outb(0xf0, GAPSPCI_BBA_CONFIG+0x0d); | 156 | outb(0xf0, GAPSPCI_BBA_CONFIG+0x0d); |
157 | ctrl_outw(0x0006, GAPSPCI_BBA_CONFIG+0x04); | 157 | outw(0x0006, GAPSPCI_BBA_CONFIG+0x04); |
158 | ctrl_outl(0x00002001, GAPSPCI_BBA_CONFIG+0x10); | 158 | outl(0x00002001, GAPSPCI_BBA_CONFIG+0x10); |
159 | ctrl_outl(0x01000000, GAPSPCI_BBA_CONFIG+0x14); | 159 | outl(0x01000000, GAPSPCI_BBA_CONFIG+0x14); |
160 | 160 | ||
161 | return 0; | 161 | return 0; |
162 | } | 162 | } |
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c index b230eb278cef..cc530f4d84d6 100644 --- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/serial.h> | 12 | #include <linux/serial.h> |
13 | #include <asm/sci.h> | 13 | #include <linux/serial_sci.h> |
14 | 14 | ||
15 | enum { | 15 | enum { |
16 | UNUSED = 0, | 16 | UNUSED = 0, |
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c index 3feb95a4fcbc..fb781329848a 100644 --- a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c +++ b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c | |||
@@ -21,8 +21,8 @@ | |||
21 | #include <asm/freq.h> | 21 | #include <asm/freq.h> |
22 | #include <asm/io.h> | 22 | #include <asm/io.h> |
23 | 23 | ||
24 | const static int pll1rate[]={8,12,16,0}; | 24 | static const int pll1rate[]={8,12,16,0}; |
25 | const static int pfc_divisors[]={1,2,3,4,6,8,12}; | 25 | static const int pfc_divisors[]={1,2,3,4,6,8,12}; |
26 | #define ifc_divisors pfc_divisors | 26 | #define ifc_divisors pfc_divisors |
27 | 27 | ||
28 | #if (CONFIG_SH_CLK_MD == 0) | 28 | #if (CONFIG_SH_CLK_MD == 0) |
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c index db6ef5cecde1..e98dc4450352 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/serial.h> | 12 | #include <linux/serial.h> |
13 | #include <asm/sci.h> | 13 | #include <linux/serial_sci.h> |
14 | 14 | ||
15 | enum { | 15 | enum { |
16 | UNUSED = 0, | 16 | UNUSED = 0, |
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c index a564425b905f..e6d4ec445dd8 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/serial.h> | 12 | #include <linux/serial.h> |
13 | #include <asm/sci.h> | 13 | #include <linux/serial_sci.h> |
14 | 14 | ||
15 | enum { | 15 | enum { |
16 | UNUSED = 0, | 16 | UNUSED = 0, |
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c index fcc80bb7bee7..10f2a760c5ee 100644 --- a/arch/sh/kernel/cpu/sh3/probe.c +++ b/arch/sh/kernel/cpu/sh3/probe.c | |||
@@ -94,9 +94,9 @@ int __uses_jump_to_uncached detect_cpu_and_cache_system(void) | |||
94 | boot_cpu_data.dcache.way_incr = (1 << 13); | 94 | boot_cpu_data.dcache.way_incr = (1 << 13); |
95 | boot_cpu_data.dcache.entry_mask = 0x1ff0; | 95 | boot_cpu_data.dcache.entry_mask = 0x1ff0; |
96 | boot_cpu_data.dcache.sets = 512; | 96 | boot_cpu_data.dcache.sets = 512; |
97 | ctrl_outl(CCR_CACHE_32KB, CCR3); | 97 | ctrl_outl(CCR_CACHE_32KB, CCR3_REG); |
98 | #else | 98 | #else |
99 | ctrl_outl(CCR_CACHE_16KB, CCR3); | 99 | ctrl_outl(CCR_CACHE_16KB, CCR3_REG); |
100 | #endif | 100 | #endif |
101 | #endif | 101 | #endif |
102 | } | 102 | } |
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c index dd0a20a685f7..f581534cb732 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/irq.h> | 13 | #include <linux/irq.h> |
14 | #include <linux/serial.h> | 14 | #include <linux/serial.h> |
15 | #include <asm/sci.h> | 15 | #include <linux/serial_sci.h> |
16 | #include <asm/rtc.h> | 16 | #include <asm/rtc.h> |
17 | 17 | ||
18 | enum { | 18 | enum { |
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c index 969804bb523b..d3733b13ea52 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/irq.h> | 16 | #include <linux/irq.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/serial.h> | 18 | #include <linux/serial.h> |
19 | #include <asm/sci.h> | 19 | #include <linux/serial_sci.h> |
20 | 20 | ||
21 | enum { | 21 | enum { |
22 | UNUSED = 0, | 22 | UNUSED = 0, |
@@ -123,15 +123,15 @@ static struct resource rtc_resources[] = { | |||
123 | .flags = IORESOURCE_IO, | 123 | .flags = IORESOURCE_IO, |
124 | }, | 124 | }, |
125 | [1] = { | 125 | [1] = { |
126 | .start = 20, | 126 | .start = 21, |
127 | .flags = IORESOURCE_IRQ, | 127 | .flags = IORESOURCE_IRQ, |
128 | }, | 128 | }, |
129 | [2] = { | 129 | [2] = { |
130 | .start = 21, | 130 | .start = 22, |
131 | .flags = IORESOURCE_IRQ, | 131 | .flags = IORESOURCE_IRQ, |
132 | }, | 132 | }, |
133 | [3] = { | 133 | [3] = { |
134 | .start = 22, | 134 | .start = 20, |
135 | .flags = IORESOURCE_IRQ, | 135 | .flags = IORESOURCE_IRQ, |
136 | }, | 136 | }, |
137 | }; | 137 | }; |
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c index 0cc0e2bf135d..7406c9ad9259 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/irq.h> | 13 | #include <linux/irq.h> |
14 | #include <linux/serial.h> | 14 | #include <linux/serial.h> |
15 | #include <asm/sci.h> | 15 | #include <linux/serial_sci.h> |
16 | #include <asm/rtc.h> | 16 | #include <asm/rtc.h> |
17 | 17 | ||
18 | enum { | 18 | enum { |
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c index 3855ea4c21c8..8028082527c5 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/serial.h> | 17 | #include <linux/serial.h> |
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <asm/sci.h> | 19 | #include <linux/serial_sci.h> |
20 | #include <asm/rtc.h> | 20 | #include <asm/rtc.h> |
21 | 21 | ||
22 | #define INTC_ICR1 0xA4140010UL | 22 | #define INTC_ICR1 0xA4140010UL |
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c index dab193293f20..7371abf64f80 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/serial.h> | 12 | #include <linux/serial.h> |
13 | #include <asm/sci.h> | 13 | #include <linux/serial_sci.h> |
14 | 14 | ||
15 | static struct plat_sci_port sci_platform_data[] = { | 15 | static struct plat_sci_port sci_platform_data[] = { |
16 | { | 16 | { |
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index ae3603aca615..ec884039b914 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/serial.h> | 13 | #include <linux/serial.h> |
14 | #include <linux/io.h> | 14 | #include <linux/io.h> |
15 | #include <asm/sci.h> | 15 | #include <linux/serial_sci.h> |
16 | 16 | ||
17 | static struct resource rtc_resources[] = { | 17 | static struct resource rtc_resources[] = { |
18 | [0] = { | 18 | [0] = { |
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c index 85f81579b97e..254c5c55ab91 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/serial.h> | 12 | #include <linux/serial.h> |
13 | #include <asm/sci.h> | 13 | #include <linux/serial_sci.h> |
14 | 14 | ||
15 | enum { | 15 | enum { |
16 | UNUSED = 0, | 16 | UNUSED = 0, |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c index c0a3f079dfdc..6d4f50cd4aaf 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/serial.h> | 12 | #include <linux/serial.h> |
13 | #include <asm/sci.h> | 13 | #include <linux/serial_sci.h> |
14 | 14 | ||
15 | static struct plat_sci_port sci_platform_data[] = { | 15 | static struct plat_sci_port sci_platform_data[] = { |
16 | { | 16 | { |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c index 967e8b69a2f8..f26b5cdad0d1 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/platform_device.h> | 12 | #include <linux/platform_device.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/serial.h> | 14 | #include <linux/serial.h> |
15 | #include <asm/sci.h> | 15 | #include <linux/serial_sci.h> |
16 | 16 | ||
17 | static struct plat_sci_port sci_platform_data[] = { | 17 | static struct plat_sci_port sci_platform_data[] = { |
18 | { | 18 | { |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index 73c778d40d13..b98b4bc93ec9 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c | |||
@@ -10,9 +10,9 @@ | |||
10 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/serial.h> | 12 | #include <linux/serial.h> |
13 | #include <linux/serial_sci.h> | ||
13 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
14 | #include <asm/mmzone.h> | 15 | #include <asm/mmzone.h> |
15 | #include <asm/sci.h> | ||
16 | 16 | ||
17 | static struct resource usbf_resources[] = { | 17 | static struct resource usbf_resources[] = { |
18 | [0] = { | 18 | [0] = { |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c index eabd5386812d..07c988dc9de6 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/serial.h> | 13 | #include <linux/serial.h> |
14 | #include <linux/io.h> | 14 | #include <linux/io.h> |
15 | #include <asm/sci.h> | 15 | #include <linux/serial_sci.h> |
16 | 16 | ||
17 | static struct resource rtc_resources[] = { | 17 | static struct resource rtc_resources[] = { |
18 | [0] = { | 18 | [0] = { |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c index 32f4f59a837b..b9cec48b1808 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/serial.h> | 12 | #include <linux/serial.h> |
13 | #include <asm/sci.h> | 13 | #include <linux/serial_sci.h> |
14 | 14 | ||
15 | static struct plat_sci_port sci_platform_data[] = { | 15 | static struct plat_sci_port sci_platform_data[] = { |
16 | { | 16 | { |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c index 293004b526ff..18dbbe23fea1 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c | |||
@@ -11,7 +11,7 @@ | |||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/serial.h> | 12 | #include <linux/serial.h> |
13 | #include <linux/io.h> | 13 | #include <linux/io.h> |
14 | #include <asm/sci.h> | 14 | #include <linux/serial_sci.h> |
15 | 15 | ||
16 | static struct resource rtc_resources[] = { | 16 | static struct resource rtc_resources[] = { |
17 | [0] = { | 17 | [0] = { |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c index 74b60e96cdf4..621e7329ec63 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c | |||
@@ -10,10 +10,10 @@ | |||
10 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/serial.h> | 12 | #include <linux/serial.h> |
13 | #include <linux/serial_sci.h> | ||
13 | #include <linux/io.h> | 14 | #include <linux/io.h> |
14 | #include <linux/mm.h> | 15 | #include <linux/mm.h> |
15 | #include <asm/mmzone.h> | 16 | #include <asm/mmzone.h> |
16 | #include <asm/sci.h> | ||
17 | 17 | ||
18 | static struct plat_sci_port sci_platform_data[] = { | 18 | static struct plat_sci_port sci_platform_data[] = { |
19 | { | 19 | { |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c index 4dc958b6b314..bd35f32534b9 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c | |||
@@ -10,9 +10,9 @@ | |||
10 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/serial.h> | 12 | #include <linux/serial.h> |
13 | #include <linux/serial_sci.h> | ||
13 | #include <linux/io.h> | 14 | #include <linux/io.h> |
14 | #include <asm/mmzone.h> | 15 | #include <asm/mmzone.h> |
15 | #include <asm/sci.h> | ||
16 | 16 | ||
17 | static struct plat_sci_port sci_platform_data[] = { | 17 | static struct plat_sci_port sci_platform_data[] = { |
18 | { | 18 | { |
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index e795f282dece..bf1b15d3f6f5 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | # $Id: Makefile,v 1.62 2000/12/15 00:41:17 davem Exp $ | 1 | # |
2 | # Makefile for the linux kernel. | 2 | # Makefile for the linux kernel. |
3 | # | 3 | # |
4 | 4 | ||
@@ -12,7 +12,8 @@ obj-y := entry.o wof.o wuf.o etrap.o rtrap.o traps.o $(IRQ_OBJS) \ | |||
12 | sys_sparc.o sunos_asm.o systbls.o \ | 12 | sys_sparc.o sunos_asm.o systbls.o \ |
13 | time.o windows.o cpu.o devices.o sclow.o \ | 13 | time.o windows.o cpu.o devices.o sclow.o \ |
14 | tadpole.o tick14.o ptrace.o sys_solaris.o \ | 14 | tadpole.o tick14.o ptrace.o sys_solaris.o \ |
15 | unaligned.o muldiv.o semaphore.o prom.o of_device.o devres.o | 15 | unaligned.o una_asm.o muldiv.o semaphore.o \ |
16 | prom.o of_device.o devres.o | ||
16 | 17 | ||
17 | devres-y = ../../../kernel/irq/devres.o | 18 | devres-y = ../../../kernel/irq/devres.o |
18 | 19 | ||
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index 259a559d4cea..e7a0edfc1a32 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c | |||
@@ -32,7 +32,7 @@ struct cpu_fp_info { | |||
32 | /* In order to get the fpu type correct, you need to take the IDPROM's | 32 | /* In order to get the fpu type correct, you need to take the IDPROM's |
33 | * machine type value into consideration too. I will fix this. | 33 | * machine type value into consideration too. I will fix this. |
34 | */ | 34 | */ |
35 | struct cpu_fp_info linux_sparc_fpu[] = { | 35 | static struct cpu_fp_info linux_sparc_fpu[] = { |
36 | { 0, 0, "Fujitsu MB86910 or Weitek WTL1164/5"}, | 36 | { 0, 0, "Fujitsu MB86910 or Weitek WTL1164/5"}, |
37 | { 0, 1, "Fujitsu MB86911 or Weitek WTL1164/5 or LSI L64831"}, | 37 | { 0, 1, "Fujitsu MB86911 or Weitek WTL1164/5 or LSI L64831"}, |
38 | { 0, 2, "LSI Logic L64802 or Texas Instruments ACT8847"}, | 38 | { 0, 2, "LSI Logic L64802 or Texas Instruments ACT8847"}, |
@@ -76,7 +76,7 @@ struct cpu_fp_info linux_sparc_fpu[] = { | |||
76 | 76 | ||
77 | #define NSPARCFPU ARRAY_SIZE(linux_sparc_fpu) | 77 | #define NSPARCFPU ARRAY_SIZE(linux_sparc_fpu) |
78 | 78 | ||
79 | struct cpu_iu_info linux_sparc_chips[] = { | 79 | static struct cpu_iu_info linux_sparc_chips[] = { |
80 | /* Sun4/100, 4/200, SLC */ | 80 | /* Sun4/100, 4/200, SLC */ |
81 | { 0, 0, "Fujitsu MB86900/1A or LSI L64831 SparcKIT-40"}, | 81 | { 0, 0, "Fujitsu MB86900/1A or LSI L64831 SparcKIT-40"}, |
82 | /* borned STP1012PGA */ | 82 | /* borned STP1012PGA */ |
diff --git a/arch/sparc/kernel/ebus.c b/arch/sparc/kernel/ebus.c index d850785b2080..96344ff2bbe1 100644 --- a/arch/sparc/kernel/ebus.c +++ b/arch/sparc/kernel/ebus.c | |||
@@ -101,7 +101,7 @@ void __init fill_ebus_child(struct device_node *dp, | |||
101 | prom_printf("UGH: property for %s was %d, need < %d\n", | 101 | prom_printf("UGH: property for %s was %d, need < %d\n", |
102 | dev->prom_node->name, len, | 102 | dev->prom_node->name, len, |
103 | dev->parent->num_addrs); | 103 | dev->parent->num_addrs); |
104 | panic(__FUNCTION__); | 104 | panic(__func__); |
105 | } | 105 | } |
106 | 106 | ||
107 | /* XXX resource */ | 107 | /* XXX resource */ |
@@ -162,7 +162,7 @@ void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *d | |||
162 | prom_printf("UGH: proplen for %s was %d, need multiple of %d\n", | 162 | prom_printf("UGH: proplen for %s was %d, need multiple of %d\n", |
163 | dev->prom_node->name, len, | 163 | dev->prom_node->name, len, |
164 | (int)sizeof(struct linux_prom_registers)); | 164 | (int)sizeof(struct linux_prom_registers)); |
165 | panic(__FUNCTION__); | 165 | panic(__func__); |
166 | } | 166 | } |
167 | dev->num_addrs = len / sizeof(struct linux_prom_registers); | 167 | dev->num_addrs = len / sizeof(struct linux_prom_registers); |
168 | 168 | ||
@@ -324,7 +324,7 @@ void __init ebus_init(void) | |||
324 | regs = of_get_property(dp, "reg", &len); | 324 | regs = of_get_property(dp, "reg", &len); |
325 | if (!regs) { | 325 | if (!regs) { |
326 | prom_printf("%s: can't find reg property\n", | 326 | prom_printf("%s: can't find reg property\n", |
327 | __FUNCTION__); | 327 | __func__); |
328 | prom_halt(); | 328 | prom_halt(); |
329 | } | 329 | } |
330 | nreg = len / sizeof(struct linux_prom_pci_registers); | 330 | nreg = len / sizeof(struct linux_prom_pci_registers); |
diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c index 313d1620ae8e..59e9344e7a0d 100644 --- a/arch/sparc/kernel/led.c +++ b/arch/sparc/kernel/led.c | |||
@@ -3,6 +3,9 @@ | |||
3 | #include <linux/init.h> | 3 | #include <linux/init.h> |
4 | #include <linux/proc_fs.h> | 4 | #include <linux/proc_fs.h> |
5 | #include <linux/string.h> | 5 | #include <linux/string.h> |
6 | #include <linux/jiffies.h> | ||
7 | #include <linux/timer.h> | ||
8 | #include <linux/uaccess.h> | ||
6 | 9 | ||
7 | #include <asm/auxio.h> | 10 | #include <asm/auxio.h> |
8 | 11 | ||
diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c index 0bd69d0b5cd7..70c0dd22491d 100644 --- a/arch/sparc/kernel/process.c +++ b/arch/sparc/kernel/process.c | |||
@@ -139,8 +139,6 @@ void cpu_idle(void) | |||
139 | 139 | ||
140 | #endif | 140 | #endif |
141 | 141 | ||
142 | extern char reboot_command []; | ||
143 | |||
144 | /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */ | 142 | /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */ |
145 | void machine_halt(void) | 143 | void machine_halt(void) |
146 | { | 144 | { |
diff --git a/arch/sparc/kernel/una_asm.S b/arch/sparc/kernel/una_asm.S new file mode 100644 index 000000000000..8cc03458eb7e --- /dev/null +++ b/arch/sparc/kernel/una_asm.S | |||
@@ -0,0 +1,153 @@ | |||
1 | /* una_asm.S: Kernel unaligned trap assembler helpers. | ||
2 | * | ||
3 | * Copyright (C) 1996,2005,2008 David S. Miller (davem@davemloft.net) | ||
4 | * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
5 | */ | ||
6 | |||
7 | #include <linux/errno.h> | ||
8 | |||
9 | .text | ||
10 | |||
11 | retl_efault: | ||
12 | retl | ||
13 | mov -EFAULT, %o0 | ||
14 | |||
15 | /* int __do_int_store(unsigned long *dst_addr, int size, | ||
16 | * unsigned long *src_val) | ||
17 | * | ||
18 | * %o0 = dest_addr | ||
19 | * %o1 = size | ||
20 | * %o2 = src_val | ||
21 | * | ||
22 | * Return '0' on success, -EFAULT on failure. | ||
23 | */ | ||
24 | .globl __do_int_store | ||
25 | __do_int_store: | ||
26 | ld [%o2], %g1 | ||
27 | cmp %o1, 2 | ||
28 | be 2f | ||
29 | cmp %o1, 4 | ||
30 | be 1f | ||
31 | srl %g1, 24, %g2 | ||
32 | srl %g1, 16, %g7 | ||
33 | 4: stb %g2, [%o0] | ||
34 | srl %g1, 8, %g2 | ||
35 | 5: stb %g7, [%o0 + 1] | ||
36 | ld [%o2 + 4], %g7 | ||
37 | 6: stb %g2, [%o0 + 2] | ||
38 | srl %g7, 24, %g2 | ||
39 | 7: stb %g1, [%o0 + 3] | ||
40 | srl %g7, 16, %g1 | ||
41 | 8: stb %g2, [%o0 + 4] | ||
42 | srl %g7, 8, %g2 | ||
43 | 9: stb %g1, [%o0 + 5] | ||
44 | 10: stb %g2, [%o0 + 6] | ||
45 | b 0f | ||
46 | 11: stb %g7, [%o0 + 7] | ||
47 | 1: srl %g1, 16, %g7 | ||
48 | 12: stb %g2, [%o0] | ||
49 | srl %g1, 8, %g2 | ||
50 | 13: stb %g7, [%o0 + 1] | ||
51 | 14: stb %g2, [%o0 + 2] | ||
52 | b 0f | ||
53 | 15: stb %g1, [%o0 + 3] | ||
54 | 2: srl %g1, 8, %g2 | ||
55 | 16: stb %g2, [%o0] | ||
56 | 17: stb %g1, [%o0 + 1] | ||
57 | 0: retl | ||
58 | mov 0, %o0 | ||
59 | |||
60 | .section __ex_table,#alloc | ||
61 | .word 4b, retl_efault | ||
62 | .word 5b, retl_efault | ||
63 | .word 6b, retl_efault | ||
64 | .word 7b, retl_efault | ||
65 | .word 8b, retl_efault | ||
66 | .word 9b, retl_efault | ||
67 | .word 10b, retl_efault | ||
68 | .word 11b, retl_efault | ||
69 | .word 12b, retl_efault | ||
70 | .word 13b, retl_efault | ||
71 | .word 14b, retl_efault | ||
72 | .word 15b, retl_efault | ||
73 | .word 16b, retl_efault | ||
74 | .word 17b, retl_efault | ||
75 | .previous | ||
76 | |||
77 | /* int do_int_load(unsigned long *dest_reg, int size, | ||
78 | * unsigned long *saddr, int is_signed) | ||
79 | * | ||
80 | * %o0 = dest_reg | ||
81 | * %o1 = size | ||
82 | * %o2 = saddr | ||
83 | * %o3 = is_signed | ||
84 | * | ||
85 | * Return '0' on success, -EFAULT on failure. | ||
86 | */ | ||
87 | .globl do_int_load | ||
88 | do_int_load: | ||
89 | cmp %o1, 8 | ||
90 | be 9f | ||
91 | cmp %o1, 4 | ||
92 | be 6f | ||
93 | 4: ldub [%o2], %g1 | ||
94 | 5: ldub [%o2 + 1], %g2 | ||
95 | sll %g1, 8, %g1 | ||
96 | tst %o3 | ||
97 | be 3f | ||
98 | or %g1, %g2, %g1 | ||
99 | sll %g1, 16, %g1 | ||
100 | sra %g1, 16, %g1 | ||
101 | 3: b 0f | ||
102 | st %g1, [%o0] | ||
103 | 6: ldub [%o2 + 1], %g2 | ||
104 | sll %g1, 24, %g1 | ||
105 | 7: ldub [%o2 + 2], %g7 | ||
106 | sll %g2, 16, %g2 | ||
107 | 8: ldub [%o2 + 3], %g3 | ||
108 | sll %g7, 8, %g7 | ||
109 | or %g3, %g2, %g3 | ||
110 | or %g7, %g3, %g7 | ||
111 | or %g1, %g7, %g1 | ||
112 | b 0f | ||
113 | st %g1, [%o0] | ||
114 | 9: ldub [%o2], %g1 | ||
115 | 10: ldub [%o2 + 1], %g2 | ||
116 | sll %g1, 24, %g1 | ||
117 | 11: ldub [%o2 + 2], %g7 | ||
118 | sll %g2, 16, %g2 | ||
119 | 12: ldub [%o2 + 3], %g3 | ||
120 | sll %g7, 8, %g7 | ||
121 | or %g1, %g2, %g1 | ||
122 | or %g7, %g3, %g7 | ||
123 | or %g1, %g7, %g7 | ||
124 | 13: ldub [%o2 + 4], %g1 | ||
125 | st %g7, [%o0] | ||
126 | 14: ldub [%o2 + 5], %g2 | ||
127 | sll %g1, 24, %g1 | ||
128 | 15: ldub [%o2 + 6], %g7 | ||
129 | sll %g2, 16, %g2 | ||
130 | 16: ldub [%o2 + 7], %g3 | ||
131 | sll %g7, 8, %g7 | ||
132 | or %g1, %g2, %g1 | ||
133 | or %g7, %g3, %g7 | ||
134 | or %g1, %g7, %g7 | ||
135 | st %g7, [%o0 + 4] | ||
136 | 0: retl | ||
137 | mov 0, %o0 | ||
138 | |||
139 | .section __ex_table,#alloc | ||
140 | .word 4b, retl_efault | ||
141 | .word 5b, retl_efault | ||
142 | .word 6b, retl_efault | ||
143 | .word 7b, retl_efault | ||
144 | .word 8b, retl_efault | ||
145 | .word 9b, retl_efault | ||
146 | .word 10b, retl_efault | ||
147 | .word 11b, retl_efault | ||
148 | .word 12b, retl_efault | ||
149 | .word 13b, retl_efault | ||
150 | .word 14b, retl_efault | ||
151 | .word 15b, retl_efault | ||
152 | .word 16b, retl_efault | ||
153 | .previous | ||
diff --git a/arch/sparc/kernel/unaligned.c b/arch/sparc/kernel/unaligned.c index a6330fbc9dd9..33857be16661 100644 --- a/arch/sparc/kernel/unaligned.c +++ b/arch/sparc/kernel/unaligned.c | |||
@@ -175,157 +175,31 @@ static void unaligned_panic(char *str) | |||
175 | panic(str); | 175 | panic(str); |
176 | } | 176 | } |
177 | 177 | ||
178 | #define do_integer_load(dest_reg, size, saddr, is_signed, errh) ({ \ | 178 | /* una_asm.S */ |
179 | __asm__ __volatile__ ( \ | 179 | extern int do_int_load(unsigned long *dest_reg, int size, |
180 | "cmp %1, 8\n\t" \ | 180 | unsigned long *saddr, int is_signed); |
181 | "be 9f\n\t" \ | 181 | extern int __do_int_store(unsigned long *dst_addr, int size, |
182 | " cmp %1, 4\n\t" \ | 182 | unsigned long *src_val); |
183 | "be 6f\n" \ | 183 | |
184 | "4:\t" " ldub [%2], %%l1\n" \ | 184 | static int do_int_store(int reg_num, int size, unsigned long *dst_addr, |
185 | "5:\t" "ldub [%2 + 1], %%l2\n\t" \ | 185 | struct pt_regs *regs) |
186 | "sll %%l1, 8, %%l1\n\t" \ | 186 | { |
187 | "tst %3\n\t" \ | 187 | unsigned long zero[2] = { 0, 0 }; |
188 | "be 3f\n\t" \ | 188 | unsigned long *src_val; |
189 | " add %%l1, %%l2, %%l1\n\t" \ | 189 | |
190 | "sll %%l1, 16, %%l1\n\t" \ | 190 | if (reg_num) |
191 | "sra %%l1, 16, %%l1\n" \ | 191 | src_val = fetch_reg_addr(reg_num, regs); |
192 | "3:\t" "b 0f\n\t" \ | 192 | else { |
193 | " st %%l1, [%0]\n" \ | 193 | src_val = &zero[0]; |
194 | "6:\t" "ldub [%2 + 1], %%l2\n\t" \ | 194 | if (size == 8) |
195 | "sll %%l1, 24, %%l1\n" \ | 195 | zero[1] = fetch_reg(1, regs); |
196 | "7:\t" "ldub [%2 + 2], %%g7\n\t" \ | 196 | } |
197 | "sll %%l2, 16, %%l2\n" \ | 197 | return __do_int_store(dst_addr, size, src_val); |
198 | "8:\t" "ldub [%2 + 3], %%g1\n\t" \ | 198 | } |
199 | "sll %%g7, 8, %%g7\n\t" \ | ||
200 | "or %%l1, %%l2, %%l1\n\t" \ | ||
201 | "or %%g7, %%g1, %%g7\n\t" \ | ||
202 | "or %%l1, %%g7, %%l1\n\t" \ | ||
203 | "b 0f\n\t" \ | ||
204 | " st %%l1, [%0]\n" \ | ||
205 | "9:\t" "ldub [%2], %%l1\n" \ | ||
206 | "10:\t" "ldub [%2 + 1], %%l2\n\t" \ | ||
207 | "sll %%l1, 24, %%l1\n" \ | ||
208 | "11:\t" "ldub [%2 + 2], %%g7\n\t" \ | ||
209 | "sll %%l2, 16, %%l2\n" \ | ||
210 | "12:\t" "ldub [%2 + 3], %%g1\n\t" \ | ||
211 | "sll %%g7, 8, %%g7\n\t" \ | ||
212 | "or %%l1, %%l2, %%l1\n\t" \ | ||
213 | "or %%g7, %%g1, %%g7\n\t" \ | ||
214 | "or %%l1, %%g7, %%g7\n" \ | ||
215 | "13:\t" "ldub [%2 + 4], %%l1\n\t" \ | ||
216 | "st %%g7, [%0]\n" \ | ||
217 | "14:\t" "ldub [%2 + 5], %%l2\n\t" \ | ||
218 | "sll %%l1, 24, %%l1\n" \ | ||
219 | "15:\t" "ldub [%2 + 6], %%g7\n\t" \ | ||
220 | "sll %%l2, 16, %%l2\n" \ | ||
221 | "16:\t" "ldub [%2 + 7], %%g1\n\t" \ | ||
222 | "sll %%g7, 8, %%g7\n\t" \ | ||
223 | "or %%l1, %%l2, %%l1\n\t" \ | ||
224 | "or %%g7, %%g1, %%g7\n\t" \ | ||
225 | "or %%l1, %%g7, %%g7\n\t" \ | ||
226 | "st %%g7, [%0 + 4]\n" \ | ||
227 | "0:\n\n\t" \ | ||
228 | ".section __ex_table,#alloc\n\t" \ | ||
229 | ".word 4b, " #errh "\n\t" \ | ||
230 | ".word 5b, " #errh "\n\t" \ | ||
231 | ".word 6b, " #errh "\n\t" \ | ||
232 | ".word 7b, " #errh "\n\t" \ | ||
233 | ".word 8b, " #errh "\n\t" \ | ||
234 | ".word 9b, " #errh "\n\t" \ | ||
235 | ".word 10b, " #errh "\n\t" \ | ||
236 | ".word 11b, " #errh "\n\t" \ | ||
237 | ".word 12b, " #errh "\n\t" \ | ||
238 | ".word 13b, " #errh "\n\t" \ | ||
239 | ".word 14b, " #errh "\n\t" \ | ||
240 | ".word 15b, " #errh "\n\t" \ | ||
241 | ".word 16b, " #errh "\n\n\t" \ | ||
242 | ".previous\n\t" \ | ||
243 | : : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed) \ | ||
244 | : "l1", "l2", "g7", "g1", "cc"); \ | ||
245 | }) | ||
246 | |||
247 | #define store_common(dst_addr, size, src_val, errh) ({ \ | ||
248 | __asm__ __volatile__ ( \ | ||
249 | "ld [%2], %%l1\n" \ | ||
250 | "cmp %1, 2\n\t" \ | ||
251 | "be 2f\n\t" \ | ||
252 | " cmp %1, 4\n\t" \ | ||
253 | "be 1f\n\t" \ | ||
254 | " srl %%l1, 24, %%l2\n\t" \ | ||
255 | "srl %%l1, 16, %%g7\n" \ | ||
256 | "4:\t" "stb %%l2, [%0]\n\t" \ | ||
257 | "srl %%l1, 8, %%l2\n" \ | ||
258 | "5:\t" "stb %%g7, [%0 + 1]\n\t" \ | ||
259 | "ld [%2 + 4], %%g7\n" \ | ||
260 | "6:\t" "stb %%l2, [%0 + 2]\n\t" \ | ||
261 | "srl %%g7, 24, %%l2\n" \ | ||
262 | "7:\t" "stb %%l1, [%0 + 3]\n\t" \ | ||
263 | "srl %%g7, 16, %%l1\n" \ | ||
264 | "8:\t" "stb %%l2, [%0 + 4]\n\t" \ | ||
265 | "srl %%g7, 8, %%l2\n" \ | ||
266 | "9:\t" "stb %%l1, [%0 + 5]\n" \ | ||
267 | "10:\t" "stb %%l2, [%0 + 6]\n\t" \ | ||
268 | "b 0f\n" \ | ||
269 | "11:\t" " stb %%g7, [%0 + 7]\n" \ | ||
270 | "1:\t" "srl %%l1, 16, %%g7\n" \ | ||
271 | "12:\t" "stb %%l2, [%0]\n\t" \ | ||
272 | "srl %%l1, 8, %%l2\n" \ | ||
273 | "13:\t" "stb %%g7, [%0 + 1]\n" \ | ||
274 | "14:\t" "stb %%l2, [%0 + 2]\n\t" \ | ||
275 | "b 0f\n" \ | ||
276 | "15:\t" " stb %%l1, [%0 + 3]\n" \ | ||
277 | "2:\t" "srl %%l1, 8, %%l2\n" \ | ||
278 | "16:\t" "stb %%l2, [%0]\n" \ | ||
279 | "17:\t" "stb %%l1, [%0 + 1]\n" \ | ||
280 | "0:\n\n\t" \ | ||
281 | ".section __ex_table,#alloc\n\t" \ | ||
282 | ".word 4b, " #errh "\n\t" \ | ||
283 | ".word 5b, " #errh "\n\t" \ | ||
284 | ".word 6b, " #errh "\n\t" \ | ||
285 | ".word 7b, " #errh "\n\t" \ | ||
286 | ".word 8b, " #errh "\n\t" \ | ||
287 | ".word 9b, " #errh "\n\t" \ | ||
288 | ".word 10b, " #errh "\n\t" \ | ||
289 | ".word 11b, " #errh "\n\t" \ | ||
290 | ".word 12b, " #errh "\n\t" \ | ||
291 | ".word 13b, " #errh "\n\t" \ | ||
292 | ".word 14b, " #errh "\n\t" \ | ||
293 | ".word 15b, " #errh "\n\t" \ | ||
294 | ".word 16b, " #errh "\n\t" \ | ||
295 | ".word 17b, " #errh "\n\n\t" \ | ||
296 | ".previous\n\t" \ | ||
297 | : : "r" (dst_addr), "r" (size), "r" (src_val) \ | ||
298 | : "l1", "l2", "g7", "g1", "cc"); \ | ||
299 | }) | ||
300 | |||
301 | #define do_integer_store(reg_num, size, dst_addr, regs, errh) ({ \ | ||
302 | unsigned long *src_val; \ | ||
303 | static unsigned long zero[2] = { 0, }; \ | ||
304 | \ | ||
305 | if (reg_num) src_val = fetch_reg_addr(reg_num, regs); \ | ||
306 | else { \ | ||
307 | src_val = &zero[0]; \ | ||
308 | if (size == 8) \ | ||
309 | zero[1] = fetch_reg(1, regs); \ | ||
310 | } \ | ||
311 | store_common(dst_addr, size, src_val, errh); \ | ||
312 | }) | ||
313 | 199 | ||
314 | extern void smp_capture(void); | 200 | extern void smp_capture(void); |
315 | extern void smp_release(void); | 201 | extern void smp_release(void); |
316 | 202 | ||
317 | #define do_atomic(srcdest_reg, mem, errh) ({ \ | ||
318 | unsigned long flags, tmp; \ | ||
319 | \ | ||
320 | smp_capture(); \ | ||
321 | local_irq_save(flags); \ | ||
322 | tmp = *srcdest_reg; \ | ||
323 | do_integer_load(srcdest_reg, 4, mem, 0, errh); \ | ||
324 | store_common(mem, 4, &tmp, errh); \ | ||
325 | local_irq_restore(flags); \ | ||
326 | smp_release(); \ | ||
327 | }) | ||
328 | |||
329 | static inline void advance(struct pt_regs *regs) | 203 | static inline void advance(struct pt_regs *regs) |
330 | { | 204 | { |
331 | regs->pc = regs->npc; | 205 | regs->pc = regs->npc; |
@@ -342,9 +216,7 @@ static inline int ok_for_kernel(unsigned int insn) | |||
342 | return !floating_point_load_or_store_p(insn); | 216 | return !floating_point_load_or_store_p(insn); |
343 | } | 217 | } |
344 | 218 | ||
345 | void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault"); | 219 | static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) |
346 | |||
347 | void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) | ||
348 | { | 220 | { |
349 | unsigned long g2 = regs->u_regs [UREG_G2]; | 221 | unsigned long g2 = regs->u_regs [UREG_G2]; |
350 | unsigned long fixup = search_extables_range(regs->pc, &g2); | 222 | unsigned long fixup = search_extables_range(regs->pc, &g2); |
@@ -379,48 +251,34 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) | |||
379 | printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n", | 251 | printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n", |
380 | regs->pc); | 252 | regs->pc); |
381 | unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store."); | 253 | unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store."); |
382 | |||
383 | __asm__ __volatile__ ("\n" | ||
384 | "kernel_unaligned_trap_fault:\n\t" | ||
385 | "mov %0, %%o0\n\t" | ||
386 | "call kernel_mna_trap_fault\n\t" | ||
387 | " mov %1, %%o1\n\t" | ||
388 | : | ||
389 | : "r" (regs), "r" (insn) | ||
390 | : "o0", "o1", "o2", "o3", "o4", "o5", "o7", | ||
391 | "g1", "g2", "g3", "g4", "g5", "g7", "cc"); | ||
392 | } else { | 254 | } else { |
393 | unsigned long addr = compute_effective_address(regs, insn); | 255 | unsigned long addr = compute_effective_address(regs, insn); |
256 | int err; | ||
394 | 257 | ||
395 | #ifdef DEBUG_MNA | 258 | #ifdef DEBUG_MNA |
396 | printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n", | 259 | printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n", |
397 | regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]); | 260 | regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]); |
398 | #endif | 261 | #endif |
399 | switch(dir) { | 262 | switch (dir) { |
400 | case load: | 263 | case load: |
401 | do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs), | 264 | err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), |
402 | size, (unsigned long *) addr, | 265 | regs), |
403 | decode_signedness(insn), | 266 | size, (unsigned long *) addr, |
404 | kernel_unaligned_trap_fault); | 267 | decode_signedness(insn)); |
405 | break; | 268 | break; |
406 | 269 | ||
407 | case store: | 270 | case store: |
408 | do_integer_store(((insn>>25)&0x1f), size, | 271 | err = do_int_store(((insn>>25)&0x1f), size, |
409 | (unsigned long *) addr, regs, | 272 | (unsigned long *) addr, regs); |
410 | kernel_unaligned_trap_fault); | ||
411 | break; | 273 | break; |
412 | #if 0 /* unsupported */ | ||
413 | case both: | ||
414 | do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs), | ||
415 | (unsigned long *) addr, | ||
416 | kernel_unaligned_trap_fault); | ||
417 | break; | ||
418 | #endif | ||
419 | default: | 274 | default: |
420 | panic("Impossible kernel unaligned trap."); | 275 | panic("Impossible kernel unaligned trap."); |
421 | /* Not reached... */ | 276 | /* Not reached... */ |
422 | } | 277 | } |
423 | advance(regs); | 278 | if (err) |
279 | kernel_mna_trap_fault(regs, insn); | ||
280 | else | ||
281 | advance(regs); | ||
424 | } | 282 | } |
425 | } | 283 | } |
426 | 284 | ||
@@ -459,9 +317,7 @@ static inline int ok_for_user(struct pt_regs *regs, unsigned int insn, | |||
459 | return 0; | 317 | return 0; |
460 | } | 318 | } |
461 | 319 | ||
462 | void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("user_mna_trap_fault"); | 320 | static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn) |
463 | |||
464 | void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn) | ||
465 | { | 321 | { |
466 | siginfo_t info; | 322 | siginfo_t info; |
467 | 323 | ||
@@ -485,7 +341,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) | |||
485 | if(!ok_for_user(regs, insn, dir)) { | 341 | if(!ok_for_user(regs, insn, dir)) { |
486 | goto kill_user; | 342 | goto kill_user; |
487 | } else { | 343 | } else { |
488 | int size = decode_access_size(insn); | 344 | int err, size = decode_access_size(insn); |
489 | unsigned long addr; | 345 | unsigned long addr; |
490 | 346 | ||
491 | if(floating_point_load_or_store_p(insn)) { | 347 | if(floating_point_load_or_store_p(insn)) { |
@@ -496,48 +352,34 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) | |||
496 | addr = compute_effective_address(regs, insn); | 352 | addr = compute_effective_address(regs, insn); |
497 | switch(dir) { | 353 | switch(dir) { |
498 | case load: | 354 | case load: |
499 | do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs), | 355 | err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), |
500 | size, (unsigned long *) addr, | 356 | regs), |
501 | decode_signedness(insn), | 357 | size, (unsigned long *) addr, |
502 | user_unaligned_trap_fault); | 358 | decode_signedness(insn)); |
503 | break; | 359 | break; |
504 | 360 | ||
505 | case store: | 361 | case store: |
506 | do_integer_store(((insn>>25)&0x1f), size, | 362 | err = do_int_store(((insn>>25)&0x1f), size, |
507 | (unsigned long *) addr, regs, | 363 | (unsigned long *) addr, regs); |
508 | user_unaligned_trap_fault); | ||
509 | break; | 364 | break; |
510 | 365 | ||
511 | case both: | 366 | case both: |
512 | #if 0 /* unsupported */ | ||
513 | do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs), | ||
514 | (unsigned long *) addr, | ||
515 | user_unaligned_trap_fault); | ||
516 | #else | ||
517 | /* | 367 | /* |
518 | * This was supported in 2.4. However, we question | 368 | * This was supported in 2.4. However, we question |
519 | * the value of SWAP instruction across word boundaries. | 369 | * the value of SWAP instruction across word boundaries. |
520 | */ | 370 | */ |
521 | printk("Unaligned SWAP unsupported.\n"); | 371 | printk("Unaligned SWAP unsupported.\n"); |
522 | goto kill_user; | 372 | err = -EFAULT; |
523 | #endif | ||
524 | break; | 373 | break; |
525 | 374 | ||
526 | default: | 375 | default: |
527 | unaligned_panic("Impossible user unaligned trap."); | 376 | unaligned_panic("Impossible user unaligned trap."); |
528 | |||
529 | __asm__ __volatile__ ("\n" | ||
530 | "user_unaligned_trap_fault:\n\t" | ||
531 | "mov %0, %%o0\n\t" | ||
532 | "call user_mna_trap_fault\n\t" | ||
533 | " mov %1, %%o1\n\t" | ||
534 | : | ||
535 | : "r" (regs), "r" (insn) | ||
536 | : "o0", "o1", "o2", "o3", "o4", "o5", "o7", | ||
537 | "g1", "g2", "g3", "g4", "g5", "g7", "cc"); | ||
538 | goto out; | 377 | goto out; |
539 | } | 378 | } |
540 | advance(regs); | 379 | if (err) |
380 | goto kill_user; | ||
381 | else | ||
382 | advance(regs); | ||
541 | goto out; | 383 | goto out; |
542 | } | 384 | } |
543 | 385 | ||
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig index 3af378ddb6ae..463d1be32c98 100644 --- a/arch/sparc64/Kconfig +++ b/arch/sparc64/Kconfig | |||
@@ -10,6 +10,7 @@ config SPARC | |||
10 | default y | 10 | default y |
11 | select HAVE_OPROFILE | 11 | select HAVE_OPROFILE |
12 | select HAVE_KPROBES | 12 | select HAVE_KPROBES |
13 | select HAVE_KRETPROBES | ||
13 | 14 | ||
14 | config SPARC64 | 15 | config SPARC64 |
15 | bool | 16 | bool |
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c index e43db73f2b91..dd5d28e3d798 100644 --- a/arch/sparc64/kernel/cpu.c +++ b/arch/sparc64/kernel/cpu.c | |||
@@ -30,7 +30,7 @@ struct cpu_fp_info { | |||
30 | char* fp_name; | 30 | char* fp_name; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | struct cpu_fp_info linux_sparc_fpu[] = { | 33 | static struct cpu_fp_info linux_sparc_fpu[] = { |
34 | { 0x17, 0x10, 0, "UltraSparc I integrated FPU"}, | 34 | { 0x17, 0x10, 0, "UltraSparc I integrated FPU"}, |
35 | { 0x22, 0x10, 0, "UltraSparc I integrated FPU"}, | 35 | { 0x22, 0x10, 0, "UltraSparc I integrated FPU"}, |
36 | { 0x17, 0x11, 0, "UltraSparc II integrated FPU"}, | 36 | { 0x17, 0x11, 0, "UltraSparc II integrated FPU"}, |
@@ -46,7 +46,7 @@ struct cpu_fp_info linux_sparc_fpu[] = { | |||
46 | 46 | ||
47 | #define NSPARCFPU ARRAY_SIZE(linux_sparc_fpu) | 47 | #define NSPARCFPU ARRAY_SIZE(linux_sparc_fpu) |
48 | 48 | ||
49 | struct cpu_iu_info linux_sparc_chips[] = { | 49 | static struct cpu_iu_info linux_sparc_chips[] = { |
50 | { 0x17, 0x10, "TI UltraSparc I (SpitFire)"}, | 50 | { 0x17, 0x10, "TI UltraSparc I (SpitFire)"}, |
51 | { 0x22, 0x10, "TI UltraSparc I (SpitFire)"}, | 51 | { 0x22, 0x10, "TI UltraSparc I (SpitFire)"}, |
52 | { 0x17, 0x11, "TI UltraSparc II (BlackBird)"}, | 52 | { 0x17, 0x11, "TI UltraSparc II (BlackBird)"}, |
diff --git a/arch/sparc64/kernel/ds.c b/arch/sparc64/kernel/ds.c index eeb5a2fc788d..bd76482077be 100644 --- a/arch/sparc64/kernel/ds.c +++ b/arch/sparc64/kernel/ds.c | |||
@@ -525,10 +525,10 @@ static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus, | |||
525 | } | 525 | } |
526 | } | 526 | } |
527 | 527 | ||
528 | static int dr_cpu_configure(struct ds_info *dp, | 528 | static int __cpuinit dr_cpu_configure(struct ds_info *dp, |
529 | struct ds_cap_state *cp, | 529 | struct ds_cap_state *cp, |
530 | u64 req_num, | 530 | u64 req_num, |
531 | cpumask_t *mask) | 531 | cpumask_t *mask) |
532 | { | 532 | { |
533 | struct ds_data *resp; | 533 | struct ds_data *resp; |
534 | int resp_len, ncpus, cpu; | 534 | int resp_len, ncpus, cpu; |
@@ -623,9 +623,9 @@ static int dr_cpu_unconfigure(struct ds_info *dp, | |||
623 | return 0; | 623 | return 0; |
624 | } | 624 | } |
625 | 625 | ||
626 | static void dr_cpu_data(struct ds_info *dp, | 626 | static void __cpuinit dr_cpu_data(struct ds_info *dp, |
627 | struct ds_cap_state *cp, | 627 | struct ds_cap_state *cp, |
628 | void *buf, int len) | 628 | void *buf, int len) |
629 | { | 629 | { |
630 | struct ds_data *data = buf; | 630 | struct ds_data *data = buf; |
631 | struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); | 631 | struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); |
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c index 856659bb1311..910083589569 100644 --- a/arch/sparc64/kernel/mdesc.c +++ b/arch/sparc64/kernel/mdesc.c | |||
@@ -758,7 +758,7 @@ static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp, | |||
758 | get_one_mondo_bits(val, &tb->nonresum_qmask, 2); | 758 | get_one_mondo_bits(val, &tb->nonresum_qmask, 2); |
759 | } | 759 | } |
760 | 760 | ||
761 | void __devinit mdesc_fill_in_cpu_data(cpumask_t mask) | 761 | void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask) |
762 | { | 762 | { |
763 | struct mdesc_handle *hp = mdesc_grab(); | 763 | struct mdesc_handle *hp = mdesc_grab(); |
764 | u64 mp; | 764 | u64 mp; |
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index 2aafce7dfc0e..e116e38b160e 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c | |||
@@ -114,8 +114,6 @@ void cpu_idle(void) | |||
114 | } | 114 | } |
115 | } | 115 | } |
116 | 116 | ||
117 | extern char reboot_command []; | ||
118 | |||
119 | void machine_halt(void) | 117 | void machine_halt(void) |
120 | { | 118 | { |
121 | sstate_halt(); | 119 | sstate_halt(); |
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index e2027f27c0fe..2650d0d33ac2 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c | |||
@@ -244,16 +244,8 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, | |||
244 | if (regs->tstate & TSTATE_PRIV) { | 244 | if (regs->tstate & TSTATE_PRIV) { |
245 | const struct exception_table_entry *entry; | 245 | const struct exception_table_entry *entry; |
246 | 246 | ||
247 | if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) { | 247 | entry = search_exception_tables(regs->tpc); |
248 | if (insn & 0x2000) | 248 | if (entry) { |
249 | asi = (regs->tstate >> 24); | ||
250 | else | ||
251 | asi = (insn >> 5); | ||
252 | } | ||
253 | |||
254 | /* Look in asi.h: All _S asis have LS bit set */ | ||
255 | if ((asi & 0x1) && | ||
256 | (entry = search_exception_tables(regs->tpc))) { | ||
257 | regs->tpc = entry->fixup; | 249 | regs->tpc = entry->fixup; |
258 | regs->tnpc = regs->tpc + 4; | 250 | regs->tnpc = regs->tpc + 4; |
259 | return; | 251 | return; |
@@ -294,7 +286,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) | |||
294 | unsigned long tpc = regs->tpc; | 286 | unsigned long tpc = regs->tpc; |
295 | 287 | ||
296 | /* Sanity check the PC. */ | 288 | /* Sanity check the PC. */ |
297 | if ((tpc >= KERNBASE && tpc < (unsigned long) _etext) || | 289 | if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) || |
298 | (tpc >= MODULES_VADDR && tpc < MODULES_END)) { | 290 | (tpc >= MODULES_VADDR && tpc < MODULES_END)) { |
299 | /* Valid, no problems... */ | 291 | /* Valid, no problems... */ |
300 | } else { | 292 | } else { |
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 9e6bca266d88..b5c30416fdac 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c | |||
@@ -1010,7 +1010,8 @@ static struct linux_prom64_registers pall[MAX_BANKS] __initdata; | |||
1010 | static int pall_ents __initdata; | 1010 | static int pall_ents __initdata; |
1011 | 1011 | ||
1012 | #ifdef CONFIG_DEBUG_PAGEALLOC | 1012 | #ifdef CONFIG_DEBUG_PAGEALLOC |
1013 | static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot) | 1013 | static unsigned long __ref kernel_map_range(unsigned long pstart, |
1014 | unsigned long pend, pgprot_t prot) | ||
1014 | { | 1015 | { |
1015 | unsigned long vstart = PAGE_OFFSET + pstart; | 1016 | unsigned long vstart = PAGE_OFFSET + pstart; |
1016 | unsigned long vend = PAGE_OFFSET + pend; | 1017 | unsigned long vend = PAGE_OFFSET + pend; |
diff --git a/arch/sparc64/solaris/conv.h b/arch/sparc64/solaris/conv.h index 5faf59a9de39..50e58232cf2b 100644 --- a/arch/sparc64/solaris/conv.h +++ b/arch/sparc64/solaris/conv.h | |||
@@ -28,7 +28,7 @@ extern unsigned sunos_sys_table[]; | |||
28 | #define SUNOS(x) ((long)sunos_sys_table[x]) | 28 | #define SUNOS(x) ((long)sunos_sys_table[x]) |
29 | 29 | ||
30 | #ifdef DEBUG_SOLARIS | 30 | #ifdef DEBUG_SOLARIS |
31 | #define SOLD(s) printk("%s,%d,%s(): %s\n",__FILE__,__LINE__,__FUNCTION__,(s)) | 31 | #define SOLD(s) printk("%s,%d,%s(): %s\n",__FILE__,__LINE__,__func__,(s)) |
32 | #define SOLDD(s) printk("solaris: "); printk s | 32 | #define SOLDD(s) printk("solaris: "); printk s |
33 | #else | 33 | #else |
34 | #define SOLD(s) | 34 | #define SOLD(s) |
diff --git a/arch/sparc64/solaris/timod.c b/arch/sparc64/solaris/timod.c index f53123c02c2b..15234fcd191a 100644 --- a/arch/sparc64/solaris/timod.c +++ b/arch/sparc64/solaris/timod.c | |||
@@ -81,7 +81,7 @@ void mykfree(void *p) | |||
81 | #define MKCTL_MAGIC 0xDEADBABEBADC0DEDL | 81 | #define MKCTL_MAGIC 0xDEADBABEBADC0DEDL |
82 | #define PUT_MAGIC(a,m) do{(*(u64*)(a))=(m);}while(0) | 82 | #define PUT_MAGIC(a,m) do{(*(u64*)(a))=(m);}while(0) |
83 | #define SCHECK_MAGIC(a,m) do{if((*(u64*)(a))!=(m))printk("%s,%u,%s(): magic %08x at %p corrupted!\n",\ | 83 | #define SCHECK_MAGIC(a,m) do{if((*(u64*)(a))!=(m))printk("%s,%u,%s(): magic %08x at %p corrupted!\n",\ |
84 | __FILE__,__LINE__,__FUNCTION__,(m),(a));}while(0) | 84 | __FILE__,__LINE__,__func__,(m),(a));}while(0) |
85 | #define BUF_OFFSET sizeof(u64) | 85 | #define BUF_OFFSET sizeof(u64) |
86 | #define MKCTL_TRAILER sizeof(u64) | 86 | #define MKCTL_TRAILER sizeof(u64) |
87 | 87 | ||
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index fc50d2f959d1..e8cb9ff183e9 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c | |||
@@ -128,8 +128,6 @@ void *get_current(void) | |||
128 | return current; | 128 | return current; |
129 | } | 129 | } |
130 | 130 | ||
131 | extern void schedule_tail(struct task_struct *prev); | ||
132 | |||
133 | /* | 131 | /* |
134 | * This is called magically, by its address being stuffed in a jmp_buf | 132 | * This is called magically, by its address being stuffed in a jmp_buf |
135 | * and being longjmp-d to. | 133 | * and being longjmp-d to. |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 4a88cf7695b4..f41c9538ca30 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -21,7 +21,8 @@ config X86 | |||
21 | select HAVE_IDE | 21 | select HAVE_IDE |
22 | select HAVE_OPROFILE | 22 | select HAVE_OPROFILE |
23 | select HAVE_KPROBES | 23 | select HAVE_KPROBES |
24 | select HAVE_KVM | 24 | select HAVE_KRETPROBES |
25 | select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) | ||
25 | 26 | ||
26 | 27 | ||
27 | config GENERIC_LOCKBREAK | 28 | config GENERIC_LOCKBREAK |
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index e09a6b73a1aa..9304bfba7d45 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu | |||
@@ -377,6 +377,19 @@ config X86_OOSTORE | |||
377 | def_bool y | 377 | def_bool y |
378 | depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR | 378 | depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR |
379 | 379 | ||
380 | # | ||
381 | # P6_NOPs are a relatively minor optimization that require a family >= | ||
382 | # 6 processor, except that it is broken on certain VIA chips. | ||
383 | # Furthermore, AMD chips prefer a totally different sequence of NOPs | ||
384 | # (which work on all CPUs). As a result, disallow these if we're | ||
385 | # compiling X86_GENERIC but not X86_64 (these NOPs do work on all | ||
386 | # x86-64 capable chips); the list of processors in the right-hand clause | ||
387 | # are the cores that benefit from this optimization. | ||
388 | # | ||
389 | config X86_P6_NOP | ||
390 | def_bool y | ||
391 | depends on (X86_64 || !X86_GENERIC) && (M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4) | ||
392 | |||
380 | config X86_TSC | 393 | config X86_TSC |
381 | def_bool y | 394 | def_bool y |
382 | depends on ((MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ) || X86_64 | 395 | depends on ((MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ) || X86_64 |
@@ -390,6 +403,7 @@ config X86_CMOV | |||
390 | config X86_MINIMUM_CPU_FAMILY | 403 | config X86_MINIMUM_CPU_FAMILY |
391 | int | 404 | int |
392 | default "64" if X86_64 | 405 | default "64" if X86_64 |
406 | default "6" if X86_32 && X86_P6_NOP | ||
393 | default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK) | 407 | default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK) |
394 | default "3" | 408 | default "3" |
395 | 409 | ||
diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c index 378353956b5d..e77d89f9e8aa 100644 --- a/arch/x86/boot/memory.c +++ b/arch/x86/boot/memory.c | |||
@@ -37,6 +37,12 @@ static int detect_memory_e820(void) | |||
37 | "=m" (*desc) | 37 | "=m" (*desc) |
38 | : "D" (desc), "d" (SMAP), "a" (0xe820)); | 38 | : "D" (desc), "d" (SMAP), "a" (0xe820)); |
39 | 39 | ||
40 | /* BIOSes which terminate the chain with CF = 1 as opposed | ||
41 | to %ebx = 0 don't always report the SMAP signature on | ||
42 | the final, failing, probe. */ | ||
43 | if (err) | ||
44 | break; | ||
45 | |||
40 | /* Some BIOSes stop returning SMAP in the middle of | 46 | /* Some BIOSes stop returning SMAP in the middle of |
41 | the search loop. We don't know exactly how the BIOS | 47 | the search loop. We don't know exactly how the BIOS |
42 | screwed up the map at that point, we might have a | 48 | screwed up the map at that point, we might have a |
@@ -47,9 +53,6 @@ static int detect_memory_e820(void) | |||
47 | break; | 53 | break; |
48 | } | 54 | } |
49 | 55 | ||
50 | if (err) | ||
51 | break; | ||
52 | |||
53 | count++; | 56 | count++; |
54 | desc++; | 57 | desc++; |
55 | } while (next && count < E820MAX); | 58 | } while (next && count < E820MAX); |
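The detect_memory_e820() hunk above only reorders the two exit checks so that a set carry flag is honoured before the SMAP-signature test. Below is a stand-alone sketch of the resulting loop shape; the probe array, struct and helper names are invented for illustration, only the check ordering comes from the patch, and the reset to zero on a bad signature is an assumption about how the loop discards a suspect map, per the comment in the hunk.

#include <stdio.h>

#define SMAP 0x534d4150	/* "SMAP" magic the BIOS echoes back in %eax */

/* One probe result, reduced to the two fields the ordering cares about;
 * the struct and its field names are invented for this sketch. */
struct probe {
	int err;		/* carry flag set by the int 0x15 call */
	unsigned int sig;	/* %eax returned by the call */
};

/* Mimics the loop's exit logic: count entries until the chain ends. */
static int count_entries(const struct probe *p, int n)
{
	int count = 0;

	for (int i = 0; i < n; i++) {
		/* new order: honour CF first, since a failing terminator
		 * may not carry the SMAP signature at all */
		if (p[i].err)
			break;

		/* only then treat a missing signature as a broken map */
		if (p[i].sig != SMAP) {
			count = 0;
			break;
		}

		count++;
	}
	return count;
}

int main(void)
{
	/* two good probes, then a BIOS that terminates with CF=1 and junk */
	struct probe seq[] = { { 0, SMAP }, { 0, SMAP }, { 1, 0 } };

	printf("%d entries kept\n", count_entries(seq, 3));	/* prints 2 */
	return 0;
}

Run against this sequence, the old order would have hit the bad-signature branch on the terminating probe and discarded the whole map; checking err first keeps the two valid entries.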
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index a33d53017997..8ea040124f7d 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c | |||
@@ -128,13 +128,11 @@ void foo(void) | |||
128 | OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending); | 128 | OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending); |
129 | #endif | 129 | #endif |
130 | 130 | ||
131 | #ifdef CONFIG_LGUEST_GUEST | 131 | #if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE) |
132 | BLANK(); | 132 | BLANK(); |
133 | OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled); | 133 | OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled); |
134 | OFFSET(LGUEST_DATA_pgdir, lguest_data, pgdir); | 134 | OFFSET(LGUEST_DATA_pgdir, lguest_data, pgdir); |
135 | #endif | ||
136 | 135 | ||
137 | #ifdef CONFIG_LGUEST | ||
138 | BLANK(); | 136 | BLANK(); |
139 | OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc); | 137 | OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc); |
140 | OFFSET(LGUEST_PAGES_host_idt_desc, lguest_pages, state.host_idt_desc); | 138 | OFFSET(LGUEST_PAGES_host_idt_desc, lguest_pages, state.host_idt_desc); |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f86a3c4a2669..a38aafaefc23 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -504,7 +504,7 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
504 | 504 | ||
505 | /* Clear all flags overriden by options */ | 505 | /* Clear all flags overriden by options */ |
506 | for (i = 0; i < NCAPINTS; i++) | 506 | for (i = 0; i < NCAPINTS; i++) |
507 | c->x86_capability[i] ^= cleared_cpu_caps[i]; | 507 | c->x86_capability[i] &= ~cleared_cpu_caps[i]; |
508 | 508 | ||
509 | /* Init Machine Check Exception if available. */ | 509 | /* Init Machine Check Exception if available. */ |
510 | mcheck_init(c); | 510 | mcheck_init(c); |
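The identify_cpu() hunk above switches from XOR to a masked AND when applying cleared_cpu_caps. A minimal user-space sketch of why toggling is the wrong operation for clearing bits follows; the capability word and clear mask values are made up for illustration.

#include <stdio.h>

int main(void)
{
	unsigned int caps    = 0x5;	/* hypothetical capability word: bits 0 and 2 set */
	unsigned int cleared = 0x3;	/* hypothetical clear mask: bits 0 and 1 */

	/* old form: XOR toggles, so bit 1 (never set) would come on */
	printf("caps ^ cleared  = %#x\n", caps ^ cleared);	/* 0x6 */

	/* new form: AND with the complement only ever clears bits */
	printf("caps & ~cleared = %#x\n", caps & ~cleared);	/* 0x4 */

	return 0;
}

With XOR, any option-cleared bit the CPU never advertised would be turned on; the AND-with-complement form can only clear.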
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index b6e136f23d3d..be83336fddba 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <asm/uaccess.h> | 43 | #include <asm/uaccess.h> |
44 | #include <asm/processor.h> | 44 | #include <asm/processor.h> |
45 | #include <asm/msr.h> | 45 | #include <asm/msr.h> |
46 | #include <asm/kvm_para.h> | ||
46 | #include "mtrr.h" | 47 | #include "mtrr.h" |
47 | 48 | ||
48 | u32 num_var_ranges = 0; | 49 | u32 num_var_ranges = 0; |
@@ -649,6 +650,7 @@ static __init int amd_special_default_mtrr(void) | |||
649 | 650 | ||
650 | /** | 651 | /** |
651 | * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs | 652 | * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs |
653 | * @end_pfn: ending page frame number | ||
652 | * | 654 | * |
653 | * Some buggy BIOSes don't setup the MTRRs properly for systems with certain | 655 | * Some buggy BIOSes don't setup the MTRRs properly for systems with certain |
654 | * memory configurations. This routine checks that the highest MTRR matches | 656 | * memory configurations. This routine checks that the highest MTRR matches |
@@ -688,8 +690,11 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
688 | 690 | ||
689 | /* kvm/qemu doesn't have mtrr set right, don't trim them all */ | 691 | /* kvm/qemu doesn't have mtrr set right, don't trim them all */ |
690 | if (!highest_pfn) { | 692 | if (!highest_pfn) { |
691 | printk(KERN_WARNING "WARNING: strange, CPU MTRRs all blank?\n"); | 693 | if (!kvm_para_available()) { |
692 | WARN_ON(1); | 694 | printk(KERN_WARNING |
695 | "WARNING: strange, CPU MTRRs all blank?\n"); | ||
696 | WARN_ON(1); | ||
697 | } | ||
693 | return 0; | 698 | return 0; |
694 | } | 699 | } |
695 | 700 | ||
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index 200fb3f9ebfb..e8b422c1c512 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c | |||
@@ -76,13 +76,6 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
76 | /* All Transmeta CPUs have a constant TSC */ | 76 | /* All Transmeta CPUs have a constant TSC */ |
77 | set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); | 77 | set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); |
78 | 78 | ||
79 | /* If we can run i686 user-space code, call us an i686 */ | ||
80 | #define USER686 ((1 << X86_FEATURE_TSC)|\ | ||
81 | (1 << X86_FEATURE_CX8)|\ | ||
82 | (1 << X86_FEATURE_CMOV)) | ||
83 | if (c->x86 == 5 && (c->x86_capability[0] & USER686) == USER686) | ||
84 | c->x86 = 6; | ||
85 | |||
86 | #ifdef CONFIG_SYSCTL | 79 | #ifdef CONFIG_SYSCTL |
87 | /* randomize_va_space slows us down enormously; | 80 | /* randomize_va_space slows us down enormously; |
88 | it probably triggers retranslation of x86->native bytecode */ | 81 | it probably triggers retranslation of x86->native bytecode */ |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 2ad9a1bc6a73..c20c9e7e08dd 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -453,6 +453,7 @@ ENTRY(stub_execve) | |||
453 | CFI_REGISTER rip, r11 | 453 | CFI_REGISTER rip, r11 |
454 | SAVE_REST | 454 | SAVE_REST |
455 | FIXUP_TOP_OF_STACK %r11 | 455 | FIXUP_TOP_OF_STACK %r11 |
456 | movq %rsp, %rcx | ||
456 | call sys_execve | 457 | call sys_execve |
457 | RESTORE_TOP_OF_STACK %r11 | 458 | RESTORE_TOP_OF_STACK %r11 |
458 | movq %rax,RAX(%rsp) | 459 | movq %rax,RAX(%rsp) |
@@ -1036,15 +1037,16 @@ ENDPROC(child_rip) | |||
1036 | * rdi: name, rsi: argv, rdx: envp | 1037 | * rdi: name, rsi: argv, rdx: envp |
1037 | * | 1038 | * |
1038 | * We want to fallback into: | 1039 | * We want to fallback into: |
1039 | * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs) | 1040 | * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs) |
1040 | * | 1041 | * |
1041 | * do_sys_execve asm fallback arguments: | 1042 | * do_sys_execve asm fallback arguments: |
1042 | * rdi: name, rsi: argv, rdx: envp, fake frame on the stack | 1043 | * rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack |
1043 | */ | 1044 | */ |
1044 | ENTRY(kernel_execve) | 1045 | ENTRY(kernel_execve) |
1045 | CFI_STARTPROC | 1046 | CFI_STARTPROC |
1046 | FAKE_STACK_FRAME $0 | 1047 | FAKE_STACK_FRAME $0 |
1047 | SAVE_ALL | 1048 | SAVE_ALL |
1049 | movq %rsp,%rcx | ||
1048 | call sys_execve | 1050 | call sys_execve |
1049 | movq %rax, RAX(%rsp) | 1051 | movq %rax, RAX(%rsp) |
1050 | RESTORE_REST | 1052 | RESTORE_REST |
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 25eb98540a41..fd8ca53943a8 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -606,7 +606,7 @@ ENTRY(_stext) | |||
606 | .section ".bss.page_aligned","wa" | 606 | .section ".bss.page_aligned","wa" |
607 | .align PAGE_SIZE_asm | 607 | .align PAGE_SIZE_asm |
608 | #ifdef CONFIG_X86_PAE | 608 | #ifdef CONFIG_X86_PAE |
609 | ENTRY(swapper_pg_pmd) | 609 | swapper_pg_pmd: |
610 | .fill 1024*KPMDS,4,0 | 610 | .fill 1024*KPMDS,4,0 |
611 | #else | 611 | #else |
612 | ENTRY(swapper_pg_dir) | 612 | ENTRY(swapper_pg_dir) |
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index eb415043a929..a007454133a3 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S | |||
@@ -379,18 +379,24 @@ NEXT_PAGE(level2_ident_pgt) | |||
379 | /* Since I easily can, map the first 1G. | 379 | /* Since I easily can, map the first 1G. |
380 | * Don't set NX because code runs from these pages. | 380 | * Don't set NX because code runs from these pages. |
381 | */ | 381 | */ |
382 | PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD) | 382 | PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD) |
383 | 383 | ||
384 | NEXT_PAGE(level2_kernel_pgt) | 384 | NEXT_PAGE(level2_kernel_pgt) |
385 | /* 40MB kernel mapping. The kernel code cannot be bigger than that. | 385 | /* |
386 | When you change this change KERNEL_TEXT_SIZE in page.h too. */ | 386 | * 128 MB kernel mapping. We spend a full page on this pagetable |
387 | /* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */ | 387 | * anyway. |
388 | PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL, KERNEL_TEXT_SIZE/PMD_SIZE) | 388 | * |
389 | /* Module mapping starts here */ | 389 | * The kernel code+data+bss must not be bigger than that. |
390 | .fill (PTRS_PER_PMD - (KERNEL_TEXT_SIZE/PMD_SIZE)),8,0 | 390 | * |
391 | * (NOTE: at +128MB starts the module area, see MODULES_VADDR. | ||
392 | * If you want to increase this then increase MODULES_VADDR | ||
393 | * too.) | ||
394 | */ | ||
395 | PMDS(0, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL, | ||
396 | KERNEL_IMAGE_SIZE/PMD_SIZE) | ||
391 | 397 | ||
392 | NEXT_PAGE(level2_spare_pgt) | 398 | NEXT_PAGE(level2_spare_pgt) |
393 | .fill 512,8,0 | 399 | .fill 512, 8, 0 |
394 | 400 | ||
395 | #undef PMDS | 401 | #undef PMDS |
396 | #undef NEXT_PAGE | 402 | #undef NEXT_PAGE |
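
The rewritten comment above ties the level2_kernel_pgt fill to KERNEL_IMAGE_SIZE/PMD_SIZE, i.e. the number of 2 MB PMD entries needed to map a 128 MB kernel image, with the module area starting right after it. A quick arithmetic check of that relationship (the 2 MB and 128 MB figures are taken from the comment, not from the real headers):

    #include <stdio.h>

    int main(void)
    {
        unsigned long pmd_size   = 2UL << 20;    /* one PMD entry maps 2 MB  */
        unsigned long image_size = 128UL << 20;  /* 128 MB kernel mapping    */

        /* 64 of the 512 entries in the page are used; the rest stay zero. */
        printf("PMD entries used: %lu of 512\n", image_size / pmd_size);
        return 0;
    }
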
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 429d084e014d..235fd6c77504 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -368,8 +368,8 @@ static int hpet_clocksource_register(void) | |||
368 | return 0; | 368 | return 0; |
369 | } | 369 | } |
370 | 370 | ||
371 | /* | 371 | /** |
372 | * Try to setup the HPET timer | 372 | * hpet_enable - Try to setup the HPET timer. Returns 1 on success. |
373 | */ | 373 | */ |
374 | int __init hpet_enable(void) | 374 | int __init hpet_enable(void) |
375 | { | 375 | { |
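
The hpet.c hunk above only reshapes a plain comment into kernel-doc: a two-asterisk opener plus a first line of the form name - summary. Roughly what that shape looks like on an ordinary function (the function body here is just an illustrative stub):

    /**
     * demo_enable - try to set something up, returns 1 on success
     *
     * The two-asterisk opener and the "name - summary" first line are what
     * the kernel-doc tooling keys on; the rest is an ordinary comment.
     */
    static int demo_enable(void)
    {
        return 1; /* stub */
    }

    int main(void)
    {
        return demo_enable() ? 0 : 1;
    }
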
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 763dfc407232..60fe80157569 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -132,7 +132,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset, | |||
132 | if (!cpu_has_fxsr) | 132 | if (!cpu_has_fxsr) |
133 | return -ENODEV; | 133 | return -ENODEV; |
134 | 134 | ||
135 | unlazy_fpu(target); | 135 | init_fpu(target); |
136 | 136 | ||
137 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 137 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
138 | &target->thread.i387.fxsave, 0, -1); | 138 | &target->thread.i387.fxsave, 0, -1); |
@@ -147,7 +147,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
147 | if (!cpu_has_fxsr) | 147 | if (!cpu_has_fxsr) |
148 | return -ENODEV; | 148 | return -ENODEV; |
149 | 149 | ||
150 | unlazy_fpu(target); | 150 | init_fpu(target); |
151 | set_stopped_child_used_math(target); | 151 | set_stopped_child_used_math(target); |
152 | 152 | ||
153 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | 153 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
@@ -307,7 +307,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset, | |||
307 | if (!HAVE_HWFP) | 307 | if (!HAVE_HWFP) |
308 | return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf); | 308 | return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf); |
309 | 309 | ||
310 | unlazy_fpu(target); | 310 | init_fpu(target); |
311 | 311 | ||
312 | if (!cpu_has_fxsr) | 312 | if (!cpu_has_fxsr) |
313 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 313 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
@@ -332,7 +332,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
332 | if (!HAVE_HWFP) | 332 | if (!HAVE_HWFP) |
333 | return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf); | 333 | return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf); |
334 | 334 | ||
335 | unlazy_fpu(target); | 335 | init_fpu(target); |
336 | set_stopped_child_used_math(target); | 336 | set_stopped_child_used_math(target); |
337 | 337 | ||
338 | if (!cpu_has_fxsr) | 338 | if (!cpu_has_fxsr) |
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c index 5b3ce7934363..3d01e47777db 100644 --- a/arch/x86/kernel/init_task.c +++ b/arch/x86/kernel/init_task.c | |||
@@ -15,6 +15,7 @@ static struct files_struct init_files = INIT_FILES; | |||
15 | static struct signal_struct init_signals = INIT_SIGNALS(init_signals); | 15 | static struct signal_struct init_signals = INIT_SIGNALS(init_signals); |
16 | static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); | 16 | static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); |
17 | struct mm_struct init_mm = INIT_MM(init_mm); | 17 | struct mm_struct init_mm = INIT_MM(init_mm); |
18 | EXPORT_UNUSED_SYMBOL(init_mm); /* will be removed in 2.6.26 */ | ||
18 | 19 | ||
19 | /* | 20 | /* |
20 | * Initial thread structure. | 21 | * Initial thread structure. |
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index a7d50a547dc2..be3c7a299f02 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -603,11 +603,13 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |||
603 | } | 603 | } |
604 | #endif | 604 | #endif |
605 | 605 | ||
606 | #ifdef X86_BTS | ||
606 | if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) | 607 | if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) |
607 | ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); | 608 | ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); |
608 | 609 | ||
609 | if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS)) | 610 | if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS)) |
610 | ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES); | 611 | ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES); |
612 | #endif | ||
611 | 613 | ||
612 | 614 | ||
613 | if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { | 615 | if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index b0cc8f0136d8..3baf9b9f4c87 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -604,11 +604,13 @@ static inline void __switch_to_xtra(struct task_struct *prev_p, | |||
604 | memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); | 604 | memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); |
605 | } | 605 | } |
606 | 606 | ||
607 | #ifdef X86_BTS | ||
607 | if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) | 608 | if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) |
608 | ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); | 609 | ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); |
609 | 610 | ||
610 | if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS)) | 611 | if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS)) |
611 | ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES); | 612 | ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES); |
613 | #endif | ||
612 | } | 614 | } |
613 | 615 | ||
614 | /* | 616 | /* |
@@ -730,16 +732,16 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
730 | */ | 732 | */ |
731 | asmlinkage | 733 | asmlinkage |
732 | long sys_execve(char __user *name, char __user * __user *argv, | 734 | long sys_execve(char __user *name, char __user * __user *argv, |
733 | char __user * __user *envp, struct pt_regs regs) | 735 | char __user * __user *envp, struct pt_regs *regs) |
734 | { | 736 | { |
735 | long error; | 737 | long error; |
736 | char * filename; | 738 | char * filename; |
737 | 739 | ||
738 | filename = getname(name); | 740 | filename = getname(name); |
739 | error = PTR_ERR(filename); | 741 | error = PTR_ERR(filename); |
740 | if (IS_ERR(filename)) | 742 | if (IS_ERR(filename)) |
741 | return error; | 743 | return error; |
742 | error = do_execve(filename, argv, envp, ®s); | 744 | error = do_execve(filename, argv, envp, regs); |
743 | putname(filename); | 745 | putname(filename); |
744 | return error; | 746 | return error; |
745 | } | 747 | } |
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index d862e396b099..f41fdc98efb1 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -544,6 +544,8 @@ static int ptrace_set_debugreg(struct task_struct *child, | |||
544 | return 0; | 544 | return 0; |
545 | } | 545 | } |
546 | 546 | ||
547 | #ifdef X86_BTS | ||
548 | |||
547 | static int ptrace_bts_get_size(struct task_struct *child) | 549 | static int ptrace_bts_get_size(struct task_struct *child) |
548 | { | 550 | { |
549 | if (!child->thread.ds_area_msr) | 551 | if (!child->thread.ds_area_msr) |
@@ -826,6 +828,7 @@ void ptrace_bts_take_timestamp(struct task_struct *tsk, | |||
826 | 828 | ||
827 | ptrace_bts_write_record(tsk, &rec); | 829 | ptrace_bts_write_record(tsk, &rec); |
828 | } | 830 | } |
831 | #endif /* X86_BTS */ | ||
829 | 832 | ||
830 | /* | 833 | /* |
831 | * Called by kernel/ptrace.c when detaching.. | 834 | * Called by kernel/ptrace.c when detaching.. |
@@ -839,7 +842,9 @@ void ptrace_disable(struct task_struct *child) | |||
839 | clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); | 842 | clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); |
840 | #endif | 843 | #endif |
841 | if (child->thread.ds_area_msr) { | 844 | if (child->thread.ds_area_msr) { |
845 | #ifdef X86_BTS | ||
842 | ptrace_bts_realloc(child, 0, 0); | 846 | ptrace_bts_realloc(child, 0, 0); |
847 | #endif | ||
843 | child->thread.debugctlmsr &= ~ds_debugctl_mask(); | 848 | child->thread.debugctlmsr &= ~ds_debugctl_mask(); |
844 | if (!child->thread.debugctlmsr) | 849 | if (!child->thread.debugctlmsr) |
845 | clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR); | 850 | clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR); |
@@ -961,6 +966,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
961 | break; | 966 | break; |
962 | #endif | 967 | #endif |
963 | 968 | ||
969 | /* | ||
970 | * These bits need more cooking - not enabled yet: | ||
971 | */ | ||
972 | #ifdef X86_BTS | ||
964 | case PTRACE_BTS_CONFIG: | 973 | case PTRACE_BTS_CONFIG: |
965 | ret = ptrace_bts_config | 974 | ret = ptrace_bts_config |
966 | (child, data, (struct ptrace_bts_config __user *)addr); | 975 | (child, data, (struct ptrace_bts_config __user *)addr); |
@@ -988,6 +997,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
988 | ret = ptrace_bts_drain | 997 | ret = ptrace_bts_drain |
989 | (child, data, (struct bts_struct __user *) addr); | 998 | (child, data, (struct bts_struct __user *) addr); |
990 | break; | 999 | break; |
1000 | #endif | ||
991 | 1001 | ||
992 | default: | 1002 | default: |
993 | ret = ptrace_request(child, request, addr, data); | 1003 | ret = ptrace_request(child, request, addr, data); |
@@ -1226,12 +1236,14 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) | |||
1226 | case PTRACE_SETOPTIONS: | 1236 | case PTRACE_SETOPTIONS: |
1227 | case PTRACE_SET_THREAD_AREA: | 1237 | case PTRACE_SET_THREAD_AREA: |
1228 | case PTRACE_GET_THREAD_AREA: | 1238 | case PTRACE_GET_THREAD_AREA: |
1239 | #ifdef X86_BTS | ||
1229 | case PTRACE_BTS_CONFIG: | 1240 | case PTRACE_BTS_CONFIG: |
1230 | case PTRACE_BTS_STATUS: | 1241 | case PTRACE_BTS_STATUS: |
1231 | case PTRACE_BTS_SIZE: | 1242 | case PTRACE_BTS_SIZE: |
1232 | case PTRACE_BTS_GET: | 1243 | case PTRACE_BTS_GET: |
1233 | case PTRACE_BTS_CLEAR: | 1244 | case PTRACE_BTS_CLEAR: |
1234 | case PTRACE_BTS_DRAIN: | 1245 | case PTRACE_BTS_DRAIN: |
1246 | #endif | ||
1235 | return sys_ptrace(request, pid, addr, data); | 1247 | return sys_ptrace(request, pid, addr, data); |
1236 | 1248 | ||
1237 | default: | 1249 | default: |
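
The ptrace.c hunks above fence both the BTS helper functions and every PTRACE_BTS_* case label behind #ifdef X86_BTS, so the half-finished feature compiles out as one unit and unknown requests fall through to the default handler. A small sketch of the same gating pattern; FEATURE_FOO and the request names are made up for illustration:

    #include <stdio.h>

    /* #define FEATURE_FOO */     /* define this to compile the feature in */

    enum request { REQ_PING, REQ_FOO_START, REQ_FOO_STOP };

    #ifdef FEATURE_FOO
    static int handle_foo(enum request req)
    {
        return req == REQ_FOO_START ? 1 : 2;
    }
    #endif

    static int dispatch(enum request req)
    {
        switch (req) {
        case REQ_PING:
            return 0;
    #ifdef FEATURE_FOO
        case REQ_FOO_START:
        case REQ_FOO_STOP:
            return handle_foo(req);
    #endif
        default:
            return -1;  /* unknown or compiled-out request */
        }
    }

    int main(void)
    {
        printf("%d\n", dispatch(REQ_FOO_START)); /* -1 unless FEATURE_FOO is set */
        return 0;
    }
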
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index 6fd804f07821..7637dc91c79b 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c | |||
@@ -1021,7 +1021,7 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
1021 | 1021 | ||
1022 | /* Clear all flags overriden by options */ | 1022 | /* Clear all flags overriden by options */ |
1023 | for (i = 0; i < NCAPINTS; i++) | 1023 | for (i = 0; i < NCAPINTS; i++) |
1024 | c->x86_capability[i] ^= cleared_cpu_caps[i]; | 1024 | c->x86_capability[i] &= ~cleared_cpu_caps[i]; |
1025 | 1025 | ||
1026 | #ifdef CONFIG_X86_MCE | 1026 | #ifdef CONFIG_X86_MCE |
1027 | mcheck_init(c); | 1027 | mcheck_init(c); |
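
The one-line setup_64.c change above is a real bug fix: ^= toggles bits, so any capability bit that was already clear would be switched back on by the "clear" loop, whereas &= ~mask clears unconditionally and is idempotent. A two-line demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
        unsigned int caps    = 0x5;  /* bits 0 and 2 currently set      */
        unsigned int cleared = 0x3;  /* bits 0 and 1 should be cleared  */

        printf("xor:     0x%x\n", caps ^ cleared);   /* 0x6: bit 1 wrongly set */
        printf("and-not: 0x%x\n", caps & ~cleared);  /* 0x4: bits just cleared */
        return 0;
    }
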
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index d53bd6fcb428..0880f2c388a9 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c | |||
@@ -554,10 +554,10 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid) | |||
554 | int timeout; | 554 | int timeout; |
555 | unsigned long start_rip; | 555 | unsigned long start_rip; |
556 | struct create_idle c_idle = { | 556 | struct create_idle c_idle = { |
557 | .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle), | ||
558 | .cpu = cpu, | 557 | .cpu = cpu, |
559 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), | 558 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), |
560 | }; | 559 | }; |
560 | INIT_WORK(&c_idle.work, do_fork_idle); | ||
561 | 561 | ||
562 | /* allocate memory for gdts of secondary cpus. Hotplug is considered */ | 562 | /* allocate memory for gdts of secondary cpus. Hotplug is considered */ |
563 | if (!cpu_gdt_descr[cpu].address && | 563 | if (!cpu_gdt_descr[cpu].address && |
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 02f0f61f5b11..c28c342c162f 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c | |||
@@ -25,6 +25,8 @@ static int save_stack_stack(void *data, char *name) | |||
25 | static void save_stack_address(void *data, unsigned long addr, int reliable) | 25 | static void save_stack_address(void *data, unsigned long addr, int reliable) |
26 | { | 26 | { |
27 | struct stack_trace *trace = data; | 27 | struct stack_trace *trace = data; |
28 | if (!reliable) | ||
29 | return; | ||
28 | if (trace->skip > 0) { | 30 | if (trace->skip > 0) { |
29 | trace->skip--; | 31 | trace->skip--; |
30 | return; | 32 | return; |
@@ -37,6 +39,8 @@ static void | |||
37 | save_stack_address_nosched(void *data, unsigned long addr, int reliable) | 39 | save_stack_address_nosched(void *data, unsigned long addr, int reliable) |
38 | { | 40 | { |
39 | struct stack_trace *trace = (struct stack_trace *)data; | 41 | struct stack_trace *trace = (struct stack_trace *)data; |
42 | if (!reliable) | ||
43 | return; | ||
40 | if (in_sched_functions(addr)) | 44 | if (in_sched_functions(addr)) |
41 | return; | 45 | return; |
42 | if (trace->skip > 0) { | 46 | if (trace->skip > 0) { |
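
The stacktrace.c hunks above make both save callbacks discard addresses the unwinder marks as unreliable before any skip accounting or recording happens. The same callback shape in a self-contained form:

    #include <stdio.h>

    struct trace {
        unsigned long entries[8];
        unsigned int nr, max, skip;
    };

    /* Drop unreliable frames first, then honour the skip count, then record. */
    static void save_address(struct trace *t, unsigned long addr, int reliable)
    {
        if (!reliable)
            return;
        if (t->skip > 0) {
            t->skip--;
            return;
        }
        if (t->nr < t->max)
            t->entries[t->nr++] = addr;
    }

    int main(void)
    {
        struct trace t = { .max = 8, .skip = 1 };

        save_address(&t, 0x1000, 0); /* unreliable: dropped        */
        save_address(&t, 0x2000, 1); /* reliable: consumed by skip */
        save_address(&t, 0x3000, 1); /* reliable: recorded         */

        printf("recorded %u entries\n", t.nr);
        return 0;
    }
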
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c index 6dfd4e76661a..022bcaa3b42e 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c | |||
@@ -91,7 +91,9 @@ int do_set_thread_area(struct task_struct *p, int idx, | |||
91 | 91 | ||
92 | asmlinkage int sys_set_thread_area(struct user_desc __user *u_info) | 92 | asmlinkage int sys_set_thread_area(struct user_desc __user *u_info) |
93 | { | 93 | { |
94 | return do_set_thread_area(current, -1, u_info, 1); | 94 | int ret = do_set_thread_area(current, -1, u_info, 1); |
95 | prevent_tail_call(ret); | ||
96 | return ret; | ||
95 | } | 97 | } |
96 | 98 | ||
97 | 99 | ||
@@ -139,7 +141,9 @@ int do_get_thread_area(struct task_struct *p, int idx, | |||
139 | 141 | ||
140 | asmlinkage int sys_get_thread_area(struct user_desc __user *u_info) | 142 | asmlinkage int sys_get_thread_area(struct user_desc __user *u_info) |
141 | { | 143 | { |
142 | return do_get_thread_area(current, -1, u_info); | 144 | int ret = do_get_thread_area(current, -1, u_info); |
145 | prevent_tail_call(ret); | ||
146 | return ret; | ||
143 | } | 147 | } |
144 | 148 | ||
145 | int regset_tls_active(struct task_struct *target, | 149 | int regset_tls_active(struct task_struct *target, |
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c index 43517e324be8..f14cfd9d1f94 100644 --- a/arch/x86/kernel/tsc_32.c +++ b/arch/x86/kernel/tsc_32.c | |||
@@ -28,7 +28,8 @@ EXPORT_SYMBOL_GPL(tsc_khz); | |||
28 | static int __init tsc_setup(char *str) | 28 | static int __init tsc_setup(char *str) |
29 | { | 29 | { |
30 | printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, " | 30 | printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, " |
31 | "cannot disable TSC.\n"); | 31 | "cannot disable TSC completely.\n"); |
32 | mark_tsc_unstable("user disabled TSC"); | ||
32 | return 1; | 33 | return 1; |
33 | } | 34 | } |
34 | #else | 35 | #else |
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 3f8242774580..edff4c985485 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -44,11 +44,6 @@ | |||
44 | 44 | ||
45 | #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr))) | 45 | #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr))) |
46 | #define __syscall_clobber "r11","cx","memory" | 46 | #define __syscall_clobber "r11","cx","memory" |
47 | #define __pa_vsymbol(x) \ | ||
48 | ({unsigned long v; \ | ||
49 | extern char __vsyscall_0; \ | ||
50 | asm("" : "=r" (v) : "0" (x)); \ | ||
51 | ((v - VSYSCALL_START) + __pa_symbol(&__vsyscall_0)); }) | ||
52 | 47 | ||
53 | /* | 48 | /* |
54 | * vsyscall_gtod_data contains data that is : | 49 | * vsyscall_gtod_data contains data that is : |
@@ -102,7 +97,7 @@ static __always_inline void do_get_tz(struct timezone * tz) | |||
102 | static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz) | 97 | static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz) |
103 | { | 98 | { |
104 | int ret; | 99 | int ret; |
105 | asm volatile("vsysc2: syscall" | 100 | asm volatile("syscall" |
106 | : "=a" (ret) | 101 | : "=a" (ret) |
107 | : "0" (__NR_gettimeofday),"D" (tv),"S" (tz) | 102 | : "0" (__NR_gettimeofday),"D" (tv),"S" (tz) |
108 | : __syscall_clobber ); | 103 | : __syscall_clobber ); |
@@ -112,7 +107,7 @@ static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz) | |||
112 | static __always_inline long time_syscall(long *t) | 107 | static __always_inline long time_syscall(long *t) |
113 | { | 108 | { |
114 | long secs; | 109 | long secs; |
115 | asm volatile("vsysc1: syscall" | 110 | asm volatile("syscall" |
116 | : "=a" (secs) | 111 | : "=a" (secs) |
117 | : "0" (__NR_time),"D" (t) : __syscall_clobber); | 112 | : "0" (__NR_time),"D" (t) : __syscall_clobber); |
118 | return secs; | 113 | return secs; |
@@ -228,42 +223,11 @@ long __vsyscall(3) venosys_1(void) | |||
228 | 223 | ||
229 | #ifdef CONFIG_SYSCTL | 224 | #ifdef CONFIG_SYSCTL |
230 | 225 | ||
231 | #define SYSCALL 0x050f | 226 | static int |
232 | #define NOP2 0x9090 | 227 | vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp, |
233 | 228 | void __user *buffer, size_t *lenp, loff_t *ppos) | |
234 | /* | ||
235 | * NOP out syscall in vsyscall page when not needed. | ||
236 | */ | ||
237 | static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp, | ||
238 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
239 | { | 229 | { |
240 | extern u16 vsysc1, vsysc2; | 230 | return proc_dointvec(ctl, write, filp, buffer, lenp, ppos); |
241 | u16 __iomem *map1; | ||
242 | u16 __iomem *map2; | ||
243 | int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos); | ||
244 | if (!write) | ||
245 | return ret; | ||
246 | /* gcc has some trouble with __va(__pa()), so just do it this | ||
247 | way. */ | ||
248 | map1 = ioremap(__pa_vsymbol(&vsysc1), 2); | ||
249 | if (!map1) | ||
250 | return -ENOMEM; | ||
251 | map2 = ioremap(__pa_vsymbol(&vsysc2), 2); | ||
252 | if (!map2) { | ||
253 | ret = -ENOMEM; | ||
254 | goto out; | ||
255 | } | ||
256 | if (!vsyscall_gtod_data.sysctl_enabled) { | ||
257 | writew(SYSCALL, map1); | ||
258 | writew(SYSCALL, map2); | ||
259 | } else { | ||
260 | writew(NOP2, map1); | ||
261 | writew(NOP2, map2); | ||
262 | } | ||
263 | iounmap(map2); | ||
264 | out: | ||
265 | iounmap(map1); | ||
266 | return ret; | ||
267 | } | 231 | } |
268 | 232 | ||
269 | static ctl_table kernel_table2[] = { | 233 | static ctl_table kernel_table2[] = { |
@@ -279,7 +243,6 @@ static ctl_table kernel_root_table2[] = { | |||
279 | .child = kernel_table2 }, | 243 | .child = kernel_table2 }, |
280 | {} | 244 | {} |
281 | }; | 245 | }; |
282 | |||
283 | #endif | 246 | #endif |
284 | 247 | ||
285 | /* Assume __initcall executes before all user space. Hopefully kmod | 248 | /* Assume __initcall executes before all user space. Hopefully kmod |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 2cbee9479ce4..68a6b1511934 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -647,6 +647,10 @@ static void start_apic_timer(struct kvm_lapic *apic) | |||
647 | apic->timer.period = apic_get_reg(apic, APIC_TMICT) * | 647 | apic->timer.period = apic_get_reg(apic, APIC_TMICT) * |
648 | APIC_BUS_CYCLE_NS * apic->timer.divide_count; | 648 | APIC_BUS_CYCLE_NS * apic->timer.divide_count; |
649 | atomic_set(&apic->timer.pending, 0); | 649 | atomic_set(&apic->timer.pending, 0); |
650 | |||
651 | if (!apic->timer.period) | ||
652 | return; | ||
653 | |||
650 | hrtimer_start(&apic->timer.dev, | 654 | hrtimer_start(&apic->timer.dev, |
651 | ktime_add_ns(now, apic->timer.period), | 655 | ktime_add_ns(now, apic->timer.period), |
652 | HRTIMER_MODE_ABS); | 656 | HRTIMER_MODE_ABS); |
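
The lapic.c hunk above bails out before hrtimer_start() when the computed timer period is zero, so the emulated APIC timer is simply not armed rather than being set to expire immediately. The guard in a generic form; arm_timer() below is a hypothetical helper, not a KVM interface:

    #include <stdio.h>

    /* Hypothetical back end that would actually arm a one-shot timer. */
    static void arm_timer(unsigned long long expires_ns)
    {
        printf("timer armed for +%llu ns\n", expires_ns);
    }

    static void start_periodic(unsigned int initial_count, unsigned int ns_per_tick)
    {
        unsigned long long period = (unsigned long long)initial_count * ns_per_tick;

        if (!period)    /* nothing sensible to arm */
            return;

        arm_timer(period);
    }

    int main(void)
    {
        start_periodic(0, 100);     /* silently ignored    */
        start_periodic(1000, 100);  /* armed for 100000 ns */
        return 0;
    }
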
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 8efdcdbebb03..d8172aabc660 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -681,8 +681,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, | |||
681 | unsigned level, | 681 | unsigned level, |
682 | int metaphysical, | 682 | int metaphysical, |
683 | unsigned access, | 683 | unsigned access, |
684 | u64 *parent_pte, | 684 | u64 *parent_pte) |
685 | bool *new_page) | ||
686 | { | 685 | { |
687 | union kvm_mmu_page_role role; | 686 | union kvm_mmu_page_role role; |
688 | unsigned index; | 687 | unsigned index; |
@@ -722,8 +721,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, | |||
722 | vcpu->arch.mmu.prefetch_page(vcpu, sp); | 721 | vcpu->arch.mmu.prefetch_page(vcpu, sp); |
723 | if (!metaphysical) | 722 | if (!metaphysical) |
724 | rmap_write_protect(vcpu->kvm, gfn); | 723 | rmap_write_protect(vcpu->kvm, gfn); |
725 | if (new_page) | ||
726 | *new_page = 1; | ||
727 | return sp; | 724 | return sp; |
728 | } | 725 | } |
729 | 726 | ||
@@ -876,11 +873,18 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn) | |||
876 | 873 | ||
877 | struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva) | 874 | struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva) |
878 | { | 875 | { |
876 | struct page *page; | ||
877 | |||
879 | gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva); | 878 | gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva); |
880 | 879 | ||
881 | if (gpa == UNMAPPED_GVA) | 880 | if (gpa == UNMAPPED_GVA) |
882 | return NULL; | 881 | return NULL; |
883 | return gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); | 882 | |
883 | down_read(¤t->mm->mmap_sem); | ||
884 | page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); | ||
885 | up_read(¤t->mm->mmap_sem); | ||
886 | |||
887 | return page; | ||
884 | } | 888 | } |
885 | 889 | ||
886 | static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, | 890 | static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, |
@@ -999,8 +1003,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, | |||
999 | >> PAGE_SHIFT; | 1003 | >> PAGE_SHIFT; |
1000 | new_table = kvm_mmu_get_page(vcpu, pseudo_gfn, | 1004 | new_table = kvm_mmu_get_page(vcpu, pseudo_gfn, |
1001 | v, level - 1, | 1005 | v, level - 1, |
1002 | 1, ACC_ALL, &table[index], | 1006 | 1, ACC_ALL, &table[index]); |
1003 | NULL); | ||
1004 | if (!new_table) { | 1007 | if (!new_table) { |
1005 | pgprintk("nonpaging_map: ENOMEM\n"); | 1008 | pgprintk("nonpaging_map: ENOMEM\n"); |
1006 | kvm_release_page_clean(page); | 1009 | kvm_release_page_clean(page); |
@@ -1020,15 +1023,18 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn) | |||
1020 | 1023 | ||
1021 | struct page *page; | 1024 | struct page *page; |
1022 | 1025 | ||
1026 | down_read(&vcpu->kvm->slots_lock); | ||
1027 | |||
1023 | down_read(¤t->mm->mmap_sem); | 1028 | down_read(¤t->mm->mmap_sem); |
1024 | page = gfn_to_page(vcpu->kvm, gfn); | 1029 | page = gfn_to_page(vcpu->kvm, gfn); |
1030 | up_read(¤t->mm->mmap_sem); | ||
1025 | 1031 | ||
1026 | spin_lock(&vcpu->kvm->mmu_lock); | 1032 | spin_lock(&vcpu->kvm->mmu_lock); |
1027 | kvm_mmu_free_some_pages(vcpu); | 1033 | kvm_mmu_free_some_pages(vcpu); |
1028 | r = __nonpaging_map(vcpu, v, write, gfn, page); | 1034 | r = __nonpaging_map(vcpu, v, write, gfn, page); |
1029 | spin_unlock(&vcpu->kvm->mmu_lock); | 1035 | spin_unlock(&vcpu->kvm->mmu_lock); |
1030 | 1036 | ||
1031 | up_read(¤t->mm->mmap_sem); | 1037 | up_read(&vcpu->kvm->slots_lock); |
1032 | 1038 | ||
1033 | return r; | 1039 | return r; |
1034 | } | 1040 | } |
@@ -1090,7 +1096,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu) | |||
1090 | 1096 | ||
1091 | ASSERT(!VALID_PAGE(root)); | 1097 | ASSERT(!VALID_PAGE(root)); |
1092 | sp = kvm_mmu_get_page(vcpu, root_gfn, 0, | 1098 | sp = kvm_mmu_get_page(vcpu, root_gfn, 0, |
1093 | PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL); | 1099 | PT64_ROOT_LEVEL, 0, ACC_ALL, NULL); |
1094 | root = __pa(sp->spt); | 1100 | root = __pa(sp->spt); |
1095 | ++sp->root_count; | 1101 | ++sp->root_count; |
1096 | vcpu->arch.mmu.root_hpa = root; | 1102 | vcpu->arch.mmu.root_hpa = root; |
@@ -1111,7 +1117,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu) | |||
1111 | root_gfn = 0; | 1117 | root_gfn = 0; |
1112 | sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, | 1118 | sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, |
1113 | PT32_ROOT_LEVEL, !is_paging(vcpu), | 1119 | PT32_ROOT_LEVEL, !is_paging(vcpu), |
1114 | ACC_ALL, NULL, NULL); | 1120 | ACC_ALL, NULL); |
1115 | root = __pa(sp->spt); | 1121 | root = __pa(sp->spt); |
1116 | ++sp->root_count; | 1122 | ++sp->root_count; |
1117 | vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; | 1123 | vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; |
@@ -1172,7 +1178,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu) | |||
1172 | 1178 | ||
1173 | static void paging_new_cr3(struct kvm_vcpu *vcpu) | 1179 | static void paging_new_cr3(struct kvm_vcpu *vcpu) |
1174 | { | 1180 | { |
1175 | pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3); | 1181 | pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3); |
1176 | mmu_free_roots(vcpu); | 1182 | mmu_free_roots(vcpu); |
1177 | } | 1183 | } |
1178 | 1184 | ||
@@ -1362,6 +1368,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
1362 | gfn_t gfn; | 1368 | gfn_t gfn; |
1363 | int r; | 1369 | int r; |
1364 | u64 gpte = 0; | 1370 | u64 gpte = 0; |
1371 | struct page *page; | ||
1365 | 1372 | ||
1366 | if (bytes != 4 && bytes != 8) | 1373 | if (bytes != 4 && bytes != 8) |
1367 | return; | 1374 | return; |
@@ -1389,6 +1396,11 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
1389 | if (!is_present_pte(gpte)) | 1396 | if (!is_present_pte(gpte)) |
1390 | return; | 1397 | return; |
1391 | gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; | 1398 | gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; |
1399 | |||
1400 | down_read(¤t->mm->mmap_sem); | ||
1401 | page = gfn_to_page(vcpu->kvm, gfn); | ||
1402 | up_read(¤t->mm->mmap_sem); | ||
1403 | |||
1392 | vcpu->arch.update_pte.gfn = gfn; | 1404 | vcpu->arch.update_pte.gfn = gfn; |
1393 | vcpu->arch.update_pte.page = gfn_to_page(vcpu->kvm, gfn); | 1405 | vcpu->arch.update_pte.page = gfn_to_page(vcpu->kvm, gfn); |
1394 | } | 1406 | } |
@@ -1496,9 +1508,9 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) | |||
1496 | gpa_t gpa; | 1508 | gpa_t gpa; |
1497 | int r; | 1509 | int r; |
1498 | 1510 | ||
1499 | down_read(¤t->mm->mmap_sem); | 1511 | down_read(&vcpu->kvm->slots_lock); |
1500 | gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva); | 1512 | gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva); |
1501 | up_read(¤t->mm->mmap_sem); | 1513 | up_read(&vcpu->kvm->slots_lock); |
1502 | 1514 | ||
1503 | spin_lock(&vcpu->kvm->mmu_lock); | 1515 | spin_lock(&vcpu->kvm->mmu_lock); |
1504 | r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); | 1516 | r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 03ba8608fe0f..ecc0856268c4 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -91,7 +91,10 @@ static bool FNAME(cmpxchg_gpte)(struct kvm *kvm, | |||
91 | pt_element_t *table; | 91 | pt_element_t *table; |
92 | struct page *page; | 92 | struct page *page; |
93 | 93 | ||
94 | down_read(¤t->mm->mmap_sem); | ||
94 | page = gfn_to_page(kvm, table_gfn); | 95 | page = gfn_to_page(kvm, table_gfn); |
96 | up_read(¤t->mm->mmap_sem); | ||
97 | |||
95 | table = kmap_atomic(page, KM_USER0); | 98 | table = kmap_atomic(page, KM_USER0); |
96 | 99 | ||
97 | ret = CMPXCHG(&table[index], orig_pte, new_pte); | 100 | ret = CMPXCHG(&table[index], orig_pte, new_pte); |
@@ -140,7 +143,7 @@ walk: | |||
140 | } | 143 | } |
141 | #endif | 144 | #endif |
142 | ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) || | 145 | ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) || |
143 | (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0); | 146 | (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0); |
144 | 147 | ||
145 | pt_access = ACC_ALL; | 148 | pt_access = ACC_ALL; |
146 | 149 | ||
@@ -297,7 +300,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, | |||
297 | u64 shadow_pte; | 300 | u64 shadow_pte; |
298 | int metaphysical; | 301 | int metaphysical; |
299 | gfn_t table_gfn; | 302 | gfn_t table_gfn; |
300 | bool new_page = 0; | ||
301 | 303 | ||
302 | shadow_ent = ((u64 *)__va(shadow_addr)) + index; | 304 | shadow_ent = ((u64 *)__va(shadow_addr)) + index; |
303 | if (level == PT_PAGE_TABLE_LEVEL) | 305 | if (level == PT_PAGE_TABLE_LEVEL) |
@@ -319,8 +321,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, | |||
319 | } | 321 | } |
320 | shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1, | 322 | shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1, |
321 | metaphysical, access, | 323 | metaphysical, access, |
322 | shadow_ent, &new_page); | 324 | shadow_ent); |
323 | if (new_page && !metaphysical) { | 325 | if (!metaphysical) { |
324 | int r; | 326 | int r; |
325 | pt_element_t curr_pte; | 327 | pt_element_t curr_pte; |
326 | r = kvm_read_guest_atomic(vcpu->kvm, | 328 | r = kvm_read_guest_atomic(vcpu->kvm, |
@@ -378,7 +380,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, | |||
378 | if (r) | 380 | if (r) |
379 | return r; | 381 | return r; |
380 | 382 | ||
381 | down_read(¤t->mm->mmap_sem); | 383 | down_read(&vcpu->kvm->slots_lock); |
382 | /* | 384 | /* |
383 | * Look up the shadow pte for the faulting address. | 385 | * Look up the shadow pte for the faulting address. |
384 | */ | 386 | */ |
@@ -392,11 +394,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, | |||
392 | pgprintk("%s: guest page fault\n", __FUNCTION__); | 394 | pgprintk("%s: guest page fault\n", __FUNCTION__); |
393 | inject_page_fault(vcpu, addr, walker.error_code); | 395 | inject_page_fault(vcpu, addr, walker.error_code); |
394 | vcpu->arch.last_pt_write_count = 0; /* reset fork detector */ | 396 | vcpu->arch.last_pt_write_count = 0; /* reset fork detector */ |
395 | up_read(¤t->mm->mmap_sem); | 397 | up_read(&vcpu->kvm->slots_lock); |
396 | return 0; | 398 | return 0; |
397 | } | 399 | } |
398 | 400 | ||
401 | down_read(¤t->mm->mmap_sem); | ||
399 | page = gfn_to_page(vcpu->kvm, walker.gfn); | 402 | page = gfn_to_page(vcpu->kvm, walker.gfn); |
403 | up_read(¤t->mm->mmap_sem); | ||
400 | 404 | ||
401 | spin_lock(&vcpu->kvm->mmu_lock); | 405 | spin_lock(&vcpu->kvm->mmu_lock); |
402 | kvm_mmu_free_some_pages(vcpu); | 406 | kvm_mmu_free_some_pages(vcpu); |
@@ -413,14 +417,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, | |||
413 | */ | 417 | */ |
414 | if (shadow_pte && is_io_pte(*shadow_pte)) { | 418 | if (shadow_pte && is_io_pte(*shadow_pte)) { |
415 | spin_unlock(&vcpu->kvm->mmu_lock); | 419 | spin_unlock(&vcpu->kvm->mmu_lock); |
416 | up_read(¤t->mm->mmap_sem); | 420 | up_read(&vcpu->kvm->slots_lock); |
417 | return 1; | 421 | return 1; |
418 | } | 422 | } |
419 | 423 | ||
420 | ++vcpu->stat.pf_fixed; | 424 | ++vcpu->stat.pf_fixed; |
421 | kvm_mmu_audit(vcpu, "post page fault (fixed)"); | 425 | kvm_mmu_audit(vcpu, "post page fault (fixed)"); |
422 | spin_unlock(&vcpu->kvm->mmu_lock); | 426 | spin_unlock(&vcpu->kvm->mmu_lock); |
423 | up_read(¤t->mm->mmap_sem); | 427 | up_read(&vcpu->kvm->slots_lock); |
424 | 428 | ||
425 | return write_pt; | 429 | return write_pt; |
426 | } | 430 | } |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index de755cb1431d..1a582f1090e8 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -792,6 +792,10 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
792 | vcpu->arch.cr0 = cr0; | 792 | vcpu->arch.cr0 = cr0; |
793 | cr0 |= X86_CR0_PG | X86_CR0_WP; | 793 | cr0 |= X86_CR0_PG | X86_CR0_WP; |
794 | cr0 &= ~(X86_CR0_CD | X86_CR0_NW); | 794 | cr0 &= ~(X86_CR0_CD | X86_CR0_NW); |
795 | if (!vcpu->fpu_active) { | ||
796 | svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR); | ||
797 | cr0 |= X86_CR0_TS; | ||
798 | } | ||
795 | svm->vmcb->save.cr0 = cr0; | 799 | svm->vmcb->save.cr0 = cr0; |
796 | } | 800 | } |
797 | 801 | ||
@@ -1096,6 +1100,24 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) | |||
1096 | case MSR_IA32_SYSENTER_ESP: | 1100 | case MSR_IA32_SYSENTER_ESP: |
1097 | *data = svm->vmcb->save.sysenter_esp; | 1101 | *data = svm->vmcb->save.sysenter_esp; |
1098 | break; | 1102 | break; |
1103 | /* Nobody will change the following 5 values in the VMCB so | ||
1104 | we can safely return them on rdmsr. They will always be 0 | ||
1105 | until LBRV is implemented. */ | ||
1106 | case MSR_IA32_DEBUGCTLMSR: | ||
1107 | *data = svm->vmcb->save.dbgctl; | ||
1108 | break; | ||
1109 | case MSR_IA32_LASTBRANCHFROMIP: | ||
1110 | *data = svm->vmcb->save.br_from; | ||
1111 | break; | ||
1112 | case MSR_IA32_LASTBRANCHTOIP: | ||
1113 | *data = svm->vmcb->save.br_to; | ||
1114 | break; | ||
1115 | case MSR_IA32_LASTINTFROMIP: | ||
1116 | *data = svm->vmcb->save.last_excp_from; | ||
1117 | break; | ||
1118 | case MSR_IA32_LASTINTTOIP: | ||
1119 | *data = svm->vmcb->save.last_excp_to; | ||
1120 | break; | ||
1099 | default: | 1121 | default: |
1100 | return kvm_get_msr_common(vcpu, ecx, data); | 1122 | return kvm_get_msr_common(vcpu, ecx, data); |
1101 | } | 1123 | } |
@@ -1156,6 +1178,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) | |||
1156 | case MSR_IA32_SYSENTER_ESP: | 1178 | case MSR_IA32_SYSENTER_ESP: |
1157 | svm->vmcb->save.sysenter_esp = data; | 1179 | svm->vmcb->save.sysenter_esp = data; |
1158 | break; | 1180 | break; |
1181 | case MSR_IA32_DEBUGCTLMSR: | ||
1182 | pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", | ||
1183 | __FUNCTION__, data); | ||
1184 | break; | ||
1159 | case MSR_K7_EVNTSEL0: | 1185 | case MSR_K7_EVNTSEL0: |
1160 | case MSR_K7_EVNTSEL1: | 1186 | case MSR_K7_EVNTSEL1: |
1161 | case MSR_K7_EVNTSEL2: | 1187 | case MSR_K7_EVNTSEL2: |
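
The svm.c read side above answers guest rdmsr of the debug-control and last-branch MSRs from the values kept in the VMCB save area (all zero until LBRV support exists) instead of punting to the common handler; the write side just logs and ignores MSR_IA32_DEBUGCTLMSR. A stripped-down sketch of that dispatch shape, with made-up register numbers and a plain struct standing in for the VMCB:

    #include <stdint.h>
    #include <stdio.h>

    enum { REG_DBGCTL = 1, REG_BR_FROM = 2, REG_BR_TO = 3 };  /* made-up ids */

    struct save_area {              /* stand-in for the VMCB save area */
        uint64_t dbgctl, br_from, br_to;
    };

    static int read_reg(const struct save_area *save, int reg, uint64_t *data)
    {
        switch (reg) {
        case REG_DBGCTL:
            *data = save->dbgctl;
            return 0;
        case REG_BR_FROM:
            *data = save->br_from;
            return 0;
        case REG_BR_TO:
            *data = save->br_to;
            return 0;
        default:
            return -1;  /* defer to a common handler */
        }
    }

    int main(void)
    {
        struct save_area save = { 0 };  /* stays zero until the feature lands */
        uint64_t val;

        if (read_reg(&save, REG_DBGCTL, &val) == 0)
            printf("dbgctl = %llu\n", (unsigned long long)val);
        return 0;
    }
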
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ad36447e696e..94ea724638fd 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -638,6 +638,7 @@ static void setup_msrs(struct vcpu_vmx *vmx) | |||
638 | { | 638 | { |
639 | int save_nmsrs; | 639 | int save_nmsrs; |
640 | 640 | ||
641 | vmx_load_host_state(vmx); | ||
641 | save_nmsrs = 0; | 642 | save_nmsrs = 0; |
642 | #ifdef CONFIG_X86_64 | 643 | #ifdef CONFIG_X86_64 |
643 | if (is_long_mode(&vmx->vcpu)) { | 644 | if (is_long_mode(&vmx->vcpu)) { |
@@ -1477,7 +1478,7 @@ static int alloc_apic_access_page(struct kvm *kvm) | |||
1477 | struct kvm_userspace_memory_region kvm_userspace_mem; | 1478 | struct kvm_userspace_memory_region kvm_userspace_mem; |
1478 | int r = 0; | 1479 | int r = 0; |
1479 | 1480 | ||
1480 | down_write(¤t->mm->mmap_sem); | 1481 | down_write(&kvm->slots_lock); |
1481 | if (kvm->arch.apic_access_page) | 1482 | if (kvm->arch.apic_access_page) |
1482 | goto out; | 1483 | goto out; |
1483 | kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; | 1484 | kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; |
@@ -1487,9 +1488,12 @@ static int alloc_apic_access_page(struct kvm *kvm) | |||
1487 | r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0); | 1488 | r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0); |
1488 | if (r) | 1489 | if (r) |
1489 | goto out; | 1490 | goto out; |
1491 | |||
1492 | down_read(¤t->mm->mmap_sem); | ||
1490 | kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); | 1493 | kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); |
1494 | up_read(¤t->mm->mmap_sem); | ||
1491 | out: | 1495 | out: |
1492 | up_write(¤t->mm->mmap_sem); | 1496 | up_write(&kvm->slots_lock); |
1493 | return r; | 1497 | return r; |
1494 | } | 1498 | } |
1495 | 1499 | ||
@@ -1602,9 +1606,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) | |||
1602 | vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); | 1606 | vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); |
1603 | vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK); | 1607 | vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK); |
1604 | 1608 | ||
1605 | if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) | ||
1606 | if (alloc_apic_access_page(vmx->vcpu.kvm) != 0) | ||
1607 | return -ENOMEM; | ||
1608 | 1609 | ||
1609 | return 0; | 1610 | return 0; |
1610 | } | 1611 | } |
@@ -2534,6 +2535,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) | |||
2534 | put_cpu(); | 2535 | put_cpu(); |
2535 | if (err) | 2536 | if (err) |
2536 | goto free_vmcs; | 2537 | goto free_vmcs; |
2538 | if (vm_need_virtualize_apic_accesses(kvm)) | ||
2539 | if (alloc_apic_access_page(kvm) != 0) | ||
2540 | goto free_vmcs; | ||
2537 | 2541 | ||
2538 | return &vmx->vcpu; | 2542 | return &vmx->vcpu; |
2539 | 2543 | ||
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index cf5308148689..6b01552bd1f1 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -46,6 +46,9 @@ | |||
46 | #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM | 46 | #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM |
47 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU | 47 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU |
48 | 48 | ||
49 | static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, | ||
50 | struct kvm_cpuid_entry2 __user *entries); | ||
51 | |||
49 | struct kvm_x86_ops *kvm_x86_ops; | 52 | struct kvm_x86_ops *kvm_x86_ops; |
50 | 53 | ||
51 | struct kvm_stats_debugfs_item debugfs_entries[] = { | 54 | struct kvm_stats_debugfs_item debugfs_entries[] = { |
@@ -181,7 +184,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3) | |||
181 | int ret; | 184 | int ret; |
182 | u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)]; | 185 | u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)]; |
183 | 186 | ||
184 | down_read(¤t->mm->mmap_sem); | 187 | down_read(&vcpu->kvm->slots_lock); |
185 | ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte, | 188 | ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte, |
186 | offset * sizeof(u64), sizeof(pdpte)); | 189 | offset * sizeof(u64), sizeof(pdpte)); |
187 | if (ret < 0) { | 190 | if (ret < 0) { |
@@ -198,7 +201,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3) | |||
198 | 201 | ||
199 | memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs)); | 202 | memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs)); |
200 | out: | 203 | out: |
201 | up_read(¤t->mm->mmap_sem); | 204 | up_read(&vcpu->kvm->slots_lock); |
202 | 205 | ||
203 | return ret; | 206 | return ret; |
204 | } | 207 | } |
@@ -212,13 +215,13 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu) | |||
212 | if (is_long_mode(vcpu) || !is_pae(vcpu)) | 215 | if (is_long_mode(vcpu) || !is_pae(vcpu)) |
213 | return false; | 216 | return false; |
214 | 217 | ||
215 | down_read(¤t->mm->mmap_sem); | 218 | down_read(&vcpu->kvm->slots_lock); |
216 | r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte)); | 219 | r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte)); |
217 | if (r < 0) | 220 | if (r < 0) |
218 | goto out; | 221 | goto out; |
219 | changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0; | 222 | changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0; |
220 | out: | 223 | out: |
221 | up_read(¤t->mm->mmap_sem); | 224 | up_read(&vcpu->kvm->slots_lock); |
222 | 225 | ||
223 | return changed; | 226 | return changed; |
224 | } | 227 | } |
@@ -356,7 +359,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) | |||
356 | */ | 359 | */ |
357 | } | 360 | } |
358 | 361 | ||
359 | down_read(¤t->mm->mmap_sem); | 362 | down_read(&vcpu->kvm->slots_lock); |
360 | /* | 363 | /* |
361 | * Does the new cr3 value map to physical memory? (Note, we | 364 | * Does the new cr3 value map to physical memory? (Note, we |
362 | * catch an invalid cr3 even in real-mode, because it would | 365 | * catch an invalid cr3 even in real-mode, because it would |
@@ -372,7 +375,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) | |||
372 | vcpu->arch.cr3 = cr3; | 375 | vcpu->arch.cr3 = cr3; |
373 | vcpu->arch.mmu.new_cr3(vcpu); | 376 | vcpu->arch.mmu.new_cr3(vcpu); |
374 | } | 377 | } |
375 | up_read(¤t->mm->mmap_sem); | 378 | up_read(&vcpu->kvm->slots_lock); |
376 | } | 379 | } |
377 | EXPORT_SYMBOL_GPL(set_cr3); | 380 | EXPORT_SYMBOL_GPL(set_cr3); |
378 | 381 | ||
@@ -484,6 +487,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) | |||
484 | pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n", | 487 | pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n", |
485 | __FUNCTION__, data); | 488 | __FUNCTION__, data); |
486 | break; | 489 | break; |
490 | case MSR_IA32_MCG_CTL: | ||
491 | pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n", | ||
492 | __FUNCTION__, data); | ||
493 | break; | ||
487 | case MSR_IA32_UCODE_REV: | 494 | case MSR_IA32_UCODE_REV: |
488 | case MSR_IA32_UCODE_WRITE: | 495 | case MSR_IA32_UCODE_WRITE: |
489 | case 0x200 ... 0x2ff: /* MTRRs */ | 496 | case 0x200 ... 0x2ff: /* MTRRs */ |
@@ -526,6 +533,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) | |||
526 | case MSR_IA32_MC0_CTL: | 533 | case MSR_IA32_MC0_CTL: |
527 | case MSR_IA32_MCG_STATUS: | 534 | case MSR_IA32_MCG_STATUS: |
528 | case MSR_IA32_MCG_CAP: | 535 | case MSR_IA32_MCG_CAP: |
536 | case MSR_IA32_MCG_CTL: | ||
529 | case MSR_IA32_MC0_MISC: | 537 | case MSR_IA32_MC0_MISC: |
530 | case MSR_IA32_MC0_MISC+4: | 538 | case MSR_IA32_MC0_MISC+4: |
531 | case MSR_IA32_MC0_MISC+8: | 539 | case MSR_IA32_MC0_MISC+8: |
@@ -727,6 +735,24 @@ long kvm_arch_dev_ioctl(struct file *filp, | |||
727 | r = 0; | 735 | r = 0; |
728 | break; | 736 | break; |
729 | } | 737 | } |
738 | case KVM_GET_SUPPORTED_CPUID: { | ||
739 | struct kvm_cpuid2 __user *cpuid_arg = argp; | ||
740 | struct kvm_cpuid2 cpuid; | ||
741 | |||
742 | r = -EFAULT; | ||
743 | if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) | ||
744 | goto out; | ||
745 | r = kvm_dev_ioctl_get_supported_cpuid(&cpuid, | ||
746 | cpuid_arg->entries); | ||
747 | if (r) | ||
748 | goto out; | ||
749 | |||
750 | r = -EFAULT; | ||
751 | if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) | ||
752 | goto out; | ||
753 | r = 0; | ||
754 | break; | ||
755 | } | ||
730 | default: | 756 | default: |
731 | r = -EINVAL; | 757 | r = -EINVAL; |
732 | } | 758 | } |
@@ -974,8 +1000,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |||
974 | put_cpu(); | 1000 | put_cpu(); |
975 | } | 1001 | } |
976 | 1002 | ||
977 | static int kvm_vm_ioctl_get_supported_cpuid(struct kvm *kvm, | 1003 | static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, |
978 | struct kvm_cpuid2 *cpuid, | ||
979 | struct kvm_cpuid_entry2 __user *entries) | 1004 | struct kvm_cpuid_entry2 __user *entries) |
980 | { | 1005 | { |
981 | struct kvm_cpuid_entry2 *cpuid_entries; | 1006 | struct kvm_cpuid_entry2 *cpuid_entries; |
@@ -1207,12 +1232,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, | |||
1207 | if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) | 1232 | if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) |
1208 | return -EINVAL; | 1233 | return -EINVAL; |
1209 | 1234 | ||
1210 | down_write(¤t->mm->mmap_sem); | 1235 | down_write(&kvm->slots_lock); |
1211 | 1236 | ||
1212 | kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); | 1237 | kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); |
1213 | kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; | 1238 | kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; |
1214 | 1239 | ||
1215 | up_write(¤t->mm->mmap_sem); | 1240 | up_write(&kvm->slots_lock); |
1216 | return 0; | 1241 | return 0; |
1217 | } | 1242 | } |
1218 | 1243 | ||
@@ -1261,7 +1286,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm, | |||
1261 | < alias->target_phys_addr) | 1286 | < alias->target_phys_addr) |
1262 | goto out; | 1287 | goto out; |
1263 | 1288 | ||
1264 | down_write(¤t->mm->mmap_sem); | 1289 | down_write(&kvm->slots_lock); |
1265 | 1290 | ||
1266 | p = &kvm->arch.aliases[alias->slot]; | 1291 | p = &kvm->arch.aliases[alias->slot]; |
1267 | p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT; | 1292 | p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT; |
@@ -1275,7 +1300,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm, | |||
1275 | 1300 | ||
1276 | kvm_mmu_zap_all(kvm); | 1301 | kvm_mmu_zap_all(kvm); |
1277 | 1302 | ||
1278 | up_write(¤t->mm->mmap_sem); | 1303 | up_write(&kvm->slots_lock); |
1279 | 1304 | ||
1280 | return 0; | 1305 | return 0; |
1281 | 1306 | ||
@@ -1351,7 +1376,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |||
1351 | struct kvm_memory_slot *memslot; | 1376 | struct kvm_memory_slot *memslot; |
1352 | int is_dirty = 0; | 1377 | int is_dirty = 0; |
1353 | 1378 | ||
1354 | down_write(¤t->mm->mmap_sem); | 1379 | down_write(&kvm->slots_lock); |
1355 | 1380 | ||
1356 | r = kvm_get_dirty_log(kvm, log, &is_dirty); | 1381 | r = kvm_get_dirty_log(kvm, log, &is_dirty); |
1357 | if (r) | 1382 | if (r) |
@@ -1367,7 +1392,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |||
1367 | } | 1392 | } |
1368 | r = 0; | 1393 | r = 0; |
1369 | out: | 1394 | out: |
1370 | up_write(¤t->mm->mmap_sem); | 1395 | up_write(&kvm->slots_lock); |
1371 | return r; | 1396 | return r; |
1372 | } | 1397 | } |
1373 | 1398 | ||
@@ -1487,24 +1512,6 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
1487 | r = 0; | 1512 | r = 0; |
1488 | break; | 1513 | break; |
1489 | } | 1514 | } |
1490 | case KVM_GET_SUPPORTED_CPUID: { | ||
1491 | struct kvm_cpuid2 __user *cpuid_arg = argp; | ||
1492 | struct kvm_cpuid2 cpuid; | ||
1493 | |||
1494 | r = -EFAULT; | ||
1495 | if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) | ||
1496 | goto out; | ||
1497 | r = kvm_vm_ioctl_get_supported_cpuid(kvm, &cpuid, | ||
1498 | cpuid_arg->entries); | ||
1499 | if (r) | ||
1500 | goto out; | ||
1501 | |||
1502 | r = -EFAULT; | ||
1503 | if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) | ||
1504 | goto out; | ||
1505 | r = 0; | ||
1506 | break; | ||
1507 | } | ||
1508 | default: | 1515 | default: |
1509 | ; | 1516 | ; |
1510 | } | 1517 | } |
@@ -1563,7 +1570,7 @@ int emulator_read_std(unsigned long addr, | |||
1563 | void *data = val; | 1570 | void *data = val; |
1564 | int r = X86EMUL_CONTINUE; | 1571 | int r = X86EMUL_CONTINUE; |
1565 | 1572 | ||
1566 | down_read(¤t->mm->mmap_sem); | 1573 | down_read(&vcpu->kvm->slots_lock); |
1567 | while (bytes) { | 1574 | while (bytes) { |
1568 | gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); | 1575 | gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); |
1569 | unsigned offset = addr & (PAGE_SIZE-1); | 1576 | unsigned offset = addr & (PAGE_SIZE-1); |
@@ -1585,7 +1592,7 @@ int emulator_read_std(unsigned long addr, | |||
1585 | addr += tocopy; | 1592 | addr += tocopy; |
1586 | } | 1593 | } |
1587 | out: | 1594 | out: |
1588 | up_read(¤t->mm->mmap_sem); | 1595 | up_read(&vcpu->kvm->slots_lock); |
1589 | return r; | 1596 | return r; |
1590 | } | 1597 | } |
1591 | EXPORT_SYMBOL_GPL(emulator_read_std); | 1598 | EXPORT_SYMBOL_GPL(emulator_read_std); |
@@ -1604,9 +1611,9 @@ static int emulator_read_emulated(unsigned long addr, | |||
1604 | return X86EMUL_CONTINUE; | 1611 | return X86EMUL_CONTINUE; |
1605 | } | 1612 | } |
1606 | 1613 | ||
1607 | down_read(¤t->mm->mmap_sem); | 1614 | down_read(&vcpu->kvm->slots_lock); |
1608 | gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); | 1615 | gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); |
1609 | up_read(¤t->mm->mmap_sem); | 1616 | up_read(&vcpu->kvm->slots_lock); |
1610 | 1617 | ||
1611 | /* For APIC access vmexit */ | 1618 | /* For APIC access vmexit */ |
1612 | if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) | 1619 | if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) |
@@ -1644,14 +1651,14 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
1644 | { | 1651 | { |
1645 | int ret; | 1652 | int ret; |
1646 | 1653 | ||
1647 | down_read(¤t->mm->mmap_sem); | 1654 | down_read(&vcpu->kvm->slots_lock); |
1648 | ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); | 1655 | ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); |
1649 | if (ret < 0) { | 1656 | if (ret < 0) { |
1650 | up_read(¤t->mm->mmap_sem); | 1657 | up_read(&vcpu->kvm->slots_lock); |
1651 | return 0; | 1658 | return 0; |
1652 | } | 1659 | } |
1653 | kvm_mmu_pte_write(vcpu, gpa, val, bytes); | 1660 | kvm_mmu_pte_write(vcpu, gpa, val, bytes); |
1654 | up_read(¤t->mm->mmap_sem); | 1661 | up_read(&vcpu->kvm->slots_lock); |
1655 | return 1; | 1662 | return 1; |
1656 | } | 1663 | } |
1657 | 1664 | ||
@@ -1663,9 +1670,9 @@ static int emulator_write_emulated_onepage(unsigned long addr, | |||
1663 | struct kvm_io_device *mmio_dev; | 1670 | struct kvm_io_device *mmio_dev; |
1664 | gpa_t gpa; | 1671 | gpa_t gpa; |
1665 | 1672 | ||
1666 | down_read(¤t->mm->mmap_sem); | 1673 | down_read(&vcpu->kvm->slots_lock); |
1667 | gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); | 1674 | gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); |
1668 | up_read(¤t->mm->mmap_sem); | 1675 | up_read(&vcpu->kvm->slots_lock); |
1669 | 1676 | ||
1670 | if (gpa == UNMAPPED_GVA) { | 1677 | if (gpa == UNMAPPED_GVA) { |
1671 | kvm_inject_page_fault(vcpu, addr, 2); | 1678 | kvm_inject_page_fault(vcpu, addr, 2); |
@@ -1742,7 +1749,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr, | |||
1742 | char *kaddr; | 1749 | char *kaddr; |
1743 | u64 val; | 1750 | u64 val; |
1744 | 1751 | ||
1745 | down_read(¤t->mm->mmap_sem); | 1752 | down_read(&vcpu->kvm->slots_lock); |
1746 | gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); | 1753 | gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); |
1747 | 1754 | ||
1748 | if (gpa == UNMAPPED_GVA || | 1755 | if (gpa == UNMAPPED_GVA || |
@@ -1753,13 +1760,17 @@ static int emulator_cmpxchg_emulated(unsigned long addr, | |||
1753 | goto emul_write; | 1760 | goto emul_write; |
1754 | 1761 | ||
1755 | val = *(u64 *)new; | 1762 | val = *(u64 *)new; |
1763 | |||
1764 | down_read(¤t->mm->mmap_sem); | ||
1756 | page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); | 1765 | page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); |
1766 | up_read(¤t->mm->mmap_sem); | ||
1767 | |||
1757 | kaddr = kmap_atomic(page, KM_USER0); | 1768 | kaddr = kmap_atomic(page, KM_USER0); |
1758 | set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val); | 1769 | set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val); |
1759 | kunmap_atomic(kaddr, KM_USER0); | 1770 | kunmap_atomic(kaddr, KM_USER0); |
1760 | kvm_release_page_dirty(page); | 1771 | kvm_release_page_dirty(page); |
1761 | emul_write: | 1772 | emul_write: |
1762 | up_read(¤t->mm->mmap_sem); | 1773 | up_read(&vcpu->kvm->slots_lock); |
1763 | } | 1774 | } |
1764 | #endif | 1775 | #endif |
1765 | 1776 | ||
@@ -2152,10 +2163,10 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | |||
2152 | kvm_x86_ops->skip_emulated_instruction(vcpu); | 2163 | kvm_x86_ops->skip_emulated_instruction(vcpu); |
2153 | 2164 | ||
2154 | for (i = 0; i < nr_pages; ++i) { | 2165 | for (i = 0; i < nr_pages; ++i) { |
2155 | down_read(¤t->mm->mmap_sem); | 2166 | down_read(&vcpu->kvm->slots_lock); |
2156 | page = gva_to_page(vcpu, address + i * PAGE_SIZE); | 2167 | page = gva_to_page(vcpu, address + i * PAGE_SIZE); |
2157 | vcpu->arch.pio.guest_pages[i] = page; | 2168 | vcpu->arch.pio.guest_pages[i] = page; |
2158 | up_read(¤t->mm->mmap_sem); | 2169 | up_read(&vcpu->kvm->slots_lock); |
2159 | if (!page) { | 2170 | if (!page) { |
2160 | kvm_inject_gp(vcpu, 0); | 2171 | kvm_inject_gp(vcpu, 0); |
2161 | free_pio_guest_pages(vcpu); | 2172 | free_pio_guest_pages(vcpu); |
@@ -2478,8 +2489,9 @@ static void vapic_enter(struct kvm_vcpu *vcpu) | |||
2478 | 2489 | ||
2479 | down_read(¤t->mm->mmap_sem); | 2490 | down_read(¤t->mm->mmap_sem); |
2480 | page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); | 2491 | page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); |
2481 | vcpu->arch.apic->vapic_page = page; | ||
2482 | up_read(¤t->mm->mmap_sem); | 2492 | up_read(¤t->mm->mmap_sem); |
2493 | |||
2494 | vcpu->arch.apic->vapic_page = page; | ||
2483 | } | 2495 | } |
2484 | 2496 | ||
2485 | static void vapic_exit(struct kvm_vcpu *vcpu) | 2497 | static void vapic_exit(struct kvm_vcpu *vcpu) |
@@ -2861,8 +2873,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |||
2861 | kvm_x86_ops->decache_cr4_guest_bits(vcpu); | 2873 | kvm_x86_ops->decache_cr4_guest_bits(vcpu); |
2862 | 2874 | ||
2863 | mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0; | 2875 | mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0; |
2864 | vcpu->arch.cr0 = sregs->cr0; | ||
2865 | kvm_x86_ops->set_cr0(vcpu, sregs->cr0); | 2876 | kvm_x86_ops->set_cr0(vcpu, sregs->cr0); |
2877 | vcpu->arch.cr0 = sregs->cr0; | ||
2866 | 2878 | ||
2867 | mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4; | 2879 | mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4; |
2868 | kvm_x86_ops->set_cr4(vcpu, sregs->cr4); | 2880 | kvm_x86_ops->set_cr4(vcpu, sregs->cr4); |
@@ -2952,9 +2964,9 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | |||
2952 | gpa_t gpa; | 2964 | gpa_t gpa; |
2953 | 2965 | ||
2954 | vcpu_load(vcpu); | 2966 | vcpu_load(vcpu); |
2955 | down_read(¤t->mm->mmap_sem); | 2967 | down_read(&vcpu->kvm->slots_lock); |
2956 | gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr); | 2968 | gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr); |
2957 | up_read(¤t->mm->mmap_sem); | 2969 | up_read(&vcpu->kvm->slots_lock); |
2958 | tr->physical_address = gpa; | 2970 | tr->physical_address = gpa; |
2959 | tr->valid = gpa != UNMAPPED_GVA; | 2971 | tr->valid = gpa != UNMAPPED_GVA; |
2960 | tr->writeable = 1; | 2972 | tr->writeable = 1; |
@@ -3227,11 +3239,13 @@ int kvm_arch_set_memory_region(struct kvm *kvm, | |||
3227 | */ | 3239 | */ |
3228 | if (!user_alloc) { | 3240 | if (!user_alloc) { |
3229 | if (npages && !old.rmap) { | 3241 | if (npages && !old.rmap) { |
3242 | down_write(&current->mm->mmap_sem); | ||
3230 | memslot->userspace_addr = do_mmap(NULL, 0, | 3243 | memslot->userspace_addr = do_mmap(NULL, 0, |
3231 | npages * PAGE_SIZE, | 3244 | npages * PAGE_SIZE, |
3232 | PROT_READ | PROT_WRITE, | 3245 | PROT_READ | PROT_WRITE, |
3233 | MAP_SHARED | MAP_ANONYMOUS, | 3246 | MAP_SHARED | MAP_ANONYMOUS, |
3234 | 0); | 3247 | 0); |
3248 | up_write(&current->mm->mmap_sem); | ||
3235 | 3249 | ||
3236 | if (IS_ERR((void *)memslot->userspace_addr)) | 3250 | if (IS_ERR((void *)memslot->userspace_addr)) |
3237 | return PTR_ERR((void *)memslot->userspace_addr); | 3251 | return PTR_ERR((void *)memslot->userspace_addr); |
@@ -3239,8 +3253,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm, | |||
3239 | if (!old.user_alloc && old.rmap) { | 3253 | if (!old.user_alloc && old.rmap) { |
3240 | int ret; | 3254 | int ret; |
3241 | 3255 | ||
3256 | down_write(&current->mm->mmap_sem); | ||
3242 | ret = do_munmap(current->mm, old.userspace_addr, | 3257 | ret = do_munmap(current->mm, old.userspace_addr, |
3243 | old.npages * PAGE_SIZE); | 3258 | old.npages * PAGE_SIZE); |
3259 | up_write(&current->mm->mmap_sem); | ||
3244 | if (ret < 0) | 3260 | if (ret < 0) |
3245 | printk(KERN_WARNING | 3261 | printk(KERN_WARNING |
3246 | "kvm_vm_ioctl_set_memory_region: " | 3262 | "kvm_vm_ioctl_set_memory_region: " |
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 5afdde4895dc..cccb38a59653 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -57,6 +57,7 @@ | |||
57 | #include <linux/lguest_launcher.h> | 57 | #include <linux/lguest_launcher.h> |
58 | #include <linux/virtio_console.h> | 58 | #include <linux/virtio_console.h> |
59 | #include <linux/pm.h> | 59 | #include <linux/pm.h> |
60 | #include <asm/lguest.h> | ||
60 | #include <asm/paravirt.h> | 61 | #include <asm/paravirt.h> |
61 | #include <asm/param.h> | 62 | #include <asm/param.h> |
62 | #include <asm/page.h> | 63 | #include <asm/page.h> |
@@ -75,15 +76,6 @@ | |||
75 | * behaving in simplified but equivalent ways. In particular, the Guest is the | 76 | * behaving in simplified but equivalent ways. In particular, the Guest is the |
76 | * same kernel as the Host (or at least, built from the same source code). :*/ | 77 | * same kernel as the Host (or at least, built from the same source code). :*/ |
77 | 78 | ||
78 | /* Declarations for definitions in lguest_guest.S */ | ||
79 | extern char lguest_noirq_start[], lguest_noirq_end[]; | ||
80 | extern const char lgstart_cli[], lgend_cli[]; | ||
81 | extern const char lgstart_sti[], lgend_sti[]; | ||
82 | extern const char lgstart_popf[], lgend_popf[]; | ||
83 | extern const char lgstart_pushf[], lgend_pushf[]; | ||
84 | extern const char lgstart_iret[], lgend_iret[]; | ||
85 | extern void lguest_iret(void); | ||
86 | |||
87 | struct lguest_data lguest_data = { | 79 | struct lguest_data lguest_data = { |
88 | .hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF }, | 80 | .hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF }, |
89 | .noirq_start = (u32)lguest_noirq_start, | 81 | .noirq_start = (u32)lguest_noirq_start, |
@@ -489,7 +481,7 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) | |||
489 | { | 481 | { |
490 | *pmdp = pmdval; | 482 | *pmdp = pmdval; |
491 | lazy_hcall(LHCALL_SET_PMD, __pa(pmdp)&PAGE_MASK, | 483 | lazy_hcall(LHCALL_SET_PMD, __pa(pmdp)&PAGE_MASK, |
492 | (__pa(pmdp)&(PAGE_SIZE-1))/4, 0); | 484 | (__pa(pmdp)&(PAGE_SIZE-1)), 0); |
493 | } | 485 | } |
494 | 486 | ||
495 | /* There are a couple of legacy places where the kernel sets a PTE, but we | 487 | /* There are a couple of legacy places where the kernel sets a PTE, but we |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index bb652f5a93fb..a02a14f0f324 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -172,8 +172,9 @@ set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot) | |||
172 | } | 172 | } |
173 | 173 | ||
174 | /* | 174 | /* |
175 | * The head.S code sets up the kernel high mapping from: | 175 | * The head.S code sets up the kernel high mapping: |
176 | * __START_KERNEL_map to __START_KERNEL_map + KERNEL_TEXT_SIZE | 176 | * |
177 | * from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text) | ||
177 | * | 178 | * |
178 | * phys_addr holds the negative offset to the kernel, which is added | 179 | * phys_addr holds the negative offset to the kernel, which is added |
179 | * to the compile time generated pmds. This results in invalid pmds up | 180 | * to the compile time generated pmds. This results in invalid pmds up |
@@ -515,14 +516,6 @@ void __init mem_init(void) | |||
515 | 516 | ||
516 | /* clear_bss() already clear the empty_zero_page */ | 517 | /* clear_bss() already clear the empty_zero_page */ |
517 | 518 | ||
518 | /* temporary debugging - double check it's true: */ | ||
519 | { | ||
520 | int i; | ||
521 | |||
522 | for (i = 0; i < 1024; i++) | ||
523 | WARN_ON_ONCE(empty_zero_page[i]); | ||
524 | } | ||
525 | |||
526 | reservedpages = 0; | 519 | reservedpages = 0; |
527 | 520 | ||
528 | /* this will put all low memory onto the freelists */ | 521 | /* this will put all low memory onto the freelists */ |
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 882328efc3db..ac3c959e271d 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -162,7 +162,7 @@ static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, | |||
162 | area->phys_addr = phys_addr; | 162 | area->phys_addr = phys_addr; |
163 | vaddr = (unsigned long) area->addr; | 163 | vaddr = (unsigned long) area->addr; |
164 | if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) { | 164 | if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) { |
165 | remove_vm_area((void *)(vaddr & PAGE_MASK)); | 165 | free_vm_area(area); |
166 | return NULL; | 166 | return NULL; |
167 | } | 167 | } |
168 | 168 | ||
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 59898fb0a4aa..8ccfee10f5b5 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c | |||
@@ -622,13 +622,17 @@ void __init init_cpu_to_node(void) | |||
622 | int i; | 622 | int i; |
623 | 623 | ||
624 | for (i = 0; i < NR_CPUS; i++) { | 624 | for (i = 0; i < NR_CPUS; i++) { |
625 | int node; | ||
625 | u16 apicid = x86_cpu_to_apicid_init[i]; | 626 | u16 apicid = x86_cpu_to_apicid_init[i]; |
626 | 627 | ||
627 | if (apicid == BAD_APICID) | 628 | if (apicid == BAD_APICID) |
628 | continue; | 629 | continue; |
629 | if (apicid_to_node[apicid] == NUMA_NO_NODE) | 630 | node = apicid_to_node[apicid]; |
631 | if (node == NUMA_NO_NODE) | ||
630 | continue; | 632 | continue; |
631 | numa_set_node(i, apicid_to_node[apicid]); | 633 | if (!node_online(node)) |
634 | continue; | ||
635 | numa_set_node(i, node); | ||
632 | } | 636 | } |
633 | } | 637 | } |
634 | 638 | ||
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 464d8fc21ce6..14e48b5a94ba 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -44,6 +44,12 @@ static inline unsigned long highmap_end_pfn(void) | |||
44 | 44 | ||
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
48 | # define debug_pagealloc 1 | ||
49 | #else | ||
50 | # define debug_pagealloc 0 | ||
51 | #endif | ||
52 | |||
47 | static inline int | 53 | static inline int |
48 | within(unsigned long addr, unsigned long start, unsigned long end) | 54 | within(unsigned long addr, unsigned long start, unsigned long end) |
49 | { | 55 | { |
@@ -355,45 +361,48 @@ out_unlock: | |||
355 | 361 | ||
356 | static LIST_HEAD(page_pool); | 362 | static LIST_HEAD(page_pool); |
357 | static unsigned long pool_size, pool_pages, pool_low; | 363 | static unsigned long pool_size, pool_pages, pool_low; |
358 | static unsigned long pool_used, pool_failed, pool_refill; | 364 | static unsigned long pool_used, pool_failed; |
359 | 365 | ||
360 | static void cpa_fill_pool(void) | 366 | static void cpa_fill_pool(struct page **ret) |
361 | { | 367 | { |
362 | struct page *p; | ||
363 | gfp_t gfp = GFP_KERNEL; | 368 | gfp_t gfp = GFP_KERNEL; |
369 | unsigned long flags; | ||
370 | struct page *p; | ||
364 | 371 | ||
365 | /* Do not allocate from interrupt context */ | ||
366 | if (in_irq() || irqs_disabled()) | ||
367 | return; | ||
368 | /* | 372 | /* |
369 | * Check unlocked. I does not matter when we have one more | 373 | * Avoid recursion (on debug-pagealloc) and also signal |
370 | * page in the pool. The bit lock avoids recursive pool | 374 | * our priority to get to these pagetables: |
371 | * allocations: | ||
372 | */ | 375 | */ |
373 | if (pool_pages >= pool_size || test_and_set_bit_lock(0, &pool_refill)) | 376 | if (current->flags & PF_MEMALLOC) |
374 | return; | 377 | return; |
378 | current->flags |= PF_MEMALLOC; | ||
375 | 379 | ||
376 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
377 | /* | 380 | /* |
378 | * We could do: | 381 | * Allocate atomically from atomic contexts: |
379 | * gfp = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; | ||
380 | * but this fails on !PREEMPT kernels | ||
381 | */ | 382 | */ |
382 | gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; | 383 | if (in_atomic() || irqs_disabled() || debug_pagealloc) |
383 | #endif | 384 | gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; |
384 | 385 | ||
385 | while (pool_pages < pool_size) { | 386 | while (pool_pages < pool_size || (ret && !*ret)) { |
386 | p = alloc_pages(gfp, 0); | 387 | p = alloc_pages(gfp, 0); |
387 | if (!p) { | 388 | if (!p) { |
388 | pool_failed++; | 389 | pool_failed++; |
389 | break; | 390 | break; |
390 | } | 391 | } |
391 | spin_lock_irq(&pgd_lock); | 392 | /* |
393 | * If the call site needs a page right now, provide it: | ||
394 | */ | ||
395 | if (ret && !*ret) { | ||
396 | *ret = p; | ||
397 | continue; | ||
398 | } | ||
399 | spin_lock_irqsave(&pgd_lock, flags); | ||
392 | list_add(&p->lru, &page_pool); | 400 | list_add(&p->lru, &page_pool); |
393 | pool_pages++; | 401 | pool_pages++; |
394 | spin_unlock_irq(&pgd_lock); | 402 | spin_unlock_irqrestore(&pgd_lock, flags); |
395 | } | 403 | } |
396 | clear_bit_unlock(0, &pool_refill); | 404 | |
405 | current->flags &= ~PF_MEMALLOC; | ||
397 | } | 406 | } |
398 | 407 | ||
399 | #define SHIFT_MB (20 - PAGE_SHIFT) | 408 | #define SHIFT_MB (20 - PAGE_SHIFT) |
@@ -414,11 +423,15 @@ void __init cpa_init(void) | |||
414 | * GiB. Shift MiB to Gib and multiply the result by | 423 | * GiB. Shift MiB to Gib and multiply the result by |
415 | * POOL_PAGES_PER_GB: | 424 | * POOL_PAGES_PER_GB: |
416 | */ | 425 | */ |
417 | gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB; | 426 | if (debug_pagealloc) { |
418 | pool_size = POOL_PAGES_PER_GB * gb; | 427 | gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB; |
428 | pool_size = POOL_PAGES_PER_GB * gb; | ||
429 | } else { | ||
430 | pool_size = 1; | ||
431 | } | ||
419 | pool_low = pool_size; | 432 | pool_low = pool_size; |
420 | 433 | ||
421 | cpa_fill_pool(); | 434 | cpa_fill_pool(NULL); |
422 | printk(KERN_DEBUG | 435 | printk(KERN_DEBUG |
423 | "CPA: page pool initialized %lu of %lu pages preallocated\n", | 436 | "CPA: page pool initialized %lu of %lu pages preallocated\n", |
424 | pool_pages, pool_size); | 437 | pool_pages, pool_size); |
@@ -440,16 +453,20 @@ static int split_large_page(pte_t *kpte, unsigned long address) | |||
440 | spin_lock_irqsave(&pgd_lock, flags); | 453 | spin_lock_irqsave(&pgd_lock, flags); |
441 | if (list_empty(&page_pool)) { | 454 | if (list_empty(&page_pool)) { |
442 | spin_unlock_irqrestore(&pgd_lock, flags); | 455 | spin_unlock_irqrestore(&pgd_lock, flags); |
443 | return -ENOMEM; | 456 | base = NULL; |
457 | cpa_fill_pool(&base); | ||
458 | if (!base) | ||
459 | return -ENOMEM; | ||
460 | spin_lock_irqsave(&pgd_lock, flags); | ||
461 | } else { | ||
462 | base = list_first_entry(&page_pool, struct page, lru); | ||
463 | list_del(&base->lru); | ||
464 | pool_pages--; | ||
465 | |||
466 | if (pool_pages < pool_low) | ||
467 | pool_low = pool_pages; | ||
444 | } | 468 | } |
445 | 469 | ||
446 | base = list_first_entry(&page_pool, struct page, lru); | ||
447 | list_del(&base->lru); | ||
448 | pool_pages--; | ||
449 | |||
450 | if (pool_pages < pool_low) | ||
451 | pool_low = pool_pages; | ||
452 | |||
453 | /* | 470 | /* |
454 | * Check for races, another CPU might have split this page | 471 | * Check for races, another CPU might have split this page |
455 | * up for us already: | 472 | * up for us already: |
@@ -734,7 +751,8 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages, | |||
734 | cpa_flush_all(cache); | 751 | cpa_flush_all(cache); |
735 | 752 | ||
736 | out: | 753 | out: |
737 | cpa_fill_pool(); | 754 | cpa_fill_pool(NULL); |
755 | |||
738 | return ret; | 756 | return ret; |
739 | } | 757 | } |
740 | 758 | ||
@@ -897,7 +915,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable) | |||
897 | * Try to refill the page pool here. We can do this only after | 915 | * Try to refill the page pool here. We can do this only after |
898 | * the tlb flush. | 916 | * the tlb flush. |
899 | */ | 917 | */ |
900 | cpa_fill_pool(); | 918 | cpa_fill_pool(NULL); |
901 | } | 919 | } |
902 | 920 | ||
903 | #ifdef CONFIG_HIBERNATION | 921 | #ifdef CONFIG_HIBERNATION |
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile index f385a4b4a484..0a8f4742ef51 100644 --- a/arch/x86/vdso/Makefile +++ b/arch/x86/vdso/Makefile | |||
@@ -50,7 +50,9 @@ obj-$(VDSO64-y) += vdso-syms.lds | |||
50 | sed-vdsosym := -e 's/^00*/0/' \ | 50 | sed-vdsosym := -e 's/^00*/0/' \ |
51 | -e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p' | 51 | -e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p' |
52 | quiet_cmd_vdsosym = VDSOSYM $@ | 52 | quiet_cmd_vdsosym = VDSOSYM $@ |
53 | cmd_vdsosym = $(NM) $< | sed -n $(sed-vdsosym) | LC_ALL=C sort > $@ | 53 | define cmd_vdsosym |
54 | $(NM) $< | LC_ALL=C sed -n $(sed-vdsosym) | LC_ALL=C sort > $@ | ||
55 | endef | ||
54 | 56 | ||
55 | $(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE | 57 | $(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE |
56 | $(call if_changed,vdsosym) | 58 | $(call if_changed,vdsosym) |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 49e5358f481a..8b9ee27805fd 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -153,6 +153,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, | |||
153 | if (*ax == 1) | 153 | if (*ax == 1) |
154 | maskedx = ~((1 << X86_FEATURE_APIC) | /* disable APIC */ | 154 | maskedx = ~((1 << X86_FEATURE_APIC) | /* disable APIC */ |
155 | (1 << X86_FEATURE_ACPI) | /* disable ACPI */ | 155 | (1 << X86_FEATURE_ACPI) | /* disable ACPI */ |
156 | (1 << X86_FEATURE_SEP) | /* disable SEP */ | ||
156 | (1 << X86_FEATURE_ACC)); /* thermal monitoring */ | 157 | (1 << X86_FEATURE_ACC)); /* thermal monitoring */ |
157 | 158 | ||
158 | asm(XEN_EMULATE_PREFIX "cpuid" | 159 | asm(XEN_EMULATE_PREFIX "cpuid" |
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 3bad4773a2f3..2341492bf7a0 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -38,7 +38,8 @@ char * __init xen_memory_setup(void) | |||
38 | unsigned long max_pfn = xen_start_info->nr_pages; | 38 | unsigned long max_pfn = xen_start_info->nr_pages; |
39 | 39 | ||
40 | e820.nr_map = 0; | 40 | e820.nr_map = 0; |
41 | add_memory_region(0, PFN_PHYS(max_pfn), E820_RAM); | 41 | add_memory_region(0, LOWMEMSIZE(), E820_RAM); |
42 | add_memory_region(HIGH_MEMORY, PFN_PHYS(max_pfn)-HIGH_MEMORY, E820_RAM); | ||
42 | 43 | ||
43 | return "Xen"; | 44 | return "Xen"; |
44 | } | 45 | } |
diff --git a/block/blk-barrier.c b/block/blk-barrier.c index 6901eedeffce..55c5f1fc4f1f 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c | |||
@@ -259,8 +259,11 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp) | |||
259 | 259 | ||
260 | static void bio_end_empty_barrier(struct bio *bio, int err) | 260 | static void bio_end_empty_barrier(struct bio *bio, int err) |
261 | { | 261 | { |
262 | if (err) | 262 | if (err) { |
263 | if (err == -EOPNOTSUPP) | ||
264 | set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); | ||
263 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | 265 | clear_bit(BIO_UPTODATE, &bio->bi_flags); |
266 | } | ||
264 | 267 | ||
265 | complete(bio->bi_private); | 268 | complete(bio->bi_private); |
266 | } | 269 | } |
@@ -309,7 +312,9 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) | |||
309 | *error_sector = bio->bi_sector; | 312 | *error_sector = bio->bi_sector; |
310 | 313 | ||
311 | ret = 0; | 314 | ret = 0; |
312 | if (!bio_flagged(bio, BIO_UPTODATE)) | 315 | if (bio_flagged(bio, BIO_EOPNOTSUPP)) |
316 | ret = -EOPNOTSUPP; | ||
317 | else if (!bio_flagged(bio, BIO_UPTODATE)) | ||
313 | ret = -EIO; | 318 | ret = -EIO; |
314 | 319 | ||
315 | bio_put(bio); | 320 | bio_put(bio); |
diff --git a/block/blk-core.c b/block/blk-core.c index 775c8516abf5..2a438a93f723 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -127,7 +127,6 @@ void rq_init(struct request_queue *q, struct request *rq) | |||
127 | rq->nr_hw_segments = 0; | 127 | rq->nr_hw_segments = 0; |
128 | rq->ioprio = 0; | 128 | rq->ioprio = 0; |
129 | rq->special = NULL; | 129 | rq->special = NULL; |
130 | rq->raw_data_len = 0; | ||
131 | rq->buffer = NULL; | 130 | rq->buffer = NULL; |
132 | rq->tag = -1; | 131 | rq->tag = -1; |
133 | rq->errors = 0; | 132 | rq->errors = 0; |
@@ -135,6 +134,7 @@ void rq_init(struct request_queue *q, struct request *rq) | |||
135 | rq->cmd_len = 0; | 134 | rq->cmd_len = 0; |
136 | memset(rq->cmd, 0, sizeof(rq->cmd)); | 135 | memset(rq->cmd, 0, sizeof(rq->cmd)); |
137 | rq->data_len = 0; | 136 | rq->data_len = 0; |
137 | rq->extra_len = 0; | ||
138 | rq->sense_len = 0; | 138 | rq->sense_len = 0; |
139 | rq->data = NULL; | 139 | rq->data = NULL; |
140 | rq->sense = NULL; | 140 | rq->sense = NULL; |
@@ -424,7 +424,6 @@ void blk_put_queue(struct request_queue *q) | |||
424 | { | 424 | { |
425 | kobject_put(&q->kobj); | 425 | kobject_put(&q->kobj); |
426 | } | 426 | } |
427 | EXPORT_SYMBOL(blk_put_queue); | ||
428 | 427 | ||
429 | void blk_cleanup_queue(struct request_queue *q) | 428 | void blk_cleanup_queue(struct request_queue *q) |
430 | { | 429 | { |
@@ -592,7 +591,6 @@ int blk_get_queue(struct request_queue *q) | |||
592 | 591 | ||
593 | return 1; | 592 | return 1; |
594 | } | 593 | } |
595 | EXPORT_SYMBOL(blk_get_queue); | ||
596 | 594 | ||
597 | static inline void blk_free_request(struct request_queue *q, struct request *rq) | 595 | static inline void blk_free_request(struct request_queue *q, struct request *rq) |
598 | { | 596 | { |
@@ -1768,6 +1766,7 @@ static inline void __end_request(struct request *rq, int uptodate, | |||
1768 | 1766 | ||
1769 | /** | 1767 | /** |
1770 | * blk_rq_bytes - Returns bytes left to complete in the entire request | 1768 | * blk_rq_bytes - Returns bytes left to complete in the entire request |
1769 | * @rq: the request being processed | ||
1771 | **/ | 1770 | **/ |
1772 | unsigned int blk_rq_bytes(struct request *rq) | 1771 | unsigned int blk_rq_bytes(struct request *rq) |
1773 | { | 1772 | { |
@@ -1780,6 +1779,7 @@ EXPORT_SYMBOL_GPL(blk_rq_bytes); | |||
1780 | 1779 | ||
1781 | /** | 1780 | /** |
1782 | * blk_rq_cur_bytes - Returns bytes left to complete in the current segment | 1781 | * blk_rq_cur_bytes - Returns bytes left to complete in the current segment |
1782 | * @rq: the request being processed | ||
1783 | **/ | 1783 | **/ |
1784 | unsigned int blk_rq_cur_bytes(struct request *rq) | 1784 | unsigned int blk_rq_cur_bytes(struct request *rq) |
1785 | { | 1785 | { |
@@ -2016,7 +2016,6 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | |||
2016 | rq->hard_cur_sectors = rq->current_nr_sectors; | 2016 | rq->hard_cur_sectors = rq->current_nr_sectors; |
2017 | rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); | 2017 | rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); |
2018 | rq->buffer = bio_data(bio); | 2018 | rq->buffer = bio_data(bio); |
2019 | rq->raw_data_len = bio->bi_size; | ||
2020 | rq->data_len = bio->bi_size; | 2019 | rq->data_len = bio->bi_size; |
2021 | 2020 | ||
2022 | rq->bio = rq->biotail = bio; | 2021 | rq->bio = rq->biotail = bio; |
diff --git a/block/blk-map.c b/block/blk-map.c index 09f7fd0bcb73..c07d9c8317f4 100644 --- a/block/blk-map.c +++ b/block/blk-map.c | |||
@@ -19,7 +19,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq, | |||
19 | rq->biotail->bi_next = bio; | 19 | rq->biotail->bi_next = bio; |
20 | rq->biotail = bio; | 20 | rq->biotail = bio; |
21 | 21 | ||
22 | rq->raw_data_len += bio->bi_size; | ||
23 | rq->data_len += bio->bi_size; | 22 | rq->data_len += bio->bi_size; |
24 | } | 23 | } |
25 | return 0; | 24 | return 0; |
@@ -44,6 +43,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq, | |||
44 | void __user *ubuf, unsigned int len) | 43 | void __user *ubuf, unsigned int len) |
45 | { | 44 | { |
46 | unsigned long uaddr; | 45 | unsigned long uaddr; |
46 | unsigned int alignment; | ||
47 | struct bio *bio, *orig_bio; | 47 | struct bio *bio, *orig_bio; |
48 | int reading, ret; | 48 | int reading, ret; |
49 | 49 | ||
@@ -54,8 +54,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq, | |||
54 | * direct dma. else, set up kernel bounce buffers | 54 | * direct dma. else, set up kernel bounce buffers |
55 | */ | 55 | */ |
56 | uaddr = (unsigned long) ubuf; | 56 | uaddr = (unsigned long) ubuf; |
57 | if (!(uaddr & queue_dma_alignment(q)) && | 57 | alignment = queue_dma_alignment(q) | q->dma_pad_mask; |
58 | !(len & queue_dma_alignment(q))) | 58 | if (!(uaddr & alignment) && !(len & alignment)) |
59 | bio = bio_map_user(q, NULL, uaddr, len, reading); | 59 | bio = bio_map_user(q, NULL, uaddr, len, reading); |
60 | else | 60 | else |
61 | bio = bio_copy_user(q, uaddr, len, reading); | 61 | bio = bio_copy_user(q, uaddr, len, reading); |
@@ -142,20 +142,22 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq, | |||
142 | 142 | ||
143 | /* | 143 | /* |
144 | * __blk_rq_map_user() copies the buffers if starting address | 144 | * __blk_rq_map_user() copies the buffers if starting address |
145 | * or length isn't aligned. As the copied buffer is always | 145 | * or length isn't aligned to dma_pad_mask. As the copied |
146 | * page aligned, we know that there's enough room for padding. | 146 | * buffer is always page aligned, we know that there's enough |
147 | * Extend the last bio and update rq->data_len accordingly. | 147 | * room for padding. Extend the last bio and update |
148 | * rq->data_len accordingly. | ||
148 | * | 149 | * |
149 | * On unmap, bio_uncopy_user() will use unmodified | 150 | * On unmap, bio_uncopy_user() will use unmodified |
150 | * bio_map_data pointed to by bio->bi_private. | 151 | * bio_map_data pointed to by bio->bi_private. |
151 | */ | 152 | */ |
152 | if (len & queue_dma_alignment(q)) { | 153 | if (len & q->dma_pad_mask) { |
153 | unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1; | 154 | unsigned int pad_len = (q->dma_pad_mask & ~len) + 1; |
154 | struct bio *bio = rq->biotail; | 155 | struct bio *tail = rq->biotail; |
155 | 156 | ||
156 | bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len; | 157 | tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len; |
157 | bio->bi_size += pad_len; | 158 | tail->bi_size += pad_len; |
158 | rq->data_len += pad_len; | 159 | |
160 | rq->extra_len += pad_len; | ||
159 | } | 161 | } |
160 | 162 | ||
161 | rq->buffer = rq->data = NULL; | 163 | rq->buffer = rq->data = NULL; |
@@ -215,7 +217,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, | |||
215 | rq->buffer = rq->data = NULL; | 217 | rq->buffer = rq->data = NULL; |
216 | return 0; | 218 | return 0; |
217 | } | 219 | } |
218 | EXPORT_SYMBOL(blk_rq_map_user_iov); | ||
219 | 220 | ||
220 | /** | 221 | /** |
221 | * blk_rq_unmap_user - unmap a request with user data | 222 | * blk_rq_unmap_user - unmap a request with user data |
diff --git a/block/blk-merge.c b/block/blk-merge.c index 7506c4fe0264..0f58616bcd7f 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -231,7 +231,7 @@ new_segment: | |||
231 | ((unsigned long)q->dma_drain_buffer) & | 231 | ((unsigned long)q->dma_drain_buffer) & |
232 | (PAGE_SIZE - 1)); | 232 | (PAGE_SIZE - 1)); |
233 | nsegs++; | 233 | nsegs++; |
234 | rq->data_len += q->dma_drain_size; | 234 | rq->extra_len += q->dma_drain_size; |
235 | } | 235 | } |
236 | 236 | ||
237 | if (sg) | 237 | if (sg) |
diff --git a/block/blk-settings.c b/block/blk-settings.c index 9a8ffdd0ce3d..1344a0ea5cc6 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c | |||
@@ -140,7 +140,7 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) | |||
140 | /* Assume anything <= 4GB can be handled by IOMMU. | 140 | /* Assume anything <= 4GB can be handled by IOMMU. |
141 | Actually some IOMMUs can handle everything, but I don't | 141 | Actually some IOMMUs can handle everything, but I don't |
142 | know of a way to test this here. */ | 142 | know of a way to test this here. */ |
143 | if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) | 143 | if (b_pfn <= (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) |
144 | dma = 1; | 144 | dma = 1; |
145 | q->bounce_pfn = max_low_pfn; | 145 | q->bounce_pfn = max_low_pfn; |
146 | #else | 146 | #else |
@@ -293,8 +293,24 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) | |||
293 | EXPORT_SYMBOL(blk_queue_stack_limits); | 293 | EXPORT_SYMBOL(blk_queue_stack_limits); |
294 | 294 | ||
295 | /** | 295 | /** |
296 | * blk_queue_dma_drain - Set up a drain buffer for excess dma. | 296 | * blk_queue_dma_pad - set pad mask |
297 | * @q: the request queue for the device | ||
298 | * @mask: pad mask | ||
299 | * | ||
300 | * Set pad mask. Direct IO requests are padded to the mask specified. | ||
297 | * | 301 | * |
302 | * Appending pad buffer to a request modifies ->data_len such that it | ||
303 | * includes the pad buffer. The original requested data length can be | ||
304 | * obtained using blk_rq_raw_data_len(). | ||
305 | **/ | ||
306 | void blk_queue_dma_pad(struct request_queue *q, unsigned int mask) | ||
307 | { | ||
308 | q->dma_pad_mask = mask; | ||
309 | } | ||
310 | EXPORT_SYMBOL(blk_queue_dma_pad); | ||
311 | |||
312 | /** | ||
313 | * blk_queue_dma_drain - Set up a drain buffer for excess dma. | ||
298 | * @q: the request queue for the device | 314 | * @q: the request queue for the device |
299 | * @dma_drain_needed: fn which returns non-zero if drain is necessary | 315 | * @dma_drain_needed: fn which returns non-zero if drain is necessary |
300 | * @buf: physically contiguous buffer | 316 | * @buf: physically contiguous buffer |
@@ -316,7 +332,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits); | |||
316 | * device can support otherwise there won't be room for the drain | 332 | * device can support otherwise there won't be room for the drain |
317 | * buffer. | 333 | * buffer. |
318 | */ | 334 | */ |
319 | extern int blk_queue_dma_drain(struct request_queue *q, | 335 | int blk_queue_dma_drain(struct request_queue *q, |
320 | dma_drain_needed_fn *dma_drain_needed, | 336 | dma_drain_needed_fn *dma_drain_needed, |
321 | void *buf, unsigned int size) | 337 | void *buf, unsigned int size) |
322 | { | 338 | { |
diff --git a/block/blk-tag.c b/block/blk-tag.c index a8c37d4bbb32..4780a46ce234 100644 --- a/block/blk-tag.c +++ b/block/blk-tag.c | |||
@@ -6,6 +6,8 @@ | |||
6 | #include <linux/bio.h> | 6 | #include <linux/bio.h> |
7 | #include <linux/blkdev.h> | 7 | #include <linux/blkdev.h> |
8 | 8 | ||
9 | #include "blk.h" | ||
10 | |||
9 | /** | 11 | /** |
10 | * blk_queue_find_tag - find a request by its tag and queue | 12 | * blk_queue_find_tag - find a request by its tag and queue |
11 | * @q: The request queue for the device | 13 | * @q: The request queue for the device |
diff --git a/block/blk.h b/block/blk.h index ec898dd0c65c..ec9120fb789a 100644 --- a/block/blk.h +++ b/block/blk.h | |||
@@ -32,6 +32,8 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect); | |||
32 | 32 | ||
33 | void blk_queue_congestion_threshold(struct request_queue *q); | 33 | void blk_queue_congestion_threshold(struct request_queue *q); |
34 | 34 | ||
35 | int blk_dev_init(void); | ||
36 | |||
35 | /* | 37 | /* |
36 | * Return the threshold (number of used requests) at which the queue is | 38 | * Return the threshold (number of used requests) at which the queue is |
37 | * considered to be congested. It include a little hysteresis to keep the | 39 | * considered to be congested. It include a little hysteresis to keep the |
diff --git a/block/bsg.c b/block/bsg.c index 7f3c09549e4b..8917c5174dc2 100644 --- a/block/bsg.c +++ b/block/bsg.c | |||
@@ -437,14 +437,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, | |||
437 | } | 437 | } |
438 | 438 | ||
439 | if (rq->next_rq) { | 439 | if (rq->next_rq) { |
440 | hdr->dout_resid = rq->raw_data_len; | 440 | hdr->dout_resid = rq->data_len; |
441 | hdr->din_resid = rq->next_rq->raw_data_len; | 441 | hdr->din_resid = rq->next_rq->data_len; |
442 | blk_rq_unmap_user(bidi_bio); | 442 | blk_rq_unmap_user(bidi_bio); |
443 | blk_put_request(rq->next_rq); | 443 | blk_put_request(rq->next_rq); |
444 | } else if (rq_data_dir(rq) == READ) | 444 | } else if (rq_data_dir(rq) == READ) |
445 | hdr->din_resid = rq->raw_data_len; | 445 | hdr->din_resid = rq->data_len; |
446 | else | 446 | else |
447 | hdr->dout_resid = rq->raw_data_len; | 447 | hdr->dout_resid = rq->data_len; |
448 | 448 | ||
449 | /* | 449 | /* |
450 | * If the request generated a negative error number, return it | 450 | * If the request generated a negative error number, return it |
diff --git a/block/genhd.c b/block/genhd.c index 53f2238e69c8..c44527d16c52 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
@@ -17,11 +17,15 @@ | |||
17 | #include <linux/buffer_head.h> | 17 | #include <linux/buffer_head.h> |
18 | #include <linux/mutex.h> | 18 | #include <linux/mutex.h> |
19 | 19 | ||
20 | #include "blk.h" | ||
21 | |||
20 | static DEFINE_MUTEX(block_class_lock); | 22 | static DEFINE_MUTEX(block_class_lock); |
21 | #ifndef CONFIG_SYSFS_DEPRECATED | 23 | #ifndef CONFIG_SYSFS_DEPRECATED |
22 | struct kobject *block_depr; | 24 | struct kobject *block_depr; |
23 | #endif | 25 | #endif |
24 | 26 | ||
27 | static struct device_type disk_type; | ||
28 | |||
25 | /* | 29 | /* |
26 | * Can be deleted altogether. Later. | 30 | * Can be deleted altogether. Later. |
27 | * | 31 | * |
@@ -346,8 +350,6 @@ const struct seq_operations partitions_op = { | |||
346 | #endif | 350 | #endif |
347 | 351 | ||
348 | 352 | ||
349 | extern int blk_dev_init(void); | ||
350 | |||
351 | static struct kobject *base_probe(dev_t devt, int *part, void *data) | 353 | static struct kobject *base_probe(dev_t devt, int *part, void *data) |
352 | { | 354 | { |
353 | if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0) | 355 | if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0) |
@@ -502,7 +504,7 @@ struct class block_class = { | |||
502 | .name = "block", | 504 | .name = "block", |
503 | }; | 505 | }; |
504 | 506 | ||
505 | struct device_type disk_type = { | 507 | static struct device_type disk_type = { |
506 | .name = "disk", | 508 | .name = "disk", |
507 | .groups = disk_attr_groups, | 509 | .groups = disk_attr_groups, |
508 | .release = disk_release, | 510 | .release = disk_release, |
@@ -632,12 +634,14 @@ static void media_change_notify_thread(struct work_struct *work) | |||
632 | put_device(gd->driverfs_dev); | 634 | put_device(gd->driverfs_dev); |
633 | } | 635 | } |
634 | 636 | ||
637 | #if 0 | ||
635 | void genhd_media_change_notify(struct gendisk *disk) | 638 | void genhd_media_change_notify(struct gendisk *disk) |
636 | { | 639 | { |
637 | get_device(disk->driverfs_dev); | 640 | get_device(disk->driverfs_dev); |
638 | schedule_work(&disk->async_notify); | 641 | schedule_work(&disk->async_notify); |
639 | } | 642 | } |
640 | EXPORT_SYMBOL_GPL(genhd_media_change_notify); | 643 | EXPORT_SYMBOL_GPL(genhd_media_change_notify); |
644 | #endif /* 0 */ | ||
641 | 645 | ||
642 | dev_t blk_lookup_devt(const char *name) | 646 | dev_t blk_lookup_devt(const char *name) |
643 | { | 647 | { |
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index e993cac4911d..a2c3a936ebf9 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c | |||
@@ -266,7 +266,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, | |||
266 | hdr->info = 0; | 266 | hdr->info = 0; |
267 | if (hdr->masked_status || hdr->host_status || hdr->driver_status) | 267 | if (hdr->masked_status || hdr->host_status || hdr->driver_status) |
268 | hdr->info |= SG_INFO_CHECK; | 268 | hdr->info |= SG_INFO_CHECK; |
269 | hdr->resid = rq->raw_data_len; | 269 | hdr->resid = rq->data_len; |
270 | hdr->sb_len_wr = 0; | 270 | hdr->sb_len_wr = 0; |
271 | 271 | ||
272 | if (rq->sense_len && hdr->sbp) { | 272 | if (rq->sense_len && hdr->sbp) { |
@@ -528,8 +528,8 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk, | |||
528 | rq = blk_get_request(q, WRITE, __GFP_WAIT); | 528 | rq = blk_get_request(q, WRITE, __GFP_WAIT); |
529 | rq->cmd_type = REQ_TYPE_BLOCK_PC; | 529 | rq->cmd_type = REQ_TYPE_BLOCK_PC; |
530 | rq->data = NULL; | 530 | rq->data = NULL; |
531 | rq->raw_data_len = 0; | ||
532 | rq->data_len = 0; | 531 | rq->data_len = 0; |
532 | rq->extra_len = 0; | ||
533 | rq->timeout = BLK_DEFAULT_SG_TIMEOUT; | 533 | rq->timeout = BLK_DEFAULT_SG_TIMEOUT; |
534 | memset(rq->cmd, 0, sizeof(rq->cmd)); | 534 | memset(rq->cmd, 0, sizeof(rq->cmd)); |
535 | rq->cmd[0] = cmd; | 535 | rq->cmd[0] = cmd; |
diff --git a/drivers/acorn/char/defkeymap-l7200.c b/drivers/acorn/char/defkeymap-l7200.c index 28a5fbc6aa1a..93d80a1c36f9 100644 --- a/drivers/acorn/char/defkeymap-l7200.c +++ b/drivers/acorn/char/defkeymap-l7200.c | |||
@@ -347,40 +347,40 @@ char *func_table[MAX_NR_FUNC] = { | |||
347 | }; | 347 | }; |
348 | 348 | ||
349 | struct kbdiacruc accent_table[MAX_DIACR] = { | 349 | struct kbdiacruc accent_table[MAX_DIACR] = { |
350 | {'`', 'A', '\300'}, {'`', 'a', '\340'}, | 350 | {'`', 'A', 0300}, {'`', 'a', 0340}, |
351 | {'\'', 'A', '\301'}, {'\'', 'a', '\341'}, | 351 | {'\'', 'A', 0301}, {'\'', 'a', 0341}, |
352 | {'^', 'A', '\302'}, {'^', 'a', '\342'}, | 352 | {'^', 'A', 0302}, {'^', 'a', 0342}, |
353 | {'~', 'A', '\303'}, {'~', 'a', '\343'}, | 353 | {'~', 'A', 0303}, {'~', 'a', 0343}, |
354 | {'"', 'A', '\304'}, {'"', 'a', '\344'}, | 354 | {'"', 'A', 0304}, {'"', 'a', 0344}, |
355 | {'O', 'A', '\305'}, {'o', 'a', '\345'}, | 355 | {'O', 'A', 0305}, {'o', 'a', 0345}, |
356 | {'0', 'A', '\305'}, {'0', 'a', '\345'}, | 356 | {'0', 'A', 0305}, {'0', 'a', 0345}, |
357 | {'A', 'A', '\305'}, {'a', 'a', '\345'}, | 357 | {'A', 'A', 0305}, {'a', 'a', 0345}, |
358 | {'A', 'E', '\306'}, {'a', 'e', '\346'}, | 358 | {'A', 'E', 0306}, {'a', 'e', 0346}, |
359 | {',', 'C', '\307'}, {',', 'c', '\347'}, | 359 | {',', 'C', 0307}, {',', 'c', 0347}, |
360 | {'`', 'E', '\310'}, {'`', 'e', '\350'}, | 360 | {'`', 'E', 0310}, {'`', 'e', 0350}, |
361 | {'\'', 'E', '\311'}, {'\'', 'e', '\351'}, | 361 | {'\'', 'E', 0311}, {'\'', 'e', 0351}, |
362 | {'^', 'E', '\312'}, {'^', 'e', '\352'}, | 362 | {'^', 'E', 0312}, {'^', 'e', 0352}, |
363 | {'"', 'E', '\313'}, {'"', 'e', '\353'}, | 363 | {'"', 'E', 0313}, {'"', 'e', 0353}, |
364 | {'`', 'I', '\314'}, {'`', 'i', '\354'}, | 364 | {'`', 'I', 0314}, {'`', 'i', 0354}, |
365 | {'\'', 'I', '\315'}, {'\'', 'i', '\355'}, | 365 | {'\'', 'I', 0315}, {'\'', 'i', 0355}, |
366 | {'^', 'I', '\316'}, {'^', 'i', '\356'}, | 366 | {'^', 'I', 0316}, {'^', 'i', 0356}, |
367 | {'"', 'I', '\317'}, {'"', 'i', '\357'}, | 367 | {'"', 'I', 0317}, {'"', 'i', 0357}, |
368 | {'-', 'D', '\320'}, {'-', 'd', '\360'}, | 368 | {'-', 'D', 0320}, {'-', 'd', 0360}, |
369 | {'~', 'N', '\321'}, {'~', 'n', '\361'}, | 369 | {'~', 'N', 0321}, {'~', 'n', 0361}, |
370 | {'`', 'O', '\322'}, {'`', 'o', '\362'}, | 370 | {'`', 'O', 0322}, {'`', 'o', 0362}, |
371 | {'\'', 'O', '\323'}, {'\'', 'o', '\363'}, | 371 | {'\'', 'O', 0323}, {'\'', 'o', 0363}, |
372 | {'^', 'O', '\324'}, {'^', 'o', '\364'}, | 372 | {'^', 'O', 0324}, {'^', 'o', 0364}, |
373 | {'~', 'O', '\325'}, {'~', 'o', '\365'}, | 373 | {'~', 'O', 0325}, {'~', 'o', 0365}, |
374 | {'"', 'O', '\326'}, {'"', 'o', '\366'}, | 374 | {'"', 'O', 0326}, {'"', 'o', 0366}, |
375 | {'/', 'O', '\330'}, {'/', 'o', '\370'}, | 375 | {'/', 'O', 0330}, {'/', 'o', 0370}, |
376 | {'`', 'U', '\331'}, {'`', 'u', '\371'}, | 376 | {'`', 'U', 0331}, {'`', 'u', 0371}, |
377 | {'\'', 'U', '\332'}, {'\'', 'u', '\372'}, | 377 | {'\'', 'U', 0332}, {'\'', 'u', 0372}, |
378 | {'^', 'U', '\333'}, {'^', 'u', '\373'}, | 378 | {'^', 'U', 0333}, {'^', 'u', 0373}, |
379 | {'"', 'U', '\334'}, {'"', 'u', '\374'}, | 379 | {'"', 'U', 0334}, {'"', 'u', 0374}, |
380 | {'\'', 'Y', '\335'}, {'\'', 'y', '\375'}, | 380 | {'\'', 'Y', 0335}, {'\'', 'y', 0375}, |
381 | {'T', 'H', '\336'}, {'t', 'h', '\376'}, | 381 | {'T', 'H', 0336}, {'t', 'h', 0376}, |
382 | {'s', 's', '\337'}, {'"', 'y', '\377'}, | 382 | {'s', 's', 0337}, {'"', 'y', 0377}, |
383 | {'s', 'z', '\337'}, {'i', 'j', '\377'}, | 383 | {'s', 'z', 0337}, {'i', 'j', 0377}, |
384 | }; | 384 | }; |
385 | 385 | ||
386 | unsigned int accent_table_size = 68; | 386 | unsigned int accent_table_size = 68; |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index fbc24358ada0..4fbcce758b04 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -113,7 +113,7 @@ int atapi_enabled = 1; | |||
113 | module_param(atapi_enabled, int, 0444); | 113 | module_param(atapi_enabled, int, 0444); |
114 | MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); | 114 | MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); |
115 | 115 | ||
116 | int atapi_dmadir = 0; | 116 | static int atapi_dmadir = 0; |
117 | module_param(atapi_dmadir, int, 0444); | 117 | module_param(atapi_dmadir, int, 0444); |
118 | MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); | 118 | MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); |
119 | 119 | ||
@@ -6567,6 +6567,8 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) | |||
6567 | ata_lpm_enable(host); | 6567 | ata_lpm_enable(host); |
6568 | 6568 | ||
6569 | rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); | 6569 | rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); |
6570 | if (rc == 0) | ||
6571 | host->dev->power.power_state = mesg; | ||
6570 | return rc; | 6572 | return rc; |
6571 | } | 6573 | } |
6572 | 6574 | ||
@@ -6585,6 +6587,7 @@ void ata_host_resume(struct ata_host *host) | |||
6585 | { | 6587 | { |
6586 | ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, | 6588 | ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, |
6587 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); | 6589 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); |
6590 | host->dev->power.power_state = PMSG_ON; | ||
6588 | 6591 | ||
6589 | /* reenable link pm */ | 6592 | /* reenable link pm */ |
6590 | ata_lpm_disable(host); | 6593 | ata_lpm_disable(host); |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 0562b0a49f3b..8f0e8f2bc628 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -862,9 +862,10 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, | |||
862 | struct request_queue *q = sdev->request_queue; | 862 | struct request_queue *q = sdev->request_queue; |
863 | void *buf; | 863 | void *buf; |
864 | 864 | ||
865 | /* set the min alignment */ | 865 | /* set the min alignment and padding */ |
866 | blk_queue_update_dma_alignment(sdev->request_queue, | 866 | blk_queue_update_dma_alignment(sdev->request_queue, |
867 | ATA_DMA_PAD_SZ - 1); | 867 | ATA_DMA_PAD_SZ - 1); |
868 | blk_queue_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1); | ||
868 | 869 | ||
869 | /* configure draining */ | 870 | /* configure draining */ |
870 | buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); | 871 | buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); |
@@ -1694,12 +1695,17 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args, | |||
1694 | u8 *rbuf; | 1695 | u8 *rbuf; |
1695 | unsigned int buflen, rc; | 1696 | unsigned int buflen, rc; |
1696 | struct scsi_cmnd *cmd = args->cmd; | 1697 | struct scsi_cmnd *cmd = args->cmd; |
1698 | unsigned long flags; | ||
1699 | |||
1700 | local_irq_save(flags); | ||
1697 | 1701 | ||
1698 | buflen = ata_scsi_rbuf_get(cmd, &rbuf); | 1702 | buflen = ata_scsi_rbuf_get(cmd, &rbuf); |
1699 | memset(rbuf, 0, buflen); | 1703 | memset(rbuf, 0, buflen); |
1700 | rc = actor(args, rbuf, buflen); | 1704 | rc = actor(args, rbuf, buflen); |
1701 | ata_scsi_rbuf_put(cmd, rbuf); | 1705 | ata_scsi_rbuf_put(cmd, rbuf); |
1702 | 1706 | ||
1707 | local_irq_restore(flags); | ||
1708 | |||
1703 | if (rc == 0) | 1709 | if (rc == 0) |
1704 | cmd->result = SAM_STAT_GOOD; | 1710 | cmd->result = SAM_STAT_GOOD; |
1705 | args->done(cmd); | 1711 | args->done(cmd); |
@@ -2473,6 +2479,9 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) | |||
2473 | if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { | 2479 | if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { |
2474 | u8 *buf = NULL; | 2480 | u8 *buf = NULL; |
2475 | unsigned int buflen; | 2481 | unsigned int buflen; |
2482 | unsigned long flags; | ||
2483 | |||
2484 | local_irq_save(flags); | ||
2476 | 2485 | ||
2477 | buflen = ata_scsi_rbuf_get(cmd, &buf); | 2486 | buflen = ata_scsi_rbuf_get(cmd, &buf); |
2478 | 2487 | ||
@@ -2490,6 +2499,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) | |||
2490 | } | 2499 | } |
2491 | 2500 | ||
2492 | ata_scsi_rbuf_put(cmd, buf); | 2501 | ata_scsi_rbuf_put(cmd, buf); |
2502 | |||
2503 | local_irq_restore(flags); | ||
2493 | } | 2504 | } |
2494 | 2505 | ||
2495 | cmd->result = SAM_STAT_GOOD; | 2506 | cmd->result = SAM_STAT_GOOD; |
@@ -2528,7 +2539,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) | |||
2528 | } | 2539 | } |
2529 | 2540 | ||
2530 | qc->tf.command = ATA_CMD_PACKET; | 2541 | qc->tf.command = ATA_CMD_PACKET; |
2531 | qc->nbytes = scsi_bufflen(scmd); | 2542 | qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len; |
2532 | 2543 | ||
2533 | /* check whether ATAPI DMA is safe */ | 2544 | /* check whether ATAPI DMA is safe */ |
2534 | if (!using_pio && ata_check_atapi_dma(qc)) | 2545 | if (!using_pio && ata_check_atapi_dma(qc)) |
@@ -2539,7 +2550,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) | |||
2539 | * want to set it properly, and for DMA where it is | 2550 | * want to set it properly, and for DMA where it is |
2540 | * effectively meaningless. | 2551 | * effectively meaningless. |
2541 | */ | 2552 | */ |
2542 | nbytes = min(scmd->request->raw_data_len, (unsigned int)63 * 1024); | 2553 | nbytes = min(scmd->request->data_len, (unsigned int)63 * 1024); |
2543 | 2554 | ||
2544 | /* Most ATAPI devices which honor transfer chunk size don't | 2555 | /* Most ATAPI devices which honor transfer chunk size don't |
2545 | * behave according to the spec when odd chunk size which | 2556 | * behave according to the spec when odd chunk size which |
@@ -2865,7 +2876,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) | |||
2865 | * TODO: find out if we need to do more here to | 2876 | * TODO: find out if we need to do more here to |
2866 | * cover scatter/gather case. | 2877 | * cover scatter/gather case. |
2867 | */ | 2878 | */ |
2868 | qc->nbytes = scsi_bufflen(scmd); | 2879 | qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len; |
2869 | 2880 | ||
2870 | /* request result TF and be quiet about device error */ | 2881 | /* request result TF and be quiet about device error */ |
2871 | qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; | 2882 | qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; |
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 6036dedfe377..aa884f71a12a 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h | |||
@@ -56,7 +56,6 @@ enum { | |||
56 | extern unsigned int ata_print_id; | 56 | extern unsigned int ata_print_id; |
57 | extern struct workqueue_struct *ata_aux_wq; | 57 | extern struct workqueue_struct *ata_aux_wq; |
58 | extern int atapi_enabled; | 58 | extern int atapi_enabled; |
59 | extern int atapi_dmadir; | ||
60 | extern int atapi_passthru16; | 59 | extern int atapi_passthru16; |
61 | extern int libata_fua; | 60 | extern int libata_fua; |
62 | extern int libata_noacpi; | 61 | extern int libata_noacpi; |
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 69f651e0bc98..840d1c4a7850 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c | |||
@@ -45,6 +45,8 @@ | |||
45 | #include <linux/interrupt.h> | 45 | #include <linux/interrupt.h> |
46 | #include <linux/device.h> | 46 | #include <linux/device.h> |
47 | #include <scsi/scsi_host.h> | 47 | #include <scsi/scsi_host.h> |
48 | #include <scsi/scsi_cmnd.h> | ||
49 | #include <scsi/scsi.h> | ||
48 | #include <linux/libata.h> | 50 | #include <linux/libata.h> |
49 | 51 | ||
50 | #ifdef CONFIG_PPC_OF | 52 | #ifdef CONFIG_PPC_OF |
@@ -59,6 +61,7 @@ enum { | |||
59 | /* ap->flags bits */ | 61 | /* ap->flags bits */ |
60 | K2_FLAG_SATA_8_PORTS = (1 << 24), | 62 | K2_FLAG_SATA_8_PORTS = (1 << 24), |
61 | K2_FLAG_NO_ATAPI_DMA = (1 << 25), | 63 | K2_FLAG_NO_ATAPI_DMA = (1 << 25), |
64 | K2_FLAG_BAR_POS_3 = (1 << 26), | ||
62 | 65 | ||
63 | /* Taskfile registers offsets */ | 66 | /* Taskfile registers offsets */ |
64 | K2_SATA_TF_CMD_OFFSET = 0x00, | 67 | K2_SATA_TF_CMD_OFFSET = 0x00, |
@@ -88,8 +91,10 @@ enum { | |||
88 | /* Port stride */ | 91 | /* Port stride */ |
89 | K2_SATA_PORT_OFFSET = 0x100, | 92 | K2_SATA_PORT_OFFSET = 0x100, |
90 | 93 | ||
91 | board_svw4 = 0, | 94 | chip_svw4 = 0, |
92 | board_svw8 = 1, | 95 | chip_svw8 = 1, |
96 | chip_svw42 = 2, /* bar 3 */ | ||
97 | chip_svw43 = 3, /* bar 5 */ | ||
93 | }; | 98 | }; |
94 | 99 | ||
95 | static u8 k2_stat_check_status(struct ata_port *ap); | 100 | static u8 k2_stat_check_status(struct ata_port *ap); |
@@ -97,10 +102,25 @@ static u8 k2_stat_check_status(struct ata_port *ap); | |||
97 | 102 | ||
98 | static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc) | 103 | static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc) |
99 | { | 104 | { |
105 | u8 cmnd = qc->scsicmd->cmnd[0]; | ||
106 | |||
100 | if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA) | 107 | if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA) |
101 | return -1; /* ATAPI DMA not supported */ | 108 | return -1; /* ATAPI DMA not supported */ |
109 | else { | ||
110 | switch (cmnd) { | ||
111 | case READ_10: | ||
112 | case READ_12: | ||
113 | case READ_16: | ||
114 | case WRITE_10: | ||
115 | case WRITE_12: | ||
116 | case WRITE_16: | ||
117 | return 0; | ||
118 | |||
119 | default: | ||
120 | return -1; | ||
121 | } | ||
102 | 122 | ||
103 | return 0; | 123 | } |
104 | } | 124 | } |
105 | 125 | ||
106 | static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) | 126 | static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) |
@@ -354,7 +374,7 @@ static const struct ata_port_operations k2_sata_ops = { | |||
354 | }; | 374 | }; |
355 | 375 | ||
356 | static const struct ata_port_info k2_port_info[] = { | 376 | static const struct ata_port_info k2_port_info[] = { |
357 | /* board_svw4 */ | 377 | /* chip_svw4 */ |
358 | { | 378 | { |
359 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 379 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
360 | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA, | 380 | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA, |
@@ -363,7 +383,7 @@ static const struct ata_port_info k2_port_info[] = { | |||
363 | .udma_mask = ATA_UDMA6, | 383 | .udma_mask = ATA_UDMA6, |
364 | .port_ops = &k2_sata_ops, | 384 | .port_ops = &k2_sata_ops, |
365 | }, | 385 | }, |
366 | /* board_svw8 */ | 386 | /* chip_svw8 */ |
367 | { | 387 | { |
368 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 388 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
369 | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA | | 389 | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA | |
@@ -373,6 +393,24 @@ static const struct ata_port_info k2_port_info[] = { | |||
373 | .udma_mask = ATA_UDMA6, | 393 | .udma_mask = ATA_UDMA6, |
374 | .port_ops = &k2_sata_ops, | 394 | .port_ops = &k2_sata_ops, |
375 | }, | 395 | }, |
396 | /* chip_svw42 */ | ||
397 | { | ||
398 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | ||
399 | ATA_FLAG_MMIO | K2_FLAG_BAR_POS_3, | ||
400 | .pio_mask = 0x1f, | ||
401 | .mwdma_mask = 0x07, | ||
402 | .udma_mask = ATA_UDMA6, | ||
403 | .port_ops = &k2_sata_ops, | ||
404 | }, | ||
405 | /* chip_svw43 */ | ||
406 | { | ||
407 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | ||
408 | ATA_FLAG_MMIO, | ||
409 | .pio_mask = 0x1f, | ||
410 | .mwdma_mask = 0x07, | ||
411 | .udma_mask = ATA_UDMA6, | ||
412 | .port_ops = &k2_sata_ops, | ||
413 | }, | ||
376 | }; | 414 | }; |
377 | 415 | ||
378 | static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base) | 416 | static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base) |
@@ -402,7 +440,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
402 | { &k2_port_info[ent->driver_data], NULL }; | 440 | { &k2_port_info[ent->driver_data], NULL }; |
403 | struct ata_host *host; | 441 | struct ata_host *host; |
404 | void __iomem *mmio_base; | 442 | void __iomem *mmio_base; |
405 | int n_ports, i, rc; | 443 | int n_ports, i, rc, bar_pos; |
406 | 444 | ||
407 | if (!printed_version++) | 445 | if (!printed_version++) |
408 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); | 446 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); |
@@ -416,6 +454,9 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
416 | if (!host) | 454 | if (!host) |
417 | return -ENOMEM; | 455 | return -ENOMEM; |
418 | 456 | ||
457 | bar_pos = 5; | ||
458 | if (ppi[0]->flags & K2_FLAG_BAR_POS_3) | ||
459 | bar_pos = 3; | ||
419 | /* | 460 | /* |
420 | * If this driver happens to only be useful on Apple's K2, then | 461 | * If this driver happens to only be useful on Apple's K2, then |
421 | * we should check that here as it has a normal Serverworks ID | 462 | * we should check that here as it has a normal Serverworks ID |
@@ -428,17 +469,23 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
428 | * Check if we have resources mapped at all (second function may | 469 | * Check if we have resources mapped at all (second function may |
429 | * have been disabled by firmware) | 470 | * have been disabled by firmware) |
430 | */ | 471 | */ |
431 | if (pci_resource_len(pdev, 5) == 0) | 472 | if (pci_resource_len(pdev, bar_pos) == 0) { |
473 | /* In IDE mode we need to pin the device to ensure that | ||
474 | pcim_release does not clear the busmaster bit in config | ||
475 | space, clearing causes busmaster DMA to fail on | ||
476 | ports 3 & 4 */ | ||
477 | pcim_pin_device(pdev); | ||
432 | return -ENODEV; | 478 | return -ENODEV; |
479 | } | ||
433 | 480 | ||
434 | /* Request and iomap PCI regions */ | 481 | /* Request and iomap PCI regions */ |
435 | rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME); | 482 | rc = pcim_iomap_regions(pdev, 1 << bar_pos, DRV_NAME); |
436 | if (rc == -EBUSY) | 483 | if (rc == -EBUSY) |
437 | pcim_pin_device(pdev); | 484 | pcim_pin_device(pdev); |
438 | if (rc) | 485 | if (rc) |
439 | return rc; | 486 | return rc; |
440 | host->iomap = pcim_iomap_table(pdev); | 487 | host->iomap = pcim_iomap_table(pdev); |
441 | mmio_base = host->iomap[5]; | 488 | mmio_base = host->iomap[bar_pos]; |
442 | 489 | ||
443 | /* different controllers have different number of ports - currently 4 or 8 */ | 490 | /* different controllers have different number of ports - currently 4 or 8 */ |
444 | /* All ports are on the same function. Multi-function device is no | 491 | /* All ports are on the same function. Multi-function device is no |
@@ -483,11 +530,13 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
483 | * controller | 530 | * controller |
484 | * */ | 531 | * */ |
485 | static const struct pci_device_id k2_sata_pci_tbl[] = { | 532 | static const struct pci_device_id k2_sata_pci_tbl[] = { |
486 | { PCI_VDEVICE(SERVERWORKS, 0x0240), board_svw4 }, | 533 | { PCI_VDEVICE(SERVERWORKS, 0x0240), chip_svw4 }, |
487 | { PCI_VDEVICE(SERVERWORKS, 0x0241), board_svw4 }, | 534 | { PCI_VDEVICE(SERVERWORKS, 0x0241), chip_svw4 }, |
488 | { PCI_VDEVICE(SERVERWORKS, 0x0242), board_svw8 }, | 535 | { PCI_VDEVICE(SERVERWORKS, 0x0242), chip_svw8 }, |
489 | { PCI_VDEVICE(SERVERWORKS, 0x024a), board_svw4 }, | 536 | { PCI_VDEVICE(SERVERWORKS, 0x024a), chip_svw4 }, |
490 | { PCI_VDEVICE(SERVERWORKS, 0x024b), board_svw4 }, | 537 | { PCI_VDEVICE(SERVERWORKS, 0x024b), chip_svw4 }, |
538 | { PCI_VDEVICE(SERVERWORKS, 0x0410), chip_svw42 }, | ||
539 | { PCI_VDEVICE(SERVERWORKS, 0x0411), chip_svw43 }, | ||
491 | 540 | ||
492 | { } | 541 | { } |
493 | }; | 542 | }; |
diff --git a/drivers/base/core.c b/drivers/base/core.c index 9c0070b5bd3e..7de543d1d0b4 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
@@ -621,7 +621,8 @@ static struct kobject *get_device_parent(struct device *dev, | |||
621 | static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) | 621 | static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) |
622 | { | 622 | { |
623 | /* see if we live in a "glue" directory */ | 623 | /* see if we live in a "glue" directory */ |
624 | if (!dev->class || glue_dir->kset != &dev->class->class_dirs) | 624 | if (!glue_dir || !dev->class || |
625 | glue_dir->kset != &dev->class->class_dirs) | ||
625 | return; | 626 | return; |
626 | 627 | ||
627 | kobject_put(glue_dir); | 628 | kobject_put(glue_dir); |
@@ -770,17 +771,10 @@ int device_add(struct device *dev) | |||
770 | struct class_interface *class_intf; | 771 | struct class_interface *class_intf; |
771 | int error; | 772 | int error; |
772 | 773 | ||
773 | error = pm_sleep_lock(); | ||
774 | if (error) { | ||
775 | dev_warn(dev, "Suspicious %s during suspend\n", __FUNCTION__); | ||
776 | dump_stack(); | ||
777 | return error; | ||
778 | } | ||
779 | |||
780 | dev = get_device(dev); | 774 | dev = get_device(dev); |
781 | if (!dev || !strlen(dev->bus_id)) { | 775 | if (!dev || !strlen(dev->bus_id)) { |
782 | error = -EINVAL; | 776 | error = -EINVAL; |
783 | goto Error; | 777 | goto Done; |
784 | } | 778 | } |
785 | 779 | ||
786 | pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__); | 780 | pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__); |
@@ -843,11 +837,9 @@ int device_add(struct device *dev) | |||
843 | } | 837 | } |
844 | Done: | 838 | Done: |
845 | put_device(dev); | 839 | put_device(dev); |
846 | pm_sleep_unlock(); | ||
847 | return error; | 840 | return error; |
848 | BusError: | 841 | BusError: |
849 | device_pm_remove(dev); | 842 | device_pm_remove(dev); |
850 | dpm_sysfs_remove(dev); | ||
851 | PMError: | 843 | PMError: |
852 | if (dev->bus) | 844 | if (dev->bus) |
853 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, | 845 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index ee9d1c8db0d6..d887d5cb5bef 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -48,7 +48,6 @@ | |||
48 | */ | 48 | */ |
49 | 49 | ||
50 | LIST_HEAD(dpm_active); | 50 | LIST_HEAD(dpm_active); |
51 | static LIST_HEAD(dpm_locked); | ||
52 | static LIST_HEAD(dpm_off); | 51 | static LIST_HEAD(dpm_off); |
53 | static LIST_HEAD(dpm_off_irq); | 52 | static LIST_HEAD(dpm_off_irq); |
54 | static LIST_HEAD(dpm_destroy); | 53 | static LIST_HEAD(dpm_destroy); |
@@ -81,28 +80,6 @@ void device_pm_add(struct device *dev) | |||
81 | */ | 80 | */ |
82 | void device_pm_remove(struct device *dev) | 81 | void device_pm_remove(struct device *dev) |
83 | { | 82 | { |
84 | /* | ||
85 | * If this function is called during a suspend, it will be blocked, | ||
86 | * because we're holding the device's semaphore at that time, which may | ||
87 | * lead to a deadlock. In that case we want to print a warning. | ||
88 | * However, it may also be called by unregister_dropped_devices() with | ||
89 | * the device's semaphore released, in which case the warning should | ||
90 | * not be printed. | ||
91 | */ | ||
92 | if (down_trylock(&dev->sem)) { | ||
93 | if (down_read_trylock(&pm_sleep_rwsem)) { | ||
94 | /* No suspend in progress, wait on dev->sem */ | ||
95 | down(&dev->sem); | ||
96 | up_read(&pm_sleep_rwsem); | ||
97 | } else { | ||
98 | /* Suspend in progress, we may deadlock */ | ||
99 | dev_warn(dev, "Suspicious %s during suspend\n", | ||
100 | __FUNCTION__); | ||
101 | dump_stack(); | ||
102 | /* The user has been warned ... */ | ||
103 | down(&dev->sem); | ||
104 | } | ||
105 | } | ||
106 | pr_debug("PM: Removing info for %s:%s\n", | 83 | pr_debug("PM: Removing info for %s:%s\n", |
107 | dev->bus ? dev->bus->name : "No Bus", | 84 | dev->bus ? dev->bus->name : "No Bus", |
108 | kobject_name(&dev->kobj)); | 85 | kobject_name(&dev->kobj)); |
@@ -110,7 +87,6 @@ void device_pm_remove(struct device *dev) | |||
110 | dpm_sysfs_remove(dev); | 87 | dpm_sysfs_remove(dev); |
111 | list_del_init(&dev->power.entry); | 88 | list_del_init(&dev->power.entry); |
112 | mutex_unlock(&dpm_list_mtx); | 89 | mutex_unlock(&dpm_list_mtx); |
113 | up(&dev->sem); | ||
114 | } | 90 | } |
115 | 91 | ||
116 | /** | 92 | /** |
@@ -230,6 +206,8 @@ static int resume_device(struct device *dev) | |||
230 | TRACE_DEVICE(dev); | 206 | TRACE_DEVICE(dev); |
231 | TRACE_RESUME(0); | 207 | TRACE_RESUME(0); |
232 | 208 | ||
209 | down(&dev->sem); | ||
210 | |||
233 | if (dev->bus && dev->bus->resume) { | 211 | if (dev->bus && dev->bus->resume) { |
234 | dev_dbg(dev,"resuming\n"); | 212 | dev_dbg(dev,"resuming\n"); |
235 | error = dev->bus->resume(dev); | 213 | error = dev->bus->resume(dev); |
@@ -245,6 +223,8 @@ static int resume_device(struct device *dev) | |||
245 | error = dev->class->resume(dev); | 223 | error = dev->class->resume(dev); |
246 | } | 224 | } |
247 | 225 | ||
226 | up(&dev->sem); | ||
227 | |||
248 | TRACE_RESUME(error); | 228 | TRACE_RESUME(error); |
249 | return error; | 229 | return error; |
250 | } | 230 | } |
@@ -266,7 +246,7 @@ static void dpm_resume(void) | |||
266 | struct list_head *entry = dpm_off.next; | 246 | struct list_head *entry = dpm_off.next; |
267 | struct device *dev = to_device(entry); | 247 | struct device *dev = to_device(entry); |
268 | 248 | ||
269 | list_move_tail(entry, &dpm_locked); | 249 | list_move_tail(entry, &dpm_active); |
270 | mutex_unlock(&dpm_list_mtx); | 250 | mutex_unlock(&dpm_list_mtx); |
271 | resume_device(dev); | 251 | resume_device(dev); |
272 | mutex_lock(&dpm_list_mtx); | 252 | mutex_lock(&dpm_list_mtx); |
@@ -275,25 +255,6 @@ static void dpm_resume(void) | |||
275 | } | 255 | } |
276 | 256 | ||
277 | /** | 257 | /** |
278 | * unlock_all_devices - Release each device's semaphore | ||
279 | * | ||
280 | * Go through the dpm_off list. Put each device on the dpm_active | ||
281 | * list and unlock it. | ||
282 | */ | ||
283 | static void unlock_all_devices(void) | ||
284 | { | ||
285 | mutex_lock(&dpm_list_mtx); | ||
286 | while (!list_empty(&dpm_locked)) { | ||
287 | struct list_head *entry = dpm_locked.prev; | ||
288 | struct device *dev = to_device(entry); | ||
289 | |||
290 | list_move(entry, &dpm_active); | ||
291 | up(&dev->sem); | ||
292 | } | ||
293 | mutex_unlock(&dpm_list_mtx); | ||
294 | } | ||
295 | |||
296 | /** | ||
297 | * unregister_dropped_devices - Unregister devices scheduled for removal | 258 | * unregister_dropped_devices - Unregister devices scheduled for removal |
298 | * | 259 | * |
299 | * Unregister all devices on the dpm_destroy list. | 260 | * Unregister all devices on the dpm_destroy list. |
@@ -305,7 +266,6 @@ static void unregister_dropped_devices(void) | |||
305 | struct list_head *entry = dpm_destroy.next; | 266 | struct list_head *entry = dpm_destroy.next; |
306 | struct device *dev = to_device(entry); | 267 | struct device *dev = to_device(entry); |
307 | 268 | ||
308 | up(&dev->sem); | ||
309 | mutex_unlock(&dpm_list_mtx); | 269 | mutex_unlock(&dpm_list_mtx); |
310 | /* This also removes the device from the list */ | 270 | /* This also removes the device from the list */ |
311 | device_unregister(dev); | 271 | device_unregister(dev); |
@@ -324,7 +284,6 @@ void device_resume(void) | |||
324 | { | 284 | { |
325 | might_sleep(); | 285 | might_sleep(); |
326 | dpm_resume(); | 286 | dpm_resume(); |
327 | unlock_all_devices(); | ||
328 | unregister_dropped_devices(); | 287 | unregister_dropped_devices(); |
329 | up_write(&pm_sleep_rwsem); | 288 | up_write(&pm_sleep_rwsem); |
330 | } | 289 | } |
@@ -388,18 +347,15 @@ int device_power_down(pm_message_t state) | |||
388 | struct list_head *entry = dpm_off.prev; | 347 | struct list_head *entry = dpm_off.prev; |
389 | struct device *dev = to_device(entry); | 348 | struct device *dev = to_device(entry); |
390 | 349 | ||
391 | list_del_init(&dev->power.entry); | ||
392 | error = suspend_device_late(dev, state); | 350 | error = suspend_device_late(dev, state); |
393 | if (error) { | 351 | if (error) { |
394 | printk(KERN_ERR "Could not power down device %s: " | 352 | printk(KERN_ERR "Could not power down device %s: " |
395 | "error %d\n", | 353 | "error %d\n", |
396 | kobject_name(&dev->kobj), error); | 354 | kobject_name(&dev->kobj), error); |
397 | if (list_empty(&dev->power.entry)) | ||
398 | list_add(&dev->power.entry, &dpm_off); | ||
399 | break; | 355 | break; |
400 | } | 356 | } |
401 | if (list_empty(&dev->power.entry)) | 357 | if (!list_empty(&dev->power.entry)) |
402 | list_add(&dev->power.entry, &dpm_off_irq); | 358 | list_move(&dev->power.entry, &dpm_off_irq); |
403 | } | 359 | } |
404 | 360 | ||
405 | if (!error) | 361 | if (!error) |
@@ -419,6 +375,8 @@ static int suspend_device(struct device *dev, pm_message_t state) | |||
419 | { | 375 | { |
420 | int error = 0; | 376 | int error = 0; |
421 | 377 | ||
378 | down(&dev->sem); | ||
379 | |||
422 | if (dev->power.power_state.event) { | 380 | if (dev->power.power_state.event) { |
423 | dev_dbg(dev, "PM: suspend %d-->%d\n", | 381 | dev_dbg(dev, "PM: suspend %d-->%d\n", |
424 | dev->power.power_state.event, state.event); | 382 | dev->power.power_state.event, state.event); |
@@ -441,6 +399,9 @@ static int suspend_device(struct device *dev, pm_message_t state) | |||
441 | error = dev->bus->suspend(dev, state); | 399 | error = dev->bus->suspend(dev, state); |
442 | suspend_report_result(dev->bus->suspend, error); | 400 | suspend_report_result(dev->bus->suspend, error); |
443 | } | 401 | } |
402 | |||
403 | up(&dev->sem); | ||
404 | |||
444 | return error; | 405 | return error; |
445 | } | 406 | } |
446 | 407 | ||
@@ -461,13 +422,13 @@ static int dpm_suspend(pm_message_t state) | |||
461 | int error = 0; | 422 | int error = 0; |
462 | 423 | ||
463 | mutex_lock(&dpm_list_mtx); | 424 | mutex_lock(&dpm_list_mtx); |
464 | while (!list_empty(&dpm_locked)) { | 425 | while (!list_empty(&dpm_active)) { |
465 | struct list_head *entry = dpm_locked.prev; | 426 | struct list_head *entry = dpm_active.prev; |
466 | struct device *dev = to_device(entry); | 427 | struct device *dev = to_device(entry); |
467 | 428 | ||
468 | list_del_init(&dev->power.entry); | ||
469 | mutex_unlock(&dpm_list_mtx); | 429 | mutex_unlock(&dpm_list_mtx); |
470 | error = suspend_device(dev, state); | 430 | error = suspend_device(dev, state); |
431 | mutex_lock(&dpm_list_mtx); | ||
471 | if (error) { | 432 | if (error) { |
472 | printk(KERN_ERR "Could not suspend device %s: " | 433 | printk(KERN_ERR "Could not suspend device %s: " |
473 | "error %d%s\n", | 434 | "error %d%s\n", |
@@ -476,14 +437,10 @@ static int dpm_suspend(pm_message_t state) | |||
476 | (error == -EAGAIN ? | 437 | (error == -EAGAIN ? |
477 | " (please convert to suspend_late)" : | 438 | " (please convert to suspend_late)" : |
478 | "")); | 439 | "")); |
479 | mutex_lock(&dpm_list_mtx); | ||
480 | if (list_empty(&dev->power.entry)) | ||
481 | list_add(&dev->power.entry, &dpm_locked); | ||
482 | break; | 440 | break; |
483 | } | 441 | } |
484 | mutex_lock(&dpm_list_mtx); | 442 | if (!list_empty(&dev->power.entry)) |
485 | if (list_empty(&dev->power.entry)) | 443 | list_move(&dev->power.entry, &dpm_off); |
486 | list_add(&dev->power.entry, &dpm_off); | ||
487 | } | 444 | } |
488 | mutex_unlock(&dpm_list_mtx); | 445 | mutex_unlock(&dpm_list_mtx); |
489 | 446 | ||
@@ -491,36 +448,6 @@ static int dpm_suspend(pm_message_t state) | |||
491 | } | 448 | } |
492 | 449 | ||
493 | /** | 450 | /** |
494 | * lock_all_devices - Acquire every device's semaphore | ||
495 | * | ||
496 | * Go through the dpm_active list. Carefully lock each device's | ||
497 | * semaphore and put it in on the dpm_locked list. | ||
498 | */ | ||
499 | static void lock_all_devices(void) | ||
500 | { | ||
501 | mutex_lock(&dpm_list_mtx); | ||
502 | while (!list_empty(&dpm_active)) { | ||
503 | struct list_head *entry = dpm_active.next; | ||
504 | struct device *dev = to_device(entry); | ||
505 | |||
506 | /* Required locking order is dev->sem first, | ||
507 | * then dpm_list_mutex. Hence this awkward code. | ||
508 | */ | ||
509 | get_device(dev); | ||
510 | mutex_unlock(&dpm_list_mtx); | ||
511 | down(&dev->sem); | ||
512 | mutex_lock(&dpm_list_mtx); | ||
513 | |||
514 | if (list_empty(entry)) | ||
515 | up(&dev->sem); /* Device was removed */ | ||
516 | else | ||
517 | list_move_tail(entry, &dpm_locked); | ||
518 | put_device(dev); | ||
519 | } | ||
520 | mutex_unlock(&dpm_list_mtx); | ||
521 | } | ||
522 | |||
523 | /** | ||
524 | * device_suspend - Save state and stop all devices in system. | 451 | * device_suspend - Save state and stop all devices in system. |
525 | * @state: new power management state | 452 | * @state: new power management state |
526 | * | 453 | * |
@@ -533,7 +460,6 @@ int device_suspend(pm_message_t state) | |||
533 | 460 | ||
534 | might_sleep(); | 461 | might_sleep(); |
535 | down_write(&pm_sleep_rwsem); | 462 | down_write(&pm_sleep_rwsem); |
536 | lock_all_devices(); | ||
537 | error = dpm_suspend(state); | 463 | error = dpm_suspend(state); |
538 | if (error) | 464 | if (error) |
539 | device_resume(); | 465 | device_resume(); |
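
Note: the net effect of the drivers/base/power/main.c changes is that the dpm_locked list and the lock_all_devices()/unlock_all_devices() passes are gone; each device's semaphore is taken only around that device's own suspend and resume callbacks, and devices move directly between dpm_active, dpm_off and dpm_off_irq with list_move(). Condensed from suspend_device() above (the name is shortened here and the power_state/type checks of the real function are omitted):

    static int suspend_one(struct device *dev, pm_message_t state)
    {
            int error = 0;

            down(&dev->sem);                /* serialize against this device only */
            if (dev->class && dev->class->suspend)
                    error = dev->class->suspend(dev, state);
            if (!error && dev->bus && dev->bus->suspend)
                    error = dev->bus->suspend(dev, state);
            up(&dev->sem);

            return error;
    }
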
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c index f25e7c6b2d27..40bca48abc12 100644 --- a/drivers/base/transport_class.c +++ b/drivers/base/transport_class.c | |||
@@ -126,9 +126,7 @@ static int transport_setup_classdev(struct attribute_container *cont, | |||
126 | } | 126 | } |
127 | 127 | ||
128 | /** | 128 | /** |
129 | * transport_setup_device - declare a new dev for transport class association | 129 | * transport_setup_device - declare a new dev for transport class association but don't make it visible yet. |
130 | * but don't make it visible yet. | ||
131 | * | ||
132 | * @dev: the generic device representing the entity being added | 130 | * @dev: the generic device representing the entity being added |
133 | * | 131 | * |
134 | * Usually, dev represents some component in the HBA system (either | 132 | * Usually, dev represents some component in the HBA system (either |
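
Note: the transport_class.c hunk exists because kernel-doc expects the "name - summary" line to be a single line; splitting the summary across two lines breaks the extractor. The Xilinx hwicap hunks further down make the companion fix, replacing the non-standard "@parameter foo:" with plain "@foo:". For reference, a well-formed kernel-doc comment (function name hypothetical) looks like:

    /**
     * frob_widget - reset a widget and report its state
     * @w: the widget to reset
     * @hard: nonzero to force a full hardware reset
     *
     * The longer description follows after a blank comment line.
     *
     * Returns 0 on success or a negative errno on failure.
     */
    int frob_widget(struct widget *w, int hard);
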
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 9715be3f2487..55bd35c0f082 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/blkpg.h> | 33 | #include <linux/blkpg.h> |
34 | #include <linux/timer.h> | 34 | #include <linux/timer.h> |
35 | #include <linux/proc_fs.h> | 35 | #include <linux/proc_fs.h> |
36 | #include <linux/seq_file.h> | ||
36 | #include <linux/init.h> | 37 | #include <linux/init.h> |
37 | #include <linux/hdreg.h> | 38 | #include <linux/hdreg.h> |
38 | #include <linux/spinlock.h> | 39 | #include <linux/spinlock.h> |
@@ -131,7 +132,6 @@ static struct board_type products[] = { | |||
131 | /*define how many times we will try a command because of bus resets */ | 132 | /*define how many times we will try a command because of bus resets */ |
132 | #define MAX_CMD_RETRIES 3 | 133 | #define MAX_CMD_RETRIES 3 |
133 | 134 | ||
134 | #define READ_AHEAD 1024 | ||
135 | #define MAX_CTLR 32 | 135 | #define MAX_CTLR 32 |
136 | 136 | ||
137 | /* Originally cciss driver only supports 8 major numbers */ | 137 | /* Originally cciss driver only supports 8 major numbers */ |
@@ -174,8 +174,6 @@ static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, | |||
174 | static void fail_all_cmds(unsigned long ctlr); | 174 | static void fail_all_cmds(unsigned long ctlr); |
175 | 175 | ||
176 | #ifdef CONFIG_PROC_FS | 176 | #ifdef CONFIG_PROC_FS |
177 | static int cciss_proc_get_info(char *buffer, char **start, off_t offset, | ||
178 | int length, int *eof, void *data); | ||
179 | static void cciss_procinit(int i); | 177 | static void cciss_procinit(int i); |
180 | #else | 178 | #else |
181 | static void cciss_procinit(int i) | 179 | static void cciss_procinit(int i) |
@@ -240,24 +238,46 @@ static inline CommandList_struct *removeQ(CommandList_struct **Qptr, | |||
240 | */ | 238 | */ |
241 | #define ENG_GIG 1000000000 | 239 | #define ENG_GIG 1000000000 |
242 | #define ENG_GIG_FACTOR (ENG_GIG/512) | 240 | #define ENG_GIG_FACTOR (ENG_GIG/512) |
241 | #define ENGAGE_SCSI "engage scsi" | ||
243 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", | 242 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", |
244 | "UNKNOWN" | 243 | "UNKNOWN" |
245 | }; | 244 | }; |
246 | 245 | ||
247 | static struct proc_dir_entry *proc_cciss; | 246 | static struct proc_dir_entry *proc_cciss; |
248 | 247 | ||
249 | static int cciss_proc_get_info(char *buffer, char **start, off_t offset, | 248 | static void cciss_seq_show_header(struct seq_file *seq) |
250 | int length, int *eof, void *data) | ||
251 | { | 249 | { |
252 | off_t pos = 0; | 250 | ctlr_info_t *h = seq->private; |
253 | off_t len = 0; | 251 | |
254 | int size, i, ctlr; | 252 | seq_printf(seq, "%s: HP %s Controller\n" |
255 | ctlr_info_t *h = (ctlr_info_t *) data; | 253 | "Board ID: 0x%08lx\n" |
256 | drive_info_struct *drv; | 254 | "Firmware Version: %c%c%c%c\n" |
257 | unsigned long flags; | 255 | "IRQ: %d\n" |
258 | sector_t vol_sz, vol_sz_frac; | 256 | "Logical drives: %d\n" |
257 | "Current Q depth: %d\n" | ||
258 | "Current # commands on controller: %d\n" | ||
259 | "Max Q depth since init: %d\n" | ||
260 | "Max # commands on controller since init: %d\n" | ||
261 | "Max SG entries since init: %d\n", | ||
262 | h->devname, | ||
263 | h->product_name, | ||
264 | (unsigned long)h->board_id, | ||
265 | h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], | ||
266 | h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], | ||
267 | h->num_luns, | ||
268 | h->Qdepth, h->commands_outstanding, | ||
269 | h->maxQsinceinit, h->max_outstanding, h->maxSG); | ||
259 | 270 | ||
260 | ctlr = h->ctlr; | 271 | #ifdef CONFIG_CISS_SCSI_TAPE |
272 | cciss_seq_tape_report(seq, h->ctlr); | ||
273 | #endif /* CONFIG_CISS_SCSI_TAPE */ | ||
274 | } | ||
275 | |||
276 | static void *cciss_seq_start(struct seq_file *seq, loff_t *pos) | ||
277 | { | ||
278 | ctlr_info_t *h = seq->private; | ||
279 | unsigned ctlr = h->ctlr; | ||
280 | unsigned long flags; | ||
261 | 281 | ||
262 | /* prevent displaying bogus info during configuration | 282 | /* prevent displaying bogus info during configuration |
263 | * or deconfiguration of a logical volume | 283 | * or deconfiguration of a logical volume |
@@ -265,115 +285,155 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset, | |||
265 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | 285 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); |
266 | if (h->busy_configuring) { | 286 | if (h->busy_configuring) { |
267 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 287 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); |
268 | return -EBUSY; | 288 | return ERR_PTR(-EBUSY); |
269 | } | 289 | } |
270 | h->busy_configuring = 1; | 290 | h->busy_configuring = 1; |
271 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 291 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); |
272 | 292 | ||
273 | size = sprintf(buffer, "%s: HP %s Controller\n" | 293 | if (*pos == 0) |
274 | "Board ID: 0x%08lx\n" | 294 | cciss_seq_show_header(seq); |
275 | "Firmware Version: %c%c%c%c\n" | ||
276 | "IRQ: %d\n" | ||
277 | "Logical drives: %d\n" | ||
278 | "Max sectors: %d\n" | ||
279 | "Current Q depth: %d\n" | ||
280 | "Current # commands on controller: %d\n" | ||
281 | "Max Q depth since init: %d\n" | ||
282 | "Max # commands on controller since init: %d\n" | ||
283 | "Max SG entries since init: %d\n\n", | ||
284 | h->devname, | ||
285 | h->product_name, | ||
286 | (unsigned long)h->board_id, | ||
287 | h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], | ||
288 | h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], | ||
289 | h->num_luns, | ||
290 | h->cciss_max_sectors, | ||
291 | h->Qdepth, h->commands_outstanding, | ||
292 | h->maxQsinceinit, h->max_outstanding, h->maxSG); | ||
293 | |||
294 | pos += size; | ||
295 | len += size; | ||
296 | cciss_proc_tape_report(ctlr, buffer, &pos, &len); | ||
297 | for (i = 0; i <= h->highest_lun; i++) { | ||
298 | |||
299 | drv = &h->drv[i]; | ||
300 | if (drv->heads == 0) | ||
301 | continue; | ||
302 | 295 | ||
303 | vol_sz = drv->nr_blocks; | 296 | return pos; |
304 | vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); | 297 | } |
305 | vol_sz_frac *= 100; | 298 | |
306 | sector_div(vol_sz_frac, ENG_GIG_FACTOR); | 299 | static int cciss_seq_show(struct seq_file *seq, void *v) |
300 | { | ||
301 | sector_t vol_sz, vol_sz_frac; | ||
302 | ctlr_info_t *h = seq->private; | ||
303 | unsigned ctlr = h->ctlr; | ||
304 | loff_t *pos = v; | ||
305 | drive_info_struct *drv = &h->drv[*pos]; | ||
306 | |||
307 | if (*pos > h->highest_lun) | ||
308 | return 0; | ||
309 | |||
310 | if (drv->heads == 0) | ||
311 | return 0; | ||
312 | |||
313 | vol_sz = drv->nr_blocks; | ||
314 | vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); | ||
315 | vol_sz_frac *= 100; | ||
316 | sector_div(vol_sz_frac, ENG_GIG_FACTOR); | ||
317 | |||
318 | if (drv->raid_level > 5) | ||
319 | drv->raid_level = RAID_UNKNOWN; | ||
320 | seq_printf(seq, "cciss/c%dd%d:" | ||
321 | "\t%4u.%02uGB\tRAID %s\n", | ||
322 | ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac, | ||
323 | raid_label[drv->raid_level]); | ||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
328 | { | ||
329 | ctlr_info_t *h = seq->private; | ||
330 | |||
331 | if (*pos > h->highest_lun) | ||
332 | return NULL; | ||
333 | *pos += 1; | ||
334 | |||
335 | return pos; | ||
336 | } | ||
337 | |||
338 | static void cciss_seq_stop(struct seq_file *seq, void *v) | ||
339 | { | ||
340 | ctlr_info_t *h = seq->private; | ||
341 | |||
342 | /* Only reset h->busy_configuring if we succeeded in setting | ||
343 | * it during cciss_seq_start. */ | ||
344 | if (v == ERR_PTR(-EBUSY)) | ||
345 | return; | ||
307 | 346 | ||
308 | if (drv->raid_level > 5) | ||
309 | drv->raid_level = RAID_UNKNOWN; | ||
310 | size = sprintf(buffer + len, "cciss/c%dd%d:" | ||
311 | "\t%4u.%02uGB\tRAID %s\n", | ||
312 | ctlr, i, (int)vol_sz, (int)vol_sz_frac, | ||
313 | raid_label[drv->raid_level]); | ||
314 | pos += size; | ||
315 | len += size; | ||
316 | } | ||
317 | |||
318 | *eof = 1; | ||
319 | *start = buffer + offset; | ||
320 | len -= offset; | ||
321 | if (len > length) | ||
322 | len = length; | ||
323 | h->busy_configuring = 0; | 347 | h->busy_configuring = 0; |
324 | return len; | ||
325 | } | 348 | } |
326 | 349 | ||
327 | static int | 350 | static struct seq_operations cciss_seq_ops = { |
328 | cciss_proc_write(struct file *file, const char __user *buffer, | 351 | .start = cciss_seq_start, |
329 | unsigned long count, void *data) | 352 | .show = cciss_seq_show, |
353 | .next = cciss_seq_next, | ||
354 | .stop = cciss_seq_stop, | ||
355 | }; | ||
356 | |||
357 | static int cciss_seq_open(struct inode *inode, struct file *file) | ||
330 | { | 358 | { |
331 | unsigned char cmd[80]; | 359 | int ret = seq_open(file, &cciss_seq_ops); |
332 | int len; | 360 | struct seq_file *seq = file->private_data; |
333 | #ifdef CONFIG_CISS_SCSI_TAPE | 361 | |
334 | ctlr_info_t *h = (ctlr_info_t *) data; | 362 | if (!ret) |
335 | int rc; | 363 | seq->private = PDE(inode)->data; |
364 | |||
365 | return ret; | ||
366 | } | ||
367 | |||
368 | static ssize_t | ||
369 | cciss_proc_write(struct file *file, const char __user *buf, | ||
370 | size_t length, loff_t *ppos) | ||
371 | { | ||
372 | int err; | ||
373 | char *buffer; | ||
374 | |||
375 | #ifndef CONFIG_CISS_SCSI_TAPE | ||
376 | return -EINVAL; | ||
336 | #endif | 377 | #endif |
337 | 378 | ||
338 | if (count > sizeof(cmd) - 1) | 379 | if (!buf || length > PAGE_SIZE - 1) |
339 | return -EINVAL; | 380 | return -EINVAL; |
340 | if (copy_from_user(cmd, buffer, count)) | 381 | |
341 | return -EFAULT; | 382 | buffer = (char *)__get_free_page(GFP_KERNEL); |
342 | cmd[count] = '\0'; | 383 | if (!buffer) |
343 | len = strlen(cmd); // above 3 lines ensure safety | 384 | return -ENOMEM; |
344 | if (len && cmd[len - 1] == '\n') | 385 | |
345 | cmd[--len] = '\0'; | 386 | err = -EFAULT; |
346 | # ifdef CONFIG_CISS_SCSI_TAPE | 387 | if (copy_from_user(buffer, buf, length)) |
347 | if (strcmp("engage scsi", cmd) == 0) { | 388 | goto out; |
389 | buffer[length] = '\0'; | ||
390 | |||
391 | #ifdef CONFIG_CISS_SCSI_TAPE | ||
392 | if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) { | ||
393 | struct seq_file *seq = file->private_data; | ||
394 | ctlr_info_t *h = seq->private; | ||
395 | int rc; | ||
396 | |||
348 | rc = cciss_engage_scsi(h->ctlr); | 397 | rc = cciss_engage_scsi(h->ctlr); |
349 | if (rc != 0) | 398 | if (rc != 0) |
350 | return -rc; | 399 | err = -rc; |
351 | return count; | 400 | else |
352 | } | 401 | err = length; |
402 | } else | ||
403 | #endif /* CONFIG_CISS_SCSI_TAPE */ | ||
404 | err = -EINVAL; | ||
353 | /* might be nice to have "disengage" too, but it's not | 405 | /* might be nice to have "disengage" too, but it's not |
354 | safely possible. (only 1 module use count, lock issues.) */ | 406 | safely possible. (only 1 module use count, lock issues.) */ |
355 | # endif | 407 | |
356 | return -EINVAL; | 408 | out: |
409 | free_page((unsigned long)buffer); | ||
410 | return err; | ||
357 | } | 411 | } |
358 | 412 | ||
359 | /* | 413 | static struct file_operations cciss_proc_fops = { |
360 | * Get us a file in /proc/cciss that says something about each controller. | 414 | .owner = THIS_MODULE, |
361 | * Create /proc/cciss if it doesn't exist yet. | 415 | .open = cciss_seq_open, |
362 | */ | 416 | .read = seq_read, |
417 | .llseek = seq_lseek, | ||
418 | .release = seq_release, | ||
419 | .write = cciss_proc_write, | ||
420 | }; | ||
421 | |||
363 | static void __devinit cciss_procinit(int i) | 422 | static void __devinit cciss_procinit(int i) |
364 | { | 423 | { |
365 | struct proc_dir_entry *pde; | 424 | struct proc_dir_entry *pde; |
366 | 425 | ||
367 | if (proc_cciss == NULL) { | 426 | if (proc_cciss == NULL) |
368 | proc_cciss = proc_mkdir("cciss", proc_root_driver); | 427 | proc_cciss = proc_mkdir("cciss", proc_root_driver); |
369 | if (!proc_cciss) | 428 | if (!proc_cciss) |
370 | return; | 429 | return; |
371 | } | 430 | pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP | |
431 | S_IROTH, proc_cciss, | ||
432 | &cciss_proc_fops); | ||
433 | if (!pde) | ||
434 | return; | ||
372 | 435 | ||
373 | pde = create_proc_read_entry(hba[i]->devname, | 436 | pde->data = hba[i]; |
374 | S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH, | ||
375 | proc_cciss, cciss_proc_get_info, hba[i]); | ||
376 | pde->write_proc = cciss_proc_write; | ||
377 | } | 437 | } |
378 | #endif /* CONFIG_PROC_FS */ | 438 | #endif /* CONFIG_PROC_FS */ |
379 | 439 | ||
@@ -1341,7 +1401,6 @@ geo_inq: | |||
1341 | disk->private_data = &h->drv[drv_index]; | 1401 | disk->private_data = &h->drv[drv_index]; |
1342 | 1402 | ||
1343 | /* Set up queue information */ | 1403 | /* Set up queue information */ |
1344 | disk->queue->backing_dev_info.ra_pages = READ_AHEAD; | ||
1345 | blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask); | 1404 | blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask); |
1346 | 1405 | ||
1347 | /* This is a hardware imposed limit. */ | 1406 | /* This is a hardware imposed limit. */ |
@@ -3434,7 +3493,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
3434 | } | 3493 | } |
3435 | drv->queue = q; | 3494 | drv->queue = q; |
3436 | 3495 | ||
3437 | q->backing_dev_info.ra_pages = READ_AHEAD; | ||
3438 | blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask); | 3496 | blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask); |
3439 | 3497 | ||
3440 | /* This is a hardware imposed limit. */ | 3498 | /* This is a hardware imposed limit. */ |
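
Note: the bulk of the cciss.c change converts /proc/cciss/<ctlr> from the old create_proc_read_entry() read callback, with its hand-rolled offset/length bookkeeping, to the seq_file iterator interface (iterating over logical drives by *pos) and registers it with proc_create() plus pde->data for the per-controller pointer. A stripped-down version of the same wiring — names are hypothetical, the calls mirror the ones in the hunks — is:

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static void *ex_seq_start(struct seq_file *seq, loff_t *pos)
    {
            return (*pos == 0) ? pos : NULL;        /* single record */
    }

    static void *ex_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    {
            ++*pos;
            return NULL;                            /* nothing after record 0 */
    }

    static void ex_seq_stop(struct seq_file *seq, void *v)
    {
    }

    static int ex_seq_show(struct seq_file *seq, void *v)
    {
            seq_printf(seq, "state: %s\n", (char *)seq->private);
            return 0;
    }

    static struct seq_operations ex_seq_ops = {
            .start = ex_seq_start,
            .next  = ex_seq_next,
            .stop  = ex_seq_stop,
            .show  = ex_seq_show,
    };

    static int ex_open(struct inode *inode, struct file *file)
    {
            int ret = seq_open(file, &ex_seq_ops);

            if (!ret)       /* stash per-entry data, as cciss does */
                    ((struct seq_file *)file->private_data)->private =
                                    PDE(inode)->data;
            return ret;
    }

    static const struct file_operations ex_fops = {
            .owner   = THIS_MODULE,
            .open    = ex_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = seq_release,
    };

    /* registration, typically from the driver's init path:
     *   pde = proc_create("example", 0444, NULL, &ex_fops);
     *   if (pde)
     *           pde->data = my_private_data;
     */
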
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 55178e9973a0..45ac09300eb3 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c | |||
@@ -1404,21 +1404,18 @@ cciss_engage_scsi(int ctlr) | |||
1404 | } | 1404 | } |
1405 | 1405 | ||
1406 | static void | 1406 | static void |
1407 | cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len) | 1407 | cciss_seq_tape_report(struct seq_file *seq, int ctlr) |
1408 | { | 1408 | { |
1409 | unsigned long flags; | 1409 | unsigned long flags; |
1410 | int size; | ||
1411 | |||
1412 | *pos = *pos -1; *len = *len - 1; // cut off the last trailing newline | ||
1413 | 1410 | ||
1414 | CPQ_TAPE_LOCK(ctlr, flags); | 1411 | CPQ_TAPE_LOCK(ctlr, flags); |
1415 | size = sprintf(buffer + *len, | 1412 | seq_printf(seq, |
1416 | "Sequential access devices: %d\n\n", | 1413 | "Sequential access devices: %d\n\n", |
1417 | ccissscsi[ctlr].ndevices); | 1414 | ccissscsi[ctlr].ndevices); |
1418 | CPQ_TAPE_UNLOCK(ctlr, flags); | 1415 | CPQ_TAPE_UNLOCK(ctlr, flags); |
1419 | *pos += size; *len += size; | ||
1420 | } | 1416 | } |
1421 | 1417 | ||
1418 | |||
1422 | /* Need at least one of these error handlers to keep ../scsi/hosts.c from | 1419 | /* Need at least one of these error handlers to keep ../scsi/hosts.c from |
1423 | * complaining. Doing a host- or bus-reset can't do anything good here. | 1420 | * complaining. Doing a host- or bus-reset can't do anything good here. |
1424 | * Despite what it might say in scsi_error.c, there may well be commands | 1421 | * Despite what it might say in scsi_error.c, there may well be commands |
@@ -1498,6 +1495,5 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd) | |||
1498 | #define cciss_scsi_setup(cntl_num) | 1495 | #define cciss_scsi_setup(cntl_num) |
1499 | #define cciss_unregister_scsi(ctlr) | 1496 | #define cciss_unregister_scsi(ctlr) |
1500 | #define cciss_register_scsi(ctlr) | 1497 | #define cciss_register_scsi(ctlr) |
1501 | #define cciss_proc_tape_report(ctlr, buffer, pos, len) | ||
1502 | 1498 | ||
1503 | #endif /* CONFIG_CISS_SCSI_TAPE */ | 1499 | #endif /* CONFIG_CISS_SCSI_TAPE */ |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 674cd66dcaba..18feb1c7c33b 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -849,7 +849,8 @@ static int pkt_flush_cache(struct pktcdvd_device *pd) | |||
849 | /* | 849 | /* |
850 | * speed is given as the normal factor, e.g. 4 for 4x | 850 | * speed is given as the normal factor, e.g. 4 for 4x |
851 | */ | 851 | */ |
852 | static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsigned read_speed) | 852 | static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, |
853 | unsigned write_speed, unsigned read_speed) | ||
853 | { | 854 | { |
854 | struct packet_command cgc; | 855 | struct packet_command cgc; |
855 | struct request_sense sense; | 856 | struct request_sense sense; |
@@ -1776,7 +1777,8 @@ static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, | |||
1776 | return pkt_generic_packet(pd, &cgc); | 1777 | return pkt_generic_packet(pd, &cgc); |
1777 | } | 1778 | } |
1778 | 1779 | ||
1779 | static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written) | 1780 | static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd, |
1781 | long *last_written) | ||
1780 | { | 1782 | { |
1781 | disc_information di; | 1783 | disc_information di; |
1782 | track_information ti; | 1784 | track_information ti; |
@@ -1813,7 +1815,7 @@ static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written) | |||
1813 | /* | 1815 | /* |
1814 | * write mode select package based on pd->settings | 1816 | * write mode select package based on pd->settings |
1815 | */ | 1817 | */ |
1816 | static int pkt_set_write_settings(struct pktcdvd_device *pd) | 1818 | static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) |
1817 | { | 1819 | { |
1818 | struct packet_command cgc; | 1820 | struct packet_command cgc; |
1819 | struct request_sense sense; | 1821 | struct request_sense sense; |
@@ -1972,7 +1974,7 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) | |||
1972 | return 1; | 1974 | return 1; |
1973 | } | 1975 | } |
1974 | 1976 | ||
1975 | static int pkt_probe_settings(struct pktcdvd_device *pd) | 1977 | static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) |
1976 | { | 1978 | { |
1977 | struct packet_command cgc; | 1979 | struct packet_command cgc; |
1978 | unsigned char buf[12]; | 1980 | unsigned char buf[12]; |
@@ -2071,7 +2073,8 @@ static int pkt_probe_settings(struct pktcdvd_device *pd) | |||
2071 | /* | 2073 | /* |
2072 | * enable/disable write caching on drive | 2074 | * enable/disable write caching on drive |
2073 | */ | 2075 | */ |
2074 | static int pkt_write_caching(struct pktcdvd_device *pd, int set) | 2076 | static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd, |
2077 | int set) | ||
2075 | { | 2078 | { |
2076 | struct packet_command cgc; | 2079 | struct packet_command cgc; |
2077 | struct request_sense sense; | 2080 | struct request_sense sense; |
@@ -2116,7 +2119,8 @@ static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag) | |||
2116 | /* | 2119 | /* |
2117 | * Returns drive maximum write speed | 2120 | * Returns drive maximum write speed |
2118 | */ | 2121 | */ |
2119 | static int pkt_get_max_speed(struct pktcdvd_device *pd, unsigned *write_speed) | 2122 | static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd, |
2123 | unsigned *write_speed) | ||
2120 | { | 2124 | { |
2121 | struct packet_command cgc; | 2125 | struct packet_command cgc; |
2122 | struct request_sense sense; | 2126 | struct request_sense sense; |
@@ -2177,7 +2181,8 @@ static char us_clv_to_speed[16] = { | |||
2177 | /* | 2181 | /* |
2178 | * reads the maximum media speed from ATIP | 2182 | * reads the maximum media speed from ATIP |
2179 | */ | 2183 | */ |
2180 | static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed) | 2184 | static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, |
2185 | unsigned *speed) | ||
2181 | { | 2186 | { |
2182 | struct packet_command cgc; | 2187 | struct packet_command cgc; |
2183 | struct request_sense sense; | 2188 | struct request_sense sense; |
@@ -2249,7 +2254,7 @@ static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed) | |||
2249 | } | 2254 | } |
2250 | } | 2255 | } |
2251 | 2256 | ||
2252 | static int pkt_perform_opc(struct pktcdvd_device *pd) | 2257 | static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) |
2253 | { | 2258 | { |
2254 | struct packet_command cgc; | 2259 | struct packet_command cgc; |
2255 | struct request_sense sense; | 2260 | struct request_sense sense; |
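
Note: all of the pktcdvd.c hunks do the same thing: helpers that keep a struct packet_command and a struct request_sense on the stack are marked noinline_for_stack (an alias for noinline in <linux/compiler.h>). Without it, the compiler is free to inline several of these helpers into one caller and add their temporary buffers up into a single oversized stack frame. A hypothetical example of the annotation (the helpers it calls are assumed):

    /* Hypothetical helper: the large local buffer stays in this frame
     * instead of being inlined into (and bloating) the caller's frame. */
    static noinline_for_stack int send_big_command(struct my_dev *dev)
    {
            unsigned char buf[512];

            build_command(buf, sizeof(buf));        /* assumed helpers */
            return issue_command(dev, buf, sizeof(buf));
    }
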
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index db259e60289b..12f5baea439b 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c | |||
@@ -1152,8 +1152,8 @@ clean_up_and_return: | |||
1152 | /* This code is similar to that in open_for_data. The routine is called | 1152 | /* This code is similar to that in open_for_data. The routine is called |
1153 | whenever an audio play operation is requested. | 1153 | whenever an audio play operation is requested. |
1154 | */ | 1154 | */ |
1155 | int check_for_audio_disc(struct cdrom_device_info * cdi, | 1155 | static int check_for_audio_disc(struct cdrom_device_info * cdi, |
1156 | struct cdrom_device_ops * cdo) | 1156 | struct cdrom_device_ops * cdo) |
1157 | { | 1157 | { |
1158 | int ret; | 1158 | int ret; |
1159 | tracktype tracks; | 1159 | tracktype tracks; |
diff --git a/drivers/char/defkeymap.c_shipped b/drivers/char/defkeymap.c_shipped index 0aa419a61767..d2208dfe3f67 100644 --- a/drivers/char/defkeymap.c_shipped +++ b/drivers/char/defkeymap.c_shipped | |||
@@ -223,40 +223,40 @@ char *func_table[MAX_NR_FUNC] = { | |||
223 | }; | 223 | }; |
224 | 224 | ||
225 | struct kbdiacruc accent_table[MAX_DIACR] = { | 225 | struct kbdiacruc accent_table[MAX_DIACR] = { |
226 | {'`', 'A', '\300'}, {'`', 'a', '\340'}, | 226 | {'`', 'A', 0300}, {'`', 'a', 0340}, |
227 | {'\'', 'A', '\301'}, {'\'', 'a', '\341'}, | 227 | {'\'', 'A', 0301}, {'\'', 'a', 0341}, |
228 | {'^', 'A', '\302'}, {'^', 'a', '\342'}, | 228 | {'^', 'A', 0302}, {'^', 'a', 0342}, |
229 | {'~', 'A', '\303'}, {'~', 'a', '\343'}, | 229 | {'~', 'A', 0303}, {'~', 'a', 0343}, |
230 | {'"', 'A', '\304'}, {'"', 'a', '\344'}, | 230 | {'"', 'A', 0304}, {'"', 'a', 0344}, |
231 | {'O', 'A', '\305'}, {'o', 'a', '\345'}, | 231 | {'O', 'A', 0305}, {'o', 'a', 0345}, |
232 | {'0', 'A', '\305'}, {'0', 'a', '\345'}, | 232 | {'0', 'A', 0305}, {'0', 'a', 0345}, |
233 | {'A', 'A', '\305'}, {'a', 'a', '\345'}, | 233 | {'A', 'A', 0305}, {'a', 'a', 0345}, |
234 | {'A', 'E', '\306'}, {'a', 'e', '\346'}, | 234 | {'A', 'E', 0306}, {'a', 'e', 0346}, |
235 | {',', 'C', '\307'}, {',', 'c', '\347'}, | 235 | {',', 'C', 0307}, {',', 'c', 0347}, |
236 | {'`', 'E', '\310'}, {'`', 'e', '\350'}, | 236 | {'`', 'E', 0310}, {'`', 'e', 0350}, |
237 | {'\'', 'E', '\311'}, {'\'', 'e', '\351'}, | 237 | {'\'', 'E', 0311}, {'\'', 'e', 0351}, |
238 | {'^', 'E', '\312'}, {'^', 'e', '\352'}, | 238 | {'^', 'E', 0312}, {'^', 'e', 0352}, |
239 | {'"', 'E', '\313'}, {'"', 'e', '\353'}, | 239 | {'"', 'E', 0313}, {'"', 'e', 0353}, |
240 | {'`', 'I', '\314'}, {'`', 'i', '\354'}, | 240 | {'`', 'I', 0314}, {'`', 'i', 0354}, |
241 | {'\'', 'I', '\315'}, {'\'', 'i', '\355'}, | 241 | {'\'', 'I', 0315}, {'\'', 'i', 0355}, |
242 | {'^', 'I', '\316'}, {'^', 'i', '\356'}, | 242 | {'^', 'I', 0316}, {'^', 'i', 0356}, |
243 | {'"', 'I', '\317'}, {'"', 'i', '\357'}, | 243 | {'"', 'I', 0317}, {'"', 'i', 0357}, |
244 | {'-', 'D', '\320'}, {'-', 'd', '\360'}, | 244 | {'-', 'D', 0320}, {'-', 'd', 0360}, |
245 | {'~', 'N', '\321'}, {'~', 'n', '\361'}, | 245 | {'~', 'N', 0321}, {'~', 'n', 0361}, |
246 | {'`', 'O', '\322'}, {'`', 'o', '\362'}, | 246 | {'`', 'O', 0322}, {'`', 'o', 0362}, |
247 | {'\'', 'O', '\323'}, {'\'', 'o', '\363'}, | 247 | {'\'', 'O', 0323}, {'\'', 'o', 0363}, |
248 | {'^', 'O', '\324'}, {'^', 'o', '\364'}, | 248 | {'^', 'O', 0324}, {'^', 'o', 0364}, |
249 | {'~', 'O', '\325'}, {'~', 'o', '\365'}, | 249 | {'~', 'O', 0325}, {'~', 'o', 0365}, |
250 | {'"', 'O', '\326'}, {'"', 'o', '\366'}, | 250 | {'"', 'O', 0326}, {'"', 'o', 0366}, |
251 | {'/', 'O', '\330'}, {'/', 'o', '\370'}, | 251 | {'/', 'O', 0330}, {'/', 'o', 0370}, |
252 | {'`', 'U', '\331'}, {'`', 'u', '\371'}, | 252 | {'`', 'U', 0331}, {'`', 'u', 0371}, |
253 | {'\'', 'U', '\332'}, {'\'', 'u', '\372'}, | 253 | {'\'', 'U', 0332}, {'\'', 'u', 0372}, |
254 | {'^', 'U', '\333'}, {'^', 'u', '\373'}, | 254 | {'^', 'U', 0333}, {'^', 'u', 0373}, |
255 | {'"', 'U', '\334'}, {'"', 'u', '\374'}, | 255 | {'"', 'U', 0334}, {'"', 'u', 0374}, |
256 | {'\'', 'Y', '\335'}, {'\'', 'y', '\375'}, | 256 | {'\'', 'Y', 0335}, {'\'', 'y', 0375}, |
257 | {'T', 'H', '\336'}, {'t', 'h', '\376'}, | 257 | {'T', 'H', 0336}, {'t', 'h', 0376}, |
258 | {'s', 's', '\337'}, {'"', 'y', '\377'}, | 258 | {'s', 's', 0337}, {'"', 'y', 0377}, |
259 | {'s', 'z', '\337'}, {'i', 'j', '\377'}, | 259 | {'s', 'z', 0337}, {'i', 'j', 0377}, |
260 | }; | 260 | }; |
261 | 261 | ||
262 | unsigned int accent_table_size = 68; | 262 | unsigned int accent_table_size = 68; |
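
Note: the defkeymap.c_shipped change swaps character constants such as '\300' for plain octal integers such as 0300. The two are not equivalent once the accent_table fields are wider than a byte: on the usual signed-char targets '\300' is the int value -64, which sign-extends when stored into an unsigned int, while 0300 is simply 192. A small user-space illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int a = '\300';        /* -64 where char is signed */
            unsigned int b = 0300;          /* always 192 */

            /* typically prints "4294967232 192" on x86 (signed char) */
            printf("%u %u\n", a, b);
            return 0;
    }
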
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c index 85d596a3c18c..eba2883b630e 100644 --- a/drivers/char/isicom.c +++ b/drivers/char/isicom.c | |||
@@ -1527,7 +1527,7 @@ static int __devinit reset_card(struct pci_dev *pdev, | |||
1527 | msleep(10); | 1527 | msleep(10); |
1528 | 1528 | ||
1529 | portcount = inw(base + 0x2); | 1529 | portcount = inw(base + 0x2); |
1530 | if (!inw(base + 0xe) & 0x1 || (portcount != 0 && portcount != 4 && | 1530 | if (!(inw(base + 0xe) & 0x1) || (portcount != 0 && portcount != 4 && |
1531 | portcount != 8 && portcount != 16)) { | 1531 | portcount != 8 && portcount != 16)) { |
1532 | dev_err(&pdev->dev, "ISILoad:PCI Card%d reset failure.\n", | 1532 | dev_err(&pdev->dev, "ISILoad:PCI Card%d reset failure.\n", |
1533 | card + 1); | 1533 | card + 1); |
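
Note: the isicom.c fix is a precedence bug: `!` binds tighter than `&`, so the old `!inw(base + 0xe) & 0x1` parsed as `(!inw(base + 0xe)) & 0x1` and only ever tested whether the whole register read back as zero; the parenthesized form tests bit 0 as intended. Illustrated with a made-up register value:

    unsigned int status = 0x2;              /* bit 0 clear, bit 1 set */

    int buggy = !status & 0x1;              /* (!0x2) & 1  -> 0 & 1 -> 0 */
    int fixed = !(status & 0x1);            /* !(0x2 & 1)  -> !0    -> 1 */
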
diff --git a/drivers/char/pcmcia/ipwireless/network.c b/drivers/char/pcmcia/ipwireless/network.c index ff35230058d3..d793e68b3e0d 100644 --- a/drivers/char/pcmcia/ipwireless/network.c +++ b/drivers/char/pcmcia/ipwireless/network.c | |||
@@ -377,13 +377,16 @@ void ipwireless_network_packet_received(struct ipw_network *network, | |||
377 | for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) { | 377 | for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) { |
378 | struct ipw_tty *tty = network->associated_ttys[channel_idx][i]; | 378 | struct ipw_tty *tty = network->associated_ttys[channel_idx][i]; |
379 | 379 | ||
380 | if (!tty) | ||
381 | continue; | ||
382 | |||
380 | /* | 383 | /* |
381 | * If it's associated with a tty (other than the RAS channel | 384 | * If it's associated with a tty (other than the RAS channel |
382 | * when we're online), then send the data to that tty. The RAS | 385 | * when we're online), then send the data to that tty. The RAS |
383 | * channel's data is handled above - it always goes through | 386 | * channel's data is handled above - it always goes through |
384 | * ppp_generic. | 387 | * ppp_generic. |
385 | */ | 388 | */ |
386 | if (tty && channel_idx == IPW_CHANNEL_RAS | 389 | if (channel_idx == IPW_CHANNEL_RAS |
387 | && (network->ras_control_lines & | 390 | && (network->ras_control_lines & |
388 | IPW_CONTROL_LINE_DCD) != 0 | 391 | IPW_CONTROL_LINE_DCD) != 0 |
389 | && ipwireless_tty_is_modem(tty)) { | 392 | && ipwireless_tty_is_modem(tty)) { |
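
Note: the ipwireless change hoists the NULL test on the per-channel tty into an early `continue`, so the long condition that follows no longer needs its leading `tty &&` term and later uses of tty inside the loop body are obviously safe. The same guard-clause shape, with hypothetical names:

    for (i = 0; i < n; i++) {
            struct item *it = table[i];

            if (!it)
                    continue;               /* skip empty slots up front */

            process(it);                    /* the rest can assume it != NULL */
    }
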
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index 78b151c4d20f..5c3142b6f1fc 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c | |||
@@ -110,8 +110,8 @@ static int rtc_has_irq = 1; | |||
110 | #define hpet_set_rtc_irq_bit(arg) 0 | 110 | #define hpet_set_rtc_irq_bit(arg) 0 |
111 | #define hpet_rtc_timer_init() do { } while (0) | 111 | #define hpet_rtc_timer_init() do { } while (0) |
112 | #define hpet_rtc_dropped_irq() 0 | 112 | #define hpet_rtc_dropped_irq() 0 |
113 | #define hpet_register_irq_handler(h) 0 | 113 | #define hpet_register_irq_handler(h) ({ 0; }) |
114 | #define hpet_unregister_irq_handler(h) 0 | 114 | #define hpet_unregister_irq_handler(h) ({ 0; }) |
115 | #ifdef RTC_IRQ | 115 | #ifdef RTC_IRQ |
116 | static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) | 116 | static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) |
117 | { | 117 | { |
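
Note: the rtc.c stubs turn a bare 0 into the GCC statement expression ({ 0; }). The value is unchanged, so `if (hpet_register_irq_handler(h))` still works when HPET support is compiled out; presumably the statement-expression form is there so a call whose result is ignored does not trigger a "statement with no effect" warning. The pattern, with a made-up config option:

    #ifdef CONFIG_FOO_TIMER
    extern int foo_register_handler(irq_handler_t handler);
    #else
    #define foo_register_handler(handler)   ({ 0; })
    #endif
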
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c index c0e08c7bca2f..5ff83df67b44 100644 --- a/drivers/char/specialix.c +++ b/drivers/char/specialix.c | |||
@@ -2109,7 +2109,6 @@ static void sx_throttle(struct tty_struct * tty) | |||
2109 | sx_out(bp, CD186x_CAR, port_No(port)); | 2109 | sx_out(bp, CD186x_CAR, port_No(port)); |
2110 | spin_unlock_irqrestore(&bp->lock, flags); | 2110 | spin_unlock_irqrestore(&bp->lock, flags); |
2111 | if (I_IXOFF(tty)) { | 2111 | if (I_IXOFF(tty)) { |
2112 | spin_unlock_irqrestore(&bp->lock, flags); | ||
2113 | sx_wait_CCR(bp); | 2112 | sx_wait_CCR(bp); |
2114 | spin_lock_irqsave(&bp->lock, flags); | 2113 | spin_lock_irqsave(&bp->lock, flags); |
2115 | sx_out(bp, CD186x_CCR, CCR_SSCH2); | 2114 | sx_out(bp, CD186x_CCR, CCR_SSCH2); |
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 367be9175061..9b58b894f823 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
@@ -702,6 +702,7 @@ void redraw_screen(struct vc_data *vc, int is_switch) | |||
702 | if (is_switch) { | 702 | if (is_switch) { |
703 | set_leds(); | 703 | set_leds(); |
704 | compute_shiftstate(); | 704 | compute_shiftstate(); |
705 | notify_update(vc); | ||
705 | } | 706 | } |
706 | } | 707 | } |
707 | 708 | ||
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c index dfea2bde162b..f577daedb630 100644 --- a/drivers/char/xilinx_hwicap/buffer_icap.c +++ b/drivers/char/xilinx_hwicap/buffer_icap.c | |||
@@ -73,8 +73,8 @@ | |||
73 | #define XHI_BUFFER_START 0 | 73 | #define XHI_BUFFER_START 0 |
74 | 74 | ||
75 | /** | 75 | /** |
76 | * buffer_icap_get_status: Get the contents of the status register. | 76 | * buffer_icap_get_status - Get the contents of the status register. |
77 | * @parameter base_address: is the base address of the device | 77 | * @base_address: is the base address of the device |
78 | * | 78 | * |
79 | * The status register contains the ICAP status and the done bit. | 79 | * The status register contains the ICAP status and the done bit. |
80 | * | 80 | * |
@@ -94,9 +94,9 @@ static inline u32 buffer_icap_get_status(void __iomem *base_address) | |||
94 | } | 94 | } |
95 | 95 | ||
96 | /** | 96 | /** |
97 | * buffer_icap_get_bram: Reads data from the storage buffer bram. | 97 | * buffer_icap_get_bram - Reads data from the storage buffer bram. |
98 | * @parameter base_address: contains the base address of the component. | 98 | * @base_address: contains the base address of the component. |
99 | * @parameter offset: The word offset from which the data should be read. | 99 | * @offset: The word offset from which the data should be read. |
100 | * | 100 | * |
101 | * A bram is used as a configuration memory cache. One frame of data can | 101 | * A bram is used as a configuration memory cache. One frame of data can |
102 | * be stored in this "storage buffer". | 102 | * be stored in this "storage buffer". |
@@ -108,8 +108,8 @@ static inline u32 buffer_icap_get_bram(void __iomem *base_address, | |||
108 | } | 108 | } |
109 | 109 | ||
110 | /** | 110 | /** |
111 | * buffer_icap_busy: Return true if the icap device is busy | 111 | * buffer_icap_busy - Return true if the icap device is busy |
112 | * @parameter base_address: is the base address of the device | 112 | * @base_address: is the base address of the device |
113 | * | 113 | * |
114 | * The queries the low order bit of the status register, which | 114 | * The queries the low order bit of the status register, which |
115 | * indicates whether the current configuration or readback operation | 115 | * indicates whether the current configuration or readback operation |
@@ -121,8 +121,8 @@ static inline bool buffer_icap_busy(void __iomem *base_address) | |||
121 | } | 121 | } |
122 | 122 | ||
123 | /** | 123 | /** |
124 | * buffer_icap_busy: Return true if the icap device is not busy | 124 | * buffer_icap_busy - Return true if the icap device is not busy |
125 | * @parameter base_address: is the base address of the device | 125 | * @base_address: is the base address of the device |
126 | * | 126 | * |
127 | * The queries the low order bit of the status register, which | 127 | * The queries the low order bit of the status register, which |
128 | * indicates whether the current configuration or readback operation | 128 | * indicates whether the current configuration or readback operation |
@@ -134,9 +134,9 @@ static inline bool buffer_icap_done(void __iomem *base_address) | |||
134 | } | 134 | } |
135 | 135 | ||
136 | /** | 136 | /** |
137 | * buffer_icap_set_size: Set the size register. | 137 | * buffer_icap_set_size - Set the size register. |
138 | * @parameter base_address: is the base address of the device | 138 | * @base_address: is the base address of the device |
139 | * @parameter data: The size in bytes. | 139 | * @data: The size in bytes. |
140 | * | 140 | * |
141 | * The size register holds the number of 8 bit bytes to transfer between | 141 | * The size register holds the number of 8 bit bytes to transfer between |
142 | * bram and the icap (or icap to bram). | 142 | * bram and the icap (or icap to bram). |
@@ -148,9 +148,9 @@ static inline void buffer_icap_set_size(void __iomem *base_address, | |||
148 | } | 148 | } |
149 | 149 | ||
150 | /** | 150 | /** |
151 | * buffer_icap_mSetoffsetReg: Set the bram offset register. | 151 | * buffer_icap_set_offset - Set the bram offset register. |
152 | * @parameter base_address: contains the base address of the device. | 152 | * @base_address: contains the base address of the device. |
153 | * @parameter data: is the value to be written to the data register. | 153 | * @data: is the value to be written to the data register. |
154 | * | 154 | * |
155 | * The bram offset register holds the starting bram address to transfer | 155 | * The bram offset register holds the starting bram address to transfer |
156 | * data from during configuration or write data to during readback. | 156 | * data from during configuration or write data to during readback. |
@@ -162,9 +162,9 @@ static inline void buffer_icap_set_offset(void __iomem *base_address, | |||
162 | } | 162 | } |
163 | 163 | ||
164 | /** | 164 | /** |
165 | * buffer_icap_set_rnc: Set the RNC (Readback not Configure) register. | 165 | * buffer_icap_set_rnc - Set the RNC (Readback not Configure) register. |
166 | * @parameter base_address: contains the base address of the device. | 166 | * @base_address: contains the base address of the device. |
167 | * @parameter data: is the value to be written to the data register. | 167 | * @data: is the value to be written to the data register. |
168 | * | 168 | * |
169 | * The RNC register determines the direction of the data transfer. It | 169 | * The RNC register determines the direction of the data transfer. It |
170 | * controls whether a configuration or readback take place. Writing to | 170 | * controls whether a configuration or readback take place. Writing to |
@@ -178,10 +178,10 @@ static inline void buffer_icap_set_rnc(void __iomem *base_address, | |||
178 | } | 178 | } |
179 | 179 | ||
180 | /** | 180 | /** |
181 | * buffer_icap_set_bram: Write data to the storage buffer bram. | 181 | * buffer_icap_set_bram - Write data to the storage buffer bram. |
182 | * @parameter base_address: contains the base address of the component. | 182 | * @base_address: contains the base address of the component. |
183 | * @parameter offset: The word offset at which the data should be written. | 183 | * @offset: The word offset at which the data should be written. |
184 | * @parameter data: The value to be written to the bram offset. | 184 | * @data: The value to be written to the bram offset. |
185 | * | 185 | * |
186 | * A bram is used as a configuration memory cache. One frame of data can | 186 | * A bram is used as a configuration memory cache. One frame of data can |
187 | * be stored in this "storage buffer". | 187 | * be stored in this "storage buffer". |
@@ -193,10 +193,10 @@ static inline void buffer_icap_set_bram(void __iomem *base_address, | |||
193 | } | 193 | } |
194 | 194 | ||
195 | /** | 195 | /** |
196 | * buffer_icap_device_read: Transfer bytes from ICAP to the storage buffer. | 196 | * buffer_icap_device_read - Transfer bytes from ICAP to the storage buffer. |
197 | * @parameter drvdata: a pointer to the drvdata. | 197 | * @drvdata: a pointer to the drvdata. |
198 | * @parameter offset: The storage buffer start address. | 198 | * @offset: The storage buffer start address. |
199 | * @parameter count: The number of words (32 bit) to read from the | 199 | * @count: The number of words (32 bit) to read from the |
200 | * device (ICAP). | 200 | * device (ICAP). |
201 | **/ | 201 | **/ |
202 | static int buffer_icap_device_read(struct hwicap_drvdata *drvdata, | 202 | static int buffer_icap_device_read(struct hwicap_drvdata *drvdata, |
@@ -227,10 +227,10 @@ static int buffer_icap_device_read(struct hwicap_drvdata *drvdata, | |||
227 | }; | 227 | }; |
228 | 228 | ||
229 | /** | 229 | /** |
230 | * buffer_icap_device_write: Transfer bytes from ICAP to the storage buffer. | 230 | * buffer_icap_device_write - Transfer bytes from ICAP to the storage buffer. |
231 | * @parameter drvdata: a pointer to the drvdata. | 231 | * @drvdata: a pointer to the drvdata. |
232 | * @parameter offset: The storage buffer start address. | 232 | * @offset: The storage buffer start address. |
233 | * @parameter count: The number of words (32 bit) to read from the | 233 | * @count: The number of words (32 bit) to read from the |
234 | * device (ICAP). | 234 | * device (ICAP). |
235 | **/ | 235 | **/ |
236 | static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, | 236 | static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, |
@@ -261,8 +261,8 @@ static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, | |||
261 | }; | 261 | }; |
262 | 262 | ||
263 | /** | 263 | /** |
264 | * buffer_icap_reset: Reset the logic of the icap device. | 264 | * buffer_icap_reset - Reset the logic of the icap device. |
265 | * @parameter drvdata: a pointer to the drvdata. | 265 | * @drvdata: a pointer to the drvdata. |
266 | * | 266 | * |
267 | * Writing to the status register resets the ICAP logic in an internal | 267 | * Writing to the status register resets the ICAP logic in an internal |
268 | * version of the core. For the version of the core published in EDK, | 268 | * version of the core. For the version of the core published in EDK, |
@@ -274,10 +274,10 @@ void buffer_icap_reset(struct hwicap_drvdata *drvdata) | |||
274 | } | 274 | } |
275 | 275 | ||
276 | /** | 276 | /** |
277 | * buffer_icap_set_configuration: Load a partial bitstream from system memory. | 277 | * buffer_icap_set_configuration - Load a partial bitstream from system memory. |
278 | * @parameter drvdata: a pointer to the drvdata. | 278 | * @drvdata: a pointer to the drvdata. |
279 | * @parameter data: Kernel address of the partial bitstream. | 279 | * @data: Kernel address of the partial bitstream. |
280 | * @parameter size: the size of the partial bitstream in 32 bit words. | 280 | * @size: the size of the partial bitstream in 32 bit words. |
281 | **/ | 281 | **/ |
282 | int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, | 282 | int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, |
283 | u32 size) | 283 | u32 size) |
@@ -333,10 +333,10 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, | |||
333 | }; | 333 | }; |
334 | 334 | ||
335 | /** | 335 | /** |
336 | * buffer_icap_get_configuration: Read configuration data from the device. | 336 | * buffer_icap_get_configuration - Read configuration data from the device. |
337 | * @parameter drvdata: a pointer to the drvdata. | 337 | * @drvdata: a pointer to the drvdata. |
338 | * @parameter data: Address of the data representing the partial bitstream | 338 | * @data: Address of the data representing the partial bitstream |
339 | * @parameter size: the size of the partial bitstream in 32 bit words. | 339 | * @size: the size of the partial bitstream in 32 bit words. |
340 | **/ | 340 | **/ |
341 | int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data, | 341 | int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data, |
342 | u32 size) | 342 | u32 size) |
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c index 0988314694a6..6f45dbd47125 100644 --- a/drivers/char/xilinx_hwicap/fifo_icap.c +++ b/drivers/char/xilinx_hwicap/fifo_icap.c | |||
@@ -94,9 +94,9 @@ | |||
94 | 94 | ||
95 | 95 | ||
96 | /** | 96 | /** |
97 | * fifo_icap_fifo_write: Write data to the write FIFO. | 97 | * fifo_icap_fifo_write - Write data to the write FIFO. |
98 | * @parameter drvdata: a pointer to the drvdata. | 98 | * @drvdata: a pointer to the drvdata. |
99 | * @parameter data: the 32-bit value to be written to the FIFO. | 99 | * @data: the 32-bit value to be written to the FIFO. |
100 | * | 100 | * |
101 | * This function will silently fail if the fifo is full. | 101 | * This function will silently fail if the fifo is full. |
102 | **/ | 102 | **/ |
@@ -108,8 +108,8 @@ static inline void fifo_icap_fifo_write(struct hwicap_drvdata *drvdata, | |||
108 | } | 108 | } |
109 | 109 | ||
110 | /** | 110 | /** |
111 | * fifo_icap_fifo_read: Read data from the Read FIFO. | 111 | * fifo_icap_fifo_read - Read data from the Read FIFO. |
112 | * @parameter drvdata: a pointer to the drvdata. | 112 | * @drvdata: a pointer to the drvdata. |
113 | * | 113 | * |
114 | * This function will silently fail if the fifo is empty. | 114 | * This function will silently fail if the fifo is empty. |
115 | **/ | 115 | **/ |
@@ -121,9 +121,9 @@ static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata) | |||
121 | } | 121 | } |
122 | 122 | ||
123 | /** | 123 | /** |
124 | * fifo_icap_set_read_size: Set the the size register. | 124 | * fifo_icap_set_read_size - Set the the size register. |
125 | * @parameter drvdata: a pointer to the drvdata. | 125 | * @drvdata: a pointer to the drvdata. |
126 | * @parameter data: the size of the following read transaction, in words. | 126 | * @data: the size of the following read transaction, in words. |
127 | **/ | 127 | **/ |
128 | static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, | 128 | static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, |
129 | u32 data) | 129 | u32 data) |
@@ -132,8 +132,8 @@ static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, | |||
132 | } | 132 | } |
133 | 133 | ||
134 | /** | 134 | /** |
135 | * fifo_icap_start_config: Initiate a configuration (write) to the device. | 135 | * fifo_icap_start_config - Initiate a configuration (write) to the device. |
136 | * @parameter drvdata: a pointer to the drvdata. | 136 | * @drvdata: a pointer to the drvdata. |
137 | **/ | 137 | **/ |
138 | static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) | 138 | static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) |
139 | { | 139 | { |
@@ -142,8 +142,8 @@ static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) | |||
142 | } | 142 | } |
143 | 143 | ||
144 | /** | 144 | /** |
145 | * fifo_icap_start_readback: Initiate a readback from the device. | 145 | * fifo_icap_start_readback - Initiate a readback from the device. |
146 | * @parameter drvdata: a pointer to the drvdata. | 146 | * @drvdata: a pointer to the drvdata. |
147 | **/ | 147 | **/ |
148 | static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) | 148 | static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) |
149 | { | 149 | { |
@@ -152,8 +152,8 @@ static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) | |||
152 | } | 152 | } |
153 | 153 | ||
154 | /** | 154 | /** |
155 | * fifo_icap_busy: Return true if the ICAP is still processing a transaction. | 155 | * fifo_icap_busy - Return true if the ICAP is still processing a transaction. |
156 | * @parameter drvdata: a pointer to the drvdata. | 156 | * @drvdata: a pointer to the drvdata. |
157 | **/ | 157 | **/ |
158 | static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) | 158 | static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) |
159 | { | 159 | { |
@@ -163,8 +163,8 @@ static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) | |||
163 | } | 163 | } |
164 | 164 | ||
165 | /** | 165 | /** |
166 | * fifo_icap_write_fifo_vacancy: Query the write fifo available space. | 166 | * fifo_icap_write_fifo_vacancy - Query the write fifo available space. |
167 | * @parameter drvdata: a pointer to the drvdata. | 167 | * @drvdata: a pointer to the drvdata. |
168 | * | 168 | * |
169 | * Return the number of words that can be safely pushed into the write fifo. | 169 | * Return the number of words that can be safely pushed into the write fifo. |
170 | **/ | 170 | **/ |
@@ -175,8 +175,8 @@ static inline u32 fifo_icap_write_fifo_vacancy( | |||
175 | } | 175 | } |
176 | 176 | ||
177 | /** | 177 | /** |
178 | * fifo_icap_read_fifo_occupancy: Query the read fifo available data. | 178 | * fifo_icap_read_fifo_occupancy - Query the read fifo available data. |
179 | * @parameter drvdata: a pointer to the drvdata. | 179 | * @drvdata: a pointer to the drvdata. |
180 | * | 180 | * |
181 | * Return the number of words that can be safely read from the read fifo. | 181 | * Return the number of words that can be safely read from the read fifo. |
182 | **/ | 182 | **/ |
@@ -187,11 +187,11 @@ static inline u32 fifo_icap_read_fifo_occupancy( | |||
187 | } | 187 | } |
188 | 188 | ||
189 | /** | 189 | /** |
190 | * fifo_icap_set_configuration: Send configuration data to the ICAP. | 190 | * fifo_icap_set_configuration - Send configuration data to the ICAP. |
191 | * @parameter drvdata: a pointer to the drvdata. | 191 | * @drvdata: a pointer to the drvdata. |
192 | * @parameter frame_buffer: a pointer to the data to be written to the | 192 | * @frame_buffer: a pointer to the data to be written to the |
193 | * ICAP device. | 193 | * ICAP device. |
194 | * @parameter num_words: the number of words (32 bit) to write to the ICAP | 194 | * @num_words: the number of words (32 bit) to write to the ICAP |
195 | * device. | 195 | * device. |
196 | 196 | ||
197 | * This function writes the given user data to the Write FIFO in | 197 | * This function writes the given user data to the Write FIFO in |
@@ -266,10 +266,10 @@ int fifo_icap_set_configuration(struct hwicap_drvdata *drvdata, | |||
266 | } | 266 | } |
267 | 267 | ||
268 | /** | 268 | /** |
269 | * fifo_icap_get_configuration: Read configuration data from the device. | 269 | * fifo_icap_get_configuration - Read configuration data from the device. |
270 | * @parameter drvdata: a pointer to the drvdata. | 270 | * @drvdata: a pointer to the drvdata. |
271 | * @parameter data: Address of the data representing the partial bitstream | 271 | * @data: Address of the data representing the partial bitstream |
272 | * @parameter size: the size of the partial bitstream in 32 bit words. | 272 | * @size: the size of the partial bitstream in 32 bit words. |
273 | * | 273 | * |
274 | * This function reads the specified number of words from the ICAP device in | 274 | * This function reads the specified number of words from the ICAP device in |
275 | * the polled mode. | 275 | * the polled mode. |
@@ -335,8 +335,8 @@ int fifo_icap_get_configuration(struct hwicap_drvdata *drvdata, | |||
335 | } | 335 | } |
336 | 336 | ||
337 | /** | 337 | /** |
338 | * buffer_icap_reset: Reset the logic of the icap device. | 338 | * buffer_icap_reset - Reset the logic of the icap device. |
339 | * @parameter drvdata: a pointer to the drvdata. | 339 | * @drvdata: a pointer to the drvdata. |
340 | * | 340 | * |
341 | * This function forces the software reset of the complete HWICAP device. | 341 | * This function forces the software reset of the complete HWICAP device. |
342 | * All the registers will return to the default value and the FIFO is also | 342 | * All the registers will return to the default value and the FIFO is also |
@@ -360,8 +360,8 @@ void fifo_icap_reset(struct hwicap_drvdata *drvdata) | |||
360 | } | 360 | } |
361 | 361 | ||
362 | /** | 362 | /** |
363 | * fifo_icap_flush_fifo: This function flushes the FIFOs in the device. | 363 | * fifo_icap_flush_fifo - This function flushes the FIFOs in the device. |
364 | * @parameter drvdata: a pointer to the drvdata. | 364 | * @drvdata: a pointer to the drvdata. |
365 | */ | 365 | */ |
366 | void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata) | 366 | void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata) |
367 | { | 367 | { |
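The fifo_icap.c hunks above only reshape the comment headers into standard kernel-doc form: a "name - summary" first line and "@argument:" tags instead of "@parameter argument:". A minimal sketch of the expected layout, using a hypothetical helper purely for illustration:

    /**
     * example_icap_helper - One-line summary in "name - summary" form.
     * @drvdata: a pointer to the drvdata.
     * @count: the number of 32-bit words to process.
     *
     * The free-form description goes here, after a blank comment line;
     * scripts/kernel-doc extracts the summary, the @-tagged arguments,
     * and this body for the generated documentation.
     */
    static int example_icap_helper(struct hwicap_drvdata *drvdata, u32 count);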
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index 24f6aef0fd3c..2284fa2a5a57 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c | |||
@@ -84,7 +84,7 @@ | |||
84 | #include <linux/init.h> | 84 | #include <linux/init.h> |
85 | #include <linux/poll.h> | 85 | #include <linux/poll.h> |
86 | #include <linux/proc_fs.h> | 86 | #include <linux/proc_fs.h> |
87 | #include <asm/semaphore.h> | 87 | #include <linux/mutex.h> |
88 | #include <linux/sysctl.h> | 88 | #include <linux/sysctl.h> |
89 | #include <linux/version.h> | 89 | #include <linux/version.h> |
90 | #include <linux/fs.h> | 90 | #include <linux/fs.h> |
@@ -119,6 +119,7 @@ module_param(xhwicap_minor, int, S_IRUGO); | |||
119 | 119 | ||
120 | /* An array, which is set to true when the device is registered. */ | 120 | /* An array, which is set to true when the device is registered. */ |
121 | static bool probed_devices[HWICAP_DEVICES]; | 121 | static bool probed_devices[HWICAP_DEVICES]; |
122 | static struct mutex icap_sem; | ||
122 | 123 | ||
123 | static struct class *icap_class; | 124 | static struct class *icap_class; |
124 | 125 | ||
@@ -199,14 +200,14 @@ static const struct config_registers v5_config_registers = { | |||
199 | }; | 200 | }; |
200 | 201 | ||
201 | /** | 202 | /** |
202 | * hwicap_command_desync: Send a DESYNC command to the ICAP port. | 203 | * hwicap_command_desync - Send a DESYNC command to the ICAP port. |
203 | * @parameter drvdata: a pointer to the drvdata. | 204 | * @drvdata: a pointer to the drvdata. |
204 | * | 205 | * |
205 | * This command desynchronizes the ICAP. After this command, a | 206 | * This command desynchronizes the ICAP. After this command, a |
206 | * bitstream containing a NULL packet, followed by a SYNCH packet is | 207 | * bitstream containing a NULL packet, followed by a SYNCH packet is |
207 | * required before the ICAP will recognize commands. | 208 | * required before the ICAP will recognize commands. |
208 | */ | 209 | */ |
209 | int hwicap_command_desync(struct hwicap_drvdata *drvdata) | 210 | static int hwicap_command_desync(struct hwicap_drvdata *drvdata) |
210 | { | 211 | { |
211 | u32 buffer[4]; | 212 | u32 buffer[4]; |
212 | u32 index = 0; | 213 | u32 index = 0; |
@@ -228,51 +229,18 @@ int hwicap_command_desync(struct hwicap_drvdata *drvdata) | |||
228 | } | 229 | } |
229 | 230 | ||
230 | /** | 231 | /** |
231 | * hwicap_command_capture: Send a CAPTURE command to the ICAP port. | 232 | * hwicap_get_configuration_register - Query a configuration register. |
232 | * @parameter drvdata: a pointer to the drvdata. | 233 | * @drvdata: a pointer to the drvdata. |
233 | * | 234 | * @reg: a constant which represents the configuration |
234 | * This command captures all of the flip flop states so they will be | ||
235 | * available during readback. One can use this command instead of | ||
236 | * enabling the CAPTURE block in the design. | ||
237 | */ | ||
238 | int hwicap_command_capture(struct hwicap_drvdata *drvdata) | ||
239 | { | ||
240 | u32 buffer[7]; | ||
241 | u32 index = 0; | ||
242 | |||
243 | /* | ||
244 | * Create the data to be written to the ICAP. | ||
245 | */ | ||
246 | buffer[index++] = XHI_DUMMY_PACKET; | ||
247 | buffer[index++] = XHI_SYNC_PACKET; | ||
248 | buffer[index++] = XHI_NOOP_PACKET; | ||
249 | buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1; | ||
250 | buffer[index++] = XHI_CMD_GCAPTURE; | ||
251 | buffer[index++] = XHI_DUMMY_PACKET; | ||
252 | buffer[index++] = XHI_DUMMY_PACKET; | ||
253 | |||
254 | /* | ||
255 | * Write the data to the FIFO and initiate the transfer of data | ||
256 | * present in the FIFO to the ICAP device. | ||
257 | */ | ||
258 | return drvdata->config->set_configuration(drvdata, | ||
259 | &buffer[0], index); | ||
260 | |||
261 | } | ||
262 | |||
263 | /** | ||
264 | * hwicap_get_configuration_register: Query a configuration register. | ||
265 | * @parameter drvdata: a pointer to the drvdata. | ||
266 | * @parameter reg: a constant which represents the configuration | ||
267 | * register value to be returned. | 235 | * register value to be returned. |
268 | * Examples: XHI_IDCODE, XHI_FLR. | 236 | * Examples: XHI_IDCODE, XHI_FLR. |
269 | * @parameter RegData: returns the value of the register. | 237 | * @reg_data: returns the value of the register. |
270 | * | 238 | * |
271 | * Sends a query packet to the ICAP and then receives the response. | 239 | * Sends a query packet to the ICAP and then receives the response. |
272 | * The icap is left in Synched state. | 240 | * The icap is left in Synched state. |
273 | */ | 241 | */ |
274 | int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, | 242 | static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, |
275 | u32 reg, u32 *RegData) | 243 | u32 reg, u32 *reg_data) |
276 | { | 244 | { |
277 | int status; | 245 | int status; |
278 | u32 buffer[6]; | 246 | u32 buffer[6]; |
@@ -300,14 +268,14 @@ int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, | |||
300 | /* | 268 | /* |
301 | * Read the configuration register | 269 | * Read the configuration register |
302 | */ | 270 | */ |
303 | status = drvdata->config->get_configuration(drvdata, RegData, 1); | 271 | status = drvdata->config->get_configuration(drvdata, reg_data, 1); |
304 | if (status) | 272 | if (status) |
305 | return status; | 273 | return status; |
306 | 274 | ||
307 | return 0; | 275 | return 0; |
308 | } | 276 | } |
309 | 277 | ||
310 | int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) | 278 | static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) |
311 | { | 279 | { |
312 | int status; | 280 | int status; |
313 | u32 idcode; | 281 | u32 idcode; |
@@ -344,7 +312,7 @@ int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) | |||
344 | } | 312 | } |
345 | 313 | ||
346 | static ssize_t | 314 | static ssize_t |
347 | hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) | 315 | hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) |
348 | { | 316 | { |
349 | struct hwicap_drvdata *drvdata = file->private_data; | 317 | struct hwicap_drvdata *drvdata = file->private_data; |
350 | ssize_t bytes_to_read = 0; | 318 | ssize_t bytes_to_read = 0; |
@@ -353,8 +321,9 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) | |||
353 | u32 bytes_remaining; | 321 | u32 bytes_remaining; |
354 | int status; | 322 | int status; |
355 | 323 | ||
356 | if (down_interruptible(&drvdata->sem)) | 324 | status = mutex_lock_interruptible(&drvdata->sem); |
357 | return -ERESTARTSYS; | 325 | if (status) |
326 | return status; | ||
358 | 327 | ||
359 | if (drvdata->read_buffer_in_use) { | 328 | if (drvdata->read_buffer_in_use) { |
360 | /* If there are leftover bytes in the buffer, just */ | 329 | /* If there are leftover bytes in the buffer, just */ |
@@ -370,8 +339,9 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) | |||
370 | goto error; | 339 | goto error; |
371 | } | 340 | } |
372 | drvdata->read_buffer_in_use -= bytes_to_read; | 341 | drvdata->read_buffer_in_use -= bytes_to_read; |
373 | memcpy(drvdata->read_buffer + bytes_to_read, | 342 | memmove(drvdata->read_buffer, |
374 | drvdata->read_buffer, 4 - bytes_to_read); | 343 | drvdata->read_buffer + bytes_to_read, |
344 | 4 - bytes_to_read); | ||
375 | } else { | 345 | } else { |
376 | /* Get new data from the ICAP, and return what was requested. */ | 346 | /* Get new data from the ICAP, and return what was requested. */ |
377 | kbuf = (u32 *) get_zeroed_page(GFP_KERNEL); | 347 | kbuf = (u32 *) get_zeroed_page(GFP_KERNEL); |
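One functional fix in the hunk above is the switch from memcpy() to memmove() when the leftover bytes of read_buffer are shifted toward its start: source and destination overlap inside the same 4-byte buffer, which memcpy() does not handle safely. A tiny standalone illustration of that overlap (values are arbitrary):

    u8 buf[4] = { 0xAA, 0xBB, 0xCC, 0xDD };
    size_t consumed = 1;                     /* bytes already returned to the caller */

    /* Overlapping regions inside one buffer: memmove() is required here. */
    memmove(buf, buf + consumed, sizeof(buf) - consumed);
    /* buf is now { 0xBB, 0xCC, 0xDD, 0xDD }; only the first 3 bytes are valid. */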
@@ -414,18 +384,20 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) | |||
414 | status = -EFAULT; | 384 | status = -EFAULT; |
415 | goto error; | 385 | goto error; |
416 | } | 386 | } |
417 | memcpy(kbuf, drvdata->read_buffer, bytes_remaining); | 387 | memcpy(drvdata->read_buffer, |
388 | kbuf, | ||
389 | bytes_remaining); | ||
418 | drvdata->read_buffer_in_use = bytes_remaining; | 390 | drvdata->read_buffer_in_use = bytes_remaining; |
419 | free_page((unsigned long)kbuf); | 391 | free_page((unsigned long)kbuf); |
420 | } | 392 | } |
421 | status = bytes_to_read; | 393 | status = bytes_to_read; |
422 | error: | 394 | error: |
423 | up(&drvdata->sem); | 395 | mutex_unlock(&drvdata->sem); |
424 | return status; | 396 | return status; |
425 | } | 397 | } |
426 | 398 | ||
427 | static ssize_t | 399 | static ssize_t |
428 | hwicap_write(struct file *file, const char *buf, | 400 | hwicap_write(struct file *file, const char __user *buf, |
429 | size_t count, loff_t *ppos) | 401 | size_t count, loff_t *ppos) |
430 | { | 402 | { |
431 | struct hwicap_drvdata *drvdata = file->private_data; | 403 | struct hwicap_drvdata *drvdata = file->private_data; |
@@ -435,8 +407,9 @@ hwicap_write(struct file *file, const char *buf, | |||
435 | ssize_t len; | 407 | ssize_t len; |
436 | ssize_t status; | 408 | ssize_t status; |
437 | 409 | ||
438 | if (down_interruptible(&drvdata->sem)) | 410 | status = mutex_lock_interruptible(&drvdata->sem); |
439 | return -ERESTARTSYS; | 411 | if (status) |
412 | return status; | ||
440 | 413 | ||
441 | left += drvdata->write_buffer_in_use; | 414 | left += drvdata->write_buffer_in_use; |
442 | 415 | ||
@@ -465,7 +438,7 @@ hwicap_write(struct file *file, const char *buf, | |||
465 | memcpy(kbuf, drvdata->write_buffer, | 438 | memcpy(kbuf, drvdata->write_buffer, |
466 | drvdata->write_buffer_in_use); | 439 | drvdata->write_buffer_in_use); |
467 | if (copy_from_user( | 440 | if (copy_from_user( |
468 | (((char *)kbuf) + (drvdata->write_buffer_in_use)), | 441 | (((char *)kbuf) + drvdata->write_buffer_in_use), |
469 | buf + written, | 442 | buf + written, |
470 | len - (drvdata->write_buffer_in_use))) { | 443 | len - (drvdata->write_buffer_in_use))) { |
471 | free_page((unsigned long)kbuf); | 444 | free_page((unsigned long)kbuf); |
@@ -508,7 +481,7 @@ hwicap_write(struct file *file, const char *buf, | |||
508 | free_page((unsigned long)kbuf); | 481 | free_page((unsigned long)kbuf); |
509 | status = written; | 482 | status = written; |
510 | error: | 483 | error: |
511 | up(&drvdata->sem); | 484 | mutex_unlock(&drvdata->sem); |
512 | return status; | 485 | return status; |
513 | } | 486 | } |
514 | 487 | ||
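hwicap_write() keeps up to three leftover bytes in drvdata->write_buffer so that only whole 32-bit words reach the ICAP, prepending the remainder to the next write. A condensed sketch of that carry-over arithmetic; the function name is illustrative only, and copy_from_user()/error handling are omitted:

    /* Sketch: merge the old remainder with new data and keep the new remainder. */
    static size_t merge_into_words(u8 *carry, size_t carry_len,
                                   const u8 *data, size_t len, u8 *out)
    {
            size_t total = carry_len + len;
            size_t words = total / 4;            /* whole 32-bit words to send */

            memcpy(out, carry, carry_len);       /* old remainder goes first   */
            memcpy(out + carry_len, data, len);  /* then the new data          */
            memcpy(carry, out + words * 4, total % 4);  /* stash new remainder */
            return words;                        /* caller sends words * 4 bytes */
    }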
@@ -519,8 +492,9 @@ static int hwicap_open(struct inode *inode, struct file *file) | |||
519 | 492 | ||
520 | drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); | 493 | drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); |
521 | 494 | ||
522 | if (down_interruptible(&drvdata->sem)) | 495 | status = mutex_lock_interruptible(&drvdata->sem); |
523 | return -ERESTARTSYS; | 496 | if (status) |
497 | return status; | ||
524 | 498 | ||
525 | if (drvdata->is_open) { | 499 | if (drvdata->is_open) { |
526 | status = -EBUSY; | 500 | status = -EBUSY; |
@@ -539,7 +513,7 @@ static int hwicap_open(struct inode *inode, struct file *file) | |||
539 | drvdata->is_open = 1; | 513 | drvdata->is_open = 1; |
540 | 514 | ||
541 | error: | 515 | error: |
542 | up(&drvdata->sem); | 516 | mutex_unlock(&drvdata->sem); |
543 | return status; | 517 | return status; |
544 | } | 518 | } |
545 | 519 | ||
@@ -549,8 +523,7 @@ static int hwicap_release(struct inode *inode, struct file *file) | |||
549 | int i; | 523 | int i; |
550 | int status = 0; | 524 | int status = 0; |
551 | 525 | ||
552 | if (down_interruptible(&drvdata->sem)) | 526 | mutex_lock(&drvdata->sem); |
553 | return -ERESTARTSYS; | ||
554 | 527 | ||
555 | if (drvdata->write_buffer_in_use) { | 528 | if (drvdata->write_buffer_in_use) { |
556 | /* Flush write buffer. */ | 529 | /* Flush write buffer. */ |
@@ -569,7 +542,7 @@ static int hwicap_release(struct inode *inode, struct file *file) | |||
569 | 542 | ||
570 | error: | 543 | error: |
571 | drvdata->is_open = 0; | 544 | drvdata->is_open = 0; |
572 | up(&drvdata->sem); | 545 | mutex_unlock(&drvdata->sem); |
573 | return status; | 546 | return status; |
574 | } | 547 | } |
575 | 548 | ||
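The hunks above replace the driver's semaphore with a struct mutex: down_interruptible()/up() become mutex_lock_interruptible()/mutex_unlock(), and the release path now takes the lock unconditionally with mutex_lock(). A minimal sketch of the resulting pattern (the function name is hypothetical):

    static int example_locked_op(struct hwicap_drvdata *drvdata)
    {
            int status;

            status = mutex_lock_interruptible(&drvdata->sem);
            if (status)
                    return status;          /* typically -EINTR on a signal */

            /* ... operate on drvdata under the lock ... */

            mutex_unlock(&drvdata->sem);
            return 0;
    }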
@@ -592,31 +565,36 @@ static int __devinit hwicap_setup(struct device *dev, int id, | |||
592 | 565 | ||
593 | dev_info(dev, "Xilinx icap port driver\n"); | 566 | dev_info(dev, "Xilinx icap port driver\n"); |
594 | 567 | ||
568 | mutex_lock(&icap_sem); | ||
569 | |||
595 | if (id < 0) { | 570 | if (id < 0) { |
596 | for (id = 0; id < HWICAP_DEVICES; id++) | 571 | for (id = 0; id < HWICAP_DEVICES; id++) |
597 | if (!probed_devices[id]) | 572 | if (!probed_devices[id]) |
598 | break; | 573 | break; |
599 | } | 574 | } |
600 | if (id < 0 || id >= HWICAP_DEVICES) { | 575 | if (id < 0 || id >= HWICAP_DEVICES) { |
576 | mutex_unlock(&icap_sem); | ||
601 | dev_err(dev, "%s%i too large\n", DRIVER_NAME, id); | 577 | dev_err(dev, "%s%i too large\n", DRIVER_NAME, id); |
602 | return -EINVAL; | 578 | return -EINVAL; |
603 | } | 579 | } |
604 | if (probed_devices[id]) { | 580 | if (probed_devices[id]) { |
581 | mutex_unlock(&icap_sem); | ||
605 | dev_err(dev, "cannot assign to %s%i; it is already in use\n", | 582 | dev_err(dev, "cannot assign to %s%i; it is already in use\n", |
606 | DRIVER_NAME, id); | 583 | DRIVER_NAME, id); |
607 | return -EBUSY; | 584 | return -EBUSY; |
608 | } | 585 | } |
609 | 586 | ||
610 | probed_devices[id] = 1; | 587 | probed_devices[id] = 1; |
588 | mutex_unlock(&icap_sem); | ||
611 | 589 | ||
612 | devt = MKDEV(xhwicap_major, xhwicap_minor + id); | 590 | devt = MKDEV(xhwicap_major, xhwicap_minor + id); |
613 | 591 | ||
614 | drvdata = kmalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); | 592 | drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); |
615 | if (!drvdata) { | 593 | if (!drvdata) { |
616 | dev_err(dev, "Couldn't allocate device private record\n"); | 594 | dev_err(dev, "Couldn't allocate device private record\n"); |
617 | return -ENOMEM; | 595 | retval = -ENOMEM; |
596 | goto failed0; | ||
618 | } | 597 | } |
619 | memset((void *)drvdata, 0, sizeof(struct hwicap_drvdata)); | ||
620 | dev_set_drvdata(dev, (void *)drvdata); | 598 | dev_set_drvdata(dev, (void *)drvdata); |
621 | 599 | ||
622 | if (!regs_res) { | 600 | if (!regs_res) { |
@@ -648,7 +626,7 @@ static int __devinit hwicap_setup(struct device *dev, int id, | |||
648 | drvdata->config = config; | 626 | drvdata->config = config; |
649 | drvdata->config_regs = config_regs; | 627 | drvdata->config_regs = config_regs; |
650 | 628 | ||
651 | init_MUTEX(&drvdata->sem); | 629 | mutex_init(&drvdata->sem); |
652 | drvdata->is_open = 0; | 630 | drvdata->is_open = 0; |
653 | 631 | ||
654 | dev_info(dev, "ioremap %lx to %p with size %x\n", | 632 | dev_info(dev, "ioremap %lx to %p with size %x\n", |
@@ -663,7 +641,7 @@ static int __devinit hwicap_setup(struct device *dev, int id, | |||
663 | goto failed3; | 641 | goto failed3; |
664 | } | 642 | } |
665 | /* devfs_mk_cdev(devt, S_IFCHR|S_IRUGO|S_IWUGO, DRIVER_NAME); */ | 643 | /* devfs_mk_cdev(devt, S_IFCHR|S_IRUGO|S_IWUGO, DRIVER_NAME); */ |
666 | class_device_create(icap_class, NULL, devt, NULL, DRIVER_NAME); | 644 | device_create(icap_class, dev, devt, "%s%d", DRIVER_NAME, id); |
667 | return 0; /* success */ | 645 | return 0; /* success */ |
668 | 646 | ||
669 | failed3: | 647 | failed3: |
@@ -675,6 +653,11 @@ static int __devinit hwicap_setup(struct device *dev, int id, | |||
675 | failed1: | 653 | failed1: |
676 | kfree(drvdata); | 654 | kfree(drvdata); |
677 | 655 | ||
656 | failed0: | ||
657 | mutex_lock(&icap_sem); | ||
658 | probed_devices[id] = 0; | ||
659 | mutex_unlock(&icap_sem); | ||
660 | |||
678 | return retval; | 661 | return retval; |
679 | } | 662 | } |
680 | 663 | ||
@@ -699,14 +682,16 @@ static int __devexit hwicap_remove(struct device *dev) | |||
699 | if (!drvdata) | 682 | if (!drvdata) |
700 | return 0; | 683 | return 0; |
701 | 684 | ||
702 | class_device_destroy(icap_class, drvdata->devt); | 685 | device_destroy(icap_class, drvdata->devt); |
703 | cdev_del(&drvdata->cdev); | 686 | cdev_del(&drvdata->cdev); |
704 | iounmap(drvdata->base_address); | 687 | iounmap(drvdata->base_address); |
705 | release_mem_region(drvdata->mem_start, drvdata->mem_size); | 688 | release_mem_region(drvdata->mem_start, drvdata->mem_size); |
706 | kfree(drvdata); | 689 | kfree(drvdata); |
707 | dev_set_drvdata(dev, NULL); | 690 | dev_set_drvdata(dev, NULL); |
708 | probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0; | ||
709 | 691 | ||
692 | mutex_lock(&icap_sem); | ||
693 | probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0; | ||
694 | mutex_unlock(&icap_sem); | ||
710 | return 0; /* success */ | 695 | return 0; /* success */ |
711 | } | 696 | } |
712 | 697 | ||
@@ -821,28 +806,29 @@ static struct of_platform_driver hwicap_of_driver = { | |||
821 | }; | 806 | }; |
822 | 807 | ||
823 | /* Registration helpers to keep the number of #ifdefs to a minimum */ | 808 | /* Registration helpers to keep the number of #ifdefs to a minimum */ |
824 | static inline int __devinit hwicap_of_register(void) | 809 | static inline int __init hwicap_of_register(void) |
825 | { | 810 | { |
826 | pr_debug("hwicap: calling of_register_platform_driver()\n"); | 811 | pr_debug("hwicap: calling of_register_platform_driver()\n"); |
827 | return of_register_platform_driver(&hwicap_of_driver); | 812 | return of_register_platform_driver(&hwicap_of_driver); |
828 | } | 813 | } |
829 | 814 | ||
830 | static inline void __devexit hwicap_of_unregister(void) | 815 | static inline void __exit hwicap_of_unregister(void) |
831 | { | 816 | { |
832 | of_unregister_platform_driver(&hwicap_of_driver); | 817 | of_unregister_platform_driver(&hwicap_of_driver); |
833 | } | 818 | } |
834 | #else /* CONFIG_OF */ | 819 | #else /* CONFIG_OF */ |
835 | /* CONFIG_OF not enabled; do nothing helpers */ | 820 | /* CONFIG_OF not enabled; do nothing helpers */ |
836 | static inline int __devinit hwicap_of_register(void) { return 0; } | 821 | static inline int __init hwicap_of_register(void) { return 0; } |
837 | static inline void __devexit hwicap_of_unregister(void) { } | 822 | static inline void __exit hwicap_of_unregister(void) { } |
838 | #endif /* CONFIG_OF */ | 823 | #endif /* CONFIG_OF */ |
839 | 824 | ||
840 | static int __devinit hwicap_module_init(void) | 825 | static int __init hwicap_module_init(void) |
841 | { | 826 | { |
842 | dev_t devt; | 827 | dev_t devt; |
843 | int retval; | 828 | int retval; |
844 | 829 | ||
845 | icap_class = class_create(THIS_MODULE, "xilinx_config"); | 830 | icap_class = class_create(THIS_MODULE, "xilinx_config"); |
831 | mutex_init(&icap_sem); | ||
846 | 832 | ||
847 | if (xhwicap_major) { | 833 | if (xhwicap_major) { |
848 | devt = MKDEV(xhwicap_major, xhwicap_minor); | 834 | devt = MKDEV(xhwicap_major, xhwicap_minor); |
@@ -883,7 +869,7 @@ static int __devinit hwicap_module_init(void) | |||
883 | return retval; | 869 | return retval; |
884 | } | 870 | } |
885 | 871 | ||
886 | static void __devexit hwicap_module_cleanup(void) | 872 | static void __exit hwicap_module_cleanup(void) |
887 | { | 873 | { |
888 | dev_t devt = MKDEV(xhwicap_major, xhwicap_minor); | 874 | dev_t devt = MKDEV(xhwicap_major, xhwicap_minor); |
889 | 875 | ||
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h index ae771cac1629..405fee7e189b 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h | |||
@@ -48,9 +48,9 @@ struct hwicap_drvdata { | |||
48 | u8 write_buffer[4]; | 48 | u8 write_buffer[4]; |
49 | u32 read_buffer_in_use; /* Always in [0,3] */ | 49 | u32 read_buffer_in_use; /* Always in [0,3] */ |
50 | u8 read_buffer[4]; | 50 | u8 read_buffer[4]; |
51 | u32 mem_start; /* phys. address of the control registers */ | 51 | resource_size_t mem_start;/* phys. address of the control registers */ |
52 | u32 mem_end; /* phys. address of the control registers */ | 52 | resource_size_t mem_end; /* phys. address of the control registers */ |
53 | u32 mem_size; | 53 | resource_size_t mem_size; |
54 | void __iomem *base_address;/* virt. address of the control registers */ | 54 | void __iomem *base_address;/* virt. address of the control registers */ |
55 | 55 | ||
56 | struct device *dev; | 56 | struct device *dev; |
@@ -61,7 +61,7 @@ struct hwicap_drvdata { | |||
61 | const struct config_registers *config_regs; | 61 | const struct config_registers *config_regs; |
62 | void *private_data; | 62 | void *private_data; |
63 | bool is_open; | 63 | bool is_open; |
64 | struct semaphore sem; | 64 | struct mutex sem; |
65 | }; | 65 | }; |
66 | 66 | ||
67 | struct hwicap_driver_config { | 67 | struct hwicap_driver_config { |
@@ -164,29 +164,29 @@ struct config_registers { | |||
164 | #define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL | 164 | #define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL |
165 | 165 | ||
166 | /** | 166 | /** |
167 | * hwicap_type_1_read: Generates a Type 1 read packet header. | 167 | * hwicap_type_1_read - Generates a Type 1 read packet header. |
168 | * @parameter: Register is the address of the register to be read back. | 168 | * @reg: is the address of the register to be read back. |
169 | * | 169 | * |
170 | * Generates a Type 1 read packet header, which is used to indirectly | 170 | * Generates a Type 1 read packet header, which is used to indirectly |
171 | * read registers in the configuration logic. This packet must then | 171 | * read registers in the configuration logic. This packet must then |
172 | * be sent through the icap device, and a return packet received with | 172 | * be sent through the icap device, and a return packet received with |
173 | * the information. | 173 | * the information. |
174 | **/ | 174 | **/ |
175 | static inline u32 hwicap_type_1_read(u32 Register) | 175 | static inline u32 hwicap_type_1_read(u32 reg) |
176 | { | 176 | { |
177 | return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | | 177 | return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | |
178 | (Register << XHI_REGISTER_SHIFT) | | 178 | (reg << XHI_REGISTER_SHIFT) | |
179 | (XHI_OP_READ << XHI_OP_SHIFT); | 179 | (XHI_OP_READ << XHI_OP_SHIFT); |
180 | } | 180 | } |
181 | 181 | ||
182 | /** | 182 | /** |
183 | * hwicap_type_1_write: Generates a Type 1 write packet header | 183 | * hwicap_type_1_write - Generates a Type 1 write packet header |
184 | * @parameter: Register is the address of the register to be read back. | 184 | * @reg: is the address of the register to be read back. |
185 | **/ | 185 | **/ |
186 | static inline u32 hwicap_type_1_write(u32 Register) | 186 | static inline u32 hwicap_type_1_write(u32 reg) |
187 | { | 187 | { |
188 | return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | | 188 | return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | |
189 | (Register << XHI_REGISTER_SHIFT) | | 189 | (reg << XHI_REGISTER_SHIFT) | |
190 | (XHI_OP_WRITE << XHI_OP_SHIFT); | 190 | (XHI_OP_WRITE << XHI_OP_SHIFT); |
191 | } | 191 | } |
192 | 192 | ||
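hwicap_type_1_read() and hwicap_type_1_write() pack the packet type, register address, and opcode into a single 32-bit header word; the driver then ORs in the word count, as seen in the command sequences above. A purely illustrative composition, using XHI_IDCODE as the register (the kernel-doc above names it as a valid example):

    /* Illustrative only: header words for a one-word IDCODE access. */
    u32 read_hdr  = hwicap_type_1_read(XHI_IDCODE)  | 1;   /* read 1 word back */
    u32 write_hdr = hwicap_type_1_write(XHI_IDCODE) | 1;   /* write 1 word     */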
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index a703deffb795..27340a7b19dd 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | menuconfig DMADEVICES | 5 | menuconfig DMADEVICES |
6 | bool "DMA Engine support" | 6 | bool "DMA Engine support" |
7 | depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX | 7 | depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC |
8 | depends on !HIGHMEM64G | 8 | depends on !HIGHMEM64G |
9 | help | 9 | help |
10 | DMA engines can do asynchronous data transfers without | 10 | DMA engines can do asynchronous data transfers without |
@@ -37,6 +37,23 @@ config INTEL_IOP_ADMA | |||
37 | help | 37 | help |
38 | Enable support for the Intel(R) IOP Series RAID engines. | 38 | Enable support for the Intel(R) IOP Series RAID engines. |
39 | 39 | ||
40 | config FSL_DMA | ||
41 | bool "Freescale MPC85xx/MPC83xx DMA support" | ||
42 | depends on PPC | ||
43 | select DMA_ENGINE | ||
44 | ---help--- | ||
45 | Enable support for the Freescale DMA engine. It currently supports | ||
46 | the MPC8560/40, MPC8555, MPC8548 and MPC8641 processors. | ||
47 | The MPC8349 and MPC8360 are also supported. | ||
48 | |||
49 | config FSL_DMA_SELFTEST | ||
50 | bool "Enable the self test for each DMA channel" | ||
51 | depends on FSL_DMA | ||
52 | default y | ||
53 | ---help--- | ||
54 | Enable the self test for each DMA channel. A self test will be | ||
55 | performed after the channel is probed to ensure the DMA engine works. | ||
56 | |||
40 | config DMA_ENGINE | 57 | config DMA_ENGINE |
41 | bool | 58 | bool |
42 | 59 | ||
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index b152cd84e123..c8036d945902 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
@@ -3,3 +3,4 @@ obj-$(CONFIG_NET_DMA) += iovlock.o | |||
3 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o | 3 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o |
4 | ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o | 4 | ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o |
5 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o | 5 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o |
6 | obj-$(CONFIG_FSL_DMA) += fsldma.o | ||
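The new fsldma.c below registers with the generic dmaengine framework, and a client drives it with the usual prepare/submit/issue/poll sequence; the driver's own fsl_dma_self_test() further down follows exactly this flow. A compressed sketch, assuming a channel and DMA-mapped buffers are already available and omitting error handling:

    struct dma_async_tx_descriptor *tx;
    dma_cookie_t cookie;

    tx = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, len, 0);
    async_tx_ack(tx);                       /* client acknowledges the descriptor */
    cookie = fsl_dma_tx_submit(tx);         /* queue it on the channel            */
    fsl_dma_memcpy_issue_pending(chan);     /* kick the hardware                  */

    while (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS)
            msleep(2);                      /* poll, as the self test does        */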
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c new file mode 100644 index 000000000000..cc9a68158d99 --- /dev/null +++ b/drivers/dma/fsldma.c | |||
@@ -0,0 +1,1067 @@ | |||
1 | /* | ||
2 | * Freescale MPC85xx, MPC83xx DMA Engine support | ||
3 | * | ||
4 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
5 | * | ||
6 | * Author: | ||
7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | ||
8 | * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 | ||
9 | * | ||
10 | * Description: | ||
11 | * DMA engine driver for Freescale MPC8540 DMA controller, which is | ||
12 | * also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, etc. | ||
13 | * Support for the MPC8349 DMA controller is also included. | ||
14 | * | ||
15 | * This is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License as published by | ||
17 | * the Free Software Foundation; either version 2 of the License, or | ||
18 | * (at your option) any later version. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/init.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/pci.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/dmaengine.h> | ||
27 | #include <linux/delay.h> | ||
28 | #include <linux/dma-mapping.h> | ||
29 | #include <linux/dmapool.h> | ||
30 | #include <linux/of_platform.h> | ||
31 | |||
32 | #include "fsldma.h" | ||
33 | |||
34 | static void dma_init(struct fsl_dma_chan *fsl_chan) | ||
35 | { | ||
36 | /* Reset the channel */ | ||
37 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32); | ||
38 | |||
39 | switch (fsl_chan->feature & FSL_DMA_IP_MASK) { | ||
40 | case FSL_DMA_IP_85XX: | ||
41 | /* Set the channel to the following modes: | ||
42 | * EIE - Error interrupt enable | ||
43 | * EOSIE - End of segments interrupt enable (basic mode) | ||
44 | * EOLNIE - End of links interrupt enable | ||
45 | */ | ||
46 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE | ||
47 | | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); | ||
48 | break; | ||
49 | case FSL_DMA_IP_83XX: | ||
50 | /* Set the channel to the following modes: | ||
51 | * EOTIE - End-of-transfer interrupt enable | ||
52 | */ | ||
53 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE, | ||
54 | 32); | ||
55 | break; | ||
56 | } | ||
57 | |||
58 | } | ||
59 | |||
60 | static void set_sr(struct fsl_dma_chan *fsl_chan, dma_addr_t val) | ||
61 | { | ||
62 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32); | ||
63 | } | ||
64 | |||
65 | static dma_addr_t get_sr(struct fsl_dma_chan *fsl_chan) | ||
66 | { | ||
67 | return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32); | ||
68 | } | ||
69 | |||
70 | static void set_desc_cnt(struct fsl_dma_chan *fsl_chan, | ||
71 | struct fsl_dma_ld_hw *hw, u32 count) | ||
72 | { | ||
73 | hw->count = CPU_TO_DMA(fsl_chan, count, 32); | ||
74 | } | ||
75 | |||
76 | static void set_desc_src(struct fsl_dma_chan *fsl_chan, | ||
77 | struct fsl_dma_ld_hw *hw, dma_addr_t src) | ||
78 | { | ||
79 | u64 snoop_bits; | ||
80 | |||
81 | snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
82 | ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; | ||
83 | hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64); | ||
84 | } | ||
85 | |||
86 | static void set_desc_dest(struct fsl_dma_chan *fsl_chan, | ||
87 | struct fsl_dma_ld_hw *hw, dma_addr_t dest) | ||
88 | { | ||
89 | u64 snoop_bits; | ||
90 | |||
91 | snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
92 | ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; | ||
93 | hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64); | ||
94 | } | ||
95 | |||
96 | static void set_desc_next(struct fsl_dma_chan *fsl_chan, | ||
97 | struct fsl_dma_ld_hw *hw, dma_addr_t next) | ||
98 | { | ||
99 | u64 snoop_bits; | ||
100 | |||
101 | snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) | ||
102 | ? FSL_DMA_SNEN : 0; | ||
103 | hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64); | ||
104 | } | ||
105 | |||
106 | static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) | ||
107 | { | ||
108 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64); | ||
109 | } | ||
110 | |||
111 | static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan) | ||
112 | { | ||
113 | return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN; | ||
114 | } | ||
115 | |||
116 | static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) | ||
117 | { | ||
118 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64); | ||
119 | } | ||
120 | |||
121 | static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan) | ||
122 | { | ||
123 | return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64); | ||
124 | } | ||
125 | |||
126 | static int dma_is_idle(struct fsl_dma_chan *fsl_chan) | ||
127 | { | ||
128 | u32 sr = get_sr(fsl_chan); | ||
129 | return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); | ||
130 | } | ||
131 | |||
132 | static void dma_start(struct fsl_dma_chan *fsl_chan) | ||
133 | { | ||
134 | u32 mr_set = 0; | ||
135 | |||
136 | if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { | ||
137 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32); | ||
138 | mr_set |= FSL_DMA_MR_EMP_EN; | ||
139 | } else | ||
140 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
141 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | ||
142 | & ~FSL_DMA_MR_EMP_EN, 32); | ||
143 | |||
144 | if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) | ||
145 | mr_set |= FSL_DMA_MR_EMS_EN; | ||
146 | else | ||
147 | mr_set |= FSL_DMA_MR_CS; | ||
148 | |||
149 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
150 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | ||
151 | | mr_set, 32); | ||
152 | } | ||
153 | |||
154 | static void dma_halt(struct fsl_dma_chan *fsl_chan) | ||
155 | { | ||
156 | int i = 0; | ||
157 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
158 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA, | ||
159 | 32); | ||
160 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
161 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS | ||
162 | | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32); | ||
163 | |||
164 | while (!dma_is_idle(fsl_chan) && (i++ < 100)) | ||
165 | udelay(10); | ||
166 | if (i >= 100 && !dma_is_idle(fsl_chan)) | ||
167 | dev_err(fsl_chan->dev, "DMA halt timeout!\n"); | ||
168 | } | ||
169 | |||
170 | static void set_ld_eol(struct fsl_dma_chan *fsl_chan, | ||
171 | struct fsl_desc_sw *desc) | ||
172 | { | ||
173 | desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, | ||
174 | DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL, | ||
175 | 64); | ||
176 | } | ||
177 | |||
178 | static void append_ld_queue(struct fsl_dma_chan *fsl_chan, | ||
179 | struct fsl_desc_sw *new_desc) | ||
180 | { | ||
181 | struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev); | ||
182 | |||
183 | if (list_empty(&fsl_chan->ld_queue)) | ||
184 | return; | ||
185 | |||
186 | /* Link to the new descriptor's physical address and | ||
187 | * enable the end-of-segment interrupt for | ||
188 | * the last link descriptor | ||
189 | * (the previous node's next link descriptor). | ||
190 | * | ||
191 | * For FSL_DMA_IP_83xx, the snoop enable bit needs to be set. | ||
192 | */ | ||
193 | queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, | ||
194 | new_desc->async_tx.phys | FSL_DMA_EOSIE | | ||
195 | (((fsl_chan->feature & FSL_DMA_IP_MASK) | ||
196 | == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64); | ||
197 | } | ||
198 | |||
199 | /** | ||
200 | * fsl_chan_set_src_loop_size - Set source address hold transfer size | ||
201 | * @fsl_chan : Freescale DMA channel | ||
202 | * @size : Address loop size, 0 for disable loop | ||
203 | * | ||
204 | * Set the source address hold transfer size. The source | ||
205 | * address hold (loop) transfer size controls how the DMA reads | ||
206 | * from the source address (SA): if the loop size is 4, the DMA will | ||
207 | * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA, | ||
208 | * SA + 1 ... and so on. | ||
209 | */ | ||
210 | static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) | ||
211 | { | ||
212 | switch (size) { | ||
213 | case 0: | ||
214 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
215 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & | ||
216 | (~FSL_DMA_MR_SAHE), 32); | ||
217 | break; | ||
218 | case 1: | ||
219 | case 2: | ||
220 | case 4: | ||
221 | case 8: | ||
222 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
223 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | | ||
224 | FSL_DMA_MR_SAHE | (__ilog2(size) << 14), | ||
225 | 32); | ||
226 | break; | ||
227 | } | ||
228 | } | ||
229 | |||
230 | /** | ||
231 | * fsl_chan_set_dest_loop_size - Set destination address hold transfer size | ||
232 | * @fsl_chan : Freescale DMA channel | ||
233 | * @size : Address loop size, 0 for disable loop | ||
234 | * | ||
235 | * Set the destination address hold transfer size. The destination | ||
236 | * address hold (loop) transfer size controls how the DMA writes | ||
237 | * to the destination address (TA): if the loop size is 4, the DMA will | ||
238 | * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, | ||
239 | * TA + 1 ... and so on. | ||
240 | */ | ||
241 | static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) | ||
242 | { | ||
243 | switch (size) { | ||
244 | case 0: | ||
245 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
246 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & | ||
247 | (~FSL_DMA_MR_DAHE), 32); | ||
248 | break; | ||
249 | case 1: | ||
250 | case 2: | ||
251 | case 4: | ||
252 | case 8: | ||
253 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
254 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | | ||
255 | FSL_DMA_MR_DAHE | (__ilog2(size) << 16), | ||
256 | 32); | ||
257 | break; | ||
258 | } | ||
259 | } | ||
260 | |||
261 | /** | ||
262 | * fsl_chan_toggle_ext_pause - Toggle channel external pause status | ||
263 | * @fsl_chan : Freescale DMA channel | ||
264 | * @size : Pause control size, 0 for disable external pause control. | ||
265 | * The maximum is 1024. | ||
266 | * | ||
267 | * The Freescale DMA channel can be controlled by the external | ||
268 | * signal DREQ#. The pause control size is how many bytes are allowed | ||
269 | * to transfer before pausing the channel, after which a new assertion | ||
270 | * of DREQ# resumes channel operation. | ||
271 | */ | ||
272 | static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size) | ||
273 | { | ||
274 | if (size > 1024) | ||
275 | return; | ||
276 | |||
277 | if (size) { | ||
278 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
279 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | ||
280 | | ((__ilog2(size) << 24) & 0x0f000000), | ||
281 | 32); | ||
282 | fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; | ||
283 | } else | ||
284 | fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; | ||
285 | } | ||
286 | |||
287 | /** | ||
288 | * fsl_chan_toggle_ext_start - Toggle channel external start status | ||
289 | * @fsl_chan : Freescale DMA channel | ||
290 | * @enable : 0 is disabled, 1 is enabled. | ||
291 | * | ||
292 | * If external start is enabled, the channel can be started by an | ||
293 | * external DMA start pin, so dma_start() does not start the | ||
294 | * transfer immediately. The DMA channel will wait until the | ||
295 | * control pin is asserted. | ||
296 | */ | ||
297 | static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) | ||
298 | { | ||
299 | if (enable) | ||
300 | fsl_chan->feature |= FSL_DMA_CHAN_START_EXT; | ||
301 | else | ||
302 | fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT; | ||
303 | } | ||
304 | |||
305 | static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
306 | { | ||
307 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); | ||
308 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); | ||
309 | unsigned long flags; | ||
310 | dma_cookie_t cookie; | ||
311 | |||
312 | /* cookie increment and adding to ld_queue must be atomic */ | ||
313 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
314 | |||
315 | cookie = fsl_chan->common.cookie; | ||
316 | cookie++; | ||
317 | if (cookie < 0) | ||
318 | cookie = 1; | ||
319 | desc->async_tx.cookie = cookie; | ||
320 | fsl_chan->common.cookie = desc->async_tx.cookie; | ||
321 | |||
322 | append_ld_queue(fsl_chan, desc); | ||
323 | list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev); | ||
324 | |||
325 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
326 | |||
327 | return cookie; | ||
328 | } | ||
329 | |||
330 | /** | ||
331 | * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. | ||
332 | * @fsl_chan : Freescale DMA channel | ||
333 | * | ||
334 | * Return - The allocated descriptor. NULL on failure. | ||
335 | */ | ||
336 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | ||
337 | struct fsl_dma_chan *fsl_chan) | ||
338 | { | ||
339 | dma_addr_t pdesc; | ||
340 | struct fsl_desc_sw *desc_sw; | ||
341 | |||
342 | desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); | ||
343 | if (desc_sw) { | ||
344 | memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); | ||
345 | dma_async_tx_descriptor_init(&desc_sw->async_tx, | ||
346 | &fsl_chan->common); | ||
347 | desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; | ||
348 | INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); | ||
349 | desc_sw->async_tx.phys = pdesc; | ||
350 | } | ||
351 | |||
352 | return desc_sw; | ||
353 | } | ||
354 | |||
355 | |||
356 | /** | ||
357 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. | ||
358 | * @fsl_chan : Freescale DMA channel | ||
359 | * | ||
360 | * This function will create a dma pool for descriptor allocation. | ||
361 | * | ||
362 | * Return - The number of descriptors allocated. | ||
363 | */ | ||
364 | static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) | ||
365 | { | ||
366 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
367 | LIST_HEAD(tmp_list); | ||
368 | |||
369 | /* We need the descriptor to be aligned to 32 bytes | ||
370 | * to meet the FSL DMA specification requirement. | ||
371 | */ | ||
372 | fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", | ||
373 | fsl_chan->dev, sizeof(struct fsl_desc_sw), | ||
374 | 32, 0); | ||
375 | if (!fsl_chan->desc_pool) { | ||
376 | dev_err(fsl_chan->dev, "No memory for channel %d " | ||
377 | "descriptor dma pool.\n", fsl_chan->id); | ||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | return 1; | ||
382 | } | ||
383 | |||
384 | /** | ||
385 | * fsl_dma_free_chan_resources - Free all resources of the channel. | ||
386 | * @fsl_chan : Freescale DMA channel | ||
387 | */ | ||
388 | static void fsl_dma_free_chan_resources(struct dma_chan *chan) | ||
389 | { | ||
390 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
391 | struct fsl_desc_sw *desc, *_desc; | ||
392 | unsigned long flags; | ||
393 | |||
394 | dev_dbg(fsl_chan->dev, "Free all channel resources.\n"); | ||
395 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
396 | list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { | ||
397 | #ifdef FSL_DMA_LD_DEBUG | ||
398 | dev_dbg(fsl_chan->dev, | ||
399 | "LD %p will be released.\n", desc); | ||
400 | #endif | ||
401 | list_del(&desc->node); | ||
402 | /* free link descriptor */ | ||
403 | dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | ||
404 | } | ||
405 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
406 | dma_pool_destroy(fsl_chan->desc_pool); | ||
407 | } | ||
408 | |||
409 | static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | ||
410 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | ||
411 | size_t len, unsigned long flags) | ||
412 | { | ||
413 | struct fsl_dma_chan *fsl_chan; | ||
414 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new; | ||
415 | size_t copy; | ||
416 | LIST_HEAD(link_chain); | ||
417 | |||
418 | if (!chan) | ||
419 | return NULL; | ||
420 | |||
421 | if (!len) | ||
422 | return NULL; | ||
423 | |||
424 | fsl_chan = to_fsl_chan(chan); | ||
425 | |||
426 | do { | ||
427 | |||
428 | /* Allocate the link descriptor from DMA pool */ | ||
429 | new = fsl_dma_alloc_descriptor(fsl_chan); | ||
430 | if (!new) { | ||
431 | dev_err(fsl_chan->dev, | ||
432 | "No free memory for link descriptor\n"); | ||
433 | return NULL; | ||
434 | } | ||
435 | #ifdef FSL_DMA_LD_DEBUG | ||
436 | dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); | ||
437 | #endif | ||
438 | |||
439 | copy = min(len, FSL_DMA_BCR_MAX_CNT); | ||
440 | |||
441 | set_desc_cnt(fsl_chan, &new->hw, copy); | ||
442 | set_desc_src(fsl_chan, &new->hw, dma_src); | ||
443 | set_desc_dest(fsl_chan, &new->hw, dma_dest); | ||
444 | |||
445 | if (!first) | ||
446 | first = new; | ||
447 | else | ||
448 | set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys); | ||
449 | |||
450 | new->async_tx.cookie = 0; | ||
451 | new->async_tx.ack = 1; | ||
452 | |||
453 | prev = new; | ||
454 | len -= copy; | ||
455 | dma_src += copy; | ||
456 | dma_dest += copy; | ||
457 | |||
458 | /* Insert the link descriptor to the LD ring */ | ||
459 | list_add_tail(&new->node, &first->async_tx.tx_list); | ||
460 | } while (len); | ||
461 | |||
462 | new->async_tx.ack = 0; /* client is in control of this ack */ | ||
463 | new->async_tx.cookie = -EBUSY; | ||
464 | |||
465 | /* Set End-of-link to the last link descriptor of new list*/ | ||
466 | set_ld_eol(fsl_chan, new); | ||
467 | |||
468 | return first ? &first->async_tx : NULL; | ||
469 | } | ||
470 | |||
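fsl_dma_prep_memcpy() above splits one copy into a chain of link descriptors, each moving at most FSL_DMA_BCR_MAX_CNT bytes, and only the final descriptor is marked with set_ld_eol(). The chunking itself reduces to this sketch (symbolic limit, descriptor programming elided):

    size_t remaining = len;
    while (remaining) {
            size_t copy = min(remaining, FSL_DMA_BCR_MAX_CNT);
            /* allocate a descriptor, program count/src/dest, link to the previous one */
            remaining -= copy;
    }
    /* mark only the last descriptor with the end-of-link bit */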
471 | /** | ||
472 | * fsl_dma_update_completed_cookie - Update the completed cookie. | ||
473 | * @fsl_chan : Freescale DMA channel | ||
474 | */ | ||
475 | static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan) | ||
476 | { | ||
477 | struct fsl_desc_sw *cur_desc, *desc; | ||
478 | dma_addr_t ld_phy; | ||
479 | |||
480 | ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK; | ||
481 | |||
482 | if (ld_phy) { | ||
483 | cur_desc = NULL; | ||
484 | list_for_each_entry(desc, &fsl_chan->ld_queue, node) | ||
485 | if (desc->async_tx.phys == ld_phy) { | ||
486 | cur_desc = desc; | ||
487 | break; | ||
488 | } | ||
489 | |||
490 | if (cur_desc && cur_desc->async_tx.cookie) { | ||
491 | if (dma_is_idle(fsl_chan)) | ||
492 | fsl_chan->completed_cookie = | ||
493 | cur_desc->async_tx.cookie; | ||
494 | else | ||
495 | fsl_chan->completed_cookie = | ||
496 | cur_desc->async_tx.cookie - 1; | ||
497 | } | ||
498 | } | ||
499 | } | ||
500 | |||
501 | /** | ||
502 | * fsl_chan_ld_cleanup - Clean up link descriptors | ||
503 | * @fsl_chan : Freescale DMA channel | ||
504 | * | ||
505 | * This function cleans up the ld_queue of the DMA channel: completed | ||
506 | * link descriptors are removed from the queue, their callbacks are | ||
507 | * run, and the descriptors are freed back to the descriptor pool. | ||
508 | */ | ||
509 | static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan) | ||
510 | { | ||
511 | struct fsl_desc_sw *desc, *_desc; | ||
512 | unsigned long flags; | ||
513 | |||
514 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
515 | |||
516 | fsl_dma_update_completed_cookie(fsl_chan); | ||
517 | dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", | ||
518 | fsl_chan->completed_cookie); | ||
519 | list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { | ||
520 | dma_async_tx_callback callback; | ||
521 | void *callback_param; | ||
522 | |||
523 | if (dma_async_is_complete(desc->async_tx.cookie, | ||
524 | fsl_chan->completed_cookie, fsl_chan->common.cookie) | ||
525 | == DMA_IN_PROGRESS) | ||
526 | break; | ||
527 | |||
528 | callback = desc->async_tx.callback; | ||
529 | callback_param = desc->async_tx.callback_param; | ||
530 | |||
531 | /* Remove from ld_queue list */ | ||
532 | list_del(&desc->node); | ||
533 | |||
534 | dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n", | ||
535 | desc); | ||
536 | dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | ||
537 | |||
538 | /* Run the link descriptor callback function */ | ||
539 | if (callback) { | ||
540 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
541 | dev_dbg(fsl_chan->dev, "link descriptor %p callback\n", | ||
542 | desc); | ||
543 | callback(callback_param); | ||
544 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
545 | } | ||
546 | } | ||
547 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
548 | } | ||
549 | |||
550 | /** | ||
551 | * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue. | ||
552 | * @fsl_chan : Freescale DMA channel | ||
553 | */ | ||
554 | static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan) | ||
555 | { | ||
556 | struct list_head *ld_node; | ||
557 | dma_addr_t next_dest_addr; | ||
558 | unsigned long flags; | ||
559 | |||
560 | if (!dma_is_idle(fsl_chan)) | ||
561 | return; | ||
562 | |||
563 | dma_halt(fsl_chan); | ||
564 | |||
565 | /* If there are link descriptors in the queue that | ||
566 | * have not been transferred yet, we need to start them. | ||
567 | */ | ||
568 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
569 | |||
570 | /* Find the first untransferred descriptor */ | ||
571 | for (ld_node = fsl_chan->ld_queue.next; | ||
572 | (ld_node != &fsl_chan->ld_queue) | ||
573 | && (dma_async_is_complete( | ||
574 | to_fsl_desc(ld_node)->async_tx.cookie, | ||
575 | fsl_chan->completed_cookie, | ||
576 | fsl_chan->common.cookie) == DMA_SUCCESS); | ||
577 | ld_node = ld_node->next); | ||
578 | |||
579 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
580 | |||
581 | if (ld_node != &fsl_chan->ld_queue) { | ||
582 | /* Get the ld start address from ld_queue */ | ||
583 | next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; | ||
584 | dev_dbg(fsl_chan->dev, "xfer LDs starting from 0x%016llx\n", | ||
585 | (u64)next_dest_addr); | ||
586 | set_cdar(fsl_chan, next_dest_addr); | ||
587 | dma_start(fsl_chan); | ||
588 | } else { | ||
589 | set_cdar(fsl_chan, 0); | ||
590 | set_ndar(fsl_chan, 0); | ||
591 | } | ||
592 | } | ||
593 | |||
594 | /** | ||
595 | * fsl_dma_memcpy_issue_pending - Issue the DMA start command | ||
596 | * @fsl_chan : Freescale DMA channel | ||
597 | */ | ||
598 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan) | ||
599 | { | ||
600 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
601 | |||
602 | #ifdef FSL_DMA_LD_DEBUG | ||
603 | struct fsl_desc_sw *ld; | ||
604 | unsigned long flags; | ||
605 | |||
606 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
607 | if (list_empty(&fsl_chan->ld_queue)) { | ||
608 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
609 | return; | ||
610 | } | ||
611 | |||
612 | dev_dbg(fsl_chan->dev, "--memcpy issue--\n"); | ||
613 | list_for_each_entry(ld, &fsl_chan->ld_queue, node) { | ||
614 | int i; | ||
615 | dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n", | ||
616 | fsl_chan->id, ld->async_tx.phys); | ||
617 | for (i = 0; i < 8; i++) | ||
618 | dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n", | ||
619 | i, *(((u32 *)&ld->hw) + i)); | ||
620 | } | ||
621 | dev_dbg(fsl_chan->dev, "----------------\n"); | ||
622 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
623 | #endif | ||
624 | |||
625 | fsl_chan_xfer_ld_queue(fsl_chan); | ||
626 | } | ||
627 | |||
628 | static void fsl_dma_dependency_added(struct dma_chan *chan) | ||
629 | { | ||
630 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
631 | |||
632 | fsl_chan_ld_cleanup(fsl_chan); | ||
633 | } | ||
634 | |||
635 | /** | ||
636 | * fsl_dma_is_complete - Determine the DMA status | ||
637 | * @fsl_chan : Freescale DMA channel | ||
638 | */ | ||
639 | static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, | ||
640 | dma_cookie_t cookie, | ||
641 | dma_cookie_t *done, | ||
642 | dma_cookie_t *used) | ||
643 | { | ||
644 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
645 | dma_cookie_t last_used; | ||
646 | dma_cookie_t last_complete; | ||
647 | |||
648 | fsl_chan_ld_cleanup(fsl_chan); | ||
649 | |||
650 | last_used = chan->cookie; | ||
651 | last_complete = fsl_chan->completed_cookie; | ||
652 | |||
653 | if (done) | ||
654 | *done = last_complete; | ||
655 | |||
656 | if (used) | ||
657 | *used = last_used; | ||
658 | |||
659 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
660 | } | ||
661 | |||
662 | static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) | ||
663 | { | ||
664 | struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; | ||
665 | dma_addr_t stat; | ||
666 | |||
667 | stat = get_sr(fsl_chan); | ||
668 | dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n", | ||
669 | fsl_chan->id, stat); | ||
670 | set_sr(fsl_chan, stat); /* Clear the event register */ | ||
671 | |||
672 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); | ||
673 | if (!stat) | ||
674 | return IRQ_NONE; | ||
675 | |||
676 | if (stat & FSL_DMA_SR_TE) | ||
677 | dev_err(fsl_chan->dev, "Transfer Error!\n"); | ||
678 | |||
679 | /* If the link descriptor segment transfer finishes, | ||
680 | * we will recycle the used descriptor. | ||
681 | */ | ||
682 | if (stat & FSL_DMA_SR_EOSI) { | ||
683 | dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); | ||
684 | dev_dbg(fsl_chan->dev, "event: clndar 0x%016llx, " | ||
685 | "nlndar 0x%016llx\n", (u64)get_cdar(fsl_chan), | ||
686 | (u64)get_ndar(fsl_chan)); | ||
687 | stat &= ~FSL_DMA_SR_EOSI; | ||
688 | } | ||
689 | |||
690 | /* If the current transfer is the end of the transfer, | ||
691 | * we should clear the Channel Start bit to prepare | ||
692 | * for the next transfer. | ||
693 | */ | ||
694 | if (stat & (FSL_DMA_SR_EOLNI | FSL_DMA_SR_EOCDI)) { | ||
695 | dev_dbg(fsl_chan->dev, "event: End-of-link INT\n"); | ||
696 | stat &= ~FSL_DMA_SR_EOLNI; | ||
697 | fsl_chan_xfer_ld_queue(fsl_chan); | ||
698 | } | ||
699 | |||
700 | if (stat) | ||
701 | dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n", | ||
702 | stat); | ||
703 | |||
704 | dev_dbg(fsl_chan->dev, "event: Exit\n"); | ||
705 | tasklet_schedule(&fsl_chan->tasklet); | ||
706 | return IRQ_HANDLED; | ||
707 | } | ||
708 | |||
709 | static irqreturn_t fsl_dma_do_interrupt(int irq, void *data) | ||
710 | { | ||
711 | struct fsl_dma_device *fdev = (struct fsl_dma_device *)data; | ||
712 | u32 gsr; | ||
713 | int ch_nr; | ||
714 | |||
715 | gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base) | ||
716 | : in_le32(fdev->reg_base); | ||
717 | ch_nr = (32 - ffs(gsr)) / 8; | ||
718 | |||
719 | return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq, | ||
720 | fdev->chan[ch_nr]) : IRQ_NONE; | ||
721 | } | ||
722 | |||
723 | static void dma_do_tasklet(unsigned long data) | ||
724 | { | ||
725 | struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; | ||
726 | fsl_chan_ld_cleanup(fsl_chan); | ||
727 | } | ||
728 | |||
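The interrupt handler above only acks the channel status and defers descriptor cleanup to dma_do_tasklet() via tasklet_schedule(), so fsl_chan_ld_cleanup() and its callbacks run outside hard-IRQ context. A minimal sketch of how such a per-channel tasklet is typically wired up; the actual initialization happens in the channel probe code and this is illustrative only:

    /* At channel setup time: bind the tasklet to its handler and channel. */
    tasklet_init(&fsl_chan->tasklet, dma_do_tasklet, (unsigned long)fsl_chan);

    /* In the interrupt handler, after the status bits are acknowledged: */
    tasklet_schedule(&fsl_chan->tasklet);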
729 | static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan) | ||
730 | { | ||
731 | if (fsl_chan) | ||
732 | dev_info(fsl_chan->dev, "selftest: callback is ok!\n"); | ||
733 | } | ||
734 | |||
735 | static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan) | ||
736 | { | ||
737 | struct dma_chan *chan; | ||
738 | int err = 0; | ||
739 | dma_addr_t dma_dest, dma_src; | ||
740 | dma_cookie_t cookie; | ||
741 | u8 *src, *dest; | ||
742 | int i; | ||
743 | size_t test_size; | ||
744 | struct dma_async_tx_descriptor *tx1, *tx2, *tx3; | ||
745 | |||
746 | test_size = 4096; | ||
747 | |||
748 | src = kmalloc(test_size * 2, GFP_KERNEL); | ||
749 | if (!src) { | ||
750 | dev_err(fsl_chan->dev, | ||
751 | "selftest: Cannot alloc memory for test!\n"); | ||
752 | err = -ENOMEM; | ||
753 | goto out; | ||
754 | } | ||
755 | |||
756 | dest = src + test_size; | ||
757 | |||
758 | for (i = 0; i < test_size; i++) | ||
759 | src[i] = (u8) i; | ||
760 | |||
761 | chan = &fsl_chan->common; | ||
762 | |||
763 | if (fsl_dma_alloc_chan_resources(chan) < 1) { | ||
764 | dev_err(fsl_chan->dev, | ||
765 | "selftest: Cannot alloc resources for DMA\n"); | ||
766 | err = -ENODEV; | ||
767 | goto out; | ||
768 | } | ||
769 | |||
770 | /* TX 1 */ | ||
771 | dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2, | ||
772 | DMA_TO_DEVICE); | ||
773 | dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2, | ||
774 | DMA_FROM_DEVICE); | ||
775 | tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0); | ||
776 | async_tx_ack(tx1); | ||
777 | |||
778 | cookie = fsl_dma_tx_submit(tx1); | ||
779 | fsl_dma_memcpy_issue_pending(chan); | ||
780 | msleep(2); | ||
781 | |||
782 | if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { | ||
783 | dev_err(fsl_chan->dev, "selftest: Time out!\n"); | ||
784 | err = -ENODEV; | ||
785 | goto out; | ||
786 | } | ||
787 | |||
788 | /* Test free and re-alloc channel resources */ | ||
789 | fsl_dma_free_chan_resources(chan); | ||
790 | |||
791 | if (fsl_dma_alloc_chan_resources(chan) < 1) { | ||
792 | dev_err(fsl_chan->dev, | ||
793 | "selftest: Cannot alloc resources for DMA\n"); | ||
794 | err = -ENODEV; | ||
795 | goto free_resources; | ||
796 | } | ||
797 | |||
798 | /* Continue to test | ||
799 | * TX 2 | ||
800 | */ | ||
801 | dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2, | ||
802 | test_size / 4, DMA_TO_DEVICE); | ||
803 | dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2, | ||
804 | test_size / 4, DMA_FROM_DEVICE); | ||
805 | tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0); | ||
806 | async_tx_ack(tx2); | ||
807 | |||
808 | /* TX 3 */ | ||
809 | dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4, | ||
810 | test_size / 4, DMA_TO_DEVICE); | ||
811 | dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4, | ||
812 | test_size / 4, DMA_FROM_DEVICE); | ||
813 | tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0); | ||
814 | async_tx_ack(tx3); | ||
815 | |||
816 | /* Test submitting the prepared descriptors out of order */ | ||
817 | cookie = fsl_dma_tx_submit(tx3); | ||
818 | cookie = fsl_dma_tx_submit(tx2); | ||
819 | |||
820 | #ifdef FSL_DMA_CALLBACKTEST | ||
821 | if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *) | ||
822 | dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) { | ||
823 | tx3->callback = fsl_dma_callback_test; | ||
824 | tx3->callback_param = fsl_chan; | ||
825 | } | ||
826 | #endif | ||
827 | fsl_dma_memcpy_issue_pending(chan); | ||
828 | msleep(2); | ||
829 | |||
830 | if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { | ||
831 | dev_err(fsl_chan->dev, "selftest: Time out!\n"); | ||
832 | err = -ENODEV; | ||
833 | goto free_resources; | ||
834 | } | ||
835 | |||
836 | err = memcmp(src, dest, test_size); | ||
837 | if (err) { | ||
838 | for (i = 0; (i < test_size) && (*(src + i) == *(dest + i)); | ||
839 | i++); | ||
840 | dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%d is " | ||
841 | "wrong! src 0x%x, dest 0x%x\n", | ||
842 | i, test_size, *(src + i), *(dest + i)); | ||
843 | } | ||
844 | |||
845 | free_resources: | ||
846 | fsl_dma_free_chan_resources(chan); | ||
847 | out: | ||
848 | kfree(src); | ||
849 | return err; | ||
850 | } | ||
851 | |||
852 | static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, | ||
853 | const struct of_device_id *match) | ||
854 | { | ||
855 | struct fsl_dma_device *fdev; | ||
856 | struct fsl_dma_chan *new_fsl_chan; | ||
857 | int err; | ||
858 | |||
859 | fdev = dev_get_drvdata(dev->dev.parent); | ||
860 | BUG_ON(!fdev); | ||
861 | |||
862 | /* alloc channel */ | ||
863 | new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL); | ||
864 | if (!new_fsl_chan) { | ||
865 | dev_err(&dev->dev, "No free memory for allocating " | ||
866 | "dma channels!\n"); | ||
867 | /* nothing to clean up yet; skip the shared error path below */ | ||
868 | return -ENOMEM; | ||
869 | } | ||
870 | |||
871 | /* get dma channel register base */ | ||
872 | err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg); | ||
873 | if (err) { | ||
874 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", | ||
875 | dev->node->full_name); | ||
876 | goto err; | ||
877 | } | ||
878 | |||
879 | new_fsl_chan->feature = *(u32 *)match->data; | ||
880 | |||
881 | if (!fdev->feature) | ||
882 | fdev->feature = new_fsl_chan->feature; | ||
883 | |||
884 | /* If the DMA device's feature is different from its channels', | ||
885 | * report the bug. | ||
886 | */ | ||
887 | WARN_ON(fdev->feature != new_fsl_chan->feature); | ||
888 | |||
889 | new_fsl_chan->dev = &dev->dev; | ||
890 | new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, | ||
891 | new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); | ||
892 | |||
893 | new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7; | ||
894 | if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { | ||
895 | dev_err(&dev->dev, "There is no channel %d!\n", | ||
896 | new_fsl_chan->id); | ||
897 | err = -EINVAL; | ||
898 | goto err; | ||
899 | } | ||
900 | fdev->chan[new_fsl_chan->id] = new_fsl_chan; | ||
901 | tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet, | ||
902 | (unsigned long)new_fsl_chan); | ||
903 | |||
904 | /* Init the channel */ | ||
905 | dma_init(new_fsl_chan); | ||
906 | |||
907 | /* Clear cdar registers */ | ||
908 | set_cdar(new_fsl_chan, 0); | ||
909 | |||
910 | switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) { | ||
911 | case FSL_DMA_IP_85XX: | ||
912 | new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; | ||
913 | new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; | ||
914 | case FSL_DMA_IP_83XX: | ||
915 | new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; | ||
916 | new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; | ||
917 | } | ||
918 | |||
919 | spin_lock_init(&new_fsl_chan->desc_lock); | ||
920 | INIT_LIST_HEAD(&new_fsl_chan->ld_queue); | ||
921 | |||
922 | new_fsl_chan->common.device = &fdev->common; | ||
923 | |||
924 | /* Add the channel to DMA device channel list */ | ||
925 | list_add_tail(&new_fsl_chan->common.device_node, | ||
926 | &fdev->common.channels); | ||
927 | fdev->common.chancnt++; | ||
928 | |||
929 | new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0); | ||
930 | if (new_fsl_chan->irq != NO_IRQ) { | ||
931 | err = request_irq(new_fsl_chan->irq, | ||
932 | &fsl_dma_chan_do_interrupt, IRQF_SHARED, | ||
933 | "fsldma-channel", new_fsl_chan); | ||
934 | if (err) { | ||
935 | dev_err(&dev->dev, "DMA channel %s request_irq error " | ||
936 | "with return %d\n", dev->node->full_name, err); | ||
937 | goto err; | ||
938 | } | ||
939 | } | ||
940 | |||
941 | #ifdef CONFIG_FSL_DMA_SELFTEST | ||
942 | err = fsl_dma_self_test(new_fsl_chan); | ||
943 | if (err) | ||
944 | goto err; | ||
945 | #endif | ||
946 | |||
947 | dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, | ||
948 | match->compatible, new_fsl_chan->irq); | ||
949 | |||
950 | return 0; | ||
951 | err: | ||
952 | dma_halt(new_fsl_chan); | ||
953 | iounmap(new_fsl_chan->reg_base); | ||
954 | free_irq(new_fsl_chan->irq, new_fsl_chan); | ||
955 | list_del(&new_fsl_chan->common.device_node); | ||
956 | kfree(new_fsl_chan); | ||
957 | return err; | ||
958 | } | ||
959 | |||
960 | const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN; | ||
961 | const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN; | ||
962 | |||
963 | static struct of_device_id of_fsl_dma_chan_ids[] = { | ||
964 | { | ||
965 | .compatible = "fsl,mpc8540-dma-channel", | ||
966 | .data = (void *)&mpc8540_dma_ip_feature, | ||
967 | }, | ||
968 | { | ||
969 | .compatible = "fsl,mpc8349-dma-channel", | ||
970 | .data = (void *)&mpc8349_dma_ip_feature, | ||
971 | }, | ||
972 | {} | ||
973 | }; | ||
974 | |||
975 | static struct of_platform_driver of_fsl_dma_chan_driver = { | ||
976 | .name = "of-fsl-dma-channel", | ||
977 | .match_table = of_fsl_dma_chan_ids, | ||
978 | .probe = of_fsl_dma_chan_probe, | ||
979 | }; | ||
980 | |||
981 | static __init int of_fsl_dma_chan_init(void) | ||
982 | { | ||
983 | return of_register_platform_driver(&of_fsl_dma_chan_driver); | ||
984 | } | ||
985 | |||
986 | static int __devinit of_fsl_dma_probe(struct of_device *dev, | ||
987 | const struct of_device_id *match) | ||
988 | { | ||
989 | int err; | ||
990 | unsigned int irq; | ||
991 | struct fsl_dma_device *fdev; | ||
992 | |||
993 | fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); | ||
994 | if (!fdev) { | ||
995 | dev_err(&dev->dev, "No enough memory for 'priv'\n"); | ||
996 | /* nothing to clean up yet; skip the shared error path below */ | ||
997 | return -ENOMEM; | ||
998 | } | ||
999 | fdev->dev = &dev->dev; | ||
1000 | INIT_LIST_HEAD(&fdev->common.channels); | ||
1001 | |||
1002 | /* get DMA controller register base */ | ||
1003 | err = of_address_to_resource(dev->node, 0, &fdev->reg); | ||
1004 | if (err) { | ||
1005 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", | ||
1006 | dev->node->full_name); | ||
1007 | goto err; | ||
1008 | } | ||
1009 | |||
1010 | dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " | ||
1011 | "controller at 0x%08x...\n", | ||
1012 | match->compatible, fdev->reg.start); | ||
1013 | fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end | ||
1014 | - fdev->reg.start + 1); | ||
1015 | |||
1016 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); | ||
1017 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); | ||
1018 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; | ||
1019 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; | ||
1020 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; | ||
1021 | fdev->common.device_is_tx_complete = fsl_dma_is_complete; | ||
1022 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; | ||
1023 | fdev->common.device_dependency_added = fsl_dma_dependency_added; | ||
1024 | fdev->common.dev = &dev->dev; | ||
1025 | |||
1026 | irq = irq_of_parse_and_map(dev->node, 0); | ||
1027 | if (irq != NO_IRQ) { | ||
1028 | err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED, | ||
1029 | "fsldma-device", fdev); | ||
1030 | if (err) { | ||
1031 | dev_err(&dev->dev, "DMA device request_irq error " | ||
1032 | "with return %d\n", err); | ||
1033 | goto err; | ||
1034 | } | ||
1035 | } | ||
1036 | |||
1037 | dev_set_drvdata(&(dev->dev), fdev); | ||
1038 | of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev); | ||
1039 | |||
1040 | dma_async_device_register(&fdev->common); | ||
1041 | return 0; | ||
1042 | |||
1043 | err: | ||
1044 | iounmap(fdev->reg_base); | ||
1045 | kfree(fdev); | ||
1046 | return err; | ||
1047 | } | ||
1048 | |||
1049 | static struct of_device_id of_fsl_dma_ids[] = { | ||
1050 | { .compatible = "fsl,mpc8540-dma", }, | ||
1051 | { .compatible = "fsl,mpc8349-dma", }, | ||
1052 | {} | ||
1053 | }; | ||
1054 | |||
1055 | static struct of_platform_driver of_fsl_dma_driver = { | ||
1056 | .name = "of-fsl-dma", | ||
1057 | .match_table = of_fsl_dma_ids, | ||
1058 | .probe = of_fsl_dma_probe, | ||
1059 | }; | ||
1060 | |||
1061 | static __init int of_fsl_dma_init(void) | ||
1062 | { | ||
1063 | return of_register_platform_driver(&of_fsl_dma_driver); | ||
1064 | } | ||
1065 | |||
1066 | subsys_initcall(of_fsl_dma_chan_init); | ||
1067 | subsys_initcall(of_fsl_dma_init); | ||
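
A minimal client-side sketch (illustrative only; the helper name and error handling below are hypothetical, not part of this driver): a dmaengine user reaches this controller through the generic struct dma_device hooks that of_fsl_dma_probe() installs, mirroring what fsl_dma_self_test() does above.

static dma_cookie_t fsl_dma_copy_sketch(struct dma_chan *chan,
                dma_addr_t dst, dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        /* Prepare a memcpy descriptor; dispatched to fsl_dma_prep_memcpy() */
        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
        if (!tx)
                return -ENOMEM;

        async_tx_ack(tx);               /* descriptor may be recycled once done */
        cookie = tx->tx_submit(tx);     /* dispatched to fsl_dma_tx_submit() */

        /* Kick the hardware; dispatched to fsl_dma_memcpy_issue_pending() */
        chan->device->device_issue_pending(chan);
        return cookie;
}

Completion is then polled through the device_is_tx_complete hook (fsl_dma_is_complete), as the self-test above demonstrates.
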
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h new file mode 100644 index 000000000000..ba78c42121ba --- /dev/null +++ b/drivers/dma/fsldma.h | |||
@@ -0,0 +1,189 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Author: | ||
5 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | ||
6 | * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 | ||
7 | * | ||
8 | * This is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | */ | ||
14 | #ifndef __DMA_FSLDMA_H | ||
15 | #define __DMA_FSLDMA_H | ||
16 | |||
17 | #include <linux/device.h> | ||
18 | #include <linux/dmapool.h> | ||
19 | #include <linux/dmaengine.h> | ||
20 | |||
21 | /* Define data structures needed by Freescale | ||
22 | * MPC8540 and MPC8349 DMA controllers. | ||
23 | */ | ||
24 | #define FSL_DMA_MR_CS 0x00000001 | ||
25 | #define FSL_DMA_MR_CC 0x00000002 | ||
26 | #define FSL_DMA_MR_CA 0x00000008 | ||
27 | #define FSL_DMA_MR_EIE 0x00000040 | ||
28 | #define FSL_DMA_MR_XFE 0x00000020 | ||
29 | #define FSL_DMA_MR_EOLNIE 0x00000100 | ||
30 | #define FSL_DMA_MR_EOLSIE 0x00000080 | ||
31 | #define FSL_DMA_MR_EOSIE 0x00000200 | ||
32 | #define FSL_DMA_MR_CDSM 0x00000010 | ||
33 | #define FSL_DMA_MR_CTM 0x00000004 | ||
34 | #define FSL_DMA_MR_EMP_EN 0x00200000 | ||
35 | #define FSL_DMA_MR_EMS_EN 0x00040000 | ||
36 | #define FSL_DMA_MR_DAHE 0x00002000 | ||
37 | #define FSL_DMA_MR_SAHE 0x00001000 | ||
38 | |||
39 | /* Special MR definition for MPC8349 */ | ||
40 | #define FSL_DMA_MR_EOTIE 0x00000080 | ||
41 | |||
42 | #define FSL_DMA_SR_CH 0x00000020 | ||
43 | #define FSL_DMA_SR_CB 0x00000004 | ||
44 | #define FSL_DMA_SR_TE 0x00000080 | ||
45 | #define FSL_DMA_SR_EOSI 0x00000002 | ||
46 | #define FSL_DMA_SR_EOLSI 0x00000001 | ||
47 | #define FSL_DMA_SR_EOCDI 0x00000001 | ||
48 | #define FSL_DMA_SR_EOLNI 0x00000008 | ||
49 | |||
50 | #define FSL_DMA_SATR_SBPATMU 0x20000000 | ||
51 | #define FSL_DMA_SATR_STRANSINT_RIO 0x00c00000 | ||
52 | #define FSL_DMA_SATR_SREADTYPE_SNOOP_READ 0x00050000 | ||
53 | #define FSL_DMA_SATR_SREADTYPE_BP_IORH 0x00020000 | ||
54 | #define FSL_DMA_SATR_SREADTYPE_BP_NREAD 0x00040000 | ||
55 | #define FSL_DMA_SATR_SREADTYPE_BP_MREAD 0x00070000 | ||
56 | |||
57 | #define FSL_DMA_DATR_DBPATMU 0x20000000 | ||
58 | #define FSL_DMA_DATR_DTRANSINT_RIO 0x00c00000 | ||
59 | #define FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE 0x00050000 | ||
60 | #define FSL_DMA_DATR_DWRITETYPE_BP_FLUSH 0x00010000 | ||
61 | |||
62 | #define FSL_DMA_EOL ((u64)0x1) | ||
63 | #define FSL_DMA_SNEN ((u64)0x10) | ||
64 | #define FSL_DMA_EOSIE 0x8 | ||
65 | #define FSL_DMA_NLDA_MASK (~(u64)0x1f) | ||
66 | |||
67 | #define FSL_DMA_BCR_MAX_CNT 0x03ffffffu | ||
68 | |||
69 | #define FSL_DMA_DGSR_TE 0x80 | ||
70 | #define FSL_DMA_DGSR_CH 0x20 | ||
71 | #define FSL_DMA_DGSR_PE 0x10 | ||
72 | #define FSL_DMA_DGSR_EOLNI 0x08 | ||
73 | #define FSL_DMA_DGSR_CB 0x04 | ||
74 | #define FSL_DMA_DGSR_EOSI 0x02 | ||
75 | #define FSL_DMA_DGSR_EOLSI 0x01 | ||
76 | |||
77 | struct fsl_dma_ld_hw { | ||
78 | u64 __bitwise src_addr; | ||
79 | u64 __bitwise dst_addr; | ||
80 | u64 __bitwise next_ln_addr; | ||
81 | u32 __bitwise count; | ||
82 | u32 __bitwise reserve; | ||
83 | } __attribute__((aligned(32))); | ||
84 | |||
85 | struct fsl_desc_sw { | ||
86 | struct fsl_dma_ld_hw hw; | ||
87 | struct list_head node; | ||
88 | struct dma_async_tx_descriptor async_tx; | ||
89 | struct list_head *ld; | ||
90 | void *priv; | ||
91 | } __attribute__((aligned(32))); | ||
92 | |||
93 | struct fsl_dma_chan_regs { | ||
94 | u32 __bitwise mr; /* 0x00 - Mode Register */ | ||
95 | u32 __bitwise sr; /* 0x04 - Status Register */ | ||
96 | u64 __bitwise cdar; /* 0x08 - Current descriptor address register */ | ||
97 | u64 __bitwise sar; /* 0x10 - Source Address Register */ | ||
98 | u64 __bitwise dar; /* 0x18 - Destination Address Register */ | ||
99 | u32 __bitwise bcr; /* 0x20 - Byte Count Register */ | ||
100 | u64 __bitwise ndar; /* 0x24 - Next Descriptor Address Register */ | ||
101 | }; | ||
102 | |||
103 | struct fsl_dma_chan; | ||
104 | #define FSL_DMA_MAX_CHANS_PER_DEVICE 4 | ||
105 | |||
106 | struct fsl_dma_device { | ||
107 | void __iomem *reg_base; /* DGSR register base */ | ||
108 | struct resource reg; /* Resource for register */ | ||
109 | struct device *dev; | ||
110 | struct dma_device common; | ||
111 | struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; | ||
112 | u32 feature; /* The same as DMA channels */ | ||
113 | }; | ||
114 | |||
115 | /* Define macros for fsl_dma_chan->feature property */ | ||
116 | #define FSL_DMA_LITTLE_ENDIAN 0x00000000 | ||
117 | #define FSL_DMA_BIG_ENDIAN 0x00000001 | ||
118 | |||
119 | #define FSL_DMA_IP_MASK 0x00000ff0 | ||
120 | #define FSL_DMA_IP_85XX 0x00000010 | ||
121 | #define FSL_DMA_IP_83XX 0x00000020 | ||
122 | |||
123 | #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 | ||
124 | #define FSL_DMA_CHAN_START_EXT 0x00002000 | ||
125 | |||
126 | struct fsl_dma_chan { | ||
127 | struct fsl_dma_chan_regs __iomem *reg_base; | ||
128 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ | ||
129 | spinlock_t desc_lock; /* Descriptor operation lock */ | ||
130 | struct list_head ld_queue; /* Link descriptors queue */ | ||
131 | struct dma_chan common; /* DMA common channel */ | ||
132 | struct dma_pool *desc_pool; /* Descriptors pool */ | ||
133 | struct device *dev; /* Channel device */ | ||
134 | struct resource reg; /* Resource for register */ | ||
135 | int irq; /* Channel IRQ */ | ||
136 | int id; /* Raw id of this channel */ | ||
137 | struct tasklet_struct tasklet; | ||
138 | u32 feature; | ||
139 | |||
140 | void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int size); | ||
141 | void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); | ||
142 | void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); | ||
143 | void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); | ||
144 | }; | ||
145 | |||
146 | #define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) | ||
147 | #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) | ||
148 | #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) | ||
149 | |||
150 | #ifndef __powerpc64__ | ||
151 | static u64 in_be64(const u64 __iomem *addr) | ||
152 | { | ||
153 | return ((u64)in_be32((u32 *)addr) << 32) | (in_be32((u32 *)addr + 1)); | ||
154 | } | ||
155 | |||
156 | static void out_be64(u64 __iomem *addr, u64 val) | ||
157 | { | ||
158 | out_be32((u32 *)addr, val >> 32); | ||
159 | out_be32((u32 *)addr + 1, (u32)val); | ||
160 | } | ||
161 | |||
162 | /* There are no asm instructions for 64-bit byte-reversed loads and stores */ | ||
163 | static u64 in_le64(const u64 __iomem *addr) | ||
164 | { | ||
165 | return ((u64)in_le32((u32 *)addr + 1) << 32) | (in_le32((u32 *)addr)); | ||
166 | } | ||
167 | |||
168 | static void out_le64(u64 __iomem *addr, u64 val) | ||
169 | { | ||
170 | out_le32((u32 *)addr + 1, val >> 32); | ||
171 | out_le32((u32 *)addr, (u32)val); | ||
172 | } | ||
173 | #endif | ||
174 | |||
175 | #define DMA_IN(fsl_chan, addr, width) \ | ||
176 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
177 | in_be##width(addr) : in_le##width(addr)) | ||
178 | #define DMA_OUT(fsl_chan, addr, val, width) \ | ||
179 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
180 | out_be##width(addr, val) : out_le##width(addr, val)) | ||
181 | |||
182 | #define DMA_TO_CPU(fsl_chan, d, width) \ | ||
183 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
184 | be##width##_to_cpu(d) : le##width##_to_cpu(d)) | ||
185 | #define CPU_TO_DMA(fsl_chan, c, width) \ | ||
186 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
187 | cpu_to_be##width(c) : cpu_to_le##width(c)) | ||
188 | |||
189 | #endif /* __DMA_FSLDMA_H */ | ||
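
A short usage sketch for the endian-dispatching accessors above (the helper names here are hypothetical): DMA_IN/DMA_OUT select in_be##width or in_le##width from the channel's FSL_DMA_BIG_ENDIAN feature bit, so register helpers in fsldma.c stay endian-agnostic.

static u32 example_get_mr(struct fsl_dma_chan *fsl_chan)
{
        /* 32-bit mode register; big- or little-endian per fsl_chan->feature */
        return DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
}

static void example_set_mr(struct fsl_dma_chan *fsl_chan, u32 val)
{
        DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, val, 32);
}

64-bit registers such as cdar and ndar are accessed the same way with width 64, which on 32-bit builds falls back to the in_be64/in_le64 helpers defined above.
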
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index dff38accc5c1..4017d9e7acd2 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c | |||
@@ -714,6 +714,7 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( | |||
714 | new->len = len; | 714 | new->len = len; |
715 | new->dst = dma_dest; | 715 | new->dst = dma_dest; |
716 | new->src = dma_src; | 716 | new->src = dma_src; |
717 | new->async_tx.ack = 0; | ||
717 | return &new->async_tx; | 718 | return &new->async_tx; |
718 | } else | 719 | } else |
719 | return NULL; | 720 | return NULL; |
@@ -741,6 +742,7 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( | |||
741 | new->len = len; | 742 | new->len = len; |
742 | new->dst = dma_dest; | 743 | new->dst = dma_dest; |
743 | new->src = dma_src; | 744 | new->src = dma_src; |
745 | new->async_tx.ack = 0; | ||
744 | return &new->async_tx; | 746 | return &new->async_tx; |
745 | } else | 747 | } else |
746 | return NULL; | 748 | return NULL; |
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c index 3e9719948a8e..a03462750b95 100644 --- a/drivers/firewire/fw-card.c +++ b/drivers/firewire/fw-card.c | |||
@@ -18,6 +18,7 @@ | |||
18 | 18 | ||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/delay.h> | ||
21 | #include <linux/device.h> | 22 | #include <linux/device.h> |
22 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
23 | #include <linux/crc-itu-t.h> | 24 | #include <linux/crc-itu-t.h> |
@@ -214,17 +215,29 @@ static void | |||
214 | fw_card_bm_work(struct work_struct *work) | 215 | fw_card_bm_work(struct work_struct *work) |
215 | { | 216 | { |
216 | struct fw_card *card = container_of(work, struct fw_card, work.work); | 217 | struct fw_card *card = container_of(work, struct fw_card, work.work); |
217 | struct fw_device *root; | 218 | struct fw_device *root_device; |
219 | struct fw_node *root_node, *local_node; | ||
218 | struct bm_data bmd; | 220 | struct bm_data bmd; |
219 | unsigned long flags; | 221 | unsigned long flags; |
220 | int root_id, new_root_id, irm_id, gap_count, generation, grace; | 222 | int root_id, new_root_id, irm_id, gap_count, generation, grace; |
221 | int do_reset = 0; | 223 | int do_reset = 0; |
222 | 224 | ||
223 | spin_lock_irqsave(&card->lock, flags); | 225 | spin_lock_irqsave(&card->lock, flags); |
226 | local_node = card->local_node; | ||
227 | root_node = card->root_node; | ||
228 | |||
229 | if (local_node == NULL) { | ||
230 | spin_unlock_irqrestore(&card->lock, flags); | ||
231 | return; | ||
232 | } | ||
233 | fw_node_get(local_node); | ||
234 | fw_node_get(root_node); | ||
224 | 235 | ||
225 | generation = card->generation; | 236 | generation = card->generation; |
226 | root = card->root_node->data; | 237 | root_device = root_node->data; |
227 | root_id = card->root_node->node_id; | 238 | if (root_device) |
239 | fw_device_get(root_device); | ||
240 | root_id = root_node->node_id; | ||
228 | grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); | 241 | grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); |
229 | 242 | ||
230 | if (card->bm_generation + 1 == generation || | 243 | if (card->bm_generation + 1 == generation || |
@@ -243,14 +256,14 @@ fw_card_bm_work(struct work_struct *work) | |||
243 | 256 | ||
244 | irm_id = card->irm_node->node_id; | 257 | irm_id = card->irm_node->node_id; |
245 | if (!card->irm_node->link_on) { | 258 | if (!card->irm_node->link_on) { |
246 | new_root_id = card->local_node->node_id; | 259 | new_root_id = local_node->node_id; |
247 | fw_notify("IRM has link off, making local node (%02x) root.\n", | 260 | fw_notify("IRM has link off, making local node (%02x) root.\n", |
248 | new_root_id); | 261 | new_root_id); |
249 | goto pick_me; | 262 | goto pick_me; |
250 | } | 263 | } |
251 | 264 | ||
252 | bmd.lock.arg = cpu_to_be32(0x3f); | 265 | bmd.lock.arg = cpu_to_be32(0x3f); |
253 | bmd.lock.data = cpu_to_be32(card->local_node->node_id); | 266 | bmd.lock.data = cpu_to_be32(local_node->node_id); |
254 | 267 | ||
255 | spin_unlock_irqrestore(&card->lock, flags); | 268 | spin_unlock_irqrestore(&card->lock, flags); |
256 | 269 | ||
@@ -267,12 +280,12 @@ fw_card_bm_work(struct work_struct *work) | |||
267 | * Another bus reset happened. Just return, | 280 | * Another bus reset happened. Just return, |
268 | * the BM work has been rescheduled. | 281 | * the BM work has been rescheduled. |
269 | */ | 282 | */ |
270 | return; | 283 | goto out; |
271 | } | 284 | } |
272 | 285 | ||
273 | if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f) | 286 | if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f) |
274 | /* Somebody else is BM, let them do the work. */ | 287 | /* Somebody else is BM, let them do the work. */ |
275 | return; | 288 | goto out; |
276 | 289 | ||
277 | spin_lock_irqsave(&card->lock, flags); | 290 | spin_lock_irqsave(&card->lock, flags); |
278 | if (bmd.rcode != RCODE_COMPLETE) { | 291 | if (bmd.rcode != RCODE_COMPLETE) { |
@@ -282,7 +295,7 @@ fw_card_bm_work(struct work_struct *work) | |||
282 | * do a bus reset and pick the local node as | 295 | * do a bus reset and pick the local node as |
283 | * root, and thus, IRM. | 296 | * root, and thus, IRM. |
284 | */ | 297 | */ |
285 | new_root_id = card->local_node->node_id; | 298 | new_root_id = local_node->node_id; |
286 | fw_notify("BM lock failed, making local node (%02x) root.\n", | 299 | fw_notify("BM lock failed, making local node (%02x) root.\n", |
287 | new_root_id); | 300 | new_root_id); |
288 | goto pick_me; | 301 | goto pick_me; |
@@ -295,7 +308,7 @@ fw_card_bm_work(struct work_struct *work) | |||
295 | */ | 308 | */ |
296 | spin_unlock_irqrestore(&card->lock, flags); | 309 | spin_unlock_irqrestore(&card->lock, flags); |
297 | schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10)); | 310 | schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10)); |
298 | return; | 311 | goto out; |
299 | } | 312 | } |
300 | 313 | ||
301 | /* | 314 | /* |
@@ -305,20 +318,20 @@ fw_card_bm_work(struct work_struct *work) | |||
305 | */ | 318 | */ |
306 | card->bm_generation = generation; | 319 | card->bm_generation = generation; |
307 | 320 | ||
308 | if (root == NULL) { | 321 | if (root_device == NULL) { |
309 | /* | 322 | /* |
310 | * Either link_on is false, or we failed to read the | 323 | * Either link_on is false, or we failed to read the |
311 | * config rom. In either case, pick another root. | 324 | * config rom. In either case, pick another root. |
312 | */ | 325 | */ |
313 | new_root_id = card->local_node->node_id; | 326 | new_root_id = local_node->node_id; |
314 | } else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) { | 327 | } else if (atomic_read(&root_device->state) != FW_DEVICE_RUNNING) { |
315 | /* | 328 | /* |
316 | * If we haven't probed this device yet, bail out now | 329 | * If we haven't probed this device yet, bail out now |
317 | * and let's try again once that's done. | 330 | * and let's try again once that's done. |
318 | */ | 331 | */ |
319 | spin_unlock_irqrestore(&card->lock, flags); | 332 | spin_unlock_irqrestore(&card->lock, flags); |
320 | return; | 333 | goto out; |
321 | } else if (root->config_rom[2] & BIB_CMC) { | 334 | } else if (root_device->config_rom[2] & BIB_CMC) { |
322 | /* | 335 | /* |
323 | * FIXME: I suppose we should set the cmstr bit in the | 336 | * FIXME: I suppose we should set the cmstr bit in the |
324 | * STATE_CLEAR register of this node, as described in | 337 | * STATE_CLEAR register of this node, as described in |
@@ -332,7 +345,7 @@ fw_card_bm_work(struct work_struct *work) | |||
332 | * successfully read the config rom, but it's not | 345 | * successfully read the config rom, but it's not |
333 | * cycle master capable. | 346 | * cycle master capable. |
334 | */ | 347 | */ |
335 | new_root_id = card->local_node->node_id; | 348 | new_root_id = local_node->node_id; |
336 | } | 349 | } |
337 | 350 | ||
338 | pick_me: | 351 | pick_me: |
@@ -341,8 +354,8 @@ fw_card_bm_work(struct work_struct *work) | |||
341 | * the typically much larger 1394b beta repeater delays though. | 354 | * the typically much larger 1394b beta repeater delays though. |
342 | */ | 355 | */ |
343 | if (!card->beta_repeaters_present && | 356 | if (!card->beta_repeaters_present && |
344 | card->root_node->max_hops < ARRAY_SIZE(gap_count_table)) | 357 | root_node->max_hops < ARRAY_SIZE(gap_count_table)) |
345 | gap_count = gap_count_table[card->root_node->max_hops]; | 358 | gap_count = gap_count_table[root_node->max_hops]; |
346 | else | 359 | else |
347 | gap_count = 63; | 360 | gap_count = 63; |
348 | 361 | ||
@@ -364,6 +377,11 @@ fw_card_bm_work(struct work_struct *work) | |||
364 | fw_send_phy_config(card, new_root_id, generation, gap_count); | 377 | fw_send_phy_config(card, new_root_id, generation, gap_count); |
365 | fw_core_initiate_bus_reset(card, 1); | 378 | fw_core_initiate_bus_reset(card, 1); |
366 | } | 379 | } |
380 | out: | ||
381 | if (root_device) | ||
382 | fw_device_put(root_device); | ||
383 | fw_node_put(root_node); | ||
384 | fw_node_put(local_node); | ||
367 | } | 385 | } |
368 | 386 | ||
369 | static void | 387 | static void |
@@ -381,6 +399,7 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, | |||
381 | static atomic_t index = ATOMIC_INIT(-1); | 399 | static atomic_t index = ATOMIC_INIT(-1); |
382 | 400 | ||
383 | kref_init(&card->kref); | 401 | kref_init(&card->kref); |
402 | atomic_set(&card->device_count, 0); | ||
384 | card->index = atomic_inc_return(&index); | 403 | card->index = atomic_inc_return(&index); |
385 | card->driver = driver; | 404 | card->driver = driver; |
386 | card->device = device; | 405 | card->device = device; |
@@ -511,8 +530,14 @@ fw_core_remove_card(struct fw_card *card) | |||
511 | card->driver = &dummy_driver; | 530 | card->driver = &dummy_driver; |
512 | 531 | ||
513 | fw_destroy_nodes(card); | 532 | fw_destroy_nodes(card); |
514 | flush_scheduled_work(); | 533 | /* |
534 | * Wait for all device workqueue jobs to finish. Otherwise the | ||
535 | * firewire-core module could be unloaded before the jobs ran. | ||
536 | */ | ||
537 | while (atomic_read(&card->device_count) > 0) | ||
538 | msleep(100); | ||
515 | 539 | ||
540 | cancel_delayed_work_sync(&card->work); | ||
516 | fw_flush_transactions(card); | 541 | fw_flush_transactions(card); |
517 | del_timer_sync(&card->flush_timer); | 542 | del_timer_sync(&card->flush_timer); |
518 | 543 | ||
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 7e73cbaa4121..46bc197a047f 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c | |||
@@ -109,15 +109,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file) | |||
109 | struct client *client; | 109 | struct client *client; |
110 | unsigned long flags; | 110 | unsigned long flags; |
111 | 111 | ||
112 | device = fw_device_from_devt(inode->i_rdev); | 112 | device = fw_device_get_by_devt(inode->i_rdev); |
113 | if (device == NULL) | 113 | if (device == NULL) |
114 | return -ENODEV; | 114 | return -ENODEV; |
115 | 115 | ||
116 | client = kzalloc(sizeof(*client), GFP_KERNEL); | 116 | client = kzalloc(sizeof(*client), GFP_KERNEL); |
117 | if (client == NULL) | 117 | if (client == NULL) { |
118 | fw_device_put(device); | ||
118 | return -ENOMEM; | 119 | return -ENOMEM; |
120 | } | ||
119 | 121 | ||
120 | client->device = fw_device_get(device); | 122 | client->device = device; |
121 | INIT_LIST_HEAD(&client->event_list); | 123 | INIT_LIST_HEAD(&client->event_list); |
122 | INIT_LIST_HEAD(&client->resource_list); | 124 | INIT_LIST_HEAD(&client->resource_list); |
123 | spin_lock_init(&client->lock); | 125 | spin_lock_init(&client->lock); |
@@ -644,6 +646,10 @@ static int ioctl_create_iso_context(struct client *client, void *buffer) | |||
644 | struct fw_cdev_create_iso_context *request = buffer; | 646 | struct fw_cdev_create_iso_context *request = buffer; |
645 | struct fw_iso_context *context; | 647 | struct fw_iso_context *context; |
646 | 648 | ||
649 | /* We only support one context at this time. */ | ||
650 | if (client->iso_context != NULL) | ||
651 | return -EBUSY; | ||
652 | |||
647 | if (request->channel > 63) | 653 | if (request->channel > 63) |
648 | return -EINVAL; | 654 | return -EINVAL; |
649 | 655 | ||
@@ -790,8 +796,9 @@ static int ioctl_start_iso(struct client *client, void *buffer) | |||
790 | { | 796 | { |
791 | struct fw_cdev_start_iso *request = buffer; | 797 | struct fw_cdev_start_iso *request = buffer; |
792 | 798 | ||
793 | if (request->handle != 0) | 799 | if (client->iso_context == NULL || request->handle != 0) |
794 | return -EINVAL; | 800 | return -EINVAL; |
801 | |||
795 | if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { | 802 | if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { |
796 | if (request->tags == 0 || request->tags > 15) | 803 | if (request->tags == 0 || request->tags > 15) |
797 | return -EINVAL; | 804 | return -EINVAL; |
@@ -808,7 +815,7 @@ static int ioctl_stop_iso(struct client *client, void *buffer) | |||
808 | { | 815 | { |
809 | struct fw_cdev_stop_iso *request = buffer; | 816 | struct fw_cdev_stop_iso *request = buffer; |
810 | 817 | ||
811 | if (request->handle != 0) | 818 | if (client->iso_context == NULL || request->handle != 0) |
812 | return -EINVAL; | 819 | return -EINVAL; |
813 | 820 | ||
814 | return fw_iso_context_stop(client->iso_context); | 821 | return fw_iso_context_stop(client->iso_context); |
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index de9066e69adf..870125a3638e 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c | |||
@@ -150,21 +150,10 @@ struct bus_type fw_bus_type = { | |||
150 | }; | 150 | }; |
151 | EXPORT_SYMBOL(fw_bus_type); | 151 | EXPORT_SYMBOL(fw_bus_type); |
152 | 152 | ||
153 | struct fw_device *fw_device_get(struct fw_device *device) | ||
154 | { | ||
155 | get_device(&device->device); | ||
156 | |||
157 | return device; | ||
158 | } | ||
159 | |||
160 | void fw_device_put(struct fw_device *device) | ||
161 | { | ||
162 | put_device(&device->device); | ||
163 | } | ||
164 | |||
165 | static void fw_device_release(struct device *dev) | 153 | static void fw_device_release(struct device *dev) |
166 | { | 154 | { |
167 | struct fw_device *device = fw_device(dev); | 155 | struct fw_device *device = fw_device(dev); |
156 | struct fw_card *card = device->card; | ||
168 | unsigned long flags; | 157 | unsigned long flags; |
169 | 158 | ||
170 | /* | 159 | /* |
@@ -176,9 +165,9 @@ static void fw_device_release(struct device *dev) | |||
176 | spin_unlock_irqrestore(&device->card->lock, flags); | 165 | spin_unlock_irqrestore(&device->card->lock, flags); |
177 | 166 | ||
178 | fw_node_put(device->node); | 167 | fw_node_put(device->node); |
179 | fw_card_put(device->card); | ||
180 | kfree(device->config_rom); | 168 | kfree(device->config_rom); |
181 | kfree(device); | 169 | kfree(device); |
170 | atomic_dec(&card->device_count); | ||
182 | } | 171 | } |
183 | 172 | ||
184 | int fw_device_enable_phys_dma(struct fw_device *device) | 173 | int fw_device_enable_phys_dma(struct fw_device *device) |
@@ -358,12 +347,9 @@ static ssize_t | |||
358 | guid_show(struct device *dev, struct device_attribute *attr, char *buf) | 347 | guid_show(struct device *dev, struct device_attribute *attr, char *buf) |
359 | { | 348 | { |
360 | struct fw_device *device = fw_device(dev); | 349 | struct fw_device *device = fw_device(dev); |
361 | u64 guid; | ||
362 | |||
363 | guid = ((u64)device->config_rom[3] << 32) | device->config_rom[4]; | ||
364 | 350 | ||
365 | return snprintf(buf, PAGE_SIZE, "0x%016llx\n", | 351 | return snprintf(buf, PAGE_SIZE, "0x%08x%08x\n", |
366 | (unsigned long long)guid); | 352 | device->config_rom[3], device->config_rom[4]); |
367 | } | 353 | } |
368 | 354 | ||
369 | static struct device_attribute fw_device_attributes[] = { | 355 | static struct device_attribute fw_device_attributes[] = { |
@@ -610,12 +596,14 @@ static DECLARE_RWSEM(idr_rwsem); | |||
610 | static DEFINE_IDR(fw_device_idr); | 596 | static DEFINE_IDR(fw_device_idr); |
611 | int fw_cdev_major; | 597 | int fw_cdev_major; |
612 | 598 | ||
613 | struct fw_device *fw_device_from_devt(dev_t devt) | 599 | struct fw_device *fw_device_get_by_devt(dev_t devt) |
614 | { | 600 | { |
615 | struct fw_device *device; | 601 | struct fw_device *device; |
616 | 602 | ||
617 | down_read(&idr_rwsem); | 603 | down_read(&idr_rwsem); |
618 | device = idr_find(&fw_device_idr, MINOR(devt)); | 604 | device = idr_find(&fw_device_idr, MINOR(devt)); |
605 | if (device) | ||
606 | fw_device_get(device); | ||
619 | up_read(&idr_rwsem); | 607 | up_read(&idr_rwsem); |
620 | 608 | ||
621 | return device; | 609 | return device; |
@@ -627,13 +615,14 @@ static void fw_device_shutdown(struct work_struct *work) | |||
627 | container_of(work, struct fw_device, work.work); | 615 | container_of(work, struct fw_device, work.work); |
628 | int minor = MINOR(device->device.devt); | 616 | int minor = MINOR(device->device.devt); |
629 | 617 | ||
630 | down_write(&idr_rwsem); | ||
631 | idr_remove(&fw_device_idr, minor); | ||
632 | up_write(&idr_rwsem); | ||
633 | |||
634 | fw_device_cdev_remove(device); | 618 | fw_device_cdev_remove(device); |
635 | device_for_each_child(&device->device, NULL, shutdown_unit); | 619 | device_for_each_child(&device->device, NULL, shutdown_unit); |
636 | device_unregister(&device->device); | 620 | device_unregister(&device->device); |
621 | |||
622 | down_write(&idr_rwsem); | ||
623 | idr_remove(&fw_device_idr, minor); | ||
624 | up_write(&idr_rwsem); | ||
625 | fw_device_put(device); | ||
637 | } | 626 | } |
638 | 627 | ||
639 | static struct device_type fw_device_type = { | 628 | static struct device_type fw_device_type = { |
@@ -668,7 +657,8 @@ static void fw_device_init(struct work_struct *work) | |||
668 | */ | 657 | */ |
669 | 658 | ||
670 | if (read_bus_info_block(device, device->generation) < 0) { | 659 | if (read_bus_info_block(device, device->generation) < 0) { |
671 | if (device->config_rom_retries < MAX_RETRIES) { | 660 | if (device->config_rom_retries < MAX_RETRIES && |
661 | atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { | ||
672 | device->config_rom_retries++; | 662 | device->config_rom_retries++; |
673 | schedule_delayed_work(&device->work, RETRY_DELAY); | 663 | schedule_delayed_work(&device->work, RETRY_DELAY); |
674 | } else { | 664 | } else { |
@@ -682,10 +672,13 @@ static void fw_device_init(struct work_struct *work) | |||
682 | } | 672 | } |
683 | 673 | ||
684 | err = -ENOMEM; | 674 | err = -ENOMEM; |
675 | |||
676 | fw_device_get(device); | ||
685 | down_write(&idr_rwsem); | 677 | down_write(&idr_rwsem); |
686 | if (idr_pre_get(&fw_device_idr, GFP_KERNEL)) | 678 | if (idr_pre_get(&fw_device_idr, GFP_KERNEL)) |
687 | err = idr_get_new(&fw_device_idr, device, &minor); | 679 | err = idr_get_new(&fw_device_idr, device, &minor); |
688 | up_write(&idr_rwsem); | 680 | up_write(&idr_rwsem); |
681 | |||
689 | if (err < 0) | 682 | if (err < 0) |
690 | goto error; | 683 | goto error; |
691 | 684 | ||
@@ -717,13 +710,22 @@ static void fw_device_init(struct work_struct *work) | |||
717 | */ | 710 | */ |
718 | if (atomic_cmpxchg(&device->state, | 711 | if (atomic_cmpxchg(&device->state, |
719 | FW_DEVICE_INITIALIZING, | 712 | FW_DEVICE_INITIALIZING, |
720 | FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) | 713 | FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) { |
721 | fw_device_shutdown(&device->work.work); | 714 | fw_device_shutdown(&device->work.work); |
722 | else | 715 | } else { |
723 | fw_notify("created new fw device %s " | 716 | if (device->config_rom_retries) |
724 | "(%d config rom retries, S%d00)\n", | 717 | fw_notify("created device %s: GUID %08x%08x, S%d00, " |
725 | device->device.bus_id, device->config_rom_retries, | 718 | "%d config ROM retries\n", |
726 | 1 << device->max_speed); | 719 | device->device.bus_id, |
720 | device->config_rom[3], device->config_rom[4], | ||
721 | 1 << device->max_speed, | ||
722 | device->config_rom_retries); | ||
723 | else | ||
724 | fw_notify("created device %s: GUID %08x%08x, S%d00\n", | ||
725 | device->device.bus_id, | ||
726 | device->config_rom[3], device->config_rom[4], | ||
727 | 1 << device->max_speed); | ||
728 | } | ||
727 | 729 | ||
728 | /* | 730 | /* |
729 | * Reschedule the IRM work if we just finished reading the | 731 | * Reschedule the IRM work if we just finished reading the |
@@ -741,7 +743,9 @@ static void fw_device_init(struct work_struct *work) | |||
741 | idr_remove(&fw_device_idr, minor); | 743 | idr_remove(&fw_device_idr, minor); |
742 | up_write(&idr_rwsem); | 744 | up_write(&idr_rwsem); |
743 | error: | 745 | error: |
744 | put_device(&device->device); | 746 | fw_device_put(device); /* fw_device_idr's reference */ |
747 | |||
748 | put_device(&device->device); /* our reference */ | ||
745 | } | 749 | } |
746 | 750 | ||
747 | static int update_unit(struct device *dev, void *data) | 751 | static int update_unit(struct device *dev, void *data) |
@@ -791,7 +795,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) | |||
791 | */ | 795 | */ |
792 | device_initialize(&device->device); | 796 | device_initialize(&device->device); |
793 | atomic_set(&device->state, FW_DEVICE_INITIALIZING); | 797 | atomic_set(&device->state, FW_DEVICE_INITIALIZING); |
794 | device->card = fw_card_get(card); | 798 | atomic_inc(&card->device_count); |
799 | device->card = card; | ||
795 | device->node = fw_node_get(node); | 800 | device->node = fw_node_get(node); |
796 | device->node_id = node->node_id; | 801 | device->node_id = node->node_id; |
797 | device->generation = card->generation; | 802 | device->generation = card->generation; |
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h index 0854fe2bc110..78ecd3991b7f 100644 --- a/drivers/firewire/fw-device.h +++ b/drivers/firewire/fw-device.h | |||
@@ -76,14 +76,26 @@ fw_device_is_shutdown(struct fw_device *device) | |||
76 | return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; | 76 | return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; |
77 | } | 77 | } |
78 | 78 | ||
79 | struct fw_device *fw_device_get(struct fw_device *device); | 79 | static inline struct fw_device * |
80 | void fw_device_put(struct fw_device *device); | 80 | fw_device_get(struct fw_device *device) |
81 | { | ||
82 | get_device(&device->device); | ||
83 | |||
84 | return device; | ||
85 | } | ||
86 | |||
87 | static inline void | ||
88 | fw_device_put(struct fw_device *device) | ||
89 | { | ||
90 | put_device(&device->device); | ||
91 | } | ||
92 | |||
93 | struct fw_device *fw_device_get_by_devt(dev_t devt); | ||
81 | int fw_device_enable_phys_dma(struct fw_device *device); | 94 | int fw_device_enable_phys_dma(struct fw_device *device); |
82 | 95 | ||
83 | void fw_device_cdev_update(struct fw_device *device); | 96 | void fw_device_cdev_update(struct fw_device *device); |
84 | void fw_device_cdev_remove(struct fw_device *device); | 97 | void fw_device_cdev_remove(struct fw_device *device); |
85 | 98 | ||
86 | struct fw_device *fw_device_from_devt(dev_t devt); | ||
87 | extern int fw_cdev_major; | 99 | extern int fw_cdev_major; |
88 | 100 | ||
89 | struct fw_unit { | 101 | struct fw_unit { |
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c index 19ece9b6d742..03069a454c07 100644 --- a/drivers/firewire/fw-sbp2.c +++ b/drivers/firewire/fw-sbp2.c | |||
@@ -28,14 +28,15 @@ | |||
28 | * and many others. | 28 | * and many others. |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/blkdev.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/device.h> | ||
34 | #include <linux/dma-mapping.h> | ||
31 | #include <linux/kernel.h> | 35 | #include <linux/kernel.h> |
36 | #include <linux/mod_devicetable.h> | ||
32 | #include <linux/module.h> | 37 | #include <linux/module.h> |
33 | #include <linux/moduleparam.h> | 38 | #include <linux/moduleparam.h> |
34 | #include <linux/mod_devicetable.h> | ||
35 | #include <linux/device.h> | ||
36 | #include <linux/scatterlist.h> | 39 | #include <linux/scatterlist.h> |
37 | #include <linux/dma-mapping.h> | ||
38 | #include <linux/blkdev.h> | ||
39 | #include <linux/string.h> | 40 | #include <linux/string.h> |
40 | #include <linux/stringify.h> | 41 | #include <linux/stringify.h> |
41 | #include <linux/timer.h> | 42 | #include <linux/timer.h> |
@@ -47,9 +48,9 @@ | |||
47 | #include <scsi/scsi_device.h> | 48 | #include <scsi/scsi_device.h> |
48 | #include <scsi/scsi_host.h> | 49 | #include <scsi/scsi_host.h> |
49 | 50 | ||
50 | #include "fw-transaction.h" | ||
51 | #include "fw-topology.h" | ||
52 | #include "fw-device.h" | 51 | #include "fw-device.h" |
52 | #include "fw-topology.h" | ||
53 | #include "fw-transaction.h" | ||
53 | 54 | ||
54 | /* | 55 | /* |
55 | * So far only bridges from Oxford Semiconductor are known to support | 56 | * So far only bridges from Oxford Semiconductor are known to support |
@@ -82,6 +83,9 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | |||
82 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. | 83 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. |
83 | * Don't use this with devices which don't have this bug. | 84 | * Don't use this with devices which don't have this bug. |
84 | * | 85 | * |
86 | * - delay inquiry | ||
87 | * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry. | ||
88 | * | ||
85 | * - override internal blacklist | 89 | * - override internal blacklist |
86 | * Instead of adding to the built-in blacklist, use only the workarounds | 90 | * Instead of adding to the built-in blacklist, use only the workarounds |
87 | * specified in the module load parameter. | 91 | * specified in the module load parameter. |
@@ -91,6 +95,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | |||
91 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 | 95 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 |
92 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 | 96 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 |
93 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 | 97 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 |
98 | #define SBP2_WORKAROUND_DELAY_INQUIRY 0x10 | ||
99 | #define SBP2_INQUIRY_DELAY 12 | ||
94 | #define SBP2_WORKAROUND_OVERRIDE 0x100 | 100 | #define SBP2_WORKAROUND_OVERRIDE 0x100 |
95 | 101 | ||
96 | static int sbp2_param_workarounds; | 102 | static int sbp2_param_workarounds; |
@@ -100,6 +106,7 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" | |||
100 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) | 106 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) |
101 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) | 107 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) |
102 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) | 108 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) |
109 | ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY) | ||
103 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) | 110 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) |
104 | ", or a combination)"); | 111 | ", or a combination)"); |
105 | 112 | ||
@@ -115,7 +122,6 @@ static const char sbp2_driver_name[] = "sbp2"; | |||
115 | struct sbp2_logical_unit { | 122 | struct sbp2_logical_unit { |
116 | struct sbp2_target *tgt; | 123 | struct sbp2_target *tgt; |
117 | struct list_head link; | 124 | struct list_head link; |
118 | struct scsi_device *sdev; | ||
119 | struct fw_address_handler address_handler; | 125 | struct fw_address_handler address_handler; |
120 | struct list_head orb_list; | 126 | struct list_head orb_list; |
121 | 127 | ||
@@ -132,6 +138,8 @@ struct sbp2_logical_unit { | |||
132 | int generation; | 138 | int generation; |
133 | int retries; | 139 | int retries; |
134 | struct delayed_work work; | 140 | struct delayed_work work; |
141 | bool has_sdev; | ||
142 | bool blocked; | ||
135 | }; | 143 | }; |
136 | 144 | ||
137 | /* | 145 | /* |
@@ -141,16 +149,18 @@ struct sbp2_logical_unit { | |||
141 | struct sbp2_target { | 149 | struct sbp2_target { |
142 | struct kref kref; | 150 | struct kref kref; |
143 | struct fw_unit *unit; | 151 | struct fw_unit *unit; |
152 | const char *bus_id; | ||
153 | struct list_head lu_list; | ||
144 | 154 | ||
145 | u64 management_agent_address; | 155 | u64 management_agent_address; |
146 | int directory_id; | 156 | int directory_id; |
147 | int node_id; | 157 | int node_id; |
148 | int address_high; | 158 | int address_high; |
149 | 159 | unsigned int workarounds; | |
150 | unsigned workarounds; | ||
151 | struct list_head lu_list; | ||
152 | |||
153 | unsigned int mgt_orb_timeout; | 160 | unsigned int mgt_orb_timeout; |
161 | |||
162 | int dont_block; /* counter for each logical unit */ | ||
163 | int blocked; /* ditto */ | ||
154 | }; | 164 | }; |
155 | 165 | ||
156 | /* | 166 | /* |
@@ -160,7 +170,7 @@ struct sbp2_target { | |||
160 | */ | 170 | */ |
161 | #define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */ | 171 | #define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */ |
162 | #define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ | 172 | #define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ |
163 | #define SBP2_ORB_TIMEOUT 2000 /* Timeout in ms */ | 173 | #define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */ |
164 | #define SBP2_ORB_NULL 0x80000000 | 174 | #define SBP2_ORB_NULL 0x80000000 |
165 | #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 | 175 | #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 |
166 | 176 | ||
@@ -297,7 +307,7 @@ struct sbp2_command_orb { | |||
297 | static const struct { | 307 | static const struct { |
298 | u32 firmware_revision; | 308 | u32 firmware_revision; |
299 | u32 model; | 309 | u32 model; |
300 | unsigned workarounds; | 310 | unsigned int workarounds; |
301 | } sbp2_workarounds_table[] = { | 311 | } sbp2_workarounds_table[] = { |
302 | /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { | 312 | /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { |
303 | .firmware_revision = 0x002800, | 313 | .firmware_revision = 0x002800, |
@@ -305,6 +315,11 @@ static const struct { | |||
305 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | | 315 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | |
306 | SBP2_WORKAROUND_MODE_SENSE_8, | 316 | SBP2_WORKAROUND_MODE_SENSE_8, |
307 | }, | 317 | }, |
318 | /* DViCO Momobay FX-3A with TSB42AA9A bridge */ { | ||
319 | .firmware_revision = 0x002800, | ||
320 | .model = 0x000000, | ||
321 | .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY, | ||
322 | }, | ||
308 | /* Initio bridges, actually only needed for some older ones */ { | 323 | /* Initio bridges, actually only needed for some older ones */ { |
309 | .firmware_revision = 0x000200, | 324 | .firmware_revision = 0x000200, |
310 | .model = ~0, | 325 | .model = ~0, |
@@ -501,6 +516,9 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
501 | unsigned int timeout; | 516 | unsigned int timeout; |
502 | int retval = -ENOMEM; | 517 | int retval = -ENOMEM; |
503 | 518 | ||
519 | if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device)) | ||
520 | return 0; | ||
521 | |||
504 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); | 522 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); |
505 | if (orb == NULL) | 523 | if (orb == NULL) |
506 | return -ENOMEM; | 524 | return -ENOMEM; |
@@ -553,20 +571,20 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
553 | 571 | ||
554 | retval = -EIO; | 572 | retval = -EIO; |
555 | if (sbp2_cancel_orbs(lu) == 0) { | 573 | if (sbp2_cancel_orbs(lu) == 0) { |
556 | fw_error("orb reply timed out, rcode=0x%02x\n", | 574 | fw_error("%s: orb reply timed out, rcode=0x%02x\n", |
557 | orb->base.rcode); | 575 | lu->tgt->bus_id, orb->base.rcode); |
558 | goto out; | 576 | goto out; |
559 | } | 577 | } |
560 | 578 | ||
561 | if (orb->base.rcode != RCODE_COMPLETE) { | 579 | if (orb->base.rcode != RCODE_COMPLETE) { |
562 | fw_error("management write failed, rcode 0x%02x\n", | 580 | fw_error("%s: management write failed, rcode 0x%02x\n", |
563 | orb->base.rcode); | 581 | lu->tgt->bus_id, orb->base.rcode); |
564 | goto out; | 582 | goto out; |
565 | } | 583 | } |
566 | 584 | ||
567 | if (STATUS_GET_RESPONSE(orb->status) != 0 || | 585 | if (STATUS_GET_RESPONSE(orb->status) != 0 || |
568 | STATUS_GET_SBP_STATUS(orb->status) != 0) { | 586 | STATUS_GET_SBP_STATUS(orb->status) != 0) { |
569 | fw_error("error status: %d:%d\n", | 587 | fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id, |
570 | STATUS_GET_RESPONSE(orb->status), | 588 | STATUS_GET_RESPONSE(orb->status), |
571 | STATUS_GET_SBP_STATUS(orb->status)); | 589 | STATUS_GET_SBP_STATUS(orb->status)); |
572 | goto out; | 590 | goto out; |
@@ -590,29 +608,158 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
590 | 608 | ||
591 | static void | 609 | static void |
592 | complete_agent_reset_write(struct fw_card *card, int rcode, | 610 | complete_agent_reset_write(struct fw_card *card, int rcode, |
593 | void *payload, size_t length, void *data) | 611 | void *payload, size_t length, void *done) |
594 | { | 612 | { |
595 | struct fw_transaction *t = data; | 613 | complete(done); |
614 | } | ||
596 | 615 | ||
597 | kfree(t); | 616 | static void sbp2_agent_reset(struct sbp2_logical_unit *lu) |
617 | { | ||
618 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | ||
619 | DECLARE_COMPLETION_ONSTACK(done); | ||
620 | struct fw_transaction t; | ||
621 | static u32 z; | ||
622 | |||
623 | fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST, | ||
624 | lu->tgt->node_id, lu->generation, device->max_speed, | ||
625 | lu->command_block_agent_address + SBP2_AGENT_RESET, | ||
626 | &z, sizeof(z), complete_agent_reset_write, &done); | ||
627 | wait_for_completion(&done); | ||
628 | } | ||
629 | |||
630 | static void | ||
631 | complete_agent_reset_write_no_wait(struct fw_card *card, int rcode, | ||
632 | void *payload, size_t length, void *data) | ||
633 | { | ||
634 | kfree(data); | ||
598 | } | 635 | } |
599 | 636 | ||
600 | static int sbp2_agent_reset(struct sbp2_logical_unit *lu) | 637 | static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) |
601 | { | 638 | { |
602 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | 639 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); |
603 | struct fw_transaction *t; | 640 | struct fw_transaction *t; |
604 | static u32 zero; | 641 | static u32 z; |
605 | 642 | ||
606 | t = kzalloc(sizeof(*t), GFP_ATOMIC); | 643 | t = kmalloc(sizeof(*t), GFP_ATOMIC); |
607 | if (t == NULL) | 644 | if (t == NULL) |
608 | return -ENOMEM; | 645 | return; |
609 | 646 | ||
610 | fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, | 647 | fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, |
611 | lu->tgt->node_id, lu->generation, device->max_speed, | 648 | lu->tgt->node_id, lu->generation, device->max_speed, |
612 | lu->command_block_agent_address + SBP2_AGENT_RESET, | 649 | lu->command_block_agent_address + SBP2_AGENT_RESET, |
613 | &zero, sizeof(zero), complete_agent_reset_write, t); | 650 | &z, sizeof(z), complete_agent_reset_write_no_wait, t); |
651 | } | ||
614 | 652 | ||
615 | return 0; | 653 | static void sbp2_set_generation(struct sbp2_logical_unit *lu, int generation) |
654 | { | ||
655 | struct fw_card *card = fw_device(lu->tgt->unit->device.parent)->card; | ||
656 | unsigned long flags; | ||
657 | |||
658 | /* serialize with comparisons of lu->generation and card->generation */ | ||
659 | spin_lock_irqsave(&card->lock, flags); | ||
660 | lu->generation = generation; | ||
661 | spin_unlock_irqrestore(&card->lock, flags); | ||
662 | } | ||
663 | |||
664 | static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) | ||
665 | { | ||
666 | /* | ||
667 | * We may access dont_block without taking card->lock here: | ||
668 | * All callers of sbp2_allow_block() and all callers of sbp2_unblock() | ||
669 | * are currently serialized against each other. | ||
670 | * And a wrong result in sbp2_conditionally_block()'s access of | ||
671 | * dont_block is rather harmless; it simply misses its first chance. | ||
672 | */ | ||
673 | --lu->tgt->dont_block; | ||
674 | } | ||
675 | |||
676 | /* | ||
677 | * Blocks lu->tgt if all of the following conditions are met: | ||
678 | * - Login, INQUIRY, and high-level SCSI setup of all of the target's | ||
679 | * logical units have been finished (indicated by dont_block == 0). | ||
680 | * - lu->generation is stale. | ||
681 | * | ||
682 | * Note, scsi_block_requests() must be called while holding card->lock, | ||
683 | * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to | ||
684 | * unblock the target. | ||
685 | */ | ||
686 | static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) | ||
687 | { | ||
688 | struct sbp2_target *tgt = lu->tgt; | ||
689 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
690 | struct Scsi_Host *shost = | ||
691 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
692 | unsigned long flags; | ||
693 | |||
694 | spin_lock_irqsave(&card->lock, flags); | ||
695 | if (!tgt->dont_block && !lu->blocked && | ||
696 | lu->generation != card->generation) { | ||
697 | lu->blocked = true; | ||
698 | if (++tgt->blocked == 1) { | ||
699 | scsi_block_requests(shost); | ||
700 | fw_notify("blocked %s\n", lu->tgt->bus_id); | ||
701 | } | ||
702 | } | ||
703 | spin_unlock_irqrestore(&card->lock, flags); | ||
704 | } | ||
705 | |||
706 | /* | ||
707 | * Unblocks lu->tgt as soon as all its logical units can be unblocked. | ||
708 | * Note, it is harmless to run scsi_unblock_requests() outside the | ||
709 | * card->lock protected section. On the other hand, running it inside | ||
710 | * the section might clash with shost->host_lock. | ||
711 | */ | ||
712 | static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) | ||
713 | { | ||
714 | struct sbp2_target *tgt = lu->tgt; | ||
715 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
716 | struct Scsi_Host *shost = | ||
717 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
718 | unsigned long flags; | ||
719 | bool unblock = false; | ||
720 | |||
721 | spin_lock_irqsave(&card->lock, flags); | ||
722 | if (lu->blocked && lu->generation == card->generation) { | ||
723 | lu->blocked = false; | ||
724 | unblock = --tgt->blocked == 0; | ||
725 | } | ||
726 | spin_unlock_irqrestore(&card->lock, flags); | ||
727 | |||
728 | if (unblock) { | ||
729 | scsi_unblock_requests(shost); | ||
730 | fw_notify("unblocked %s\n", lu->tgt->bus_id); | ||
731 | } | ||
732 | } | ||
733 | |||
734 | /* | ||
735 | * Prevents future blocking of tgt and unblocks it. | ||
736 | * Note, it is harmless to run scsi_unblock_requests() outside the | ||
737 | * card->lock protected section. On the other hand, running it inside | ||
738 | * the section might clash with shost->host_lock. | ||
739 | */ | ||
740 | static void sbp2_unblock(struct sbp2_target *tgt) | ||
741 | { | ||
742 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
743 | struct Scsi_Host *shost = | ||
744 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
745 | unsigned long flags; | ||
746 | |||
747 | spin_lock_irqsave(&card->lock, flags); | ||
748 | ++tgt->dont_block; | ||
749 | spin_unlock_irqrestore(&card->lock, flags); | ||
750 | |||
751 | scsi_unblock_requests(shost); | ||
752 | } | ||
753 | |||
754 | static int sbp2_lun2int(u16 lun) | ||
755 | { | ||
756 | struct scsi_lun eight_bytes_lun; | ||
757 | |||
758 | memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun)); | ||
759 | eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff; | ||
760 | eight_bytes_lun.scsi_lun[1] = lun & 0xff; | ||
761 | |||
762 | return scsilun_to_int(&eight_bytes_lun); | ||
616 | } | 763 | } |
617 | 764 | ||
618 | static void sbp2_release_target(struct kref *kref) | 765 | static void sbp2_release_target(struct kref *kref) |
@@ -621,26 +768,31 @@ static void sbp2_release_target(struct kref *kref) | |||
621 | struct sbp2_logical_unit *lu, *next; | 768 | struct sbp2_logical_unit *lu, *next; |
622 | struct Scsi_Host *shost = | 769 | struct Scsi_Host *shost = |
623 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | 770 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); |
771 | struct scsi_device *sdev; | ||
624 | struct fw_device *device = fw_device(tgt->unit->device.parent); | 772 | struct fw_device *device = fw_device(tgt->unit->device.parent); |
625 | 773 | ||
626 | list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { | 774 | /* prevent deadlocks */ |
627 | if (lu->sdev) | 775 | sbp2_unblock(tgt); |
628 | scsi_remove_device(lu->sdev); | ||
629 | 776 | ||
630 | if (!fw_device_is_shutdown(device)) | 777 | list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { |
631 | sbp2_send_management_orb(lu, tgt->node_id, | 778 | sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun)); |
632 | lu->generation, SBP2_LOGOUT_REQUEST, | 779 | if (sdev) { |
633 | lu->login_id, NULL); | 780 | scsi_remove_device(sdev); |
781 | scsi_device_put(sdev); | ||
782 | } | ||
783 | sbp2_send_management_orb(lu, tgt->node_id, lu->generation, | ||
784 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
634 | 785 | ||
635 | fw_core_remove_address_handler(&lu->address_handler); | 786 | fw_core_remove_address_handler(&lu->address_handler); |
636 | list_del(&lu->link); | 787 | list_del(&lu->link); |
637 | kfree(lu); | 788 | kfree(lu); |
638 | } | 789 | } |
639 | scsi_remove_host(shost); | 790 | scsi_remove_host(shost); |
640 | fw_notify("released %s\n", tgt->unit->device.bus_id); | 791 | fw_notify("released %s\n", tgt->bus_id); |
641 | 792 | ||
642 | put_device(&tgt->unit->device); | 793 | put_device(&tgt->unit->device); |
643 | scsi_host_put(shost); | 794 | scsi_host_put(shost); |
795 | fw_device_put(device); | ||
644 | } | 796 | } |
645 | 797 | ||
646 | static struct workqueue_struct *sbp2_wq; | 798 | static struct workqueue_struct *sbp2_wq; |
@@ -666,33 +818,42 @@ static void sbp2_login(struct work_struct *work) | |||
666 | { | 818 | { |
667 | struct sbp2_logical_unit *lu = | 819 | struct sbp2_logical_unit *lu = |
668 | container_of(work, struct sbp2_logical_unit, work.work); | 820 | container_of(work, struct sbp2_logical_unit, work.work); |
669 | struct Scsi_Host *shost = | 821 | struct sbp2_target *tgt = lu->tgt; |
670 | container_of((void *)lu->tgt, struct Scsi_Host, hostdata[0]); | 822 | struct fw_device *device = fw_device(tgt->unit->device.parent); |
823 | struct Scsi_Host *shost; | ||
671 | struct scsi_device *sdev; | 824 | struct scsi_device *sdev; |
672 | struct scsi_lun eight_bytes_lun; | ||
673 | struct fw_unit *unit = lu->tgt->unit; | ||
674 | struct fw_device *device = fw_device(unit->device.parent); | ||
675 | struct sbp2_login_response response; | 825 | struct sbp2_login_response response; |
676 | int generation, node_id, local_node_id; | 826 | int generation, node_id, local_node_id; |
677 | 827 | ||
828 | if (fw_device_is_shutdown(device)) | ||
829 | goto out; | ||
830 | |||
678 | generation = device->generation; | 831 | generation = device->generation; |
679 | smp_rmb(); /* node_id must not be older than generation */ | 832 | smp_rmb(); /* node_id must not be older than generation */ |
680 | node_id = device->node_id; | 833 | node_id = device->node_id; |
681 | local_node_id = device->card->node_id; | 834 | local_node_id = device->card->node_id; |
682 | 835 | ||
836 | /* If this is a re-login attempt, log out, or we might be rejected. */ | ||
837 | if (lu->has_sdev) | ||
838 | sbp2_send_management_orb(lu, device->node_id, generation, | ||
839 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
840 | |||
683 | if (sbp2_send_management_orb(lu, node_id, generation, | 841 | if (sbp2_send_management_orb(lu, node_id, generation, |
684 | SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { | 842 | SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { |
685 | if (lu->retries++ < 5) | 843 | if (lu->retries++ < 5) { |
686 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); | 844 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); |
687 | else | 845 | } else { |
688 | fw_error("failed to login to %s LUN %04x\n", | 846 | fw_error("%s: failed to login to LUN %04x\n", |
689 | unit->device.bus_id, lu->lun); | 847 | tgt->bus_id, lu->lun); |
848 | /* Let any waiting I/O fail from now on. */ | ||
849 | sbp2_unblock(lu->tgt); | ||
850 | } | ||
690 | goto out; | 851 | goto out; |
691 | } | 852 | } |
692 | 853 | ||
693 | lu->generation = generation; | 854 | tgt->node_id = node_id; |
694 | lu->tgt->node_id = node_id; | 855 | tgt->address_high = local_node_id << 16; |
695 | lu->tgt->address_high = local_node_id << 16; | 856 | sbp2_set_generation(lu, generation); |
696 | 857 | ||
697 | /* Get command block agent offset and login id. */ | 858 | /* Get command block agent offset and login id. */ |
698 | lu->command_block_agent_address = | 859 | lu->command_block_agent_address = |
@@ -700,8 +861,8 @@ static void sbp2_login(struct work_struct *work) | |||
700 | response.command_block_agent.low; | 861 | response.command_block_agent.low; |
701 | lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response); | 862 | lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response); |
702 | 863 | ||
703 | fw_notify("logged in to %s LUN %04x (%d retries)\n", | 864 | fw_notify("%s: logged in to LUN %04x (%d retries)\n", |
704 | unit->device.bus_id, lu->lun, lu->retries); | 865 | tgt->bus_id, lu->lun, lu->retries); |
705 | 866 | ||
706 | #if 0 | 867 | #if 0 |
707 | /* FIXME: The linux1394 sbp2 does this last step. */ | 868 | /* FIXME: The linux1394 sbp2 does this last step. */ |
@@ -711,26 +872,58 @@ static void sbp2_login(struct work_struct *work) | |||
711 | PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); | 872 | PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); |
712 | sbp2_agent_reset(lu); | 873 | sbp2_agent_reset(lu); |
713 | 874 | ||
714 | memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun)); | 875 | /* This was a re-login. */ |
715 | eight_bytes_lun.scsi_lun[0] = (lu->lun >> 8) & 0xff; | 876 | if (lu->has_sdev) { |
716 | eight_bytes_lun.scsi_lun[1] = lu->lun & 0xff; | 877 | sbp2_cancel_orbs(lu); |
878 | sbp2_conditionally_unblock(lu); | ||
879 | goto out; | ||
880 | } | ||
717 | 881 | ||
718 | sdev = __scsi_add_device(shost, 0, 0, | 882 | if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) |
719 | scsilun_to_int(&eight_bytes_lun), lu); | 883 | ssleep(SBP2_INQUIRY_DELAY); |
720 | if (IS_ERR(sdev)) { | 884 | |
721 | sbp2_send_management_orb(lu, node_id, generation, | 885 | shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]); |
722 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | 886 | sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu); |
723 | /* | 887 | /* |
724 | * Set this back to sbp2_login so we fall back and | 888 | * FIXME: We are unable to perform reconnects while in sbp2_login(). |
725 | * retry login on bus reset. | 889 | * Therefore __scsi_add_device() will get into trouble if a bus reset |
726 | */ | 890 | * happens in parallel. It will either fail or leave us with an |
727 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | 891 | * unusable sdev. As a workaround we check for this and retry the |
728 | } else { | 892 | * whole login and SCSI probing. |
729 | lu->sdev = sdev; | 893 | */ |
894 | |||
895 | /* Reported error during __scsi_add_device() */ | ||
896 | if (IS_ERR(sdev)) | ||
897 | goto out_logout_login; | ||
898 | |||
899 | /* Unreported error during __scsi_add_device() */ | ||
900 | smp_rmb(); /* get current card generation */ | ||
901 | if (generation != device->card->generation) { | ||
902 | scsi_remove_device(sdev); | ||
730 | scsi_device_put(sdev); | 903 | scsi_device_put(sdev); |
904 | goto out_logout_login; | ||
731 | } | 905 | } |
906 | |||
907 | /* No error during __scsi_add_device() */ | ||
908 | lu->has_sdev = true; | ||
909 | scsi_device_put(sdev); | ||
910 | sbp2_allow_block(lu); | ||
911 | goto out; | ||
912 | |||
913 | out_logout_login: | ||
914 | smp_rmb(); /* generation may have changed */ | ||
915 | generation = device->generation; | ||
916 | smp_rmb(); /* node_id must not be older than generation */ | ||
917 | |||
918 | sbp2_send_management_orb(lu, device->node_id, generation, | ||
919 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
920 | /* | ||
921 | * If a bus reset happened, sbp2_update will have requeued | ||
922 | * lu->work already. Reset the work from reconnect to login. | ||
923 | */ | ||
924 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | ||
732 | out: | 925 | out: |
733 | sbp2_target_put(lu->tgt); | 926 | sbp2_target_put(tgt); |
734 | } | 927 | } |
735 | 928 | ||
736 | static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) | 929 | static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) |
@@ -751,10 +944,12 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) | |||
751 | return -ENOMEM; | 944 | return -ENOMEM; |
752 | } | 945 | } |
753 | 946 | ||
754 | lu->tgt = tgt; | 947 | lu->tgt = tgt; |
755 | lu->sdev = NULL; | 948 | lu->lun = lun_entry & 0xffff; |
756 | lu->lun = lun_entry & 0xffff; | 949 | lu->retries = 0; |
757 | lu->retries = 0; | 950 | lu->has_sdev = false; |
951 | lu->blocked = false; | ||
952 | ++tgt->dont_block; | ||
758 | INIT_LIST_HEAD(&lu->orb_list); | 953 | INIT_LIST_HEAD(&lu->orb_list); |
759 | INIT_DELAYED_WORK(&lu->work, sbp2_login); | 954 | INIT_DELAYED_WORK(&lu->work, sbp2_login); |
760 | 955 | ||
@@ -813,7 +1008,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, | |||
813 | if (timeout > tgt->mgt_orb_timeout) | 1008 | if (timeout > tgt->mgt_orb_timeout) |
814 | fw_notify("%s: config rom contains %ds " | 1009 | fw_notify("%s: config rom contains %ds " |
815 | "management ORB timeout, limiting " | 1010 | "management ORB timeout, limiting " |
816 | "to %ds\n", tgt->unit->device.bus_id, | 1011 | "to %ds\n", tgt->bus_id, |
817 | timeout / 1000, | 1012 | timeout / 1000, |
818 | tgt->mgt_orb_timeout / 1000); | 1013 | tgt->mgt_orb_timeout / 1000); |
819 | break; | 1014 | break; |
@@ -836,12 +1031,12 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, | |||
836 | u32 firmware_revision) | 1031 | u32 firmware_revision) |
837 | { | 1032 | { |
838 | int i; | 1033 | int i; |
839 | unsigned w = sbp2_param_workarounds; | 1034 | unsigned int w = sbp2_param_workarounds; |
840 | 1035 | ||
841 | if (w) | 1036 | if (w) |
842 | fw_notify("Please notify linux1394-devel@lists.sourceforge.net " | 1037 | fw_notify("Please notify linux1394-devel@lists.sourceforge.net " |
843 | "if you need the workarounds parameter for %s\n", | 1038 | "if you need the workarounds parameter for %s\n", |
844 | tgt->unit->device.bus_id); | 1039 | tgt->bus_id); |
845 | 1040 | ||
846 | if (w & SBP2_WORKAROUND_OVERRIDE) | 1041 | if (w & SBP2_WORKAROUND_OVERRIDE) |
847 | goto out; | 1042 | goto out; |
@@ -863,8 +1058,7 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, | |||
863 | if (w) | 1058 | if (w) |
864 | fw_notify("Workarounds for %s: 0x%x " | 1059 | fw_notify("Workarounds for %s: 0x%x " |
865 | "(firmware_revision 0x%06x, model_id 0x%06x)\n", | 1060 | "(firmware_revision 0x%06x, model_id 0x%06x)\n", |
866 | tgt->unit->device.bus_id, | 1061 | tgt->bus_id, w, firmware_revision, model); |
867 | w, firmware_revision, model); | ||
868 | tgt->workarounds = w; | 1062 | tgt->workarounds = w; |
869 | } | 1063 | } |
870 | 1064 | ||
@@ -888,6 +1082,7 @@ static int sbp2_probe(struct device *dev) | |||
888 | tgt->unit = unit; | 1082 | tgt->unit = unit; |
889 | kref_init(&tgt->kref); | 1083 | kref_init(&tgt->kref); |
890 | INIT_LIST_HEAD(&tgt->lu_list); | 1084 | INIT_LIST_HEAD(&tgt->lu_list); |
1085 | tgt->bus_id = unit->device.bus_id; | ||
891 | 1086 | ||
892 | if (fw_device_enable_phys_dma(device) < 0) | 1087 | if (fw_device_enable_phys_dma(device) < 0) |
893 | goto fail_shost_put; | 1088 | goto fail_shost_put; |
@@ -895,6 +1090,8 @@ static int sbp2_probe(struct device *dev) | |||
895 | if (scsi_add_host(shost, &unit->device) < 0) | 1090 | if (scsi_add_host(shost, &unit->device) < 0) |
896 | goto fail_shost_put; | 1091 | goto fail_shost_put; |
897 | 1092 | ||
1093 | fw_device_get(device); | ||
1094 | |||
898 | /* Initialize to values that won't match anything in our table. */ | 1095 | /* Initialize to values that won't match anything in our table. */ |
899 | firmware_revision = 0xff000000; | 1096 | firmware_revision = 0xff000000; |
900 | model = 0xff000000; | 1097 | model = 0xff000000; |
@@ -938,10 +1135,13 @@ static void sbp2_reconnect(struct work_struct *work) | |||
938 | { | 1135 | { |
939 | struct sbp2_logical_unit *lu = | 1136 | struct sbp2_logical_unit *lu = |
940 | container_of(work, struct sbp2_logical_unit, work.work); | 1137 | container_of(work, struct sbp2_logical_unit, work.work); |
941 | struct fw_unit *unit = lu->tgt->unit; | 1138 | struct sbp2_target *tgt = lu->tgt; |
942 | struct fw_device *device = fw_device(unit->device.parent); | 1139 | struct fw_device *device = fw_device(tgt->unit->device.parent); |
943 | int generation, node_id, local_node_id; | 1140 | int generation, node_id, local_node_id; |
944 | 1141 | ||
1142 | if (fw_device_is_shutdown(device)) | ||
1143 | goto out; | ||
1144 | |||
945 | generation = device->generation; | 1145 | generation = device->generation; |
946 | smp_rmb(); /* node_id must not be older than generation */ | 1146 | smp_rmb(); /* node_id must not be older than generation */ |
947 | node_id = device->node_id; | 1147 | node_id = device->node_id; |
@@ -950,10 +1150,17 @@ static void sbp2_reconnect(struct work_struct *work) | |||
950 | if (sbp2_send_management_orb(lu, node_id, generation, | 1150 | if (sbp2_send_management_orb(lu, node_id, generation, |
951 | SBP2_RECONNECT_REQUEST, | 1151 | SBP2_RECONNECT_REQUEST, |
952 | lu->login_id, NULL) < 0) { | 1152 | lu->login_id, NULL) < 0) { |
953 | if (lu->retries++ >= 5) { | 1153 | /* |
954 | fw_error("failed to reconnect to %s\n", | 1154 | * If reconnect was impossible even though we are in the |
955 | unit->device.bus_id); | 1155 | * current generation, fall back and try to log in again. |
956 | /* Fall back and try to log in again. */ | 1156 | * |
1157 | * We could check for "Function rejected" status, but | ||
1158 | * looking at the bus generation is simpler and more general. | ||
1159 | */ | ||
1160 | smp_rmb(); /* get current card generation */ | ||
1161 | if (generation == device->card->generation || | ||
1162 | lu->retries++ >= 5) { | ||
1163 | fw_error("%s: failed to reconnect\n", tgt->bus_id); | ||
957 | lu->retries = 0; | 1164 | lu->retries = 0; |
958 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | 1165 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); |
959 | } | 1166 | } |
@@ -961,17 +1168,18 @@ static void sbp2_reconnect(struct work_struct *work) | |||
961 | goto out; | 1168 | goto out; |
962 | } | 1169 | } |
963 | 1170 | ||
964 | lu->generation = generation; | 1171 | tgt->node_id = node_id; |
965 | lu->tgt->node_id = node_id; | 1172 | tgt->address_high = local_node_id << 16; |
966 | lu->tgt->address_high = local_node_id << 16; | 1173 | sbp2_set_generation(lu, generation); |
967 | 1174 | ||
968 | fw_notify("reconnected to %s LUN %04x (%d retries)\n", | 1175 | fw_notify("%s: reconnected to LUN %04x (%d retries)\n", |
969 | unit->device.bus_id, lu->lun, lu->retries); | 1176 | tgt->bus_id, lu->lun, lu->retries); |
970 | 1177 | ||
971 | sbp2_agent_reset(lu); | 1178 | sbp2_agent_reset(lu); |
972 | sbp2_cancel_orbs(lu); | 1179 | sbp2_cancel_orbs(lu); |
1180 | sbp2_conditionally_unblock(lu); | ||
973 | out: | 1181 | out: |
974 | sbp2_target_put(lu->tgt); | 1182 | sbp2_target_put(tgt); |
975 | } | 1183 | } |
976 | 1184 | ||
977 | static void sbp2_update(struct fw_unit *unit) | 1185 | static void sbp2_update(struct fw_unit *unit) |
@@ -986,6 +1194,7 @@ static void sbp2_update(struct fw_unit *unit) | |||
986 | * Iteration over tgt->lu_list is therefore safe here. | 1194 | * Iteration over tgt->lu_list is therefore safe here. |
987 | */ | 1195 | */ |
988 | list_for_each_entry(lu, &tgt->lu_list, link) { | 1196 | list_for_each_entry(lu, &tgt->lu_list, link) { |
1197 | sbp2_conditionally_block(lu); | ||
989 | lu->retries = 0; | 1198 | lu->retries = 0; |
990 | sbp2_queue_work(lu, 0); | 1199 | sbp2_queue_work(lu, 0); |
991 | } | 1200 | } |
@@ -1063,7 +1272,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) | |||
1063 | 1272 | ||
1064 | if (status != NULL) { | 1273 | if (status != NULL) { |
1065 | if (STATUS_GET_DEAD(*status)) | 1274 | if (STATUS_GET_DEAD(*status)) |
1066 | sbp2_agent_reset(orb->lu); | 1275 | sbp2_agent_reset_no_wait(orb->lu); |
1067 | 1276 | ||
1068 | switch (STATUS_GET_RESPONSE(*status)) { | 1277 | switch (STATUS_GET_RESPONSE(*status)) { |
1069 | case SBP2_STATUS_REQUEST_COMPLETE: | 1278 | case SBP2_STATUS_REQUEST_COMPLETE: |
@@ -1089,6 +1298,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) | |||
1089 | * or when sending the write (less likely). | 1298 | * or when sending the write (less likely). |
1090 | */ | 1299 | */ |
1091 | result = DID_BUS_BUSY << 16; | 1300 | result = DID_BUS_BUSY << 16; |
1301 | sbp2_conditionally_block(orb->lu); | ||
1092 | } | 1302 | } |
1093 | 1303 | ||
1094 | dma_unmap_single(device->card->device, orb->base.request_bus, | 1304 | dma_unmap_single(device->card->device, orb->base.request_bus, |
@@ -1197,7 +1407,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) | |||
1197 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | 1407 | struct sbp2_logical_unit *lu = cmd->device->hostdata; |
1198 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | 1408 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); |
1199 | struct sbp2_command_orb *orb; | 1409 | struct sbp2_command_orb *orb; |
1200 | unsigned max_payload; | 1410 | unsigned int max_payload; |
1201 | int retval = SCSI_MLQUEUE_HOST_BUSY; | 1411 | int retval = SCSI_MLQUEUE_HOST_BUSY; |
1202 | 1412 | ||
1203 | /* | 1413 | /* |
@@ -1275,6 +1485,10 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) | |||
1275 | { | 1485 | { |
1276 | struct sbp2_logical_unit *lu = sdev->hostdata; | 1486 | struct sbp2_logical_unit *lu = sdev->hostdata; |
1277 | 1487 | ||
1488 | /* (Re-)Adding logical units via the SCSI stack is not supported. */ | ||
1489 | if (!lu) | ||
1490 | return -ENOSYS; | ||
1491 | |||
1278 | sdev->allow_restart = 1; | 1492 | sdev->allow_restart = 1; |
1279 | 1493 | ||
1280 | /* | 1494 | /* |
@@ -1319,7 +1533,7 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd) | |||
1319 | { | 1533 | { |
1320 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | 1534 | struct sbp2_logical_unit *lu = cmd->device->hostdata; |
1321 | 1535 | ||
1322 | fw_notify("sbp2_scsi_abort\n"); | 1536 | fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id); |
1323 | sbp2_agent_reset(lu); | 1537 | sbp2_agent_reset(lu); |
1324 | sbp2_cancel_orbs(lu); | 1538 | sbp2_cancel_orbs(lu); |
1325 | 1539 | ||
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c index 172c1867e9aa..e47bb040197a 100644 --- a/drivers/firewire/fw-topology.c +++ b/drivers/firewire/fw-topology.c | |||
@@ -383,6 +383,7 @@ void fw_destroy_nodes(struct fw_card *card) | |||
383 | card->color++; | 383 | card->color++; |
384 | if (card->local_node != NULL) | 384 | if (card->local_node != NULL) |
385 | for_each_fw_node(card, card->local_node, report_lost_node); | 385 | for_each_fw_node(card, card->local_node, report_lost_node); |
386 | card->local_node = NULL; | ||
386 | spin_unlock_irqrestore(&card->lock, flags); | 387 | spin_unlock_irqrestore(&card->lock, flags); |
387 | } | 388 | } |
388 | 389 | ||
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index fa7967b57408..09cb72870454 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/dma-mapping.h> | 27 | #include <linux/dma-mapping.h> |
28 | #include <linux/firewire-constants.h> | 28 | #include <linux/firewire-constants.h> |
29 | #include <asm/atomic.h> | ||
29 | 30 | ||
30 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) | 31 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) |
31 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) | 32 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) |
@@ -219,6 +220,7 @@ extern struct bus_type fw_bus_type; | |||
219 | struct fw_card { | 220 | struct fw_card { |
220 | const struct fw_card_driver *driver; | 221 | const struct fw_card_driver *driver; |
221 | struct device *device; | 222 | struct device *device; |
223 | atomic_t device_count; | ||
222 | struct kref kref; | 224 | struct kref kref; |
223 | 225 | ||
224 | int node_id; | 226 | int node_id; |
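struct fw_card now carries an atomic_t device_count, and the fw-sbp2.c hunks pair a fw_device_get() in sbp2_probe() with a fw_device_put() in sbp2_release_target(). The helpers themselves live in fw-device.c and are not part of this diff; the sketch below only illustrates the general pattern such a counter enables, keeping a parent object around until the last child reference is dropped. Every toy_* name is a stand-in invented for this sketch, not the actual firewire API.

/* Generic child-count pattern (illustration only, not fw-device.c). */
#include <stdatomic.h>
#include <stdio.h>

struct toy_card {
	atomic_int device_count;
};

static void toy_device_get(struct toy_card *card)
{
	atomic_fetch_add(&card->device_count, 1);
}

static void toy_device_put(struct toy_card *card)
{
	if (atomic_fetch_sub(&card->device_count, 1) == 1)
		printf("last device reference gone, card may be released\n");
}

int main(void)
{
	struct toy_card card = { .device_count = 1 };  /* initial reference */

	toy_device_get(&card);   /* probe takes a reference on the device   */
	toy_device_put(&card);   /* release_target drops it again           */
	toy_device_put(&card);   /* final put releases the card's resources */
	return 0;
}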
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 310e497b5838..c8d0e8715997 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c | |||
@@ -670,8 +670,8 @@ static void cdrom_buffer_sectors (ide_drive_t *drive, unsigned long sector, | |||
670 | * and attempt to recover if there are problems. Returns 0 if everything's | 670 | * and attempt to recover if there are problems. Returns 0 if everything's |
671 | * ok; nonzero if the request has been terminated. | 671 | * ok; nonzero if the request has been terminated. |
672 | */ | 672 | */ |
673 | static | 673 | static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq, |
674 | int ide_cd_check_ireason(ide_drive_t *drive, int len, int ireason, int rw) | 674 | int len, int ireason, int rw) |
675 | { | 675 | { |
676 | /* | 676 | /* |
677 | * ireason == 0: the drive wants to receive data from us | 677 | * ireason == 0: the drive wants to receive data from us |
@@ -701,6 +701,9 @@ int ide_cd_check_ireason(ide_drive_t *drive, int len, int ireason, int rw) | |||
701 | drive->name, __FUNCTION__, ireason); | 701 | drive->name, __FUNCTION__, ireason); |
702 | } | 702 | } |
703 | 703 | ||
704 | if (rq->cmd_type == REQ_TYPE_ATA_PC) | ||
705 | rq->cmd_flags |= REQ_FAILED; | ||
706 | |||
704 | cdrom_end_request(drive, 0); | 707 | cdrom_end_request(drive, 0); |
705 | return -1; | 708 | return -1; |
706 | } | 709 | } |
@@ -1071,11 +1074,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
1071 | /* | 1074 | /* |
1072 | * check which way to transfer data | 1075 | * check which way to transfer data |
1073 | */ | 1076 | */ |
1074 | if (blk_fs_request(rq) || blk_pc_request(rq)) { | 1077 | if (ide_cd_check_ireason(drive, rq, len, ireason, write)) |
1075 | if (ide_cd_check_ireason(drive, len, ireason, write)) | 1078 | return ide_stopped; |
1076 | return ide_stopped; | ||
1077 | 1079 | ||
1078 | if (blk_fs_request(rq) && write == 0) { | 1080 | if (blk_fs_request(rq)) { |
1081 | if (write == 0) { | ||
1079 | int nskip; | 1082 | int nskip; |
1080 | 1083 | ||
1081 | if (ide_cd_check_transfer_size(drive, len)) { | 1084 | if (ide_cd_check_transfer_size(drive, len)) { |
@@ -1101,16 +1104,9 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
1101 | if (ireason == 0) { | 1104 | if (ireason == 0) { |
1102 | write = 1; | 1105 | write = 1; |
1103 | xferfunc = HWIF(drive)->atapi_output_bytes; | 1106 | xferfunc = HWIF(drive)->atapi_output_bytes; |
1104 | } else if (ireason == 2 || (ireason == 1 && | 1107 | } else { |
1105 | (blk_fs_request(rq) || blk_pc_request(rq)))) { | ||
1106 | write = 0; | 1108 | write = 0; |
1107 | xferfunc = HWIF(drive)->atapi_input_bytes; | 1109 | xferfunc = HWIF(drive)->atapi_input_bytes; |
1108 | } else { | ||
1109 | printk(KERN_ERR "%s: %s: The drive " | ||
1110 | "appears confused (ireason = 0x%02x). " | ||
1111 | "Trying to recover by ending request.\n", | ||
1112 | drive->name, __FUNCTION__, ireason); | ||
1113 | goto end_request; | ||
1114 | } | 1110 | } |
1115 | 1111 | ||
1116 | /* | 1112 | /* |
@@ -1182,11 +1178,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
1182 | else | 1178 | else |
1183 | rq->data += blen; | 1179 | rq->data += blen; |
1184 | } | 1180 | } |
1181 | if (!write && blk_sense_request(rq)) | ||
1182 | rq->sense_len += blen; | ||
1185 | } | 1183 | } |
1186 | 1184 | ||
1187 | if (write && blk_sense_request(rq)) | ||
1188 | rq->sense_len += thislen; | ||
1189 | |||
1190 | /* | 1185 | /* |
1191 | * pad, if necessary | 1186 | * pad, if necessary |
1192 | */ | 1187 | */ |
@@ -1931,6 +1926,7 @@ static const struct cd_list_entry ide_cd_quirks_list[] = { | |||
1931 | { "MATSHITADVD-ROM SR-8186", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1926 | { "MATSHITADVD-ROM SR-8186", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, |
1932 | { "MATSHITADVD-ROM SR-8176", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1927 | { "MATSHITADVD-ROM SR-8176", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, |
1933 | { "MATSHITADVD-ROM SR-8174", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1928 | { "MATSHITADVD-ROM SR-8174", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, |
1929 | { "Optiarc DVD RW AD-5200A", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | ||
1934 | { NULL, NULL, 0 } | 1930 | { NULL, NULL, 0 } |
1935 | }; | 1931 | }; |
1936 | 1932 | ||
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 8f5bed471050..39501d130256 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c | |||
@@ -867,7 +867,7 @@ static void idedisk_setup (ide_drive_t *drive) | |||
867 | 867 | ||
868 | /* Only print cache size when it was specified */ | 868 | /* Only print cache size when it was specified */ |
869 | if (id->buf_size) | 869 | if (id->buf_size) |
870 | printk (" w/%dKiB Cache", id->buf_size/2); | 870 | printk(KERN_CONT " w/%dKiB Cache", id->buf_size / 2); |
871 | 871 | ||
872 | printk(KERN_CONT ", CHS=%d/%d/%d\n", | 872 | printk(KERN_CONT ", CHS=%d/%d/%d\n", |
873 | drive->bios_cyl, drive->bios_head, drive->bios_sect); | 873 | drive->bios_cyl, drive->bios_head, drive->bios_sect); |
@@ -949,7 +949,8 @@ static void ide_device_shutdown(ide_drive_t *drive) | |||
949 | return; | 949 | return; |
950 | } | 950 | } |
951 | 951 | ||
952 | printk("Shutdown: %s\n", drive->name); | 952 | printk(KERN_INFO "Shutdown: %s\n", drive->name); |
953 | |||
953 | drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND); | 954 | drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND); |
954 | } | 955 | } |
955 | 956 | ||
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index d0e7b537353e..2de99e4be5c9 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c | |||
@@ -1,9 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * IDE DMA support (including IDE PCI BM-DMA). | ||
3 | * | ||
2 | * Copyright (C) 1995-1998 Mark Lord | 4 | * Copyright (C) 1995-1998 Mark Lord |
3 | * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> | 5 | * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> |
4 | * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz | 6 | * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz |
5 | * | 7 | * |
6 | * May be copied or modified under the terms of the GNU General Public License | 8 | * May be copied or modified under the terms of the GNU General Public License |
9 | * | ||
10 | * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies). | ||
7 | */ | 11 | */ |
8 | 12 | ||
9 | /* | 13 | /* |
@@ -11,49 +15,6 @@ | |||
11 | */ | 15 | */ |
12 | 16 | ||
13 | /* | 17 | /* |
14 | * This module provides support for the bus-master IDE DMA functions | ||
15 | * of various PCI chipsets, including the Intel PIIX (i82371FB for | ||
16 | * the 430 FX chipset), the PIIX3 (i82371SB for the 430 HX/VX and | ||
17 | * 440 chipsets), and the PIIX4 (i82371AB for the 430 TX chipset) | ||
18 | * ("PIIX" stands for "PCI ISA IDE Xcellerator"). | ||
19 | * | ||
20 | * Pretty much the same code works for other IDE PCI bus-mastering chipsets. | ||
21 | * | ||
22 | * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies). | ||
23 | * | ||
24 | * By default, DMA support is prepared for use, but is currently enabled only | ||
25 | * for drives which already have DMA enabled (UltraDMA or mode 2 multi/single), | ||
26 | * or which are recognized as "good" (see table below). Drives with only mode0 | ||
27 | * or mode1 (multi/single) DMA should also work with this chipset/driver | ||
28 | * (eg. MC2112A) but are not enabled by default. | ||
29 | * | ||
30 | * Use "hdparm -i" to view modes supported by a given drive. | ||
31 | * | ||
32 | * The hdparm-3.5 (or later) utility can be used for manually enabling/disabling | ||
33 | * DMA support, but must be (re-)compiled against this kernel version or later. | ||
34 | * | ||
35 | * To enable DMA, use "hdparm -d1 /dev/hd?" on a per-drive basis after booting. | ||
36 | * If problems arise, ide.c will disable DMA operation after a few retries. | ||
37 | * This error recovery mechanism works and has been extremely well exercised. | ||
38 | * | ||
39 | * IDE drives, depending on their vintage, may support several different modes | ||
40 | * of DMA operation. The boot-time modes are indicated with a "*" in | ||
41 | * the "hdparm -i" listing, and can be changed with *knowledgeable* use of | ||
42 | * the "hdparm -X" feature. There is seldom a need to do this, as drives | ||
43 | * normally power-up with their "best" PIO/DMA modes enabled. | ||
44 | * | ||
45 | * Testing has been done with a rather extensive number of drives, | ||
46 | * with Quantum & Western Digital models generally outperforming the pack, | ||
47 | * and Fujitsu & Conner (and some Seagate which are really Conner) drives | ||
48 | * showing more lackluster throughput. | ||
49 | * | ||
50 | * Keep an eye on /var/adm/messages for "DMA disabled" messages. | ||
51 | * | ||
52 | * Some people have reported trouble with Intel Zappa motherboards. | ||
53 | * This can be fixed by upgrading the AMI BIOS to version 1.00.04.BS0, | ||
54 | * available from ftp://ftp.intel.com/pub/bios/10004bs0.exe | ||
55 | * (thanks to Glen Morrell <glen@spin.Stanford.edu> for researching this). | ||
56 | * | ||
57 | * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for | 18 | * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for |
58 | * fixing the problem with the BIOS on some Acer motherboards. | 19 | * fixing the problem with the BIOS on some Acer motherboards. |
59 | * | 20 | * |
@@ -65,11 +26,6 @@ | |||
65 | * | 26 | * |
66 | * Most importantly, thanks to Robert Bringman <rob@mars.trion.com> | 27 | * Most importantly, thanks to Robert Bringman <rob@mars.trion.com> |
67 | * for supplying a Promise UDMA board & WD UDMA drive for this work! | 28 | * for supplying a Promise UDMA board & WD UDMA drive for this work! |
68 | * | ||
69 | * And, yes, Intel Zappa boards really *do* use both PIIX IDE ports. | ||
70 | * | ||
71 | * ATA-66/100 and recovery functions, I forgot the rest...... | ||
72 | * | ||
73 | */ | 29 | */ |
74 | 30 | ||
75 | #include <linux/module.h> | 31 | #include <linux/module.h> |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 4a2cb2868226..194ecb0049eb 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -756,7 +756,8 @@ static int ide_probe_port(ide_hwif_t *hwif) | |||
756 | 756 | ||
757 | BUG_ON(hwif->present); | 757 | BUG_ON(hwif->present); |
758 | 758 | ||
759 | if (hwif->noprobe) | 759 | if (hwif->noprobe || |
760 | (hwif->drives[0].noprobe && hwif->drives[1].noprobe)) | ||
760 | return -EACCES; | 761 | return -EACCES; |
761 | 762 | ||
762 | /* | 763 | /* |
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 0598ecfd5f37..43e0e0557776 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -3765,6 +3765,11 @@ static int ide_tape_probe(ide_drive_t *drive) | |||
3765 | g->fops = &idetape_block_ops; | 3765 | g->fops = &idetape_block_ops; |
3766 | ide_register_region(g); | 3766 | ide_register_region(g); |
3767 | 3767 | ||
3768 | printk(KERN_WARNING "It is possible that this driver does not have any" | ||
3769 | " users anymore and, as a result, it will be REMOVED soon." | ||
3770 | " Please notify Bart <bzolnier@gmail.com> or Boris" | ||
3771 | " <petkovbb@gmail.com> in case you still need it.\n"); | ||
3772 | |||
3768 | return 0; | 3773 | return 0; |
3769 | 3774 | ||
3770 | out_free_tape: | 3775 | out_free_tape: |
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 477833f0daf5..fa16bc30bbc9 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c | |||
@@ -590,11 +590,6 @@ void ide_unregister(unsigned int index, int init_default, int restore) | |||
590 | hwif->extra_ports = 0; | 590 | hwif->extra_ports = 0; |
591 | } | 591 | } |
592 | 592 | ||
593 | /* | ||
594 | * Note that we only release the standard ports, | ||
595 | * and do not even try to handle any extra ports | ||
596 | * allocated for weird IDE interface chipsets. | ||
597 | */ | ||
598 | ide_hwif_release_regions(hwif); | 593 | ide_hwif_release_regions(hwif); |
599 | 594 | ||
600 | /* copy original settings */ | 595 | /* copy original settings */ |
@@ -1036,10 +1031,9 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device | |||
1036 | drive->nice1 = (arg >> IDE_NICE_1) & 1; | 1031 | drive->nice1 = (arg >> IDE_NICE_1) & 1; |
1037 | return 0; | 1032 | return 0; |
1038 | case HDIO_DRIVE_RESET: | 1033 | case HDIO_DRIVE_RESET: |
1039 | { | 1034 | if (!capable(CAP_SYS_ADMIN)) |
1040 | unsigned long flags; | 1035 | return -EACCES; |
1041 | if (!capable(CAP_SYS_ADMIN)) return -EACCES; | 1036 | |
1042 | |||
1043 | /* | 1037 | /* |
1044 | * Abort the current command on the | 1038 | * Abort the current command on the |
1045 | * group if there is one, taking | 1039 | * group if there is one, taking |
@@ -1058,17 +1052,15 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device | |||
1058 | ide_abort(drive, "drive reset"); | 1052 | ide_abort(drive, "drive reset"); |
1059 | 1053 | ||
1060 | BUG_ON(HWGROUP(drive)->handler); | 1054 | BUG_ON(HWGROUP(drive)->handler); |
1061 | 1055 | ||
1062 | /* Ensure nothing gets queued after we | 1056 | /* Ensure nothing gets queued after we |
1063 | drop the lock. Reset will clear the busy */ | 1057 | drop the lock. Reset will clear the busy */ |
1064 | 1058 | ||
1065 | HWGROUP(drive)->busy = 1; | 1059 | HWGROUP(drive)->busy = 1; |
1066 | spin_unlock_irqrestore(&ide_lock, flags); | 1060 | spin_unlock_irqrestore(&ide_lock, flags); |
1067 | (void) ide_do_reset(drive); | 1061 | (void) ide_do_reset(drive); |
1068 | 1062 | ||
1069 | return 0; | 1063 | return 0; |
1070 | } | ||
1071 | |||
1072 | case HDIO_GET_BUSSTATE: | 1064 | case HDIO_GET_BUSSTATE: |
1073 | if (!capable(CAP_SYS_ADMIN)) | 1065 | if (!capable(CAP_SYS_ADMIN)) |
1074 | return -EACCES; | 1066 | return -EACCES; |
@@ -1449,7 +1441,7 @@ static int __init ide_setup(char *s) | |||
1449 | 1441 | ||
1450 | case -1: /* "noprobe" */ | 1442 | case -1: /* "noprobe" */ |
1451 | hwif->noprobe = 1; | 1443 | hwif->noprobe = 1; |
1452 | goto done; | 1444 | goto obsolete_option; |
1453 | 1445 | ||
1454 | case 1: /* base */ | 1446 | case 1: /* base */ |
1455 | vals[1] = vals[0] + 0x206; /* default ctl */ | 1447 | vals[1] = vals[0] + 0x206; /* default ctl */ |
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c index bba29df5f21d..2f4f47ad602f 100644 --- a/drivers/ide/legacy/qd65xx.c +++ b/drivers/ide/legacy/qd65xx.c | |||
@@ -334,43 +334,6 @@ static void __init qd6580_port_init_devs(ide_hwif_t *hwif) | |||
334 | hwif->drives[1].drive_data = t2; | 334 | hwif->drives[1].drive_data = t2; |
335 | } | 335 | } |
336 | 336 | ||
337 | /* | ||
338 | * qd_unsetup: | ||
339 | * | ||
340 | * called to unsetup an ata channel : back to default values, unlinks tuning | ||
341 | */ | ||
342 | /* | ||
343 | static void __exit qd_unsetup(ide_hwif_t *hwif) | ||
344 | { | ||
345 | u8 config = hwif->config_data; | ||
346 | int base = hwif->select_data; | ||
347 | void *set_pio_mode = (void *)hwif->set_pio_mode; | ||
348 | |||
349 | if (hwif->chipset != ide_qd65xx) | ||
350 | return; | ||
351 | |||
352 | printk(KERN_NOTICE "%s: back to defaults\n", hwif->name); | ||
353 | |||
354 | hwif->selectproc = NULL; | ||
355 | hwif->set_pio_mode = NULL; | ||
356 | |||
357 | if (set_pio_mode == (void *)qd6500_set_pio_mode) { | ||
358 | // will do it for both | ||
359 | outb(QD6500_DEF_DATA, QD_TIMREG(&hwif->drives[0])); | ||
360 | } else if (set_pio_mode == (void *)qd6580_set_pio_mode) { | ||
361 | if (QD_CONTROL(hwif) & QD_CONTR_SEC_DISABLED) { | ||
362 | outb(QD6580_DEF_DATA, QD_TIMREG(&hwif->drives[0])); | ||
363 | outb(QD6580_DEF_DATA2, QD_TIMREG(&hwif->drives[1])); | ||
364 | } else { | ||
365 | outb(hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA, QD_TIMREG(&hwif->drives[0])); | ||
366 | } | ||
367 | } else { | ||
368 | printk(KERN_WARNING "Unknown qd65xx tuning fonction !\n"); | ||
369 | printk(KERN_WARNING "keeping settings !\n"); | ||
370 | } | ||
371 | } | ||
372 | */ | ||
373 | |||
374 | static const struct ide_port_info qd65xx_port_info __initdata = { | 337 | static const struct ide_port_info qd65xx_port_info __initdata = { |
375 | .chipset = ide_qd65xx, | 338 | .chipset = ide_qd65xx, |
376 | .host_flags = IDE_HFLAG_IO_32BIT | | 339 | .host_flags = IDE_HFLAG_IO_32BIT | |
@@ -444,6 +407,8 @@ static int __init qd_probe(int base) | |||
444 | printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n", | 407 | printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n", |
445 | config, control, QD_ID3); | 408 | config, control, QD_ID3); |
446 | 409 | ||
410 | outb(QD_DEF_CONTR, QD_CONTROL_PORT); | ||
411 | |||
447 | if (control & QD_CONTR_SEC_DISABLED) { | 412 | if (control & QD_CONTR_SEC_DISABLED) { |
448 | /* secondary disabled */ | 413 | /* secondary disabled */ |
449 | 414 | ||
@@ -460,8 +425,6 @@ static int __init qd_probe(int base) | |||
460 | 425 | ||
461 | ide_device_add(idx, &qd65xx_port_info); | 426 | ide_device_add(idx, &qd65xx_port_info); |
462 | 427 | ||
463 | outb(QD_DEF_CONTR, QD_CONTROL_PORT); | ||
464 | |||
465 | return 1; | 428 | return 1; |
466 | } else { | 429 | } else { |
467 | ide_hwif_t *mate; | 430 | ide_hwif_t *mate; |
@@ -487,8 +450,6 @@ static int __init qd_probe(int base) | |||
487 | 450 | ||
488 | ide_device_add(idx, &qd65xx_port_info); | 451 | ide_device_add(idx, &qd65xx_port_info); |
489 | 452 | ||
490 | outb(QD_DEF_CONTR, QD_CONTROL_PORT); | ||
491 | |||
492 | return 0; /* no other qd65xx possible */ | 453 | return 0; /* no other qd65xx possible */ |
493 | } | 454 | } |
494 | } | 455 | } |
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c index bd24dad3cfc6..ec667982809c 100644 --- a/drivers/ide/pci/cmd640.c +++ b/drivers/ide/pci/cmd640.c | |||
@@ -787,7 +787,8 @@ static int __init cmd640x_init(void) | |||
787 | /* | 787 | /* |
788 | * Try to enable the secondary interface, if not already enabled | 788 | * Try to enable the secondary interface, if not already enabled |
789 | */ | 789 | */ |
790 | if (cmd_hwif1->noprobe) { | 790 | if (cmd_hwif1->noprobe || |
791 | (cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe)) { | ||
791 | port2 = "not probed"; | 792 | port2 = "not probed"; |
792 | } else { | 793 | } else { |
793 | b = get_cmd640_reg(CNTRL); | 794 | b = get_cmd640_reg(CNTRL); |
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c index d0f7bb8b8adf..6357bb6269ab 100644 --- a/drivers/ide/pci/hpt366.c +++ b/drivers/ide/pci/hpt366.c | |||
@@ -1570,10 +1570,12 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic | |||
1570 | if (rev < 3) | 1570 | if (rev < 3) |
1571 | info = &hpt36x; | 1571 | info = &hpt36x; |
1572 | else { | 1572 | else { |
1573 | static const struct hpt_info *hpt37x_info[] = | 1573 | switch (min_t(u8, rev, 6)) { |
1574 | { &hpt370, &hpt370a, &hpt372, &hpt372n }; | 1574 | case 3: info = &hpt370; break; |
1575 | 1575 | case 4: info = &hpt370a; break; | |
1576 | info = hpt37x_info[min_t(u8, rev, 6) - 3]; | 1576 | case 5: info = &hpt372; break; |
1577 | case 6: info = &hpt372n; break; | ||
1578 | } | ||
1577 | idx++; | 1579 | idx++; |
1578 | } | 1580 | } |
1579 | break; | 1581 | break; |
@@ -1626,7 +1628,7 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic | |||
1626 | return ide_setup_pci_device(dev, &d); | 1628 | return ide_setup_pci_device(dev, &d); |
1627 | } | 1629 | } |
1628 | 1630 | ||
1629 | static const struct pci_device_id hpt366_pci_tbl[] = { | 1631 | static const struct pci_device_id hpt366_pci_tbl[] __devinitconst = { |
1630 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 }, | 1632 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 }, |
1631 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 }, | 1633 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 }, |
1632 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 }, | 1634 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 }, |
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 28e155a9e2a5..9e2b1964d71a 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c | |||
@@ -183,6 +183,9 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | |||
183 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. | 183 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. |
184 | * Don't use this with devices which don't have this bug. | 184 | * Don't use this with devices which don't have this bug. |
185 | * | 185 | * |
186 | * - delay inquiry | ||
187 | * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry. | ||
188 | * | ||
186 | * - override internal blacklist | 189 | * - override internal blacklist |
187 | * Instead of adding to the built-in blacklist, use only the workarounds | 190 | * Instead of adding to the built-in blacklist, use only the workarounds |
188 | * specified in the module load parameter. | 191 | * specified in the module load parameter. |
@@ -195,6 +198,7 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" | |||
195 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) | 198 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) |
196 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) | 199 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) |
197 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) | 200 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) |
201 | ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY) | ||
198 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) | 202 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) |
199 | ", or a combination)"); | 203 | ", or a combination)"); |
200 | 204 | ||
@@ -357,6 +361,11 @@ static const struct { | |||
357 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | | 361 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | |
358 | SBP2_WORKAROUND_MODE_SENSE_8, | 362 | SBP2_WORKAROUND_MODE_SENSE_8, |
359 | }, | 363 | }, |
364 | /* DViCO Momobay FX-3A with TSB42AA9A bridge */ { | ||
365 | .firmware_revision = 0x002800, | ||
366 | .model_id = 0x000000, | ||
367 | .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY, | ||
368 | }, | ||
360 | /* Initio bridges, actually only needed for some older ones */ { | 369 | /* Initio bridges, actually only needed for some older ones */ { |
361 | .firmware_revision = 0x000200, | 370 | .firmware_revision = 0x000200, |
362 | .model_id = SBP2_ROM_VALUE_WILDCARD, | 371 | .model_id = SBP2_ROM_VALUE_WILDCARD, |
@@ -914,6 +923,9 @@ static int sbp2_start_device(struct sbp2_lu *lu) | |||
914 | sbp2_agent_reset(lu, 1); | 923 | sbp2_agent_reset(lu, 1); |
915 | sbp2_max_speed_and_size(lu); | 924 | sbp2_max_speed_and_size(lu); |
916 | 925 | ||
926 | if (lu->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) | ||
927 | ssleep(SBP2_INQUIRY_DELAY); | ||
928 | |||
917 | error = scsi_add_device(lu->shost, 0, lu->ud->id, 0); | 929 | error = scsi_add_device(lu->shost, 0, lu->ud->id, 0); |
918 | if (error) { | 930 | if (error) { |
919 | SBP2_ERR("scsi_add_device failed"); | 931 | SBP2_ERR("scsi_add_device failed"); |
@@ -1962,6 +1974,9 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev) | |||
1962 | { | 1974 | { |
1963 | struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0]; | 1975 | struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0]; |
1964 | 1976 | ||
1977 | if (sdev->lun != 0 || sdev->id != lu->ud->id || sdev->channel != 0) | ||
1978 | return -ENODEV; | ||
1979 | |||
1965 | lu->sdev = sdev; | 1980 | lu->sdev = sdev; |
1966 | sdev->allow_restart = 1; | 1981 | sdev->allow_restart = 1; |
1967 | 1982 | ||
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index d2ecb0d8a1bb..80d8e097b065 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h | |||
@@ -343,6 +343,8 @@ enum sbp2lu_state_types { | |||
343 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 | 343 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 |
344 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 | 344 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 |
345 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 | 345 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 |
346 | #define SBP2_WORKAROUND_DELAY_INQUIRY 0x10 | ||
347 | #define SBP2_INQUIRY_DELAY 12 | ||
346 | #define SBP2_WORKAROUND_OVERRIDE 0x100 | 348 | #define SBP2_WORKAROUND_OVERRIDE 0x100 |
347 | 349 | ||
348 | #endif /* SBP2_H */ | 350 | #endif /* SBP2_H */ |
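The new SBP2_WORKAROUND_DELAY_INQUIRY bit (0x10) joins the existing workaround flags, and the module parameter help above states that the flags may be combined. A small standalone example of computing such a combined value follows; the macro values are copied from sbp2.h above, and this is plain arithmetic rather than driver code.

/* Combining sbp2 workaround flags for the "workarounds" module parameter. */
#include <stdio.h>

#define SBP2_WORKAROUND_INQUIRY_36	0x2
#define SBP2_WORKAROUND_DELAY_INQUIRY	0x10

int main(void)
{
	unsigned int w = SBP2_WORKAROUND_INQUIRY_36 |
			 SBP2_WORKAROUND_DELAY_INQUIRY;

	printf("workarounds=%u (0x%x)\n", w, w);   /* 18 (0x12) */
	return 0;
}

Loading the driver with the workarounds parameter set to that value requests both the 36-byte INQUIRY and the post-login delay, in addition to the built-in blacklist unless the override flag is also given.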
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c index 73bfd1656f86..b8797c66676d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_mem.c +++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c | |||
@@ -136,14 +136,8 @@ int build_phys_page_list(struct ib_phys_buf *buffer_list, | |||
136 | 136 | ||
137 | /* Find largest page shift we can use to cover buffers */ | 137 | /* Find largest page shift we can use to cover buffers */ |
138 | for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift)) | 138 | for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift)) |
139 | if (num_phys_buf > 1) { | 139 | if ((1ULL << *shift) & mask) |
140 | if ((1ULL << *shift) & mask) | 140 | break; |
141 | break; | ||
142 | } else | ||
143 | if (1ULL << *shift >= | ||
144 | buffer_list[0].size + | ||
145 | (buffer_list[0].addr & ((1ULL << *shift) - 1))) | ||
146 | break; | ||
147 | 141 | ||
148 | buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1); | 142 | buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1); |
149 | buffer_list[0].addr &= ~0ull << *shift; | 143 | buffer_list[0].addr &= ~0ull << *shift; |
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 7f8853b44ee1..b2112f5a422f 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c | |||
@@ -567,12 +567,12 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i | |||
567 | 567 | ||
568 | /* Init the adapter */ | 568 | /* Init the adapter */ |
569 | nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev); | 569 | nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev); |
570 | nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval; | ||
571 | if (!nesdev->nesadapter) { | 570 | if (!nesdev->nesadapter) { |
572 | printk(KERN_ERR PFX "Unable to initialize adapter.\n"); | 571 | printk(KERN_ERR PFX "Unable to initialize adapter.\n"); |
573 | ret = -ENOMEM; | 572 | ret = -ENOMEM; |
574 | goto bail5; | 573 | goto bail5; |
575 | } | 574 | } |
575 | nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval; | ||
576 | 576 | ||
577 | /* nesdev->base_doorbell_index = | 577 | /* nesdev->base_doorbell_index = |
578 | nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */ | 578 | nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */ |
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index fd57e8a1582f..a48b288618ec 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h | |||
@@ -285,6 +285,21 @@ struct nes_device { | |||
285 | }; | 285 | }; |
286 | 286 | ||
287 | 287 | ||
288 | static inline __le32 get_crc_value(struct nes_v4_quad *nes_quad) | ||
289 | { | ||
290 | u32 crc_value; | ||
291 | crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad)); | ||
292 | |||
293 | /* | ||
294 | * With commit ef19454b ("[LIB] crc32c: Keep intermediate crc | ||
295 | * state in cpu order"), behavior of crc32c changes on | ||
296 | * big-endian platforms. Our algorithm expects the previous | ||
297 | * behavior; otherwise we have RDMA connection establishment | ||
298 | * issues on big-endian. | ||
299 | */ | ||
300 | return cpu_to_le32(crc_value); | ||
301 | } | ||
302 | |||
288 | static inline void | 303 | static inline void |
289 | set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value) | 304 | set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value) |
290 | { | 305 | { |
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index bd5cfeaac203..39adb267fb15 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -370,11 +370,11 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
370 | int ret = 0; | 370 | int ret = 0; |
371 | u32 was_timer_set; | 371 | u32 was_timer_set; |
372 | 372 | ||
373 | if (!cm_node) | ||
374 | return -EINVAL; | ||
373 | new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); | 375 | new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); |
374 | if (!new_send) | 376 | if (!new_send) |
375 | return -1; | 377 | return -1; |
376 | if (!cm_node) | ||
377 | return -EINVAL; | ||
378 | 378 | ||
379 | /* new_send->timetosend = currenttime */ | 379 | /* new_send->timetosend = currenttime */ |
380 | new_send->retrycount = NES_DEFAULT_RETRYS; | 380 | new_send->retrycount = NES_DEFAULT_RETRYS; |
@@ -947,6 +947,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
947 | nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); | 947 | nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); |
948 | 948 | ||
949 | kfree(listener); | 949 | kfree(listener); |
950 | listener = NULL; | ||
950 | ret = 0; | 951 | ret = 0; |
951 | cm_listens_destroyed++; | 952 | cm_listens_destroyed++; |
952 | } else { | 953 | } else { |
@@ -2319,6 +2320,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2319 | struct iw_cm_event cm_event; | 2320 | struct iw_cm_event cm_event; |
2320 | struct nes_hw_qp_wqe *wqe; | 2321 | struct nes_hw_qp_wqe *wqe; |
2321 | struct nes_v4_quad nes_quad; | 2322 | struct nes_v4_quad nes_quad; |
2323 | u32 crc_value; | ||
2322 | int ret; | 2324 | int ret; |
2323 | 2325 | ||
2324 | ibqp = nes_get_qp(cm_id->device, conn_param->qpn); | 2326 | ibqp = nes_get_qp(cm_id->device, conn_param->qpn); |
@@ -2435,8 +2437,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2435 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; | 2437 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; |
2436 | 2438 | ||
2437 | /* Produce hash key */ | 2439 | /* Produce hash key */ |
2438 | nesqp->hte_index = cpu_to_be32( | 2440 | crc_value = get_crc_value(&nes_quad); |
2439 | crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff); | 2441 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); |
2440 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n", | 2442 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n", |
2441 | nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask); | 2443 | nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask); |
2442 | 2444 | ||
@@ -2750,6 +2752,7 @@ void cm_event_connected(struct nes_cm_event *event) | |||
2750 | struct iw_cm_event cm_event; | 2752 | struct iw_cm_event cm_event; |
2751 | struct nes_hw_qp_wqe *wqe; | 2753 | struct nes_hw_qp_wqe *wqe; |
2752 | struct nes_v4_quad nes_quad; | 2754 | struct nes_v4_quad nes_quad; |
2755 | u32 crc_value; | ||
2753 | int ret; | 2756 | int ret; |
2754 | 2757 | ||
2755 | /* get all our handles */ | 2758 | /* get all our handles */ |
@@ -2827,8 +2830,8 @@ void cm_event_connected(struct nes_cm_event *event) | |||
2827 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; | 2830 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; |
2828 | 2831 | ||
2829 | /* Produce hash key */ | 2832 | /* Produce hash key */ |
2830 | nesqp->hte_index = cpu_to_be32( | 2833 | crc_value = get_crc_value(&nes_quad); |
2831 | crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff); | 2834 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); |
2832 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n", | 2835 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n", |
2833 | nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); | 2836 | nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); |
2834 | 2837 | ||
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 7c4c0fbf0abd..49e53e4c1ebe 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -156,15 +156,14 @@ static void nes_nic_tune_timer(struct nes_device *nesdev) | |||
156 | 156 | ||
157 | spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); | 157 | spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); |
158 | 158 | ||
159 | if (shared_timer->cq_count_old < cq_count) { | 159 | if (shared_timer->cq_count_old <= cq_count) |
160 | if (cq_count > shared_timer->threshold_low) | 160 | shared_timer->cq_direction_downward = 0; |
161 | shared_timer->cq_direction_downward=0; | 161 | else |
162 | } | ||
163 | if (shared_timer->cq_count_old >= cq_count) | ||
164 | shared_timer->cq_direction_downward++; | 162 | shared_timer->cq_direction_downward++; |
165 | shared_timer->cq_count_old = cq_count; | 163 | shared_timer->cq_count_old = cq_count; |
166 | if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) { | 164 | if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) { |
167 | if (cq_count <= shared_timer->threshold_low) { | 165 | if (cq_count <= shared_timer->threshold_low && |
166 | shared_timer->threshold_low > 4) { | ||
168 | shared_timer->threshold_low = shared_timer->threshold_low/2; | 167 | shared_timer->threshold_low = shared_timer->threshold_low/2; |
169 | shared_timer->cq_direction_downward=0; | 168 | shared_timer->cq_direction_downward=0; |
170 | nesdev->currcq_count = 0; | 169 | nesdev->currcq_count = 0; |
@@ -1728,7 +1727,6 @@ int nes_napi_isr(struct nes_device *nesdev) | |||
1728 | nesdev->int_req &= ~NES_INT_TIMER; | 1727 | nesdev->int_req &= ~NES_INT_TIMER; |
1729 | nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); | 1728 | nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); |
1730 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); | 1729 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); |
1731 | nesadapter->tune_timer.timer_in_use_old = 0; | ||
1732 | } | 1730 | } |
1733 | nesdev->deepcq_count = 0; | 1731 | nesdev->deepcq_count = 0; |
1734 | return 1; | 1732 | return 1; |
@@ -1867,7 +1865,6 @@ void nes_dpc(unsigned long param) | |||
1867 | nesdev->int_req &= ~NES_INT_TIMER; | 1865 | nesdev->int_req &= ~NES_INT_TIMER; |
1868 | nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); | 1866 | nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); |
1869 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); | 1867 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); |
1870 | nesdev->nesadapter->tune_timer.timer_in_use_old = 0; | ||
1871 | } else { | 1868 | } else { |
1872 | nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req)); | 1869 | nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req)); |
1873 | } | 1870 | } |
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index 1e10df550c9e..b7e2844f096b 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h | |||
@@ -962,7 +962,7 @@ struct nes_arp_entry { | |||
962 | #define DEFAULT_JUMBO_NES_QL_LOW 12 | 962 | #define DEFAULT_JUMBO_NES_QL_LOW 12 |
963 | #define DEFAULT_JUMBO_NES_QL_TARGET 40 | 963 | #define DEFAULT_JUMBO_NES_QL_TARGET 40 |
964 | #define DEFAULT_JUMBO_NES_QL_HIGH 128 | 964 | #define DEFAULT_JUMBO_NES_QL_HIGH 128 |
965 | #define NES_NIC_CQ_DOWNWARD_TREND 8 | 965 | #define NES_NIC_CQ_DOWNWARD_TREND 16 |
966 | 966 | ||
967 | struct nes_hw_tune_timer { | 967 | struct nes_hw_tune_timer { |
968 | //u16 cq_count; | 968 | //u16 cq_count; |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 4dafbe16e82a..a651e9d9f0ef 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -929,7 +929,7 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev, | |||
929 | NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db); | 929 | NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db); |
930 | nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n", | 930 | nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n", |
931 | nespd->mmap_db_index, nespd->pd_id); | 931 | nespd->mmap_db_index, nespd->pd_id); |
932 | if (nespd->mmap_db_index > NES_MAX_USER_DB_REGIONS) { | 932 | if (nespd->mmap_db_index >= NES_MAX_USER_DB_REGIONS) { |
933 | nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n"); | 933 | nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n"); |
934 | nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); | 934 | nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); |
935 | kfree(nespd); | 935 | kfree(nespd); |
@@ -1327,7 +1327,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, | |||
1327 | (long long unsigned int)req.user_wqe_buffers); | 1327 | (long long unsigned int)req.user_wqe_buffers); |
1328 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); | 1328 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); |
1329 | kfree(nesqp->allocated_buffer); | 1329 | kfree(nesqp->allocated_buffer); |
1330 | return ERR_PTR(-ENOMEM); | 1330 | return ERR_PTR(-EFAULT); |
1331 | } | 1331 | } |
1332 | } | 1332 | } |
1333 | 1333 | ||
@@ -1674,6 +1674,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, | |||
1674 | } | 1674 | } |
1675 | nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n", | 1675 | nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n", |
1676 | (unsigned long)req.user_cq_buffer, entries); | 1676 | (unsigned long)req.user_cq_buffer, entries); |
1677 | err = 1; | ||
1677 | list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) { | 1678 | list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) { |
1678 | if (nespbl->user_base == (unsigned long )req.user_cq_buffer) { | 1679 | if (nespbl->user_base == (unsigned long )req.user_cq_buffer) { |
1679 | list_del(&nespbl->list); | 1680 | list_del(&nespbl->list); |
@@ -1686,7 +1687,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, | |||
1686 | if (err) { | 1687 | if (err) { |
1687 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); | 1688 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); |
1688 | kfree(nescq); | 1689 | kfree(nescq); |
1689 | return ERR_PTR(err); | 1690 | return ERR_PTR(-EFAULT); |
1690 | } | 1691 | } |
1691 | 1692 | ||
1692 | pbl_entries = nespbl->pbl_size >> 3; | 1693 | pbl_entries = nespbl->pbl_size >> 3; |
@@ -1831,9 +1832,6 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, | |||
1831 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | 1832 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); |
1832 | } | 1833 | } |
1833 | } | 1834 | } |
1834 | nes_debug(NES_DBG_CQ, "iWARP CQ%u create timeout expired, major code = 0x%04X," | ||
1835 | " minor code = 0x%04X\n", | ||
1836 | nescq->hw_cq.cq_number, cqp_request->major_code, cqp_request->minor_code); | ||
1837 | if (!context) | 1835 | if (!context) |
1838 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, | 1836 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, |
1839 | nescq->hw_cq.cq_pbase); | 1837 | nescq->hw_cq.cq_pbase); |
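In the nes_create_cq() hunk above, the new err = 1 line primes a not-found flag before the list walk; presumably a matching entry clears it further down (the clearing line falls outside the hunk shown), so the later if (err) check can return -EFAULT when the user CQ buffer was never registered. A hedged kernel-style fragment of that find-or-fail idiom (names invented, not the driver code):

/* Assume failure up front; only a match clears the flag. */
int err = 1;
struct pbl_entry *pbl;

list_for_each_entry(pbl, &registered_buffers, list) {
	if (pbl->user_base == wanted_base) {
		list_del(&pbl->list);
		err = 0;		/* found: not an error after all */
		break;
	}
}
if (err)
	return ERR_PTR(-EFAULT);	/* nothing matched the request   */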
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 8b10d9f23bef..c5263d63aca3 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig | |||
@@ -42,14 +42,14 @@ config INPUT_M68K_BEEP | |||
42 | 42 | ||
43 | config INPUT_APANEL | 43 | config INPUT_APANEL |
44 | tristate "Fujitsu Lifebook Application Panel buttons" | 44 | tristate "Fujitsu Lifebook Application Panel buttons" |
45 | depends on X86 | 45 | depends on X86 && I2C && LEDS_CLASS |
46 | select I2C_I801 | ||
47 | select INPUT_POLLDEV | 46 | select INPUT_POLLDEV |
48 | select CHECK_SIGNATURE | 47 | select CHECK_SIGNATURE |
49 | help | 48 | help |
50 | Say Y here for support of the Application Panel buttons, used on | 49 | Say Y here for support of the Application Panel buttons, used on |
51 | Fujitsu Lifebook. These are attached to the mainboard through | 50 | Fujitsu Lifebook. These are attached to the mainboard through |
52 | an SMBus interface managed by the I2C Intel ICH (i801) driver. | 51 | an SMBus interface managed by the I2C Intel ICH (i801) driver, |
52 | which you should also build for this kernel. | ||
53 | 53 | ||
54 | To compile this driver as a module, choose M here: the module will | 54 | To compile this driver as a module, choose M here: the module will |
55 | be called apanel. | 55 | be called apanel. |
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c index 7993e01f9fc5..76043dedba5b 100644 --- a/drivers/isdn/hisax/hisax_fcpcipnp.c +++ b/drivers/isdn/hisax/hisax_fcpcipnp.c | |||
@@ -725,23 +725,6 @@ static int __devinit fcpcipnp_setup(struct fritz_adapter *adapter) | |||
725 | 725 | ||
726 | switch (adapter->type) { | 726 | switch (adapter->type) { |
727 | case AVM_FRITZ_PCIV2: | 727 | case AVM_FRITZ_PCIV2: |
728 | retval = request_irq(adapter->irq, fcpci2_irq, IRQF_SHARED, | ||
729 | "fcpcipnp", adapter); | ||
730 | break; | ||
731 | case AVM_FRITZ_PCI: | ||
732 | retval = request_irq(adapter->irq, fcpci_irq, IRQF_SHARED, | ||
733 | "fcpcipnp", adapter); | ||
734 | break; | ||
735 | case AVM_FRITZ_PNP: | ||
736 | retval = request_irq(adapter->irq, fcpci_irq, 0, | ||
737 | "fcpcipnp", adapter); | ||
738 | break; | ||
739 | } | ||
740 | if (retval) | ||
741 | goto err_region; | ||
742 | |||
743 | switch (adapter->type) { | ||
744 | case AVM_FRITZ_PCIV2: | ||
745 | case AVM_FRITZ_PCI: | 728 | case AVM_FRITZ_PCI: |
746 | val = inl(adapter->io); | 729 | val = inl(adapter->io); |
747 | break; | 730 | break; |
@@ -796,6 +779,23 @@ static int __devinit fcpcipnp_setup(struct fritz_adapter *adapter) | |||
796 | 779 | ||
797 | switch (adapter->type) { | 780 | switch (adapter->type) { |
798 | case AVM_FRITZ_PCIV2: | 781 | case AVM_FRITZ_PCIV2: |
782 | retval = request_irq(adapter->irq, fcpci2_irq, IRQF_SHARED, | ||
783 | "fcpcipnp", adapter); | ||
784 | break; | ||
785 | case AVM_FRITZ_PCI: | ||
786 | retval = request_irq(adapter->irq, fcpci_irq, IRQF_SHARED, | ||
787 | "fcpcipnp", adapter); | ||
788 | break; | ||
789 | case AVM_FRITZ_PNP: | ||
790 | retval = request_irq(adapter->irq, fcpci_irq, 0, | ||
791 | "fcpcipnp", adapter); | ||
792 | break; | ||
793 | } | ||
794 | if (retval) | ||
795 | goto err_region; | ||
796 | |||
797 | switch (adapter->type) { | ||
798 | case AVM_FRITZ_PCIV2: | ||
799 | fcpci2_init(adapter); | 799 | fcpci2_init(adapter); |
800 | isacsx_setup(&adapter->isac); | 800 | isacsx_setup(&adapter->isac); |
801 | break; | 801 | break; |
diff --git a/drivers/isdn/i4l/isdn_ttyfax.c b/drivers/isdn/i4l/isdn_ttyfax.c index f93de4a30355..78f7660c1d0e 100644 --- a/drivers/isdn/i4l/isdn_ttyfax.c +++ b/drivers/isdn/i4l/isdn_ttyfax.c | |||
@@ -906,7 +906,8 @@ isdn_tty_cmd_FCLASS2(char **p, modem_info * info) | |||
906 | sprintf(rs, "\r\n0-2"); | 906 | sprintf(rs, "\r\n0-2"); |
907 | isdn_tty_at_cout(rs, info); | 907 | isdn_tty_at_cout(rs, info); |
908 | } else { | 908 | } else { |
909 | if ((f->phase != ISDN_FAX_PHASE_D) || (!info->faxonline & 1)) | 909 | if ((f->phase != ISDN_FAX_PHASE_D) || |
910 | (!(info->faxonline & 1))) | ||
910 | PARSE_ERROR1; | 911 | PARSE_ERROR1; |
911 | par = isdn_getnum(p); | 912 | par = isdn_getnum(p); |
912 | if ((par < 0) || (par > 2)) | 913 | if ((par < 0) || (par > 2)) |
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index 655ef9a3f4df..a335c85a736e 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c | |||
@@ -1289,7 +1289,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card) | |||
1289 | } | 1289 | } |
1290 | break; | 1290 | break; |
1291 | case ISDN_CMD_CLREAZ: | 1291 | case ISDN_CMD_CLREAZ: |
1292 | if (!card->flags & ISDNLOOP_FLAGS_RUNNING) | 1292 | if (!(card->flags & ISDNLOOP_FLAGS_RUNNING)) |
1293 | return -ENODEV; | 1293 | return -ENODEV; |
1294 | if (card->leased) | 1294 | if (card->leased) |
1295 | break; | 1295 | break; |
@@ -1333,7 +1333,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card) | |||
1333 | } | 1333 | } |
1334 | break; | 1334 | break; |
1335 | case ISDN_CMD_SETL3: | 1335 | case ISDN_CMD_SETL3: |
1336 | if (!card->flags & ISDNLOOP_FLAGS_RUNNING) | 1336 | if (!(card->flags & ISDNLOOP_FLAGS_RUNNING)) |
1337 | return -ENODEV; | 1337 | return -ENODEV; |
1338 | return 0; | 1338 | return 0; |
1339 | default: | 1339 | default: |
@@ -1380,7 +1380,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel) | |||
1380 | isdnloop_card *card = isdnloop_findcard(id); | 1380 | isdnloop_card *card = isdnloop_findcard(id); |
1381 | 1381 | ||
1382 | if (card) { | 1382 | if (card) { |
1383 | if (!card->flags & ISDNLOOP_FLAGS_RUNNING) | 1383 | if (!(card->flags & ISDNLOOP_FLAGS_RUNNING)) |
1384 | return -ENODEV; | 1384 | return -ENODEV; |
1385 | return (isdnloop_writecmd(buf, len, 1, card)); | 1385 | return (isdnloop_writecmd(buf, len, 1, card)); |
1386 | } | 1386 | } |
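The three isdnloop changes above are operator-precedence fixes: ! binds tighter than &, so !card->flags & ISDNLOOP_FLAGS_RUNNING first collapses the flags word to 0 or 1 and then masks it with the RUNNING bit; unless that flag happens to be bit 0, the test is always false and the -ENODEV path never fires. A small standalone illustration (hypothetical flag value, not the driver header):

#include <stdio.h>

#define FLAGS_RUNNING 0x4	/* illustrative bit, anything other than bit 0 */

static void check(unsigned int flags)
{
	/* Buggy: (!flags) is 0 or 1, and masking that with 0x4 is always 0. */
	printf("flags=%u  buggy=%d  fixed=%d\n",
	       flags, !flags & FLAGS_RUNNING, !(flags & FLAGS_RUNNING));
}

int main(void)
{
	check(0);		/* card not running: only the fixed form says so */
	check(FLAGS_RUNNING);	/* card running: both forms agree                */
	return 0;
}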
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 7aeceedcf7d4..831aed9c56ff 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -1047,6 +1047,11 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1047 | if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ)) | 1047 | if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ)) |
1048 | return; | 1048 | return; |
1049 | bitmap->daemon_lastrun = jiffies; | 1049 | bitmap->daemon_lastrun = jiffies; |
1050 | if (bitmap->allclean) { | ||
1051 | bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; | ||
1052 | return; | ||
1053 | } | ||
1054 | bitmap->allclean = 1; | ||
1050 | 1055 | ||
1051 | for (j = 0; j < bitmap->chunks; j++) { | 1056 | for (j = 0; j < bitmap->chunks; j++) { |
1052 | bitmap_counter_t *bmc; | 1057 | bitmap_counter_t *bmc; |
@@ -1068,8 +1073,10 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1068 | clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); | 1073 | clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); |
1069 | 1074 | ||
1070 | spin_unlock_irqrestore(&bitmap->lock, flags); | 1075 | spin_unlock_irqrestore(&bitmap->lock, flags); |
1071 | if (need_write) | 1076 | if (need_write) { |
1072 | write_page(bitmap, page, 0); | 1077 | write_page(bitmap, page, 0); |
1078 | bitmap->allclean = 0; | ||
1079 | } | ||
1073 | continue; | 1080 | continue; |
1074 | } | 1081 | } |
1075 | 1082 | ||
@@ -1098,6 +1105,9 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1098 | /* | 1105 | /* |
1099 | if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc); | 1106 | if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc); |
1100 | */ | 1107 | */ |
1108 | if (*bmc) | ||
1109 | bitmap->allclean = 0; | ||
1110 | |||
1101 | if (*bmc == 2) { | 1111 | if (*bmc == 2) { |
1102 | *bmc=1; /* maybe clear the bit next time */ | 1112 | *bmc=1; /* maybe clear the bit next time */ |
1103 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); | 1113 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); |
@@ -1132,6 +1142,8 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1132 | } | 1142 | } |
1133 | } | 1143 | } |
1134 | 1144 | ||
1145 | if (bitmap->allclean == 0) | ||
1146 | bitmap->mddev->thread->timeout = bitmap->daemon_sleep * HZ; | ||
1135 | } | 1147 | } |
1136 | 1148 | ||
1137 | static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, | 1149 | static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, |
@@ -1226,6 +1238,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect | |||
1226 | sectors -= blocks; | 1238 | sectors -= blocks; |
1227 | else sectors = 0; | 1239 | else sectors = 0; |
1228 | } | 1240 | } |
1241 | bitmap->allclean = 0; | ||
1229 | return 0; | 1242 | return 0; |
1230 | } | 1243 | } |
1231 | 1244 | ||
@@ -1296,6 +1309,7 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, | |||
1296 | } | 1309 | } |
1297 | } | 1310 | } |
1298 | spin_unlock_irq(&bitmap->lock); | 1311 | spin_unlock_irq(&bitmap->lock); |
1312 | bitmap->allclean = 0; | ||
1299 | return rv; | 1313 | return rv; |
1300 | } | 1314 | } |
1301 | 1315 | ||
@@ -1332,6 +1346,7 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int ab | |||
1332 | } | 1346 | } |
1333 | unlock: | 1347 | unlock: |
1334 | spin_unlock_irqrestore(&bitmap->lock, flags); | 1348 | spin_unlock_irqrestore(&bitmap->lock, flags); |
1349 | bitmap->allclean = 0; | ||
1335 | } | 1350 | } |
1336 | 1351 | ||
1337 | void bitmap_close_sync(struct bitmap *bitmap) | 1352 | void bitmap_close_sync(struct bitmap *bitmap) |
@@ -1399,7 +1414,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n | |||
1399 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); | 1414 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); |
1400 | } | 1415 | } |
1401 | spin_unlock_irq(&bitmap->lock); | 1416 | spin_unlock_irq(&bitmap->lock); |
1402 | 1417 | bitmap->allclean = 0; | |
1403 | } | 1418 | } |
1404 | 1419 | ||
1405 | /* dirty the memory and file bits for bitmap chunks "s" to "e" */ | 1420 | /* dirty the memory and file bits for bitmap chunks "s" to "e" */ |
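The bitmap.c hunks add an allclean latch: the daemon optimistically sets it at the start of each pass, any write, sync or still-nonzero counter clears it, and once a pass ends with the latch still set the md thread timeout is stretched to MAX_SCHEDULE_TIMEOUT so an idle array stops waking up every few seconds. The shape of that pattern, reduced to a hedged standalone sketch (field and constant names are stand-ins, not the md structures):

/* Sketch of the idle-latch pattern used above; not the md code itself. */
struct daemon_state {
	int allclean;			/* 1 = nothing dirtied since last pass */
	long timeout;			/* how long the worker thread sleeps   */
};

#define TIMEOUT_ACTIVE	500		/* stand-in for daemon_sleep * HZ      */
#define TIMEOUT_IDLE	0x7fffffffL	/* stand-in for MAX_SCHEDULE_TIMEOUT   */

static void note_activity(struct daemon_state *s)
{
	s->allclean = 0;		/* called from every write/sync path   */
}

static void daemon_pass(struct daemon_state *s, int counters_still_set)
{
	if (s->allclean) {
		s->timeout = TIMEOUT_IDLE;	/* idle: stop polling          */
		return;
	}
	s->allclean = 1;			/* assume clean until disproven */
	if (counters_still_set)
		s->allclean = 0;
	if (!s->allclean)
		s->timeout = TIMEOUT_ACTIVE;	/* keep polling while dirty    */
}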
diff --git a/drivers/md/md.c b/drivers/md/md.c index 7da6ec244e15..827824a9f3e9 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -1105,7 +1105,11 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | |||
1105 | rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; | 1105 | rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; |
1106 | bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; | 1106 | bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; |
1107 | if (rdev->sb_size & bmask) | 1107 | if (rdev->sb_size & bmask) |
1108 | rdev-> sb_size = (rdev->sb_size | bmask)+1; | 1108 | rdev->sb_size = (rdev->sb_size | bmask) + 1; |
1109 | |||
1110 | if (minor_version | ||
1111 | && rdev->data_offset < sb_offset + (rdev->sb_size/512)) | ||
1112 | return -EINVAL; | ||
1109 | 1113 | ||
1110 | if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) | 1114 | if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) |
1111 | rdev->desc_nr = -1; | 1115 | rdev->desc_nr = -1; |
@@ -1137,7 +1141,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | |||
1137 | else | 1141 | else |
1138 | ret = 0; | 1142 | ret = 0; |
1139 | } | 1143 | } |
1140 | if (minor_version) | 1144 | if (minor_version) |
1141 | rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; | 1145 | rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; |
1142 | else | 1146 | else |
1143 | rdev->size = rdev->sb_offset; | 1147 | rdev->size = rdev->sb_offset; |
@@ -1499,7 +1503,8 @@ static void export_rdev(mdk_rdev_t * rdev) | |||
1499 | free_disk_sb(rdev); | 1503 | free_disk_sb(rdev); |
1500 | list_del_init(&rdev->same_set); | 1504 | list_del_init(&rdev->same_set); |
1501 | #ifndef MODULE | 1505 | #ifndef MODULE |
1502 | md_autodetect_dev(rdev->bdev->bd_dev); | 1506 | if (test_bit(AutoDetected, &rdev->flags)) |
1507 | md_autodetect_dev(rdev->bdev->bd_dev); | ||
1503 | #endif | 1508 | #endif |
1504 | unlock_rdev(rdev); | 1509 | unlock_rdev(rdev); |
1505 | kobject_put(&rdev->kobj); | 1510 | kobject_put(&rdev->kobj); |
@@ -1996,9 +2001,11 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
1996 | char *e; | 2001 | char *e; |
1997 | unsigned long long size = simple_strtoull(buf, &e, 10); | 2002 | unsigned long long size = simple_strtoull(buf, &e, 10); |
1998 | unsigned long long oldsize = rdev->size; | 2003 | unsigned long long oldsize = rdev->size; |
2004 | mddev_t *my_mddev = rdev->mddev; | ||
2005 | |||
1999 | if (e==buf || (*e && *e != '\n')) | 2006 | if (e==buf || (*e && *e != '\n')) |
2000 | return -EINVAL; | 2007 | return -EINVAL; |
2001 | if (rdev->mddev->pers) | 2008 | if (my_mddev->pers) |
2002 | return -EBUSY; | 2009 | return -EBUSY; |
2003 | rdev->size = size; | 2010 | rdev->size = size; |
2004 | if (size > oldsize && rdev->mddev->external) { | 2011 | if (size > oldsize && rdev->mddev->external) { |
@@ -2011,7 +2018,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2011 | int overlap = 0; | 2018 | int overlap = 0; |
2012 | struct list_head *tmp, *tmp2; | 2019 | struct list_head *tmp, *tmp2; |
2013 | 2020 | ||
2014 | mddev_unlock(rdev->mddev); | 2021 | mddev_unlock(my_mddev); |
2015 | for_each_mddev(mddev, tmp) { | 2022 | for_each_mddev(mddev, tmp) { |
2016 | mdk_rdev_t *rdev2; | 2023 | mdk_rdev_t *rdev2; |
2017 | 2024 | ||
@@ -2031,7 +2038,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2031 | break; | 2038 | break; |
2032 | } | 2039 | } |
2033 | } | 2040 | } |
2034 | mddev_lock(rdev->mddev); | 2041 | mddev_lock(my_mddev); |
2035 | if (overlap) { | 2042 | if (overlap) { |
2036 | /* Someone else could have slipped in a size | 2043 | /* Someone else could have slipped in a size |
2037 | * change here, but doing so is just silly. | 2044 | * change here, but doing so is just silly. |
@@ -2043,8 +2050,8 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2043 | return -EBUSY; | 2050 | return -EBUSY; |
2044 | } | 2051 | } |
2045 | } | 2052 | } |
2046 | if (size < rdev->mddev->size || rdev->mddev->size == 0) | 2053 | if (size < my_mddev->size || my_mddev->size == 0) |
2047 | rdev->mddev->size = size; | 2054 | my_mddev->size = size; |
2048 | return len; | 2055 | return len; |
2049 | } | 2056 | } |
2050 | 2057 | ||
@@ -2065,10 +2072,21 @@ rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | |||
2065 | { | 2072 | { |
2066 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); | 2073 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); |
2067 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); | 2074 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); |
2075 | mddev_t *mddev = rdev->mddev; | ||
2076 | ssize_t rv; | ||
2068 | 2077 | ||
2069 | if (!entry->show) | 2078 | if (!entry->show) |
2070 | return -EIO; | 2079 | return -EIO; |
2071 | return entry->show(rdev, page); | 2080 | |
2081 | rv = mddev ? mddev_lock(mddev) : -EBUSY; | ||
2082 | if (!rv) { | ||
2083 | if (rdev->mddev == NULL) | ||
2084 | rv = -EBUSY; | ||
2085 | else | ||
2086 | rv = entry->show(rdev, page); | ||
2087 | mddev_unlock(mddev); | ||
2088 | } | ||
2089 | return rv; | ||
2072 | } | 2090 | } |
2073 | 2091 | ||
2074 | static ssize_t | 2092 | static ssize_t |
@@ -2077,15 +2095,19 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr, | |||
2077 | { | 2095 | { |
2078 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); | 2096 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); |
2079 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); | 2097 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); |
2080 | int rv; | 2098 | ssize_t rv; |
2099 | mddev_t *mddev = rdev->mddev; | ||
2081 | 2100 | ||
2082 | if (!entry->store) | 2101 | if (!entry->store) |
2083 | return -EIO; | 2102 | return -EIO; |
2084 | if (!capable(CAP_SYS_ADMIN)) | 2103 | if (!capable(CAP_SYS_ADMIN)) |
2085 | return -EACCES; | 2104 | return -EACCES; |
2086 | rv = mddev_lock(rdev->mddev); | 2105 | rv = mddev ? mddev_lock(mddev): -EBUSY; |
2087 | if (!rv) { | 2106 | if (!rv) { |
2088 | rv = entry->store(rdev, page, length); | 2107 | if (rdev->mddev == NULL) |
2108 | rv = -EBUSY; | ||
2109 | else | ||
2110 | rv = entry->store(rdev, page, length); | ||
2089 | mddev_unlock(rdev->mddev); | 2111 | mddev_unlock(rdev->mddev); |
2090 | } | 2112 | } |
2091 | return rv; | 2113 | return rv; |
@@ -5351,6 +5373,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi) | |||
5351 | mddev->ro = 0; | 5373 | mddev->ro = 0; |
5352 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 5374 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
5353 | md_wakeup_thread(mddev->thread); | 5375 | md_wakeup_thread(mddev->thread); |
5376 | md_wakeup_thread(mddev->sync_thread); | ||
5354 | } | 5377 | } |
5355 | atomic_inc(&mddev->writes_pending); | 5378 | atomic_inc(&mddev->writes_pending); |
5356 | if (mddev->in_sync) { | 5379 | if (mddev->in_sync) { |
@@ -6021,6 +6044,7 @@ static void autostart_arrays(int part) | |||
6021 | MD_BUG(); | 6044 | MD_BUG(); |
6022 | continue; | 6045 | continue; |
6023 | } | 6046 | } |
6047 | set_bit(AutoDetected, &rdev->flags); | ||
6024 | list_add(&rdev->same_set, &pending_raid_disks); | 6048 | list_add(&rdev->same_set, &pending_raid_disks); |
6025 | i_passed++; | 6049 | i_passed++; |
6026 | } | 6050 | } |
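The rdev_attr_show()/rdev_attr_store() changes above snapshot rdev->mddev into a local, lock it, and then re-check rdev->mddev under the lock, so a sysfs access racing against the rdev being detached gets -EBUSY instead of chasing a stale pointer. A hedged generic sketch of that lock-then-revalidate pattern (types and helpers are invented stand-ins, not the md API):

struct my_owner { int locked; };
struct my_obj   { struct my_owner *owner; };

static int  owner_lock(struct my_owner *o)   { o->locked = 1; return 0; }
static void owner_unlock(struct my_owner *o) { o->locked = 0; }

static long guarded_access(struct my_obj *obj, long (*op)(struct my_obj *))
{
	struct my_owner *owner = obj->owner;	/* snapshot; may go stale     */
	long rv;

	rv = owner ? owner_lock(owner) : -16;	/* -16 standing in for -EBUSY */
	if (rv)
		return rv;
	if (obj->owner == NULL)			/* detached while we waited   */
		rv = -16;
	else
		rv = op(obj);
	owner_unlock(owner);
	return rv;
}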
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 5c7fef091cec..ff61b309129a 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -592,6 +592,37 @@ static int raid1_congested(void *data, int bits) | |||
592 | } | 592 | } |
593 | 593 | ||
594 | 594 | ||
595 | static int flush_pending_writes(conf_t *conf) | ||
596 | { | ||
597 | /* Any writes that have been queued but are awaiting | ||
598 | * bitmap updates get flushed here. | ||
599 | * We return 1 if any requests were actually submitted. | ||
600 | */ | ||
601 | int rv = 0; | ||
602 | |||
603 | spin_lock_irq(&conf->device_lock); | ||
604 | |||
605 | if (conf->pending_bio_list.head) { | ||
606 | struct bio *bio; | ||
607 | bio = bio_list_get(&conf->pending_bio_list); | ||
608 | blk_remove_plug(conf->mddev->queue); | ||
609 | spin_unlock_irq(&conf->device_lock); | ||
610 | /* flush any pending bitmap writes to | ||
611 | * disk before proceeding w/ I/O */ | ||
612 | bitmap_unplug(conf->mddev->bitmap); | ||
613 | |||
614 | while (bio) { /* submit pending writes */ | ||
615 | struct bio *next = bio->bi_next; | ||
616 | bio->bi_next = NULL; | ||
617 | generic_make_request(bio); | ||
618 | bio = next; | ||
619 | } | ||
620 | rv = 1; | ||
621 | } else | ||
622 | spin_unlock_irq(&conf->device_lock); | ||
623 | return rv; | ||
624 | } | ||
625 | |||
595 | /* Barriers.... | 626 | /* Barriers.... |
596 | * Sometimes we need to suspend IO while we do something else, | 627 | * Sometimes we need to suspend IO while we do something else, |
597 | * either some resync/recovery, or reconfigure the array. | 628 | * either some resync/recovery, or reconfigure the array. |
@@ -673,15 +704,23 @@ static void freeze_array(conf_t *conf) | |||
673 | /* stop syncio and normal IO and wait for everything to | 704 | /* stop syncio and normal IO and wait for everything to |
674 | * go quiet. | 705 |
675 | * We increment barrier and nr_waiting, and then | 706 | * We increment barrier and nr_waiting, and then |
676 | * wait until barrier+nr_pending match nr_queued+2 | 707 | * wait until nr_pending match nr_queued+1 |
708 | * This is called in the context of one normal IO request | ||
709 | * that has failed. Thus any sync request that might be pending | ||
710 | * will be blocked by nr_pending, and we need to wait for | ||
711 | * pending IO requests to complete or be queued for re-try. | ||
712 | * Thus the number queued (nr_queued) plus this request (1) | ||
713 | * must match the number of pending IOs (nr_pending) before | ||
714 | * we continue. | ||
677 | */ | 715 | */ |
678 | spin_lock_irq(&conf->resync_lock); | 716 | spin_lock_irq(&conf->resync_lock); |
679 | conf->barrier++; | 717 | conf->barrier++; |
680 | conf->nr_waiting++; | 718 | conf->nr_waiting++; |
681 | wait_event_lock_irq(conf->wait_barrier, | 719 | wait_event_lock_irq(conf->wait_barrier, |
682 | conf->barrier+conf->nr_pending == conf->nr_queued+2, | 720 | conf->nr_pending == conf->nr_queued+1, |
683 | conf->resync_lock, | 721 | conf->resync_lock, |
684 | raid1_unplug(conf->mddev->queue)); | 722 | ({ flush_pending_writes(conf); |
723 | raid1_unplug(conf->mddev->queue); })); | ||
685 | spin_unlock_irq(&conf->resync_lock); | 724 | spin_unlock_irq(&conf->resync_lock); |
686 | } | 725 | } |
687 | static void unfreeze_array(conf_t *conf) | 726 | static void unfreeze_array(conf_t *conf) |
@@ -907,6 +946,9 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
907 | blk_plug_device(mddev->queue); | 946 | blk_plug_device(mddev->queue); |
908 | spin_unlock_irqrestore(&conf->device_lock, flags); | 947 | spin_unlock_irqrestore(&conf->device_lock, flags); |
909 | 948 | ||
949 | /* In case raid1d snuck into freeze_array */ | ||
950 | wake_up(&conf->wait_barrier); | ||
951 | |||
910 | if (do_sync) | 952 | if (do_sync) |
911 | md_wakeup_thread(mddev->thread); | 953 | md_wakeup_thread(mddev->thread); |
912 | #if 0 | 954 | #if 0 |
@@ -1473,28 +1515,14 @@ static void raid1d(mddev_t *mddev) | |||
1473 | 1515 | ||
1474 | for (;;) { | 1516 | for (;;) { |
1475 | char b[BDEVNAME_SIZE]; | 1517 | char b[BDEVNAME_SIZE]; |
1476 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1477 | |||
1478 | if (conf->pending_bio_list.head) { | ||
1479 | bio = bio_list_get(&conf->pending_bio_list); | ||
1480 | blk_remove_plug(mddev->queue); | ||
1481 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1482 | /* flush any pending bitmap writes to disk before proceeding w/ I/O */ | ||
1483 | bitmap_unplug(mddev->bitmap); | ||
1484 | 1518 | ||
1485 | while (bio) { /* submit pending writes */ | 1519 | unplug += flush_pending_writes(conf); |
1486 | struct bio *next = bio->bi_next; | ||
1487 | bio->bi_next = NULL; | ||
1488 | generic_make_request(bio); | ||
1489 | bio = next; | ||
1490 | } | ||
1491 | unplug = 1; | ||
1492 | 1520 | ||
1493 | continue; | 1521 | spin_lock_irqsave(&conf->device_lock, flags); |
1494 | } | 1522 | if (list_empty(head)) { |
1495 | 1523 | spin_unlock_irqrestore(&conf->device_lock, flags); | |
1496 | if (list_empty(head)) | ||
1497 | break; | 1524 | break; |
1525 | } | ||
1498 | r1_bio = list_entry(head->prev, r1bio_t, retry_list); | 1526 | r1_bio = list_entry(head->prev, r1bio_t, retry_list); |
1499 | list_del(head->prev); | 1527 | list_del(head->prev); |
1500 | conf->nr_queued--; | 1528 | conf->nr_queued--; |
@@ -1590,7 +1618,6 @@ static void raid1d(mddev_t *mddev) | |||
1590 | } | 1618 | } |
1591 | } | 1619 | } |
1592 | } | 1620 | } |
1593 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1594 | if (unplug) | 1621 | if (unplug) |
1595 | unplug_slaves(mddev); | 1622 | unplug_slaves(mddev); |
1596 | } | 1623 | } |
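The substance of the raid1.c change is that writes parked on pending_bio_list (awaiting bitmap updates) are now flushed from inside the freeze_array() wait as well as from raid1d(); those parked writes still count in nr_pending, so without the flush the nr_pending == nr_queued+1 condition could never become true. Roughly equivalent control flow, as a loose sketch only (the real code uses wait_event_lock_irq() under resync_lock):

struct conf_sketch { int barrier, nr_waiting, nr_pending, nr_queued; };

/* Imaginary helpers for this sketch; see flush_pending_writes() above. */
int  flush_pending_writes_sketch(struct conf_sketch *conf);
void unplug_and_wait(struct conf_sketch *conf);

static void freeze_sketch(struct conf_sketch *conf)
{
	conf->barrier++;
	conf->nr_waiting++;
	while (conf->nr_pending != conf->nr_queued + 1) {
		flush_pending_writes_sketch(conf);	/* submit parked writes */
		unplug_and_wait(conf);			/* then sleep and retry */
	}
}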
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 017f58113c33..32389d2f18fc 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -537,7 +537,8 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) | |||
537 | current_distance = abs(r10_bio->devs[slot].addr - | 537 | current_distance = abs(r10_bio->devs[slot].addr - |
538 | conf->mirrors[disk].head_position); | 538 | conf->mirrors[disk].head_position); |
539 | 539 | ||
540 | /* Find the disk whose head is closest */ | 540 | /* Find the disk whose head is closest, |
541 | * or - for far > 1 - find the closest to partition beginning */ | ||
541 | 542 | ||
542 | for (nslot = slot; nslot < conf->copies; nslot++) { | 543 | for (nslot = slot; nslot < conf->copies; nslot++) { |
543 | int ndisk = r10_bio->devs[nslot].devnum; | 544 | int ndisk = r10_bio->devs[nslot].devnum; |
@@ -557,8 +558,13 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) | |||
557 | slot = nslot; | 558 | slot = nslot; |
558 | break; | 559 | break; |
559 | } | 560 | } |
560 | new_distance = abs(r10_bio->devs[nslot].addr - | 561 | |
561 | conf->mirrors[ndisk].head_position); | 562 | /* for far > 1 always use the lowest address */ |
563 | if (conf->far_copies > 1) | ||
564 | new_distance = r10_bio->devs[nslot].addr; | ||
565 | else | ||
566 | new_distance = abs(r10_bio->devs[nslot].addr - | ||
567 | conf->mirrors[ndisk].head_position); | ||
562 | if (new_distance < current_distance) { | 568 | if (new_distance < current_distance) { |
563 | current_distance = new_distance; | 569 | current_distance = new_distance; |
564 | disk = ndisk; | 570 | disk = ndisk; |
@@ -629,7 +635,36 @@ static int raid10_congested(void *data, int bits) | |||
629 | return ret; | 635 | return ret; |
630 | } | 636 | } |
631 | 637 | ||
632 | 638 | static int flush_pending_writes(conf_t *conf) | |
639 | { | ||
640 | /* Any writes that have been queued but are awaiting | ||
641 | * bitmap updates get flushed here. | ||
642 | * We return 1 if any requests were actually submitted. | ||
643 | */ | ||
644 | int rv = 0; | ||
645 | |||
646 | spin_lock_irq(&conf->device_lock); | ||
647 | |||
648 | if (conf->pending_bio_list.head) { | ||
649 | struct bio *bio; | ||
650 | bio = bio_list_get(&conf->pending_bio_list); | ||
651 | blk_remove_plug(conf->mddev->queue); | ||
652 | spin_unlock_irq(&conf->device_lock); | ||
653 | /* flush any pending bitmap writes to disk | ||
654 | * before proceeding w/ I/O */ | ||
655 | bitmap_unplug(conf->mddev->bitmap); | ||
656 | |||
657 | while (bio) { /* submit pending writes */ | ||
658 | struct bio *next = bio->bi_next; | ||
659 | bio->bi_next = NULL; | ||
660 | generic_make_request(bio); | ||
661 | bio = next; | ||
662 | } | ||
663 | rv = 1; | ||
664 | } else | ||
665 | spin_unlock_irq(&conf->device_lock); | ||
666 | return rv; | ||
667 | } | ||
633 | /* Barriers.... | 668 | /* Barriers.... |
634 | * Sometimes we need to suspend IO while we do something else, | 669 | * Sometimes we need to suspend IO while we do something else, |
635 | * either some resync/recovery, or reconfigure the array. | 670 | * either some resync/recovery, or reconfigure the array. |
@@ -712,15 +747,23 @@ static void freeze_array(conf_t *conf) | |||
712 | /* stop syncio and normal IO and wait for everything to | 747 | /* stop syncio and normal IO and wait for everything to |
713 | * go quiet. | 748 | * go quiet. |
714 | * We increment barrier and nr_waiting, and then | 749 | * We increment barrier and nr_waiting, and then |
715 | * wait until barrier+nr_pending match nr_queued+2 | 750 | * wait until nr_pending match nr_queued+1 |
751 | * This is called in the context of one normal IO request | ||
752 | * that has failed. Thus any sync request that might be pending | ||
753 | * will be blocked by nr_pending, and we need to wait for | ||
754 | * pending IO requests to complete or be queued for re-try. | ||
755 | * Thus the number queued (nr_queued) plus this request (1) | ||
756 | * must match the number of pending IOs (nr_pending) before | ||
757 | * we continue. | ||
716 | */ | 758 | */ |
717 | spin_lock_irq(&conf->resync_lock); | 759 | spin_lock_irq(&conf->resync_lock); |
718 | conf->barrier++; | 760 | conf->barrier++; |
719 | conf->nr_waiting++; | 761 | conf->nr_waiting++; |
720 | wait_event_lock_irq(conf->wait_barrier, | 762 | wait_event_lock_irq(conf->wait_barrier, |
721 | conf->barrier+conf->nr_pending == conf->nr_queued+2, | 763 | conf->nr_pending == conf->nr_queued+1, |
722 | conf->resync_lock, | 764 | conf->resync_lock, |
723 | raid10_unplug(conf->mddev->queue)); | 765 | ({ flush_pending_writes(conf); |
766 | raid10_unplug(conf->mddev->queue); })); | ||
724 | spin_unlock_irq(&conf->resync_lock); | 767 | spin_unlock_irq(&conf->resync_lock); |
725 | } | 768 | } |
726 | 769 | ||
@@ -892,6 +935,9 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
892 | blk_plug_device(mddev->queue); | 935 | blk_plug_device(mddev->queue); |
893 | spin_unlock_irqrestore(&conf->device_lock, flags); | 936 | spin_unlock_irqrestore(&conf->device_lock, flags); |
894 | 937 | ||
938 | /* In case raid10d snuck in to freeze_array */ | ||
939 | wake_up(&conf->wait_barrier); | ||
940 | |||
895 | if (do_sync) | 941 | if (do_sync) |
896 | md_wakeup_thread(mddev->thread); | 942 | md_wakeup_thread(mddev->thread); |
897 | 943 | ||
@@ -1464,28 +1510,14 @@ static void raid10d(mddev_t *mddev) | |||
1464 | 1510 | ||
1465 | for (;;) { | 1511 | for (;;) { |
1466 | char b[BDEVNAME_SIZE]; | 1512 | char b[BDEVNAME_SIZE]; |
1467 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1468 | 1513 | ||
1469 | if (conf->pending_bio_list.head) { | 1514 | unplug += flush_pending_writes(conf); |
1470 | bio = bio_list_get(&conf->pending_bio_list); | ||
1471 | blk_remove_plug(mddev->queue); | ||
1472 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1473 | /* flush any pending bitmap writes to disk before proceeding w/ I/O */ | ||
1474 | bitmap_unplug(mddev->bitmap); | ||
1475 | |||
1476 | while (bio) { /* submit pending writes */ | ||
1477 | struct bio *next = bio->bi_next; | ||
1478 | bio->bi_next = NULL; | ||
1479 | generic_make_request(bio); | ||
1480 | bio = next; | ||
1481 | } | ||
1482 | unplug = 1; | ||
1483 | |||
1484 | continue; | ||
1485 | } | ||
1486 | 1515 | ||
1487 | if (list_empty(head)) | 1516 | spin_lock_irqsave(&conf->device_lock, flags); |
1517 | if (list_empty(head)) { | ||
1518 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1488 | break; | 1519 | break; |
1520 | } | ||
1489 | r10_bio = list_entry(head->prev, r10bio_t, retry_list); | 1521 | r10_bio = list_entry(head->prev, r10bio_t, retry_list); |
1490 | list_del(head->prev); | 1522 | list_del(head->prev); |
1491 | conf->nr_queued--; | 1523 | conf->nr_queued--; |
@@ -1548,7 +1580,6 @@ static void raid10d(mddev_t *mddev) | |||
1548 | } | 1580 | } |
1549 | } | 1581 | } |
1550 | } | 1582 | } |
1551 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1552 | if (unplug) | 1583 | if (unplug) |
1553 | unplug_slaves(mddev); | 1584 | unplug_slaves(mddev); |
1554 | } | 1585 | } |
@@ -1787,6 +1818,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
1787 | if (j == conf->copies) { | 1818 | if (j == conf->copies) { |
1788 | /* Cannot recover, so abort the recovery */ | 1819 | /* Cannot recover, so abort the recovery */ |
1789 | put_buf(r10_bio); | 1820 | put_buf(r10_bio); |
1821 | if (rb2) | ||
1822 | atomic_dec(&rb2->remaining); | ||
1790 | r10_bio = rb2; | 1823 | r10_bio = rb2; |
1791 | if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery)) | 1824 | if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery)) |
1792 | printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n", | 1825 | printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n", |
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 0c303c84b37b..6b6df8679585 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c | |||
@@ -632,8 +632,7 @@ mpt_deregister(u8 cb_idx) | |||
632 | 632 | ||
633 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 633 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
634 | /** | 634 | /** |
635 | * mpt_event_register - Register protocol-specific event callback | 635 | * mpt_event_register - Register protocol-specific event callback handler. |
636 | * handler. | ||
637 | * @cb_idx: previously registered (via mpt_register) callback handle | 636 | * @cb_idx: previously registered (via mpt_register) callback handle |
638 | * @ev_cbfunc: callback function | 637 | * @ev_cbfunc: callback function |
639 | * | 638 | * |
@@ -654,8 +653,7 @@ mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc) | |||
654 | 653 | ||
655 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 654 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
656 | /** | 655 | /** |
657 | * mpt_event_deregister - Deregister protocol-specific event callback | 656 | * mpt_event_deregister - Deregister protocol-specific event callback handler |
658 | * handler. | ||
659 | * @cb_idx: previously registered callback handle | 657 | * @cb_idx: previously registered callback handle |
660 | * | 658 | * |
661 | * Each protocol-specific driver should call this routine | 659 | * Each protocol-specific driver should call this routine |
@@ -765,11 +763,13 @@ mpt_device_driver_deregister(u8 cb_idx) | |||
765 | 763 | ||
766 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 764 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
767 | /** | 765 | /** |
768 | * mpt_get_msg_frame - Obtain a MPT request frame from the pool (of 1024) | 766 | * mpt_get_msg_frame - Obtain an MPT request frame from the pool |
769 | * allocated per MPT adapter. | ||
770 | * @cb_idx: Handle of registered MPT protocol driver | 767 | * @cb_idx: Handle of registered MPT protocol driver |
771 | * @ioc: Pointer to MPT adapter structure | 768 | * @ioc: Pointer to MPT adapter structure |
772 | * | 769 | * |
770 | * Obtain an MPT request frame from the pool (of 1024) that are | ||
771 | * allocated per MPT adapter. | ||
772 | * | ||
773 | * Returns pointer to a MPT request frame or %NULL if none are available | 773 | * Returns pointer to a MPT request frame or %NULL if none are available |
774 | * or IOC is not active. | 774 | * or IOC is not active. |
775 | */ | 775 | */ |
@@ -834,13 +834,12 @@ mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc) | |||
834 | 834 | ||
835 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 835 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
836 | /** | 836 | /** |
837 | * mpt_put_msg_frame - Send a protocol specific MPT request frame | 837 | * mpt_put_msg_frame - Send a protocol-specific MPT request frame to an IOC |
838 | * to a IOC. | ||
839 | * @cb_idx: Handle of registered MPT protocol driver | 838 | * @cb_idx: Handle of registered MPT protocol driver |
840 | * @ioc: Pointer to MPT adapter structure | 839 | * @ioc: Pointer to MPT adapter structure |
841 | * @mf: Pointer to MPT request frame | 840 | * @mf: Pointer to MPT request frame |
842 | * | 841 | * |
843 | * This routine posts a MPT request frame to the request post FIFO of a | 842 | * This routine posts an MPT request frame to the request post FIFO of a |
844 | * specific MPT adapter. | 843 | * specific MPT adapter. |
845 | */ | 844 | */ |
846 | void | 845 | void |
@@ -868,13 +867,15 @@ mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf) | |||
868 | } | 867 | } |
869 | 868 | ||
870 | /** | 869 | /** |
871 | * mpt_put_msg_frame_hi_pri - Send a protocol specific MPT request frame | 870 | * mpt_put_msg_frame_hi_pri - Send a hi-pri protocol-specific MPT request frame |
872 | * to a IOC using hi priority request queue. | ||
873 | * @cb_idx: Handle of registered MPT protocol driver | 871 | * @cb_idx: Handle of registered MPT protocol driver |
874 | * @ioc: Pointer to MPT adapter structure | 872 | * @ioc: Pointer to MPT adapter structure |
875 | * @mf: Pointer to MPT request frame | 873 | * @mf: Pointer to MPT request frame |
876 | * | 874 | * |
877 | * This routine posts a MPT request frame to the request post FIFO of a | 875 | * Send a protocol-specific MPT request frame to an IOC using |
876 | * hi-priority request queue. | ||
877 | * | ||
878 | * This routine posts an MPT request frame to the request post FIFO of a | ||
878 | * specific MPT adapter. | 879 | * specific MPT adapter. |
879 | **/ | 880 | **/ |
880 | void | 881 | void |
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index af1de0ccee2f..0c252f60c4c1 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c | |||
@@ -1533,7 +1533,7 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx) | |||
1533 | * | 1533 | * |
1534 | * Remark: Currently invoked from a non-interrupt thread (_bh). | 1534 | * Remark: Currently invoked from a non-interrupt thread (_bh). |
1535 | * | 1535 | * |
1536 | * Remark: With old EH code, at most 1 SCSI TaskMgmt function per IOC | 1536 | * Note: With old EH code, at most 1 SCSI TaskMgmt function per IOC |
1537 | * will be active. | 1537 | * will be active. |
1538 | * | 1538 | * |
1539 | * Returns 0 for SUCCESS, or %FAILED. | 1539 | * Returns 0 for SUCCESS, or %FAILED. |
@@ -2537,14 +2537,12 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR | |||
2537 | 2537 | ||
2538 | /** | 2538 | /** |
2539 | * mptscsih_get_scsi_lookup | 2539 | * mptscsih_get_scsi_lookup |
2540 | * | ||
2541 | * retrieves scmd entry from ScsiLookup[] array list | ||
2542 | * | ||
2543 | * @ioc: Pointer to MPT_ADAPTER structure | 2540 | * @ioc: Pointer to MPT_ADAPTER structure |
2544 | * @i: index into the array | 2541 | * @i: index into the array |
2545 | * | 2542 | * |
2546 | * Returns the scsi_cmd pointer | 2543 | * retrieves scmd entry from ScsiLookup[] array list |
2547 | * | 2544 | * |
2545 | * Returns the scsi_cmd pointer | ||
2548 | **/ | 2546 | **/ |
2549 | static struct scsi_cmnd * | 2547 | static struct scsi_cmnd * |
2550 | mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) | 2548 | mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) |
@@ -2561,14 +2559,12 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) | |||
2561 | 2559 | ||
2562 | /** | 2560 | /** |
2563 | * mptscsih_getclear_scsi_lookup | 2561 | * mptscsih_getclear_scsi_lookup |
2564 | * | ||
2565 | * retrieves and clears scmd entry from ScsiLookup[] array list | ||
2566 | * | ||
2567 | * @ioc: Pointer to MPT_ADAPTER structure | 2562 | * @ioc: Pointer to MPT_ADAPTER structure |
2568 | * @i: index into the array | 2563 | * @i: index into the array |
2569 | * | 2564 | * |
2570 | * Returns the scsi_cmd pointer | 2565 | * retrieves and clears scmd entry from ScsiLookup[] array list |
2571 | * | 2566 | * |
2567 | * Returns the scsi_cmd pointer | ||
2572 | **/ | 2568 | **/ |
2573 | static struct scsi_cmnd * | 2569 | static struct scsi_cmnd * |
2574 | mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i) | 2570 | mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i) |
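The mptbase.c and mptscsih.c hunks are kernel-doc cleanups: the short summary has to fit on the first comment line, the @parameter lines follow it directly, and any longer description comes after a blank line so the documentation tools parse the block correctly. A minimal example of the layout these hunks converge on (function and parameters are made up for illustration):

/**
 *	example_post_frame - Post an example frame to an adapter
 *	@idx: handle of the registered protocol driver
 *	@ioc: pointer to the adapter structure
 *
 *	The longer free-form description sits here, after the one-line
 *	summary and the parameter list.
 */
static void example_post_frame(unsigned char idx, void *ioc)
{
}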
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index afd82966f9a0..13bac53db69a 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c | |||
@@ -48,31 +48,13 @@ struct sm501_devdata { | |||
48 | unsigned int pdev_id; | 48 | unsigned int pdev_id; |
49 | unsigned int irq; | 49 | unsigned int irq; |
50 | void __iomem *regs; | 50 | void __iomem *regs; |
51 | unsigned int rev; | ||
51 | }; | 52 | }; |
52 | 53 | ||
53 | #define MHZ (1000 * 1000) | 54 | #define MHZ (1000 * 1000) |
54 | 55 | ||
55 | #ifdef DEBUG | 56 | #ifdef DEBUG |
56 | static const unsigned int misc_div[] = { | 57 | static const unsigned int div_tab[] = { |
57 | [0] = 1, | ||
58 | [1] = 2, | ||
59 | [2] = 4, | ||
60 | [3] = 8, | ||
61 | [4] = 16, | ||
62 | [5] = 32, | ||
63 | [6] = 64, | ||
64 | [7] = 128, | ||
65 | [8] = 3, | ||
66 | [9] = 6, | ||
67 | [10] = 12, | ||
68 | [11] = 24, | ||
69 | [12] = 48, | ||
70 | [13] = 96, | ||
71 | [14] = 192, | ||
72 | [15] = 384, | ||
73 | }; | ||
74 | |||
75 | static const unsigned int px_div[] = { | ||
76 | [0] = 1, | 58 | [0] = 1, |
77 | [1] = 2, | 59 | [1] = 2, |
78 | [2] = 4, | 60 | [2] = 4, |
@@ -101,12 +83,12 @@ static const unsigned int px_div[] = { | |||
101 | 83 | ||
102 | static unsigned long decode_div(unsigned long pll2, unsigned long val, | 84 | static unsigned long decode_div(unsigned long pll2, unsigned long val, |
103 | unsigned int lshft, unsigned int selbit, | 85 | unsigned int lshft, unsigned int selbit, |
104 | unsigned long mask, const unsigned int *dtab) | 86 | unsigned long mask) |
105 | { | 87 | { |
106 | if (val & selbit) | 88 | if (val & selbit) |
107 | pll2 = 288 * MHZ; | 89 | pll2 = 288 * MHZ; |
108 | 90 | ||
109 | return pll2 / dtab[(val >> lshft) & mask]; | 91 | return pll2 / div_tab[(val >> lshft) & mask]; |
110 | } | 92 | } |
111 | 93 | ||
112 | #define fmt_freq(x) ((x) / MHZ), ((x) % MHZ), (x) | 94 | #define fmt_freq(x) ((x) / MHZ), ((x) % MHZ), (x) |
@@ -141,10 +123,10 @@ static void sm501_dump_clk(struct sm501_devdata *sm) | |||
141 | } | 123 | } |
142 | 124 | ||
143 | sdclk0 = (misct & (1<<12)) ? pll2 : 288 * MHZ; | 125 | sdclk0 = (misct & (1<<12)) ? pll2 : 288 * MHZ; |
144 | sdclk0 /= misc_div[((misct >> 8) & 0xf)]; | 126 | sdclk0 /= div_tab[((misct >> 8) & 0xf)]; |
145 | 127 | ||
146 | sdclk1 = (misct & (1<<20)) ? pll2 : 288 * MHZ; | 128 | sdclk1 = (misct & (1<<20)) ? pll2 : 288 * MHZ; |
147 | sdclk1 /= misc_div[((misct >> 16) & 0xf)]; | 129 | sdclk1 /= div_tab[((misct >> 16) & 0xf)]; |
148 | 130 | ||
149 | dev_dbg(sm->dev, "MISCT=%08lx, PM0=%08lx, PM1=%08lx\n", | 131 | dev_dbg(sm->dev, "MISCT=%08lx, PM0=%08lx, PM1=%08lx\n", |
150 | misct, pm0, pm1); | 132 | misct, pm0, pm1); |
@@ -158,19 +140,19 @@ static void sm501_dump_clk(struct sm501_devdata *sm) | |||
158 | "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " | 140 | "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " |
159 | "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", | 141 | "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", |
160 | (pmc & 3 ) == 0 ? '*' : '-', | 142 | (pmc & 3 ) == 0 ? '*' : '-', |
161 | fmt_freq(decode_div(pll2, pm0, 24, 1<<29, 31, px_div)), | 143 | fmt_freq(decode_div(pll2, pm0, 24, 1<<29, 31)), |
162 | fmt_freq(decode_div(pll2, pm0, 16, 1<<20, 15, misc_div)), | 144 | fmt_freq(decode_div(pll2, pm0, 16, 1<<20, 15)), |
163 | fmt_freq(decode_div(pll2, pm0, 8, 1<<12, 15, misc_div)), | 145 | fmt_freq(decode_div(pll2, pm0, 8, 1<<12, 15)), |
164 | fmt_freq(decode_div(pll2, pm0, 0, 1<<4, 15, misc_div))); | 146 | fmt_freq(decode_div(pll2, pm0, 0, 1<<4, 15))); |
165 | 147 | ||
166 | dev_dbg(sm->dev, "PM1[%c]: " | 148 | dev_dbg(sm->dev, "PM1[%c]: " |
167 | "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " | 149 | "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " |
168 | "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", | 150 | "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", |
169 | (pmc & 3 ) == 1 ? '*' : '-', | 151 | (pmc & 3 ) == 1 ? '*' : '-', |
170 | fmt_freq(decode_div(pll2, pm1, 24, 1<<29, 31, px_div)), | 152 | fmt_freq(decode_div(pll2, pm1, 24, 1<<29, 31)), |
171 | fmt_freq(decode_div(pll2, pm1, 16, 1<<20, 15, misc_div)), | 153 | fmt_freq(decode_div(pll2, pm1, 16, 1<<20, 15)), |
172 | fmt_freq(decode_div(pll2, pm1, 8, 1<<12, 15, misc_div)), | 154 | fmt_freq(decode_div(pll2, pm1, 8, 1<<12, 15)), |
173 | fmt_freq(decode_div(pll2, pm1, 0, 1<<4, 15, misc_div))); | 155 | fmt_freq(decode_div(pll2, pm1, 0, 1<<4, 15))); |
174 | } | 156 | } |
175 | 157 | ||
176 | static void sm501_dump_regs(struct sm501_devdata *sm) | 158 | static void sm501_dump_regs(struct sm501_devdata *sm) |
@@ -436,46 +418,108 @@ struct sm501_clock { | |||
436 | unsigned long mclk; | 418 | unsigned long mclk; |
437 | int divider; | 419 | int divider; |
438 | int shift; | 420 | int shift; |
421 | unsigned int m, n, k; | ||
439 | }; | 422 | }; |
440 | 423 | ||
424 | /* sm501_calc_clock | ||
425 | * | ||
426 | * Calculates the nearest discrete clock frequency that | ||
427 | * can be achieved with the specified input clock. | ||
428 | * the maximum divisor is 3 or 5 | ||
429 | */ | ||
430 | |||
431 | static int sm501_calc_clock(unsigned long freq, | ||
432 | struct sm501_clock *clock, | ||
433 | int max_div, | ||
434 | unsigned long mclk, | ||
435 | long *best_diff) | ||
436 | { | ||
437 | int ret = 0; | ||
438 | int divider; | ||
439 | int shift; | ||
440 | long diff; | ||
441 | |||
442 | /* try dividers 1 and 3 for CRT and for panel, | ||
443 | try divider 5 for panel only.*/ | ||
444 | |||
445 | for (divider = 1; divider <= max_div; divider += 2) { | ||
446 | /* try all 8 shift values.*/ | ||
447 | for (shift = 0; shift < 8; shift++) { | ||
448 | /* Calculate difference to requested clock */ | ||
449 | diff = sm501fb_round_div(mclk, divider << shift) - freq; | ||
450 | if (diff < 0) | ||
451 | diff = -diff; | ||
452 | |||
453 | /* If it is less than the current, use it */ | ||
454 | if (diff < *best_diff) { | ||
455 | *best_diff = diff; | ||
456 | |||
457 | clock->mclk = mclk; | ||
458 | clock->divider = divider; | ||
459 | clock->shift = shift; | ||
460 | ret = 1; | ||
461 | } | ||
462 | } | ||
463 | } | ||
464 | |||
465 | return ret; | ||
466 | } | ||
467 | |||
468 | /* sm501_calc_pll | ||
469 | * | ||
470 | * Calculates the nearest discrete clock frequency that can be | ||
471 | * achieved using the programmable PLL. | ||
472 | * the maximum divisor is 3 or 5 | ||
473 | */ | ||
474 | |||
475 | static unsigned long sm501_calc_pll(unsigned long freq, | ||
476 | struct sm501_clock *clock, | ||
477 | int max_div) | ||
478 | { | ||
479 | unsigned long mclk; | ||
480 | unsigned int m, n, k; | ||
481 | long best_diff = 999999999; | ||
482 | |||
483 | /* | ||
484 | * The SM502 datasheet doesn't specify the min/max values for M and N. | ||
485 | * N = 1 at least doesn't work in practice. | ||
486 | */ | ||
487 | for (m = 2; m <= 255; m++) { | ||
488 | for (n = 2; n <= 127; n++) { | ||
489 | for (k = 0; k <= 1; k++) { | ||
490 | mclk = (24000000UL * m / n) >> k; | ||
491 | |||
492 | if (sm501_calc_clock(freq, clock, max_div, | ||
493 | mclk, &best_diff)) { | ||
494 | clock->m = m; | ||
495 | clock->n = n; | ||
496 | clock->k = k; | ||
497 | } | ||
498 | } | ||
499 | } | ||
500 | } | ||
501 | |||
502 | /* Return best clock. */ | ||
503 | return clock->mclk / (clock->divider << clock->shift); | ||
504 | } | ||
505 | |||
441 | /* sm501_select_clock | 506 | /* sm501_select_clock |
442 | * | 507 | * |
443 | * selects nearest discrete clock frequency the SM501 can achive | 508 | * Calculates the nearest discrete clock frequency that can be |
509 | * achieved using the 288MHz and 336MHz PLLs. | ||
444 | * the maximum divisor is 3 or 5 | 510 | * the maximum divisor is 3 or 5 |
445 | */ | 511 | */ |
512 | |||
446 | static unsigned long sm501_select_clock(unsigned long freq, | 513 | static unsigned long sm501_select_clock(unsigned long freq, |
447 | struct sm501_clock *clock, | 514 | struct sm501_clock *clock, |
448 | int max_div) | 515 | int max_div) |
449 | { | 516 | { |
450 | unsigned long mclk; | 517 | unsigned long mclk; |
451 | int divider; | ||
452 | int shift; | ||
453 | long diff; | ||
454 | long best_diff = 999999999; | 518 | long best_diff = 999999999; |
455 | 519 | ||
456 | /* Try 288MHz and 336MHz clocks. */ | 520 | /* Try 288MHz and 336MHz clocks. */ |
457 | for (mclk = 288000000; mclk <= 336000000; mclk += 48000000) { | 521 | for (mclk = 288000000; mclk <= 336000000; mclk += 48000000) { |
458 | /* try dividers 1 and 3 for CRT and for panel, | 522 | sm501_calc_clock(freq, clock, max_div, mclk, &best_diff); |
459 | try divider 5 for panel only.*/ | ||
460 | |||
461 | for (divider = 1; divider <= max_div; divider += 2) { | ||
462 | /* try all 8 shift values.*/ | ||
463 | for (shift = 0; shift < 8; shift++) { | ||
464 | /* Calculate difference to requested clock */ | ||
465 | diff = sm501fb_round_div(mclk, divider << shift) - freq; | ||
466 | if (diff < 0) | ||
467 | diff = -diff; | ||
468 | |||
469 | /* If it is less than the current, use it */ | ||
470 | if (diff < best_diff) { | ||
471 | best_diff = diff; | ||
472 | |||
473 | clock->mclk = mclk; | ||
474 | clock->divider = divider; | ||
475 | clock->shift = shift; | ||
476 | } | ||
477 | } | ||
478 | } | ||
479 | } | 523 | } |
480 | 524 | ||
481 | /* Return best clock. */ | 525 | /* Return best clock. */ |
@@ -497,6 +541,7 @@ unsigned long sm501_set_clock(struct device *dev, | |||
497 | unsigned long gate = readl(sm->regs + SM501_CURRENT_GATE); | 541 | unsigned long gate = readl(sm->regs + SM501_CURRENT_GATE); |
498 | unsigned long clock = readl(sm->regs + SM501_CURRENT_CLOCK); | 542 | unsigned long clock = readl(sm->regs + SM501_CURRENT_CLOCK); |
499 | unsigned char reg; | 543 | unsigned char reg; |
544 | unsigned int pll_reg = 0; | ||
500 | unsigned long sm501_freq; /* the actual frequency achieved */ | 545 |
501 | 546 | ||
502 | struct sm501_clock to; | 547 | struct sm501_clock to; |
@@ -511,14 +556,28 @@ unsigned long sm501_set_clock(struct device *dev, | |||
511 | * requested frequency the value must be multiplied by | 556 | * requested frequency the value must be multiplied by |
512 | * 2. This clock also has an additional pre divisor */ | 557 | * 2. This clock also has an additional pre divisor */ |
513 | 558 | ||
514 | sm501_freq = (sm501_select_clock(2 * req_freq, &to, 5) / 2); | 559 | if (sm->rev >= 0xC0) { |
515 | reg=to.shift & 0x07;/* bottom 3 bits are shift */ | 560 | /* SM502 -> use the programmable PLL */ |
516 | if (to.divider == 3) | 561 | sm501_freq = (sm501_calc_pll(2 * req_freq, |
517 | reg |= 0x08; /* /3 divider required */ | 562 | &to, 5) / 2); |
518 | else if (to.divider == 5) | 563 | reg = to.shift & 0x07;/* bottom 3 bits are shift */ |
519 | reg |= 0x10; /* /5 divider required */ | 564 | if (to.divider == 3) |
520 | if (to.mclk != 288000000) | 565 | reg |= 0x08; /* /3 divider required */ |
521 | reg |= 0x20; /* which mclk pll is source */ | 566 | else if (to.divider == 5) |
567 | reg |= 0x10; /* /5 divider required */ | ||
568 | reg |= 0x40; /* select the programmable PLL */ | ||
569 | pll_reg = 0x20000 | (to.k << 15) | (to.n << 8) | to.m; | ||
570 | } else { | ||
571 | sm501_freq = (sm501_select_clock(2 * req_freq, | ||
572 | &to, 5) / 2); | ||
573 | reg = to.shift & 0x07;/* bottom 3 bits are shift */ | ||
574 | if (to.divider == 3) | ||
575 | reg |= 0x08; /* /3 divider required */ | ||
576 | else if (to.divider == 5) | ||
577 | reg |= 0x10; /* /5 divider required */ | ||
578 | if (to.mclk != 288000000) | ||
579 | reg |= 0x20; /* which mclk pll is source */ | ||
580 | } | ||
522 | break; | 581 | break; |
523 | 582 | ||
524 | case SM501_CLOCK_V2XCLK: | 583 | case SM501_CLOCK_V2XCLK: |
@@ -579,6 +638,10 @@ unsigned long sm501_set_clock(struct device *dev, | |||
579 | } | 638 | } |
580 | 639 | ||
581 | writel(mode, sm->regs + SM501_POWER_MODE_CONTROL); | 640 | writel(mode, sm->regs + SM501_POWER_MODE_CONTROL); |
641 | |||
642 | if (pll_reg) | ||
643 | writel(pll_reg, sm->regs + SM501_PROGRAMMABLE_PLL_CONTROL); | ||
644 | |||
582 | sm501_sync_regs(sm); | 645 | sm501_sync_regs(sm); |
583 | 646 | ||
584 | dev_info(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n", | 647 | dev_info(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n", |
@@ -599,15 +662,24 @@ EXPORT_SYMBOL_GPL(sm501_set_clock); | |||
599 | * finds the closest available frequency for a given clock | 662 | * finds the closest available frequency for a given clock |
600 | */ | 663 | */ |
601 | 664 | ||
602 | unsigned long sm501_find_clock(int clksrc, | 665 | unsigned long sm501_find_clock(struct device *dev, |
666 | int clksrc, | ||
603 | unsigned long req_freq) | 667 | unsigned long req_freq) |
604 | { | 668 | { |
669 | struct sm501_devdata *sm = dev_get_drvdata(dev); | ||
605 | unsigned long sm501_freq; /* the frequency achievable by the 501 */ | 670 | unsigned long sm501_freq; /* the frequency achievable by the 501 */ |
606 | struct sm501_clock to; | 671 | struct sm501_clock to; |
607 | 672 | ||
608 | switch (clksrc) { | 673 | switch (clksrc) { |
609 | case SM501_CLOCK_P2XCLK: | 674 | case SM501_CLOCK_P2XCLK: |
610 | sm501_freq = (sm501_select_clock(2 * req_freq, &to, 5) / 2); | 675 | if (sm->rev >= 0xC0) { |
676 | /* SM502 -> use the programmable PLL */ | ||
677 | sm501_freq = (sm501_calc_pll(2 * req_freq, | ||
678 | &to, 5) / 2); | ||
679 | } else { | ||
680 | sm501_freq = (sm501_select_clock(2 * req_freq, | ||
681 | &to, 5) / 2); | ||
682 | } | ||
611 | break; | 683 | break; |
612 | 684 | ||
613 | case SM501_CLOCK_V2XCLK: | 685 | case SM501_CLOCK_V2XCLK: |
@@ -914,6 +986,8 @@ static int sm501_init_dev(struct sm501_devdata *sm) | |||
914 | dev_info(sm->dev, "SM501 At %p: Version %08lx, %ld Mb, IRQ %d\n", | 986 | dev_info(sm->dev, "SM501 At %p: Version %08lx, %ld Mb, IRQ %d\n", |
915 | sm->regs, devid, (unsigned long)mem_avail >> 20, sm->irq); | 987 | sm->regs, devid, (unsigned long)mem_avail >> 20, sm->irq); |
916 | 988 | ||
989 | sm->rev = devid & SM501_DEVICEID_REVMASK; | ||
990 | |||
917 | sm501_dump_gate(sm); | 991 | sm501_dump_gate(sm); |
918 | 992 | ||
919 | ret = device_create_file(sm->dev, &dev_attr_dbg_regs); | 993 | ret = device_create_file(sm->dev, &dev_attr_dbg_regs); |
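On SM502 parts (device revision 0xC0 and above, now cached in sm->rev) the clock paths above switch from the fixed mclk table to the chip's programmable PLL: sm501_calc_pll() fills the m/n/k fields of struct sm501_clock, bit 0x40 in the clock register selects the programmable PLL, and the PLL control word is packed and written to SM501_PROGRAMMABLE_PLL_CONTROL. A minimal sketch of that packing, assuming only the field layout visible in the diff (the trimmed struct is an illustrative stand-in, not the real struct sm501_clock):

    /* Illustrative only: pack the programmable-PLL control word the way the
     * patch does (0x20000 | k<<15 | n<<8 | m).  Exact meanings of the m/n/k
     * coefficients are per the SM502 manual and are assumed here. */
    struct sm502_pll {
            unsigned long m;
            unsigned long n;
            unsigned long k;
    };

    static unsigned long sm502_pack_pll(const struct sm502_pll *pll)
    {
            return 0x20000 | (pll->k << 15) | (pll->n << 8) | pll->m;
    }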
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c index bb269d0c677e..6cb781262f94 100644 --- a/drivers/misc/thinkpad_acpi.c +++ b/drivers/misc/thinkpad_acpi.c | |||
@@ -1078,7 +1078,8 @@ static int hotkey_get_tablet_mode(int *status) | |||
1078 | if (!acpi_evalf(hkey_handle, &s, "MHKG", "d")) | 1078 | if (!acpi_evalf(hkey_handle, &s, "MHKG", "d")) |
1079 | return -EIO; | 1079 | return -EIO; |
1080 | 1080 | ||
1081 | return ((s & TP_HOTKEY_TABLET_MASK) != 0); | 1081 | *status = ((s & TP_HOTKEY_TABLET_MASK) != 0); |
1082 | return 0; | ||
1082 | } | 1083 | } |
1083 | 1084 | ||
1084 | /* | 1085 | /* |
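The change above fixes the calling convention of hotkey_get_tablet_mode(): it now returns 0 or -EIO and reports the tablet state through its *status argument instead of returning the state directly. A hedged sketch of a caller under that convention (the caller itself is hypothetical):

    /* Hypothetical caller: hotkey_get_tablet_mode() now returns 0 on success
     * and a negative errno on failure; the mode comes back in *status. */
    static int report_tablet_mode(void)
    {
            int status, rc;

            rc = hotkey_get_tablet_mode(&status);
            if (rc < 0)
                    return rc;              /* -EIO from the ACPI evaluation */

            return status ? 1 : 0;          /* 1 = tablet mode, 0 = laptop mode */
    }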
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 0fbf1bbbaee9..d7a3ea88eddb 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
@@ -1253,7 +1253,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1253 | 1253 | ||
1254 | /* Setup interrupt handlers. */ | 1254 | /* Setup interrupt handlers. */ |
1255 | for (idp = id; idp->name; idp++) { | 1255 | for (idp = id; idp->name; idp++) { |
1256 | if (request_irq(idp->irq, idp->handler, 0, idp->name, dev) != 0) | 1256 | if (request_irq(idp->irq, idp->handler, IRQF_DISABLED, idp->name, dev) != 0) |
1257 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, idp->irq); | 1257 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, idp->irq); |
1258 | } | 1258 | } |
1259 | 1259 | ||
@@ -1382,7 +1382,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1382 | 1382 | ||
1383 | /* Setup interrupt handlers. */ | 1383 | /* Setup interrupt handlers. */ |
1384 | for (idp = id; idp->name; idp++) { | 1384 | for (idp = id; idp->name; idp++) { |
1385 | if (request_irq(b+idp->irq, fec_enet_interrupt, 0, idp->name, dev) != 0) | 1385 | if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name, dev) != 0) |
1386 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); | 1386 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); |
1387 | } | 1387 | } |
1388 | 1388 | ||
@@ -1553,7 +1553,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1553 | 1553 | ||
1554 | /* Setup interrupt handlers. */ | 1554 | /* Setup interrupt handlers. */ |
1555 | for (idp = id; idp->name; idp++) { | 1555 | for (idp = id; idp->name; idp++) { |
1556 | if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0) | 1556 | if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name,dev) != 0) |
1557 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); | 1557 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); |
1558 | } | 1558 | } |
1559 | 1559 | ||
@@ -1680,7 +1680,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1680 | 1680 | ||
1681 | /* Setup interrupt handlers. */ | 1681 | /* Setup interrupt handlers. */ |
1682 | for (idp = id; idp->name; idp++) { | 1682 | for (idp = id; idp->name; idp++) { |
1683 | if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0) | 1683 | if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name,dev) != 0) |
1684 | printk("FEC: Could not allocate %s IRQ(%d)!\n", | 1684 | printk("FEC: Could not allocate %s IRQ(%d)!\n", |
1685 | idp->name, b+idp->irq); | 1685 | idp->name, b+idp->irq); |
1686 | } | 1686 | } |
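All four fec_request_intrs() variants now pass IRQF_DISABLED rather than 0, so on these kernels the FEC handlers run with local interrupts disabled. A minimal sketch of that request pattern (the helper name is illustrative; fec_enet_interrupt() and the net_device come from the driver):

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    /* Sketch: request one FEC interrupt with IRQF_DISABLED, as the patch does.
     * fec_enet_interrupt() is the driver's existing handler. */
    static int fec_hook_irq(unsigned int irq, struct net_device *dev)
    {
            int ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED,
                                  "fec_enet", dev);
            if (ret)
                    printk(KERN_ERR "FEC: could not allocate IRQ %u\n", irq);
            return ret;
    }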
diff --git a/drivers/parisc/Kconfig b/drivers/parisc/Kconfig index 1d3b84b4af3f..553a9905299a 100644 --- a/drivers/parisc/Kconfig +++ b/drivers/parisc/Kconfig | |||
@@ -103,6 +103,11 @@ config IOMMU_SBA | |||
103 | depends on PCI_LBA | 103 | depends on PCI_LBA |
104 | default PCI_LBA | 104 | default PCI_LBA |
105 | 105 | ||
106 | config IOMMU_HELPER | ||
107 | bool | ||
108 | depends on IOMMU_SBA || IOMMU_CCIO | ||
109 | default y | ||
110 | |||
106 | #config PCI_EPIC | 111 | #config PCI_EPIC |
107 | # bool "EPIC/SAGA PCI support" | 112 | # bool "EPIC/SAGA PCI support" |
108 | # depends on PCI | 113 | # depends on PCI |
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index d08b284de196..60d338cd8009 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/proc_fs.h> | 43 | #include <linux/proc_fs.h> |
44 | #include <linux/seq_file.h> | 44 | #include <linux/seq_file.h> |
45 | #include <linux/scatterlist.h> | 45 | #include <linux/scatterlist.h> |
46 | #include <linux/iommu-helper.h> | ||
46 | 47 | ||
47 | #include <asm/byteorder.h> | 48 | #include <asm/byteorder.h> |
48 | #include <asm/cache.h> /* for L1_CACHE_BYTES */ | 49 | #include <asm/cache.h> /* for L1_CACHE_BYTES */ |
@@ -302,13 +303,17 @@ static int ioc_count; | |||
302 | */ | 303 | */ |
303 | #define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \ | 304 | #define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \ |
304 | for(; res_ptr < res_end; ++res_ptr) { \ | 305 | for(; res_ptr < res_end; ++res_ptr) { \ |
305 | if(0 == (*res_ptr & mask)) { \ | 306 | int ret;\ |
306 | *res_ptr |= mask; \ | 307 | unsigned int idx;\ |
307 | res_idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \ | 308 | idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \ |
308 | ioc->res_hint = res_idx + (size >> 3); \ | 309 | ret = iommu_is_span_boundary(idx << 3, pages_needed, 0, boundary_size);\ |
309 | goto resource_found; \ | 310 | if ((0 == (*res_ptr & mask)) && !ret) { \ |
310 | } \ | 311 | *res_ptr |= mask; \ |
311 | } | 312 | res_idx = idx;\ |
313 | ioc->res_hint = res_idx + (size >> 3); \ | ||
314 | goto resource_found; \ | ||
315 | } \ | ||
316 | } | ||
312 | 317 | ||
313 | #define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \ | 318 | #define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \ |
314 | u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \ | 319 | u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \ |
@@ -341,10 +346,11 @@ static int ioc_count; | |||
341 | * of available pages for the requested size. | 346 | * of available pages for the requested size. |
342 | */ | 347 | */ |
343 | static int | 348 | static int |
344 | ccio_alloc_range(struct ioc *ioc, size_t size) | 349 | ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size) |
345 | { | 350 | { |
346 | unsigned int pages_needed = size >> IOVP_SHIFT; | 351 | unsigned int pages_needed = size >> IOVP_SHIFT; |
347 | unsigned int res_idx; | 352 | unsigned int res_idx; |
353 | unsigned long boundary_size; | ||
348 | #ifdef CCIO_SEARCH_TIME | 354 | #ifdef CCIO_SEARCH_TIME |
349 | unsigned long cr_start = mfctl(16); | 355 | unsigned long cr_start = mfctl(16); |
350 | #endif | 356 | #endif |
@@ -360,6 +366,9 @@ ccio_alloc_range(struct ioc *ioc, size_t size) | |||
360 | ** ggg sacrifices another 710 to the computer gods. | 366 | ** ggg sacrifices another 710 to the computer gods. |
361 | */ | 367 | */ |
362 | 368 | ||
369 | boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 1 << IOVP_SHIFT); | ||
370 | boundary_size >>= IOVP_SHIFT; | ||
371 | |||
363 | if (pages_needed <= 8) { | 372 | if (pages_needed <= 8) { |
364 | /* | 373 | /* |
365 | * LAN traffic will not thrash the TLB IFF the same NIC | 374 | * LAN traffic will not thrash the TLB IFF the same NIC |
@@ -760,7 +769,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size, | |||
760 | ioc->msingle_pages += size >> IOVP_SHIFT; | 769 | ioc->msingle_pages += size >> IOVP_SHIFT; |
761 | #endif | 770 | #endif |
762 | 771 | ||
763 | idx = ccio_alloc_range(ioc, size); | 772 | idx = ccio_alloc_range(ioc, dev, size); |
764 | iovp = (dma_addr_t)MKIOVP(idx); | 773 | iovp = (dma_addr_t)MKIOVP(idx); |
765 | 774 | ||
766 | pdir_start = &(ioc->pdir_base[idx]); | 775 | pdir_start = &(ioc->pdir_base[idx]); |
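ccio_alloc_range() now takes the device so it can honour that device's DMA segment boundary: the boundary is converted into IOVP pages and each candidate bitmap position is rejected with iommu_is_span_boundary() if the mapping would cross it. A compact sketch of that test, parameterised on the page shift (IOVP_SHIFT in the driver):

    #include <linux/kernel.h>
    #include <linux/dma-mapping.h>
    #include <linux/iommu-helper.h>

    /* Sketch: would a mapping of "pages" IOVP pages starting at bitmap index
     * "idx" cross dev's segment boundary?  Mirrors the logic added to
     * CCIO_SEARCH_LOOP and ccio_alloc_range() above. */
    static int ccio_crosses_boundary(struct device *dev, unsigned long idx,
                                     unsigned int pages, unsigned int iovp_shift)
    {
            unsigned long boundary_size;

            boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1UL,
                                  1UL << iovp_shift) >> iovp_shift;

            return iommu_is_span_boundary(idx, pages, 0, boundary_size);
    }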
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h index 97ba8286c596..a9c46cc2db37 100644 --- a/drivers/parisc/iommu-helpers.h +++ b/drivers/parisc/iommu-helpers.h | |||
@@ -96,8 +96,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents, | |||
96 | 96 | ||
97 | static inline unsigned int | 97 | static inline unsigned int |
98 | iommu_coalesce_chunks(struct ioc *ioc, struct device *dev, | 98 | iommu_coalesce_chunks(struct ioc *ioc, struct device *dev, |
99 | struct scatterlist *startsg, int nents, | 99 | struct scatterlist *startsg, int nents, |
100 | int (*iommu_alloc_range)(struct ioc *, size_t)) | 100 | int (*iommu_alloc_range)(struct ioc *, struct device *, size_t)) |
101 | { | 101 | { |
102 | struct scatterlist *contig_sg; /* contig chunk head */ | 102 | struct scatterlist *contig_sg; /* contig chunk head */ |
103 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ | 103 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ |
@@ -166,7 +166,7 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev, | |||
166 | dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE); | 166 | dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE); |
167 | sg_dma_address(contig_sg) = | 167 | sg_dma_address(contig_sg) = |
168 | PIDE_FLAG | 168 | PIDE_FLAG |
169 | | (iommu_alloc_range(ioc, dma_len) << IOVP_SHIFT) | 169 | | (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT) |
170 | | dma_offset; | 170 | | dma_offset; |
171 | n_mappings++; | 171 | n_mappings++; |
172 | } | 172 | } |
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index d06627c3f353..e834127a8505 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/string.h> | 29 | #include <linux/string.h> |
30 | #include <linux/pci.h> | 30 | #include <linux/pci.h> |
31 | #include <linux/scatterlist.h> | 31 | #include <linux/scatterlist.h> |
32 | #include <linux/iommu-helper.h> | ||
32 | 33 | ||
33 | #include <asm/byteorder.h> | 34 | #include <asm/byteorder.h> |
34 | #include <asm/io.h> | 35 | #include <asm/io.h> |
@@ -313,6 +314,12 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) | |||
313 | #define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n))) | 314 | #define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n))) |
314 | #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1) | 315 | #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1) |
315 | 316 | ||
317 | unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr, | ||
318 | unsigned int bitshiftcnt) | ||
319 | { | ||
320 | return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3) | ||
321 | + bitshiftcnt; | ||
322 | } | ||
316 | 323 | ||
317 | /** | 324 | /** |
318 | * sba_search_bitmap - find free space in IO PDIR resource bitmap | 325 | * sba_search_bitmap - find free space in IO PDIR resource bitmap |
@@ -324,19 +331,36 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) | |||
324 | * Cool perf optimization: search for log2(size) bits at a time. | 331 | * Cool perf optimization: search for log2(size) bits at a time. |
325 | */ | 332 | */ |
326 | static SBA_INLINE unsigned long | 333 | static SBA_INLINE unsigned long |
327 | sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) | 334 | sba_search_bitmap(struct ioc *ioc, struct device *dev, |
335 | unsigned long bits_wanted) | ||
328 | { | 336 | { |
329 | unsigned long *res_ptr = ioc->res_hint; | 337 | unsigned long *res_ptr = ioc->res_hint; |
330 | unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); | 338 | unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); |
331 | unsigned long pide = ~0UL; | 339 | unsigned long pide = ~0UL, tpide; |
340 | unsigned long boundary_size; | ||
341 | unsigned long shift; | ||
342 | int ret; | ||
343 | |||
344 | boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 1 << IOVP_SHIFT); | ||
345 | boundary_size >>= IOVP_SHIFT; | ||
346 | |||
347 | #if defined(ZX1_SUPPORT) | ||
348 | BUG_ON(ioc->ibase & ~IOVP_MASK); | ||
349 | shift = ioc->ibase >> IOVP_SHIFT; | ||
350 | #else | ||
351 | shift = 0; | ||
352 | #endif | ||
332 | 353 | ||
333 | if (bits_wanted > (BITS_PER_LONG/2)) { | 354 | if (bits_wanted > (BITS_PER_LONG/2)) { |
334 | /* Search word at a time - no mask needed */ | 355 | /* Search word at a time - no mask needed */ |
335 | for(; res_ptr < res_end; ++res_ptr) { | 356 | for(; res_ptr < res_end; ++res_ptr) { |
336 | if (*res_ptr == 0) { | 357 | tpide = ptr_to_pide(ioc, res_ptr, 0); |
358 | ret = iommu_is_span_boundary(tpide, bits_wanted, | ||
359 | shift, | ||
360 | boundary_size); | ||
361 | if ((*res_ptr == 0) && !ret) { | ||
337 | *res_ptr = RESMAP_MASK(bits_wanted); | 362 | *res_ptr = RESMAP_MASK(bits_wanted); |
338 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | 363 | pide = tpide; |
339 | pide <<= 3; /* convert to bit address */ | ||
340 | break; | 364 | break; |
341 | } | 365 | } |
342 | } | 366 | } |
@@ -365,11 +389,13 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) | |||
365 | { | 389 | { |
366 | DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); | 390 | DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); |
367 | WARN_ON(mask == 0); | 391 | WARN_ON(mask == 0); |
368 | if(((*res_ptr) & mask) == 0) { | 392 | tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt); |
393 | ret = iommu_is_span_boundary(tpide, bits_wanted, | ||
394 | shift, | ||
395 | boundary_size); | ||
396 | if ((((*res_ptr) & mask) == 0) && !ret) { | ||
369 | *res_ptr |= mask; /* mark resources busy! */ | 397 | *res_ptr |= mask; /* mark resources busy! */ |
370 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | 398 | pide = tpide; |
371 | pide <<= 3; /* convert to bit address */ | ||
372 | pide += bitshiftcnt; | ||
373 | break; | 399 | break; |
374 | } | 400 | } |
375 | mask >>= o; | 401 | mask >>= o; |
@@ -404,7 +430,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) | |||
404 | * resource bit map. | 430 | * resource bit map. |
405 | */ | 431 | */ |
406 | static int | 432 | static int |
407 | sba_alloc_range(struct ioc *ioc, size_t size) | 433 | sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size) |
408 | { | 434 | { |
409 | unsigned int pages_needed = size >> IOVP_SHIFT; | 435 | unsigned int pages_needed = size >> IOVP_SHIFT; |
410 | #ifdef SBA_COLLECT_STATS | 436 | #ifdef SBA_COLLECT_STATS |
@@ -412,9 +438,9 @@ sba_alloc_range(struct ioc *ioc, size_t size) | |||
412 | #endif | 438 | #endif |
413 | unsigned long pide; | 439 | unsigned long pide; |
414 | 440 | ||
415 | pide = sba_search_bitmap(ioc, pages_needed); | 441 | pide = sba_search_bitmap(ioc, dev, pages_needed); |
416 | if (pide >= (ioc->res_size << 3)) { | 442 | if (pide >= (ioc->res_size << 3)) { |
417 | pide = sba_search_bitmap(ioc, pages_needed); | 443 | pide = sba_search_bitmap(ioc, dev, pages_needed); |
418 | if (pide >= (ioc->res_size << 3)) | 444 | if (pide >= (ioc->res_size << 3)) |
419 | panic("%s: I/O MMU @ %p is out of mapping resources\n", | 445 | panic("%s: I/O MMU @ %p is out of mapping resources\n", |
420 | __FILE__, ioc->ioc_hpa); | 446 | __FILE__, ioc->ioc_hpa); |
@@ -710,7 +736,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, | |||
710 | ioc->msingle_calls++; | 736 | ioc->msingle_calls++; |
711 | ioc->msingle_pages += size >> IOVP_SHIFT; | 737 | ioc->msingle_pages += size >> IOVP_SHIFT; |
712 | #endif | 738 | #endif |
713 | pide = sba_alloc_range(ioc, size); | 739 | pide = sba_alloc_range(ioc, dev, size); |
714 | iovp = (dma_addr_t) pide << IOVP_SHIFT; | 740 | iovp = (dma_addr_t) pide << IOVP_SHIFT; |
715 | 741 | ||
716 | DBG_RUN("%s() 0x%p -> 0x%lx\n", | 742 | DBG_RUN("%s() 0x%p -> 0x%lx\n", |
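sba_search_bitmap() previously converted the resource-map pointer into a PDIR index ("pide") only after a hit; the new ptr_to_pide() helper performs that conversion up front so the candidate index can be checked against the segment boundary before any bits are claimed. The conversion itself is just the byte offset into the bitmap times eight, plus the bit offset within the current word:

    /* Standalone restatement of the ptr_to_pide() helper added above. */
    static unsigned long bitmap_ptr_to_index(const unsigned long *res_map,
                                             const unsigned long *res_ptr,
                                             unsigned int bitshiftcnt)
    {
            return (((unsigned long)res_ptr - (unsigned long)res_map) << 3)
                    + bitshiftcnt;
    }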
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index ef5a6a245f5f..6a9403d79e0c 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
@@ -145,13 +145,15 @@ void pci_bus_add_devices(struct pci_bus *bus) | |||
145 | child_bus = dev->subordinate; | 145 | child_bus = dev->subordinate; |
146 | child_bus->dev.parent = child_bus->bridge; | 146 | child_bus->dev.parent = child_bus->bridge; |
147 | retval = device_register(&child_bus->dev); | 147 | retval = device_register(&child_bus->dev); |
148 | if (!retval) | 148 | if (retval) |
149 | dev_err(&dev->dev, "Error registering pci_bus," | ||
150 | " continuing...\n"); | ||
151 | else | ||
149 | retval = device_create_file(&child_bus->dev, | 152 | retval = device_create_file(&child_bus->dev, |
150 | &dev_attr_cpuaffinity); | 153 | &dev_attr_cpuaffinity); |
151 | if (retval) | 154 | if (retval) |
152 | dev_err(&dev->dev, "Error registering pci_bus" | 155 | dev_err(&dev->dev, "Error creating cpuaffinity" |
153 | " device bridge symlink," | 156 | " file, continuing...\n"); |
154 | " continuing...\n"); | ||
155 | } | 157 | } |
156 | } | 158 | } |
157 | } | 159 | } |
diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c index a590ef682153..4d4a64478404 100644 --- a/drivers/pci/hotplug-pci.c +++ b/drivers/pci/hotplug-pci.c | |||
@@ -4,7 +4,7 @@ | |||
4 | #include "pci.h" | 4 | #include "pci.h" |
5 | 5 | ||
6 | 6 | ||
7 | unsigned int pci_do_scan_bus(struct pci_bus *bus) | 7 | unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus) |
8 | { | 8 | { |
9 | unsigned int max; | 9 | unsigned int max; |
10 | 10 | ||
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index cf22f9e01e00..5e50008d1181 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -1085,7 +1085,7 @@ static int acpiphp_bus_trim(acpi_handle handle) | |||
1085 | * This function should be called per *physical slot*, | 1085 | * This function should be called per *physical slot*, |
1086 | * not per each slot object in ACPI namespace. | 1086 | * not per each slot object in ACPI namespace. |
1087 | */ | 1087 | */ |
1088 | static int enable_device(struct acpiphp_slot *slot) | 1088 | static int __ref enable_device(struct acpiphp_slot *slot) |
1089 | { | 1089 | { |
1090 | struct pci_dev *dev; | 1090 | struct pci_dev *dev; |
1091 | struct pci_bus *bus = slot->bridge->pci_bus; | 1091 | struct pci_bus *bus = slot->bridge->pci_bus; |
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c index 5e9be44817cb..b3515fc4cd38 100644 --- a/drivers/pci/hotplug/cpci_hotplug_pci.c +++ b/drivers/pci/hotplug/cpci_hotplug_pci.c | |||
@@ -250,7 +250,7 @@ int cpci_led_off(struct slot* slot) | |||
250 | * Device configuration functions | 250 | * Device configuration functions |
251 | */ | 251 | */ |
252 | 252 | ||
253 | int cpci_configure_slot(struct slot* slot) | 253 | int __ref cpci_configure_slot(struct slot *slot) |
254 | { | 254 | { |
255 | struct pci_bus *parent; | 255 | struct pci_bus *parent; |
256 | int fn; | 256 | int fn; |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 6eba9b2cfb90..698975a6a21c 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -711,7 +711,8 @@ static int hpc_power_off_slot(struct slot * slot) | |||
711 | retval = pcie_write_cmd(slot, slot_cmd, cmd_mask); | 711 | retval = pcie_write_cmd(slot, slot_cmd, cmd_mask); |
712 | if (retval) { | 712 | if (retval) { |
713 | err("%s: Write command failed!\n", __FUNCTION__); | 713 | err("%s: Write command failed!\n", __FUNCTION__); |
714 | return -1; | 714 | retval = -1; |
715 | goto out; | ||
715 | } | 716 | } |
716 | dbg("%s: SLOTCTRL %x write cmd %x\n", | 717 | dbg("%s: SLOTCTRL %x write cmd %x\n", |
717 | __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd); | 718 | __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd); |
@@ -722,7 +723,7 @@ static int hpc_power_off_slot(struct slot * slot) | |||
722 | * removed from the slot/adapter. | 723 | * removed from the slot/adapter. |
723 | */ | 724 | */ |
724 | msleep(1000); | 725 | msleep(1000); |
725 | 726 | out: | |
726 | if (changed) | 727 | if (changed) |
727 | pcie_unmask_bad_dllp(ctrl); | 728 | pcie_unmask_bad_dllp(ctrl); |
728 | 729 | ||
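The hpc_power_off_slot() fix replaces an early "return -1" with "retval = -1; goto out;" so that pcie_unmask_bad_dllp() is no longer skipped when the write command fails. A sketch of the resulting cleanup-on-error shape (the slot/ctrl types and helpers are the driver's own; this is illustrative, not the full function):

    #include <linux/delay.h>

    /* Sketch: every exit path, including the write failure, now goes through
     * "out", so the bad-DLLP mask is always restored when "changed" is set. */
    static int power_off_sketch(struct slot *slot, struct controller *ctrl,
                                u16 slot_cmd, u16 cmd_mask, int changed)
    {
            int retval;

            retval = pcie_write_cmd(slot, slot_cmd, cmd_mask);
            if (retval) {
                    retval = -1;
                    goto out;
            }
            msleep(1000);   /* give the adapter time to power down */
    out:
            if (changed)
                    pcie_unmask_bad_dllp(ctrl);
            return retval;
    }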
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index dd50713966d1..9372a840b63d 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c | |||
@@ -167,7 +167,7 @@ static void program_fw_provided_values(struct pci_dev *dev) | |||
167 | } | 167 | } |
168 | } | 168 | } |
169 | 169 | ||
170 | static int pciehp_add_bridge(struct pci_dev *dev) | 170 | static int __ref pciehp_add_bridge(struct pci_dev *dev) |
171 | { | 171 | { |
172 | struct pci_bus *parent = dev->bus; | 172 | struct pci_bus *parent = dev->bus; |
173 | int pass, busnr, start = parent->secondary; | 173 | int pass, busnr, start = parent->secondary; |
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c index 0a6b25ef194c..a69a21520895 100644 --- a/drivers/pci/hotplug/shpchp_pci.c +++ b/drivers/pci/hotplug/shpchp_pci.c | |||
@@ -96,7 +96,7 @@ static void program_fw_provided_values(struct pci_dev *dev) | |||
96 | } | 96 | } |
97 | } | 97 | } |
98 | 98 | ||
99 | int shpchp_configure_device(struct slot *p_slot) | 99 | int __ref shpchp_configure_device(struct slot *p_slot) |
100 | { | 100 | { |
101 | struct pci_dev *dev; | 101 | struct pci_dev *dev; |
102 | struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; | 102 | struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 4d23b9fb551b..2db2e4bb0d1e 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -286,7 +286,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) | |||
286 | } | 286 | } |
287 | } | 287 | } |
288 | 288 | ||
289 | void pci_read_bridge_bases(struct pci_bus *child) | 289 | void __devinit pci_read_bridge_bases(struct pci_bus *child) |
290 | { | 290 | { |
291 | struct pci_dev *dev = child->self; | 291 | struct pci_dev *dev = child->self; |
292 | u8 io_base_lo, io_limit_lo; | 292 | u8 io_base_lo, io_limit_lo; |
@@ -472,7 +472,7 @@ static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max) | |||
472 | * them, we proceed to assigning numbers to the remaining buses in | 472 | * them, we proceed to assigning numbers to the remaining buses in |
473 | * order to avoid overlaps between old and new bus numbers. | 473 | * order to avoid overlaps between old and new bus numbers. |
474 | */ | 474 | */ |
475 | int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass) | 475 | int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) |
476 | { | 476 | { |
477 | struct pci_bus *child; | 477 | struct pci_bus *child; |
478 | int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); | 478 | int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); |
@@ -1008,7 +1008,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn) | |||
1008 | return nr; | 1008 | return nr; |
1009 | } | 1009 | } |
1010 | 1010 | ||
1011 | unsigned int pci_scan_child_bus(struct pci_bus *bus) | 1011 | unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) |
1012 | { | 1012 | { |
1013 | unsigned int devfn, pass, max = bus->secondary; | 1013 | unsigned int devfn, pass, max = bus->secondary; |
1014 | struct pci_dev *dev; | 1014 | struct pci_dev *dev; |
@@ -1116,7 +1116,7 @@ err_out: | |||
1116 | return NULL; | 1116 | return NULL; |
1117 | } | 1117 | } |
1118 | 1118 | ||
1119 | struct pci_bus *pci_scan_bus_parented(struct device *parent, | 1119 | struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent, |
1120 | int bus, struct pci_ops *ops, void *sysdata) | 1120 | int bus, struct pci_ops *ops, void *sysdata) |
1121 | { | 1121 | { |
1122 | struct pci_bus *b; | 1122 | struct pci_bus *b; |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index bbad4a9f264f..e9a333d98552 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -1652,9 +1652,8 @@ static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) | |||
1652 | pci_write_config_byte(dev, 0x75, 0x1); | 1652 | pci_write_config_byte(dev, 0x75, 0x1); |
1653 | pci_write_config_byte(dev, 0x77, 0x0); | 1653 | pci_write_config_byte(dev, 0x77, 0x0); |
1654 | 1654 | ||
1655 | printk(KERN_INFO | 1655 | dev_info(&dev->dev, |
1656 | "PCI: VIA CX700 PCI parking/caching fixup on %s\n", | 1656 | "Disabling VIA CX700 PCI parking/caching\n"); |
1657 | pci_name(dev)); | ||
1658 | } | 1657 | } |
1659 | } | 1658 | } |
1660 | } | 1659 | } |
@@ -1726,32 +1725,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2 | |||
1726 | quirk_msi_ht_cap); | 1725 | quirk_msi_ht_cap); |
1727 | 1726 | ||
1728 | 1727 | ||
1729 | /* | ||
1730 | * Force enable MSI mapping capability on HT bridges | ||
1731 | */ | ||
1732 | static void __devinit quirk_msi_ht_cap_enable(struct pci_dev *dev) | ||
1733 | { | ||
1734 | int pos, ttl = 48; | ||
1735 | |||
1736 | pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); | ||
1737 | while (pos && ttl--) { | ||
1738 | u8 flags; | ||
1739 | |||
1740 | if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { | ||
1741 | printk(KERN_INFO "PCI: Enabling HT MSI Mapping on %s\n", | ||
1742 | pci_name(dev)); | ||
1743 | |||
1744 | pci_write_config_byte(dev, pos + HT_MSI_FLAGS, | ||
1745 | flags | HT_MSI_FLAGS_ENABLE); | ||
1746 | } | ||
1747 | pos = pci_find_next_ht_capability(dev, pos, | ||
1748 | HT_CAPTYPE_MSI_MAPPING); | ||
1749 | } | ||
1750 | } | ||
1751 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, | ||
1752 | PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, | ||
1753 | quirk_msi_ht_cap_enable); | ||
1754 | |||
1755 | /* The nVidia CK804 chipset may have 2 HT MSI mappings. | 1728 | /* The nVidia CK804 chipset may have 2 HT MSI mappings. |
1756 | * MSI are supported if the MSI capability set in any of these mappings. | 1729 | * MSI are supported if the MSI capability set in any of these mappings. |
1757 | */ | 1730 | */ |
@@ -1778,9 +1751,8 @@ static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev) | |||
1778 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, | 1751 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, |
1779 | quirk_nvidia_ck804_msi_ht_cap); | 1752 | quirk_nvidia_ck804_msi_ht_cap); |
1780 | 1753 | ||
1781 | /* | 1754 | /* Force enable MSI mapping capability on HT bridges */ |
1782 | * Force enable MSI mapping capability on HT bridges */ | 1755 | static void __devinit ht_enable_msi_mapping(struct pci_dev *dev) |
1783 | static inline void ht_enable_msi_mapping(struct pci_dev *dev) | ||
1784 | { | 1756 | { |
1785 | int pos, ttl = 48; | 1757 | int pos, ttl = 48; |
1786 | 1758 | ||
@@ -1799,6 +1771,9 @@ static inline void ht_enable_msi_mapping(struct pci_dev *dev) | |||
1799 | HT_CAPTYPE_MSI_MAPPING); | 1771 | HT_CAPTYPE_MSI_MAPPING); |
1800 | } | 1772 | } |
1801 | } | 1773 | } |
1774 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, | ||
1775 | PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, | ||
1776 | ht_enable_msi_mapping); | ||
1802 | 1777 | ||
1803 | static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) | 1778 | static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) |
1804 | { | 1779 | { |
@@ -1830,7 +1805,7 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) | |||
1830 | 1805 | ||
1831 | if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, | 1806 | if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, |
1832 | &flags) == 0) { | 1807 | &flags) == 0) { |
1833 | dev_info(&dev->dev, "Quirk disabling HT MSI mapping"); | 1808 | dev_info(&dev->dev, "Disabling HT MSI mapping"); |
1834 | pci_write_config_byte(dev, pos + HT_MSI_FLAGS, | 1809 | pci_write_config_byte(dev, pos + HT_MSI_FLAGS, |
1835 | flags & ~HT_MSI_FLAGS_ENABLE); | 1810 | flags & ~HT_MSI_FLAGS_ENABLE); |
1836 | } | 1811 | } |
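The removed quirk_msi_ht_cap_enable() duplicated the surviving ht_enable_msi_mapping(), so the patch drops the copy, un-inlines the keeper, and moves the DECLARE_PCI_FIXUP_HEADER registration for the ServerWorks HT1000 bridge onto it. The shared capability walk looks roughly like this (a sketch assembled from the code shown above, not new functionality):

    /* Sketch of the HT MSI-mapping enable walk kept by ht_enable_msi_mapping():
     * visit each HT_CAPTYPE_MSI_MAPPING capability and set its enable flag. */
    static void __devinit ht_msi_enable_sketch(struct pci_dev *dev)
    {
            int pos, ttl = 48;              /* bound the walk, as the driver does */

            pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
            while (pos && ttl--) {
                    u8 flags;

                    if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0)
                            pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
                                                  flags | HT_MSI_FLAGS_ENABLE);
                    pos = pci_find_next_ht_capability(dev, pos,
                                                      HT_CAPTYPE_MSI_MAPPING);
            }
    }
    DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
                             PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
                             ht_msi_enable_sketch);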
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index a98b2470b9ea..bd5c0e031398 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c | |||
@@ -242,8 +242,7 @@ void pci_remove_rom(struct pci_dev *pdev) | |||
242 | #endif /* 0 */ | 242 | #endif /* 0 */ |
243 | 243 | ||
244 | /** | 244 | /** |
245 | * pci_cleanup_rom - internal routine for freeing the ROM copy created | 245 | * pci_cleanup_rom - free the ROM copy created by pci_map_rom_copy |
246 | * by pci_map_rom_copy called from remove.c | ||
247 | * @pdev: pointer to pci device struct | 246 | * @pdev: pointer to pci device struct |
248 | * | 247 | * |
249 | * Free the copied ROM if we allocated one. | 248 | * Free the copied ROM if we allocated one. |
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c index 5480119ff9d3..3ce9f3defc12 100644 --- a/drivers/rapidio/rio-driver.c +++ b/drivers/rapidio/rio-driver.c | |||
@@ -78,8 +78,7 @@ void rio_dev_put(struct rio_dev *rdev) | |||
78 | } | 78 | } |
79 | 79 | ||
80 | /** | 80 | /** |
81 | * rio_device_probe - Tell if a RIO device structure has a matching RIO | 81 | * rio_device_probe - Tell if a RIO device structure has a matching RIO device id structure |
82 | * device id structure | ||
83 | * @id: the RIO device id structure to match against | 82 | * @id: the RIO device id structure to match against |
84 | * @dev: the RIO device structure to match against | 83 | * @dev: the RIO device structure to match against |
85 | * | 84 | * |
@@ -137,7 +136,7 @@ static int rio_device_remove(struct device *dev) | |||
137 | * rio_register_driver - register a new RIO driver | 136 | * rio_register_driver - register a new RIO driver |
138 | * @rdrv: the RIO driver structure to register | 137 | * @rdrv: the RIO driver structure to register |
139 | * | 138 | * |
140 | * Adds a &struct rio_driver to the list of registered drivers | 139 | * Adds a &struct rio_driver to the list of registered drivers. |
141 | * Returns a negative value on error, otherwise 0. If no error | 140 | * Returns a negative value on error, otherwise 0. If no error |
142 | * occurred, the driver remains registered even if no device | 141 | * occurred, the driver remains registered even if no device |
143 | * was claimed during registration. | 142 | * was claimed during registration. |
@@ -167,8 +166,7 @@ void rio_unregister_driver(struct rio_driver *rdrv) | |||
167 | } | 166 | } |
168 | 167 | ||
169 | /** | 168 | /** |
170 | * rio_match_bus - Tell if a RIO device structure has a matching RIO | 169 | * rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure |
171 | * driver device id structure | ||
172 | * @dev: the standard device structure to match against | 170 | * @dev: the standard device structure to match against |
173 | * @drv: the standard driver structure containing the ids to match against | 171 | * @drv: the standard driver structure containing the ids to match against |
174 | * | 172 | * |
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 6402d699072b..82f5ad9c3af4 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -250,6 +250,15 @@ config RTC_DRV_TWL92330 | |||
250 | platforms. The support is integrated with the rest of | 250 | platforms. The support is integrated with the rest of |
251 | the Menelaus driver; it's not a separate module. | 251 | the Menelaus driver; it's not a separate module. |
252 | 252 | ||
253 | config RTC_DRV_S35390A | ||
254 | tristate "Seiko Instruments S-35390A" | ||
255 | help | ||
256 | If you say yes here you will get support for the Seiko | ||
257 | Instruments S-35390A. | ||
258 | |||
259 | This driver can also be built as a module. If so the module | ||
260 | will be called rtc-s35390a. | ||
261 | |||
253 | endif # I2C | 262 | endif # I2C |
254 | 263 | ||
255 | comment "SPI RTC drivers" | 264 | comment "SPI RTC drivers" |
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index ec703f34ab86..872f1218ff9f 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile | |||
@@ -45,6 +45,7 @@ obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o | |||
45 | obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o | 45 | obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o |
46 | obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o | 46 | obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o |
47 | obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o | 47 | obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o |
48 | obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o | ||
48 | obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o | 49 | obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o |
49 | obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o | 50 | obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o |
50 | obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o | 51 | obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o |
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c new file mode 100644 index 000000000000..e8abc90c32c5 --- /dev/null +++ b/drivers/rtc/rtc-s35390a.c | |||
@@ -0,0 +1,316 @@ | |||
1 | /* | ||
2 | * Seiko Instruments S-35390A RTC Driver | ||
3 | * | ||
4 | * Copyright (c) 2007 Byron Bradley | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/rtc.h> | ||
14 | #include <linux/i2c.h> | ||
15 | #include <linux/bitrev.h> | ||
16 | #include <linux/bcd.h> | ||
17 | #include <linux/slab.h> | ||
18 | |||
19 | #define S35390A_CMD_STATUS1 0 | ||
20 | #define S35390A_CMD_STATUS2 1 | ||
21 | #define S35390A_CMD_TIME1 2 | ||
22 | |||
23 | #define S35390A_BYTE_YEAR 0 | ||
24 | #define S35390A_BYTE_MONTH 1 | ||
25 | #define S35390A_BYTE_DAY 2 | ||
26 | #define S35390A_BYTE_WDAY 3 | ||
27 | #define S35390A_BYTE_HOURS 4 | ||
28 | #define S35390A_BYTE_MINS 5 | ||
29 | #define S35390A_BYTE_SECS 6 | ||
30 | |||
31 | #define S35390A_FLAG_POC 0x01 | ||
32 | #define S35390A_FLAG_BLD 0x02 | ||
33 | #define S35390A_FLAG_24H 0x40 | ||
34 | #define S35390A_FLAG_RESET 0x80 | ||
35 | #define S35390A_FLAG_TEST 0x01 | ||
36 | |||
37 | struct s35390a { | ||
38 | struct i2c_client *client[8]; | ||
39 | struct rtc_device *rtc; | ||
40 | int twentyfourhour; | ||
41 | }; | ||
42 | |||
43 | static int s35390a_set_reg(struct s35390a *s35390a, int reg, char *buf, int len) | ||
44 | { | ||
45 | struct i2c_client *client = s35390a->client[reg]; | ||
46 | struct i2c_msg msg[] = { | ||
47 | { client->addr, 0, len, buf }, | ||
48 | }; | ||
49 | |||
50 | if ((i2c_transfer(client->adapter, msg, 1)) != 1) | ||
51 | return -EIO; | ||
52 | |||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len) | ||
57 | { | ||
58 | struct i2c_client *client = s35390a->client[reg]; | ||
59 | struct i2c_msg msg[] = { | ||
60 | { client->addr, I2C_M_RD, len, buf }, | ||
61 | }; | ||
62 | |||
63 | if ((i2c_transfer(client->adapter, msg, 1)) != 1) | ||
64 | return -EIO; | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static int s35390a_reset(struct s35390a *s35390a) | ||
70 | { | ||
71 | char buf[1]; | ||
72 | |||
73 | if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0) | ||
74 | return -EIO; | ||
75 | |||
76 | if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD))) | ||
77 | return 0; | ||
78 | |||
79 | buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H); | ||
80 | buf[0] &= 0xf0; | ||
81 | return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); | ||
82 | } | ||
83 | |||
84 | static int s35390a_disable_test_mode(struct s35390a *s35390a) | ||
85 | { | ||
86 | char buf[1]; | ||
87 | |||
88 | if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)) < 0) | ||
89 | return -EIO; | ||
90 | |||
91 | if (!(buf[0] & S35390A_FLAG_TEST)) | ||
92 | return 0; | ||
93 | |||
94 | buf[0] &= ~S35390A_FLAG_TEST; | ||
95 | return s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)); | ||
96 | } | ||
97 | |||
98 | static char s35390a_hr2reg(struct s35390a *s35390a, int hour) | ||
99 | { | ||
100 | if (s35390a->twentyfourhour) | ||
101 | return BIN2BCD(hour); | ||
102 | |||
103 | if (hour < 12) | ||
104 | return BIN2BCD(hour); | ||
105 | |||
106 | return 0x40 | BIN2BCD(hour - 12); | ||
107 | } | ||
108 | |||
109 | static int s35390a_reg2hr(struct s35390a *s35390a, char reg) | ||
110 | { | ||
111 | unsigned hour; | ||
112 | |||
113 | if (s35390a->twentyfourhour) | ||
114 | return BCD2BIN(reg & 0x3f); | ||
115 | |||
116 | hour = BCD2BIN(reg & 0x3f); | ||
117 | if (reg & 0x40) | ||
118 | hour += 12; | ||
119 | |||
120 | return hour; | ||
121 | } | ||
122 | |||
123 | static int s35390a_set_datetime(struct i2c_client *client, struct rtc_time *tm) | ||
124 | { | ||
125 | struct s35390a *s35390a = i2c_get_clientdata(client); | ||
126 | int i, err; | ||
127 | char buf[7]; | ||
128 | |||
129 | dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d mday=%d, " | ||
130 | "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, | ||
131 | tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, | ||
132 | tm->tm_wday); | ||
133 | |||
134 | buf[S35390A_BYTE_YEAR] = BIN2BCD(tm->tm_year - 100); | ||
135 | buf[S35390A_BYTE_MONTH] = BIN2BCD(tm->tm_mon + 1); | ||
136 | buf[S35390A_BYTE_DAY] = BIN2BCD(tm->tm_mday); | ||
137 | buf[S35390A_BYTE_WDAY] = BIN2BCD(tm->tm_wday); | ||
138 | buf[S35390A_BYTE_HOURS] = s35390a_hr2reg(s35390a, tm->tm_hour); | ||
139 | buf[S35390A_BYTE_MINS] = BIN2BCD(tm->tm_min); | ||
140 | buf[S35390A_BYTE_SECS] = BIN2BCD(tm->tm_sec); | ||
141 | |||
142 | /* This chip expects the bits of each byte to be in reverse order */ | ||
143 | for (i = 0; i < 7; ++i) | ||
144 | buf[i] = bitrev8(buf[i]); | ||
145 | |||
146 | err = s35390a_set_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); | ||
147 | |||
148 | return err; | ||
149 | } | ||
150 | |||
151 | static int s35390a_get_datetime(struct i2c_client *client, struct rtc_time *tm) | ||
152 | { | ||
153 | struct s35390a *s35390a = i2c_get_clientdata(client); | ||
154 | char buf[7]; | ||
155 | int i, err; | ||
156 | |||
157 | err = s35390a_get_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); | ||
158 | if (err < 0) | ||
159 | return err; | ||
160 | |||
161 | /* This chip returns the bits of each byte in reverse order */ | ||
162 | for (i = 0; i < 7; ++i) | ||
163 | buf[i] = bitrev8(buf[i]); | ||
164 | |||
165 | tm->tm_sec = BCD2BIN(buf[S35390A_BYTE_SECS]); | ||
166 | tm->tm_min = BCD2BIN(buf[S35390A_BYTE_MINS]); | ||
167 | tm->tm_hour = s35390a_reg2hr(s35390a, buf[S35390A_BYTE_HOURS]); | ||
168 | tm->tm_wday = BCD2BIN(buf[S35390A_BYTE_WDAY]); | ||
169 | tm->tm_mday = BCD2BIN(buf[S35390A_BYTE_DAY]); | ||
170 | tm->tm_mon = BCD2BIN(buf[S35390A_BYTE_MONTH]) - 1; | ||
171 | tm->tm_year = BCD2BIN(buf[S35390A_BYTE_YEAR]) + 100; | ||
172 | |||
173 | dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, mday=%d, " | ||
174 | "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, | ||
175 | tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, | ||
176 | tm->tm_wday); | ||
177 | |||
178 | return rtc_valid_tm(tm); | ||
179 | } | ||
180 | |||
181 | static int s35390a_rtc_read_time(struct device *dev, struct rtc_time *tm) | ||
182 | { | ||
183 | return s35390a_get_datetime(to_i2c_client(dev), tm); | ||
184 | } | ||
185 | |||
186 | static int s35390a_rtc_set_time(struct device *dev, struct rtc_time *tm) | ||
187 | { | ||
188 | return s35390a_set_datetime(to_i2c_client(dev), tm); | ||
189 | } | ||
190 | |||
191 | static const struct rtc_class_ops s35390a_rtc_ops = { | ||
192 | .read_time = s35390a_rtc_read_time, | ||
193 | .set_time = s35390a_rtc_set_time, | ||
194 | }; | ||
195 | |||
196 | static struct i2c_driver s35390a_driver; | ||
197 | |||
198 | static int s35390a_probe(struct i2c_client *client) | ||
199 | { | ||
200 | int err; | ||
201 | unsigned int i; | ||
202 | struct s35390a *s35390a; | ||
203 | struct rtc_time tm; | ||
204 | char buf[1]; | ||
205 | |||
206 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { | ||
207 | err = -ENODEV; | ||
208 | goto exit; | ||
209 | } | ||
210 | |||
211 | s35390a = kzalloc(sizeof(struct s35390a), GFP_KERNEL); | ||
212 | if (!s35390a) { | ||
213 | err = -ENOMEM; | ||
214 | goto exit; | ||
215 | } | ||
216 | |||
217 | s35390a->client[0] = client; | ||
218 | i2c_set_clientdata(client, s35390a); | ||
219 | |||
220 | /* This chip uses multiple addresses, use dummy devices for them */ | ||
221 | for (i = 1; i < 8; ++i) { | ||
222 | s35390a->client[i] = i2c_new_dummy(client->adapter, | ||
223 | client->addr + i, "rtc-s35390a"); | ||
224 | if (!s35390a->client[i]) { | ||
225 | dev_err(&client->dev, "Address %02x unavailable\n", | ||
226 | client->addr + i); | ||
227 | err = -EBUSY; | ||
228 | goto exit_dummy; | ||
229 | } | ||
230 | } | ||
231 | |||
232 | err = s35390a_reset(s35390a); | ||
233 | if (err < 0) { | ||
234 | dev_err(&client->dev, "error resetting chip\n"); | ||
235 | goto exit_dummy; | ||
236 | } | ||
237 | |||
238 | err = s35390a_disable_test_mode(s35390a); | ||
239 | if (err < 0) { | ||
240 | dev_err(&client->dev, "error disabling test mode\n"); | ||
241 | goto exit_dummy; | ||
242 | } | ||
243 | |||
244 | err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); | ||
245 | if (err < 0) { | ||
246 | dev_err(&client->dev, "error checking 12/24 hour mode\n"); | ||
247 | goto exit_dummy; | ||
248 | } | ||
249 | if (buf[0] & S35390A_FLAG_24H) | ||
250 | s35390a->twentyfourhour = 1; | ||
251 | else | ||
252 | s35390a->twentyfourhour = 0; | ||
253 | |||
254 | if (s35390a_get_datetime(client, &tm) < 0) | ||
255 | dev_warn(&client->dev, "clock needs to be set\n"); | ||
256 | |||
257 | s35390a->rtc = rtc_device_register(s35390a_driver.driver.name, | ||
258 | &client->dev, &s35390a_rtc_ops, THIS_MODULE); | ||
259 | |||
260 | if (IS_ERR(s35390a->rtc)) { | ||
261 | err = PTR_ERR(s35390a->rtc); | ||
262 | goto exit_dummy; | ||
263 | } | ||
264 | return 0; | ||
265 | |||
266 | exit_dummy: | ||
267 | for (i = 1; i < 8; ++i) | ||
268 | if (s35390a->client[i]) | ||
269 | i2c_unregister_device(s35390a->client[i]); | ||
270 | kfree(s35390a); | ||
271 | i2c_set_clientdata(client, NULL); | ||
272 | |||
273 | exit: | ||
274 | return err; | ||
275 | } | ||
276 | |||
277 | static int s35390a_remove(struct i2c_client *client) | ||
278 | { | ||
279 | unsigned int i; | ||
280 | |||
281 | struct s35390a *s35390a = i2c_get_clientdata(client); | ||
282 | for (i = 1; i < 8; ++i) | ||
283 | if (s35390a->client[i]) | ||
284 | i2c_unregister_device(s35390a->client[i]); | ||
285 | |||
286 | rtc_device_unregister(s35390a->rtc); | ||
287 | kfree(s35390a); | ||
288 | i2c_set_clientdata(client, NULL); | ||
289 | |||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | static struct i2c_driver s35390a_driver = { | ||
294 | .driver = { | ||
295 | .name = "rtc-s35390a", | ||
296 | }, | ||
297 | .probe = s35390a_probe, | ||
298 | .remove = s35390a_remove, | ||
299 | }; | ||
300 | |||
301 | static int __init s35390a_rtc_init(void) | ||
302 | { | ||
303 | return i2c_add_driver(&s35390a_driver); | ||
304 | } | ||
305 | |||
306 | static void __exit s35390a_rtc_exit(void) | ||
307 | { | ||
308 | i2c_del_driver(&s35390a_driver); | ||
309 | } | ||
310 | |||
311 | MODULE_AUTHOR("Byron Bradley <byron.bbradley@gmail.com>"); | ||
312 | MODULE_DESCRIPTION("S35390A RTC driver"); | ||
313 | MODULE_LICENSE("GPL"); | ||
314 | |||
315 | module_init(s35390a_rtc_init); | ||
316 | module_exit(s35390a_rtc_exit); | ||
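The S-35390A transfers every register byte LSB-first, which is why the new driver runs each time byte through bitrev8() before the usual BCD conversion, and why the hour field additionally depends on the 24-hour flag read from status register 1. A standalone C illustration of the decode step (plain user-space code, not the driver itself):

    #include <stdio.h>

    /* Standalone illustration: a raw wire byte is bit-reversed and then its
     * BCD fields are decoded, mirroring the bitrev8()/BCD2BIN() steps above. */
    static unsigned char rev8(unsigned char b)
    {
            unsigned char r = 0;
            int i;

            for (i = 0; i < 8; i++)
                    r |= ((b >> i) & 1) << (7 - i);
            return r;
    }

    static int bcd2bin(unsigned char v)
    {
            return (v & 0x0f) + (v >> 4) * 10;
    }

    int main(void)
    {
            unsigned char raw_secs = 0x9c;          /* example byte from the chip */
            unsigned char reg = rev8(raw_secs);     /* 0x39 after bit reversal */

            printf("seconds = %d\n", bcd2bin(reg & 0x7f));  /* prints 39 */
            return 0;
    }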
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c index 389346cda6c8..07c7f31081bc 100644 --- a/drivers/s390/char/defkeymap.c +++ b/drivers/s390/char/defkeymap.c | |||
@@ -151,8 +151,8 @@ char *func_table[MAX_NR_FUNC] = { | |||
151 | }; | 151 | }; |
152 | 152 | ||
153 | struct kbdiacruc accent_table[MAX_DIACR] = { | 153 | struct kbdiacruc accent_table[MAX_DIACR] = { |
154 | {'^', 'c', '\003'}, {'^', 'd', '\004'}, | 154 | {'^', 'c', 0003}, {'^', 'd', 0004}, |
155 | {'^', 'z', '\032'}, {'^', '\012', '\000'}, | 155 | {'^', 'z', 0032}, {'^', 0012, 0000}, |
156 | }; | 156 | }; |
157 | 157 | ||
158 | unsigned int accent_table_size = 4; | 158 | unsigned int accent_table_size = 4; |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index fecba05b4e77..e5c6f6af8765 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -757,7 +757,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd) | |||
757 | "Notifying upper driver of completion " | 757 | "Notifying upper driver of completion " |
758 | "(result %x)\n", cmd->result)); | 758 | "(result %x)\n", cmd->result)); |
759 | 759 | ||
760 | good_bytes = scsi_bufflen(cmd); | 760 | good_bytes = scsi_bufflen(cmd) + cmd->request->extra_len; |
761 | if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { | 761 | if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { |
762 | drv = scsi_cmd_to_driver(cmd); | 762 | drv = scsi_cmd_to_driver(cmd); |
763 | if (drv->done) | 763 | if (drv->done) |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 1dc165ad17fb..e67c14e31bab 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -1577,8 +1577,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, | |||
1577 | } | 1577 | } |
1578 | 1578 | ||
1579 | /** | 1579 | /** |
1580 | * scsi_scan_target - scan a target id, possibly including all LUNs on the | 1580 | * scsi_scan_target - scan a target id, possibly including all LUNs on the target. |
1581 | * target. | ||
1582 | * @parent: host to scan | 1581 | * @parent: host to scan |
1583 | * @channel: channel to scan | 1582 | * @channel: channel to scan |
1584 | * @id: target id to scan | 1583 | * @id: target id to scan |
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c index 6f09cbd7fc48..97c68d021d28 100644 --- a/drivers/serial/8250_pnp.c +++ b/drivers/serial/8250_pnp.c | |||
@@ -91,6 +91,8 @@ static const struct pnp_device_id pnp_dev_table[] = { | |||
91 | /* Archtek America Corp. */ | 91 | /* Archtek America Corp. */ |
92 | /* Archtek SmartLink Modem 3334BT Plug & Play */ | 92 | /* Archtek SmartLink Modem 3334BT Plug & Play */ |
93 | { "GVC000F", 0 }, | 93 | { "GVC000F", 0 }, |
94 | /* Archtek SmartLink Modem 3334BRV 33.6K Data Fax Voice */ | ||
95 | { "GVC0303", 0 }, | ||
94 | /* Hayes */ | 96 | /* Hayes */ |
95 | /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */ | 97 | /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */ |
96 | { "HAY0001", 0 }, | 98 | { "HAY0001", 0 }, |
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index b82595cf13e8..cf627cd1b4c8 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig | |||
@@ -686,7 +686,7 @@ config UART0_RTS_PIN | |||
686 | 686 | ||
687 | config SERIAL_BFIN_UART1 | 687 | config SERIAL_BFIN_UART1 |
688 | bool "Enable UART1" | 688 | bool "Enable UART1" |
689 | depends on SERIAL_BFIN && (BF534 || BF536 || BF537 || BF54x) | 689 | depends on SERIAL_BFIN && (!BF531 && !BF532 && !BF533 && !BF561) |
690 | help | 690 | help |
691 | Enable UART1 | 691 | Enable UART1 |
692 | 692 | ||
@@ -699,14 +699,14 @@ config BFIN_UART1_CTSRTS | |||
699 | 699 | ||
700 | config UART1_CTS_PIN | 700 | config UART1_CTS_PIN |
701 | int "UART1 CTS pin" | 701 | int "UART1 CTS pin" |
702 | depends on BFIN_UART1_CTSRTS && (BF53x || BF561) | 702 | depends on BFIN_UART1_CTSRTS && !BF54x |
703 | default -1 | 703 | default -1 |
704 | help | 704 | help |
705 | Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. | 705 | Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. |
706 | 706 | ||
707 | config UART1_RTS_PIN | 707 | config UART1_RTS_PIN |
708 | int "UART1 RTS pin" | 708 | int "UART1 RTS pin" |
709 | depends on BFIN_UART1_CTSRTS && (BF53x || BF561) | 709 | depends on BFIN_UART1_CTSRTS && !BF54x |
710 | default -1 | 710 | default -1 |
711 | help | 711 | help |
712 | Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. | 712 | Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. |
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c index ac2a3ef28d55..0aa345b9a38b 100644 --- a/drivers/serial/bfin_5xx.c +++ b/drivers/serial/bfin_5xx.c | |||
@@ -1,30 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * File: drivers/serial/bfin_5xx.c | 2 | * Blackfin On-Chip Serial Driver |
3 | * Based on: Based on drivers/serial/sa1100.c | ||
4 | * Author: Aubrey Li <aubrey.li@analog.com> | ||
5 | * | 3 | * |
6 | * Created: | 4 | * Copyright 2006-2007 Analog Devices Inc. |
7 | * Description: Driver for blackfin 5xx serial ports | ||
8 | * | 5 | * |
9 | * Modified: | 6 | * Enter bugs at http://blackfin.uclinux.org/ |
10 | * Copyright 2006 Analog Devices Inc. | ||
11 | * | 7 | * |
12 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | 8 | * Licensed under the GPL-2 or later. |
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2 of the License, or | ||
17 | * (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, see the file COPYING, or write | ||
26 | * to the Free Software Foundation, Inc., | ||
27 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
28 | */ | 9 | */ |
29 | 10 | ||
30 | #if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) | 11 | #if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) |
@@ -67,14 +48,12 @@ | |||
67 | #define DMA_RX_XCOUNT 512 | 48 | #define DMA_RX_XCOUNT 512 |
68 | #define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT) | 49 | #define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT) |
69 | 50 | ||
70 | #define DMA_RX_FLUSH_JIFFIES 5 | 51 | #define DMA_RX_FLUSH_JIFFIES (HZ / 50) |
71 | 52 | ||
72 | #ifdef CONFIG_SERIAL_BFIN_DMA | 53 | #ifdef CONFIG_SERIAL_BFIN_DMA |
73 | static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart); | 54 | static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart); |
74 | #else | 55 | #else |
75 | static void bfin_serial_do_work(struct work_struct *work); | ||
76 | static void bfin_serial_tx_chars(struct bfin_serial_port *uart); | 56 | static void bfin_serial_tx_chars(struct bfin_serial_port *uart); |
77 | static void local_put_char(struct bfin_serial_port *uart, char ch); | ||
78 | #endif | 57 | #endif |
79 | 58 | ||
80 | static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); | 59 | static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); |
@@ -85,23 +64,26 @@ static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); | |||
85 | static void bfin_serial_stop_tx(struct uart_port *port) | 64 | static void bfin_serial_stop_tx(struct uart_port *port) |
86 | { | 65 | { |
87 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; | 66 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; |
67 | struct circ_buf *xmit = &uart->port.info->xmit; | ||
68 | #if !defined(CONFIG_BF54x) && !defined(CONFIG_SERIAL_BFIN_DMA) | ||
69 | unsigned short ier; | ||
70 | #endif | ||
88 | 71 | ||
89 | while (!(UART_GET_LSR(uart) & TEMT)) | 72 | while (!(UART_GET_LSR(uart) & TEMT)) |
90 | continue; | 73 | cpu_relax(); |
91 | 74 | ||
92 | #ifdef CONFIG_SERIAL_BFIN_DMA | 75 | #ifdef CONFIG_SERIAL_BFIN_DMA |
93 | disable_dma(uart->tx_dma_channel); | 76 | disable_dma(uart->tx_dma_channel); |
77 | xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1); | ||
78 | uart->port.icount.tx += uart->tx_count; | ||
79 | uart->tx_count = 0; | ||
80 | uart->tx_done = 1; | ||
94 | #else | 81 | #else |
95 | #ifdef CONFIG_BF54x | 82 | #ifdef CONFIG_BF54x |
96 | /* Waiting for Transmission Finished */ | ||
97 | while (!(UART_GET_LSR(uart) & TFI)) | ||
98 | continue; | ||
99 | /* Clear TFI bit */ | 83 | /* Clear TFI bit */ |
100 | UART_PUT_LSR(uart, TFI); | 84 | UART_PUT_LSR(uart, TFI); |
101 | UART_CLEAR_IER(uart, ETBEI); | 85 | UART_CLEAR_IER(uart, ETBEI); |
102 | #else | 86 | #else |
103 | unsigned short ier; | ||
104 | |||
105 | ier = UART_GET_IER(uart); | 87 | ier = UART_GET_IER(uart); |
106 | ier &= ~ETBEI; | 88 | ier &= ~ETBEI; |
107 | UART_PUT_IER(uart, ier); | 89 | UART_PUT_IER(uart, ier); |
@@ -117,7 +99,8 @@ static void bfin_serial_start_tx(struct uart_port *port) | |||
117 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; | 99 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; |
118 | 100 | ||
119 | #ifdef CONFIG_SERIAL_BFIN_DMA | 101 | #ifdef CONFIG_SERIAL_BFIN_DMA |
120 | bfin_serial_dma_tx_chars(uart); | 102 | if (uart->tx_done) |
103 | bfin_serial_dma_tx_chars(uart); | ||
121 | #else | 104 | #else |
122 | #ifdef CONFIG_BF54x | 105 | #ifdef CONFIG_BF54x |
123 | UART_SET_IER(uart, ETBEI); | 106 | UART_SET_IER(uart, ETBEI); |
@@ -209,34 +192,27 @@ int kgdb_get_debug_char(void) | |||
209 | } | 192 | } |
210 | #endif | 193 | #endif |
211 | 194 | ||
212 | #ifdef CONFIG_SERIAL_BFIN_PIO | 195 | #if ANOMALY_05000230 && defined(CONFIG_SERIAL_BFIN_PIO) |
213 | static void local_put_char(struct bfin_serial_port *uart, char ch) | 196 | # define UART_GET_ANOMALY_THRESHOLD(uart) ((uart)->anomaly_threshold) |
214 | { | 197 | # define UART_SET_ANOMALY_THRESHOLD(uart, v) ((uart)->anomaly_threshold = (v)) |
215 | unsigned short status; | 198 | #else |
216 | int flags = 0; | 199 | # define UART_GET_ANOMALY_THRESHOLD(uart) 0 |
217 | 200 | # define UART_SET_ANOMALY_THRESHOLD(uart, v) | |
218 | spin_lock_irqsave(&uart->port.lock, flags); | 201 | #endif |
219 | |||
220 | do { | ||
221 | status = UART_GET_LSR(uart); | ||
222 | } while (!(status & THRE)); | ||
223 | |||
224 | UART_PUT_CHAR(uart, ch); | ||
225 | SSYNC(); | ||
226 | |||
227 | spin_unlock_irqrestore(&uart->port.lock, flags); | ||
228 | } | ||
229 | 202 | ||
203 | #ifdef CONFIG_SERIAL_BFIN_PIO | ||
230 | static void bfin_serial_rx_chars(struct bfin_serial_port *uart) | 204 | static void bfin_serial_rx_chars(struct bfin_serial_port *uart) |
231 | { | 205 | { |
232 | struct tty_struct *tty = uart->port.info->tty; | 206 | struct tty_struct *tty = uart->port.info->tty; |
233 | unsigned int status, ch, flg; | 207 | unsigned int status, ch, flg; |
234 | static int in_break = 0; | 208 | static struct timeval anomaly_start = { .tv_sec = 0 }; |
235 | #ifdef CONFIG_KGDB_UART | 209 | #ifdef CONFIG_KGDB_UART |
236 | struct pt_regs *regs = get_irq_regs(); | 210 | struct pt_regs *regs = get_irq_regs(); |
237 | #endif | 211 | #endif |
238 | 212 | ||
239 | status = UART_GET_LSR(uart); | 213 | status = UART_GET_LSR(uart); |
214 | UART_CLEAR_LSR(uart); | ||
215 | |||
240 | ch = UART_GET_CHAR(uart); | 216 | ch = UART_GET_CHAR(uart); |
241 | uart->port.icount.rx++; | 217 | uart->port.icount.rx++; |
242 | 218 | ||
@@ -262,28 +238,56 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart) | |||
262 | #endif | 238 | #endif |
263 | 239 | ||
264 | if (ANOMALY_05000230) { | 240 | if (ANOMALY_05000230) { |
265 | /* The BF533 family of processors have a nice misbehavior where | 241 | /* The BF533 (and BF561) family of processors have a nice anomaly |
266 | * they continuously generate characters for a "single" break. | 242 | * where they continuously generate characters for a "single" break. |
267 | * We have to basically ignore this flood until the "next" valid | 243 | * We have to basically ignore this flood until the "next" valid |
268 | * character comes across. All other Blackfin families operate | 244 | * character comes across. Due to the nature of the flood, it is |
269 | * properly though. | 245 | * not possible to reliably catch bytes that are sent too quickly |
246 | * after this break. So application code talking to the Blackfin | ||
247 | * which sends a break signal must allow at least 1.5 character | ||
248 | * times after the end of the break for things to stabilize. This | ||
249 | * timeout was picked as it must absolutely be larger than 1 | ||
250 | * character time +/- some percent. So 1.5 sounds good. All other | ||
251 | * Blackfin families operate properly. Woo. | ||
270 | * Note: While Anomaly 05000230 does not directly address this, | 252 | * Note: While Anomaly 05000230 does not directly address this, |
271 | * the changes that went in for it also fixed this issue. | 253 | * the changes that went in for it also fixed this issue. |
254 | * That anomaly was fixed in 0.5+ silicon. I like bunnies. | ||
272 | */ | 255 | */ |
273 | if (in_break) { | 256 | if (anomaly_start.tv_sec) { |
274 | if (ch != 0) { | 257 | struct timeval curr; |
275 | in_break = 0; | 258 | suseconds_t usecs; |
276 | ch = UART_GET_CHAR(uart); | 259 | |
277 | if (bfin_revid() < 5) | 260 | if ((~ch & (~ch + 1)) & 0xff) |
278 | return; | 261 | goto known_good_char; |
279 | } else | 262 | |
280 | return; | 263 | do_gettimeofday(&curr); |
264 | if (curr.tv_sec - anomaly_start.tv_sec > 1) | ||
265 | goto known_good_char; | ||
266 | |||
267 | usecs = 0; | ||
268 | if (curr.tv_sec != anomaly_start.tv_sec) | ||
269 | usecs += USEC_PER_SEC; | ||
270 | usecs += curr.tv_usec - anomaly_start.tv_usec; | ||
271 | |||
272 | if (usecs > UART_GET_ANOMALY_THRESHOLD(uart)) | ||
273 | goto known_good_char; | ||
274 | |||
275 | if (ch) | ||
276 | anomaly_start.tv_sec = 0; | ||
277 | else | ||
278 | anomaly_start = curr; | ||
279 | |||
280 | return; | ||
281 | |||
282 | known_good_char: | ||
283 | anomaly_start.tv_sec = 0; | ||
281 | } | 284 | } |
282 | } | 285 | } |
283 | 286 | ||
284 | if (status & BI) { | 287 | if (status & BI) { |
285 | if (ANOMALY_05000230) | 288 | if (ANOMALY_05000230) |
286 | in_break = 1; | 289 | if (bfin_revid() < 5) |
290 | do_gettimeofday(&anomaly_start); | ||
287 | uart->port.icount.brk++; | 291 | uart->port.icount.brk++; |
288 | if (uart_handle_break(&uart->port)) | 292 | if (uart_handle_break(&uart->port)) |
289 | goto ignore_char; | 293 | goto ignore_char; |
@@ -324,7 +328,6 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart) | |||
324 | UART_PUT_CHAR(uart, uart->port.x_char); | 328 | UART_PUT_CHAR(uart, uart->port.x_char); |
325 | uart->port.icount.tx++; | 329 | uart->port.icount.tx++; |
326 | uart->port.x_char = 0; | 330 | uart->port.x_char = 0; |
327 | return; | ||
328 | } | 331 | } |
329 | /* | 332 | /* |
330 | * Check the modem control lines before | 333 | * Check the modem control lines before |
@@ -337,9 +340,12 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart) | |||
337 | return; | 340 | return; |
338 | } | 341 | } |
339 | 342 | ||
340 | local_put_char(uart, xmit->buf[xmit->tail]); | 343 | while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) { |
341 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); | 344 | UART_PUT_CHAR(uart, xmit->buf[xmit->tail]); |
342 | uart->port.icount.tx++; | 345 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); |
346 | uart->port.icount.tx++; | ||
347 | SSYNC(); | ||
348 | } | ||
343 | 349 | ||
344 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | 350 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
345 | uart_write_wakeup(&uart->port); | 351 | uart_write_wakeup(&uart->port); |
@@ -352,21 +358,11 @@ static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id) | |||
352 | { | 358 | { |
353 | struct bfin_serial_port *uart = dev_id; | 359 | struct bfin_serial_port *uart = dev_id; |
354 | 360 | ||
355 | #ifdef CONFIG_BF54x | ||
356 | unsigned short status; | ||
357 | spin_lock(&uart->port.lock); | ||
358 | status = UART_GET_LSR(uart); | ||
359 | while ((UART_GET_IER(uart) & ERBFI) && (status & DR)) { | ||
360 | bfin_serial_rx_chars(uart); | ||
361 | status = UART_GET_LSR(uart); | ||
362 | } | ||
363 | spin_unlock(&uart->port.lock); | ||
364 | #else | ||
365 | spin_lock(&uart->port.lock); | 361 | spin_lock(&uart->port.lock); |
366 | while ((UART_GET_IIR(uart) & IIR_STATUS) == IIR_RX_READY) | 362 | while (UART_GET_LSR(uart) & DR) |
367 | bfin_serial_rx_chars(uart); | 363 | bfin_serial_rx_chars(uart); |
368 | spin_unlock(&uart->port.lock); | 364 | spin_unlock(&uart->port.lock); |
369 | #endif | 365 | |
370 | return IRQ_HANDLED; | 366 | return IRQ_HANDLED; |
371 | } | 367 | } |
372 | 368 | ||
@@ -374,25 +370,16 @@ static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id) | |||
374 | { | 370 | { |
375 | struct bfin_serial_port *uart = dev_id; | 371 | struct bfin_serial_port *uart = dev_id; |
376 | 372 | ||
377 | #ifdef CONFIG_BF54x | ||
378 | unsigned short status; | ||
379 | spin_lock(&uart->port.lock); | 373 | spin_lock(&uart->port.lock); |
380 | status = UART_GET_LSR(uart); | 374 | if (UART_GET_LSR(uart) & THRE) |
381 | while ((UART_GET_IER(uart) & ETBEI) && (status & THRE)) { | ||
382 | bfin_serial_tx_chars(uart); | 375 | bfin_serial_tx_chars(uart); |
383 | status = UART_GET_LSR(uart); | ||
384 | } | ||
385 | spin_unlock(&uart->port.lock); | 376 | spin_unlock(&uart->port.lock); |
386 | #else | 377 | |
387 | spin_lock(&uart->port.lock); | ||
388 | while ((UART_GET_IIR(uart) & IIR_STATUS) == IIR_TX_READY) | ||
389 | bfin_serial_tx_chars(uart); | ||
390 | spin_unlock(&uart->port.lock); | ||
391 | #endif | ||
392 | return IRQ_HANDLED; | 378 | return IRQ_HANDLED; |
393 | } | 379 | } |
380 | #endif | ||
394 | 381 | ||
395 | 382 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | |
396 | static void bfin_serial_do_work(struct work_struct *work) | 383 | static void bfin_serial_do_work(struct work_struct *work) |
397 | { | 384 | { |
398 | struct bfin_serial_port *uart = container_of(work, struct bfin_serial_port, cts_workqueue); | 385 | struct bfin_serial_port *uart = container_of(work, struct bfin_serial_port, cts_workqueue); |
@@ -406,33 +393,27 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) | |||
406 | { | 393 | { |
407 | struct circ_buf *xmit = &uart->port.info->xmit; | 394 | struct circ_buf *xmit = &uart->port.info->xmit; |
408 | unsigned short ier; | 395 | unsigned short ier; |
409 | int flags = 0; | ||
410 | |||
411 | if (!uart->tx_done) | ||
412 | return; | ||
413 | 396 | ||
414 | uart->tx_done = 0; | 397 | uart->tx_done = 0; |
415 | 398 | ||
399 | if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { | ||
400 | uart->tx_count = 0; | ||
401 | uart->tx_done = 1; | ||
402 | return; | ||
403 | } | ||
404 | |||
416 | if (uart->port.x_char) { | 405 | if (uart->port.x_char) { |
417 | UART_PUT_CHAR(uart, uart->port.x_char); | 406 | UART_PUT_CHAR(uart, uart->port.x_char); |
418 | uart->port.icount.tx++; | 407 | uart->port.icount.tx++; |
419 | uart->port.x_char = 0; | 408 | uart->port.x_char = 0; |
420 | uart->tx_done = 1; | ||
421 | return; | ||
422 | } | 409 | } |
410 | |||
423 | /* | 411 | /* |
424 | * Check the modem control lines before | 412 | * Check the modem control lines before |
425 | * transmitting anything. | 413 | * transmitting anything. |
426 | */ | 414 | */ |
427 | bfin_serial_mctrl_check(uart); | 415 | bfin_serial_mctrl_check(uart); |
428 | 416 | ||
429 | if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { | ||
430 | bfin_serial_stop_tx(&uart->port); | ||
431 | uart->tx_done = 1; | ||
432 | return; | ||
433 | } | ||
434 | |||
435 | spin_lock_irqsave(&uart->port.lock, flags); | ||
436 | uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); | 417 | uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); |
437 | if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail)) | 418 | if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail)) |
438 | uart->tx_count = UART_XMIT_SIZE - xmit->tail; | 419 | uart->tx_count = UART_XMIT_SIZE - xmit->tail; |
@@ -448,6 +429,7 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) | |||
448 | set_dma_x_count(uart->tx_dma_channel, uart->tx_count); | 429 | set_dma_x_count(uart->tx_dma_channel, uart->tx_count); |
449 | set_dma_x_modify(uart->tx_dma_channel, 1); | 430 | set_dma_x_modify(uart->tx_dma_channel, 1); |
450 | enable_dma(uart->tx_dma_channel); | 431 | enable_dma(uart->tx_dma_channel); |
432 | |||
451 | #ifdef CONFIG_BF54x | 433 | #ifdef CONFIG_BF54x |
452 | UART_SET_IER(uart, ETBEI); | 434 | UART_SET_IER(uart, ETBEI); |
453 | #else | 435 | #else |
@@ -455,7 +437,6 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) | |||
455 | ier |= ETBEI; | 437 | ier |= ETBEI; |
456 | UART_PUT_IER(uart, ier); | 438 | UART_PUT_IER(uart, ier); |
457 | #endif | 439 | #endif |
458 | spin_unlock_irqrestore(&uart->port.lock, flags); | ||
459 | } | 440 | } |
460 | 441 | ||
461 | static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | 442 | static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) |
@@ -464,7 +445,11 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | |||
464 | int i, flg, status; | 445 | int i, flg, status; |
465 | 446 | ||
466 | status = UART_GET_LSR(uart); | 447 | status = UART_GET_LSR(uart); |
467 | uart->port.icount.rx += CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail, UART_XMIT_SIZE);; | 448 | UART_CLEAR_LSR(uart); |
449 | |||
450 | uart->port.icount.rx += | ||
451 | CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail, | ||
452 | UART_XMIT_SIZE); | ||
468 | 453 | ||
469 | if (status & BI) { | 454 | if (status & BI) { |
470 | uart->port.icount.brk++; | 455 | uart->port.icount.brk++; |
@@ -490,10 +475,12 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | |||
490 | else | 475 | else |
491 | flg = TTY_NORMAL; | 476 | flg = TTY_NORMAL; |
492 | 477 | ||
493 | for (i = uart->rx_dma_buf.head; i < uart->rx_dma_buf.tail; i++) { | 478 | for (i = uart->rx_dma_buf.tail; i != uart->rx_dma_buf.head; i++) { |
494 | if (uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i])) | 479 | if (i >= UART_XMIT_SIZE) |
495 | goto dma_ignore_char; | 480 | i = 0; |
496 | uart_insert_char(&uart->port, status, OE, uart->rx_dma_buf.buf[i], flg); | 481 | if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i])) |
482 | uart_insert_char(&uart->port, status, OE, | ||
483 | uart->rx_dma_buf.buf[i], flg); | ||
497 | } | 484 | } |
498 | 485 | ||
499 | dma_ignore_char: | 486 | dma_ignore_char: |
@@ -503,23 +490,23 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | |||
503 | void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) | 490 | void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) |
504 | { | 491 | { |
505 | int x_pos, pos; | 492 | int x_pos, pos; |
506 | int flags = 0; | ||
507 | |||
508 | bfin_serial_dma_tx_chars(uart); | ||
509 | 493 | ||
510 | spin_lock_irqsave(&uart->port.lock, flags); | 494 | uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel); |
511 | x_pos = DMA_RX_XCOUNT - get_dma_curr_xcount(uart->rx_dma_channel); | 495 | x_pos = get_dma_curr_xcount(uart->rx_dma_channel); |
496 | uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows; | ||
497 | if (uart->rx_dma_nrows == DMA_RX_YCOUNT) | ||
498 | uart->rx_dma_nrows = 0; | ||
499 | x_pos = DMA_RX_XCOUNT - x_pos; | ||
512 | if (x_pos == DMA_RX_XCOUNT) | 500 | if (x_pos == DMA_RX_XCOUNT) |
513 | x_pos = 0; | 501 | x_pos = 0; |
514 | 502 | ||
515 | pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos; | 503 | pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos; |
516 | 504 | if (pos != uart->rx_dma_buf.tail) { | |
517 | if (pos>uart->rx_dma_buf.tail) { | 505 | uart->rx_dma_buf.head = pos; |
518 | uart->rx_dma_buf.tail = pos; | ||
519 | bfin_serial_dma_rx_chars(uart); | 506 | bfin_serial_dma_rx_chars(uart); |
520 | uart->rx_dma_buf.head = uart->rx_dma_buf.tail; | 507 | uart->rx_dma_buf.tail = uart->rx_dma_buf.head; |
521 | } | 508 | } |
522 | spin_unlock_irqrestore(&uart->port.lock, flags); | 509 | |
523 | uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; | 510 | uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; |
524 | add_timer(&(uart->rx_dma_timer)); | 511 | add_timer(&(uart->rx_dma_timer)); |
525 | } | 512 | } |
@@ -532,8 +519,8 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id) | |||
532 | 519 | ||
533 | spin_lock(&uart->port.lock); | 520 | spin_lock(&uart->port.lock); |
534 | if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) { | 521 | if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) { |
535 | clear_dma_irqstat(uart->tx_dma_channel); | ||
536 | disable_dma(uart->tx_dma_channel); | 522 | disable_dma(uart->tx_dma_channel); |
523 | clear_dma_irqstat(uart->tx_dma_channel); | ||
537 | #ifdef CONFIG_BF54x | 524 | #ifdef CONFIG_BF54x |
538 | UART_CLEAR_IER(uart, ETBEI); | 525 | UART_CLEAR_IER(uart, ETBEI); |
539 | #else | 526 | #else |
@@ -541,15 +528,13 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id) | |||
541 | ier &= ~ETBEI; | 528 | ier &= ~ETBEI; |
542 | UART_PUT_IER(uart, ier); | 529 | UART_PUT_IER(uart, ier); |
543 | #endif | 530 | #endif |
544 | xmit->tail = (xmit->tail+uart->tx_count) &(UART_XMIT_SIZE -1); | 531 | xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1); |
545 | uart->port.icount.tx+=uart->tx_count; | 532 | uart->port.icount.tx += uart->tx_count; |
546 | 533 | ||
547 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | 534 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
548 | uart_write_wakeup(&uart->port); | 535 | uart_write_wakeup(&uart->port); |
549 | 536 | ||
550 | if (uart_circ_empty(xmit)) | 537 | bfin_serial_dma_tx_chars(uart); |
551 | bfin_serial_stop_tx(&uart->port); | ||
552 | uart->tx_done = 1; | ||
553 | } | 538 | } |
554 | 539 | ||
555 | spin_unlock(&uart->port.lock); | 540 | spin_unlock(&uart->port.lock); |
@@ -561,18 +546,15 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id) | |||
561 | struct bfin_serial_port *uart = dev_id; | 546 | struct bfin_serial_port *uart = dev_id; |
562 | unsigned short irqstat; | 547 | unsigned short irqstat; |
563 | 548 | ||
564 | uart->rx_dma_nrows++; | ||
565 | if (uart->rx_dma_nrows == DMA_RX_YCOUNT) { | ||
566 | uart->rx_dma_nrows = 0; | ||
567 | uart->rx_dma_buf.tail = DMA_RX_XCOUNT*DMA_RX_YCOUNT; | ||
568 | bfin_serial_dma_rx_chars(uart); | ||
569 | uart->rx_dma_buf.head = uart->rx_dma_buf.tail = 0; | ||
570 | } | ||
571 | spin_lock(&uart->port.lock); | 549 | spin_lock(&uart->port.lock); |
572 | irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); | 550 | irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); |
573 | clear_dma_irqstat(uart->rx_dma_channel); | 551 | clear_dma_irqstat(uart->rx_dma_channel); |
574 | |||
575 | spin_unlock(&uart->port.lock); | 552 | spin_unlock(&uart->port.lock); |
553 | |||
554 | del_timer(&(uart->rx_dma_timer)); | ||
555 | uart->rx_dma_timer.expires = jiffies; | ||
556 | add_timer(&(uart->rx_dma_timer)); | ||
557 | |||
576 | return IRQ_HANDLED; | 558 | return IRQ_HANDLED; |
577 | } | 559 | } |
578 | #endif | 560 | #endif |
@@ -599,7 +581,11 @@ static unsigned int bfin_serial_get_mctrl(struct uart_port *port) | |||
599 | if (uart->cts_pin < 0) | 581 | if (uart->cts_pin < 0) |
600 | return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; | 582 | return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; |
601 | 583 | ||
584 | # ifdef BF54x | ||
585 | if (UART_GET_MSR(uart) & CTS) | ||
586 | # else | ||
602 | if (gpio_get_value(uart->cts_pin)) | 587 | if (gpio_get_value(uart->cts_pin)) |
588 | # endif | ||
603 | return TIOCM_DSR | TIOCM_CAR; | 589 | return TIOCM_DSR | TIOCM_CAR; |
604 | else | 590 | else |
605 | #endif | 591 | #endif |
@@ -614,9 +600,17 @@ static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl) | |||
614 | return; | 600 | return; |
615 | 601 | ||
616 | if (mctrl & TIOCM_RTS) | 602 | if (mctrl & TIOCM_RTS) |
603 | # ifdef BF54x | ||
604 | UART_PUT_MCR(uart, UART_GET_MCR(uart) & ~MRTS); | ||
605 | # else | ||
617 | gpio_set_value(uart->rts_pin, 0); | 606 | gpio_set_value(uart->rts_pin, 0); |
607 | # endif | ||
618 | else | 608 | else |
609 | # ifdef BF54x | ||
610 | UART_PUT_MCR(uart, UART_GET_MCR(uart) | MRTS); | ||
611 | # else | ||
619 | gpio_set_value(uart->rts_pin, 1); | 612 | gpio_set_value(uart->rts_pin, 1); |
613 | # endif | ||
620 | #endif | 614 | #endif |
621 | } | 615 | } |
622 | 616 | ||
@@ -627,22 +621,17 @@ static void bfin_serial_mctrl_check(struct bfin_serial_port *uart) | |||
627 | { | 621 | { |
628 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | 622 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS |
629 | unsigned int status; | 623 | unsigned int status; |
630 | # ifdef CONFIG_SERIAL_BFIN_DMA | ||
631 | struct uart_info *info = uart->port.info; | 624 | struct uart_info *info = uart->port.info; |
632 | struct tty_struct *tty = info->tty; | 625 | struct tty_struct *tty = info->tty; |
633 | 626 | ||
634 | status = bfin_serial_get_mctrl(&uart->port); | 627 | status = bfin_serial_get_mctrl(&uart->port); |
628 | uart_handle_cts_change(&uart->port, status & TIOCM_CTS); | ||
635 | if (!(status & TIOCM_CTS)) { | 629 | if (!(status & TIOCM_CTS)) { |
636 | tty->hw_stopped = 1; | 630 | tty->hw_stopped = 1; |
631 | schedule_work(&uart->cts_workqueue); | ||
637 | } else { | 632 | } else { |
638 | tty->hw_stopped = 0; | 633 | tty->hw_stopped = 0; |
639 | } | 634 | } |
640 | # else | ||
641 | status = bfin_serial_get_mctrl(&uart->port); | ||
642 | uart_handle_cts_change(&uart->port, status & TIOCM_CTS); | ||
643 | if (!(status & TIOCM_CTS)) | ||
644 | schedule_work(&uart->cts_workqueue); | ||
645 | # endif | ||
646 | #endif | 635 | #endif |
647 | } | 636 | } |
648 | 637 | ||
@@ -743,6 +732,7 @@ static void bfin_serial_shutdown(struct uart_port *port) | |||
743 | disable_dma(uart->rx_dma_channel); | 732 | disable_dma(uart->rx_dma_channel); |
744 | free_dma(uart->rx_dma_channel); | 733 | free_dma(uart->rx_dma_channel); |
745 | del_timer(&(uart->rx_dma_timer)); | 734 | del_timer(&(uart->rx_dma_timer)); |
735 | dma_free_coherent(NULL, PAGE_SIZE, uart->rx_dma_buf.buf, 0); | ||
746 | #else | 736 | #else |
747 | #ifdef CONFIG_KGDB_UART | 737 | #ifdef CONFIG_KGDB_UART |
748 | if (uart->port.line != CONFIG_KGDB_UART_PORT) | 738 | if (uart->port.line != CONFIG_KGDB_UART_PORT) |
@@ -814,6 +804,8 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios, | |||
814 | quot = uart_get_divisor(port, baud); | 804 | quot = uart_get_divisor(port, baud); |
815 | spin_lock_irqsave(&uart->port.lock, flags); | 805 | spin_lock_irqsave(&uart->port.lock, flags); |
816 | 806 | ||
807 | UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15); | ||
808 | |||
817 | do { | 809 | do { |
818 | lsr = UART_GET_LSR(uart); | 810 | lsr = UART_GET_LSR(uart); |
819 | } while (!(lsr & TEMT)); | 811 | } while (!(lsr & TEMT)); |
@@ -956,10 +948,9 @@ static void __init bfin_serial_init_ports(void) | |||
956 | bfin_serial_ports[i].rx_dma_channel = | 948 | bfin_serial_ports[i].rx_dma_channel = |
957 | bfin_serial_resource[i].uart_rx_dma_channel; | 949 | bfin_serial_resource[i].uart_rx_dma_channel; |
958 | init_timer(&(bfin_serial_ports[i].rx_dma_timer)); | 950 | init_timer(&(bfin_serial_ports[i].rx_dma_timer)); |
959 | #else | ||
960 | INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work); | ||
961 | #endif | 951 | #endif |
962 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | 952 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS |
953 | INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work); | ||
963 | bfin_serial_ports[i].cts_pin = | 954 | bfin_serial_ports[i].cts_pin = |
964 | bfin_serial_resource[i].uart_cts_pin; | 955 | bfin_serial_resource[i].uart_cts_pin; |
965 | bfin_serial_ports[i].rts_pin = | 956 | bfin_serial_ports[i].rts_pin = |
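The break-flood workaround added above keys off a per-port threshold of roughly 1.5 character times, programmed in bfin_serial_set_termios() as USEC_PER_SEC / baud * 15 (15 bit times, i.e. one and a half 10-bit frames): after a break is seen, incoming NULs are dropped until either a clearly valid character arrives or the threshold elapses. The standalone sketch below models that timing decision in plain C. It is illustrative only: the flood_filter_* names are hypothetical, a 10-bit frame is assumed, and the driver's more elaborate "known good character" test is collapsed to "any non-NUL byte".

/*
 * Illustrative sketch of the 1.5-character-time break-flood filter.
 * Names are made up; the real driver keeps this state in
 * struct bfin_serial_port and a static struct timeval.
 */
#include <stdio.h>
#include <sys/time.h>

struct flood_filter {
	struct timeval start;	/* when the break was seen; tv_sec == 0 => not armed */
	long threshold_us;	/* ~1.5 character times at the current baud rate */
};

static void flood_filter_set_baud(struct flood_filter *f, long baud)
{
	/* 1.5 characters at 10 bits per frame == 15 bit times */
	f->threshold_us = 1000000L / baud * 15;
}

static void flood_filter_break(struct flood_filter *f)
{
	gettimeofday(&f->start, NULL);	/* arm the filter when BI is seen */
}

/* Returns 1 if ch should be dropped as part of the break flood. */
static int flood_filter_rx(struct flood_filter *f, unsigned char ch)
{
	struct timeval now;
	long usecs;

	if (!f->start.tv_sec)
		return 0;	/* filter not armed: normal character */

	gettimeofday(&now, NULL);
	usecs = (now.tv_sec - f->start.tv_sec) * 1000000L +
		(now.tv_usec - f->start.tv_usec);

	if (usecs > f->threshold_us || ch != 0) {
		f->start.tv_sec = 0;	/* flood is over: keep this character */
		return 0;
	}
	return 1;	/* still inside the flood window: drop it */
}

int main(void)
{
	struct flood_filter f = { { 0, 0 }, 0 };

	flood_filter_set_baud(&f, 57600);
	printf("threshold at 57600 baud: %ld us\n", f.threshold_us);

	flood_filter_break(&f);
	printf("NUL right after break dropped?  %d\n", flood_filter_rx(&f, 0x00));
	printf("first real character dropped?   %d\n", flood_filter_rx(&f, 'A'));
	return 0;
}

The same idea explains why the threshold is recomputed in set_termios: the flood window scales with the character time, so it has to track the configured baud rate.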
diff --git a/drivers/serial/m32r_sio.c b/drivers/serial/m32r_sio.c index 348ee2c19b58..c2bb11c02bde 100644 --- a/drivers/serial/m32r_sio.c +++ b/drivers/serial/m32r_sio.c | |||
@@ -421,7 +421,7 @@ static void transmit_chars(struct uart_sio_port *up) | |||
421 | up->port.icount.tx++; | 421 | up->port.icount.tx++; |
422 | if (uart_circ_empty(xmit)) | 422 | if (uart_circ_empty(xmit)) |
423 | break; | 423 | break; |
424 | while (!serial_in(up, UART_LSR) & UART_LSR_THRE); | 424 | while (!(serial_in(up, UART_LSR) & UART_LSR_THRE)); |
425 | 425 | ||
426 | } while (--count > 0); | 426 | } while (--count > 0); |
427 | 427 | ||
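The m32r_sio change above is a one-character precedence fix: ! binds tighter than &, so the old condition !serial_in(up, UART_LSR) & UART_LSR_THRE negated the register value first and then masked it, which is almost always zero, so the busy-wait never actually waited for THRE. A tiny sketch with a made-up register value (UART_LSR_THRE is the usual 0x20 bit) shows the difference:

/* Demonstrates the precedence bug fixed in transmit_chars() above. */
#include <stdio.h>

#define UART_LSR_THRE 0x20	/* transmit-holding-register-empty bit */

int main(void)
{
	unsigned int lsr = 0x00;	/* transmitter still busy: THRE clear */

	/*
	 * Buggy form: !lsr is 0 or 1, and neither 0 nor 1 has bit 5 set,
	 * so the loop condition is always false and the "wait" falls
	 * straight through even though THRE is clear.
	 */
	printf("buggy condition evaluates to:   %d\n", !lsr & UART_LSR_THRE);

	/* Fixed form: stays true (keep spinning) until THRE is actually set. */
	printf("correct condition evaluates to: %d\n", !(lsr & UART_LSR_THRE));

	return 0;
}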
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 9ce12cb2cebc..a8c116b80bff 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/delay.h> | 41 | #include <linux/delay.h> |
42 | #include <linux/console.h> | 42 | #include <linux/console.h> |
43 | #include <linux/platform_device.h> | 43 | #include <linux/platform_device.h> |
44 | #include <linux/serial_sci.h> | ||
44 | 45 | ||
45 | #ifdef CONFIG_CPU_FREQ | 46 | #ifdef CONFIG_CPU_FREQ |
46 | #include <linux/notifier.h> | 47 | #include <linux/notifier.h> |
@@ -54,7 +55,6 @@ | |||
54 | #include <asm/kgdb.h> | 55 | #include <asm/kgdb.h> |
55 | #endif | 56 | #endif |
56 | 57 | ||
57 | #include <asm/sci.h> | ||
58 | #include "sh-sci.h" | 58 | #include "sh-sci.h" |
59 | 59 | ||
60 | struct sci_port { | 60 | struct sci_port { |
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index 9cfcfd8dad5e..617efb1640b1 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Core maple bus functionality | 2 | * Core maple bus functionality |
3 | * | 3 | * |
4 | * Copyright (C) 2007 Adrian McMenamin | 4 | * Copyright (C) 2007, 2008 Adrian McMenamin |
5 | * | 5 | * |
6 | * Based on 2.4 code by: | 6 | * Based on 2.4 code by: |
7 | * | 7 | * |
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/module.h> | ||
22 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
23 | #include <linux/list.h> | 22 | #include <linux/list.h> |
24 | #include <linux/io.h> | 23 | #include <linux/io.h> |
@@ -54,7 +53,7 @@ static struct device maple_bus; | |||
54 | static int subdevice_map[MAPLE_PORTS]; | 53 | static int subdevice_map[MAPLE_PORTS]; |
55 | static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; | 54 | static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; |
56 | static unsigned long maple_pnp_time; | 55 | static unsigned long maple_pnp_time; |
57 | static int started, scanning, liststatus, realscan; | 56 | static int started, scanning, liststatus, fullscan; |
58 | static struct kmem_cache *maple_queue_cache; | 57 | static struct kmem_cache *maple_queue_cache; |
59 | 58 | ||
60 | struct maple_device_specify { | 59 | struct maple_device_specify { |
@@ -62,6 +61,9 @@ struct maple_device_specify { | |||
62 | int unit; | 61 | int unit; |
63 | }; | 62 | }; |
64 | 63 | ||
64 | static bool checked[4]; | ||
65 | static struct maple_device *baseunits[4]; | ||
66 | |||
65 | /** | 67 | /** |
66 | * maple_driver_register - register a device driver | 68 | * maple_driver_register - register a device driver |
67 | * automatically makes the driver bus a maple bus | 69 | * automatically makes the driver bus a maple bus |
@@ -309,11 +311,9 @@ static void maple_attach_driver(struct maple_device *mdev) | |||
309 | else | 311 | else |
310 | break; | 312 | break; |
311 | 313 | ||
312 | if (realscan) { | 314 | printk(KERN_INFO "Maple device detected: %s\n", |
313 | printk(KERN_INFO "Maple device detected: %s\n", | 315 | mdev->product_name); |
314 | mdev->product_name); | 316 | printk(KERN_INFO "Maple device: %s\n", mdev->product_licence); |
315 | printk(KERN_INFO "Maple device: %s\n", mdev->product_licence); | ||
316 | } | ||
317 | 317 | ||
318 | function = be32_to_cpu(mdev->devinfo.function); | 318 | function = be32_to_cpu(mdev->devinfo.function); |
319 | 319 | ||
@@ -323,10 +323,9 @@ static void maple_attach_driver(struct maple_device *mdev) | |||
323 | mdev->driver = &maple_dummy_driver; | 323 | mdev->driver = &maple_dummy_driver; |
324 | sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port); | 324 | sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port); |
325 | } else { | 325 | } else { |
326 | if (realscan) | 326 | printk(KERN_INFO |
327 | printk(KERN_INFO | 327 | "Maple bus at (%d, %d): Function 0x%lX\n", |
328 | "Maple bus at (%d, %d): Function 0x%lX\n", | 328 | mdev->port, mdev->unit, function); |
329 | mdev->port, mdev->unit, function); | ||
330 | 329 | ||
331 | matched = | 330 | matched = |
332 | bus_for_each_drv(&maple_bus_type, NULL, mdev, | 331 | bus_for_each_drv(&maple_bus_type, NULL, mdev, |
@@ -334,9 +333,8 @@ static void maple_attach_driver(struct maple_device *mdev) | |||
334 | 333 | ||
335 | if (matched == 0) { | 334 | if (matched == 0) { |
336 | /* Driver does not exist yet */ | 335 | /* Driver does not exist yet */ |
337 | if (realscan) | 336 | printk(KERN_INFO |
338 | printk(KERN_INFO | 337 | "No maple driver found.\n"); |
339 | "No maple driver found.\n"); | ||
340 | mdev->driver = &maple_dummy_driver; | 338 | mdev->driver = &maple_dummy_driver; |
341 | } | 339 | } |
342 | sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port, | 340 | sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port, |
@@ -472,9 +470,12 @@ static void maple_response_none(struct maple_device *mdev, | |||
472 | maple_detach_driver(mdev); | 470 | maple_detach_driver(mdev); |
473 | return; | 471 | return; |
474 | } | 472 | } |
475 | if (!started) { | 473 | if (!started || !fullscan) { |
476 | printk(KERN_INFO "No maple devices attached to port %d\n", | 474 | if (checked[mdev->port] == false) { |
477 | mdev->port); | 475 | checked[mdev->port] = true; |
476 | printk(KERN_INFO "No maple devices attached" | ||
477 | " to port %d\n", mdev->port); | ||
478 | } | ||
478 | return; | 479 | return; |
479 | } | 480 | } |
480 | maple_clean_submap(mdev); | 481 | maple_clean_submap(mdev); |
@@ -485,8 +486,14 @@ static void maple_response_devinfo(struct maple_device *mdev, | |||
485 | char *recvbuf) | 486 | char *recvbuf) |
486 | { | 487 | { |
487 | char submask; | 488 | char submask; |
488 | if ((!started) || (scanning == 2)) { | 489 | if (!started || (scanning == 2) || !fullscan) { |
489 | maple_attach_driver(mdev); | 490 | if ((mdev->unit == 0) && (checked[mdev->port] == false)) { |
491 | checked[mdev->port] = true; | ||
492 | maple_attach_driver(mdev); | ||
493 | } else { | ||
494 | if (mdev->unit != 0) | ||
495 | maple_attach_driver(mdev); | ||
496 | } | ||
490 | return; | 497 | return; |
491 | } | 498 | } |
492 | if (mdev->unit == 0) { | 499 | if (mdev->unit == 0) { |
@@ -505,6 +512,7 @@ static void maple_dma_handler(struct work_struct *work) | |||
505 | struct maple_device *dev; | 512 | struct maple_device *dev; |
506 | char *recvbuf; | 513 | char *recvbuf; |
507 | enum maple_code code; | 514 | enum maple_code code; |
515 | int i; | ||
508 | 516 | ||
509 | if (!maple_dma_done()) | 517 | if (!maple_dma_done()) |
510 | return; | 518 | return; |
@@ -557,6 +565,19 @@ static void maple_dma_handler(struct work_struct *work) | |||
557 | } else | 565 | } else |
558 | scanning = 0; | 566 | scanning = 0; |
559 | 567 | ||
568 | if (!fullscan) { | ||
569 | fullscan = 1; | ||
570 | for (i = 0; i < MAPLE_PORTS; i++) { | ||
571 | if (checked[i] == false) { | ||
572 | fullscan = 0; | ||
573 | dev = baseunits[i]; | ||
574 | dev->mq->command = | ||
575 | MAPLE_COMMAND_DEVINFO; | ||
576 | dev->mq->length = 0; | ||
577 | maple_add_packet(dev->mq); | ||
578 | } | ||
579 | } | ||
580 | } | ||
560 | if (started == 0) | 581 | if (started == 0) |
561 | started = 1; | 582 | started = 1; |
562 | } | 583 | } |
@@ -694,7 +715,9 @@ static int __init maple_bus_init(void) | |||
694 | 715 | ||
695 | /* setup maple ports */ | 716 | /* setup maple ports */ |
696 | for (i = 0; i < MAPLE_PORTS; i++) { | 717 | for (i = 0; i < MAPLE_PORTS; i++) { |
718 | checked[i] = false; | ||
697 | mdev[i] = maple_alloc_dev(i, 0); | 719 | mdev[i] = maple_alloc_dev(i, 0); |
720 | baseunits[i] = mdev[i]; | ||
698 | if (!mdev[i]) { | 721 | if (!mdev[i]) { |
699 | while (i-- > 0) | 722 | while (i-- > 0) |
700 | maple_free_dev(mdev[i]); | 723 | maple_free_dev(mdev[i]); |
@@ -703,12 +726,9 @@ static int __init maple_bus_init(void) | |||
703 | mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO; | 726 | mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO; |
704 | mdev[i]->mq->length = 0; | 727 | mdev[i]->mq->length = 0; |
705 | maple_add_packet(mdev[i]->mq); | 728 | maple_add_packet(mdev[i]->mq); |
706 | /* delay aids hardware detection */ | ||
707 | mdelay(5); | ||
708 | subdevice_map[i] = 0; | 729 | subdevice_map[i] = 0; |
709 | } | 730 | } |
710 | 731 | ||
711 | realscan = 1; | ||
712 | /* setup maplebus hardware */ | 732 | /* setup maplebus hardware */ |
713 | maplebus_dma_reset(); | 733 | maplebus_dma_reset(); |
714 | /* initial detection */ | 734 | /* initial detection */ |
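Instead of the old mdelay(5) per port, the maple bus now remembers which ports have answered (checked[]) and keeps re-queuing DEVINFO requests to the remaining base units until every port is accounted for, at which point fullscan is set. A minimal sketch of that bookkeeping follows; queue_devinfo() is a hypothetical stand-in for maple_add_packet() on the port's base unit.

/* Sketch of the rescan bookkeeping added to maple_dma_handler() above. */
#include <stdbool.h>
#include <stdio.h>

#define MAPLE_PORTS 4

static bool checked[MAPLE_PORTS];
static int fullscan;

static void queue_devinfo(int port)
{
	printf("re-queue DEVINFO for port %d\n", port);
}

/* Called at the end of each DMA pass. */
static void rescan_unchecked_ports(void)
{
	int i;

	if (fullscan)
		return;

	fullscan = 1;
	for (i = 0; i < MAPLE_PORTS; i++) {
		if (!checked[i]) {
			fullscan = 0;	/* at least one port still silent */
			queue_devinfo(i);
		}
	}
}

int main(void)
{
	checked[0] = checked[2] = true;	/* ports 0 and 2 already answered */
	rescan_unchecked_ports();	/* re-queues ports 1 and 3 */

	checked[1] = checked[3] = true;	/* remaining ports answer or report empty */
	rescan_unchecked_ports();	/* nothing left to do */
	printf("fullscan = %d\n", fullscan);
	return 0;
}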
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index 253ed5682a6d..a86315a0c5b8 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c | |||
@@ -42,6 +42,7 @@ struct mpc52xx_psc_spi { | |||
42 | 42 | ||
43 | /* driver internal data */ | 43 | /* driver internal data */ |
44 | struct mpc52xx_psc __iomem *psc; | 44 | struct mpc52xx_psc __iomem *psc; |
45 | struct mpc52xx_psc_fifo __iomem *fifo; | ||
45 | unsigned int irq; | 46 | unsigned int irq; |
46 | u8 bits_per_word; | 47 | u8 bits_per_word; |
47 | u8 busy; | 48 | u8 busy; |
@@ -139,6 +140,7 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi, | |||
139 | { | 140 | { |
140 | struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); | 141 | struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); |
141 | struct mpc52xx_psc __iomem *psc = mps->psc; | 142 | struct mpc52xx_psc __iomem *psc = mps->psc; |
143 | struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; | ||
142 | unsigned rb = 0; /* number of bytes received */ | 144 | unsigned rb = 0; /* number of bytes received */ |
143 | unsigned sb = 0; /* number of bytes sent */ | 145 | unsigned sb = 0; /* number of bytes sent */ |
144 | unsigned char *rx_buf = (unsigned char *)t->rx_buf; | 146 | unsigned char *rx_buf = (unsigned char *)t->rx_buf; |
@@ -190,11 +192,11 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi, | |||
190 | out_8(&psc->mode, 0); | 192 | out_8(&psc->mode, 0); |
191 | } else { | 193 | } else { |
192 | out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); | 194 | out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); |
193 | out_be16(&psc->rfalarm, rfalarm); | 195 | out_be16(&fifo->rfalarm, rfalarm); |
194 | } | 196 | } |
195 | out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY); | 197 | out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY); |
196 | wait_for_completion(&mps->done); | 198 | wait_for_completion(&mps->done); |
197 | recv_at_once = in_be16(&psc->rfnum); | 199 | recv_at_once = in_be16(&fifo->rfnum); |
198 | dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once); | 200 | dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once); |
199 | 201 | ||
200 | send_at_once = recv_at_once; | 202 | send_at_once = recv_at_once; |
@@ -331,6 +333,7 @@ static void mpc52xx_psc_spi_cleanup(struct spi_device *spi) | |||
331 | static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) | 333 | static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) |
332 | { | 334 | { |
333 | struct mpc52xx_psc __iomem *psc = mps->psc; | 335 | struct mpc52xx_psc __iomem *psc = mps->psc; |
336 | struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; | ||
334 | u32 mclken_div; | 337 | u32 mclken_div; |
335 | int ret = 0; | 338 | int ret = 0; |
336 | 339 | ||
@@ -346,7 +349,7 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) | |||
346 | /* Disable interrupts, interrupts are based on alarm level */ | 349 | /* Disable interrupts, interrupts are based on alarm level */ |
347 | out_be16(&psc->mpc52xx_psc_imr, 0); | 350 | out_be16(&psc->mpc52xx_psc_imr, 0); |
348 | out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); | 351 | out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); |
349 | out_8(&psc->rfcntl, 0); | 352 | out_8(&fifo->rfcntl, 0); |
350 | out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); | 353 | out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); |
351 | 354 | ||
352 | /* Configure 8bit codec mode as a SPI master and use EOF flags */ | 355 | /* Configure 8bit codec mode as a SPI master and use EOF flags */ |
@@ -419,6 +422,8 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, | |||
419 | ret = -EFAULT; | 422 | ret = -EFAULT; |
420 | goto free_master; | 423 | goto free_master; |
421 | } | 424 | } |
425 | /* On the 5200, fifo regs are immediately adjacent to the psc regs */ | ||
426 | mps->fifo = ((void __iomem *)mps->psc) + sizeof(struct mpc52xx_psc); | ||
422 | 427 | ||
423 | ret = request_irq(mps->irq, mpc52xx_psc_spi_isr, 0, "mpc52xx-psc-spi", | 428 | ret = request_irq(mps->irq, mpc52xx_psc_spi_isr, 0, "mpc52xx-psc-spi", |
424 | mps); | 429 | mps); |
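The mpc52xx_psc_spi fix relies on the FIFO register block sitting immediately after the core PSC registers, so the driver derives the second pointer from the first with sizeof(struct mpc52xx_psc) rather than poking FIFO registers through the PSC struct. The sketch below shows the same adjacent-register-block overlay; the register layouts are invented stand-ins, not the real MPC5200 map.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_psc {		/* invented stand-in for struct mpc52xx_psc */
	uint8_t  mode;
	uint8_t  command;
	uint16_t imr;
	uint32_t pad[6];
};

struct fake_fifo {		/* invented stand-in for struct mpc52xx_psc_fifo */
	uint8_t  rfcntl;
	uint8_t  pad0;
	uint16_t rfalarm;
	uint16_t rfnum;
	uint16_t pad1;
};

int main(void)
{
	/* Stand-in for the single ioremap()ed register window. */
	void *window = calloc(1, sizeof(struct fake_psc) + sizeof(struct fake_fifo));
	if (!window)
		return 1;

	struct fake_psc  *psc  = window;
	/* FIFO block starts right where the core block ends. */
	struct fake_fifo *fifo = (struct fake_fifo *)((char *)psc +
						      sizeof(struct fake_psc));

	fifo->rfalarm = 0x20;	/* the driver would use out_be16(&fifo->rfalarm, ...) */

	printf("fifo offset from psc base: %zu bytes\n", sizeof(struct fake_psc));
	printf("rfalarm = 0x%x\n", (unsigned)fifo->rfalarm);

	free(window);
	return 0;
}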
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index 5c33cdb9cac7..a2b0aa48b8ea 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig | |||
@@ -87,12 +87,13 @@ config USB_DYNAMIC_MINORS | |||
87 | If you are unsure about this, say N here. | 87 | If you are unsure about this, say N here. |
88 | 88 | ||
89 | config USB_SUSPEND | 89 | config USB_SUSPEND |
90 | bool "USB selective suspend/resume and wakeup (EXPERIMENTAL)" | 90 | bool "USB selective suspend/resume and wakeup" |
91 | depends on USB && PM && EXPERIMENTAL | 91 | depends on USB && PM |
92 | help | 92 | help |
93 | If you say Y here, you can use driver calls or the sysfs | 93 | If you say Y here, you can use driver calls or the sysfs |
94 | "power/state" file to suspend or resume individual USB | 94 | "power/level" file to suspend or resume individual USB |
95 | peripherals. | 95 | peripherals and to enable or disable autosuspend (see |
96 | Documentation/usb/power-management.txt for more details). | ||
96 | 97 | ||
97 | Also, USB "remote wakeup" signaling is supported, whereby some | 98 | Also, USB "remote wakeup" signaling is supported, whereby some |
98 | USB devices (like keyboards and network adapters) can wake up | 99 | USB devices (like keyboards and network adapters) can wake up |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index f90ab5e94c58..d9d1eb19f2a1 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -28,35 +28,38 @@ | |||
28 | * devices is broken... | 28 | * devices is broken... |
29 | */ | 29 | */ |
30 | static const struct usb_device_id usb_quirk_list[] = { | 30 | static const struct usb_device_id usb_quirk_list[] = { |
31 | /* Action Semiconductor flash disk */ | ||
32 | { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255}, | ||
33 | |||
34 | /* CBM - Flash disk */ | 31 | /* CBM - Flash disk */ |
35 | { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, | 32 | { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, |
33 | |||
36 | /* HP 5300/5370C scanner */ | 34 | /* HP 5300/5370C scanner */ |
37 | { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 }, | 35 | { USB_DEVICE(0x03f0, 0x0701), .driver_info = |
36 | USB_QUIRK_STRING_FETCH_255 }, | ||
38 | 37 | ||
39 | /* Creative SB Audigy 2 NX */ | 38 | /* Creative SB Audigy 2 NX */ |
40 | { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, | 39 | { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, |
41 | 40 | ||
41 | /* Philips PSC805 audio device */ | ||
42 | { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
43 | |||
42 | /* Roland SC-8820 */ | 44 | /* Roland SC-8820 */ |
43 | { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, | 45 | { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, |
44 | 46 | ||
45 | /* Edirol SD-20 */ | 47 | /* Edirol SD-20 */ |
46 | { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, | 48 | { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, |
47 | 49 | ||
48 | /* INTEL VALUE SSD */ | ||
49 | { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
50 | |||
51 | /* M-Systems Flash Disk Pioneers */ | 50 | /* M-Systems Flash Disk Pioneers */ |
52 | { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, | 51 | { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, |
53 | 52 | ||
54 | /* Philips PSC805 audio device */ | 53 | /* Action Semiconductor flash disk */ |
55 | { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, | 54 | { USB_DEVICE(0x10d6, 0x2200), .driver_info = |
55 | USB_QUIRK_STRING_FETCH_255 }, | ||
56 | 56 | ||
57 | /* SKYMEDI USB_DRIVE */ | 57 | /* SKYMEDI USB_DRIVE */ |
58 | { USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME }, | 58 | { USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME }, |
59 | 59 | ||
60 | /* INTEL VALUE SSD */ | ||
61 | { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
62 | |||
60 | { } /* terminating entry must be last */ | 63 | { } /* terminating entry must be last */ |
61 | }; | 64 | }; |
62 | 65 | ||
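The quirks table above is a sentinel-terminated array keyed by vendor and product ID, and the patch reorders it by VID so new entries slot in predictably. Below is a plain-C sketch of the same pattern; it reuses a couple of the IDs shown above but substitutes invented flag values and a hand-rolled lookup for the kernel's USB_QUIRK_* constants, USB_DEVICE() initializer, and usb_match_id().

#include <stdio.h>

#define QUIRK_RESET_RESUME     0x01	/* invented values for illustration */
#define QUIRK_STRING_FETCH_255 0x02

struct quirk {
	unsigned short vid, pid;
	unsigned int   flags;
};

static const struct quirk quirk_list[] = {
	{ 0x0204, 0x6025, QUIRK_RESET_RESUME },		/* kept sorted by VID, then PID */
	{ 0x03f0, 0x0701, QUIRK_STRING_FETCH_255 },
	{ 0x1516, 0x8628, QUIRK_RESET_RESUME },
	{ 0, 0, 0 }	/* terminating entry must be last */
};

static unsigned int quirks_for(unsigned short vid, unsigned short pid)
{
	const struct quirk *q;

	for (q = quirk_list; q->vid || q->pid; q++)
		if (q->vid == vid && q->pid == pid)
			return q->flags;
	return 0;
}

int main(void)
{
	printf("0x1516:0x8628 -> 0x%x\n", quirks_for(0x1516, 0x8628));
	printf("0xdead:0xbeef -> 0x%x\n", quirks_for(0xdead, 0xbeef));
	return 0;
}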
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 4e984060c984..1f0db51190cc 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c | |||
@@ -99,8 +99,7 @@ struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev, | |||
99 | EXPORT_SYMBOL_GPL(usb_ifnum_to_if); | 99 | EXPORT_SYMBOL_GPL(usb_ifnum_to_if); |
100 | 100 | ||
101 | /** | 101 | /** |
102 | * usb_altnum_to_altsetting - get the altsetting structure with a given | 102 | * usb_altnum_to_altsetting - get the altsetting structure with a given alternate setting number. |
103 | * alternate setting number. | ||
104 | * @intf: the interface containing the altsetting in question | 103 | * @intf: the interface containing the altsetting in question |
105 | * @altnum: the desired alternate setting number | 104 | * @altnum: the desired alternate setting number |
106 | * | 105 | * |
@@ -234,7 +233,7 @@ static int ksuspend_usb_init(void) | |||
234 | * singlethreaded. Its job doesn't justify running on more | 233 | * singlethreaded. Its job doesn't justify running on more |
235 | * than one CPU. | 234 | * than one CPU. |
236 | */ | 235 | */ |
237 | ksuspend_usb_wq = create_singlethread_workqueue("ksuspend_usbd"); | 236 | ksuspend_usb_wq = create_freezeable_workqueue("ksuspend_usbd"); |
238 | if (!ksuspend_usb_wq) | 237 | if (!ksuspend_usb_wq) |
239 | return -ENOMEM; | 238 | return -ENOMEM; |
240 | return 0; | 239 | return 0; |
@@ -442,8 +441,7 @@ EXPORT_SYMBOL_GPL(usb_put_intf); | |||
442 | */ | 441 | */ |
443 | 442 | ||
444 | /** | 443 | /** |
445 | * usb_lock_device_for_reset - cautiously acquire the lock for a | 444 | * usb_lock_device_for_reset - cautiously acquire the lock for a usb device structure |
446 | * usb device structure | ||
447 | * @udev: device that's being locked | 445 | * @udev: device that's being locked |
448 | * @iface: interface bound to the driver making the request (optional) | 446 | * @iface: interface bound to the driver making the request (optional) |
449 | * | 447 | * |
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index 4f6bfa100f2a..2c32bd08ee7d 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c | |||
@@ -92,7 +92,6 @@ struct printer_dev { | |||
92 | u8 *current_rx_buf; | 92 | u8 *current_rx_buf; |
93 | u8 printer_status; | 93 | u8 printer_status; |
94 | u8 reset_printer; | 94 | u8 reset_printer; |
95 | struct class_device *printer_class_dev; | ||
96 | struct cdev printer_cdev; | 95 | struct cdev printer_cdev; |
97 | struct device *pdev; | 96 | struct device *pdev; |
98 | u8 printer_cdev_open; | 97 | u8 printer_cdev_open; |
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c index 4402d6f042d9..096c41cc40d1 100644 --- a/drivers/usb/gadget/pxa2xx_udc.c +++ b/drivers/usb/gadget/pxa2xx_udc.c | |||
@@ -103,6 +103,12 @@ static const char ep0name [] = "ep0"; | |||
103 | #error "Can't configure both IXP and PXA" | 103 | #error "Can't configure both IXP and PXA" |
104 | #endif | 104 | #endif |
105 | 105 | ||
106 | /* IXP doesn't yet support <linux/clk.h> */ | ||
107 | #define clk_get(dev,name) NULL | ||
108 | #define clk_enable(clk) do { } while (0) | ||
109 | #define clk_disable(clk) do { } while (0) | ||
110 | #define clk_put(clk) do { } while (0) | ||
111 | |||
106 | #endif | 112 | #endif |
107 | 113 | ||
108 | #include "pxa2xx_udc.h" | 114 | #include "pxa2xx_udc.h" |
@@ -934,20 +940,31 @@ static void udc_disable(struct pxa2xx_udc *); | |||
934 | /* We disable the UDC -- and its 48 MHz clock -- whenever it's not | 940 | /* We disable the UDC -- and its 48 MHz clock -- whenever it's not |
935 | * in active use. | 941 | * in active use. |
936 | */ | 942 | */ |
937 | static int pullup(struct pxa2xx_udc *udc, int is_active) | 943 | static int pullup(struct pxa2xx_udc *udc) |
938 | { | 944 | { |
939 | is_active = is_active && udc->vbus && udc->pullup; | 945 | int is_active = udc->vbus && udc->pullup && !udc->suspended; |
940 | DMSG("%s\n", is_active ? "active" : "inactive"); | 946 | DMSG("%s\n", is_active ? "active" : "inactive"); |
941 | if (is_active) | 947 | if (is_active) { |
942 | udc_enable(udc); | 948 | if (!udc->active) { |
943 | else { | 949 | udc->active = 1; |
944 | if (udc->gadget.speed != USB_SPEED_UNKNOWN) { | 950 | /* Enable clock for USB device */ |
945 | DMSG("disconnect %s\n", udc->driver | 951 | clk_enable(udc->clk); |
946 | ? udc->driver->driver.name | 952 | udc_enable(udc); |
947 | : "(no driver)"); | ||
948 | stop_activity(udc, udc->driver); | ||
949 | } | 953 | } |
950 | udc_disable(udc); | 954 | } else { |
955 | if (udc->active) { | ||
956 | if (udc->gadget.speed != USB_SPEED_UNKNOWN) { | ||
957 | DMSG("disconnect %s\n", udc->driver | ||
958 | ? udc->driver->driver.name | ||
959 | : "(no driver)"); | ||
960 | stop_activity(udc, udc->driver); | ||
961 | } | ||
962 | udc_disable(udc); | ||
963 | /* Disable clock for USB device */ | ||
964 | clk_disable(udc->clk); | ||
965 | udc->active = 0; | ||
966 | } | ||
967 | |||
951 | } | 968 | } |
952 | return 0; | 969 | return 0; |
953 | } | 970 | } |
@@ -958,9 +975,9 @@ static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active) | |||
958 | struct pxa2xx_udc *udc; | 975 | struct pxa2xx_udc *udc; |
959 | 976 | ||
960 | udc = container_of(_gadget, struct pxa2xx_udc, gadget); | 977 | udc = container_of(_gadget, struct pxa2xx_udc, gadget); |
961 | udc->vbus = is_active = (is_active != 0); | 978 | udc->vbus = (is_active != 0); |
962 | DMSG("vbus %s\n", is_active ? "supplied" : "inactive"); | 979 | DMSG("vbus %s\n", is_active ? "supplied" : "inactive"); |
963 | pullup(udc, is_active); | 980 | pullup(udc); |
964 | return 0; | 981 | return 0; |
965 | } | 982 | } |
966 | 983 | ||
@@ -975,9 +992,8 @@ static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active) | |||
975 | if (!udc->mach->gpio_pullup && !udc->mach->udc_command) | 992 | if (!udc->mach->gpio_pullup && !udc->mach->udc_command) |
976 | return -EOPNOTSUPP; | 993 | return -EOPNOTSUPP; |
977 | 994 | ||
978 | is_active = (is_active != 0); | 995 | udc->pullup = (is_active != 0); |
979 | udc->pullup = is_active; | 996 | pullup(udc); |
980 | pullup(udc, is_active); | ||
981 | return 0; | 997 | return 0; |
982 | } | 998 | } |
983 | 999 | ||
@@ -997,7 +1013,7 @@ static const struct usb_gadget_ops pxa2xx_udc_ops = { | |||
997 | #ifdef CONFIG_USB_GADGET_DEBUG_FS | 1013 | #ifdef CONFIG_USB_GADGET_DEBUG_FS |
998 | 1014 | ||
999 | static int | 1015 | static int |
1000 | udc_seq_show(struct seq_file *m, void *d) | 1016 | udc_seq_show(struct seq_file *m, void *_d) |
1001 | { | 1017 | { |
1002 | struct pxa2xx_udc *dev = m->private; | 1018 | struct pxa2xx_udc *dev = m->private; |
1003 | unsigned long flags; | 1019 | unsigned long flags; |
@@ -1146,11 +1162,6 @@ static void udc_disable(struct pxa2xx_udc *dev) | |||
1146 | 1162 | ||
1147 | udc_clear_mask_UDCCR(UDCCR_UDE); | 1163 | udc_clear_mask_UDCCR(UDCCR_UDE); |
1148 | 1164 | ||
1149 | #ifdef CONFIG_ARCH_PXA | ||
1150 | /* Disable clock for USB device */ | ||
1151 | clk_disable(dev->clk); | ||
1152 | #endif | ||
1153 | |||
1154 | ep0_idle (dev); | 1165 | ep0_idle (dev); |
1155 | dev->gadget.speed = USB_SPEED_UNKNOWN; | 1166 | dev->gadget.speed = USB_SPEED_UNKNOWN; |
1156 | } | 1167 | } |
@@ -1191,11 +1202,6 @@ static void udc_enable (struct pxa2xx_udc *dev) | |||
1191 | { | 1202 | { |
1192 | udc_clear_mask_UDCCR(UDCCR_UDE); | 1203 | udc_clear_mask_UDCCR(UDCCR_UDE); |
1193 | 1204 | ||
1194 | #ifdef CONFIG_ARCH_PXA | ||
1195 | /* Enable clock for USB device */ | ||
1196 | clk_enable(dev->clk); | ||
1197 | #endif | ||
1198 | |||
1199 | /* try to clear these bits before we enable the udc */ | 1205 | /* try to clear these bits before we enable the udc */ |
1200 | udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR); | 1206 | udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR); |
1201 | 1207 | ||
@@ -1286,7 +1292,7 @@ fail: | |||
1286 | * for set_configuration as well as eventual disconnect. | 1292 | * for set_configuration as well as eventual disconnect. |
1287 | */ | 1293 | */ |
1288 | DMSG("registered gadget driver '%s'\n", driver->driver.name); | 1294 | DMSG("registered gadget driver '%s'\n", driver->driver.name); |
1289 | pullup(dev, 1); | 1295 | pullup(dev); |
1290 | dump_state(dev); | 1296 | dump_state(dev); |
1291 | return 0; | 1297 | return 0; |
1292 | } | 1298 | } |
@@ -1329,7 +1335,8 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | |||
1329 | return -EINVAL; | 1335 | return -EINVAL; |
1330 | 1336 | ||
1331 | local_irq_disable(); | 1337 | local_irq_disable(); |
1332 | pullup(dev, 0); | 1338 | dev->pullup = 0; |
1339 | pullup(dev); | ||
1333 | stop_activity(dev, driver); | 1340 | stop_activity(dev, driver); |
1334 | local_irq_enable(); | 1341 | local_irq_enable(); |
1335 | 1342 | ||
@@ -2131,13 +2138,11 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev) | |||
2131 | if (irq < 0) | 2138 | if (irq < 0) |
2132 | return -ENODEV; | 2139 | return -ENODEV; |
2133 | 2140 | ||
2134 | #ifdef CONFIG_ARCH_PXA | ||
2135 | dev->clk = clk_get(&pdev->dev, "UDCCLK"); | 2141 | dev->clk = clk_get(&pdev->dev, "UDCCLK"); |
2136 | if (IS_ERR(dev->clk)) { | 2142 | if (IS_ERR(dev->clk)) { |
2137 | retval = PTR_ERR(dev->clk); | 2143 | retval = PTR_ERR(dev->clk); |
2138 | goto err_clk; | 2144 | goto err_clk; |
2139 | } | 2145 | } |
2140 | #endif | ||
2141 | 2146 | ||
2142 | pr_debug("%s: IRQ %d%s%s\n", driver_name, irq, | 2147 | pr_debug("%s: IRQ %d%s%s\n", driver_name, irq, |
2143 | dev->has_cfr ? "" : " (!cfr)", | 2148 | dev->has_cfr ? "" : " (!cfr)", |
@@ -2250,10 +2255,8 @@ lubbock_fail0: | |||
2250 | if (dev->mach->gpio_vbus) | 2255 | if (dev->mach->gpio_vbus) |
2251 | gpio_free(dev->mach->gpio_vbus); | 2256 | gpio_free(dev->mach->gpio_vbus); |
2252 | err_gpio_vbus: | 2257 | err_gpio_vbus: |
2253 | #ifdef CONFIG_ARCH_PXA | ||
2254 | clk_put(dev->clk); | 2258 | clk_put(dev->clk); |
2255 | err_clk: | 2259 | err_clk: |
2256 | #endif | ||
2257 | return retval; | 2260 | return retval; |
2258 | } | 2261 | } |
2259 | 2262 | ||
@@ -2269,7 +2272,9 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev) | |||
2269 | if (dev->driver) | 2272 | if (dev->driver) |
2270 | return -EBUSY; | 2273 | return -EBUSY; |
2271 | 2274 | ||
2272 | udc_disable(dev); | 2275 | dev->pullup = 0; |
2276 | pullup(dev); | ||
2277 | |||
2273 | remove_debug_files(dev); | 2278 | remove_debug_files(dev); |
2274 | 2279 | ||
2275 | if (dev->got_irq) { | 2280 | if (dev->got_irq) { |
@@ -2289,9 +2294,7 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev) | |||
2289 | if (dev->mach->gpio_pullup) | 2294 | if (dev->mach->gpio_pullup) |
2290 | gpio_free(dev->mach->gpio_pullup); | 2295 | gpio_free(dev->mach->gpio_pullup); |
2291 | 2296 | ||
2292 | #ifdef CONFIG_ARCH_PXA | ||
2293 | clk_put(dev->clk); | 2297 | clk_put(dev->clk); |
2294 | #endif | ||
2295 | 2298 | ||
2296 | platform_set_drvdata(pdev, NULL); | 2299 | platform_set_drvdata(pdev, NULL); |
2297 | the_controller = NULL; | 2300 | the_controller = NULL; |
@@ -2317,10 +2320,15 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev) | |||
2317 | static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state) | 2320 | static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state) |
2318 | { | 2321 | { |
2319 | struct pxa2xx_udc *udc = platform_get_drvdata(dev); | 2322 | struct pxa2xx_udc *udc = platform_get_drvdata(dev); |
2323 | unsigned long flags; | ||
2320 | 2324 | ||
2321 | if (!udc->mach->gpio_pullup && !udc->mach->udc_command) | 2325 | if (!udc->mach->gpio_pullup && !udc->mach->udc_command) |
2322 | WARN("USB host won't detect disconnect!\n"); | 2326 | WARN("USB host won't detect disconnect!\n"); |
2323 | pullup(udc, 0); | 2327 | udc->suspended = 1; |
2328 | |||
2329 | local_irq_save(flags); | ||
2330 | pullup(udc); | ||
2331 | local_irq_restore(flags); | ||
2324 | 2332 | ||
2325 | return 0; | 2333 | return 0; |
2326 | } | 2334 | } |
@@ -2328,8 +2336,12 @@ static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state) | |||
2328 | static int pxa2xx_udc_resume(struct platform_device *dev) | 2336 | static int pxa2xx_udc_resume(struct platform_device *dev) |
2329 | { | 2337 | { |
2330 | struct pxa2xx_udc *udc = platform_get_drvdata(dev); | 2338 | struct pxa2xx_udc *udc = platform_get_drvdata(dev); |
2339 | unsigned long flags; | ||
2331 | 2340 | ||
2332 | pullup(udc, 1); | 2341 | udc->suspended = 0; |
2342 | local_irq_save(flags); | ||
2343 | pullup(udc); | ||
2344 | local_irq_restore(flags); | ||
2333 | 2345 | ||
2334 | return 0; | 2346 | return 0; |
2335 | } | 2347 | } |
diff --git a/drivers/usb/gadget/pxa2xx_udc.h b/drivers/usb/gadget/pxa2xx_udc.h index b67e3ff5e4eb..e2c19e88c875 100644 --- a/drivers/usb/gadget/pxa2xx_udc.h +++ b/drivers/usb/gadget/pxa2xx_udc.h | |||
@@ -119,7 +119,9 @@ struct pxa2xx_udc { | |||
119 | has_cfr : 1, | 119 | has_cfr : 1, |
120 | req_pending : 1, | 120 | req_pending : 1, |
121 | req_std : 1, | 121 | req_std : 1, |
122 | req_config : 1; | 122 | req_config : 1, |
123 | suspended : 1, | ||
124 | active : 1; | ||
123 | 125 | ||
124 | #define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200)) | 126 | #define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200)) |
125 | struct timer_list timer; | 127 | struct timer_list timer; |
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 776a97f33914..2e49de820b14 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c | |||
@@ -319,10 +319,10 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
319 | if (likely (last->urb != urb)) { | 319 | if (likely (last->urb != urb)) { |
320 | ehci_urb_done(ehci, last->urb, last_status); | 320 | ehci_urb_done(ehci, last->urb, last_status); |
321 | count++; | 321 | count++; |
322 | last_status = -EINPROGRESS; | ||
322 | } | 323 | } |
323 | ehci_qtd_free (ehci, last); | 324 | ehci_qtd_free (ehci, last); |
324 | last = NULL; | 325 | last = NULL; |
325 | last_status = -EINPROGRESS; | ||
326 | } | 326 | } |
327 | 327 | ||
328 | /* ignore urbs submitted during completions we reported */ | 328 | /* ignore urbs submitted during completions we reported */ |
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index 0130fd8571e4..d7071c855758 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c | |||
@@ -911,8 +911,7 @@ static int isp116x_hub_status_data(struct usb_hcd *hcd, char *buf) | |||
911 | buf[0] = 0; | 911 | buf[0] = 0; |
912 | 912 | ||
913 | for (i = 0; i < ports; i++) { | 913 | for (i = 0; i < ports; i++) { |
914 | u32 status = isp116x->rhport[i] = | 914 | u32 status = isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1); |
915 | isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1); | ||
916 | 915 | ||
917 | if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC | 916 | if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
918 | | RH_PS_OCIC | RH_PS_PRSC)) { | 917 | | RH_PS_OCIC | RH_PS_PRSC)) { |
@@ -1031,7 +1030,9 @@ static int isp116x_hub_control(struct usb_hcd *hcd, | |||
1031 | DBG("GetPortStatus\n"); | 1030 | DBG("GetPortStatus\n"); |
1032 | if (!wIndex || wIndex > ports) | 1031 | if (!wIndex || wIndex > ports) |
1033 | goto error; | 1032 | goto error; |
1034 | tmp = isp116x->rhport[--wIndex]; | 1033 | spin_lock_irqsave(&isp116x->lock, flags); |
1034 | tmp = isp116x_read_reg32(isp116x, (--wIndex) ? HCRHPORT2 : HCRHPORT1); | ||
1035 | spin_unlock_irqrestore(&isp116x->lock, flags); | ||
1035 | *(__le32 *) buf = cpu_to_le32(tmp); | 1036 | *(__le32 *) buf = cpu_to_le32(tmp); |
1036 | DBG("GetPortStatus: port[%d] %08x\n", wIndex + 1, tmp); | 1037 | DBG("GetPortStatus: port[%d] %08x\n", wIndex + 1, tmp); |
1037 | break; | 1038 | break; |
@@ -1080,8 +1081,6 @@ static int isp116x_hub_control(struct usb_hcd *hcd, | |||
1080 | spin_lock_irqsave(&isp116x->lock, flags); | 1081 | spin_lock_irqsave(&isp116x->lock, flags); |
1081 | isp116x_write_reg32(isp116x, wIndex | 1082 | isp116x_write_reg32(isp116x, wIndex |
1082 | ? HCRHPORT2 : HCRHPORT1, tmp); | 1083 | ? HCRHPORT2 : HCRHPORT1, tmp); |
1083 | isp116x->rhport[wIndex] = | ||
1084 | isp116x_read_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1); | ||
1085 | spin_unlock_irqrestore(&isp116x->lock, flags); | 1084 | spin_unlock_irqrestore(&isp116x->lock, flags); |
1086 | break; | 1085 | break; |
1087 | case SetPortFeature: | 1086 | case SetPortFeature: |
@@ -1095,24 +1094,22 @@ static int isp116x_hub_control(struct usb_hcd *hcd, | |||
1095 | spin_lock_irqsave(&isp116x->lock, flags); | 1094 | spin_lock_irqsave(&isp116x->lock, flags); |
1096 | isp116x_write_reg32(isp116x, wIndex | 1095 | isp116x_write_reg32(isp116x, wIndex |
1097 | ? HCRHPORT2 : HCRHPORT1, RH_PS_PSS); | 1096 | ? HCRHPORT2 : HCRHPORT1, RH_PS_PSS); |
1097 | spin_unlock_irqrestore(&isp116x->lock, flags); | ||
1098 | break; | 1098 | break; |
1099 | case USB_PORT_FEAT_POWER: | 1099 | case USB_PORT_FEAT_POWER: |
1100 | DBG("USB_PORT_FEAT_POWER\n"); | 1100 | DBG("USB_PORT_FEAT_POWER\n"); |
1101 | spin_lock_irqsave(&isp116x->lock, flags); | 1101 | spin_lock_irqsave(&isp116x->lock, flags); |
1102 | isp116x_write_reg32(isp116x, wIndex | 1102 | isp116x_write_reg32(isp116x, wIndex |
1103 | ? HCRHPORT2 : HCRHPORT1, RH_PS_PPS); | 1103 | ? HCRHPORT2 : HCRHPORT1, RH_PS_PPS); |
1104 | spin_unlock_irqrestore(&isp116x->lock, flags); | ||
1104 | break; | 1105 | break; |
1105 | case USB_PORT_FEAT_RESET: | 1106 | case USB_PORT_FEAT_RESET: |
1106 | DBG("USB_PORT_FEAT_RESET\n"); | 1107 | DBG("USB_PORT_FEAT_RESET\n"); |
1107 | root_port_reset(isp116x, wIndex); | 1108 | root_port_reset(isp116x, wIndex); |
1108 | spin_lock_irqsave(&isp116x->lock, flags); | ||
1109 | break; | 1109 | break; |
1110 | default: | 1110 | default: |
1111 | goto error; | 1111 | goto error; |
1112 | } | 1112 | } |
1113 | isp116x->rhport[wIndex] = | ||
1114 | isp116x_read_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1); | ||
1115 | spin_unlock_irqrestore(&isp116x->lock, flags); | ||
1116 | break; | 1113 | break; |
1117 | 1114 | ||
1118 | default: | 1115 | default: |
diff --git a/drivers/usb/host/isp116x.h b/drivers/usb/host/isp116x.h index b91e2edd9c5c..595b90a99848 100644 --- a/drivers/usb/host/isp116x.h +++ b/drivers/usb/host/isp116x.h | |||
@@ -270,7 +270,6 @@ struct isp116x { | |||
270 | u32 rhdesca; | 270 | u32 rhdesca; |
271 | u32 rhdescb; | 271 | u32 rhdescb; |
272 | u32 rhstatus; | 272 | u32 rhstatus; |
273 | u32 rhport[2]; | ||
274 | 273 | ||
275 | /* async schedule: control, bulk */ | 274 | /* async schedule: control, bulk */ |
276 | struct list_head async; | 275 | struct list_head async; |
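The isp116x change drops the rhport[] cache and reads HCRHPORT1/2 under the lock whenever hub_control() needs a port status, since the register can change between the interrupt that filled the cache and a later GetPortStatus. A small sketch of why the cached copy goes stale; the "hardware" here is just a variable, whereas the driver uses isp116x_read_reg32() under isp116x->lock.

#include <stdio.h>

#define RH_PS_CCS 0x0001	/* current connect status bit */

static unsigned int hw_port_status;	/* stands in for the HCRHPORTx register */

static unsigned int read_port_status(void)
{
	return hw_port_status;	/* in the driver: a locked register read */
}

int main(void)
{
	unsigned int cached = read_port_status();	/* snapshot, nothing connected yet */

	hw_port_status |= RH_PS_CCS;	/* device plugged in after the snapshot */

	printf("cached status: 0x%04x (stale, misses the connect)\n", cached);
	printf("fresh  status: 0x%04x\n", read_port_status());
	return 0;
}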
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 76db2fef4657..91dc433dbcf1 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -92,6 +92,7 @@ struct ftdi_sio_quirk { | |||
92 | }; | 92 | }; |
93 | 93 | ||
94 | static int ftdi_jtag_probe (struct usb_serial *serial); | 94 | static int ftdi_jtag_probe (struct usb_serial *serial); |
95 | static int ftdi_mtxorb_hack_setup (struct usb_serial *serial); | ||
95 | static void ftdi_USB_UIRT_setup (struct ftdi_private *priv); | 96 | static void ftdi_USB_UIRT_setup (struct ftdi_private *priv); |
96 | static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv); | 97 | static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv); |
97 | 98 | ||
@@ -99,6 +100,10 @@ static struct ftdi_sio_quirk ftdi_jtag_quirk = { | |||
99 | .probe = ftdi_jtag_probe, | 100 | .probe = ftdi_jtag_probe, |
100 | }; | 101 | }; |
101 | 102 | ||
103 | static struct ftdi_sio_quirk ftdi_mtxorb_hack_quirk = { | ||
104 | .probe = ftdi_mtxorb_hack_setup, | ||
105 | }; | ||
106 | |||
102 | static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = { | 107 | static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = { |
103 | .port_probe = ftdi_USB_UIRT_setup, | 108 | .port_probe = ftdi_USB_UIRT_setup, |
104 | }; | 109 | }; |
@@ -161,6 +166,8 @@ static struct usb_device_id id_table_combined [] = { | |||
161 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) }, | 166 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) }, |
162 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, | 167 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, |
163 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, | 168 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, |
169 | { USB_DEVICE(MTXORB_VK_VID, MTXORB_VK_PID), | ||
170 | .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, | ||
164 | { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, | 171 | { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, |
165 | { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, | 172 | { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, |
166 | { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, | 173 | { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, |
@@ -274,6 +281,7 @@ static struct usb_device_id id_table_combined [] = { | |||
274 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, | 281 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, |
275 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, | 282 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, |
276 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, | 283 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, |
284 | { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, | ||
277 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, | 285 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, |
278 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, | 286 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, |
279 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, | 287 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, |
@@ -1088,6 +1096,23 @@ static int ftdi_jtag_probe(struct usb_serial *serial) | |||
1088 | return 0; | 1096 | return 0; |
1089 | } | 1097 | } |
1090 | 1098 | ||
1099 | /* | ||
1100 | * The Matrix Orbital VK204-25-USB has an invalid IN endpoint. | ||
1101 | * We have to correct it if we want to read from it. | ||
1102 | */ | ||
1103 | static int ftdi_mtxorb_hack_setup(struct usb_serial *serial) | ||
1104 | { | ||
1105 | struct usb_host_endpoint *ep = serial->dev->ep_in[1]; | ||
1106 | struct usb_endpoint_descriptor *ep_desc = &ep->desc; | ||
1107 | |||
1108 | if (ep->enabled && ep_desc->wMaxPacketSize == 0) { | ||
1109 | ep_desc->wMaxPacketSize = 0x40; | ||
1110 | info("Fixing invalid wMaxPacketSize on read pipe"); | ||
1111 | } | ||
1112 | |||
1113 | return 0; | ||
1114 | } | ||
1115 | |||
1091 | /* ftdi_shutdown is called from usbserial:usb_serial_disconnect | 1116 | /* ftdi_shutdown is called from usbserial:usb_serial_disconnect |
1092 | * it is called when the usb device is disconnected | 1117 | * it is called when the usb device is disconnected |
1093 | * | 1118 | * |
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index 6eee2ab914ec..e1eb742abcd5 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h | |||
@@ -102,6 +102,13 @@ | |||
102 | * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ | 102 | * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ |
103 | #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ | 103 | #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ |
104 | 104 | ||
105 | /* | ||
106 | * The following are the values for the Matrix Orbital VK204-25-USB | ||
107 | * display, which use the FT232RL. | ||
108 | */ | ||
109 | #define MTXORB_VK_VID 0x1b3d | ||
110 | #define MTXORB_VK_PID 0x0158 | ||
111 | |||
105 | /* Interbiometrics USB I/O Board */ | 112 | /* Interbiometrics USB I/O Board */ |
106 | /* Developed for Interbiometrics by Rudolf Gugler */ | 113 | /* Developed for Interbiometrics by Rudolf Gugler */ |
107 | #define INTERBIOMETRICS_VID 0x1209 | 114 | #define INTERBIOMETRICS_VID 0x1209 |
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 869ecd374cb4..aeeb9cb20999 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -110,11 +110,20 @@ | |||
110 | 110 | ||
111 | /* vendor id and device id defines */ | 111 | /* vendor id and device id defines */ |
112 | 112 | ||
113 | /* The native mos7840/7820 component */ | ||
113 | #define USB_VENDOR_ID_MOSCHIP 0x9710 | 114 | #define USB_VENDOR_ID_MOSCHIP 0x9710 |
114 | #define MOSCHIP_DEVICE_ID_7840 0x7840 | 115 | #define MOSCHIP_DEVICE_ID_7840 0x7840 |
115 | #define MOSCHIP_DEVICE_ID_7820 0x7820 | 116 | #define MOSCHIP_DEVICE_ID_7820 0x7820 |
117 | /* The native component can have its vendor/device IDs overridden | ||
118 | * in vendor-specific implementations. Such devices can be handled | ||
119 | * by making a change here, in moschip_port_id_table, and in | ||
120 | * moschip_id_table_combined | ||
121 | */ | ||
122 | #define USB_VENDOR_ID_BANDB 0x0856 | ||
123 | #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 | ||
124 | #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 | ||
116 | 125 | ||
117 | /* Interrupt Rotinue Defines */ | 126 | /* Interrupt Routine Defines */ |
118 | 127 | ||
119 | #define SERIAL_IIR_RLS 0x06 | 128 | #define SERIAL_IIR_RLS 0x06 |
120 | #define SERIAL_IIR_MS 0x00 | 129 | #define SERIAL_IIR_MS 0x00 |
@@ -159,12 +168,16 @@ | |||
159 | static struct usb_device_id moschip_port_id_table[] = { | 168 | static struct usb_device_id moschip_port_id_table[] = { |
160 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | 169 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, |
161 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, | 170 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, |
171 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | ||
172 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | ||
162 | {} /* terminating entry */ | 173 | {} /* terminating entry */ |
163 | }; | 174 | }; |
164 | 175 | ||
165 | static __devinitdata struct usb_device_id moschip_id_table_combined[] = { | 176 | static __devinitdata struct usb_device_id moschip_id_table_combined[] = { |
166 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | 177 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, |
167 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, | 178 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, |
179 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | ||
180 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | ||
168 | {} /* terminating entry */ | 181 | {} /* terminating entry */ |
169 | }; | 182 | }; |
170 | 183 | ||
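The comment in the hunk above spells out where a vendor-specific rebadge of the MCS7840/7820 has to be listed. As a worked illustration, adding another such device would look like the fragment below; the vendor and product IDs are hypothetical placeholders, not real hardware:

/* Hypothetical IDs, for illustration only. */
#define USB_VENDOR_ID_EXAMPLE		0x1234
#define EXAMPLE_DEVICE_ID_QUADPORT	0xabcd

/* The same entry goes into both moschip_port_id_table and
 * moschip_id_table_combined, ahead of the terminating entry: */
	{USB_DEVICE(USB_VENDOR_ID_EXAMPLE, EXAMPLE_DEVICE_ID_QUADPORT)},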
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index af2674c57414..828a4377ec6a 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -120,6 +120,9 @@ static int option_send_setup(struct usb_serial_port *port); | |||
120 | #define ANYDATA_PRODUCT_ADU_E100A 0x6501 | 120 | #define ANYDATA_PRODUCT_ADU_E100A 0x6501 |
121 | #define ANYDATA_PRODUCT_ADU_500A 0x6502 | 121 | #define ANYDATA_PRODUCT_ADU_500A 0x6502 |
122 | 122 | ||
123 | #define AXESSTEL_VENDOR_ID 0x1726 | ||
124 | #define AXESSTEL_PRODUCT_MV110H 0x1000 | ||
125 | |||
123 | #define BANDRICH_VENDOR_ID 0x1A8D | 126 | #define BANDRICH_VENDOR_ID 0x1A8D |
124 | #define BANDRICH_PRODUCT_C100_1 0x1002 | 127 | #define BANDRICH_PRODUCT_C100_1 0x1002 |
125 | #define BANDRICH_PRODUCT_C100_2 0x1003 | 128 | #define BANDRICH_PRODUCT_C100_2 0x1003 |
@@ -192,6 +195,7 @@ static struct usb_device_id option_ids[] = { | |||
192 | { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */ | 195 | { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */ |
193 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, | 196 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, |
194 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, | 197 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, |
198 | { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, | ||
195 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, | 199 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, |
196 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, | 200 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, |
197 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, | 201 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, |
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c index 958f5b17847c..b9b8ede61fb3 100644 --- a/drivers/usb/storage/protocol.c +++ b/drivers/usb/storage/protocol.c | |||
@@ -170,7 +170,6 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer, | |||
170 | 170 | ||
171 | if (!sg) | 171 | if (!sg) |
172 | sg = scsi_sglist(srb); | 172 | sg = scsi_sglist(srb); |
173 | buflen = min(buflen, scsi_bufflen(srb)); | ||
174 | 173 | ||
175 | /* This loop handles a single s-g list entry, which may | 174 | /* This loop handles a single s-g list entry, which may |
176 | * include multiple pages. Find the initial page structure | 175 | * include multiple pages. Find the initial page structure |
@@ -232,6 +231,7 @@ void usb_stor_set_xfer_buf(unsigned char *buffer, | |||
232 | unsigned int offset = 0; | 231 | unsigned int offset = 0; |
233 | struct scatterlist *sg = NULL; | 232 | struct scatterlist *sg = NULL; |
234 | 233 | ||
234 | buflen = min(buflen, scsi_bufflen(srb)); | ||
235 | buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset, | 235 | buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset, |
236 | TO_XFER_BUF); | 236 | TO_XFER_BUF); |
237 | if (buflen < scsi_bufflen(srb)) | 237 | if (buflen < scsi_bufflen(srb)) |
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c index e83dfba7e636..742b5c656d66 100644 --- a/drivers/video/sm501fb.c +++ b/drivers/video/sm501fb.c | |||
@@ -237,12 +237,14 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var, | |||
237 | 237 | ||
238 | /* check we can fit these values into the registers */ | 238 | /* check we can fit these values into the registers */ |
239 | 239 | ||
240 | if (var->hsync_len > 255 || var->vsync_len > 255) | 240 | if (var->hsync_len > 255 || var->vsync_len > 63) |
241 | return -EINVAL; | 241 | return -EINVAL; |
242 | 242 | ||
243 | if ((var->xres + var->right_margin) >= 4096) | 243 | /* hdisplay end and hsync start */ |
244 | if ((var->xres + var->right_margin) > 4096) | ||
244 | return -EINVAL; | 245 | return -EINVAL; |
245 | 246 | ||
247 | /* vdisplay end and vsync start */ | ||
246 | if ((var->yres + var->lower_margin) > 2048) | 248 | if ((var->yres + var->lower_margin) > 2048) |
247 | return -EINVAL; | 249 | return -EINVAL; |
248 | 250 | ||
@@ -281,19 +283,21 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var, | |||
281 | var->blue.length = var->bits_per_pixel; | 283 | var->blue.length = var->bits_per_pixel; |
282 | var->blue.offset = 0; | 284 | var->blue.offset = 0; |
283 | var->transp.length = 0; | 285 | var->transp.length = 0; |
286 | var->transp.offset = 0; | ||
284 | 287 | ||
285 | break; | 288 | break; |
286 | 289 | ||
287 | case 16: | 290 | case 16: |
288 | if (sm->pdata->flags & SM501_FBPD_SWAP_FB_ENDIAN) { | 291 | if (sm->pdata->flags & SM501_FBPD_SWAP_FB_ENDIAN) { |
289 | var->red.offset = 11; | ||
290 | var->green.offset = 5; | ||
291 | var->blue.offset = 0; | ||
292 | } else { | ||
293 | var->blue.offset = 11; | 292 | var->blue.offset = 11; |
294 | var->green.offset = 5; | 293 | var->green.offset = 5; |
295 | var->red.offset = 0; | 294 | var->red.offset = 0; |
295 | } else { | ||
296 | var->red.offset = 11; | ||
297 | var->green.offset = 5; | ||
298 | var->blue.offset = 0; | ||
296 | } | 299 | } |
300 | var->transp.offset = 0; | ||
297 | 301 | ||
298 | var->red.length = 5; | 302 | var->red.length = 5; |
299 | var->green.length = 6; | 303 | var->green.length = 6; |
@@ -397,7 +401,7 @@ static int sm501fb_set_par_common(struct fb_info *info, | |||
397 | break; | 401 | break; |
398 | 402 | ||
399 | case 16: | 403 | case 16: |
400 | info->fix.visual = FB_VISUAL_DIRECTCOLOR; | 404 | info->fix.visual = FB_VISUAL_TRUECOLOR; |
401 | break; | 405 | break; |
402 | 406 | ||
403 | case 32: | 407 | case 32: |
@@ -613,6 +617,7 @@ static int sm501fb_set_par_crt(struct fb_info *info) | |||
613 | 617 | ||
614 | case 16: | 618 | case 16: |
615 | control |= SM501_DC_CRT_CONTROL_16BPP; | 619 | control |= SM501_DC_CRT_CONTROL_16BPP; |
620 | sm501fb_setup_gamma(fbi, SM501_DC_CRT_PALETTE); | ||
616 | break; | 621 | break; |
617 | 622 | ||
618 | case 32: | 623 | case 32: |
@@ -750,6 +755,7 @@ static int sm501fb_set_par_pnl(struct fb_info *info) | |||
750 | 755 | ||
751 | case 16: | 756 | case 16: |
752 | control |= SM501_DC_PANEL_CONTROL_16BPP; | 757 | control |= SM501_DC_PANEL_CONTROL_16BPP; |
758 | sm501fb_setup_gamma(fbi, SM501_DC_PANEL_PALETTE); | ||
753 | break; | 759 | break; |
754 | 760 | ||
755 | case 32: | 761 | case 32: |
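The 16bpp hunks above describe an RGB565 layout through the fb_var bitfields (5/6/5 bit lengths, with the red and blue offsets swapped when SM501_FBPD_SWAP_FB_ENDIAN is set). For reference, a pixel value is composed from those fields as in the sketch below; this is an illustrative helper, not driver code:

#include <linux/fb.h>

/* Pack 8-bit r/g/b into a 16bpp pixel using the bitfield layout the driver
 * reports in fb_var_screeninfo (offsets and lengths as set in the hunk above). */
static u16 pack_pixel16(const struct fb_var_screeninfo *var,
			u8 r, u8 g, u8 b)
{
	return ((r >> (8 - var->red.length))   << var->red.offset) |
	       ((g >> (8 - var->green.length)) << var->green.offset) |
	       ((b >> (8 - var->blue.length))  << var->blue.offset);
}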
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c index 70fb4ee2b421..919ce75db9e2 100644 --- a/drivers/video/tridentfb.c +++ b/drivers/video/tridentfb.c | |||
@@ -564,19 +564,46 @@ static inline void write3CE(int reg, unsigned char val) | |||
564 | t_outb(val, 0x3CF); | 564 | t_outb(val, 0x3CF); |
565 | } | 565 | } |
566 | 566 | ||
567 | static inline void enable_mmio(void) | 567 | static void enable_mmio(void) |
568 | { | 568 | { |
569 | unsigned char tmp; | ||
570 | |||
569 | /* Goto New Mode */ | 571 | /* Goto New Mode */ |
570 | outb(0x0B, 0x3C4); | 572 | outb(0x0B, 0x3C4); |
571 | inb(0x3C5); | 573 | inb(0x3C5); |
572 | 574 | ||
573 | /* Unprotect registers */ | 575 | /* Unprotect registers */ |
574 | outb(NewMode1, 0x3C4); | 576 | outb(NewMode1, 0x3C4); |
577 | tmp = inb(0x3C5); | ||
575 | outb(0x80, 0x3C5); | 578 | outb(0x80, 0x3C5); |
576 | 579 | ||
577 | /* Enable MMIO */ | 580 | /* Enable MMIO */ |
578 | outb(PCIReg, 0x3D4); | 581 | outb(PCIReg, 0x3D4); |
579 | outb(inb(0x3D5) | 0x01, 0x3D5); | 582 | outb(inb(0x3D5) | 0x01, 0x3D5); |
583 | |||
584 | t_outb(NewMode1, 0x3C4); | ||
585 | t_outb(tmp, 0x3C5); | ||
586 | } | ||
587 | |||
588 | static void disable_mmio(void) | ||
589 | { | ||
590 | unsigned char tmp; | ||
591 | |||
592 | /* Goto New Mode */ | ||
593 | t_outb(0x0B, 0x3C4); | ||
594 | t_inb(0x3C5); | ||
595 | |||
596 | /* Unprotect registers */ | ||
597 | t_outb(NewMode1, 0x3C4); | ||
598 | tmp = t_inb(0x3C5); | ||
599 | t_outb(0x80, 0x3C5); | ||
600 | |||
601 | /* Disable MMIO */ | ||
602 | t_outb(PCIReg, 0x3D4); | ||
603 | t_outb(t_inb(0x3D5) & ~0x01, 0x3D5); | ||
604 | |||
605 | outb(NewMode1, 0x3C4); | ||
606 | outb(tmp, 0x3C5); | ||
580 | } | 607 | } |
581 | 608 | ||
582 | #define crtc_unlock() write3X4(CRTVSyncEnd, read3X4(CRTVSyncEnd) & 0x7F) | 609 | #define crtc_unlock() write3X4(CRTVSyncEnd, read3X4(CRTVSyncEnd) & 0x7F) |
@@ -1239,9 +1266,9 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, | |||
1239 | default_par.io_virt = ioremap_nocache(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); | 1266 | default_par.io_virt = ioremap_nocache(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); |
1240 | 1267 | ||
1241 | if (!default_par.io_virt) { | 1268 | if (!default_par.io_virt) { |
1242 | release_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); | ||
1243 | debug("ioremap failed\n"); | 1269 | debug("ioremap failed\n"); |
1244 | return -1; | 1270 | err = -1; |
1271 | goto out_unmap1; | ||
1245 | } | 1272 | } |
1246 | 1273 | ||
1247 | enable_mmio(); | 1274 | enable_mmio(); |
@@ -1252,25 +1279,21 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, | |||
1252 | 1279 | ||
1253 | if (!request_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len, "tridentfb")) { | 1280 | if (!request_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len, "tridentfb")) { |
1254 | debug("request_mem_region failed!\n"); | 1281 | debug("request_mem_region failed!\n"); |
1282 | disable_mmio(); | ||
1255 | err = -1; | 1283 | err = -1; |
1256 | goto out_unmap; | 1284 | goto out_unmap1; |
1257 | } | 1285 | } |
1258 | 1286 | ||
1259 | fb_info.screen_base = ioremap_nocache(tridentfb_fix.smem_start, | 1287 | fb_info.screen_base = ioremap_nocache(tridentfb_fix.smem_start, |
1260 | tridentfb_fix.smem_len); | 1288 | tridentfb_fix.smem_len); |
1261 | 1289 | ||
1262 | if (!fb_info.screen_base) { | 1290 | if (!fb_info.screen_base) { |
1263 | release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); | ||
1264 | debug("ioremap failed\n"); | 1291 | debug("ioremap failed\n"); |
1265 | err = -1; | 1292 | err = -1; |
1266 | goto out_unmap; | 1293 | goto out_unmap2; |
1267 | } | 1294 | } |
1268 | 1295 | ||
1269 | output("%s board found\n", pci_name(dev)); | 1296 | output("%s board found\n", pci_name(dev)); |
1270 | #if 0 | ||
1271 | output("Trident board found : mem = %X, io = %X, mem_v = %X, io_v = %X\n", | ||
1272 | tridentfb_fix.smem_start, tridentfb_fix.mmio_start, fb_info.screen_base, default_par.io_virt); | ||
1273 | #endif | ||
1274 | displaytype = get_displaytype(); | 1297 | displaytype = get_displaytype(); |
1275 | 1298 | ||
1276 | if (flatpanel) | 1299 | if (flatpanel) |
@@ -1288,9 +1311,12 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, | |||
1288 | 1311 | ||
1289 | if (!fb_find_mode(&default_var, &fb_info, mode, NULL, 0, NULL, bpp)) { | 1312 | if (!fb_find_mode(&default_var, &fb_info, mode, NULL, 0, NULL, bpp)) { |
1290 | err = -EINVAL; | 1313 | err = -EINVAL; |
1291 | goto out_unmap; | 1314 | goto out_unmap2; |
1292 | } | 1315 | } |
1293 | fb_alloc_cmap(&fb_info.cmap, 256, 0); | 1316 | err = fb_alloc_cmap(&fb_info.cmap, 256, 0); |
1317 | if (err < 0) | ||
1318 | goto out_unmap2; | ||
1319 | |||
1294 | if (defaultaccel && acc) | 1320 | if (defaultaccel && acc) |
1295 | default_var.accel_flags |= FB_ACCELF_TEXT; | 1321 | default_var.accel_flags |= FB_ACCELF_TEXT; |
1296 | else | 1322 | else |
@@ -1300,19 +1326,24 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, | |||
1300 | fb_info.device = &dev->dev; | 1326 | fb_info.device = &dev->dev; |
1301 | if (register_framebuffer(&fb_info) < 0) { | 1327 | if (register_framebuffer(&fb_info) < 0) { |
1302 | printk(KERN_ERR "tridentfb: could not register Trident framebuffer\n"); | 1328 | printk(KERN_ERR "tridentfb: could not register Trident framebuffer\n"); |
1329 | fb_dealloc_cmap(&fb_info.cmap); | ||
1303 | err = -EINVAL; | 1330 | err = -EINVAL; |
1304 | goto out_unmap; | 1331 | goto out_unmap2; |
1305 | } | 1332 | } |
1306 | output("fb%d: %s frame buffer device %dx%d-%dbpp\n", | 1333 | output("fb%d: %s frame buffer device %dx%d-%dbpp\n", |
1307 | fb_info.node, fb_info.fix.id, default_var.xres, | 1334 | fb_info.node, fb_info.fix.id, default_var.xres, |
1308 | default_var.yres, default_var.bits_per_pixel); | 1335 | default_var.yres, default_var.bits_per_pixel); |
1309 | return 0; | 1336 | return 0; |
1310 | 1337 | ||
1311 | out_unmap: | 1338 | out_unmap2: |
1312 | if (default_par.io_virt) | ||
1313 | iounmap(default_par.io_virt); | ||
1314 | if (fb_info.screen_base) | 1339 | if (fb_info.screen_base) |
1315 | iounmap(fb_info.screen_base); | 1340 | iounmap(fb_info.screen_base); |
1341 | release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); | ||
1342 | disable_mmio(); | ||
1343 | out_unmap1: | ||
1344 | if (default_par.io_virt) | ||
1345 | iounmap(default_par.io_virt); | ||
1346 | release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); | ||
1316 | return err; | 1347 | return err; |
1317 | } | 1348 | } |
1318 | 1349 | ||
@@ -1323,7 +1354,7 @@ static void __devexit trident_pci_remove(struct pci_dev *dev) | |||
1323 | iounmap(par->io_virt); | 1354 | iounmap(par->io_virt); |
1324 | iounmap(fb_info.screen_base); | 1355 | iounmap(fb_info.screen_base); |
1325 | release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); | 1356 | release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); |
1326 | release_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); | 1357 | release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); |
1327 | } | 1358 | } |
1328 | 1359 | ||
1329 | /* List of boards that we are trying to support */ | 1360 | /* List of boards that we are trying to support */ |
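The probe rework above converts the ad-hoc cleanup into ordered goto labels: out_unmap2 releases everything claimed after the framebuffer region, then falls through to out_unmap1, which unmaps and releases the MMIO resources. A compressed sketch of the idiom, with placeholder acquire/release pairs standing in for the real resources:

/* Placeholders only -- the real resources are the MMIO region + mapping,
 * the framebuffer region + mapping, and the cmap/framebuffer registration. */
static int acquire_a(void) { return 0; }
static void release_a(void) { }
static int acquire_b(void) { return 0; }
static void release_b(void) { }
static int acquire_c(void) { return 0; }

static int example_probe(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;

	err = acquire_b();
	if (err)
		goto out_release_a;

	err = acquire_c();
	if (err)
		goto out_release_b;

	return 0;

out_release_b:			/* undo in reverse order of acquisition */
	release_b();
out_release_a:
	release_a();
	return err;
}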
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c index 688e435b4d9a..10211e493001 100644 --- a/drivers/w1/masters/ds1wm.c +++ b/drivers/w1/masters/ds1wm.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/pm.h> | 17 | #include <linux/pm.h> |
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/err.h> | ||
20 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
21 | #include <linux/ds1wm.h> | 22 | #include <linux/ds1wm.h> |
22 | 23 | ||
@@ -102,12 +103,12 @@ struct ds1wm_data { | |||
102 | static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, | 103 | static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, |
103 | u8 val) | 104 | u8 val) |
104 | { | 105 | { |
105 | __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift)); | 106 | __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift)); |
106 | } | 107 | } |
107 | 108 | ||
108 | static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg) | 109 | static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg) |
109 | { | 110 | { |
110 | return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift)); | 111 | return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift)); |
111 | } | 112 | } |
112 | 113 | ||
113 | 114 | ||
@@ -149,8 +150,8 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data) | |||
149 | timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); | 150 | timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); |
150 | ds1wm_data->reset_complete = NULL; | 151 | ds1wm_data->reset_complete = NULL; |
151 | if (!timeleft) { | 152 | if (!timeleft) { |
152 | dev_dbg(&ds1wm_data->pdev->dev, "reset failed\n"); | 153 | dev_err(&ds1wm_data->pdev->dev, "reset failed\n"); |
153 | return 1; | 154 | return 1; |
154 | } | 155 | } |
155 | 156 | ||
156 | /* Wait for the end of the reset. According to the specs, the time | 157 | /* Wait for the end of the reset. According to the specs, the time |
@@ -167,11 +168,11 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data) | |||
167 | (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); | 168 | (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); |
168 | 169 | ||
169 | if (!ds1wm_data->slave_present) { | 170 | if (!ds1wm_data->slave_present) { |
170 | dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); | 171 | dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); |
171 | return 1; | 172 | return 1; |
172 | } | 173 | } |
173 | 174 | ||
174 | return 0; | 175 | return 0; |
175 | } | 176 | } |
176 | 177 | ||
177 | static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) | 178 | static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) |
@@ -334,7 +335,7 @@ static int ds1wm_probe(struct platform_device *pdev) | |||
334 | if (!pdev) | 335 | if (!pdev) |
335 | return -ENODEV; | 336 | return -ENODEV; |
336 | 337 | ||
337 | ds1wm_data = kzalloc(sizeof (*ds1wm_data), GFP_KERNEL); | 338 | ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL); |
338 | if (!ds1wm_data) | 339 | if (!ds1wm_data) |
339 | return -ENOMEM; | 340 | return -ENOMEM; |
340 | 341 | ||
@@ -374,8 +375,8 @@ static int ds1wm_probe(struct platform_device *pdev) | |||
374 | goto err1; | 375 | goto err1; |
375 | 376 | ||
376 | ds1wm_data->clk = clk_get(&pdev->dev, "ds1wm"); | 377 | ds1wm_data->clk = clk_get(&pdev->dev, "ds1wm"); |
377 | if (!ds1wm_data->clk) { | 378 | if (IS_ERR(ds1wm_data->clk)) { |
378 | ret = -ENOENT; | 379 | ret = PTR_ERR(ds1wm_data->clk); |
379 | goto err2; | 380 | goto err2; |
380 | } | 381 | } |
381 | 382 | ||
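The clk_get() fix above relies on clk_get() reporting failure with an ERR_PTR-encoded pointer rather than NULL, so the result has to be tested with IS_ERR() and the error code recovered with PTR_ERR() (hence the new <linux/err.h> include). A minimal sketch of the idiom:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_clock(struct device *dev)
{
	struct clk *clk = clk_get(dev, "ds1wm");

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* e.g. -ENOENT when the clock is missing */

	/* A real driver would keep and clk_enable() the clock here. */
	clk_put(clk);
	return 0;
}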
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 41a958a7585e..5e1a4fb5cacb 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -1424,6 +1424,18 @@ struct elf_note_info { | |||
1424 | int thread_notes; | 1424 | int thread_notes; |
1425 | }; | 1425 | }; |
1426 | 1426 | ||
1427 | /* | ||
1428 | * When a regset has a writeback hook, we call it on each thread before | ||
1429 | * dumping user memory. On register window machines, this makes sure the | ||
1430 | * user memory backing the register data is up to date before we read it. | ||
1431 | */ | ||
1432 | static void do_thread_regset_writeback(struct task_struct *task, | ||
1433 | const struct user_regset *regset) | ||
1434 | { | ||
1435 | if (regset->writeback) | ||
1436 | regset->writeback(task, regset, 1); | ||
1437 | } | ||
1438 | |||
1427 | static int fill_thread_core_info(struct elf_thread_core_info *t, | 1439 | static int fill_thread_core_info(struct elf_thread_core_info *t, |
1428 | const struct user_regset_view *view, | 1440 | const struct user_regset_view *view, |
1429 | long signr, size_t *total) | 1441 | long signr, size_t *total) |
@@ -1445,6 +1457,8 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, | |||
1445 | sizeof(t->prstatus), &t->prstatus); | 1457 | sizeof(t->prstatus), &t->prstatus); |
1446 | *total += notesize(&t->notes[0]); | 1458 | *total += notesize(&t->notes[0]); |
1447 | 1459 | ||
1460 | do_thread_regset_writeback(t->task, &view->regsets[0]); | ||
1461 | |||
1448 | /* | 1462 | /* |
1449 | * Each other regset might generate a note too. For each regset | 1463 | * Each other regset might generate a note too. For each regset |
1450 | * that has no core_note_type or is inactive, we leave t->notes[i] | 1464 | * that has no core_note_type or is inactive, we leave t->notes[i] |
@@ -1452,6 +1466,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, | |||
1452 | */ | 1466 | */ |
1453 | for (i = 1; i < view->n; ++i) { | 1467 | for (i = 1; i < view->n; ++i) { |
1454 | const struct user_regset *regset = &view->regsets[i]; | 1468 | const struct user_regset *regset = &view->regsets[i]; |
1469 | do_thread_regset_writeback(t->task, regset); | ||
1455 | if (regset->core_note_type && | 1470 | if (regset->core_note_type && |
1456 | (!regset->active || regset->active(t->task, regset))) { | 1471 | (!regset->active || regset->active(t->task, regset))) { |
1457 | int ret; | 1472 | int ret; |
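do_thread_regset_writeback() above only acts when a regset supplies a writeback hook. For context, such a hook has the shape sketched below; the prototype mirrors the regset->writeback(task, regset, 1) call in the hunk (the last argument being the "immediate" flag), while the body is purely illustrative:

#include <linux/regset.h>
#include <linux/sched.h>

static int example_regset_writeback(struct task_struct *task,
				    const struct user_regset *regset,
				    int immediate)
{
	/* A real hook would push any register state cached for @task back
	 * into its user memory (e.g. spilling register windows) and return
	 * 0 on success or a negative errno. */
	return 0;
}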
diff --git a/fs/buffer.c b/fs/buffer.c index 3ebccf4aa7e3..ddfdd2c80bf9 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -627,8 +627,7 @@ repeat: | |||
627 | } | 627 | } |
628 | 628 | ||
629 | /** | 629 | /** |
630 | * sync_mapping_buffers - write out and wait upon a mapping's "associated" | 630 | * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers |
631 | * buffers | ||
632 | * @mapping: the mapping which wants those buffers written | 631 | * @mapping: the mapping which wants those buffers written |
633 | * | 632 | * |
634 | * Starts I/O against the buffers at mapping->private_list, and waits upon | 633 | * Starts I/O against the buffers at mapping->private_list, and waits upon |
@@ -836,7 +835,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) | |||
836 | smp_mb(); | 835 | smp_mb(); |
837 | if (buffer_dirty(bh)) { | 836 | if (buffer_dirty(bh)) { |
838 | list_add(&bh->b_assoc_buffers, | 837 | list_add(&bh->b_assoc_buffers, |
839 | &bh->b_assoc_map->private_list); | 838 | &mapping->private_list); |
840 | bh->b_assoc_map = mapping; | 839 | bh->b_assoc_map = mapping; |
841 | } | 840 | } |
842 | spin_unlock(lock); | 841 | spin_unlock(lock); |
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index edd248367b36..dbd91461853c 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES | |||
@@ -6,7 +6,9 @@ and sync so that events like out of disk space get reported properly on | |||
6 | cached files. Fix setxattr failure to certain Samba versions. Fix mount | 6 | cached files. Fix setxattr failure to certain Samba versions. Fix mount |
7 | of second share to disconnected server session (autoreconnect on this). | 7 | of second share to disconnected server session (autoreconnect on this). |
8 | Add ability to modify cifs acls for handling chmod (when mounted with | 8 | Add ability to modify cifs acls for handling chmod (when mounted with |
9 | cifsacl flag). | 9 | cifsacl flag). Fix prefixpath path separator so we can handle mounts |
10 | with prefixpaths longer than one directory (one path component) when | ||
11 | mounted to Windows servers. | ||
10 | 12 | ||
11 | Version 1.51 | 13 | Version 1.51 |
12 | ------------ | 14 | ------------ |
diff --git a/fs/cifs/README b/fs/cifs/README index c623e2f9c5db..50306229b0f9 100644 --- a/fs/cifs/README +++ b/fs/cifs/README | |||
@@ -461,7 +461,7 @@ A partial list of the supported mount options follows: | |||
461 | cifsacl Report mode bits (e.g. on stat) based on the Windows ACL for | 461 | cifsacl Report mode bits (e.g. on stat) based on the Windows ACL for |
462 | the file. (EXPERIMENTAL) | 462 | the file. (EXPERIMENTAL) |
463 | servern Specify the server 's netbios name (RFC1001 name) to use | 463 | servern Specify the server 's netbios name (RFC1001 name) to use |
464 | when attempting to setup a session to the server. This is | 464 | when attempting to setup a session to the server. |
465 | This is needed for mounting to some older servers (such | 465 | This is needed for mounting to some older servers (such |
466 | as OS/2 or Windows 98 and Windows ME) since they do not | 466 | as OS/2 or Windows 98 and Windows ME) since they do not |
467 | support a default server name. A server name can be up | 467 | support a default server name. A server name can be up |
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 73c4c419663c..0228ed06069e 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c | |||
@@ -98,8 +98,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server) | |||
98 | if (mid_entry->resp_buf) { | 98 | if (mid_entry->resp_buf) { |
99 | cifs_dump_detail(mid_entry->resp_buf); | 99 | cifs_dump_detail(mid_entry->resp_buf); |
100 | cifs_dump_mem("existing buf: ", | 100 | cifs_dump_mem("existing buf: ", |
101 | mid_entry->resp_buf, | 101 | mid_entry->resp_buf, 62); |
102 | 62 /* fixme */); | ||
103 | } | 102 | } |
104 | } | 103 | } |
105 | } | 104 | } |
@@ -439,7 +438,7 @@ cifs_stats_read(char *buf, char **beginBuffer, off_t offset, | |||
439 | 438 | ||
440 | return length; | 439 | return length; |
441 | } | 440 | } |
442 | #endif | 441 | #endif /* STATS */ |
443 | 442 | ||
444 | static struct proc_dir_entry *proc_fs_cifs; | 443 | static struct proc_dir_entry *proc_fs_cifs; |
445 | read_proc_t cifs_txanchor_read; | 444 | read_proc_t cifs_txanchor_read; |
@@ -482,7 +481,7 @@ cifs_proc_init(void) | |||
482 | cifs_stats_read, NULL); | 481 | cifs_stats_read, NULL); |
483 | if (pde) | 482 | if (pde) |
484 | pde->write_proc = cifs_stats_write; | 483 | pde->write_proc = cifs_stats_write; |
485 | #endif | 484 | #endif /* STATS */ |
486 | pde = create_proc_read_entry("cifsFYI", 0, proc_fs_cifs, | 485 | pde = create_proc_read_entry("cifsFYI", 0, proc_fs_cifs, |
487 | cifsFYI_read, NULL); | 486 | cifsFYI_read, NULL); |
488 | if (pde) | 487 | if (pde) |
@@ -918,4 +917,12 @@ security_flags_write(struct file *file, const char __user *buffer, | |||
918 | /* BB should we turn on MAY flags for other MUST options? */ | 917 | /* BB should we turn on MAY flags for other MUST options? */ |
919 | return count; | 918 | return count; |
920 | } | 919 | } |
921 | #endif | 920 | #else |
921 | inline void cifs_proc_init(void) | ||
922 | { | ||
923 | } | ||
924 | |||
925 | inline void cifs_proc_clean(void) | ||
926 | { | ||
927 | } | ||
928 | #endif /* PROC_FS */ | ||
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h index c26cd0d2c6d5..5eb3b83bbfa7 100644 --- a/fs/cifs/cifs_debug.h +++ b/fs/cifs/cifs_debug.h | |||
@@ -25,8 +25,11 @@ | |||
25 | 25 | ||
26 | void cifs_dump_mem(char *label, void *data, int length); | 26 | void cifs_dump_mem(char *label, void *data, int length); |
27 | #ifdef CONFIG_CIFS_DEBUG2 | 27 | #ifdef CONFIG_CIFS_DEBUG2 |
28 | #define DBG2 2 | ||
28 | void cifs_dump_detail(struct smb_hdr *); | 29 | void cifs_dump_detail(struct smb_hdr *); |
29 | void cifs_dump_mids(struct TCP_Server_Info *); | 30 | void cifs_dump_mids(struct TCP_Server_Info *); |
31 | #else | ||
32 | #define DBG2 0 | ||
30 | #endif | 33 | #endif |
31 | extern int traceSMB; /* flag which enables the function below */ | 34 | extern int traceSMB; /* flag which enables the function below */ |
32 | void dump_smb(struct smb_hdr *, int); | 35 | void dump_smb(struct smb_hdr *, int); |
@@ -64,10 +67,10 @@ extern int cifsERROR; | |||
64 | * --------- | 67 | * --------- |
65 | */ | 68 | */ |
66 | #else /* _CIFS_DEBUG */ | 69 | #else /* _CIFS_DEBUG */ |
67 | #define cERROR(button,prspec) | 70 | #define cERROR(button, prspec) |
68 | #define cEVENT(format,arg...) | 71 | #define cEVENT(format, arg...) |
69 | #define cFYI(button, prspec) | 72 | #define cFYI(button, prspec) |
70 | #define cifserror(format,arg...) | 73 | #define cifserror(format, arg...) |
71 | #endif /* _CIFS_DEBUG */ | 74 | #endif /* _CIFS_DEBUG */ |
72 | 75 | ||
73 | #endif /* _H_CIFS_DEBUG */ | 76 | #endif /* _H_CIFS_DEBUG */ |
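The DBG2 definition above (2 with CONFIG_CIFS_DEBUG2, 0 without) is what lets the cifsacl.c and cifsfs.c hunks later in this series collapse preprocessor-guarded debug messages into single calls, on the assumption that cFYI() treats a zero level as "off". The replacement pattern, taken from the cifsacl.c changes:

/* Before: */
#ifdef CONFIG_CIFS_DEBUG2
	cFYI(1, ("all perms"));
#endif

/* After: the guard moves into the level argument. */
	cFYI(DBG2, ("all perms"));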
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 6ad447529961..7f8838253410 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c | |||
@@ -286,7 +286,7 @@ static void dump_referral(const struct dfs_info3_param *ref) | |||
286 | cFYI(1, ("DFS: node path: %s", ref->node_name)); | 286 | cFYI(1, ("DFS: node path: %s", ref->node_name)); |
287 | cFYI(1, ("DFS: fl: %hd, srv_type: %hd", ref->flags, ref->server_type)); | 287 | cFYI(1, ("DFS: fl: %hd, srv_type: %hd", ref->flags, ref->server_type)); |
288 | cFYI(1, ("DFS: ref_flags: %hd, path_consumed: %hd", ref->ref_flag, | 288 | cFYI(1, ("DFS: ref_flags: %hd, path_consumed: %hd", ref->ref_flag, |
289 | ref->PathConsumed)); | 289 | ref->path_consumed)); |
290 | } | 290 | } |
291 | 291 | ||
292 | 292 | ||
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index d543accc10dd..6653e29637a7 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c | |||
@@ -125,7 +125,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo) | |||
125 | #ifdef CONFIG_CIFS_DEBUG2 | 125 | #ifdef CONFIG_CIFS_DEBUG2 |
126 | if (cifsFYI && !IS_ERR(spnego_key)) { | 126 | if (cifsFYI && !IS_ERR(spnego_key)) { |
127 | struct cifs_spnego_msg *msg = spnego_key->payload.data; | 127 | struct cifs_spnego_msg *msg = spnego_key->payload.data; |
128 | cifs_dump_mem("SPNEGO reply blob:", msg->data, min(1024, | 128 | cifs_dump_mem("SPNEGO reply blob:", msg->data, min(1024U, |
129 | msg->secblob_len + msg->sesskey_len)); | 129 | msg->secblob_len + msg->sesskey_len)); |
130 | } | 130 | } |
131 | #endif /* CONFIG_CIFS_DEBUG2 */ | 131 | #endif /* CONFIG_CIFS_DEBUG2 */ |
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index b5903b89250d..7d75272a6b3f 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c | |||
@@ -32,7 +32,7 @@ | |||
32 | * | 32 | * |
33 | */ | 33 | */ |
34 | int | 34 | int |
35 | cifs_strfromUCS_le(char *to, const __le16 * from, | 35 | cifs_strfromUCS_le(char *to, const __le16 *from, |
36 | int len, const struct nls_table *codepage) | 36 | int len, const struct nls_table *codepage) |
37 | { | 37 | { |
38 | int i; | 38 | int i; |
@@ -61,7 +61,7 @@ cifs_strfromUCS_le(char *to, const __le16 * from, | |||
61 | * | 61 | * |
62 | */ | 62 | */ |
63 | int | 63 | int |
64 | cifs_strtoUCS(__le16 * to, const char *from, int len, | 64 | cifs_strtoUCS(__le16 *to, const char *from, int len, |
65 | const struct nls_table *codepage) | 65 | const struct nls_table *codepage) |
66 | { | 66 | { |
67 | int charlen; | 67 | int charlen; |
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h index 614c11fcdcb6..14eb9a2395d3 100644 --- a/fs/cifs/cifs_unicode.h +++ b/fs/cifs/cifs_unicode.h | |||
@@ -254,7 +254,8 @@ UniStrstr(const wchar_t *ucs1, const wchar_t *ucs2) | |||
254 | const wchar_t *anchor2 = ucs2; | 254 | const wchar_t *anchor2 = ucs2; |
255 | 255 | ||
256 | while (*ucs1) { | 256 | while (*ucs1) { |
257 | if (*ucs1 == *ucs2) { /* Partial match found */ | 257 | if (*ucs1 == *ucs2) { |
258 | /* Partial match found */ | ||
258 | ucs1++; | 259 | ucs1++; |
259 | ucs2++; | 260 | ucs2++; |
260 | } else { | 261 | } else { |
@@ -279,7 +280,8 @@ UniToupper(register wchar_t uc) | |||
279 | { | 280 | { |
280 | register const struct UniCaseRange *rp; | 281 | register const struct UniCaseRange *rp; |
281 | 282 | ||
282 | if (uc < sizeof (CifsUniUpperTable)) { /* Latin characters */ | 283 | if (uc < sizeof(CifsUniUpperTable)) { |
284 | /* Latin characters */ | ||
283 | return uc + CifsUniUpperTable[uc]; /* Use base tables */ | 285 | return uc + CifsUniUpperTable[uc]; /* Use base tables */ |
284 | } else { | 286 | } else { |
285 | rp = CifsUniUpperRange; /* Use range tables */ | 287 | rp = CifsUniUpperRange; /* Use range tables */ |
@@ -320,7 +322,8 @@ UniTolower(wchar_t uc) | |||
320 | { | 322 | { |
321 | register struct UniCaseRange *rp; | 323 | register struct UniCaseRange *rp; |
322 | 324 | ||
323 | if (uc < sizeof (UniLowerTable)) { /* Latin characters */ | 325 | if (uc < sizeof(UniLowerTable)) { |
326 | /* Latin characters */ | ||
324 | return uc + UniLowerTable[uc]; /* Use base tables */ | 327 | return uc + UniLowerTable[uc]; /* Use base tables */ |
325 | } else { | 328 | } else { |
326 | rp = UniLowerRange; /* Use range tables */ | 329 | rp = UniLowerRange; /* Use range tables */ |
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index a7035bd18e4e..f93932c21772 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c | |||
@@ -46,8 +46,7 @@ static struct cifs_wksid wksidarr[NUM_WK_SIDS] = { | |||
46 | static const struct cifs_sid sid_everyone = { | 46 | static const struct cifs_sid sid_everyone = { |
47 | 1, 1, {0, 0, 0, 0, 0, 1}, {0} }; | 47 | 1, 1, {0, 0, 0, 0, 0, 1}, {0} }; |
48 | /* group users */ | 48 | /* group users */ |
49 | static const struct cifs_sid sid_user = | 49 | static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} }; |
50 | {1, 2 , {0, 0, 0, 0, 0, 5}, {} }; | ||
51 | 50 | ||
52 | 51 | ||
53 | int match_sid(struct cifs_sid *ctsid) | 52 | int match_sid(struct cifs_sid *ctsid) |
@@ -195,9 +194,9 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode, | |||
195 | /* For deny ACEs we change the mask so that subsequent allow access | 194 | /* For deny ACEs we change the mask so that subsequent allow access |
196 | control entries do not turn on the bits we are denying */ | 195 | control entries do not turn on the bits we are denying */ |
197 | if (type == ACCESS_DENIED) { | 196 | if (type == ACCESS_DENIED) { |
198 | if (flags & GENERIC_ALL) { | 197 | if (flags & GENERIC_ALL) |
199 | *pbits_to_set &= ~S_IRWXUGO; | 198 | *pbits_to_set &= ~S_IRWXUGO; |
200 | } | 199 | |
201 | if ((flags & GENERIC_WRITE) || | 200 | if ((flags & GENERIC_WRITE) || |
202 | ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS)) | 201 | ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS)) |
203 | *pbits_to_set &= ~S_IWUGO; | 202 | *pbits_to_set &= ~S_IWUGO; |
@@ -216,9 +215,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode, | |||
216 | 215 | ||
217 | if (flags & GENERIC_ALL) { | 216 | if (flags & GENERIC_ALL) { |
218 | *pmode |= (S_IRWXUGO & (*pbits_to_set)); | 217 | *pmode |= (S_IRWXUGO & (*pbits_to_set)); |
219 | #ifdef CONFIG_CIFS_DEBUG2 | 218 | cFYI(DBG2, ("all perms")); |
220 | cFYI(1, ("all perms")); | ||
221 | #endif | ||
222 | return; | 219 | return; |
223 | } | 220 | } |
224 | if ((flags & GENERIC_WRITE) || | 221 | if ((flags & GENERIC_WRITE) || |
@@ -231,9 +228,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode, | |||
231 | ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) | 228 | ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) |
232 | *pmode |= (S_IXUGO & (*pbits_to_set)); | 229 | *pmode |= (S_IXUGO & (*pbits_to_set)); |
233 | 230 | ||
234 | #ifdef CONFIG_CIFS_DEBUG2 | 231 | cFYI(DBG2, ("access flags 0x%x mode now 0x%x", flags, *pmode)); |
235 | cFYI(1, ("access flags 0x%x mode now 0x%x", flags, *pmode)); | ||
236 | #endif | ||
237 | return; | 232 | return; |
238 | } | 233 | } |
239 | 234 | ||
@@ -262,9 +257,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use, | |||
262 | if (mode & S_IXUGO) | 257 | if (mode & S_IXUGO) |
263 | *pace_flags |= SET_FILE_EXEC_RIGHTS; | 258 | *pace_flags |= SET_FILE_EXEC_RIGHTS; |
264 | 259 | ||
265 | #ifdef CONFIG_CIFS_DEBUG2 | 260 | cFYI(DBG2, ("mode: 0x%x, access flags now 0x%x", mode, *pace_flags)); |
266 | cFYI(1, ("mode: 0x%x, access flags now 0x%x", mode, *pace_flags)); | ||
267 | #endif | ||
268 | return; | 261 | return; |
269 | } | 262 | } |
270 | 263 | ||
@@ -358,11 +351,9 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, | |||
358 | return; | 351 | return; |
359 | } | 352 | } |
360 | 353 | ||
361 | #ifdef CONFIG_CIFS_DEBUG2 | 354 | cFYI(DBG2, ("DACL revision %d size %d num aces %d", |
362 | cFYI(1, ("DACL revision %d size %d num aces %d", | ||
363 | le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size), | 355 | le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size), |
364 | le32_to_cpu(pdacl->num_aces))); | 356 | le32_to_cpu(pdacl->num_aces))); |
365 | #endif | ||
366 | 357 | ||
367 | /* reset rwx permissions for user/group/other. | 358 | /* reset rwx permissions for user/group/other. |
368 | Also, if num_aces is 0 i.e. DACL has no ACEs, | 359 | Also, if num_aces is 0 i.e. DACL has no ACEs, |
@@ -381,10 +372,6 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, | |||
381 | ppace = kmalloc(num_aces * sizeof(struct cifs_ace *), | 372 | ppace = kmalloc(num_aces * sizeof(struct cifs_ace *), |
382 | GFP_KERNEL); | 373 | GFP_KERNEL); |
383 | 374 | ||
384 | /* cifscred->cecount = pdacl->num_aces; | ||
385 | cifscred->aces = kmalloc(num_aces * | ||
386 | sizeof(struct cifs_ace *), GFP_KERNEL);*/ | ||
387 | |||
388 | for (i = 0; i < num_aces; ++i) { | 375 | for (i = 0; i < num_aces; ++i) { |
389 | ppace[i] = (struct cifs_ace *) (acl_base + acl_size); | 376 | ppace[i] = (struct cifs_ace *) (acl_base + acl_size); |
390 | #ifdef CONFIG_CIFS_DEBUG2 | 377 | #ifdef CONFIG_CIFS_DEBUG2 |
@@ -437,7 +424,7 @@ static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid, | |||
437 | &sid_everyone, nmode, S_IRWXO); | 424 | &sid_everyone, nmode, S_IRWXO); |
438 | 425 | ||
439 | pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl)); | 426 | pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl)); |
440 | pndacl->num_aces = 3; | 427 | pndacl->num_aces = cpu_to_le32(3); |
441 | 428 | ||
442 | return (0); | 429 | return (0); |
443 | } | 430 | } |
@@ -495,13 +482,11 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len, | |||
495 | le32_to_cpu(pntsd->gsidoffset)); | 482 | le32_to_cpu(pntsd->gsidoffset)); |
496 | dacloffset = le32_to_cpu(pntsd->dacloffset); | 483 | dacloffset = le32_to_cpu(pntsd->dacloffset); |
497 | dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset); | 484 | dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset); |
498 | #ifdef CONFIG_CIFS_DEBUG2 | 485 | cFYI(DBG2, ("revision %d type 0x%x ooffset 0x%x goffset 0x%x " |
499 | cFYI(1, ("revision %d type 0x%x ooffset 0x%x goffset 0x%x " | ||
500 | "sacloffset 0x%x dacloffset 0x%x", | 486 | "sacloffset 0x%x dacloffset 0x%x", |
501 | pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset), | 487 | pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset), |
502 | le32_to_cpu(pntsd->gsidoffset), | 488 | le32_to_cpu(pntsd->gsidoffset), |
503 | le32_to_cpu(pntsd->sacloffset), dacloffset)); | 489 | le32_to_cpu(pntsd->sacloffset), dacloffset)); |
504 | #endif | ||
505 | /* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */ | 490 | /* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */ |
506 | rc = parse_sid(owner_sid_ptr, end_of_acl); | 491 | rc = parse_sid(owner_sid_ptr, end_of_acl); |
507 | if (rc) | 492 | if (rc) |
@@ -636,9 +621,7 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen, | |||
636 | struct super_block *sb; | 621 | struct super_block *sb; |
637 | struct cifs_sb_info *cifs_sb; | 622 | struct cifs_sb_info *cifs_sb; |
638 | 623 | ||
639 | #ifdef CONFIG_CIFS_DEBUG2 | 624 | cFYI(DBG2, ("set ACL for %s from mode 0x%x", path, inode->i_mode)); |
640 | cFYI(1, ("set ACL for %s from mode 0x%x", path, inode->i_mode)); | ||
641 | #endif | ||
642 | 625 | ||
643 | if (!inode) | 626 | if (!inode) |
644 | return (rc); | 627 | return (rc); |
@@ -669,9 +652,7 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen, | |||
669 | } | 652 | } |
670 | 653 | ||
671 | rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen); | 654 | rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen); |
672 | #ifdef CONFIG_CIFS_DEBUG2 | 655 | cFYI(DBG2, ("SetCIFSACL rc = %d", rc)); |
673 | cFYI(1, ("SetCIFSACL rc = %d", rc)); | ||
674 | #endif | ||
675 | if (unlock_file == TRUE) | 656 | if (unlock_file == TRUE) |
676 | atomic_dec(&open_file->wrtPending); | 657 | atomic_dec(&open_file->wrtPending); |
677 | else | 658 | else |
@@ -689,9 +670,7 @@ void acl_to_uid_mode(struct inode *inode, const char *path) | |||
689 | u32 acllen = 0; | 670 | u32 acllen = 0; |
690 | int rc = 0; | 671 | int rc = 0; |
691 | 672 | ||
692 | #ifdef CONFIG_CIFS_DEBUG2 | 673 | cFYI(DBG2, ("converting ACL to mode for %s", path)); |
693 | cFYI(1, ("converting ACL to mode for %s", path)); | ||
694 | #endif | ||
695 | pntsd = get_cifs_acl(&acllen, inode, path); | 674 | pntsd = get_cifs_acl(&acllen, inode, path); |
696 | 675 | ||
697 | /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */ | 676 | /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */ |
@@ -712,9 +691,7 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode) | |||
712 | struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */ | 691 | struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */ |
713 | struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */ | 692 | struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */ |
714 | 693 | ||
715 | #ifdef CONFIG_CIFS_DEBUG2 | 694 | cFYI(DBG2, ("set ACL from mode for %s", path)); |
716 | cFYI(1, ("set ACL from mode for %s", path)); | ||
717 | #endif | ||
718 | 695 | ||
719 | /* Get the security descriptor */ | 696 | /* Get the security descriptor */ |
720 | pntsd = get_cifs_acl(&acllen, inode, path); | 697 | pntsd = get_cifs_acl(&acllen, inode, path); |
@@ -736,16 +713,12 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode) | |||
736 | 713 | ||
737 | rc = build_sec_desc(pntsd, pnntsd, acllen, inode, nmode); | 714 | rc = build_sec_desc(pntsd, pnntsd, acllen, inode, nmode); |
738 | 715 | ||
739 | #ifdef CONFIG_CIFS_DEBUG2 | 716 | cFYI(DBG2, ("build_sec_desc rc: %d", rc)); |
740 | cFYI(1, ("build_sec_desc rc: %d", rc)); | ||
741 | #endif | ||
742 | 717 | ||
743 | if (!rc) { | 718 | if (!rc) { |
744 | /* Set the security descriptor */ | 719 | /* Set the security descriptor */ |
745 | rc = set_cifs_acl(pnntsd, acllen, inode, path); | 720 | rc = set_cifs_acl(pnntsd, acllen, inode, path); |
746 | #ifdef CONFIG_CIFS_DEBUG2 | 721 | cFYI(DBG2, ("set_cifs_acl rc: %d", rc)); |
747 | cFYI(1, ("set_cifs_acl rc: %d", rc)); | ||
748 | #endif | ||
749 | } | 722 | } |
750 | 723 | ||
751 | kfree(pnntsd); | 724 | kfree(pnntsd); |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index fcc434227691..a04b17e5a9d0 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -204,9 +204,8 @@ cifs_put_super(struct super_block *sb) | |||
204 | return; | 204 | return; |
205 | } | 205 | } |
206 | rc = cifs_umount(sb, cifs_sb); | 206 | rc = cifs_umount(sb, cifs_sb); |
207 | if (rc) { | 207 | if (rc) |
208 | cERROR(1, ("cifs_umount failed with return code %d", rc)); | 208 | cERROR(1, ("cifs_umount failed with return code %d", rc)); |
209 | } | ||
210 | #ifdef CONFIG_CIFS_DFS_UPCALL | 209 | #ifdef CONFIG_CIFS_DFS_UPCALL |
211 | if (cifs_sb->mountdata) { | 210 | if (cifs_sb->mountdata) { |
212 | kfree(cifs_sb->mountdata); | 211 | kfree(cifs_sb->mountdata); |
@@ -461,7 +460,7 @@ int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats) | |||
461 | 460 | ||
462 | static struct quotactl_ops cifs_quotactl_ops = { | 461 | static struct quotactl_ops cifs_quotactl_ops = { |
463 | .set_xquota = cifs_xquota_set, | 462 | .set_xquota = cifs_xquota_set, |
464 | .get_xquota = cifs_xquota_set, | 463 | .get_xquota = cifs_xquota_get, |
465 | .set_xstate = cifs_xstate_set, | 464 | .set_xstate = cifs_xstate_set, |
466 | .get_xstate = cifs_xstate_get, | 465 | .get_xstate = cifs_xstate_get, |
467 | }; | 466 | }; |
@@ -472,9 +471,7 @@ static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags) | |||
472 | struct cifs_sb_info *cifs_sb; | 471 | struct cifs_sb_info *cifs_sb; |
473 | struct cifsTconInfo *tcon; | 472 | struct cifsTconInfo *tcon; |
474 | 473 | ||
475 | #ifdef CONFIG_CIFS_DFS_UPCALL | ||
476 | dfs_shrink_umount_helper(vfsmnt); | 474 | dfs_shrink_umount_helper(vfsmnt); |
477 | #endif /* CONFIG CIFS_DFS_UPCALL */ | ||
478 | 475 | ||
479 | if (!(flags & MNT_FORCE)) | 476 | if (!(flags & MNT_FORCE)) |
480 | return; | 477 | return; |
@@ -992,9 +989,7 @@ static int __init | |||
992 | init_cifs(void) | 989 | init_cifs(void) |
993 | { | 990 | { |
994 | int rc = 0; | 991 | int rc = 0; |
995 | #ifdef CONFIG_PROC_FS | ||
996 | cifs_proc_init(); | 992 | cifs_proc_init(); |
997 | #endif | ||
998 | /* INIT_LIST_HEAD(&GlobalServerList);*/ /* BB not implemented yet */ | 993 | /* INIT_LIST_HEAD(&GlobalServerList);*/ /* BB not implemented yet */ |
999 | INIT_LIST_HEAD(&GlobalSMBSessionList); | 994 | INIT_LIST_HEAD(&GlobalSMBSessionList); |
1000 | INIT_LIST_HEAD(&GlobalTreeConnectionList); | 995 | INIT_LIST_HEAD(&GlobalTreeConnectionList); |
@@ -1095,19 +1090,15 @@ init_cifs(void) | |||
1095 | out_destroy_inodecache: | 1090 | out_destroy_inodecache: |
1096 | cifs_destroy_inodecache(); | 1091 | cifs_destroy_inodecache(); |
1097 | out_clean_proc: | 1092 | out_clean_proc: |
1098 | #ifdef CONFIG_PROC_FS | ||
1099 | cifs_proc_clean(); | 1093 | cifs_proc_clean(); |
1100 | #endif | ||
1101 | return rc; | 1094 | return rc; |
1102 | } | 1095 | } |
1103 | 1096 | ||
1104 | static void __exit | 1097 | static void __exit |
1105 | exit_cifs(void) | 1098 | exit_cifs(void) |
1106 | { | 1099 | { |
1107 | cFYI(0, ("exit_cifs")); | 1100 | cFYI(DBG2, ("exit_cifs")); |
1108 | #ifdef CONFIG_PROC_FS | ||
1109 | cifs_proc_clean(); | 1101 | cifs_proc_clean(); |
1110 | #endif | ||
1111 | #ifdef CONFIG_CIFS_DFS_UPCALL | 1102 | #ifdef CONFIG_CIFS_DFS_UPCALL |
1112 | unregister_key_type(&key_type_dns_resolver); | 1103 | unregister_key_type(&key_type_dns_resolver); |
1113 | #endif | 1104 | #endif |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 5d32d8ddc82e..69a2e1942542 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -454,7 +454,7 @@ struct dir_notify_req { | |||
454 | 454 | ||
455 | struct dfs_info3_param { | 455 | struct dfs_info3_param { |
456 | int flags; /* DFSREF_REFERRAL_SERVER, DFSREF_STORAGE_SERVER*/ | 456 | int flags; /* DFSREF_REFERRAL_SERVER, DFSREF_STORAGE_SERVER*/ |
457 | int PathConsumed; | 457 | int path_consumed; |
458 | int server_type; | 458 | int server_type; |
459 | int ref_flag; | 459 | int ref_flag; |
460 | char *path_name; | 460 | char *path_name; |
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 2f09f565a3d9..0af63e6b426b 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -53,11 +53,11 @@ extern int SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses, | |||
53 | extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *, | 53 | extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *, |
54 | struct kvec *, int /* nvec to send */, | 54 | struct kvec *, int /* nvec to send */, |
55 | int * /* type of buf returned */ , const int flags); | 55 | int * /* type of buf returned */ , const int flags); |
56 | extern int SendReceiveBlockingLock(const unsigned int /* xid */ , | 56 | extern int SendReceiveBlockingLock(const unsigned int xid, |
57 | struct cifsTconInfo *, | 57 | struct cifsTconInfo *ptcon, |
58 | struct smb_hdr * /* input */ , | 58 | struct smb_hdr *in_buf, |
59 | struct smb_hdr * /* out */ , | 59 | struct smb_hdr *out_buf, |
60 | int * /* bytes returned */); | 60 | int *bytes_returned); |
61 | extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length); | 61 | extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length); |
62 | extern int is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *); | 62 | extern int is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *); |
63 | extern int is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof); | 63 | extern int is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof); |
@@ -84,7 +84,7 @@ extern __u16 GetNextMid(struct TCP_Server_Info *server); | |||
84 | extern struct oplock_q_entry *AllocOplockQEntry(struct inode *, u16, | 84 | extern struct oplock_q_entry *AllocOplockQEntry(struct inode *, u16, |
85 | struct cifsTconInfo *); | 85 | struct cifsTconInfo *); |
86 | extern void DeleteOplockQEntry(struct oplock_q_entry *); | 86 | extern void DeleteOplockQEntry(struct oplock_q_entry *); |
87 | extern struct timespec cifs_NTtimeToUnix(u64 /* utc nanoseconds since 1601 */ ); | 87 | extern struct timespec cifs_NTtimeToUnix(u64 utc_nanoseconds_since_1601); |
88 | extern u64 cifs_UnixTimeToNT(struct timespec); | 88 | extern u64 cifs_UnixTimeToNT(struct timespec); |
89 | extern __le64 cnvrtDosCifsTm(__u16 date, __u16 time); | 89 | extern __le64 cnvrtDosCifsTm(__u16 date, __u16 time); |
90 | extern struct timespec cnvrtDosUnixTm(__u16 date, __u16 time); | 90 | extern struct timespec cnvrtDosUnixTm(__u16 date, __u16 time); |
@@ -104,7 +104,11 @@ extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *, | |||
104 | extern int cifs_umount(struct super_block *, struct cifs_sb_info *); | 104 | extern int cifs_umount(struct super_block *, struct cifs_sb_info *); |
105 | #ifdef CONFIG_CIFS_DFS_UPCALL | 105 | #ifdef CONFIG_CIFS_DFS_UPCALL |
106 | extern void dfs_shrink_umount_helper(struct vfsmount *vfsmnt); | 106 | extern void dfs_shrink_umount_helper(struct vfsmount *vfsmnt); |
107 | #endif | 107 | #else |
108 | static inline void dfs_shrink_umount_helper(struct vfsmount *vfsmnt) | ||
109 | { | ||
110 | } | ||
111 | #endif /* DFS_UPCALL */ | ||
108 | void cifs_proc_init(void); | 112 | void cifs_proc_init(void); |
109 | void cifs_proc_clean(void); | 113 | void cifs_proc_clean(void); |
110 | 114 | ||
@@ -175,11 +179,11 @@ extern int CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon, | |||
175 | struct kstatfs *FSData); | 179 | struct kstatfs *FSData); |
176 | 180 | ||
177 | extern int CIFSSMBSetTimes(const int xid, struct cifsTconInfo *tcon, | 181 | extern int CIFSSMBSetTimes(const int xid, struct cifsTconInfo *tcon, |
178 | const char *fileName, const FILE_BASIC_INFO * data, | 182 | const char *fileName, const FILE_BASIC_INFO *data, |
179 | const struct nls_table *nls_codepage, | 183 | const struct nls_table *nls_codepage, |
180 | int remap_special_chars); | 184 | int remap_special_chars); |
181 | extern int CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon, | 185 | extern int CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon, |
182 | const FILE_BASIC_INFO * data, __u16 fid); | 186 | const FILE_BASIC_INFO *data, __u16 fid); |
183 | #if 0 | 187 | #if 0 |
184 | extern int CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon, | 188 | extern int CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon, |
185 | char *fileName, __u16 dos_attributes, | 189 | char *fileName, __u16 dos_attributes, |
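[Editor's note on the cifsproto.h hunk above: when CONFIG_CIFS_DFS_UPCALL is not set, the header now supplies an empty static inline dfs_shrink_umount_helper() instead of only an #endif, so call sites need no #ifdef guards of their own. Below is a minimal userspace sketch of that same compile-out pattern; the macro and function names in the sketch are hypothetical and are not part of the patch.]

        #include <stdio.h>

        /* #define FEATURE_X 1 */      /* toggle to compile the feature in */

        #ifdef FEATURE_X
        static void feature_x_cleanup(int handle)
        {
                printf("cleaning up handle %d\n", handle);
        }
        #else
        /* feature compiled out: empty stub keeps call sites #ifdef-free */
        static inline void feature_x_cleanup(int handle)
        {
                (void)handle;
        }
        #endif

        int main(void)
        {
                feature_x_cleanup(42);  /* works whether or not FEATURE_X is set */
                return 0;
        }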
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 9409524e4bf8..30bbe448e260 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * fs/cifs/cifssmb.c | 2 | * fs/cifs/cifssmb.c |
3 | * | 3 | * |
4 | * Copyright (C) International Business Machines Corp., 2002,2007 | 4 | * Copyright (C) International Business Machines Corp., 2002,2008 |
5 | * Author(s): Steve French (sfrench@us.ibm.com) | 5 | * Author(s): Steve French (sfrench@us.ibm.com) |
6 | * | 6 | * |
7 | * Contains the routines for constructing the SMB PDUs themselves | 7 | * Contains the routines for constructing the SMB PDUs themselves |
@@ -102,10 +102,12 @@ static void mark_open_files_invalid(struct cifsTconInfo *pTcon) | |||
102 | to this tcon */ | 102 | to this tcon */ |
103 | } | 103 | } |
104 | 104 | ||
105 | /* If the return code is zero, this function must fill in request_buf pointer */ | 105 | /* Allocate and return pointer to an SMB request buffer, and set basic |
106 | SMB information in the SMB header. If the return code is zero, this | ||
107 | function must have filled in request_buf pointer */ | ||
106 | static int | 108 | static int |
107 | small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | 109 | small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, |
108 | void **request_buf /* returned */) | 110 | void **request_buf) |
109 | { | 111 | { |
110 | int rc = 0; | 112 | int rc = 0; |
111 | 113 | ||
@@ -363,7 +365,7 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | |||
363 | *response_buf = *request_buf; | 365 | *response_buf = *request_buf; |
364 | 366 | ||
365 | header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon, | 367 | header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon, |
366 | wct /*wct */ ); | 368 | wct); |
367 | 369 | ||
368 | if (tcon != NULL) | 370 | if (tcon != NULL) |
369 | cifs_stats_inc(&tcon->num_smbs_sent); | 371 | cifs_stats_inc(&tcon->num_smbs_sent); |
@@ -523,7 +525,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
523 | if (remain >= (MIN_TZ_ADJ / 2)) | 525 | if (remain >= (MIN_TZ_ADJ / 2)) |
524 | result += MIN_TZ_ADJ; | 526 | result += MIN_TZ_ADJ; |
525 | if (val < 0) | 527 | if (val < 0) |
526 | result = - result; | 528 | result = -result; |
527 | server->timeAdj = result; | 529 | server->timeAdj = result; |
528 | } else { | 530 | } else { |
529 | server->timeAdj = (int)tmp; | 531 | server->timeAdj = (int)tmp; |
@@ -600,7 +602,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
600 | server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize), | 602 | server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize), |
601 | (__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE); | 603 | (__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE); |
602 | server->maxRw = le32_to_cpu(pSMBr->MaxRawSize); | 604 | server->maxRw = le32_to_cpu(pSMBr->MaxRawSize); |
603 | cFYI(0, ("Max buf = %d", ses->server->maxBuf)); | 605 | cFYI(DBG2, ("Max buf = %d", ses->server->maxBuf)); |
604 | GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey); | 606 | GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey); |
605 | server->capabilities = le32_to_cpu(pSMBr->Capabilities); | 607 | server->capabilities = le32_to_cpu(pSMBr->Capabilities); |
606 | server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone); | 608 | server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone); |
@@ -868,9 +870,8 @@ PsxDelete: | |||
868 | pSMB->ByteCount = cpu_to_le16(byte_count); | 870 | pSMB->ByteCount = cpu_to_le16(byte_count); |
869 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 871 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
870 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 872 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
871 | if (rc) { | 873 | if (rc) |
872 | cFYI(1, ("Posix delete returned %d", rc)); | 874 | cFYI(1, ("Posix delete returned %d", rc)); |
873 | } | ||
874 | cifs_buf_release(pSMB); | 875 | cifs_buf_release(pSMB); |
875 | 876 | ||
876 | cifs_stats_inc(&tcon->num_deletes); | 877 | cifs_stats_inc(&tcon->num_deletes); |
@@ -916,9 +917,8 @@ DelFileRetry: | |||
916 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 917 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
917 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 918 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
918 | cifs_stats_inc(&tcon->num_deletes); | 919 | cifs_stats_inc(&tcon->num_deletes); |
919 | if (rc) { | 920 | if (rc) |
920 | cFYI(1, ("Error in RMFile = %d", rc)); | 921 | cFYI(1, ("Error in RMFile = %d", rc)); |
921 | } | ||
922 | 922 | ||
923 | cifs_buf_release(pSMB); | 923 | cifs_buf_release(pSMB); |
924 | if (rc == -EAGAIN) | 924 | if (rc == -EAGAIN) |
@@ -961,9 +961,8 @@ RmDirRetry: | |||
961 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 961 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
962 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 962 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
963 | cifs_stats_inc(&tcon->num_rmdirs); | 963 | cifs_stats_inc(&tcon->num_rmdirs); |
964 | if (rc) { | 964 | if (rc) |
965 | cFYI(1, ("Error in RMDir = %d", rc)); | 965 | cFYI(1, ("Error in RMDir = %d", rc)); |
966 | } | ||
967 | 966 | ||
968 | cifs_buf_release(pSMB); | 967 | cifs_buf_release(pSMB); |
969 | if (rc == -EAGAIN) | 968 | if (rc == -EAGAIN) |
@@ -1005,9 +1004,8 @@ MkDirRetry: | |||
1005 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 1004 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
1006 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 1005 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
1007 | cifs_stats_inc(&tcon->num_mkdirs); | 1006 | cifs_stats_inc(&tcon->num_mkdirs); |
1008 | if (rc) { | 1007 | if (rc) |
1009 | cFYI(1, ("Error in Mkdir = %d", rc)); | 1008 | cFYI(1, ("Error in Mkdir = %d", rc)); |
1010 | } | ||
1011 | 1009 | ||
1012 | cifs_buf_release(pSMB); | 1010 | cifs_buf_release(pSMB); |
1013 | if (rc == -EAGAIN) | 1011 | if (rc == -EAGAIN) |
@@ -1017,7 +1015,7 @@ MkDirRetry: | |||
1017 | 1015 | ||
1018 | int | 1016 | int |
1019 | CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags, | 1017 | CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags, |
1020 | __u64 mode, __u16 * netfid, FILE_UNIX_BASIC_INFO *pRetData, | 1018 | __u64 mode, __u16 *netfid, FILE_UNIX_BASIC_INFO *pRetData, |
1021 | __u32 *pOplock, const char *name, | 1019 | __u32 *pOplock, const char *name, |
1022 | const struct nls_table *nls_codepage, int remap) | 1020 | const struct nls_table *nls_codepage, int remap) |
1023 | { | 1021 | { |
@@ -1027,8 +1025,8 @@ CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags, | |||
1027 | int rc = 0; | 1025 | int rc = 0; |
1028 | int bytes_returned = 0; | 1026 | int bytes_returned = 0; |
1029 | __u16 params, param_offset, offset, byte_count, count; | 1027 | __u16 params, param_offset, offset, byte_count, count; |
1030 | OPEN_PSX_REQ * pdata; | 1028 | OPEN_PSX_REQ *pdata; |
1031 | OPEN_PSX_RSP * psx_rsp; | 1029 | OPEN_PSX_RSP *psx_rsp; |
1032 | 1030 | ||
1033 | cFYI(1, ("In POSIX Create")); | 1031 | cFYI(1, ("In POSIX Create")); |
1034 | PsxCreat: | 1032 | PsxCreat: |
@@ -1110,9 +1108,7 @@ PsxCreat: | |||
1110 | /* check to make sure response data is there */ | 1108 | /* check to make sure response data is there */ |
1111 | if (psx_rsp->ReturnedLevel != cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC)) { | 1109 | if (psx_rsp->ReturnedLevel != cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC)) { |
1112 | pRetData->Type = cpu_to_le32(-1); /* unknown */ | 1110 | pRetData->Type = cpu_to_le32(-1); /* unknown */ |
1113 | #ifdef CONFIG_CIFS_DEBUG2 | 1111 | cFYI(DBG2, ("unknown type")); |
1114 | cFYI(1, ("unknown type")); | ||
1115 | #endif | ||
1116 | } else { | 1112 | } else { |
1117 | if (pSMBr->ByteCount < sizeof(OPEN_PSX_RSP) | 1113 | if (pSMBr->ByteCount < sizeof(OPEN_PSX_RSP) |
1118 | + sizeof(FILE_UNIX_BASIC_INFO)) { | 1114 | + sizeof(FILE_UNIX_BASIC_INFO)) { |
@@ -1169,8 +1165,8 @@ static __u16 convert_disposition(int disposition) | |||
1169 | int | 1165 | int |
1170 | SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon, | 1166 | SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon, |
1171 | const char *fileName, const int openDisposition, | 1167 | const char *fileName, const int openDisposition, |
1172 | const int access_flags, const int create_options, __u16 * netfid, | 1168 | const int access_flags, const int create_options, __u16 *netfid, |
1173 | int *pOplock, FILE_ALL_INFO * pfile_info, | 1169 | int *pOplock, FILE_ALL_INFO *pfile_info, |
1174 | const struct nls_table *nls_codepage, int remap) | 1170 | const struct nls_table *nls_codepage, int remap) |
1175 | { | 1171 | { |
1176 | int rc = -EACCES; | 1172 | int rc = -EACCES; |
@@ -1221,8 +1217,8 @@ OldOpenRetry: | |||
1221 | 1217 | ||
1222 | if (create_options & CREATE_OPTION_SPECIAL) | 1218 | if (create_options & CREATE_OPTION_SPECIAL) |
1223 | pSMB->FileAttributes = cpu_to_le16(ATTR_SYSTEM); | 1219 | pSMB->FileAttributes = cpu_to_le16(ATTR_SYSTEM); |
1224 | else | 1220 | else /* BB FIXME BB */ |
1225 | pSMB->FileAttributes = cpu_to_le16(0/*ATTR_NORMAL*/); /* BB FIXME */ | 1221 | pSMB->FileAttributes = cpu_to_le16(0/*ATTR_NORMAL*/); |
1226 | 1222 | ||
1227 | /* if ((omode & S_IWUGO) == 0) | 1223 | /* if ((omode & S_IWUGO) == 0) |
1228 | pSMB->FileAttributes |= cpu_to_le32(ATTR_READONLY);*/ | 1224 | pSMB->FileAttributes |= cpu_to_le32(ATTR_READONLY);*/ |
@@ -1284,8 +1280,8 @@ OldOpenRetry: | |||
1284 | int | 1280 | int |
1285 | CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon, | 1281 | CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon, |
1286 | const char *fileName, const int openDisposition, | 1282 | const char *fileName, const int openDisposition, |
1287 | const int access_flags, const int create_options, __u16 * netfid, | 1283 | const int access_flags, const int create_options, __u16 *netfid, |
1288 | int *pOplock, FILE_ALL_INFO * pfile_info, | 1284 | int *pOplock, FILE_ALL_INFO *pfile_info, |
1289 | const struct nls_table *nls_codepage, int remap) | 1285 | const struct nls_table *nls_codepage, int remap) |
1290 | { | 1286 | { |
1291 | int rc = -EACCES; | 1287 | int rc = -EACCES; |
@@ -1556,9 +1552,9 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, | |||
1556 | } /* else setting file size with write of zero bytes */ | 1552 | } /* else setting file size with write of zero bytes */ |
1557 | if (wct == 14) | 1553 | if (wct == 14) |
1558 | byte_count = bytes_sent + 1; /* pad */ | 1554 | byte_count = bytes_sent + 1; /* pad */ |
1559 | else /* wct == 12 */ { | 1555 | else /* wct == 12 */ |
1560 | byte_count = bytes_sent + 5; /* bigger pad, smaller smb hdr */ | 1556 | byte_count = bytes_sent + 5; /* bigger pad, smaller smb hdr */ |
1561 | } | 1557 | |
1562 | pSMB->DataLengthLow = cpu_to_le16(bytes_sent & 0xFFFF); | 1558 | pSMB->DataLengthLow = cpu_to_le16(bytes_sent & 0xFFFF); |
1563 | pSMB->DataLengthHigh = cpu_to_le16(bytes_sent >> 16); | 1559 | pSMB->DataLengthHigh = cpu_to_le16(bytes_sent >> 16); |
1564 | pSMB->hdr.smb_buf_length += byte_count; | 1560 | pSMB->hdr.smb_buf_length += byte_count; |
@@ -1663,7 +1659,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon, | |||
1663 | rc = -EIO; | 1659 | rc = -EIO; |
1664 | *nbytes = 0; | 1660 | *nbytes = 0; |
1665 | } else { | 1661 | } else { |
1666 | WRITE_RSP * pSMBr = (WRITE_RSP *)iov[0].iov_base; | 1662 | WRITE_RSP *pSMBr = (WRITE_RSP *)iov[0].iov_base; |
1667 | *nbytes = le16_to_cpu(pSMBr->CountHigh); | 1663 | *nbytes = le16_to_cpu(pSMBr->CountHigh); |
1668 | *nbytes = (*nbytes) << 16; | 1664 | *nbytes = (*nbytes) << 16; |
1669 | *nbytes += le16_to_cpu(pSMBr->Count); | 1665 | *nbytes += le16_to_cpu(pSMBr->Count); |
@@ -1744,9 +1740,8 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, | |||
1744 | /* SMB buffer freed by function above */ | 1740 | /* SMB buffer freed by function above */ |
1745 | } | 1741 | } |
1746 | cifs_stats_inc(&tcon->num_locks); | 1742 | cifs_stats_inc(&tcon->num_locks); |
1747 | if (rc) { | 1743 | if (rc) |
1748 | cFYI(1, ("Send error in Lock = %d", rc)); | 1744 | cFYI(1, ("Send error in Lock = %d", rc)); |
1749 | } | ||
1750 | 1745 | ||
1751 | /* Note: On -EAGAIN error only caller can retry on handle based calls | 1746 | /* Note: On -EAGAIN error only caller can retry on handle based calls |
1752 | since file handle passed in no longer valid */ | 1747 | since file handle passed in no longer valid */ |
@@ -1791,7 +1786,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, | |||
1791 | 1786 | ||
1792 | count = sizeof(struct cifs_posix_lock); | 1787 | count = sizeof(struct cifs_posix_lock); |
1793 | pSMB->MaxParameterCount = cpu_to_le16(2); | 1788 | pSMB->MaxParameterCount = cpu_to_le16(2); |
1794 | pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB PDU from sess */ | 1789 | pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */ |
1795 | pSMB->SetupCount = 1; | 1790 | pSMB->SetupCount = 1; |
1796 | pSMB->Reserved3 = 0; | 1791 | pSMB->Reserved3 = 0; |
1797 | if (get_flag) | 1792 | if (get_flag) |
@@ -1972,9 +1967,8 @@ renameRetry: | |||
1972 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 1967 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
1973 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 1968 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
1974 | cifs_stats_inc(&tcon->num_renames); | 1969 | cifs_stats_inc(&tcon->num_renames); |
1975 | if (rc) { | 1970 | if (rc) |
1976 | cFYI(1, ("Send error in rename = %d", rc)); | 1971 | cFYI(1, ("Send error in rename = %d", rc)); |
1977 | } | ||
1978 | 1972 | ||
1979 | cifs_buf_release(pSMB); | 1973 | cifs_buf_release(pSMB); |
1980 | 1974 | ||
@@ -2016,7 +2010,7 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon, | |||
2016 | data_offset = (char *) (&pSMB->hdr.Protocol) + offset; | 2010 | data_offset = (char *) (&pSMB->hdr.Protocol) + offset; |
2017 | rename_info = (struct set_file_rename *) data_offset; | 2011 | rename_info = (struct set_file_rename *) data_offset; |
2018 | pSMB->MaxParameterCount = cpu_to_le16(2); | 2012 | pSMB->MaxParameterCount = cpu_to_le16(2); |
2019 | pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB PDU from sess */ | 2013 | pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */ |
2020 | pSMB->SetupCount = 1; | 2014 | pSMB->SetupCount = 1; |
2021 | pSMB->Reserved3 = 0; | 2015 | pSMB->Reserved3 = 0; |
2022 | pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); | 2016 | pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); |
@@ -2052,9 +2046,8 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon, | |||
2052 | rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB, | 2046 | rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB, |
2053 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 2047 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
2054 | cifs_stats_inc(&pTcon->num_t2renames); | 2048 | cifs_stats_inc(&pTcon->num_t2renames); |
2055 | if (rc) { | 2049 | if (rc) |
2056 | cFYI(1, ("Send error in Rename (by file handle) = %d", rc)); | 2050 | cFYI(1, ("Send error in Rename (by file handle) = %d", rc)); |
2057 | } | ||
2058 | 2051 | ||
2059 | cifs_buf_release(pSMB); | 2052 | cifs_buf_release(pSMB); |
2060 | 2053 | ||
@@ -2211,9 +2204,8 @@ createSymLinkRetry: | |||
2211 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 2204 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
2212 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 2205 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
2213 | cifs_stats_inc(&tcon->num_symlinks); | 2206 | cifs_stats_inc(&tcon->num_symlinks); |
2214 | if (rc) { | 2207 | if (rc) |
2215 | cFYI(1, ("Send error in SetPathInfo create symlink = %d", rc)); | 2208 | cFYI(1, ("Send error in SetPathInfo create symlink = %d", rc)); |
2216 | } | ||
2217 | 2209 | ||
2218 | if (pSMB) | 2210 | if (pSMB) |
2219 | cifs_buf_release(pSMB); | 2211 | cifs_buf_release(pSMB); |
@@ -2299,9 +2291,8 @@ createHardLinkRetry: | |||
2299 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 2291 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
2300 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 2292 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
2301 | cifs_stats_inc(&tcon->num_hardlinks); | 2293 | cifs_stats_inc(&tcon->num_hardlinks); |
2302 | if (rc) { | 2294 | if (rc) |
2303 | cFYI(1, ("Send error in SetPathInfo (hard link) = %d", rc)); | 2295 | cFYI(1, ("Send error in SetPathInfo (hard link) = %d", rc)); |
2304 | } | ||
2305 | 2296 | ||
2306 | cifs_buf_release(pSMB); | 2297 | cifs_buf_release(pSMB); |
2307 | if (rc == -EAGAIN) | 2298 | if (rc == -EAGAIN) |
@@ -2370,9 +2361,9 @@ winCreateHardLinkRetry: | |||
2370 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 2361 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
2371 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 2362 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
2372 | cifs_stats_inc(&tcon->num_hardlinks); | 2363 | cifs_stats_inc(&tcon->num_hardlinks); |
2373 | if (rc) { | 2364 | if (rc) |
2374 | cFYI(1, ("Send error in hard link (NT rename) = %d", rc)); | 2365 | cFYI(1, ("Send error in hard link (NT rename) = %d", rc)); |
2375 | } | 2366 | |
2376 | cifs_buf_release(pSMB); | 2367 | cifs_buf_release(pSMB); |
2377 | if (rc == -EAGAIN) | 2368 | if (rc == -EAGAIN) |
2378 | goto winCreateHardLinkRetry; | 2369 | goto winCreateHardLinkRetry; |
@@ -2968,9 +2959,8 @@ setAclRetry: | |||
2968 | pSMB->ByteCount = cpu_to_le16(byte_count); | 2959 | pSMB->ByteCount = cpu_to_le16(byte_count); |
2969 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 2960 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
2970 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 2961 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
2971 | if (rc) { | 2962 | if (rc) |
2972 | cFYI(1, ("Set POSIX ACL returned %d", rc)); | 2963 | cFYI(1, ("Set POSIX ACL returned %d", rc)); |
2973 | } | ||
2974 | 2964 | ||
2975 | setACLerrorExit: | 2965 | setACLerrorExit: |
2976 | cifs_buf_release(pSMB); | 2966 | cifs_buf_release(pSMB); |
@@ -2982,7 +2972,7 @@ setACLerrorExit: | |||
2982 | /* BB fix tabs in this function FIXME BB */ | 2972 | /* BB fix tabs in this function FIXME BB */ |
2983 | int | 2973 | int |
2984 | CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon, | 2974 | CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon, |
2985 | const int netfid, __u64 * pExtAttrBits, __u64 *pMask) | 2975 | const int netfid, __u64 *pExtAttrBits, __u64 *pMask) |
2986 | { | 2976 | { |
2987 | int rc = 0; | 2977 | int rc = 0; |
2988 | struct smb_t2_qfi_req *pSMB = NULL; | 2978 | struct smb_t2_qfi_req *pSMB = NULL; |
@@ -3000,7 +2990,7 @@ GetExtAttrRetry: | |||
3000 | if (rc) | 2990 | if (rc) |
3001 | return rc; | 2991 | return rc; |
3002 | 2992 | ||
3003 | params = 2 /* level */ +2 /* fid */; | 2993 | params = 2 /* level */ + 2 /* fid */; |
3004 | pSMB->t2.TotalDataCount = 0; | 2994 | pSMB->t2.TotalDataCount = 0; |
3005 | pSMB->t2.MaxParameterCount = cpu_to_le16(4); | 2995 | pSMB->t2.MaxParameterCount = cpu_to_le16(4); |
3006 | /* BB find exact max data count below from sess structure BB */ | 2996 | /* BB find exact max data count below from sess structure BB */ |
@@ -3071,7 +3061,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid, | |||
3071 | { | 3061 | { |
3072 | int rc = 0; | 3062 | int rc = 0; |
3073 | int buf_type = 0; | 3063 | int buf_type = 0; |
3074 | QUERY_SEC_DESC_REQ * pSMB; | 3064 | QUERY_SEC_DESC_REQ *pSMB; |
3075 | struct kvec iov[1]; | 3065 | struct kvec iov[1]; |
3076 | 3066 | ||
3077 | cFYI(1, ("GetCifsACL")); | 3067 | cFYI(1, ("GetCifsACL")); |
@@ -3101,7 +3091,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid, | |||
3101 | if (rc) { | 3091 | if (rc) { |
3102 | cFYI(1, ("Send error in QuerySecDesc = %d", rc)); | 3092 | cFYI(1, ("Send error in QuerySecDesc = %d", rc)); |
3103 | } else { /* decode response */ | 3093 | } else { /* decode response */ |
3104 | __le32 * parm; | 3094 | __le32 *parm; |
3105 | __u32 parm_len; | 3095 | __u32 parm_len; |
3106 | __u32 acl_len; | 3096 | __u32 acl_len; |
3107 | struct smb_com_ntransact_rsp *pSMBr; | 3097 | struct smb_com_ntransact_rsp *pSMBr; |
@@ -3230,8 +3220,8 @@ int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon, | |||
3230 | FILE_ALL_INFO *pFinfo, | 3220 | FILE_ALL_INFO *pFinfo, |
3231 | const struct nls_table *nls_codepage, int remap) | 3221 | const struct nls_table *nls_codepage, int remap) |
3232 | { | 3222 | { |
3233 | QUERY_INFORMATION_REQ * pSMB; | 3223 | QUERY_INFORMATION_REQ *pSMB; |
3234 | QUERY_INFORMATION_RSP * pSMBr; | 3224 | QUERY_INFORMATION_RSP *pSMBr; |
3235 | int rc = 0; | 3225 | int rc = 0; |
3236 | int bytes_returned; | 3226 | int bytes_returned; |
3237 | int name_len; | 3227 | int name_len; |
@@ -3263,9 +3253,11 @@ QInfRetry: | |||
3263 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 3253 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
3264 | if (rc) { | 3254 | if (rc) { |
3265 | cFYI(1, ("Send error in QueryInfo = %d", rc)); | 3255 | cFYI(1, ("Send error in QueryInfo = %d", rc)); |
3266 | } else if (pFinfo) { /* decode response */ | 3256 | } else if (pFinfo) { |
3267 | struct timespec ts; | 3257 | struct timespec ts; |
3268 | __u32 time = le32_to_cpu(pSMBr->last_write_time); | 3258 | __u32 time = le32_to_cpu(pSMBr->last_write_time); |
3259 | |||
3260 | /* decode response */ | ||
3269 | /* BB FIXME - add time zone adjustment BB */ | 3261 | /* BB FIXME - add time zone adjustment BB */ |
3270 | memset(pFinfo, 0, sizeof(FILE_ALL_INFO)); | 3262 | memset(pFinfo, 0, sizeof(FILE_ALL_INFO)); |
3271 | ts.tv_nsec = 0; | 3263 | ts.tv_nsec = 0; |
@@ -3296,7 +3288,7 @@ QInfRetry: | |||
3296 | int | 3288 | int |
3297 | CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon, | 3289 | CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon, |
3298 | const unsigned char *searchName, | 3290 | const unsigned char *searchName, |
3299 | FILE_ALL_INFO * pFindData, | 3291 | FILE_ALL_INFO *pFindData, |
3300 | int legacy /* old style infolevel */, | 3292 | int legacy /* old style infolevel */, |
3301 | const struct nls_table *nls_codepage, int remap) | 3293 | const struct nls_table *nls_codepage, int remap) |
3302 | { | 3294 | { |
@@ -3371,10 +3363,12 @@ QPathInfoRetry: | |||
3371 | else if (pFindData) { | 3363 | else if (pFindData) { |
3372 | int size; | 3364 | int size; |
3373 | __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); | 3365 | __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); |
3374 | if (legacy) /* we do not read the last field, EAsize, | 3366 | |
3375 | fortunately since it varies by subdialect | 3367 | /* On legacy responses we do not read the last field, |
3376 | and on Set vs. Get, is two bytes or 4 | 3368 | EAsize, fortunately since it varies by subdialect and |
3377 | bytes depending but we don't care here */ | 3369 | also note it differs on Set vs. Get, ie two bytes or 4 |
3370 | bytes depending but we don't care here */ | ||
3371 | if (legacy) | ||
3378 | size = sizeof(FILE_INFO_STANDARD); | 3372 | size = sizeof(FILE_INFO_STANDARD); |
3379 | else | 3373 | else |
3380 | size = sizeof(FILE_ALL_INFO); | 3374 | size = sizeof(FILE_ALL_INFO); |
@@ -3476,85 +3470,6 @@ UnixQPathInfoRetry: | |||
3476 | return rc; | 3470 | return rc; |
3477 | } | 3471 | } |
3478 | 3472 | ||
3479 | #if 0 /* function unused at present */ | ||
3480 | int CIFSFindSingle(const int xid, struct cifsTconInfo *tcon, | ||
3481 | const char *searchName, FILE_ALL_INFO * findData, | ||
3482 | const struct nls_table *nls_codepage) | ||
3483 | { | ||
3484 | /* level 257 SMB_ */ | ||
3485 | TRANSACTION2_FFIRST_REQ *pSMB = NULL; | ||
3486 | TRANSACTION2_FFIRST_RSP *pSMBr = NULL; | ||
3487 | int rc = 0; | ||
3488 | int bytes_returned; | ||
3489 | int name_len; | ||
3490 | __u16 params, byte_count; | ||
3491 | |||
3492 | cFYI(1, ("In FindUnique")); | ||
3493 | findUniqueRetry: | ||
3494 | rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, | ||
3495 | (void **) &pSMBr); | ||
3496 | if (rc) | ||
3497 | return rc; | ||
3498 | |||
3499 | if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { | ||
3500 | name_len = | ||
3501 | cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, | ||
3502 | PATH_MAX, nls_codepage); | ||
3503 | name_len++; /* trailing null */ | ||
3504 | name_len *= 2; | ||
3505 | } else { /* BB improve the check for buffer overruns BB */ | ||
3506 | name_len = strnlen(searchName, PATH_MAX); | ||
3507 | name_len++; /* trailing null */ | ||
3508 | strncpy(pSMB->FileName, searchName, name_len); | ||
3509 | } | ||
3510 | |||
3511 | params = 12 + name_len /* includes null */ ; | ||
3512 | pSMB->TotalDataCount = 0; /* no EAs */ | ||
3513 | pSMB->MaxParameterCount = cpu_to_le16(2); | ||
3514 | pSMB->MaxDataCount = cpu_to_le16(4000); /* BB find exact max SMB PDU from sess structure BB */ | ||
3515 | pSMB->MaxSetupCount = 0; | ||
3516 | pSMB->Reserved = 0; | ||
3517 | pSMB->Flags = 0; | ||
3518 | pSMB->Timeout = 0; | ||
3519 | pSMB->Reserved2 = 0; | ||
3520 | pSMB->ParameterOffset = cpu_to_le16( | ||
3521 | offsetof(struct smb_com_transaction2_ffirst_req, InformationLevel)-4); | ||
3522 | pSMB->DataCount = 0; | ||
3523 | pSMB->DataOffset = 0; | ||
3524 | pSMB->SetupCount = 1; /* one byte, no need to le convert */ | ||
3525 | pSMB->Reserved3 = 0; | ||
3526 | pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_FIRST); | ||
3527 | byte_count = params + 1 /* pad */ ; | ||
3528 | pSMB->TotalParameterCount = cpu_to_le16(params); | ||
3529 | pSMB->ParameterCount = pSMB->TotalParameterCount; | ||
3530 | pSMB->SearchAttributes = | ||
3531 | cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | | ||
3532 | ATTR_DIRECTORY); | ||
3533 | pSMB->SearchCount = cpu_to_le16(16); /* BB increase */ | ||
3534 | pSMB->SearchFlags = cpu_to_le16(1); | ||
3535 | pSMB->InformationLevel = cpu_to_le16(SMB_FIND_FILE_DIRECTORY_INFO); | ||
3536 | pSMB->SearchStorageType = 0; /* BB what should we set this to? BB */ | ||
3537 | pSMB->hdr.smb_buf_length += byte_count; | ||
3538 | pSMB->ByteCount = cpu_to_le16(byte_count); | ||
3539 | |||
3540 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | ||
3541 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | ||
3542 | |||
3543 | if (rc) { | ||
3544 | cFYI(1, ("Send error in FindFileDirInfo = %d", rc)); | ||
3545 | } else { /* decode response */ | ||
3546 | cifs_stats_inc(&tcon->num_ffirst); | ||
3547 | /* BB fill in */ | ||
3548 | } | ||
3549 | |||
3550 | cifs_buf_release(pSMB); | ||
3551 | if (rc == -EAGAIN) | ||
3552 | goto findUniqueRetry; | ||
3553 | |||
3554 | return rc; | ||
3555 | } | ||
3556 | #endif /* end unused (temporarily) function */ | ||
3557 | |||
3558 | /* xid, tcon, searchName and codepage are input parms, rest are returned */ | 3473 | /* xid, tcon, searchName and codepage are input parms, rest are returned */ |
3559 | int | 3474 | int |
3560 | CIFSFindFirst(const int xid, struct cifsTconInfo *tcon, | 3475 | CIFSFindFirst(const int xid, struct cifsTconInfo *tcon, |
@@ -3566,7 +3481,7 @@ CIFSFindFirst(const int xid, struct cifsTconInfo *tcon, | |||
3566 | /* level 257 SMB_ */ | 3481 | /* level 257 SMB_ */ |
3567 | TRANSACTION2_FFIRST_REQ *pSMB = NULL; | 3482 | TRANSACTION2_FFIRST_REQ *pSMB = NULL; |
3568 | TRANSACTION2_FFIRST_RSP *pSMBr = NULL; | 3483 | TRANSACTION2_FFIRST_RSP *pSMBr = NULL; |
3569 | T2_FFIRST_RSP_PARMS * parms; | 3484 | T2_FFIRST_RSP_PARMS *parms; |
3570 | int rc = 0; | 3485 | int rc = 0; |
3571 | int bytes_returned = 0; | 3486 | int bytes_returned = 0; |
3572 | int name_len; | 3487 | int name_len; |
@@ -3697,7 +3612,7 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon, | |||
3697 | { | 3612 | { |
3698 | TRANSACTION2_FNEXT_REQ *pSMB = NULL; | 3613 | TRANSACTION2_FNEXT_REQ *pSMB = NULL; |
3699 | TRANSACTION2_FNEXT_RSP *pSMBr = NULL; | 3614 | TRANSACTION2_FNEXT_RSP *pSMBr = NULL; |
3700 | T2_FNEXT_RSP_PARMS * parms; | 3615 | T2_FNEXT_RSP_PARMS *parms; |
3701 | char *response_data; | 3616 | char *response_data; |
3702 | int rc = 0; | 3617 | int rc = 0; |
3703 | int bytes_returned, name_len; | 3618 | int bytes_returned, name_len; |
@@ -3836,9 +3751,9 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon, | |||
3836 | pSMB->FileID = searchHandle; | 3751 | pSMB->FileID = searchHandle; |
3837 | pSMB->ByteCount = 0; | 3752 | pSMB->ByteCount = 0; |
3838 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); | 3753 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); |
3839 | if (rc) { | 3754 | if (rc) |
3840 | cERROR(1, ("Send error in FindClose = %d", rc)); | 3755 | cERROR(1, ("Send error in FindClose = %d", rc)); |
3841 | } | 3756 | |
3842 | cifs_stats_inc(&tcon->num_fclose); | 3757 | cifs_stats_inc(&tcon->num_fclose); |
3843 | 3758 | ||
3844 | /* Since session is dead, search handle closed on server already */ | 3759 | /* Since session is dead, search handle closed on server already */ |
@@ -3851,7 +3766,7 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon, | |||
3851 | int | 3766 | int |
3852 | CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon, | 3767 | CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon, |
3853 | const unsigned char *searchName, | 3768 | const unsigned char *searchName, |
3854 | __u64 * inode_number, | 3769 | __u64 *inode_number, |
3855 | const struct nls_table *nls_codepage, int remap) | 3770 | const struct nls_table *nls_codepage, int remap) |
3856 | { | 3771 | { |
3857 | int rc = 0; | 3772 | int rc = 0; |
@@ -4560,9 +4475,8 @@ SETFSUnixRetry: | |||
4560 | cERROR(1, ("Send error in SETFSUnixInfo = %d", rc)); | 4475 | cERROR(1, ("Send error in SETFSUnixInfo = %d", rc)); |
4561 | } else { /* decode response */ | 4476 | } else { /* decode response */ |
4562 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); | 4477 | rc = validate_t2((struct smb_t2_rsp *)pSMBr); |
4563 | if (rc) { | 4478 | if (rc) |
4564 | rc = -EIO; /* bad smb */ | 4479 | rc = -EIO; /* bad smb */ |
4565 | } | ||
4566 | } | 4480 | } |
4567 | cifs_buf_release(pSMB); | 4481 | cifs_buf_release(pSMB); |
4568 | 4482 | ||
@@ -4744,9 +4658,8 @@ SetEOFRetry: | |||
4744 | pSMB->ByteCount = cpu_to_le16(byte_count); | 4658 | pSMB->ByteCount = cpu_to_le16(byte_count); |
4745 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4659 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
4746 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4660 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
4747 | if (rc) { | 4661 | if (rc) |
4748 | cFYI(1, ("SetPathInfo (file size) returned %d", rc)); | 4662 | cFYI(1, ("SetPathInfo (file size) returned %d", rc)); |
4749 | } | ||
4750 | 4663 | ||
4751 | cifs_buf_release(pSMB); | 4664 | cifs_buf_release(pSMB); |
4752 | 4665 | ||
@@ -4897,9 +4810,8 @@ CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon, | |||
4897 | pSMB->ByteCount = cpu_to_le16(byte_count); | 4810 | pSMB->ByteCount = cpu_to_le16(byte_count); |
4898 | memcpy(data_offset, data, sizeof(FILE_BASIC_INFO)); | 4811 | memcpy(data_offset, data, sizeof(FILE_BASIC_INFO)); |
4899 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); | 4812 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); |
4900 | if (rc) { | 4813 | if (rc) |
4901 | cFYI(1, ("Send error in Set Time (SetFileInfo) = %d", rc)); | 4814 | cFYI(1, ("Send error in Set Time (SetFileInfo) = %d", rc)); |
4902 | } | ||
4903 | 4815 | ||
4904 | /* Note: On -EAGAIN error only caller can retry on handle based calls | 4816 | /* Note: On -EAGAIN error only caller can retry on handle based calls |
4905 | since file handle passed in no longer valid */ | 4817 | since file handle passed in no longer valid */ |
@@ -4975,9 +4887,8 @@ SetTimesRetry: | |||
4975 | pSMB->ByteCount = cpu_to_le16(byte_count); | 4887 | pSMB->ByteCount = cpu_to_le16(byte_count); |
4976 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4888 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
4977 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4889 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
4978 | if (rc) { | 4890 | if (rc) |
4979 | cFYI(1, ("SetPathInfo (times) returned %d", rc)); | 4891 | cFYI(1, ("SetPathInfo (times) returned %d", rc)); |
4980 | } | ||
4981 | 4892 | ||
4982 | cifs_buf_release(pSMB); | 4893 | cifs_buf_release(pSMB); |
4983 | 4894 | ||
@@ -5027,9 +4938,8 @@ SetAttrLgcyRetry: | |||
5027 | pSMB->ByteCount = cpu_to_le16(name_len + 1); | 4938 | pSMB->ByteCount = cpu_to_le16(name_len + 1); |
5028 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 4939 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
5029 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 4940 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
5030 | if (rc) { | 4941 | if (rc) |
5031 | cFYI(1, ("Error in LegacySetAttr = %d", rc)); | 4942 | cFYI(1, ("Error in LegacySetAttr = %d", rc)); |
5032 | } | ||
5033 | 4943 | ||
5034 | cifs_buf_release(pSMB); | 4944 | cifs_buf_release(pSMB); |
5035 | 4945 | ||
@@ -5138,9 +5048,8 @@ setPermsRetry: | |||
5138 | pSMB->ByteCount = cpu_to_le16(byte_count); | 5048 | pSMB->ByteCount = cpu_to_le16(byte_count); |
5139 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 5049 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
5140 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 5050 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
5141 | if (rc) { | 5051 | if (rc) |
5142 | cFYI(1, ("SetPathInfo (perms) returned %d", rc)); | 5052 | cFYI(1, ("SetPathInfo (perms) returned %d", rc)); |
5143 | } | ||
5144 | 5053 | ||
5145 | if (pSMB) | 5054 | if (pSMB) |
5146 | cifs_buf_release(pSMB); | 5055 | cifs_buf_release(pSMB); |
@@ -5615,9 +5524,8 @@ SetEARetry: | |||
5615 | pSMB->ByteCount = cpu_to_le16(byte_count); | 5524 | pSMB->ByteCount = cpu_to_le16(byte_count); |
5616 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, | 5525 | rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, |
5617 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); | 5526 | (struct smb_hdr *) pSMBr, &bytes_returned, 0); |
5618 | if (rc) { | 5527 | if (rc) |
5619 | cFYI(1, ("SetPathInfo (EA) returned %d", rc)); | 5528 | cFYI(1, ("SetPathInfo (EA) returned %d", rc)); |
5620 | } | ||
5621 | 5529 | ||
5622 | cifs_buf_release(pSMB); | 5530 | cifs_buf_release(pSMB); |
5623 | 5531 | ||
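[Editor's note on the cifssmb.c changes above: besides dropping braces around single-statement if bodies and removing the unused CIFSFindSingle(), several debug messages move from blocks wrapped in #ifdef CONFIG_CIFS_DEBUG2 to plain cFYI(DBG2, ...) calls, gating verbosity at runtime rather than at compile time. The real cFYI()/DBG2 definitions live in cifs_debug.h and are not shown in this diff; the sketch below is only an assumed illustration of a level-gated debug macro, using GNU-style variadic macros as the kernel does.]

        #include <stdio.h>

        static int debug_level = 1;     /* runtime knob, e.g. a module parameter */

        #define DBG2 2
        #define dbg_fyi(level, fmt, ...)                                \
                do {                                                    \
                        if ((level) <= debug_level)                     \
                                fprintf(stderr, "FYI: " fmt "\n",       \
                                        ##__VA_ARGS__);                 \
                } while (0)

        int main(void)
        {
                dbg_fyi(1, "always-on informational message");
                dbg_fyi(DBG2, "verbose message, shown only when debug_level >= 2");
                return 0;
        }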
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 65d0ba72e78f..8dbfa97cd18c 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -1722,8 +1722,15 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, | |||
1722 | originally at mount time */ | 1722 | originally at mount time */ |
1723 | if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0) | 1723 | if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0) |
1724 | cap &= ~CIFS_UNIX_POSIX_ACL_CAP; | 1724 | cap &= ~CIFS_UNIX_POSIX_ACL_CAP; |
1725 | if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) | 1725 | if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { |
1726 | if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) | ||
1727 | cERROR(1, ("POSIXPATH support change")); | ||
1726 | cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; | 1728 | cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; |
1729 | } else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { | ||
1730 | cERROR(1, ("possible reconnect error")); | ||
1731 | cERROR(1, | ||
1732 | ("server disabled POSIX path support")); | ||
1733 | } | ||
1727 | } | 1734 | } |
1728 | 1735 | ||
1729 | cap &= CIFS_UNIX_CAP_MASK; | 1736 | cap &= CIFS_UNIX_CAP_MASK; |
@@ -1753,9 +1760,8 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, | |||
1753 | if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) { | 1760 | if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) { |
1754 | if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) { | 1761 | if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) { |
1755 | CIFS_SB(sb)->rsize = 127 * 1024; | 1762 | CIFS_SB(sb)->rsize = 127 * 1024; |
1756 | #ifdef CONFIG_CIFS_DEBUG2 | 1763 | cFYI(DBG2, |
1757 | cFYI(1, ("larger reads not supported by srv")); | 1764 | ("larger reads not supported by srv")); |
1758 | #endif | ||
1759 | } | 1765 | } |
1760 | } | 1766 | } |
1761 | 1767 | ||
@@ -1792,6 +1798,26 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, | |||
1792 | } | 1798 | } |
1793 | } | 1799 | } |
1794 | 1800 | ||
1801 | static void | ||
1802 | convert_delimiter(char *path, char delim) | ||
1803 | { | ||
1804 | int i; | ||
1805 | char old_delim; | ||
1806 | |||
1807 | if (path == NULL) | ||
1808 | return; | ||
1809 | |||
1810 | if (delim == '/') | ||
1811 | old_delim = '\\'; | ||
1812 | else | ||
1813 | old_delim = '/'; | ||
1814 | |||
1815 | for (i = 0; path[i] != '\0'; i++) { | ||
1816 | if (path[i] == old_delim) | ||
1817 | path[i] = delim; | ||
1818 | } | ||
1819 | } | ||
1820 | |||
1795 | int | 1821 | int |
1796 | cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | 1822 | cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, |
1797 | char *mount_data, const char *devname) | 1823 | char *mount_data, const char *devname) |
@@ -2057,7 +2083,11 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
2057 | cifs_sb->prepath = volume_info.prepath; | 2083 | cifs_sb->prepath = volume_info.prepath; |
2058 | if (cifs_sb->prepath) { | 2084 | if (cifs_sb->prepath) { |
2059 | cifs_sb->prepathlen = strlen(cifs_sb->prepath); | 2085 | cifs_sb->prepathlen = strlen(cifs_sb->prepath); |
2060 | cifs_sb->prepath[0] = CIFS_DIR_SEP(cifs_sb); | 2086 | /* we can not convert the / to \ in the path |
2087 | separators in the prefixpath yet because we do not | ||
2088 | know (until reset_cifs_unix_caps is called later) | ||
2089 | whether POSIX PATH CAP is available. We normalize | ||
2090 | the / to \ after reset_cifs_unix_caps is called */ | ||
2061 | volume_info.prepath = NULL; | 2091 | volume_info.prepath = NULL; |
2062 | } else | 2092 | } else |
2063 | cifs_sb->prepathlen = 0; | 2093 | cifs_sb->prepathlen = 0; |
@@ -2225,11 +2255,15 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
2225 | else | 2255 | else |
2226 | tcon->unix_ext = 0; /* server does not support them */ | 2256 | tcon->unix_ext = 0; /* server does not support them */ |
2227 | 2257 | ||
2258 | /* convert forward to back slashes in prepath here if needed */ | ||
2259 | if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0) | ||
2260 | convert_delimiter(cifs_sb->prepath, | ||
2261 | CIFS_DIR_SEP(cifs_sb)); | ||
2262 | |||
2228 | if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) { | 2263 | if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) { |
2229 | cifs_sb->rsize = 1024 * 127; | 2264 | cifs_sb->rsize = 1024 * 127; |
2230 | #ifdef CONFIG_CIFS_DEBUG2 | 2265 | cFYI(DBG2, |
2231 | cFYI(1, ("no very large read support, rsize now 127K")); | 2266 | ("no very large read support, rsize now 127K")); |
2232 | #endif | ||
2233 | } | 2267 | } |
2234 | if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X)) | 2268 | if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X)) |
2235 | cifs_sb->wsize = min(cifs_sb->wsize, | 2269 | cifs_sb->wsize = min(cifs_sb->wsize, |
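[Editor's note on the connect.c hunks above: the prefix path can no longer have its separators rewritten at parse time, because whether POSIX paths are usable is only known after reset_cifs_unix_caps(); the new convert_delimiter() helper does the rewrite later, as the mount code shows with convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb)). The snippet below simply exercises that helper standalone; the function body is copied from the hunk, while main() and the sample path are illustrative only.]

        #include <stdio.h>

        static void convert_delimiter(char *path, char delim)
        {
                int i;
                char old_delim;

                if (path == NULL)
                        return;

                if (delim == '/')
                        old_delim = '\\';
                else
                        old_delim = '/';

                for (i = 0; path[i] != '\0'; i++) {
                        if (path[i] == old_delim)
                                path[i] = delim;
                }
        }

        int main(void)
        {
                char prepath[] = "/dir1/dir2/file";

                /* server lacks POSIX path support, so flip / to \ */
                convert_delimiter(prepath, '\\');
                printf("%s\n", prepath);        /* prints \dir1\dir2\file */
                return 0;
        }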
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 699ec1198409..4e83b47c4b34 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * vfs operations that deal with dentries | 4 | * vfs operations that deal with dentries |
5 | * | 5 | * |
6 | * Copyright (C) International Business Machines Corp., 2002,2007 | 6 | * Copyright (C) International Business Machines Corp., 2002,2008 |
7 | * Author(s): Steve French (sfrench@us.ibm.com) | 7 | * Author(s): Steve French (sfrench@us.ibm.com) |
8 | * | 8 | * |
9 | * This library is free software; you can redistribute it and/or modify | 9 | * This library is free software; you can redistribute it and/or modify |
@@ -111,16 +111,6 @@ cifs_bp_rename_retry: | |||
111 | return full_path; | 111 | return full_path; |
112 | } | 112 | } |
113 | 113 | ||
114 | /* char * build_wildcard_path_from_dentry(struct dentry *direntry) | ||
115 | { | ||
116 | if(full_path == NULL) | ||
117 | return full_path; | ||
118 | |||
119 | full_path[namelen] = '\\'; | ||
120 | full_path[namelen+1] = '*'; | ||
121 | full_path[namelen+2] = 0; | ||
122 | BB remove above eight lines BB */ | ||
123 | |||
124 | /* Inode operations in similar order to how they appear in Linux file fs.h */ | 114 | /* Inode operations in similar order to how they appear in Linux file fs.h */ |
125 | 115 | ||
126 | int | 116 | int |
@@ -171,9 +161,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, | |||
171 | disposition = FILE_OVERWRITE_IF; | 161 | disposition = FILE_OVERWRITE_IF; |
172 | else if ((oflags & O_CREAT) == O_CREAT) | 162 | else if ((oflags & O_CREAT) == O_CREAT) |
173 | disposition = FILE_OPEN_IF; | 163 | disposition = FILE_OPEN_IF; |
174 | else { | 164 | else |
175 | cFYI(1, ("Create flag not set in create function")); | 165 | cFYI(1, ("Create flag not set in create function")); |
176 | } | ||
177 | } | 166 | } |
178 | 167 | ||
179 | /* BB add processing to set equivalent of mode - e.g. via CreateX with | 168 | /* BB add processing to set equivalent of mode - e.g. via CreateX with |
@@ -367,7 +356,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, | |||
367 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { | 356 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { |
368 | int oplock = 0; | 357 | int oplock = 0; |
369 | u16 fileHandle; | 358 | u16 fileHandle; |
370 | FILE_ALL_INFO * buf; | 359 | FILE_ALL_INFO *buf; |
371 | 360 | ||
372 | cFYI(1, ("sfu compat create special file")); | 361 | cFYI(1, ("sfu compat create special file")); |
373 | 362 | ||
@@ -534,9 +523,8 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd) | |||
534 | int isValid = 1; | 523 | int isValid = 1; |
535 | 524 | ||
536 | if (direntry->d_inode) { | 525 | if (direntry->d_inode) { |
537 | if (cifs_revalidate(direntry)) { | 526 | if (cifs_revalidate(direntry)) |
538 | return 0; | 527 | return 0; |
539 | } | ||
540 | } else { | 528 | } else { |
541 | cFYI(1, ("neg dentry 0x%p name = %s", | 529 | cFYI(1, ("neg dentry 0x%p name = %s", |
542 | direntry, direntry->d_name.name)); | 530 | direntry, direntry->d_name.name)); |
diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h index 073fdc3db419..966e9288930b 100644 --- a/fs/cifs/dns_resolve.h +++ b/fs/cifs/dns_resolve.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * fs/cifs/dns_resolve.h -- DNS Resolver upcall management for CIFS DFS | 2 | * fs/cifs/dns_resolve.h -- DNS Resolver upcall management for CIFS DFS |
3 | * Handles host name to IP address resolution | 3 | * Handles host name to IP address resolution |
4 | * | 4 | * |
5 | * Copyright (c) International Business Machines Corp., 2008 | 5 | * Copyright (c) International Business Machines Corp., 2008 |
6 | * Author(s): Steve French (sfrench@us.ibm.com) | 6 | * Author(s): Steve French (sfrench@us.ibm.com) |
7 | * | 7 | * |
diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c index 995474c90885..7d1d5aa4c430 100644 --- a/fs/cifs/fcntl.c +++ b/fs/cifs/fcntl.c | |||
@@ -35,9 +35,8 @@ static __u32 convert_to_cifs_notify_flags(unsigned long fcntl_notify_flags) | |||
35 | 35 | ||
36 | /* No way on Linux VFS to ask to monitor xattr | 36 | /* No way on Linux VFS to ask to monitor xattr |
37 | changes (and no stream support either */ | 37 | changes (and no stream support either */ |
38 | if (fcntl_notify_flags & DN_ACCESS) { | 38 | if (fcntl_notify_flags & DN_ACCESS) |
39 | cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_ACCESS; | 39 | cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_ACCESS; |
40 | } | ||
41 | if (fcntl_notify_flags & DN_MODIFY) { | 40 | if (fcntl_notify_flags & DN_MODIFY) { |
42 | /* What does this mean on directories? */ | 41 | /* What does this mean on directories? */ |
43 | cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_WRITE | | 42 | cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_WRITE | |
@@ -47,9 +46,8 @@ static __u32 convert_to_cifs_notify_flags(unsigned long fcntl_notify_flags) | |||
47 | cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_CREATION | | 46 | cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_CREATION | |
48 | FILE_NOTIFY_CHANGE_LAST_WRITE; | 47 | FILE_NOTIFY_CHANGE_LAST_WRITE; |
49 | } | 48 | } |
50 | if (fcntl_notify_flags & DN_DELETE) { | 49 | if (fcntl_notify_flags & DN_DELETE) |
51 | cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_WRITE; | 50 | cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_WRITE; |
52 | } | ||
53 | if (fcntl_notify_flags & DN_RENAME) { | 51 | if (fcntl_notify_flags & DN_RENAME) { |
54 | /* BB review this - checking various server behaviors */ | 52 | /* BB review this - checking various server behaviors */ |
55 | cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_DIR_NAME | | 53 | cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_DIR_NAME | |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 5f7c374ae89c..fa849c91d323 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -353,9 +353,9 @@ static int cifs_reopen_file(struct file *file, int can_flush) | |||
353 | int disposition = FILE_OPEN; | 353 | int disposition = FILE_OPEN; |
354 | __u16 netfid; | 354 | __u16 netfid; |
355 | 355 | ||
356 | if (file->private_data) { | 356 | if (file->private_data) |
357 | pCifsFile = (struct cifsFileInfo *)file->private_data; | 357 | pCifsFile = (struct cifsFileInfo *)file->private_data; |
358 | } else | 358 | else |
359 | return -EBADF; | 359 | return -EBADF; |
360 | 360 | ||
361 | xid = GetXid(); | 361 | xid = GetXid(); |
@@ -499,9 +499,8 @@ int cifs_close(struct inode *inode, struct file *file) | |||
499 | the struct would be in each open file, | 499 | the struct would be in each open file, |
500 | but this should give enough time to | 500 | but this should give enough time to |
501 | clear the socket */ | 501 | clear the socket */ |
502 | #ifdef CONFIG_CIFS_DEBUG2 | 502 | cFYI(DBG2, |
503 | cFYI(1, ("close delay, write pending")); | 503 | ("close delay, write pending")); |
504 | #endif /* DEBUG2 */ | ||
505 | msleep(timeout); | 504 | msleep(timeout); |
506 | timeout *= 4; | 505 | timeout *= 4; |
507 | } | 506 | } |
@@ -1423,9 +1422,8 @@ static int cifs_writepage(struct page *page, struct writeback_control *wbc) | |||
1423 | xid = GetXid(); | 1422 | xid = GetXid(); |
1424 | /* BB add check for wbc flags */ | 1423 | /* BB add check for wbc flags */ |
1425 | page_cache_get(page); | 1424 | page_cache_get(page); |
1426 | if (!PageUptodate(page)) { | 1425 | if (!PageUptodate(page)) |
1427 | cFYI(1, ("ppw - page not up to date")); | 1426 | cFYI(1, ("ppw - page not up to date")); |
1428 | } | ||
1429 | 1427 | ||
1430 | /* | 1428 | /* |
1431 | * Set the "writeback" flag, and clear "dirty" in the radix tree. | 1429 | * Set the "writeback" flag, and clear "dirty" in the radix tree. |
@@ -1460,9 +1458,9 @@ static int cifs_commit_write(struct file *file, struct page *page, | |||
1460 | cFYI(1, ("commit write for page %p up to position %lld for %d", | 1458 | cFYI(1, ("commit write for page %p up to position %lld for %d", |
1461 | page, position, to)); | 1459 | page, position, to)); |
1462 | spin_lock(&inode->i_lock); | 1460 | spin_lock(&inode->i_lock); |
1463 | if (position > inode->i_size) { | 1461 | if (position > inode->i_size) |
1464 | i_size_write(inode, position); | 1462 | i_size_write(inode, position); |
1465 | } | 1463 | |
1466 | spin_unlock(&inode->i_lock); | 1464 | spin_unlock(&inode->i_lock); |
1467 | if (!PageUptodate(page)) { | 1465 | if (!PageUptodate(page)) { |
1468 | position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset; | 1466 | position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset; |
@@ -1596,9 +1594,9 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data, | |||
1596 | } | 1594 | } |
1597 | open_file = (struct cifsFileInfo *)file->private_data; | 1595 | open_file = (struct cifsFileInfo *)file->private_data; |
1598 | 1596 | ||
1599 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) { | 1597 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) |
1600 | cFYI(1, ("attempting read on write only file instance")); | 1598 | cFYI(1, ("attempting read on write only file instance")); |
1601 | } | 1599 | |
1602 | for (total_read = 0, current_offset = read_data; | 1600 | for (total_read = 0, current_offset = read_data; |
1603 | read_size > total_read; | 1601 | read_size > total_read; |
1604 | total_read += bytes_read, current_offset += bytes_read) { | 1602 | total_read += bytes_read, current_offset += bytes_read) { |
@@ -1625,9 +1623,8 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data, | |||
1625 | smb_read_data + | 1623 | smb_read_data + |
1626 | 4 /* RFC1001 length field */ + | 1624 | 4 /* RFC1001 length field */ + |
1627 | le16_to_cpu(pSMBr->DataOffset), | 1625 | le16_to_cpu(pSMBr->DataOffset), |
1628 | bytes_read)) { | 1626 | bytes_read)) |
1629 | rc = -EFAULT; | 1627 | rc = -EFAULT; |
1630 | } | ||
1631 | 1628 | ||
1632 | if (buf_type == CIFS_SMALL_BUFFER) | 1629 | if (buf_type == CIFS_SMALL_BUFFER) |
1633 | cifs_small_buf_release(smb_read_data); | 1630 | cifs_small_buf_release(smb_read_data); |
@@ -1814,9 +1811,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
1814 | pTcon = cifs_sb->tcon; | 1811 | pTcon = cifs_sb->tcon; |
1815 | 1812 | ||
1816 | pagevec_init(&lru_pvec, 0); | 1813 | pagevec_init(&lru_pvec, 0); |
1817 | #ifdef CONFIG_CIFS_DEBUG2 | 1814 | cFYI(DBG2, ("rpages: num pages %d", num_pages)); |
1818 | cFYI(1, ("rpages: num pages %d", num_pages)); | ||
1819 | #endif | ||
1820 | for (i = 0; i < num_pages; ) { | 1815 | for (i = 0; i < num_pages; ) { |
1821 | unsigned contig_pages; | 1816 | unsigned contig_pages; |
1822 | struct page *tmp_page; | 1817 | struct page *tmp_page; |
@@ -1849,10 +1844,8 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
1849 | /* Read size needs to be in multiples of one page */ | 1844 | /* Read size needs to be in multiples of one page */ |
1850 | read_size = min_t(const unsigned int, read_size, | 1845 | read_size = min_t(const unsigned int, read_size, |
1851 | cifs_sb->rsize & PAGE_CACHE_MASK); | 1846 | cifs_sb->rsize & PAGE_CACHE_MASK); |
1852 | #ifdef CONFIG_CIFS_DEBUG2 | 1847 | cFYI(DBG2, ("rpages: read size 0x%x contiguous pages %d", |
1853 | cFYI(1, ("rpages: read size 0x%x contiguous pages %d", | ||
1854 | read_size, contig_pages)); | 1848 | read_size, contig_pages)); |
1855 | #endif | ||
1856 | rc = -EAGAIN; | 1849 | rc = -EAGAIN; |
1857 | while (rc == -EAGAIN) { | 1850 | while (rc == -EAGAIN) { |
1858 | if ((open_file->invalidHandle) && | 1851 | if ((open_file->invalidHandle) && |
@@ -2026,7 +2019,7 @@ int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file) | |||
2026 | struct cifs_sb_info *cifs_sb; | 2019 | struct cifs_sb_info *cifs_sb; |
2027 | 2020 | ||
2028 | cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb); | 2021 | cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb); |
2029 | if ( cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO ) { | 2022 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { |
2030 | /* since no page cache to corrupt on directio | 2023 | /* since no page cache to corrupt on directio |
2031 | we can change size safely */ | 2024 | we can change size safely */ |
2032 | return 1; | 2025 | return 1; |
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index b1a4a65eaa08..24eb4d392155 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -29,6 +29,130 @@ | |||
29 | #include "cifs_debug.h" | 29 | #include "cifs_debug.h" |
30 | #include "cifs_fs_sb.h" | 30 | #include "cifs_fs_sb.h" |
31 | 31 | ||
32 | |||
33 | static void cifs_set_ops(struct inode *inode) | ||
34 | { | ||
35 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | ||
36 | |||
37 | switch (inode->i_mode & S_IFMT) { | ||
38 | case S_IFREG: | ||
39 | inode->i_op = &cifs_file_inode_ops; | ||
40 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { | ||
41 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) | ||
42 | inode->i_fop = &cifs_file_direct_nobrl_ops; | ||
43 | else | ||
44 | inode->i_fop = &cifs_file_direct_ops; | ||
45 | } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) | ||
46 | inode->i_fop = &cifs_file_nobrl_ops; | ||
47 | else { /* not direct, send byte range locks */ | ||
48 | inode->i_fop = &cifs_file_ops; | ||
49 | } | ||
50 | |||
51 | |||
52 | /* check if server can support readpages */ | ||
53 | if (cifs_sb->tcon->ses->server->maxBuf < | ||
54 | PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE) | ||
55 | inode->i_data.a_ops = &cifs_addr_ops_smallbuf; | ||
56 | else | ||
57 | inode->i_data.a_ops = &cifs_addr_ops; | ||
58 | break; | ||
59 | case S_IFDIR: | ||
60 | inode->i_op = &cifs_dir_inode_ops; | ||
61 | inode->i_fop = &cifs_dir_ops; | ||
62 | break; | ||
63 | case S_IFLNK: | ||
64 | inode->i_op = &cifs_symlink_inode_ops; | ||
65 | break; | ||
66 | default: | ||
67 | init_special_inode(inode, inode->i_mode, inode->i_rdev); | ||
68 | break; | ||
69 | } | ||
70 | } | ||
71 | |||
72 | static void cifs_unix_info_to_inode(struct inode *inode, | ||
73 | FILE_UNIX_BASIC_INFO *info, int force_uid_gid) | ||
74 | { | ||
75 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | ||
76 | struct cifsInodeInfo *cifsInfo = CIFS_I(inode); | ||
77 | __u64 num_of_bytes = le64_to_cpu(info->NumOfBytes); | ||
78 | __u64 end_of_file = le64_to_cpu(info->EndOfFile); | ||
79 | |||
80 | inode->i_atime = cifs_NTtimeToUnix(le64_to_cpu(info->LastAccessTime)); | ||
81 | inode->i_mtime = | ||
82 | cifs_NTtimeToUnix(le64_to_cpu(info->LastModificationTime)); | ||
83 | inode->i_ctime = cifs_NTtimeToUnix(le64_to_cpu(info->LastStatusChange)); | ||
84 | inode->i_mode = le64_to_cpu(info->Permissions); | ||
85 | |||
86 | /* | ||
87 | * Since we set the inode type below we need to mask off | ||
88 | * to avoid strange results if bits set above. | ||
89 | */ | ||
90 | inode->i_mode &= ~S_IFMT; | ||
91 | switch (le32_to_cpu(info->Type)) { | ||
92 | case UNIX_FILE: | ||
93 | inode->i_mode |= S_IFREG; | ||
94 | break; | ||
95 | case UNIX_SYMLINK: | ||
96 | inode->i_mode |= S_IFLNK; | ||
97 | break; | ||
98 | case UNIX_DIR: | ||
99 | inode->i_mode |= S_IFDIR; | ||
100 | break; | ||
101 | case UNIX_CHARDEV: | ||
102 | inode->i_mode |= S_IFCHR; | ||
103 | inode->i_rdev = MKDEV(le64_to_cpu(info->DevMajor), | ||
104 | le64_to_cpu(info->DevMinor) & MINORMASK); | ||
105 | break; | ||
106 | case UNIX_BLOCKDEV: | ||
107 | inode->i_mode |= S_IFBLK; | ||
108 | inode->i_rdev = MKDEV(le64_to_cpu(info->DevMajor), | ||
109 | le64_to_cpu(info->DevMinor) & MINORMASK); | ||
110 | break; | ||
111 | case UNIX_FIFO: | ||
112 | inode->i_mode |= S_IFIFO; | ||
113 | break; | ||
114 | case UNIX_SOCKET: | ||
115 | inode->i_mode |= S_IFSOCK; | ||
116 | break; | ||
117 | default: | ||
118 | /* safest to call it a file if we do not know */ | ||
119 | inode->i_mode |= S_IFREG; | ||
120 | cFYI(1, ("unknown type %d", le32_to_cpu(info->Type))); | ||
121 | break; | ||
122 | } | ||
123 | |||
124 | if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) && | ||
125 | !force_uid_gid) | ||
126 | inode->i_uid = cifs_sb->mnt_uid; | ||
127 | else | ||
128 | inode->i_uid = le64_to_cpu(info->Uid); | ||
129 | |||
130 | if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) && | ||
131 | !force_uid_gid) | ||
132 | inode->i_gid = cifs_sb->mnt_gid; | ||
133 | else | ||
134 | inode->i_gid = le64_to_cpu(info->Gid); | ||
135 | |||
136 | inode->i_nlink = le64_to_cpu(info->Nlinks); | ||
137 | |||
138 | spin_lock(&inode->i_lock); | ||
139 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { | ||
140 | /* | ||
141 | * We can not safely change the file size here if the client | ||
142 | * is writing to it due to potential races. | ||
143 | */ | ||
144 | i_size_write(inode, end_of_file); | ||
145 | |||
146 | /* | ||
147 | * i_blocks is not related to (i_size / i_blksize), | ||
148 | * but instead 512 byte (2**9) size is required for | ||
149 | * calculating num blocks. | ||
150 | */ | ||
151 | inode->i_blocks = (512 - 1 + num_of_bytes) >> 9; | ||
152 | } | ||
153 | spin_unlock(&inode->i_lock); | ||
154 | } | ||
155 | |||
32 | int cifs_get_inode_info_unix(struct inode **pinode, | 156 | int cifs_get_inode_info_unix(struct inode **pinode, |
33 | const unsigned char *search_path, struct super_block *sb, int xid) | 157 | const unsigned char *search_path, struct super_block *sb, int xid) |
34 | { | 158 | { |
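Note on the hunk above: the new cifs_unix_info_to_inode() helper keeps the long-standing i_blocks calculation, (512 - 1 + num_of_bytes) >> 9. i_blocks is always counted in 512-byte units regardless of the filesystem block size, so the expression simply rounds the server-reported allocation size up to the next 512-byte boundary. A minimal user-space sketch of the same arithmetic (nothing here is CIFS-specific):

#include <stdio.h>

/* Round a byte count up to 512-byte blocks, as the helper above does. */
static unsigned long long bytes_to_blocks(unsigned long long num_of_bytes)
{
	return (512 - 1 + num_of_bytes) >> 9;	/* i.e. DIV_ROUND_UP(bytes, 512) */
}

int main(void)
{
	printf("%llu\n", bytes_to_blocks(1000));	/* prints 2 */
	printf("%llu\n", bytes_to_blocks(1024));	/* prints 2 */
	printf("%llu\n", bytes_to_blocks(1025));	/* prints 3 */
	return 0;
}
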
@@ -74,7 +198,6 @@ int cifs_get_inode_info_unix(struct inode **pinode, | |||
74 | } | 198 | } |
75 | } else { | 199 | } else { |
76 | struct cifsInodeInfo *cifsInfo; | 200 | struct cifsInodeInfo *cifsInfo; |
77 | __u32 type = le32_to_cpu(findData.Type); | ||
78 | __u64 num_of_bytes = le64_to_cpu(findData.NumOfBytes); | 201 | __u64 num_of_bytes = le64_to_cpu(findData.NumOfBytes); |
79 | __u64 end_of_file = le64_to_cpu(findData.EndOfFile); | 202 | __u64 end_of_file = le64_to_cpu(findData.EndOfFile); |
80 | 203 | ||
@@ -105,112 +228,16 @@ int cifs_get_inode_info_unix(struct inode **pinode, | |||
105 | /* this is ok to set on every inode revalidate */ | 228 | /* this is ok to set on every inode revalidate */ |
106 | atomic_set(&cifsInfo->inUse, 1); | 229 | atomic_set(&cifsInfo->inUse, 1); |
107 | 230 | ||
108 | inode->i_atime = | 231 | cifs_unix_info_to_inode(inode, &findData, 0); |
109 | cifs_NTtimeToUnix(le64_to_cpu(findData.LastAccessTime)); | ||
110 | inode->i_mtime = | ||
111 | cifs_NTtimeToUnix(le64_to_cpu | ||
112 | (findData.LastModificationTime)); | ||
113 | inode->i_ctime = | ||
114 | cifs_NTtimeToUnix(le64_to_cpu(findData.LastStatusChange)); | ||
115 | inode->i_mode = le64_to_cpu(findData.Permissions); | ||
116 | /* since we set the inode type below we need to mask off | ||
117 | to avoid strange results if bits set above */ | ||
118 | inode->i_mode &= ~S_IFMT; | ||
119 | if (type == UNIX_FILE) { | ||
120 | inode->i_mode |= S_IFREG; | ||
121 | } else if (type == UNIX_SYMLINK) { | ||
122 | inode->i_mode |= S_IFLNK; | ||
123 | } else if (type == UNIX_DIR) { | ||
124 | inode->i_mode |= S_IFDIR; | ||
125 | } else if (type == UNIX_CHARDEV) { | ||
126 | inode->i_mode |= S_IFCHR; | ||
127 | inode->i_rdev = MKDEV(le64_to_cpu(findData.DevMajor), | ||
128 | le64_to_cpu(findData.DevMinor) & MINORMASK); | ||
129 | } else if (type == UNIX_BLOCKDEV) { | ||
130 | inode->i_mode |= S_IFBLK; | ||
131 | inode->i_rdev = MKDEV(le64_to_cpu(findData.DevMajor), | ||
132 | le64_to_cpu(findData.DevMinor) & MINORMASK); | ||
133 | } else if (type == UNIX_FIFO) { | ||
134 | inode->i_mode |= S_IFIFO; | ||
135 | } else if (type == UNIX_SOCKET) { | ||
136 | inode->i_mode |= S_IFSOCK; | ||
137 | } else { | ||
138 | /* safest to call it a file if we do not know */ | ||
139 | inode->i_mode |= S_IFREG; | ||
140 | cFYI(1, ("unknown type %d", type)); | ||
141 | } | ||
142 | 232 | ||
143 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) | ||
144 | inode->i_uid = cifs_sb->mnt_uid; | ||
145 | else | ||
146 | inode->i_uid = le64_to_cpu(findData.Uid); | ||
147 | |||
148 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) | ||
149 | inode->i_gid = cifs_sb->mnt_gid; | ||
150 | else | ||
151 | inode->i_gid = le64_to_cpu(findData.Gid); | ||
152 | |||
153 | inode->i_nlink = le64_to_cpu(findData.Nlinks); | ||
154 | |||
155 | spin_lock(&inode->i_lock); | ||
156 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { | ||
157 | /* can not safely change the file size here if the | ||
158 | client is writing to it due to potential races */ | ||
159 | i_size_write(inode, end_of_file); | ||
160 | |||
161 | /* blksize needs to be multiple of two. So safer to default to | ||
162 | blksize and blkbits set in superblock so 2**blkbits and blksize | ||
163 | will match rather than setting to: | ||
164 | (pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/ | ||
165 | |||
166 | /* This seems incredibly stupid but it turns out that i_blocks | ||
167 | is not related to (i_size / i_blksize), instead 512 byte size | ||
168 | is required for calculating num blocks */ | ||
169 | |||
170 | /* 512 bytes (2**9) is the fake blocksize that must be used */ | ||
171 | /* for this calculation */ | ||
172 | inode->i_blocks = (512 - 1 + num_of_bytes) >> 9; | ||
173 | } | ||
174 | spin_unlock(&inode->i_lock); | ||
175 | 233 | ||
176 | if (num_of_bytes < end_of_file) | 234 | if (num_of_bytes < end_of_file) |
177 | cFYI(1, ("allocation size less than end of file")); | 235 | cFYI(1, ("allocation size less than end of file")); |
178 | cFYI(1, ("Size %ld and blocks %llu", | 236 | cFYI(1, ("Size %ld and blocks %llu", |
179 | (unsigned long) inode->i_size, | 237 | (unsigned long) inode->i_size, |
180 | (unsigned long long)inode->i_blocks)); | 238 | (unsigned long long)inode->i_blocks)); |
181 | if (S_ISREG(inode->i_mode)) { | 239 | |
182 | cFYI(1, ("File inode")); | 240 | cifs_set_ops(inode); |
183 | inode->i_op = &cifs_file_inode_ops; | ||
184 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { | ||
185 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) | ||
186 | inode->i_fop = | ||
187 | &cifs_file_direct_nobrl_ops; | ||
188 | else | ||
189 | inode->i_fop = &cifs_file_direct_ops; | ||
190 | } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) | ||
191 | inode->i_fop = &cifs_file_nobrl_ops; | ||
192 | else /* not direct, send byte range locks */ | ||
193 | inode->i_fop = &cifs_file_ops; | ||
194 | |||
195 | /* check if server can support readpages */ | ||
196 | if (pTcon->ses->server->maxBuf < | ||
197 | PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE) | ||
198 | inode->i_data.a_ops = &cifs_addr_ops_smallbuf; | ||
199 | else | ||
200 | inode->i_data.a_ops = &cifs_addr_ops; | ||
201 | } else if (S_ISDIR(inode->i_mode)) { | ||
202 | cFYI(1, ("Directory inode")); | ||
203 | inode->i_op = &cifs_dir_inode_ops; | ||
204 | inode->i_fop = &cifs_dir_ops; | ||
205 | } else if (S_ISLNK(inode->i_mode)) { | ||
206 | cFYI(1, ("Symbolic Link inode")); | ||
207 | inode->i_op = &cifs_symlink_inode_ops; | ||
208 | /* tmp_inode->i_fop = */ /* do not need to set to anything */ | ||
209 | } else { | ||
210 | cFYI(1, ("Init special inode")); | ||
211 | init_special_inode(inode, inode->i_mode, | ||
212 | inode->i_rdev); | ||
213 | } | ||
214 | } | 241 | } |
215 | return rc; | 242 | return rc; |
216 | } | 243 | } |
@@ -490,9 +517,9 @@ int cifs_get_inode_info(struct inode **pinode, | |||
490 | if (decode_sfu_inode(inode, | 517 | if (decode_sfu_inode(inode, |
491 | le64_to_cpu(pfindData->EndOfFile), | 518 | le64_to_cpu(pfindData->EndOfFile), |
492 | search_path, | 519 | search_path, |
493 | cifs_sb, xid)) { | 520 | cifs_sb, xid)) |
494 | cFYI(1, ("Unrecognized sfu inode type")); | 521 | cFYI(1, ("Unrecognized sfu inode type")); |
495 | } | 522 | |
496 | cFYI(1, ("sfu mode 0%o", inode->i_mode)); | 523 | cFYI(1, ("sfu mode 0%o", inode->i_mode)); |
497 | } else { | 524 | } else { |
498 | inode->i_mode |= S_IFREG; | 525 | inode->i_mode |= S_IFREG; |
@@ -546,36 +573,7 @@ int cifs_get_inode_info(struct inode **pinode, | |||
546 | atomic_set(&cifsInfo->inUse, 1); | 573 | atomic_set(&cifsInfo->inUse, 1); |
547 | } | 574 | } |
548 | 575 | ||
549 | if (S_ISREG(inode->i_mode)) { | 576 | cifs_set_ops(inode); |
550 | cFYI(1, ("File inode")); | ||
551 | inode->i_op = &cifs_file_inode_ops; | ||
552 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { | ||
553 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) | ||
554 | inode->i_fop = | ||
555 | &cifs_file_direct_nobrl_ops; | ||
556 | else | ||
557 | inode->i_fop = &cifs_file_direct_ops; | ||
558 | } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) | ||
559 | inode->i_fop = &cifs_file_nobrl_ops; | ||
560 | else /* not direct, send byte range locks */ | ||
561 | inode->i_fop = &cifs_file_ops; | ||
562 | |||
563 | if (pTcon->ses->server->maxBuf < | ||
564 | PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE) | ||
565 | inode->i_data.a_ops = &cifs_addr_ops_smallbuf; | ||
566 | else | ||
567 | inode->i_data.a_ops = &cifs_addr_ops; | ||
568 | } else if (S_ISDIR(inode->i_mode)) { | ||
569 | cFYI(1, ("Directory inode")); | ||
570 | inode->i_op = &cifs_dir_inode_ops; | ||
571 | inode->i_fop = &cifs_dir_ops; | ||
572 | } else if (S_ISLNK(inode->i_mode)) { | ||
573 | cFYI(1, ("Symbolic Link inode")); | ||
574 | inode->i_op = &cifs_symlink_inode_ops; | ||
575 | } else { | ||
576 | init_special_inode(inode, inode->i_mode, | ||
577 | inode->i_rdev); | ||
578 | } | ||
579 | } | 577 | } |
580 | kfree(buf); | 578 | kfree(buf); |
581 | return rc; | 579 | return rc; |
@@ -792,17 +790,12 @@ psx_del_no_retry: | |||
792 | } | 790 | } |
793 | 791 | ||
794 | static void posix_fill_in_inode(struct inode *tmp_inode, | 792 | static void posix_fill_in_inode(struct inode *tmp_inode, |
795 | FILE_UNIX_BASIC_INFO *pData, int *pobject_type, int isNewInode) | 793 | FILE_UNIX_BASIC_INFO *pData, int isNewInode) |
796 | { | 794 | { |
795 | struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode); | ||
797 | loff_t local_size; | 796 | loff_t local_size; |
798 | struct timespec local_mtime; | 797 | struct timespec local_mtime; |
799 | 798 | ||
800 | struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode); | ||
801 | struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb); | ||
802 | |||
803 | __u32 type = le32_to_cpu(pData->Type); | ||
804 | __u64 num_of_bytes = le64_to_cpu(pData->NumOfBytes); | ||
805 | __u64 end_of_file = le64_to_cpu(pData->EndOfFile); | ||
806 | cifsInfo->time = jiffies; | 799 | cifsInfo->time = jiffies; |
807 | atomic_inc(&cifsInfo->inUse); | 800 | atomic_inc(&cifsInfo->inUse); |
808 | 801 | ||
@@ -810,115 +803,27 @@ static void posix_fill_in_inode(struct inode *tmp_inode, | |||
810 | local_mtime = tmp_inode->i_mtime; | 803 | local_mtime = tmp_inode->i_mtime; |
811 | local_size = tmp_inode->i_size; | 804 | local_size = tmp_inode->i_size; |
812 | 805 | ||
813 | tmp_inode->i_atime = | 806 | cifs_unix_info_to_inode(tmp_inode, pData, 1); |
814 | cifs_NTtimeToUnix(le64_to_cpu(pData->LastAccessTime)); | 807 | cifs_set_ops(tmp_inode); |
815 | tmp_inode->i_mtime = | ||
816 | cifs_NTtimeToUnix(le64_to_cpu(pData->LastModificationTime)); | ||
817 | tmp_inode->i_ctime = | ||
818 | cifs_NTtimeToUnix(le64_to_cpu(pData->LastStatusChange)); | ||
819 | |||
820 | tmp_inode->i_mode = le64_to_cpu(pData->Permissions); | ||
821 | /* since we set the inode type below we need to mask off type | ||
822 | to avoid strange results if bits above were corrupt */ | ||
823 | tmp_inode->i_mode &= ~S_IFMT; | ||
824 | if (type == UNIX_FILE) { | ||
825 | *pobject_type = DT_REG; | ||
826 | tmp_inode->i_mode |= S_IFREG; | ||
827 | } else if (type == UNIX_SYMLINK) { | ||
828 | *pobject_type = DT_LNK; | ||
829 | tmp_inode->i_mode |= S_IFLNK; | ||
830 | } else if (type == UNIX_DIR) { | ||
831 | *pobject_type = DT_DIR; | ||
832 | tmp_inode->i_mode |= S_IFDIR; | ||
833 | } else if (type == UNIX_CHARDEV) { | ||
834 | *pobject_type = DT_CHR; | ||
835 | tmp_inode->i_mode |= S_IFCHR; | ||
836 | tmp_inode->i_rdev = MKDEV(le64_to_cpu(pData->DevMajor), | ||
837 | le64_to_cpu(pData->DevMinor) & MINORMASK); | ||
838 | } else if (type == UNIX_BLOCKDEV) { | ||
839 | *pobject_type = DT_BLK; | ||
840 | tmp_inode->i_mode |= S_IFBLK; | ||
841 | tmp_inode->i_rdev = MKDEV(le64_to_cpu(pData->DevMajor), | ||
842 | le64_to_cpu(pData->DevMinor) & MINORMASK); | ||
843 | } else if (type == UNIX_FIFO) { | ||
844 | *pobject_type = DT_FIFO; | ||
845 | tmp_inode->i_mode |= S_IFIFO; | ||
846 | } else if (type == UNIX_SOCKET) { | ||
847 | *pobject_type = DT_SOCK; | ||
848 | tmp_inode->i_mode |= S_IFSOCK; | ||
849 | } else { | ||
850 | /* safest to just call it a file */ | ||
851 | *pobject_type = DT_REG; | ||
852 | tmp_inode->i_mode |= S_IFREG; | ||
853 | cFYI(1, ("unknown inode type %d", type)); | ||
854 | } | ||
855 | |||
856 | #ifdef CONFIG_CIFS_DEBUG2 | ||
857 | cFYI(1, ("object type: %d", type)); | ||
858 | #endif | ||
859 | tmp_inode->i_uid = le64_to_cpu(pData->Uid); | ||
860 | tmp_inode->i_gid = le64_to_cpu(pData->Gid); | ||
861 | tmp_inode->i_nlink = le64_to_cpu(pData->Nlinks); | ||
862 | |||
863 | spin_lock(&tmp_inode->i_lock); | ||
864 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { | ||
865 | /* can not safely change the file size here if the | ||
866 | client is writing to it due to potential races */ | ||
867 | i_size_write(tmp_inode, end_of_file); | ||
868 | 808 | ||
869 | /* 512 bytes (2**9) is the fake blocksize that must be used */ | 809 | if (!S_ISREG(tmp_inode->i_mode)) |
870 | /* for this calculation, not the real blocksize */ | 810 | return; |
871 | tmp_inode->i_blocks = (512 - 1 + num_of_bytes) >> 9; | ||
872 | } | ||
873 | spin_unlock(&tmp_inode->i_lock); | ||
874 | |||
875 | if (S_ISREG(tmp_inode->i_mode)) { | ||
876 | cFYI(1, ("File inode")); | ||
877 | tmp_inode->i_op = &cifs_file_inode_ops; | ||
878 | |||
879 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { | ||
880 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) | ||
881 | tmp_inode->i_fop = &cifs_file_direct_nobrl_ops; | ||
882 | else | ||
883 | tmp_inode->i_fop = &cifs_file_direct_ops; | ||
884 | 811 | ||
885 | } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) | 812 | /* |
886 | tmp_inode->i_fop = &cifs_file_nobrl_ops; | 813 | * No sense invalidating pages for new inode |
887 | else | 814 | * since we have not started caching |
888 | tmp_inode->i_fop = &cifs_file_ops; | 815 | * readahead file data yet. |
889 | 816 | */ | |
890 | if ((cifs_sb->tcon) && (cifs_sb->tcon->ses) && | 817 | if (isNewInode) |
891 | (cifs_sb->tcon->ses->server->maxBuf < | 818 | return; |
892 | PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)) | ||
893 | tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf; | ||
894 | else | ||
895 | tmp_inode->i_data.a_ops = &cifs_addr_ops; | ||
896 | |||
897 | if (isNewInode) | ||
898 | return; /* No sense invalidating pages for new inode | ||
899 | since we we have not started caching | ||
900 | readahead file data yet */ | ||
901 | 819 | ||
902 | if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) && | 820 | if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) && |
903 | (local_size == tmp_inode->i_size)) { | 821 | (local_size == tmp_inode->i_size)) { |
904 | cFYI(1, ("inode exists but unchanged")); | 822 | cFYI(1, ("inode exists but unchanged")); |
905 | } else { | ||
906 | /* file may have changed on server */ | ||
907 | cFYI(1, ("invalidate inode, readdir detected change")); | ||
908 | invalidate_remote_inode(tmp_inode); | ||
909 | } | ||
910 | } else if (S_ISDIR(tmp_inode->i_mode)) { | ||
911 | cFYI(1, ("Directory inode")); | ||
912 | tmp_inode->i_op = &cifs_dir_inode_ops; | ||
913 | tmp_inode->i_fop = &cifs_dir_ops; | ||
914 | } else if (S_ISLNK(tmp_inode->i_mode)) { | ||
915 | cFYI(1, ("Symbolic Link inode")); | ||
916 | tmp_inode->i_op = &cifs_symlink_inode_ops; | ||
917 | /* tmp_inode->i_fop = *//* do not need to set to anything */ | ||
918 | } else { | 823 | } else { |
919 | cFYI(1, ("Special inode")); | 824 | /* file may have changed on server */ |
920 | init_special_inode(tmp_inode, tmp_inode->i_mode, | 825 | cFYI(1, ("invalidate inode, readdir detected change")); |
921 | tmp_inode->i_rdev); | 826 | invalidate_remote_inode(tmp_inode); |
922 | } | 827 | } |
923 | } | 828 | } |
924 | 829 | ||
@@ -968,7 +873,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) | |||
968 | cFYI(1, ("posix mkdir returned 0x%x", rc)); | 873 | cFYI(1, ("posix mkdir returned 0x%x", rc)); |
969 | d_drop(direntry); | 874 | d_drop(direntry); |
970 | } else { | 875 | } else { |
971 | int obj_type; | ||
972 | if (pInfo->Type == cpu_to_le32(-1)) { | 876 | if (pInfo->Type == cpu_to_le32(-1)) { |
973 | /* no return info, go query for it */ | 877 | /* no return info, go query for it */ |
974 | kfree(pInfo); | 878 | kfree(pInfo); |
@@ -1004,7 +908,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) | |||
1004 | /* we already checked in POSIXCreate whether | 908 | /* we already checked in POSIXCreate whether |
1005 | frame was long enough */ | 909 | frame was long enough */ |
1006 | posix_fill_in_inode(direntry->d_inode, | 910 | posix_fill_in_inode(direntry->d_inode, |
1007 | pInfo, &obj_type, 1 /* NewInode */); | 911 | pInfo, 1 /* NewInode */); |
1008 | #ifdef CONFIG_CIFS_DEBUG2 | 912 | #ifdef CONFIG_CIFS_DEBUG2 |
1009 | cFYI(1, ("instantiated dentry %p %s to inode %p", | 913 | cFYI(1, ("instantiated dentry %p %s to inode %p", |
1010 | direntry, direntry->d_name.name, newinode)); | 914 | direntry, direntry->d_name.name, newinode)); |
@@ -1214,9 +1118,8 @@ int cifs_rename(struct inode *source_inode, struct dentry *source_direntry, | |||
1214 | } /* if we can not get memory just leave rc as EEXIST */ | 1118 | } /* if we can not get memory just leave rc as EEXIST */ |
1215 | } | 1119 | } |
1216 | 1120 | ||
1217 | if (rc) { | 1121 | if (rc) |
1218 | cFYI(1, ("rename rc %d", rc)); | 1122 | cFYI(1, ("rename rc %d", rc)); |
1219 | } | ||
1220 | 1123 | ||
1221 | if ((rc == -EIO) || (rc == -EEXIST)) { | 1124 | if ((rc == -EIO) || (rc == -EEXIST)) { |
1222 | int oplock = FALSE; | 1125 | int oplock = FALSE; |
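The three duplicated S_ISREG/S_ISDIR/S_ISLNK blocks deleted in this file are now funnelled through cifs_set_ops(), whose switch tail is visible at the top of this hunk series. Reconstructed only from the duplicated code removed above, the consolidated logic is roughly the sketch below; it is an approximation, not necessarily the verbatim helper, and the tcon access path in the readpages check is assumed:

/* Rough reconstruction of the consolidated helper, based on the
 * blocks removed in this patch. */
static void cifs_set_ops(struct inode *inode)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &cifs_file_inode_ops;
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
				inode->i_fop = &cifs_file_direct_nobrl_ops;
			else
				inode->i_fop = &cifs_file_direct_ops;
		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			inode->i_fop = &cifs_file_nobrl_ops;
		else /* not direct, send byte range locks */
			inode->i_fop = &cifs_file_ops;

		/* check if server can support readpages */
		if (cifs_sb->tcon->ses->server->maxBuf <
				PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
			inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
		else
			inode->i_data.a_ops = &cifs_addr_ops;
		break;
	case S_IFDIR:
		inode->i_op = &cifs_dir_inode_ops;
		inode->i_fop = &cifs_dir_ops;
		break;
	case S_IFLNK:
		inode->i_op = &cifs_symlink_inode_ops;
		break;
	default:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}
}
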
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c index d24fe6880a04..5c792df13d62 100644 --- a/fs/cifs/ioctl.c +++ b/fs/cifs/ioctl.c | |||
@@ -30,7 +30,7 @@ | |||
30 | 30 | ||
31 | #define CIFS_IOC_CHECKUMOUNT _IO(0xCF, 2) | 31 | #define CIFS_IOC_CHECKUMOUNT _IO(0xCF, 2) |
32 | 32 | ||
33 | int cifs_ioctl (struct inode *inode, struct file *filep, | 33 | int cifs_ioctl(struct inode *inode, struct file *filep, |
34 | unsigned int command, unsigned long arg) | 34 | unsigned int command, unsigned long arg) |
35 | { | 35 | { |
36 | int rc = -ENOTTY; /* strange error - but the precedent */ | 36 | int rc = -ENOTTY; /* strange error - but the precedent */ |
diff --git a/fs/cifs/md4.c b/fs/cifs/md4.c index a2415c1a14db..a725c2609d67 100644 --- a/fs/cifs/md4.c +++ b/fs/cifs/md4.c | |||
@@ -56,7 +56,7 @@ lshift(__u32 x, int s) | |||
56 | 56 | ||
57 | /* this applies md4 to 64 byte chunks */ | 57 | /* this applies md4 to 64 byte chunks */ |
58 | static void | 58 | static void |
59 | mdfour64(__u32 * M, __u32 * A, __u32 *B, __u32 * C, __u32 *D) | 59 | mdfour64(__u32 *M, __u32 *A, __u32 *B, __u32 *C, __u32 *D) |
60 | { | 60 | { |
61 | int j; | 61 | int j; |
62 | __u32 AA, BB, CC, DD; | 62 | __u32 AA, BB, CC, DD; |
@@ -137,7 +137,7 @@ mdfour64(__u32 * M, __u32 * A, __u32 *B, __u32 * C, __u32 *D) | |||
137 | } | 137 | } |
138 | 138 | ||
139 | static void | 139 | static void |
140 | copy64(__u32 * M, unsigned char *in) | 140 | copy64(__u32 *M, unsigned char *in) |
141 | { | 141 | { |
142 | int i; | 142 | int i; |
143 | 143 | ||
diff --git a/fs/cifs/md5.c b/fs/cifs/md5.c index f13f96d42fcf..462bbfefd4b6 100644 --- a/fs/cifs/md5.c +++ b/fs/cifs/md5.c | |||
@@ -161,7 +161,7 @@ MD5Final(unsigned char digest[16], struct MD5Context *ctx) | |||
161 | 161 | ||
162 | /* This is the central step in the MD5 algorithm. */ | 162 | /* This is the central step in the MD5 algorithm. */ |
163 | #define MD5STEP(f, w, x, y, z, data, s) \ | 163 | #define MD5STEP(f, w, x, y, z, data, s) \ |
164 | ( w += f(x, y, z) + data, w = w<<s | w>>(32-s), w += x ) | 164 | (w += f(x, y, z) + data, w = w<<s | w>>(32-s), w += x) |
165 | 165 | ||
166 | /* | 166 | /* |
167 | * The core of the MD5 algorithm, this alters an existing MD5 hash to | 167 | * The core of the MD5 algorithm, this alters an existing MD5 hash to |
@@ -302,9 +302,8 @@ hmac_md5_init_limK_to_64(const unsigned char *key, int key_len, | |||
302 | int i; | 302 | int i; |
303 | 303 | ||
304 | /* if key is longer than 64 bytes truncate it */ | 304 | /* if key is longer than 64 bytes truncate it */ |
305 | if (key_len > 64) { | 305 | if (key_len > 64) |
306 | key_len = 64; | 306 | key_len = 64; |
307 | } | ||
308 | 307 | ||
309 | /* start out by storing key in pads */ | 308 | /* start out by storing key in pads */ |
310 | memset(ctx->k_ipad, 0, sizeof(ctx->k_ipad)); | 309 | memset(ctx->k_ipad, 0, sizeof(ctx->k_ipad)); |
@@ -359,9 +358,9 @@ hmac_md5(unsigned char key[16], unsigned char *data, int data_len, | |||
359 | { | 358 | { |
360 | struct HMACMD5Context ctx; | 359 | struct HMACMD5Context ctx; |
361 | hmac_md5_init_limK_to_64(key, 16, &ctx); | 360 | hmac_md5_init_limK_to_64(key, 16, &ctx); |
362 | if (data_len != 0) { | 361 | if (data_len != 0) |
363 | hmac_md5_update(data, data_len, &ctx); | 362 | hmac_md5_update(data, data_len, &ctx); |
364 | } | 363 | |
365 | hmac_md5_final(digest, &ctx); | 364 | hmac_md5_final(digest, &ctx); |
366 | } | 365 | } |
367 | #endif | 366 | #endif |
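The routine tidied above, hmac_md5_init_limK_to_64(), deviates from RFC 2104 in one documented way: a key longer than the 64-byte MD5 block is truncated rather than hashed down first, as the comment in the hunk notes. For orientation, a schematic HMAC-MD5 written against the MD5Context API used in this file; the MD5Init/MD5Update signatures are assumed from the standard implementation md5.c derives from, so treat this as an illustration rather than driver code:

static void hmac_md5_sketch(const unsigned char *key, int key_len,
			    const unsigned char *msg, int msg_len,
			    unsigned char digest[16])
{
	struct MD5Context ctx;
	unsigned char k_ipad[64], k_opad[64];
	int i;

	if (key_len > 64)
		key_len = 64;		/* truncation, as in the limK helper */

	memset(k_ipad, 0, sizeof(k_ipad));
	memset(k_opad, 0, sizeof(k_opad));
	memcpy(k_ipad, key, key_len);
	memcpy(k_opad, key, key_len);
	for (i = 0; i < 64; i++) {
		k_ipad[i] ^= 0x36;
		k_opad[i] ^= 0x5c;
	}

	MD5Init(&ctx);			/* inner = MD5(k_ipad || msg) */
	MD5Update(&ctx, k_ipad, 64);
	MD5Update(&ctx, msg, msg_len);
	MD5Final(digest, &ctx);

	MD5Init(&ctx);			/* digest = MD5(k_opad || inner) */
	MD5Update(&ctx, k_opad, 64);
	MD5Update(&ctx, digest, 16);
	MD5Final(digest, &ctx);
}
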
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 15546c2354c5..2a42d9fedbb2 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * fs/cifs/misc.c | 2 | * fs/cifs/misc.c |
3 | * | 3 | * |
4 | * Copyright (C) International Business Machines Corp., 2002,2007 | 4 | * Copyright (C) International Business Machines Corp., 2002,2008 |
5 | * Author(s): Steve French (sfrench@us.ibm.com) | 5 | * Author(s): Steve French (sfrench@us.ibm.com) |
6 | * | 6 | * |
7 | * This library is free software; you can redistribute it and/or modify | 7 | * This library is free software; you can redistribute it and/or modify |
@@ -320,9 +320,9 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , | |||
320 | if (treeCon->ses) { | 320 | if (treeCon->ses) { |
321 | if (treeCon->ses->capabilities & CAP_UNICODE) | 321 | if (treeCon->ses->capabilities & CAP_UNICODE) |
322 | buffer->Flags2 |= SMBFLG2_UNICODE; | 322 | buffer->Flags2 |= SMBFLG2_UNICODE; |
323 | if (treeCon->ses->capabilities & CAP_STATUS32) { | 323 | if (treeCon->ses->capabilities & CAP_STATUS32) |
324 | buffer->Flags2 |= SMBFLG2_ERR_STATUS; | 324 | buffer->Flags2 |= SMBFLG2_ERR_STATUS; |
325 | } | 325 | |
326 | /* Uid is not converted */ | 326 | /* Uid is not converted */ |
327 | buffer->Uid = treeCon->ses->Suid; | 327 | buffer->Uid = treeCon->ses->Suid; |
328 | buffer->Mid = GetNextMid(treeCon->ses->server); | 328 | buffer->Mid = GetNextMid(treeCon->ses->server); |
@@ -610,7 +610,8 @@ dump_smb(struct smb_hdr *smb_buf, int smb_buf_length) | |||
610 | 610 | ||
611 | buffer = (unsigned char *) smb_buf; | 611 | buffer = (unsigned char *) smb_buf; |
612 | for (i = 0, j = 0; i < smb_buf_length; i++, j++) { | 612 | for (i = 0, j = 0; i < smb_buf_length; i++, j++) { |
613 | if (i % 8 == 0) { /* have reached the beginning of line */ | 613 | if (i % 8 == 0) { |
614 | /* have reached the beginning of line */ | ||
614 | printk(KERN_DEBUG "| "); | 615 | printk(KERN_DEBUG "| "); |
615 | j = 0; | 616 | j = 0; |
616 | } | 617 | } |
@@ -621,7 +622,8 @@ dump_smb(struct smb_hdr *smb_buf, int smb_buf_length) | |||
621 | else | 622 | else |
622 | debug_line[1 + (2 * j)] = '_'; | 623 | debug_line[1 + (2 * j)] = '_'; |
623 | 624 | ||
624 | if (i % 8 == 7) { /* reached end of line, time to print ascii */ | 625 | if (i % 8 == 7) { |
626 | /* reached end of line, time to print ascii */ | ||
625 | debug_line[16] = 0; | 627 | debug_line[16] = 0; |
626 | printk(" | %s\n", debug_line); | 628 | printk(" | %s\n", debug_line); |
627 | } | 629 | } |
@@ -631,7 +633,7 @@ dump_smb(struct smb_hdr *smb_buf, int smb_buf_length) | |||
631 | debug_line[2 * j] = ' '; | 633 | debug_line[2 * j] = ' '; |
632 | debug_line[1 + (2 * j)] = ' '; | 634 | debug_line[1 + (2 * j)] = ' '; |
633 | } | 635 | } |
634 | printk( " | %s\n", debug_line); | 636 | printk(" | %s\n", debug_line); |
635 | return; | 637 | return; |
636 | } | 638 | } |
637 | 639 | ||
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index 646e1f06941b..3b5a5ce882b6 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * fs/cifs/netmisc.c | 2 | * fs/cifs/netmisc.c |
3 | * | 3 | * |
4 | * Copyright (c) International Business Machines Corp., 2002 | 4 | * Copyright (c) International Business Machines Corp., 2002,2008 |
5 | * Author(s): Steve French (sfrench@us.ibm.com) | 5 | * Author(s): Steve French (sfrench@us.ibm.com) |
6 | * | 6 | * |
7 | * Error mapping routines from Samba libsmb/errormap.c | 7 | * Error mapping routines from Samba libsmb/errormap.c |
@@ -150,9 +150,7 @@ static int canonicalize_unc(char *cp) | |||
150 | if (cp[i] == '\\') | 150 | if (cp[i] == '\\') |
151 | break; | 151 | break; |
152 | if (cp[i] == '/') { | 152 | if (cp[i] == '/') { |
153 | #ifdef CONFIG_CIFS_DEBUG2 | 153 | cFYI(DBG2, ("change slash to \\ in malformed UNC")); |
154 | cFYI(1, ("change slash to backslash in malformed UNC")); | ||
155 | #endif | ||
156 | cp[i] = '\\'; | 154 | cp[i] = '\\'; |
157 | return 1; | 155 | return 1; |
158 | } | 156 | } |
@@ -178,9 +176,7 @@ cifs_inet_pton(int address_family, char *cp, void *dst) | |||
178 | } else if (address_family == AF_INET6) { | 176 | } else if (address_family == AF_INET6) { |
179 | ret = in6_pton(cp, -1 /* len */, dst , '\\', NULL); | 177 | ret = in6_pton(cp, -1 /* len */, dst , '\\', NULL); |
180 | } | 178 | } |
181 | #ifdef CONFIG_CIFS_DEBUG2 | 179 | cFYI(DBG2, ("address conversion returned %d for %s", ret, cp)); |
182 | cFYI(1, ("address conversion returned %d for %s", ret, cp)); | ||
183 | #endif | ||
184 | if (ret > 0) | 180 | if (ret > 0) |
185 | ret = 1; | 181 | ret = 1; |
186 | return ret; | 182 | return ret; |
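Both hunks above collapse an #ifdef CONFIG_CIFS_DEBUG2 / cFYI(1, ...) / #endif triple into a single cFYI(DBG2, ...) call. The DBG2 definition itself is not part of this file; the usual shape of such a switch is a level macro that collapses when the config option is off, for example (an assumption, shown only to illustrate the pattern):

#ifdef CONFIG_CIFS_DEBUG2
#define DBG2	2	/* extra-verbose messages compiled in */
#else
#define DBG2	0	/* cFYI(DBG2, ...) filtered without per-call #ifdefs */
#endif
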
@@ -253,7 +249,8 @@ static const struct { | |||
253 | ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_MIX}, { | 249 | ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_MIX}, { |
254 | ERRHRD, ERRgeneral, NT_STATUS_INVALID_QUOTA_LOWER}, { | 250 | ERRHRD, ERRgeneral, NT_STATUS_INVALID_QUOTA_LOWER}, { |
255 | ERRHRD, ERRgeneral, NT_STATUS_DISK_CORRUPT_ERROR}, { | 251 | ERRHRD, ERRgeneral, NT_STATUS_DISK_CORRUPT_ERROR}, { |
256 | ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_INVALID}, { /* mapping changed since shell does lookup on * and expects file not found */ | 252 | /* mapping changed since shell does lookup on * expects FileNotFound */ |
253 | ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_INVALID}, { | ||
257 | ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_NOT_FOUND}, { | 254 | ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_NOT_FOUND}, { |
258 | ERRDOS, ERRalreadyexists, NT_STATUS_OBJECT_NAME_COLLISION}, { | 255 | ERRDOS, ERRalreadyexists, NT_STATUS_OBJECT_NAME_COLLISION}, { |
259 | ERRHRD, ERRgeneral, NT_STATUS_HANDLE_NOT_WAITABLE}, { | 256 | ERRHRD, ERRgeneral, NT_STATUS_HANDLE_NOT_WAITABLE}, { |
@@ -820,7 +817,8 @@ map_smb_to_linux_error(struct smb_hdr *smb, int logErr) | |||
820 | /* old style errors */ | 817 | /* old style errors */ |
821 | 818 | ||
822 | /* DOS class smb error codes - map DOS */ | 819 | /* DOS class smb error codes - map DOS */ |
823 | if (smberrclass == ERRDOS) { /* 1 byte field no need to byte reverse */ | 820 | if (smberrclass == ERRDOS) { |
821 | /* 1 byte field no need to byte reverse */ | ||
824 | for (i = 0; | 822 | for (i = 0; |
825 | i < | 823 | i < |
826 | sizeof(mapping_table_ERRDOS) / | 824 | sizeof(mapping_table_ERRDOS) / |
@@ -834,7 +832,8 @@ map_smb_to_linux_error(struct smb_hdr *smb, int logErr) | |||
834 | } | 832 | } |
835 | /* else try next error mapping one to see if match */ | 833 | /* else try next error mapping one to see if match */ |
836 | } | 834 | } |
837 | } else if (smberrclass == ERRSRV) { /* server class of error codes */ | 835 | } else if (smberrclass == ERRSRV) { |
836 | /* server class of error codes */ | ||
838 | for (i = 0; | 837 | for (i = 0; |
839 | i < | 838 | i < |
840 | sizeof(mapping_table_ERRSRV) / | 839 | sizeof(mapping_table_ERRSRV) / |
@@ -922,8 +921,8 @@ struct timespec cnvrtDosUnixTm(__u16 date, __u16 time) | |||
922 | { | 921 | { |
923 | struct timespec ts; | 922 | struct timespec ts; |
924 | int sec, min, days, month, year; | 923 | int sec, min, days, month, year; |
925 | SMB_TIME * st = (SMB_TIME *)&time; | 924 | SMB_TIME *st = (SMB_TIME *)&time; |
926 | SMB_DATE * sd = (SMB_DATE *)&date; | 925 | SMB_DATE *sd = (SMB_DATE *)&date; |
927 | 926 | ||
928 | cFYI(1, ("date %d time %d", date, time)); | 927 | cFYI(1, ("date %d time %d", date, time)); |
929 | 928 | ||
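For context on the cnvrtDosUnixTm() cast cleanup at the end of this file: the __u16 date and time words it reinterprets through SMB_DATE/SMB_TIME are the classic DOS packed format (2-second resolution, year counted from 1980). The struct definitions are outside this patch, but the layout they map can be decoded with plain shifts and masks; a sketch:

/* Illustrative decode of the DOS date/time words handled above. */
static void decode_dos_datetime(__u16 date, __u16 time)
{
	int two_sec = time & 0x1f;		/* seconds / 2   (bits 0-4)   */
	int min     = (time >> 5) & 0x3f;	/* minutes       (bits 5-10)  */
	int hour    = (time >> 11) & 0x1f;	/* hours         (bits 11-15) */
	int day     = date & 0x1f;		/* day of month  (bits 0-4)   */
	int month   = (date >> 5) & 0xf;	/* month 1-12    (bits 5-8)   */
	int year    = (date >> 9) & 0x7f;	/* years since 1980           */

	printk(KERN_DEBUG "%04d-%02d-%02d %02d:%02d:%02d\n",
	       1980 + year, month, day, hour, min, two_sec * 2);
}
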
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 0f22def4bdff..32b445edc882 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Directory search handling | 4 | * Directory search handling |
5 | * | 5 | * |
6 | * Copyright (C) International Business Machines Corp., 2004, 2007 | 6 | * Copyright (C) International Business Machines Corp., 2004, 2008 |
7 | * Author(s): Steve French (sfrench@us.ibm.com) | 7 | * Author(s): Steve French (sfrench@us.ibm.com) |
8 | * | 8 | * |
9 | * This library is free software; you can redistribute it and/or modify | 9 | * This library is free software; you can redistribute it and/or modify |
@@ -42,17 +42,18 @@ static void dump_cifs_file_struct(struct file *file, char *label) | |||
42 | cFYI(1, ("empty cifs private file data")); | 42 | cFYI(1, ("empty cifs private file data")); |
43 | return; | 43 | return; |
44 | } | 44 | } |
45 | if (cf->invalidHandle) { | 45 | if (cf->invalidHandle) |
46 | cFYI(1, ("invalid handle")); | 46 | cFYI(1, ("invalid handle")); |
47 | } | 47 | if (cf->srch_inf.endOfSearch) |
48 | if (cf->srch_inf.endOfSearch) { | ||
49 | cFYI(1, ("end of search")); | 48 | cFYI(1, ("end of search")); |
50 | } | 49 | if (cf->srch_inf.emptyDir) |
51 | if (cf->srch_inf.emptyDir) { | ||
52 | cFYI(1, ("empty dir")); | 50 | cFYI(1, ("empty dir")); |
53 | } | ||
54 | } | 51 | } |
55 | } | 52 | } |
53 | #else | ||
54 | static inline void dump_cifs_file_struct(struct file *file, char *label) | ||
55 | { | ||
56 | } | ||
56 | #endif /* DEBUG2 */ | 57 | #endif /* DEBUG2 */ |
57 | 58 | ||
58 | /* Returns one if new inode created (which therefore needs to be hashed) */ | 59 | /* Returns one if new inode created (which therefore needs to be hashed) */ |
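The new #else branch gives dump_cifs_file_struct() an empty static inline stub when CONFIG_CIFS_DEBUG2 is not set, which is what lets the #ifdef wrapper around the call in find_cifs_entry() be dropped later in this file: the compiler discards the empty call. The general idiom, with illustrative names:

struct foo;

#ifdef CONFIG_FOO_DEBUG
void foo_dump_state(struct foo *f);			/* real implementation elsewhere */
#else
static inline void foo_dump_state(struct foo *f) { }	/* no-op, optimized away */
#endif
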
@@ -150,7 +151,7 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type, | |||
150 | cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime)); | 151 | cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime)); |
151 | } else { /* legacy, OS2 and DOS style */ | 152 | } else { /* legacy, OS2 and DOS style */ |
152 | /* struct timespec ts;*/ | 153 | /* struct timespec ts;*/ |
153 | FIND_FILE_STANDARD_INFO * pfindData = | 154 | FIND_FILE_STANDARD_INFO *pfindData = |
154 | (FIND_FILE_STANDARD_INFO *)buf; | 155 | (FIND_FILE_STANDARD_INFO *)buf; |
155 | 156 | ||
156 | tmp_inode->i_mtime = cnvrtDosUnixTm( | 157 | tmp_inode->i_mtime = cnvrtDosUnixTm( |
@@ -198,9 +199,8 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type, | |||
198 | if (attr & ATTR_DIRECTORY) { | 199 | if (attr & ATTR_DIRECTORY) { |
199 | *pobject_type = DT_DIR; | 200 | *pobject_type = DT_DIR; |
200 | /* override default perms since we do not lock dirs */ | 201 | /* override default perms since we do not lock dirs */ |
201 | if (atomic_read(&cifsInfo->inUse) == 0) { | 202 | if (atomic_read(&cifsInfo->inUse) == 0) |
202 | tmp_inode->i_mode = cifs_sb->mnt_dir_mode; | 203 | tmp_inode->i_mode = cifs_sb->mnt_dir_mode; |
203 | } | ||
204 | tmp_inode->i_mode |= S_IFDIR; | 204 | tmp_inode->i_mode |= S_IFDIR; |
205 | } else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) && | 205 | } else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) && |
206 | (attr & ATTR_SYSTEM)) { | 206 | (attr & ATTR_SYSTEM)) { |
@@ -231,9 +231,8 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type, | |||
231 | } /* could add code here - to validate if device or weird share type? */ | 231 | } /* could add code here - to validate if device or weird share type? */ |
232 | 232 | ||
233 | /* can not fill in nlink here as in qpathinfo version and Unx search */ | 233 | /* can not fill in nlink here as in qpathinfo version and Unx search */ |
234 | if (atomic_read(&cifsInfo->inUse) == 0) { | 234 | if (atomic_read(&cifsInfo->inUse) == 0) |
235 | atomic_set(&cifsInfo->inUse, 1); | 235 | atomic_set(&cifsInfo->inUse, 1); |
236 | } | ||
237 | 236 | ||
238 | spin_lock(&tmp_inode->i_lock); | 237 | spin_lock(&tmp_inode->i_lock); |
239 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { | 238 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { |
@@ -461,9 +460,8 @@ static int initiate_cifs_search(const int xid, struct file *file) | |||
461 | 460 | ||
462 | full_path = build_path_from_dentry(file->f_path.dentry); | 461 | full_path = build_path_from_dentry(file->f_path.dentry); |
463 | 462 | ||
464 | if (full_path == NULL) { | 463 | if (full_path == NULL) |
465 | return -ENOMEM; | 464 | return -ENOMEM; |
466 | } | ||
467 | 465 | ||
468 | cFYI(1, ("Full path: %s start at: %lld", full_path, file->f_pos)); | 466 | cFYI(1, ("Full path: %s start at: %lld", full_path, file->f_pos)); |
469 | 467 | ||
@@ -471,9 +469,9 @@ ffirst_retry: | |||
471 | /* test for Unix extensions */ | 469 | /* test for Unix extensions */ |
472 | /* but now check for them on the share/mount not on the SMB session */ | 470 | /* but now check for them on the share/mount not on the SMB session */ |
473 | /* if (pTcon->ses->capabilities & CAP_UNIX) { */ | 471 | /* if (pTcon->ses->capabilities & CAP_UNIX) { */ |
474 | if (pTcon->unix_ext) { | 472 | if (pTcon->unix_ext) |
475 | cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX; | 473 | cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX; |
476 | } else if ((pTcon->ses->capabilities & | 474 | else if ((pTcon->ses->capabilities & |
477 | (CAP_NT_SMBS | CAP_NT_FIND)) == 0) { | 475 | (CAP_NT_SMBS | CAP_NT_FIND)) == 0) { |
478 | cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD; | 476 | cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD; |
479 | } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { | 477 | } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { |
@@ -514,10 +512,10 @@ static int cifs_unicode_bytelen(char *str) | |||
514 | static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level) | 512 | static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level) |
515 | { | 513 | { |
516 | char *new_entry; | 514 | char *new_entry; |
517 | FILE_DIRECTORY_INFO * pDirInfo = (FILE_DIRECTORY_INFO *)old_entry; | 515 | FILE_DIRECTORY_INFO *pDirInfo = (FILE_DIRECTORY_INFO *)old_entry; |
518 | 516 | ||
519 | if (level == SMB_FIND_FILE_INFO_STANDARD) { | 517 | if (level == SMB_FIND_FILE_INFO_STANDARD) { |
520 | FIND_FILE_STANDARD_INFO * pfData; | 518 | FIND_FILE_STANDARD_INFO *pfData; |
521 | pfData = (FIND_FILE_STANDARD_INFO *)pDirInfo; | 519 | pfData = (FIND_FILE_STANDARD_INFO *)pDirInfo; |
522 | 520 | ||
523 | new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) + | 521 | new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) + |
@@ -553,7 +551,7 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile) | |||
553 | int len = 0; | 551 | int len = 0; |
554 | 552 | ||
555 | if (cfile->srch_inf.info_level == SMB_FIND_FILE_UNIX) { | 553 | if (cfile->srch_inf.info_level == SMB_FIND_FILE_UNIX) { |
556 | FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry; | 554 | FILE_UNIX_INFO *pFindData = (FILE_UNIX_INFO *)current_entry; |
557 | filename = &pFindData->FileName[0]; | 555 | filename = &pFindData->FileName[0]; |
558 | if (cfile->srch_inf.unicode) { | 556 | if (cfile->srch_inf.unicode) { |
559 | len = cifs_unicode_bytelen(filename); | 557 | len = cifs_unicode_bytelen(filename); |
@@ -562,30 +560,30 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile) | |||
562 | len = strnlen(filename, 5); | 560 | len = strnlen(filename, 5); |
563 | } | 561 | } |
564 | } else if (cfile->srch_inf.info_level == SMB_FIND_FILE_DIRECTORY_INFO) { | 562 | } else if (cfile->srch_inf.info_level == SMB_FIND_FILE_DIRECTORY_INFO) { |
565 | FILE_DIRECTORY_INFO * pFindData = | 563 | FILE_DIRECTORY_INFO *pFindData = |
566 | (FILE_DIRECTORY_INFO *)current_entry; | 564 | (FILE_DIRECTORY_INFO *)current_entry; |
567 | filename = &pFindData->FileName[0]; | 565 | filename = &pFindData->FileName[0]; |
568 | len = le32_to_cpu(pFindData->FileNameLength); | 566 | len = le32_to_cpu(pFindData->FileNameLength); |
569 | } else if (cfile->srch_inf.info_level == | 567 | } else if (cfile->srch_inf.info_level == |
570 | SMB_FIND_FILE_FULL_DIRECTORY_INFO) { | 568 | SMB_FIND_FILE_FULL_DIRECTORY_INFO) { |
571 | FILE_FULL_DIRECTORY_INFO * pFindData = | 569 | FILE_FULL_DIRECTORY_INFO *pFindData = |
572 | (FILE_FULL_DIRECTORY_INFO *)current_entry; | 570 | (FILE_FULL_DIRECTORY_INFO *)current_entry; |
573 | filename = &pFindData->FileName[0]; | 571 | filename = &pFindData->FileName[0]; |
574 | len = le32_to_cpu(pFindData->FileNameLength); | 572 | len = le32_to_cpu(pFindData->FileNameLength); |
575 | } else if (cfile->srch_inf.info_level == | 573 | } else if (cfile->srch_inf.info_level == |
576 | SMB_FIND_FILE_ID_FULL_DIR_INFO) { | 574 | SMB_FIND_FILE_ID_FULL_DIR_INFO) { |
577 | SEARCH_ID_FULL_DIR_INFO * pFindData = | 575 | SEARCH_ID_FULL_DIR_INFO *pFindData = |
578 | (SEARCH_ID_FULL_DIR_INFO *)current_entry; | 576 | (SEARCH_ID_FULL_DIR_INFO *)current_entry; |
579 | filename = &pFindData->FileName[0]; | 577 | filename = &pFindData->FileName[0]; |
580 | len = le32_to_cpu(pFindData->FileNameLength); | 578 | len = le32_to_cpu(pFindData->FileNameLength); |
581 | } else if (cfile->srch_inf.info_level == | 579 | } else if (cfile->srch_inf.info_level == |
582 | SMB_FIND_FILE_BOTH_DIRECTORY_INFO) { | 580 | SMB_FIND_FILE_BOTH_DIRECTORY_INFO) { |
583 | FILE_BOTH_DIRECTORY_INFO * pFindData = | 581 | FILE_BOTH_DIRECTORY_INFO *pFindData = |
584 | (FILE_BOTH_DIRECTORY_INFO *)current_entry; | 582 | (FILE_BOTH_DIRECTORY_INFO *)current_entry; |
585 | filename = &pFindData->FileName[0]; | 583 | filename = &pFindData->FileName[0]; |
586 | len = le32_to_cpu(pFindData->FileNameLength); | 584 | len = le32_to_cpu(pFindData->FileNameLength); |
587 | } else if (cfile->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD) { | 585 | } else if (cfile->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD) { |
588 | FIND_FILE_STANDARD_INFO * pFindData = | 586 | FIND_FILE_STANDARD_INFO *pFindData = |
589 | (FIND_FILE_STANDARD_INFO *)current_entry; | 587 | (FIND_FILE_STANDARD_INFO *)current_entry; |
590 | filename = &pFindData->FileName[0]; | 588 | filename = &pFindData->FileName[0]; |
591 | len = pFindData->FileNameLength; | 589 | len = pFindData->FileNameLength; |
@@ -666,9 +664,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, | |||
666 | . and .. for the root of a drive and for those we need | 664 | . and .. for the root of a drive and for those we need |
667 | to start two entries earlier */ | 665 | to start two entries earlier */ |
668 | 666 | ||
669 | #ifdef CONFIG_CIFS_DEBUG2 | ||
670 | dump_cifs_file_struct(file, "In fce "); | 667 | dump_cifs_file_struct(file, "In fce "); |
671 | #endif | ||
672 | if (((index_to_find < cifsFile->srch_inf.index_of_last_entry) && | 668 | if (((index_to_find < cifsFile->srch_inf.index_of_last_entry) && |
673 | is_dir_changed(file)) || | 669 | is_dir_changed(file)) || |
674 | (index_to_find < first_entry_in_buffer)) { | 670 | (index_to_find < first_entry_in_buffer)) { |
@@ -718,7 +714,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, | |||
718 | pos_in_buf = index_to_find - first_entry_in_buffer; | 714 | pos_in_buf = index_to_find - first_entry_in_buffer; |
719 | cFYI(1, ("found entry - pos_in_buf %d", pos_in_buf)); | 715 | cFYI(1, ("found entry - pos_in_buf %d", pos_in_buf)); |
720 | 716 | ||
721 | for (i=0; (i < (pos_in_buf)) && (current_entry != NULL); i++) { | 717 | for (i = 0; (i < (pos_in_buf)) && (current_entry != NULL); i++) { |
722 | /* go entry by entry figuring out which is first */ | 718 | /* go entry by entry figuring out which is first */ |
723 | current_entry = nxt_dir_entry(current_entry, end_of_smb, | 719 | current_entry = nxt_dir_entry(current_entry, end_of_smb, |
724 | cifsFile->srch_inf.info_level); | 720 | cifsFile->srch_inf.info_level); |
@@ -793,7 +789,7 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst, | |||
793 | filename = &pFindData->FileName[0]; | 789 | filename = &pFindData->FileName[0]; |
794 | len = le32_to_cpu(pFindData->FileNameLength); | 790 | len = le32_to_cpu(pFindData->FileNameLength); |
795 | } else if (level == SMB_FIND_FILE_INFO_STANDARD) { | 791 | } else if (level == SMB_FIND_FILE_INFO_STANDARD) { |
796 | FIND_FILE_STANDARD_INFO * pFindData = | 792 | FIND_FILE_STANDARD_INFO *pFindData = |
797 | (FIND_FILE_STANDARD_INFO *)current_entry; | 793 | (FIND_FILE_STANDARD_INFO *)current_entry; |
798 | filename = &pFindData->FileName[0]; | 794 | filename = &pFindData->FileName[0]; |
799 | /* one byte length, no name conversion */ | 795 | /* one byte length, no name conversion */ |
@@ -928,7 +924,7 @@ static int cifs_save_resume_key(const char *current_entry, | |||
928 | level = cifsFile->srch_inf.info_level; | 924 | level = cifsFile->srch_inf.info_level; |
929 | 925 | ||
930 | if (level == SMB_FIND_FILE_UNIX) { | 926 | if (level == SMB_FIND_FILE_UNIX) { |
931 | FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry; | 927 | FILE_UNIX_INFO *pFindData = (FILE_UNIX_INFO *)current_entry; |
932 | 928 | ||
933 | filename = &pFindData->FileName[0]; | 929 | filename = &pFindData->FileName[0]; |
934 | if (cifsFile->srch_inf.unicode) { | 930 | if (cifsFile->srch_inf.unicode) { |
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index d2153abcba6d..ed150efbe27c 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c | |||
@@ -417,10 +417,6 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time, | |||
417 | 417 | ||
418 | calc_lanman_hash(ses, lnm_session_key); | 418 | calc_lanman_hash(ses, lnm_session_key); |
419 | ses->flags |= CIFS_SES_LANMAN; | 419 | ses->flags |= CIFS_SES_LANMAN; |
420 | /* #ifdef CONFIG_CIFS_DEBUG2 | ||
421 | cifs_dump_mem("cryptkey: ",ses->server->cryptKey, | ||
422 | CIFS_SESS_KEY_SIZE); | ||
423 | #endif */ | ||
424 | memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE); | 420 | memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE); |
425 | bcc_ptr += CIFS_SESS_KEY_SIZE; | 421 | bcc_ptr += CIFS_SESS_KEY_SIZE; |
426 | 422 | ||
diff --git a/fs/cifs/smbdes.c b/fs/cifs/smbdes.c index cfa6d21fb4e8..04943c976f98 100644 --- a/fs/cifs/smbdes.c +++ b/fs/cifs/smbdes.c | |||
@@ -114,42 +114,42 @@ static uchar sbox[8][4][16] = { | |||
114 | {{14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7}, | 114 | {{14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7}, |
115 | {0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8}, | 115 | {0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8}, |
116 | {4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0}, | 116 | {4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0}, |
117 | {15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13}}, | 117 | {15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13} }, |
118 | 118 | ||
119 | {{15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10}, | 119 | {{15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10}, |
120 | {3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5}, | 120 | {3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5}, |
121 | {0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15}, | 121 | {0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15}, |
122 | {13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9}}, | 122 | {13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9} }, |
123 | 123 | ||
124 | {{10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8}, | 124 | {{10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8}, |
125 | {13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1}, | 125 | {13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1}, |
126 | {13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7}, | 126 | {13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7}, |
127 | {1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12}}, | 127 | {1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12} }, |
128 | 128 | ||
129 | {{7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15}, | 129 | {{7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15}, |
130 | {13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9}, | 130 | {13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9}, |
131 | {10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4}, | 131 | {10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4}, |
132 | {3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14}}, | 132 | {3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14} }, |
133 | 133 | ||
134 | {{2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9}, | 134 | {{2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9}, |
135 | {14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6}, | 135 | {14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6}, |
136 | {4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14}, | 136 | {4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14}, |
137 | {11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3}}, | 137 | {11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3} }, |
138 | 138 | ||
139 | {{12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11}, | 139 | {{12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11}, |
140 | {10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8}, | 140 | {10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8}, |
141 | {9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6}, | 141 | {9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6}, |
142 | {4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13}}, | 142 | {4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13} }, |
143 | 143 | ||
144 | {{4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1}, | 144 | {{4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1}, |
145 | {13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6}, | 145 | {13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6}, |
146 | {1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2}, | 146 | {1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2}, |
147 | {6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12}}, | 147 | {6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12} }, |
148 | 148 | ||
149 | {{13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7}, | 149 | {{13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7}, |
150 | {1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2}, | 150 | {1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2}, |
151 | {7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8}, | 151 | {7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8}, |
152 | {2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11}} | 152 | {2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11} } |
153 | }; | 153 | }; |
154 | 154 | ||
155 | static void | 155 | static void |
@@ -313,9 +313,8 @@ str_to_key(unsigned char *str, unsigned char *key) | |||
313 | key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6); | 313 | key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6); |
314 | key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7); | 314 | key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7); |
315 | key[7] = str[6] & 0x7F; | 315 | key[7] = str[6] & 0x7F; |
316 | for (i = 0; i < 8; i++) { | 316 | for (i = 0; i < 8; i++) |
317 | key[i] = (key[i] << 1); | 317 | key[i] = (key[i] << 1); |
318 | } | ||
319 | } | 318 | } |
320 | 319 | ||
321 | static void | 320 | static void |
@@ -344,9 +343,8 @@ smbhash(unsigned char *out, unsigned char *in, unsigned char *key, int forw) | |||
344 | 343 | ||
345 | dohash(outb, inb, keyb, forw); | 344 | dohash(outb, inb, keyb, forw); |
346 | 345 | ||
347 | for (i = 0; i < 8; i++) { | 346 | for (i = 0; i < 8; i++) |
348 | out[i] = 0; | 347 | out[i] = 0; |
349 | } | ||
350 | 348 | ||
351 | for (i = 0; i < 64; i++) { | 349 | for (i = 0; i < 64; i++) { |
352 | if (outb[i]) | 350 | if (outb[i]) |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 50b623ad9320..3612d6c0a0bb 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * fs/cifs/transport.c | 2 | * fs/cifs/transport.c |
3 | * | 3 | * |
4 | * Copyright (C) International Business Machines Corp., 2002,2007 | 4 | * Copyright (C) International Business Machines Corp., 2002,2008 |
5 | * Author(s): Steve French (sfrench@us.ibm.com) | 5 | * Author(s): Steve French (sfrench@us.ibm.com) |
6 | * Jeremy Allison (jra@samba.org) 2006. | 6 | * Jeremy Allison (jra@samba.org) 2006. |
7 | * | 7 | * |
@@ -358,9 +358,9 @@ static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf, | |||
358 | } else if (ses->status != CifsGood) { | 358 | } else if (ses->status != CifsGood) { |
359 | /* check if SMB session is bad because we are setting it up */ | 359 | /* check if SMB session is bad because we are setting it up */ |
360 | if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && | 360 | if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && |
361 | (in_buf->Command != SMB_COM_NEGOTIATE)) { | 361 | (in_buf->Command != SMB_COM_NEGOTIATE)) |
362 | return -EAGAIN; | 362 | return -EAGAIN; |
363 | } /* else ok - we are setting up session */ | 363 | /* else ok - we are setting up session */ |
364 | } | 364 | } |
365 | *ppmidQ = AllocMidQEntry(in_buf, ses); | 365 | *ppmidQ = AllocMidQEntry(in_buf, ses); |
366 | if (*ppmidQ == NULL) | 366 | if (*ppmidQ == NULL) |
@@ -437,9 +437,8 @@ SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses, | |||
437 | iov[0].iov_len = in_buf->smb_buf_length + 4; | 437 | iov[0].iov_len = in_buf->smb_buf_length + 4; |
438 | flags |= CIFS_NO_RESP; | 438 | flags |= CIFS_NO_RESP; |
439 | rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags); | 439 | rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags); |
440 | #ifdef CONFIG_CIFS_DEBUG2 | 440 | cFYI(DBG2, ("SendRcvNoRsp flags %d rc %d", flags, rc)); |
441 | cFYI(1, ("SendRcvNoR flags %d rc %d", flags, rc)); | 441 | |
442 | #endif | ||
443 | return rc; | 442 | return rc; |
444 | } | 443 | } |
445 | 444 | ||
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 54e8ef96cb79..8cd6a445b017 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c | |||
@@ -139,9 +139,9 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name, | |||
139 | } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) { | 139 | } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) { |
140 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) | 140 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) |
141 | goto set_ea_exit; | 141 | goto set_ea_exit; |
142 | if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) { | 142 | if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) |
143 | cFYI(1, ("attempt to set cifs inode metadata")); | 143 | cFYI(1, ("attempt to set cifs inode metadata")); |
144 | } | 144 | |
145 | ea_name += 5; /* skip past user. prefix */ | 145 | ea_name += 5; /* skip past user. prefix */ |
146 | rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value, | 146 | rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value, |
147 | (__u16)value_size, cifs_sb->local_nls, | 147 | (__u16)value_size, cifs_sb->local_nls, |
@@ -262,7 +262,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name, | |||
262 | cifs_sb->mnt_cifs_flags & | 262 | cifs_sb->mnt_cifs_flags & |
263 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 263 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
264 | #ifdef CONFIG_CIFS_EXPERIMENTAL | 264 | #ifdef CONFIG_CIFS_EXPERIMENTAL |
265 | else if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { | 265 | else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { |
266 | __u16 fid; | 266 | __u16 fid; |
267 | int oplock = FALSE; | 267 | int oplock = FALSE; |
268 | struct cifs_ntsd *pacl = NULL; | 268 | struct cifs_ntsd *pacl = NULL; |
@@ -303,11 +303,10 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name, | |||
303 | } else if (strncmp(ea_name, | 303 | } else if (strncmp(ea_name, |
304 | CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) { | 304 | CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) { |
305 | cFYI(1, ("Security xattr namespace not supported yet")); | 305 | cFYI(1, ("Security xattr namespace not supported yet")); |
306 | } else { | 306 | } else |
307 | cFYI(1, | 307 | cFYI(1, |
308 | ("illegal xattr request %s (only user namespace supported)", | 308 | ("illegal xattr request %s (only user namespace supported)", |
309 | ea_name)); | 309 | ea_name)); |
310 | } | ||
311 | 310 | ||
312 | /* We could add an additional check for streams ie | 311 | /* We could add an additional check for streams ie |
313 | if proc/fs/cifs/streamstoxattr is set then | 312 | if proc/fs/cifs/streamstoxattr is set then |
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index d26e2826ba5b..e9602d85c11d 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c | |||
@@ -29,10 +29,6 @@ | |||
29 | 29 | ||
30 | #define DEBUGFS_MAGIC 0x64626720 | 30 | #define DEBUGFS_MAGIC 0x64626720 |
31 | 31 | ||
32 | /* declared over in file.c */ | ||
33 | extern struct file_operations debugfs_file_operations; | ||
34 | extern struct inode_operations debugfs_link_operations; | ||
35 | |||
36 | static struct vfsmount *debugfs_mount; | 32 | static struct vfsmount *debugfs_mount; |
37 | static int debugfs_mount_count; | 33 | static int debugfs_mount_count; |
38 | 34 | ||
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index dc74b186145d..6df1debdccce 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c | |||
@@ -263,52 +263,102 @@ out: | |||
263 | return 0; | 263 | return 0; |
264 | } | 264 | } |
265 | 265 | ||
266 | /* This function must zero any hole we create */ | 266 | /** |
267 | * ecryptfs_prepare_write | ||
268 | * @file: The eCryptfs file | ||
269 | * @page: The eCryptfs page | ||
270 | * @from: The start byte from which we will write | ||
271 | * @to: The end byte to which we will write | ||
272 | * | ||
273 | * This function must zero any hole we create | ||
274 | * | ||
275 | * Returns zero on success; non-zero otherwise | ||
276 | */ | ||
267 | static int ecryptfs_prepare_write(struct file *file, struct page *page, | 277 | static int ecryptfs_prepare_write(struct file *file, struct page *page, |
268 | unsigned from, unsigned to) | 278 | unsigned from, unsigned to) |
269 | { | 279 | { |
270 | int rc = 0; | ||
271 | loff_t prev_page_end_size; | 280 | loff_t prev_page_end_size; |
281 | int rc = 0; | ||
272 | 282 | ||
273 | if (!PageUptodate(page)) { | 283 | if (!PageUptodate(page)) { |
274 | rc = ecryptfs_read_lower_page_segment(page, page->index, 0, | 284 | struct ecryptfs_crypt_stat *crypt_stat = |
275 | PAGE_CACHE_SIZE, | 285 | &ecryptfs_inode_to_private( |
276 | page->mapping->host); | 286 | file->f_path.dentry->d_inode)->crypt_stat; |
277 | if (rc) { | 287 | |
278 | printk(KERN_ERR "%s: Error attemping to read lower " | 288 | if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED) |
279 | "page segment; rc = [%d]\n", __FUNCTION__, rc); | 289 | || (crypt_stat->flags & ECRYPTFS_NEW_FILE)) { |
280 | ClearPageUptodate(page); | 290 | rc = ecryptfs_read_lower_page_segment( |
281 | goto out; | 291 | page, page->index, 0, PAGE_CACHE_SIZE, |
282 | } else | 292 | page->mapping->host); |
293 | if (rc) { | ||
294 | printk(KERN_ERR "%s: Error attempting to read " | ||
295 | "lower page segment; rc = [%d]\n", | ||
296 | __FUNCTION__, rc); | ||
297 | ClearPageUptodate(page); | ||
298 | goto out; | ||
299 | } else | ||
300 | SetPageUptodate(page); | ||
301 | } else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) { | ||
302 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) { | ||
303 | rc = ecryptfs_copy_up_encrypted_with_header( | ||
304 | page, crypt_stat); | ||
305 | if (rc) { | ||
306 | printk(KERN_ERR "%s: Error attempting " | ||
307 | "to copy the encrypted content " | ||
308 | "from the lower file whilst " | ||
309 | "inserting the metadata from " | ||
310 | "the xattr into the header; rc " | ||
311 | "= [%d]\n", __FUNCTION__, rc); | ||
312 | ClearPageUptodate(page); | ||
313 | goto out; | ||
314 | } | ||
315 | SetPageUptodate(page); | ||
316 | } else { | ||
317 | rc = ecryptfs_read_lower_page_segment( | ||
318 | page, page->index, 0, PAGE_CACHE_SIZE, | ||
319 | page->mapping->host); | ||
320 | if (rc) { | ||
321 | printk(KERN_ERR "%s: Error reading " | ||
322 | "page; rc = [%d]\n", | ||
323 | __FUNCTION__, rc); | ||
324 | ClearPageUptodate(page); | ||
325 | goto out; | ||
326 | } | ||
327 | SetPageUptodate(page); | ||
328 | } | ||
329 | } else { | ||
330 | rc = ecryptfs_decrypt_page(page); | ||
331 | if (rc) { | ||
332 | printk(KERN_ERR "%s: Error decrypting page " | ||
333 | "at index [%ld]; rc = [%d]\n", | ||
334 | __FUNCTION__, page->index, rc); | ||
335 | ClearPageUptodate(page); | ||
336 | goto out; | ||
337 | } | ||
283 | SetPageUptodate(page); | 338 | SetPageUptodate(page); |
339 | } | ||
284 | } | 340 | } |
285 | |||
286 | prev_page_end_size = ((loff_t)page->index << PAGE_CACHE_SHIFT); | 341 | prev_page_end_size = ((loff_t)page->index << PAGE_CACHE_SHIFT); |
287 | 342 | /* If creating a page or more of holes, zero them out via truncate. | |
288 | /* | 343 | * Note, this will increase i_size. */ |
289 | * If creating a page or more of holes, zero them out via truncate. | ||
290 | * Note, this will increase i_size. | ||
291 | */ | ||
292 | if (page->index != 0) { | 344 | if (page->index != 0) { |
293 | if (prev_page_end_size > i_size_read(page->mapping->host)) { | 345 | if (prev_page_end_size > i_size_read(page->mapping->host)) { |
294 | rc = ecryptfs_truncate(file->f_path.dentry, | 346 | rc = ecryptfs_truncate(file->f_path.dentry, |
295 | prev_page_end_size); | 347 | prev_page_end_size); |
296 | if (rc) { | 348 | if (rc) { |
297 | printk(KERN_ERR "Error on attempt to " | 349 | printk(KERN_ERR "%s: Error on attempt to " |
298 | "truncate to (higher) offset [%lld];" | 350 | "truncate to (higher) offset [%lld];" |
299 | " rc = [%d]\n", prev_page_end_size, rc); | 351 | " rc = [%d]\n", __FUNCTION__, |
352 | prev_page_end_size, rc); | ||
300 | goto out; | 353 | goto out; |
301 | } | 354 | } |
302 | } | 355 | } |
303 | } | 356 | } |
304 | /* | 357 | /* Writing to a new page, and creating a small hole from start |
305 | * Writing to a new page, and creating a small hole from start of page? | 358 | * of page? Zero it out. */ |
306 | * Zero it out. | 359 | if ((i_size_read(page->mapping->host) == prev_page_end_size) |
307 | */ | 360 | && (from != 0)) |
308 | if ((i_size_read(page->mapping->host) == prev_page_end_size) && | ||
309 | (from != 0)) { | ||
310 | zero_user(page, 0, PAGE_CACHE_SIZE); | 361 | zero_user(page, 0, PAGE_CACHE_SIZE); |
311 | } | ||
312 | out: | 362 | out: |
313 | return rc; | 363 | return rc; |
314 | } | 364 | } |
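The hole handling in ecryptfs_prepare_write() above comes down to two offset checks against the page being written. Below is a small user-space sketch of that decision, assuming 4 KiB pages; prepare_write_hole_action(), SKETCH_PAGE_SHIFT and the stubbed sizes are illustrative stand-ins, not eCryptfs code.

/*
 * Sketch of the hole-handling decisions in ecryptfs_prepare_write().
 * PAGE_SHIFT, i_size and the truncate/zero actions are stand-ins.
 */
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12    /* assume 4 KiB pages */

enum hole_action { HOLE_NONE, HOLE_TRUNCATE_UP, HOLE_ZERO_PAGE };

static enum hole_action prepare_write_hole_action(unsigned long index,
                                                  long long i_size,
                                                  unsigned int from)
{
        /* byte offset where this page begins, i.e. end of the previous page */
        long long prev_page_end = (long long)index << SKETCH_PAGE_SHIFT;

        /* writing a page or more past EOF: extend the file (truncate up) first */
        if (index != 0 && prev_page_end > i_size)
                return HOLE_TRUNCATE_UP;

        /* writing into a brand-new page but not from its first byte:
         * the leading bytes would otherwise be stale, so zero the page */
        if (i_size == prev_page_end && from != 0)
                return HOLE_ZERO_PAGE;

        return HOLE_NONE;
}

int main(void)
{
        /* file is 4096 bytes; write starts at byte 100 of page 1 */
        printf("%d\n", prepare_write_hole_action(1, 4096, 100)); /* 2: zero   */
        /* file is 100 bytes; write lands in page 3 */
        printf("%d\n", prepare_write_hole_action(3, 100, 0));    /* 1: truncate */
        return 0;
}

Truncating up through the lower file is what actually materialises the hole; zeroing the page only protects the bytes in front of a write that starts mid-page.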
@@ -173,8 +173,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, | |||
173 | return NULL; | 173 | return NULL; |
174 | 174 | ||
175 | if (write) { | 175 | if (write) { |
176 | struct rlimit *rlim = current->signal->rlim; | ||
177 | unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; | 176 | unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; |
177 | struct rlimit *rlim; | ||
178 | |||
179 | /* | ||
180 | * We've historically supported up to 32 pages (ARG_MAX) | ||
181 | * of argument strings even with small stacks | ||
182 | */ | ||
183 | if (size <= ARG_MAX) | ||
184 | return page; | ||
178 | 185 | ||
179 | /* | 186 | /* |
180 | * Limit to 1/4-th the stack size for the argv+env strings. | 187 | * Limit to 1/4-th the stack size for the argv+env strings. |
@@ -183,6 +190,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, | |||
183 | * - the program will have a reasonable amount of stack left | 190 | * - the program will have a reasonable amount of stack left |
184 | * to work from. | 191 | * to work from. |
185 | */ | 192 | */ |
193 | rlim = current->signal->rlim; | ||
186 | if (size > rlim[RLIMIT_STACK].rlim_cur / 4) { | 194 | if (size > rlim[RLIMIT_STACK].rlim_cur / 4) { |
187 | put_page(page); | 195 | put_page(page); |
188 | return NULL; | 196 | return NULL; |
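The exec.c hunk above encodes a two-tier policy for argv/envp size: up to ARG_MAX bytes are always accepted, anything beyond that must fit in a quarter of the stack rlimit. A rough user-space analogue, with arg_size_allowed() as a made-up name and ARG_MAX defaulted to the historical 32 pages where the header does not define it:

#include <stdio.h>
#include <sys/resource.h>
#include <limits.h>

#ifndef ARG_MAX
#define ARG_MAX (32 * 4096)     /* historical 32 pages, assuming 4 KiB pages */
#endif

static int arg_size_allowed(unsigned long size)
{
        struct rlimit stack;

        if (size <= ARG_MAX)            /* historical guarantee, small stacks included */
                return 1;

        if (getrlimit(RLIMIT_STACK, &stack) != 0)
                return 0;

        /* leave room for the program's own stack usage */
        return size <= stack.rlim_cur / 4;
}

int main(void)
{
        printf("64 KiB of args allowed: %d\n", arg_size_allowed(64 * 1024));
        printf("64 MiB of args allowed: %d\n", arg_size_allowed(64UL << 20));
        return 0;
}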
diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 18769cc32377..ad5360664082 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c | |||
@@ -806,8 +806,8 @@ static match_table_t tokens = { | |||
806 | {Opt_quota, "quota"}, | 806 | {Opt_quota, "quota"}, |
807 | {Opt_usrquota, "usrquota"}, | 807 | {Opt_usrquota, "usrquota"}, |
808 | {Opt_barrier, "barrier=%u"}, | 808 | {Opt_barrier, "barrier=%u"}, |
809 | {Opt_err, NULL}, | ||
810 | {Opt_resize, "resize"}, | 809 | {Opt_resize, "resize"}, |
810 | {Opt_err, NULL}, | ||
811 | }; | 811 | }; |
812 | 812 | ||
813 | static ext3_fsblk_t get_sb_block(void **data) | 813 | static ext3_fsblk_t get_sb_block(void **data) |
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 33888bb58144..2c23bade9aa6 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c | |||
@@ -46,7 +46,7 @@ const struct file_operations ext4_dir_operations = { | |||
46 | #ifdef CONFIG_COMPAT | 46 | #ifdef CONFIG_COMPAT |
47 | .compat_ioctl = ext4_compat_ioctl, | 47 | .compat_ioctl = ext4_compat_ioctl, |
48 | #endif | 48 | #endif |
49 | .fsync = ext4_sync_file, /* BKL held */ | 49 | .fsync = ext4_sync_file, |
50 | .release = ext4_release_dir, | 50 | .release = ext4_release_dir, |
51 | }; | 51 | }; |
52 | 52 | ||
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index bc7081f1fbe8..9ae6e67090cd 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
@@ -148,6 +148,7 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, | |||
148 | { | 148 | { |
149 | struct ext4_inode_info *ei = EXT4_I(inode); | 149 | struct ext4_inode_info *ei = EXT4_I(inode); |
150 | ext4_fsblk_t bg_start; | 150 | ext4_fsblk_t bg_start; |
151 | ext4_fsblk_t last_block; | ||
151 | ext4_grpblk_t colour; | 152 | ext4_grpblk_t colour; |
152 | int depth; | 153 | int depth; |
153 | 154 | ||
@@ -169,8 +170,13 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, | |||
169 | /* OK. use inode's group */ | 170 | /* OK. use inode's group */ |
170 | bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) + | 171 | bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) + |
171 | le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block); | 172 | le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block); |
172 | colour = (current->pid % 16) * | 173 | last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; |
174 | |||
175 | if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) | ||
176 | colour = (current->pid % 16) * | ||
173 | (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); | 177 | (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); |
178 | else | ||
179 | colour = (current->pid % 16) * ((last_block - bg_start) / 16); | ||
174 | return bg_start + colour + block; | 180 | return bg_start + colour + block; |
175 | } | 181 | } |
176 | 182 | ||
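The new last_block check in ext4_ext_find_goal() keeps the pid-based spreading from overshooting a short final block group. A stand-alone sketch of that arithmetic, with made-up block numbers rather than real ext4 geometry:

#include <stdio.h>

static unsigned long long goal_block(unsigned long long bg_start,
                                     unsigned long blocks_per_group,
                                     unsigned long long last_block,
                                     int pid)
{
        unsigned long long colour;

        if (bg_start + blocks_per_group <= last_block)
                colour = (pid % 16) * (blocks_per_group / 16ULL);
        else    /* partial last group: stride over what is actually left */
                colour = (pid % 16) * ((last_block - bg_start) / 16);

        return bg_start + colour;
}

int main(void)
{
        /* a full group versus a last group with only 1000 blocks remaining */
        printf("%llu\n", goal_block(32768, 32768, 1000000, 4242));
        printf("%llu\n", goal_block(999000, 32768, 1000000, 4242));
        return 0;
}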
@@ -349,7 +355,7 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path) | |||
349 | #define ext4_ext_show_leaf(inode,path) | 355 | #define ext4_ext_show_leaf(inode,path) |
350 | #endif | 356 | #endif |
351 | 357 | ||
352 | static void ext4_ext_drop_refs(struct ext4_ext_path *path) | 358 | void ext4_ext_drop_refs(struct ext4_ext_path *path) |
353 | { | 359 | { |
354 | int depth = path->p_depth; | 360 | int depth = path->p_depth; |
355 | int i; | 361 | int i; |
@@ -2168,6 +2174,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2168 | newblock = iblock - ee_block + ext_pblock(ex); | 2174 | newblock = iblock - ee_block + ext_pblock(ex); |
2169 | ex2 = ex; | 2175 | ex2 = ex; |
2170 | 2176 | ||
2177 | err = ext4_ext_get_access(handle, inode, path + depth); | ||
2178 | if (err) | ||
2179 | goto out; | ||
2180 | |||
2171 | /* ex1: ee_block to iblock - 1 : uninitialized */ | 2181 | /* ex1: ee_block to iblock - 1 : uninitialized */ |
2172 | if (iblock > ee_block) { | 2182 | if (iblock > ee_block) { |
2173 | ex1 = ex; | 2183 | ex1 = ex; |
@@ -2200,16 +2210,20 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2200 | newdepth = ext_depth(inode); | 2210 | newdepth = ext_depth(inode); |
2201 | if (newdepth != depth) { | 2211 | if (newdepth != depth) { |
2202 | depth = newdepth; | 2212 | depth = newdepth; |
2203 | path = ext4_ext_find_extent(inode, iblock, NULL); | 2213 | ext4_ext_drop_refs(path); |
2214 | path = ext4_ext_find_extent(inode, iblock, path); | ||
2204 | if (IS_ERR(path)) { | 2215 | if (IS_ERR(path)) { |
2205 | err = PTR_ERR(path); | 2216 | err = PTR_ERR(path); |
2206 | path = NULL; | ||
2207 | goto out; | 2217 | goto out; |
2208 | } | 2218 | } |
2209 | eh = path[depth].p_hdr; | 2219 | eh = path[depth].p_hdr; |
2210 | ex = path[depth].p_ext; | 2220 | ex = path[depth].p_ext; |
2211 | if (ex2 != &newex) | 2221 | if (ex2 != &newex) |
2212 | ex2 = ex; | 2222 | ex2 = ex; |
2223 | |||
2224 | err = ext4_ext_get_access(handle, inode, path + depth); | ||
2225 | if (err) | ||
2226 | goto out; | ||
2213 | } | 2227 | } |
2214 | allocated = max_blocks; | 2228 | allocated = max_blocks; |
2215 | } | 2229 | } |
@@ -2230,9 +2244,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2230 | ex2->ee_len = cpu_to_le16(allocated); | 2244 | ex2->ee_len = cpu_to_le16(allocated); |
2231 | if (ex2 != ex) | 2245 | if (ex2 != ex) |
2232 | goto insert; | 2246 | goto insert; |
2233 | err = ext4_ext_get_access(handle, inode, path + depth); | ||
2234 | if (err) | ||
2235 | goto out; | ||
2236 | /* | 2247 | /* |
2237 | * New (initialized) extent starts from the first block | 2248 | * New (initialized) extent starts from the first block |
2238 | * in the current extent. i.e., ex2 == ex | 2249 | * in the current extent. i.e., ex2 == ex |
@@ -2276,9 +2287,22 @@ out: | |||
2276 | } | 2287 | } |
2277 | 2288 | ||
2278 | /* | 2289 | /* |
2290 | * Block allocation/map/preallocation routine for extents based files | ||
2291 | * | ||
2292 | * | ||
2279 | * Need to be called with | 2293 | * Need to be called with |
2280 | * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block | 2294 | * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block |
2281 | * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) | 2295 | * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) |
2296 | * | ||
2297 | * return > 0, number of blocks already mapped/allocated | ||
2298 | * if create == 0 and these are pre-allocated blocks | ||
2299 | * buffer head is unmapped | ||
2300 | * otherwise blocks are mapped | ||
2301 | * | ||
2302 | * return = 0, if plain look up failed (blocks have not been allocated) | ||
2303 | * buffer head is unmapped | ||
2304 | * | ||
2305 | * return < 0, error case. | ||
2282 | */ | 2306 | */ |
2283 | int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | 2307 | int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, |
2284 | ext4_lblk_t iblock, | 2308 | ext4_lblk_t iblock, |
@@ -2623,7 +2647,7 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len) | |||
2623 | * modify 1 super block, 1 block bitmap and 1 group descriptor. | 2647 | * modify 1 super block, 1 block bitmap and 1 group descriptor. |
2624 | */ | 2648 | */ |
2625 | credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + 3; | 2649 | credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + 3; |
2626 | down_write((&EXT4_I(inode)->i_data_sem)); | 2650 | mutex_lock(&inode->i_mutex); |
2627 | retry: | 2651 | retry: |
2628 | while (ret >= 0 && ret < max_blocks) { | 2652 | while (ret >= 0 && ret < max_blocks) { |
2629 | block = block + ret; | 2653 | block = block + ret; |
@@ -2634,16 +2658,17 @@ retry: | |||
2634 | break; | 2658 | break; |
2635 | } | 2659 | } |
2636 | 2660 | ||
2637 | ret = ext4_ext_get_blocks(handle, inode, block, | 2661 | ret = ext4_get_blocks_wrap(handle, inode, block, |
2638 | max_blocks, &map_bh, | 2662 | max_blocks, &map_bh, |
2639 | EXT4_CREATE_UNINITIALIZED_EXT, 0); | 2663 | EXT4_CREATE_UNINITIALIZED_EXT, 0); |
2640 | WARN_ON(ret <= 0); | ||
2641 | if (ret <= 0) { | 2664 | if (ret <= 0) { |
2642 | ext4_error(inode->i_sb, "ext4_fallocate", | 2665 | #ifdef EXT4FS_DEBUG |
2643 | "ext4_ext_get_blocks returned error: " | 2666 | WARN_ON(ret <= 0); |
2644 | "inode#%lu, block=%u, max_blocks=%lu", | 2667 | printk(KERN_ERR "%s: ext4_ext_get_blocks " |
2668 | "returned error inode#%lu, block=%u, " | ||
2669 | "max_blocks=%lu", __func__, | ||
2645 | inode->i_ino, block, max_blocks); | 2670 | inode->i_ino, block, max_blocks); |
2646 | ret = -EIO; | 2671 | #endif |
2647 | ext4_mark_inode_dirty(handle, inode); | 2672 | ext4_mark_inode_dirty(handle, inode); |
2648 | ret2 = ext4_journal_stop(handle); | 2673 | ret2 = ext4_journal_stop(handle); |
2649 | break; | 2674 | break; |
@@ -2680,7 +2705,6 @@ retry: | |||
2680 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) | 2705 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) |
2681 | goto retry; | 2706 | goto retry; |
2682 | 2707 | ||
2683 | up_write((&EXT4_I(inode)->i_data_sem)); | ||
2684 | /* | 2708 | /* |
2685 | * Time to update the file size. | 2709 | * Time to update the file size. |
2686 | * Update only when preallocation was requested beyond the file size. | 2710 | * Update only when preallocation was requested beyond the file size. |
@@ -2692,21 +2716,18 @@ retry: | |||
2692 | * if no error, we assume preallocation succeeded | 2716 | * if no error, we assume preallocation succeeded |
2693 | * completely | 2717 | * completely |
2694 | */ | 2718 | */ |
2695 | mutex_lock(&inode->i_mutex); | ||
2696 | i_size_write(inode, offset + len); | 2719 | i_size_write(inode, offset + len); |
2697 | EXT4_I(inode)->i_disksize = i_size_read(inode); | 2720 | EXT4_I(inode)->i_disksize = i_size_read(inode); |
2698 | mutex_unlock(&inode->i_mutex); | ||
2699 | } else if (ret < 0 && nblocks) { | 2721 | } else if (ret < 0 && nblocks) { |
2700 | /* Handle partial allocation scenario */ | 2722 | /* Handle partial allocation scenario */ |
2701 | loff_t newsize; | 2723 | loff_t newsize; |
2702 | 2724 | ||
2703 | mutex_lock(&inode->i_mutex); | ||
2704 | newsize = (nblocks << blkbits) + i_size_read(inode); | 2725 | newsize = (nblocks << blkbits) + i_size_read(inode); |
2705 | i_size_write(inode, EXT4_BLOCK_ALIGN(newsize, blkbits)); | 2726 | i_size_write(inode, EXT4_BLOCK_ALIGN(newsize, blkbits)); |
2706 | EXT4_I(inode)->i_disksize = i_size_read(inode); | 2727 | EXT4_I(inode)->i_disksize = i_size_read(inode); |
2707 | mutex_unlock(&inode->i_mutex); | ||
2708 | } | 2728 | } |
2709 | } | 2729 | } |
2710 | 2730 | ||
2731 | mutex_unlock(&inode->i_mutex); | ||
2711 | return ret > 0 ? ret2 : ret; | 2732 | return ret > 0 ? ret2 : ret; |
2712 | } | 2733 | } |
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index da18a74b966a..8036b9b5376b 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c | |||
@@ -702,7 +702,12 @@ got: | |||
702 | ei->i_dir_start_lookup = 0; | 702 | ei->i_dir_start_lookup = 0; |
703 | ei->i_disksize = 0; | 703 | ei->i_disksize = 0; |
704 | 704 | ||
705 | ei->i_flags = EXT4_I(dir)->i_flags & ~EXT4_INDEX_FL; | 705 | /* |
706 | * Don't inherit extent flag from directory. We set extent flag on | ||
707 | * newly created directory and file only if -o extent mount option is | ||
708 | * specified | ||
709 | */ | ||
710 | ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL); | ||
706 | if (S_ISLNK(mode)) | 711 | if (S_ISLNK(mode)) |
707 | ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL); | 712 | ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL); |
708 | /* dirsync only applies to directories */ | 713 | /* dirsync only applies to directories */ |
@@ -745,12 +750,15 @@ got: | |||
745 | goto fail_free_drop; | 750 | goto fail_free_drop; |
746 | } | 751 | } |
747 | if (test_opt(sb, EXTENTS)) { | 752 | if (test_opt(sb, EXTENTS)) { |
748 | EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL; | 753 | /* set extent flag only for directory and file */ |
749 | ext4_ext_tree_init(handle, inode); | 754 | if (S_ISDIR(mode) || S_ISREG(mode)) { |
750 | err = ext4_update_incompat_feature(handle, sb, | 755 | EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL; |
751 | EXT4_FEATURE_INCOMPAT_EXTENTS); | 756 | ext4_ext_tree_init(handle, inode); |
752 | if (err) | 757 | err = ext4_update_incompat_feature(handle, sb, |
753 | goto fail; | 758 | EXT4_FEATURE_INCOMPAT_EXTENTS); |
759 | if (err) | ||
760 | goto fail; | ||
761 | } | ||
754 | } | 762 | } |
755 | 763 | ||
756 | ext4_debug("allocating inode %lu\n", inode->i_ino); | 764 | ext4_debug("allocating inode %lu\n", inode->i_ino); |
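The ialloc.c change above separates flag inheritance from the mount-time extents decision: the child starts from the parent directory's flags minus INDEX and EXTENTS, and the extents flag is added back only for directories and regular files when the mount option asks for it. A sketch of the resulting rule; the EXT4_*_FL values are copied here for illustration and inherit_flags() itself is a hypothetical helper:

#include <stdio.h>
#include <sys/stat.h>

#define EXT4_INDEX_FL   0x00001000      /* hash-indexed directory */
#define EXT4_EXTENTS_FL 0x00080000      /* inode uses extents */

static unsigned int inherit_flags(unsigned int dir_flags, mode_t mode,
                                  int mount_opt_extents)
{
        unsigned int flags = dir_flags & ~(EXT4_INDEX_FL | EXT4_EXTENTS_FL);

        if (mount_opt_extents && (S_ISDIR(mode) || S_ISREG(mode)))
                flags |= EXT4_EXTENTS_FL;       /* never for symlinks, devices, ... */

        return flags;
}

int main(void)
{
        unsigned int dir = EXT4_INDEX_FL | EXT4_EXTENTS_FL;

        printf("file:    %#x\n", inherit_flags(dir, S_IFREG | 0644, 1));
        printf("symlink: %#x\n", inherit_flags(dir, S_IFLNK | 0777, 1));
        return 0;
}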
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 7dd9b50d5ebc..945cbf6cb1fc 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -403,6 +403,7 @@ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind) | |||
403 | __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data; | 403 | __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data; |
404 | __le32 *p; | 404 | __le32 *p; |
405 | ext4_fsblk_t bg_start; | 405 | ext4_fsblk_t bg_start; |
406 | ext4_fsblk_t last_block; | ||
406 | ext4_grpblk_t colour; | 407 | ext4_grpblk_t colour; |
407 | 408 | ||
408 | /* Try to find previous block */ | 409 | /* Try to find previous block */ |
@@ -420,8 +421,13 @@ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind) | |||
420 | * into the same cylinder group then. | 421 | * into the same cylinder group then. |
421 | */ | 422 | */ |
422 | bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group); | 423 | bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group); |
423 | colour = (current->pid % 16) * | 424 | last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; |
425 | |||
426 | if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) | ||
427 | colour = (current->pid % 16) * | ||
424 | (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); | 428 | (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); |
429 | else | ||
430 | colour = (current->pid % 16) * ((last_block - bg_start) / 16); | ||
425 | return bg_start + colour; | 431 | return bg_start + colour; |
426 | } | 432 | } |
427 | 433 | ||
@@ -768,7 +774,6 @@ err_out: | |||
768 | * | 774 | * |
769 | * `handle' can be NULL if create == 0. | 775 | * `handle' can be NULL if create == 0. |
770 | * | 776 | * |
771 | * The BKL may not be held on entry here. Be sure to take it early. | ||
772 | * return > 0, # of blocks mapped or allocated. | 777 | * return > 0, # of blocks mapped or allocated. |
773 | * return = 0, if plain lookup failed. | 778 | * return = 0, if plain lookup failed. |
774 | * return < 0, error case. | 779 | * return < 0, error case. |
@@ -903,11 +908,38 @@ out: | |||
903 | */ | 908 | */ |
904 | #define DIO_CREDITS 25 | 909 | #define DIO_CREDITS 25 |
905 | 910 | ||
911 | |||
912 | /* | ||
913 | * | ||
914 | * | ||
915 | * ext4 get_block() wrapper function | ||
916 | * It will do a lookup first, and return if the blocks are already mapped. | ||
917 | * Otherwise it takes the write lock of the i_data_sem, allocates blocks, | ||
918 | * stores the allocated blocks in the result buffer head and marks it | ||
919 | * mapped. | ||
920 | * | ||
921 | * If file type is extents based, it will call ext4_ext_get_blocks(), | ||
922 | * Otherwise, it calls ext4_get_blocks_handle() to handle indirect mapping | ||
923 | * based files | ||
924 | * | ||
925 | * On success, it returns the number of blocks being mapped or allocated. | ||
926 | * If create==0 and the blocks are pre-allocated and uninitialized, | ||
927 | * the result buffer head is unmapped. If create==1, it will make sure | ||
928 | * the buffer head is mapped. | ||
929 | * | ||
930 | * It returns 0 if plain look up failed (blocks have not been allocated), in | ||
931 | * that case, the buffer head is unmapped | ||
932 | * | ||
933 | * It returns the error in case of allocation failure. | ||
934 | */ | ||
906 | int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block, | 935 | int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block, |
907 | unsigned long max_blocks, struct buffer_head *bh, | 936 | unsigned long max_blocks, struct buffer_head *bh, |
908 | int create, int extend_disksize) | 937 | int create, int extend_disksize) |
909 | { | 938 | { |
910 | int retval; | 939 | int retval; |
940 | |||
941 | clear_buffer_mapped(bh); | ||
942 | |||
911 | /* | 943 | /* |
912 | * Try to see if we can get the block without requesting | 944 | * Try to see if we can get the block without requesting |
913 | * for new file system block. | 945 | * for new file system block. |
@@ -921,12 +953,26 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block, | |||
921 | inode, block, max_blocks, bh, 0, 0); | 953 | inode, block, max_blocks, bh, 0, 0); |
922 | } | 954 | } |
923 | up_read((&EXT4_I(inode)->i_data_sem)); | 955 | up_read((&EXT4_I(inode)->i_data_sem)); |
924 | if (!create || (retval > 0)) | 956 | |
957 | /* If it is only a block(s) look up */ | ||
958 | if (!create) | ||
959 | return retval; | ||
960 | |||
961 | /* | ||
962 | * Returns if the blocks have already allocated | ||
963 | * | ||
964 | * Note that if blocks have been preallocated | ||
965 | * ext4_ext_get_block() returns th create = 0 | ||
966 | * with buffer head unmapped. | ||
967 | */ | ||
968 | if (retval > 0 && buffer_mapped(bh)) | ||
925 | return retval; | 969 | return retval; |
926 | 970 | ||
927 | /* | 971 | /* |
928 | * We need to allocate new blocks which will result | 972 | * New blocks allocate and/or writing to uninitialized extent |
929 | * in i_data update | 973 | * will possibly result in updating i_data, so we take |
974 | * the write lock of i_data_sem, and call get_blocks() | ||
975 | * with create == 1 flag. | ||
930 | */ | 976 | */ |
931 | down_write((&EXT4_I(inode)->i_data_sem)); | 977 | down_write((&EXT4_I(inode)->i_data_sem)); |
932 | /* | 978 | /* |
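ext4_get_blocks_wrap() above is an instance of a general locking pattern: do the read-only lookup under the shared lock and retake the lock exclusively only when an allocation will modify i_data. A generic pthread sketch of that pattern, with a toy array standing in for the block map:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static int block_map[16];       /* 0 means "not mapped yet" */

static int map_block(int lblk)
{
        int pblk;

        /* fast path: plain lookup, many readers may run in parallel */
        pthread_rwlock_rdlock(&map_lock);
        pblk = block_map[lblk];
        pthread_rwlock_unlock(&map_lock);
        if (pblk)
                return pblk;

        /* slow path: allocation modifies the map, so take the write lock
         * and re-check, since another thread may have won the race */
        pthread_rwlock_wrlock(&map_lock);
        if (!block_map[lblk])
                block_map[lblk] = 100 + lblk;   /* pretend allocation */
        pblk = block_map[lblk];
        pthread_rwlock_unlock(&map_lock);
        return pblk;
}

int main(void)
{
        printf("first:  %d\n", map_block(3));   /* allocates */
        printf("second: %d\n", map_block(3));   /* fast path */
        return 0;
}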
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index dd0fcfcb35ce..ef97f19c2f9d 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
@@ -627,21 +627,19 @@ static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb, | |||
627 | return block; | 627 | return block; |
628 | } | 628 | } |
629 | 629 | ||
630 | static inline void *mb_correct_addr_and_bit(int *bit, void *addr) | ||
631 | { | ||
630 | #if BITS_PER_LONG == 64 | 632 | #if BITS_PER_LONG == 64 |
631 | #define mb_correct_addr_and_bit(bit, addr) \ | 633 | *bit += ((unsigned long) addr & 7UL) << 3; |
632 | { \ | 634 | addr = (void *) ((unsigned long) addr & ~7UL); |
633 | bit += ((unsigned long) addr & 7UL) << 3; \ | ||
634 | addr = (void *) ((unsigned long) addr & ~7UL); \ | ||
635 | } | ||
636 | #elif BITS_PER_LONG == 32 | 635 | #elif BITS_PER_LONG == 32 |
637 | #define mb_correct_addr_and_bit(bit, addr) \ | 636 | *bit += ((unsigned long) addr & 3UL) << 3; |
638 | { \ | 637 | addr = (void *) ((unsigned long) addr & ~3UL); |
639 | bit += ((unsigned long) addr & 3UL) << 3; \ | ||
640 | addr = (void *) ((unsigned long) addr & ~3UL); \ | ||
641 | } | ||
642 | #else | 638 | #else |
643 | #error "how many bits you are?!" | 639 | #error "how many bits you are?!" |
644 | #endif | 640 | #endif |
641 | return addr; | ||
642 | } | ||
645 | 643 | ||
646 | static inline int mb_test_bit(int bit, void *addr) | 644 | static inline int mb_test_bit(int bit, void *addr) |
647 | { | 645 | { |
@@ -649,34 +647,54 @@ static inline int mb_test_bit(int bit, void *addr) | |||
649 | * ext4_test_bit on architecture like powerpc | 647 | * ext4_test_bit on architecture like powerpc |
650 | * needs unsigned long aligned address | 648 | * needs unsigned long aligned address |
651 | */ | 649 | */ |
652 | mb_correct_addr_and_bit(bit, addr); | 650 | addr = mb_correct_addr_and_bit(&bit, addr); |
653 | return ext4_test_bit(bit, addr); | 651 | return ext4_test_bit(bit, addr); |
654 | } | 652 | } |
655 | 653 | ||
656 | static inline void mb_set_bit(int bit, void *addr) | 654 | static inline void mb_set_bit(int bit, void *addr) |
657 | { | 655 | { |
658 | mb_correct_addr_and_bit(bit, addr); | 656 | addr = mb_correct_addr_and_bit(&bit, addr); |
659 | ext4_set_bit(bit, addr); | 657 | ext4_set_bit(bit, addr); |
660 | } | 658 | } |
661 | 659 | ||
662 | static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr) | 660 | static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr) |
663 | { | 661 | { |
664 | mb_correct_addr_and_bit(bit, addr); | 662 | addr = mb_correct_addr_and_bit(&bit, addr); |
665 | ext4_set_bit_atomic(lock, bit, addr); | 663 | ext4_set_bit_atomic(lock, bit, addr); |
666 | } | 664 | } |
667 | 665 | ||
668 | static inline void mb_clear_bit(int bit, void *addr) | 666 | static inline void mb_clear_bit(int bit, void *addr) |
669 | { | 667 | { |
670 | mb_correct_addr_and_bit(bit, addr); | 668 | addr = mb_correct_addr_and_bit(&bit, addr); |
671 | ext4_clear_bit(bit, addr); | 669 | ext4_clear_bit(bit, addr); |
672 | } | 670 | } |
673 | 671 | ||
674 | static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr) | 672 | static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr) |
675 | { | 673 | { |
676 | mb_correct_addr_and_bit(bit, addr); | 674 | addr = mb_correct_addr_and_bit(&bit, addr); |
677 | ext4_clear_bit_atomic(lock, bit, addr); | 675 | ext4_clear_bit_atomic(lock, bit, addr); |
678 | } | 676 | } |
679 | 677 | ||
678 | static inline int mb_find_next_zero_bit(void *addr, int max, int start) | ||
679 | { | ||
680 | int fix = 0; | ||
681 | addr = mb_correct_addr_and_bit(&fix, addr); | ||
682 | max += fix; | ||
683 | start += fix; | ||
684 | |||
685 | return ext4_find_next_zero_bit(addr, max, start) - fix; | ||
686 | } | ||
687 | |||
688 | static inline int mb_find_next_bit(void *addr, int max, int start) | ||
689 | { | ||
690 | int fix = 0; | ||
691 | addr = mb_correct_addr_and_bit(&fix, addr); | ||
692 | max += fix; | ||
693 | start += fix; | ||
694 | |||
695 | return ext4_find_next_bit(addr, max, start) - fix; | ||
696 | } | ||
697 | |||
680 | static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) | 698 | static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) |
681 | { | 699 | { |
682 | char *bb; | 700 | char *bb; |
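mb_correct_addr_and_bit(), now an inline function, and the new mb_find_next_*_bit() wrappers all rely on the same trick: align the pointer down to an unsigned long boundary and carry the skipped bytes over into the bit index (the wrappers add the same fix to max/start and subtract it from the result). A stand-alone 64-bit illustration, not the ext4 helper itself:

#include <stdio.h>
#include <stdint.h>

static void *correct_addr_and_bit(int *bit, void *addr)
{
        uintptr_t a = (uintptr_t)addr;

        *bit += (int)((a & 7) << 3);    /* 8 bits per skipped byte */
        return (void *)(a & ~(uintptr_t)7);
}

int main(void)
{
        unsigned long words[2] = { 0, 0 };
        unsigned char *bytes = (unsigned char *)words;
        int bit = 5;
        void *fixed = correct_addr_and_bit(&bit, bytes + 3);    /* misaligned on purpose */

        /* bit 5 of byte 3 is bit 3*8 + 5 = 29 of the aligned word */
        printf("offset %td, bit %d\n", (char *)fixed - (char *)bytes, bit);
        return 0;
}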
@@ -906,7 +924,7 @@ static void ext4_mb_mark_free_simple(struct super_block *sb, | |||
906 | unsigned short chunk; | 924 | unsigned short chunk; |
907 | unsigned short border; | 925 | unsigned short border; |
908 | 926 | ||
909 | BUG_ON(len >= EXT4_BLOCKS_PER_GROUP(sb)); | 927 | BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb)); |
910 | 928 | ||
911 | border = 2 << sb->s_blocksize_bits; | 929 | border = 2 << sb->s_blocksize_bits; |
912 | 930 | ||
@@ -946,12 +964,12 @@ static void ext4_mb_generate_buddy(struct super_block *sb, | |||
946 | 964 | ||
947 | /* initialize buddy from bitmap which is aggregation | 965 | /* initialize buddy from bitmap which is aggregation |
948 | * of on-disk bitmap and preallocations */ | 966 | * of on-disk bitmap and preallocations */ |
949 | i = ext4_find_next_zero_bit(bitmap, max, 0); | 967 | i = mb_find_next_zero_bit(bitmap, max, 0); |
950 | grp->bb_first_free = i; | 968 | grp->bb_first_free = i; |
951 | while (i < max) { | 969 | while (i < max) { |
952 | fragments++; | 970 | fragments++; |
953 | first = i; | 971 | first = i; |
954 | i = ext4_find_next_bit(bitmap, max, i); | 972 | i = mb_find_next_bit(bitmap, max, i); |
955 | len = i - first; | 973 | len = i - first; |
956 | free += len; | 974 | free += len; |
957 | if (len > 1) | 975 | if (len > 1) |
@@ -959,7 +977,7 @@ static void ext4_mb_generate_buddy(struct super_block *sb, | |||
959 | else | 977 | else |
960 | grp->bb_counters[0]++; | 978 | grp->bb_counters[0]++; |
961 | if (i < max) | 979 | if (i < max) |
962 | i = ext4_find_next_zero_bit(bitmap, max, i); | 980 | i = mb_find_next_zero_bit(bitmap, max, i); |
963 | } | 981 | } |
964 | grp->bb_fragments = fragments; | 982 | grp->bb_fragments = fragments; |
965 | 983 | ||
@@ -967,6 +985,10 @@ static void ext4_mb_generate_buddy(struct super_block *sb, | |||
967 | ext4_error(sb, __FUNCTION__, | 985 | ext4_error(sb, __FUNCTION__, |
968 | "EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n", | 986 | "EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n", |
969 | group, free, grp->bb_free); | 987 | group, free, grp->bb_free); |
988 | /* | ||
989 | * If we intend to continue, we consider the group descriptor | ||
990 | * corrupt and update bb_free using bitmap value | ||
991 | */ | ||
970 | grp->bb_free = free; | 992 | grp->bb_free = free; |
971 | } | 993 | } |
972 | 994 | ||
@@ -1778,7 +1800,7 @@ static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, | |||
1778 | buddy = mb_find_buddy(e4b, i, &max); | 1800 | buddy = mb_find_buddy(e4b, i, &max); |
1779 | BUG_ON(buddy == NULL); | 1801 | BUG_ON(buddy == NULL); |
1780 | 1802 | ||
1781 | k = ext4_find_next_zero_bit(buddy, max, 0); | 1803 | k = mb_find_next_zero_bit(buddy, max, 0); |
1782 | BUG_ON(k >= max); | 1804 | BUG_ON(k >= max); |
1783 | 1805 | ||
1784 | ac->ac_found++; | 1806 | ac->ac_found++; |
@@ -1818,11 +1840,11 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, | |||
1818 | i = e4b->bd_info->bb_first_free; | 1840 | i = e4b->bd_info->bb_first_free; |
1819 | 1841 | ||
1820 | while (free && ac->ac_status == AC_STATUS_CONTINUE) { | 1842 | while (free && ac->ac_status == AC_STATUS_CONTINUE) { |
1821 | i = ext4_find_next_zero_bit(bitmap, | 1843 | i = mb_find_next_zero_bit(bitmap, |
1822 | EXT4_BLOCKS_PER_GROUP(sb), i); | 1844 | EXT4_BLOCKS_PER_GROUP(sb), i); |
1823 | if (i >= EXT4_BLOCKS_PER_GROUP(sb)) { | 1845 | if (i >= EXT4_BLOCKS_PER_GROUP(sb)) { |
1824 | /* | 1846 | /* |
1825 | * IF we corrupt the bitmap we won't find any | 1847 | * If we have a corrupt bitmap, we won't find any
1826 | * free blocks even though group info says we | 1848 | * free blocks even though group info says we |
1827 | * have free blocks | 1849 | * have free blocks
1828 | */ | 1850 | */ |
@@ -1838,6 +1860,12 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, | |||
1838 | ext4_error(sb, __FUNCTION__, "%d free blocks as per " | 1860 | ext4_error(sb, __FUNCTION__, "%d free blocks as per " |
1839 | "group info. But got %d blocks\n", | 1861 | "group info. But got %d blocks\n", |
1840 | free, ex.fe_len); | 1862 | free, ex.fe_len); |
1863 | /* | ||
1864 | * The number of free blocks differs. This mostly | ||
1865 | * indicates that the bitmap is corrupt. So exit | ||
1866 | * without claiming the space. | ||
1867 | */ | ||
1868 | break; | ||
1841 | } | 1869 | } |
1842 | 1870 | ||
1843 | ext4_mb_measure_extent(ac, &ex, e4b); | 1871 | ext4_mb_measure_extent(ac, &ex, e4b); |
@@ -3740,10 +3768,10 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b, | |||
3740 | } | 3768 | } |
3741 | 3769 | ||
3742 | while (bit < end) { | 3770 | while (bit < end) { |
3743 | bit = ext4_find_next_zero_bit(bitmap_bh->b_data, end, bit); | 3771 | bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); |
3744 | if (bit >= end) | 3772 | if (bit >= end) |
3745 | break; | 3773 | break; |
3746 | next = ext4_find_next_bit(bitmap_bh->b_data, end, bit); | 3774 | next = mb_find_next_bit(bitmap_bh->b_data, end, bit); |
3747 | if (next > end) | 3775 | if (next > end) |
3748 | next = end; | 3776 | next = end; |
3749 | start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit + | 3777 | start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit + |
@@ -3771,6 +3799,10 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b, | |||
3771 | (unsigned long) pa->pa_len); | 3799 | (unsigned long) pa->pa_len); |
3772 | ext4_error(sb, __FUNCTION__, "free %u, pa_free %u\n", | 3800 | ext4_error(sb, __FUNCTION__, "free %u, pa_free %u\n", |
3773 | free, pa->pa_free); | 3801 | free, pa->pa_free); |
3802 | /* | ||
3803 | * pa is already deleted so we use the value obtained | ||
3804 | * from the bitmap and continue. | ||
3805 | */ | ||
3774 | } | 3806 | } |
3775 | atomic_add(free, &sbi->s_mb_discarded); | 3807 | atomic_add(free, &sbi->s_mb_discarded); |
3776 | if (ac) | 3808 | if (ac) |
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index 8c6c685b9d22..5c1e27de7755 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c | |||
@@ -43,6 +43,7 @@ static int finish_range(handle_t *handle, struct inode *inode, | |||
43 | 43 | ||
44 | if (IS_ERR(path)) { | 44 | if (IS_ERR(path)) { |
45 | retval = PTR_ERR(path); | 45 | retval = PTR_ERR(path); |
46 | path = NULL; | ||
46 | goto err_out; | 47 | goto err_out; |
47 | } | 48 | } |
48 | 49 | ||
@@ -74,6 +75,10 @@ static int finish_range(handle_t *handle, struct inode *inode, | |||
74 | } | 75 | } |
75 | retval = ext4_ext_insert_extent(handle, inode, path, &newext); | 76 | retval = ext4_ext_insert_extent(handle, inode, path, &newext); |
76 | err_out: | 77 | err_out: |
78 | if (path) { | ||
79 | ext4_ext_drop_refs(path); | ||
80 | kfree(path); | ||
81 | } | ||
77 | lb->first_pblock = 0; | 82 | lb->first_pblock = 0; |
78 | return retval; | 83 | return retval; |
79 | } | 84 | } |
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index a9347fb43bcc..28aa2ed4297e 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c | |||
@@ -1804,12 +1804,8 @@ retry: | |||
1804 | inode->i_fop = &ext4_dir_operations; | 1804 | inode->i_fop = &ext4_dir_operations; |
1805 | inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize; | 1805 | inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize; |
1806 | dir_block = ext4_bread (handle, inode, 0, 1, &err); | 1806 | dir_block = ext4_bread (handle, inode, 0, 1, &err); |
1807 | if (!dir_block) { | 1807 | if (!dir_block) |
1808 | ext4_dec_count(handle, inode); /* is this nlink == 0? */ | 1808 | goto out_clear_inode; |
1809 | ext4_mark_inode_dirty(handle, inode); | ||
1810 | iput (inode); | ||
1811 | goto out_stop; | ||
1812 | } | ||
1813 | BUFFER_TRACE(dir_block, "get_write_access"); | 1809 | BUFFER_TRACE(dir_block, "get_write_access"); |
1814 | ext4_journal_get_write_access(handle, dir_block); | 1810 | ext4_journal_get_write_access(handle, dir_block); |
1815 | de = (struct ext4_dir_entry_2 *) dir_block->b_data; | 1811 | de = (struct ext4_dir_entry_2 *) dir_block->b_data; |
@@ -1832,7 +1828,8 @@ retry: | |||
1832 | ext4_mark_inode_dirty(handle, inode); | 1828 | ext4_mark_inode_dirty(handle, inode); |
1833 | err = ext4_add_entry (handle, dentry, inode); | 1829 | err = ext4_add_entry (handle, dentry, inode); |
1834 | if (err) { | 1830 | if (err) { |
1835 | inode->i_nlink = 0; | 1831 | out_clear_inode: |
1832 | clear_nlink(inode); | ||
1836 | ext4_mark_inode_dirty(handle, inode); | 1833 | ext4_mark_inode_dirty(handle, inode); |
1837 | iput (inode); | 1834 | iput (inode); |
1838 | goto out_stop; | 1835 | goto out_stop; |
@@ -2164,7 +2161,7 @@ static int ext4_unlink(struct inode * dir, struct dentry *dentry) | |||
2164 | dir->i_ctime = dir->i_mtime = ext4_current_time(dir); | 2161 | dir->i_ctime = dir->i_mtime = ext4_current_time(dir); |
2165 | ext4_update_dx_flag(dir); | 2162 | ext4_update_dx_flag(dir); |
2166 | ext4_mark_inode_dirty(handle, dir); | 2163 | ext4_mark_inode_dirty(handle, dir); |
2167 | ext4_dec_count(handle, inode); | 2164 | drop_nlink(inode); |
2168 | if (!inode->i_nlink) | 2165 | if (!inode->i_nlink) |
2169 | ext4_orphan_add(handle, inode); | 2166 | ext4_orphan_add(handle, inode); |
2170 | inode->i_ctime = ext4_current_time(inode); | 2167 | inode->i_ctime = ext4_current_time(inode); |
@@ -2214,7 +2211,7 @@ retry: | |||
2214 | err = __page_symlink(inode, symname, l, | 2211 | err = __page_symlink(inode, symname, l, |
2215 | mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); | 2212 | mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); |
2216 | if (err) { | 2213 | if (err) { |
2217 | ext4_dec_count(handle, inode); | 2214 | clear_nlink(inode); |
2218 | ext4_mark_inode_dirty(handle, inode); | 2215 | ext4_mark_inode_dirty(handle, inode); |
2219 | iput (inode); | 2216 | iput (inode); |
2220 | goto out_stop; | 2217 | goto out_stop; |
@@ -2223,7 +2220,6 @@ retry: | |||
2223 | inode->i_op = &ext4_fast_symlink_inode_operations; | 2220 | inode->i_op = &ext4_fast_symlink_inode_operations; |
2224 | memcpy((char*)&EXT4_I(inode)->i_data,symname,l); | 2221 | memcpy((char*)&EXT4_I(inode)->i_data,symname,l); |
2225 | inode->i_size = l-1; | 2222 | inode->i_size = l-1; |
2226 | EXT4_I(inode)->i_flags &= ~EXT4_EXTENTS_FL; | ||
2227 | } | 2223 | } |
2228 | EXT4_I(inode)->i_disksize = inode->i_size; | 2224 | EXT4_I(inode)->i_disksize = inode->i_size; |
2229 | err = ext4_add_nondir(handle, dentry, inode); | 2225 | err = ext4_add_nondir(handle, dentry, inode); |
@@ -2407,7 +2403,7 @@ static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry, | |||
2407 | ext4_dec_count(handle, old_dir); | 2403 | ext4_dec_count(handle, old_dir); |
2408 | if (new_inode) { | 2404 | if (new_inode) { |
2409 | /* checked empty_dir above, can't have another parent, | 2405 | /* checked empty_dir above, can't have another parent, |
2410 | * ext3_dec_count() won't work for many-linked dirs */ | 2406 | * ext4_dec_count() won't work for many-linked dirs */ |
2411 | new_inode->i_nlink = 0; | 2407 | new_inode->i_nlink = 0; |
2412 | } else { | 2408 | } else { |
2413 | ext4_inc_count(handle, new_dir); | 2409 | ext4_inc_count(handle, new_dir); |
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 9477a2bd6ff2..e29efa0f9d62 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c | |||
@@ -1037,6 +1037,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, | |||
1037 | ext4_warning(sb, __FUNCTION__, | 1037 | ext4_warning(sb, __FUNCTION__, |
1038 | "multiple resizers run on filesystem!"); | 1038 | "multiple resizers run on filesystem!"); |
1039 | unlock_super(sb); | 1039 | unlock_super(sb); |
1040 | ext4_journal_stop(handle); | ||
1040 | err = -EBUSY; | 1041 | err = -EBUSY; |
1041 | goto exit_put; | 1042 | goto exit_put; |
1042 | } | 1043 | } |
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 038ed7436199..c6cbb6cd59b2 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c | |||
@@ -369,7 +369,7 @@ out: | |||
369 | 369 | ||
370 | 370 | ||
371 | /** | 371 | /** |
372 | * int journal_restart() - restart a handle . | 372 | * int journal_restart() - restart a handle. |
373 | * @handle: handle to restart | 373 | * @handle: handle to restart |
374 | * @nblocks: nr credits requested | 374 | * @nblocks: nr credits requested |
375 | * | 375 | * |
@@ -844,8 +844,7 @@ out: | |||
844 | } | 844 | } |
845 | 845 | ||
846 | /** | 846 | /** |
847 | * int journal_get_undo_access() - Notify intent to modify metadata with | 847 | * int journal_get_undo_access() - Notify intent to modify metadata with non-rewindable consequences |
848 | * non-rewindable consequences | ||
849 | * @handle: transaction | 848 | * @handle: transaction |
850 | * @bh: buffer to undo | 849 | * @bh: buffer to undo |
851 | * @credits: store the number of taken credits here (if not NULL) | 850 | * @credits: store the number of taken credits here (if not NULL) |
@@ -921,12 +920,14 @@ out: | |||
921 | } | 920 | } |
922 | 921 | ||
923 | /** | 922 | /** |
924 | * int journal_dirty_data() - mark a buffer as containing dirty data which | 923 | * int journal_dirty_data() - mark a buffer as containing dirty data to be flushed |
925 | * needs to be flushed before we can commit the | ||
926 | * current transaction. | ||
927 | * @handle: transaction | 924 | * @handle: transaction |
928 | * @bh: bufferhead to mark | 925 | * @bh: bufferhead to mark |
929 | * | 926 | * |
927 | * Description: | ||
928 | * Mark a buffer as containing dirty data which needs to be flushed before | ||
929 | * we can commit the current transaction. | ||
930 | * | ||
930 | * The buffer is placed on the transaction's data list and is marked as | 931 | * The buffer is placed on the transaction's data list and is marked as |
931 | * belonging to the transaction. | 932 | * belonging to the transaction. |
932 | * | 933 | * |
@@ -1098,11 +1099,11 @@ no_journal: | |||
1098 | } | 1099 | } |
1099 | 1100 | ||
1100 | /** | 1101 | /** |
1101 | * int journal_dirty_metadata() - mark a buffer as containing dirty metadata | 1102 | * int journal_dirty_metadata() - mark a buffer as containing dirty metadata |
1102 | * @handle: transaction to add buffer to. | 1103 | * @handle: transaction to add buffer to. |
1103 | * @bh: buffer to mark | 1104 | * @bh: buffer to mark |
1104 | * | 1105 | * |
1105 | * mark dirty metadata which needs to be journaled as part of the current | 1106 | * Mark dirty metadata which needs to be journaled as part of the current |
1106 | * transaction. | 1107 | * transaction. |
1107 | * | 1108 | * |
1108 | * The buffer is placed on the transaction's metadata list and is marked | 1109 | * The buffer is placed on the transaction's metadata list and is marked |
diff --git a/fs/mpage.c b/fs/mpage.c index 5df564366f36..235e4d3873a8 100644 --- a/fs/mpage.c +++ b/fs/mpage.c | |||
@@ -325,16 +325,12 @@ confused: | |||
325 | } | 325 | } |
326 | 326 | ||
327 | /** | 327 | /** |
328 | * mpage_readpages - populate an address space with some pages, and | 328 | * mpage_readpages - populate an address space with some pages & start reads against them |
329 | * start reads against them. | ||
330 | * | ||
331 | * @mapping: the address_space | 329 | * @mapping: the address_space |
332 | * @pages: The address of a list_head which contains the target pages. These | 330 | * @pages: The address of a list_head which contains the target pages. These |
333 | * pages have their ->index populated and are otherwise uninitialised. | 331 | * pages have their ->index populated and are otherwise uninitialised. |
334 | * | ||
335 | * The page at @pages->prev has the lowest file offset, and reads should be | 332 | * The page at @pages->prev has the lowest file offset, and reads should be |
336 | * issued in @pages->prev to @pages->next order. | 333 | * issued in @pages->prev to @pages->next order. |
337 | * | ||
338 | * @nr_pages: The number of pages at *@pages | 334 | * @nr_pages: The number of pages at *@pages |
339 | * @get_block: The filesystem's block mapper function. | 335 | * @get_block: The filesystem's block mapper function. |
340 | * | 336 | * |
@@ -360,6 +356,7 @@ confused: | |||
360 | * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be | 356 | * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be |
361 | * submitted in the following order: | 357 | * submitted in the following order: |
362 | * 12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16 | 358 | * 12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16 |
359 | * | ||
363 | * because the indirect block has to be read to get the mappings of blocks | 360 | * because the indirect block has to be read to get the mappings of blocks |
364 | * 13,14,15,16. Obviously, this impacts performance. | 361 | * 13,14,15,16. Obviously, this impacts performance. |
365 | * | 362 | * |
@@ -656,9 +653,7 @@ out: | |||
656 | } | 653 | } |
657 | 654 | ||
658 | /** | 655 | /** |
659 | * mpage_writepages - walk the list of dirty pages of the given | 656 | * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them |
660 | * address space and writepage() all of them. | ||
661 | * | ||
662 | * @mapping: address space structure to write | 657 | * @mapping: address space structure to write |
663 | * @wbc: subtract the number of written pages from *@wbc->nr_to_write | 658 | * @wbc: subtract the number of written pages from *@wbc->nr_to_write |
664 | * @get_block: the filesystem's block mapper function. | 659 | * @get_block: the filesystem's block mapper function. |
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 82243127eebf..90383ed61005 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -257,7 +257,7 @@ static int ocfs2_readpage_inline(struct inode *inode, struct page *page) | |||
257 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 257 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
258 | 258 | ||
259 | BUG_ON(!PageLocked(page)); | 259 | BUG_ON(!PageLocked(page)); |
260 | BUG_ON(!OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL); | 260 | BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)); |
261 | 261 | ||
262 | ret = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &di_bh, | 262 | ret = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &di_bh, |
263 | OCFS2_BH_CACHED, inode); | 263 | OCFS2_BH_CACHED, inode); |
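The one-character ocfs2_readpage_inline() fix above (and the reiserfs_quota_on() hunk further down) addresses the usual precedence trap: '!' binds tighter than '&', so "!x & FLAG" tests the flag against 0 or 1 rather than against x. A minimal demonstration:

#include <stdio.h>

#define SOME_FLAG 0x04

int main(void)
{
        unsigned int features = 0x02;   /* SOME_FLAG (0x04) is NOT set */

        /* buggy form: '!' applies to features alone, so this is 0 & 4 == 0
         * and the missing flag goes unnoticed */
        printf("buggy check fires: %d\n", !features & SOME_FLAG);
        /* fixed form: correctly reports that the flag is missing */
        printf("fixed check fires: %d\n", !(features & SOME_FLAG));
        return 0;
}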
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index e280833ceb9a..8a1875848080 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c | |||
@@ -390,9 +390,8 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir, | |||
390 | goto bail; | 390 | goto bail; |
391 | } | 391 | } |
392 | if (pde) | 392 | if (pde) |
393 | pde->rec_len = | 393 | le16_add_cpu(&pde->rec_len, |
394 | cpu_to_le16(le16_to_cpu(pde->rec_len) + | 394 | le16_to_cpu(de->rec_len)); |
395 | le16_to_cpu(de->rec_len)); | ||
396 | else | 395 | else |
397 | de->inode = 0; | 396 | de->inode = 0; |
398 | dir->i_version++; | 397 | dir->i_version++; |
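le16_add_cpu(), used above to fold a deleted entry's rec_len into its predecessor, is shorthand for the endian round-trip sketched below. The user-space stand-in relies on glibc's <endian.h>; it is not the kernel helper:

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

static void sketch_le16_add_cpu(uint16_t *field_le, uint16_t addend)
{
        /* decode little-endian on-disk value, add in CPU order, re-encode */
        *field_le = htole16((uint16_t)(le16toh(*field_le) + addend));
}

int main(void)
{
        uint16_t rec_len = htole16(12);         /* on-disk, little-endian */

        sketch_le16_add_cpu(&rec_len, 40);      /* absorb the deleted entry */
        printf("rec_len = %u\n", le16toh(rec_len));     /* 52 */
        return 0;
}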
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index a54d33d95ada..c92d1b19fc0b 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -1695,9 +1695,9 @@ send_response: | |||
1695 | * can periodically run all locks owned by this node | 1695 | * can periodically run all locks owned by this node |
1696 | * and re-assert across the cluster... | 1696 | * and re-assert across the cluster... |
1697 | */ | 1697 | */ |
1698 | int dlm_do_assert_master(struct dlm_ctxt *dlm, | 1698 | static int dlm_do_assert_master(struct dlm_ctxt *dlm, |
1699 | struct dlm_lock_resource *res, | 1699 | struct dlm_lock_resource *res, |
1700 | void *nodemap, u32 flags) | 1700 | void *nodemap, u32 flags) |
1701 | { | 1701 | { |
1702 | struct dlm_assert_master assert; | 1702 | struct dlm_assert_master assert; |
1703 | int to, tmpret; | 1703 | int to, tmpret; |
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 351130c9b734..f7794306b2bd 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c | |||
@@ -3042,7 +3042,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres, | |||
3042 | inode = ocfs2_lock_res_inode(lockres); | 3042 | inode = ocfs2_lock_res_inode(lockres); |
3043 | mapping = inode->i_mapping; | 3043 | mapping = inode->i_mapping; |
3044 | 3044 | ||
3045 | if (S_ISREG(inode->i_mode)) | 3045 | if (!S_ISREG(inode->i_mode)) |
3046 | goto out; | 3046 | goto out; |
3047 | 3047 | ||
3048 | /* | 3048 | /* |
@@ -3219,8 +3219,8 @@ static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres, | |||
3219 | return UNBLOCK_CONTINUE_POST; | 3219 | return UNBLOCK_CONTINUE_POST; |
3220 | } | 3220 | } |
3221 | 3221 | ||
3222 | void ocfs2_process_blocked_lock(struct ocfs2_super *osb, | 3222 | static void ocfs2_process_blocked_lock(struct ocfs2_super *osb, |
3223 | struct ocfs2_lock_res *lockres) | 3223 | struct ocfs2_lock_res *lockres) |
3224 | { | 3224 | { |
3225 | int status; | 3225 | int status; |
3226 | struct ocfs2_unblock_ctl ctl = {0, 0,}; | 3226 | struct ocfs2_unblock_ctl ctl = {0, 0,}; |
@@ -3356,7 +3356,7 @@ static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb) | |||
3356 | return should_wake; | 3356 | return should_wake; |
3357 | } | 3357 | } |
3358 | 3358 | ||
3359 | int ocfs2_downconvert_thread(void *arg) | 3359 | static int ocfs2_downconvert_thread(void *arg) |
3360 | { | 3360 | { |
3361 | int status = 0; | 3361 | int status = 0; |
3362 | struct ocfs2_super *osb = arg; | 3362 | struct ocfs2_super *osb = arg; |
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h index 1d5b0699d0a9..e3cf902404b4 100644 --- a/fs/ocfs2/dlmglue.h +++ b/fs/ocfs2/dlmglue.h | |||
@@ -109,8 +109,6 @@ void ocfs2_simple_drop_lockres(struct ocfs2_super *osb, | |||
109 | struct ocfs2_lock_res *lockres); | 109 | struct ocfs2_lock_res *lockres); |
110 | 110 | ||
111 | /* for the downconvert thread */ | 111 | /* for the downconvert thread */ |
112 | void ocfs2_process_blocked_lock(struct ocfs2_super *osb, | ||
113 | struct ocfs2_lock_res *lockres); | ||
114 | void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb); | 112 | void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb); |
115 | 113 | ||
116 | struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void); | 114 | struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void); |
diff --git a/fs/ocfs2/heartbeat.c b/fs/ocfs2/heartbeat.c index c0efd9489fe8..0758daf64da0 100644 --- a/fs/ocfs2/heartbeat.c +++ b/fs/ocfs2/heartbeat.c | |||
@@ -49,10 +49,15 @@ static inline void __ocfs2_node_map_set_bit(struct ocfs2_node_map *map, | |||
49 | static inline void __ocfs2_node_map_clear_bit(struct ocfs2_node_map *map, | 49 | static inline void __ocfs2_node_map_clear_bit(struct ocfs2_node_map *map, |
50 | int bit); | 50 | int bit); |
51 | static inline int __ocfs2_node_map_is_empty(struct ocfs2_node_map *map); | 51 | static inline int __ocfs2_node_map_is_empty(struct ocfs2_node_map *map); |
52 | static void __ocfs2_node_map_dup(struct ocfs2_node_map *target, | 52 | |
53 | struct ocfs2_node_map *from); | 53 | /* special case -1 for now |
54 | static void __ocfs2_node_map_set(struct ocfs2_node_map *target, | 54 | * TODO: should *really* make sure the calling func never passes -1!! */ |
55 | struct ocfs2_node_map *from); | 55 | static void ocfs2_node_map_init(struct ocfs2_node_map *map) |
56 | { | ||
57 | map->num_nodes = OCFS2_NODE_MAP_MAX_NODES; | ||
58 | memset(map->map, 0, BITS_TO_LONGS(OCFS2_NODE_MAP_MAX_NODES) * | ||
59 | sizeof(unsigned long)); | ||
60 | } | ||
56 | 61 | ||
57 | void ocfs2_init_node_maps(struct ocfs2_super *osb) | 62 | void ocfs2_init_node_maps(struct ocfs2_super *osb) |
58 | { | 63 | { |
@@ -136,15 +141,6 @@ void ocfs2_stop_heartbeat(struct ocfs2_super *osb) | |||
136 | mlog_errno(ret); | 141 | mlog_errno(ret); |
137 | } | 142 | } |
138 | 143 | ||
139 | /* special case -1 for now | ||
140 | * TODO: should *really* make sure the calling func never passes -1!! */ | ||
141 | void ocfs2_node_map_init(struct ocfs2_node_map *map) | ||
142 | { | ||
143 | map->num_nodes = OCFS2_NODE_MAP_MAX_NODES; | ||
144 | memset(map->map, 0, BITS_TO_LONGS(OCFS2_NODE_MAP_MAX_NODES) * | ||
145 | sizeof(unsigned long)); | ||
146 | } | ||
147 | |||
148 | static inline void __ocfs2_node_map_set_bit(struct ocfs2_node_map *map, | 144 | static inline void __ocfs2_node_map_set_bit(struct ocfs2_node_map *map, |
149 | int bit) | 145 | int bit) |
150 | { | 146 | { |
@@ -216,6 +212,8 @@ int ocfs2_node_map_is_empty(struct ocfs2_super *osb, | |||
216 | return ret; | 212 | return ret; |
217 | } | 213 | } |
218 | 214 | ||
215 | #if 0 | ||
216 | |||
219 | static void __ocfs2_node_map_dup(struct ocfs2_node_map *target, | 217 | static void __ocfs2_node_map_dup(struct ocfs2_node_map *target, |
220 | struct ocfs2_node_map *from) | 218 | struct ocfs2_node_map *from) |
221 | { | 219 | { |
@@ -254,6 +252,8 @@ static void __ocfs2_node_map_set(struct ocfs2_node_map *target, | |||
254 | target->map[i] = from->map[i]; | 252 | target->map[i] = from->map[i]; |
255 | } | 253 | } |
256 | 254 | ||
255 | #endif /* 0 */ | ||
256 | |||
257 | /* Returns whether the recovery bit was actually set - it may not be | 257 | /* Returns whether the recovery bit was actually set - it may not be |
258 | * if a node is still marked as needing recovery */ | 258 | * if a node is still marked as needing recovery */ |
259 | int ocfs2_recovery_map_set(struct ocfs2_super *osb, | 259 | int ocfs2_recovery_map_set(struct ocfs2_super *osb, |
diff --git a/fs/ocfs2/heartbeat.h b/fs/ocfs2/heartbeat.h index 56859211888a..eac63aed7611 100644 --- a/fs/ocfs2/heartbeat.h +++ b/fs/ocfs2/heartbeat.h | |||
@@ -33,7 +33,6 @@ void ocfs2_stop_heartbeat(struct ocfs2_super *osb); | |||
33 | 33 | ||
34 | /* node map functions - used to keep track of mounted and in-recovery | 34 | /* node map functions - used to keep track of mounted and in-recovery |
35 | * nodes. */ | 35 | * nodes. */ |
36 | void ocfs2_node_map_init(struct ocfs2_node_map *map); | ||
37 | int ocfs2_node_map_is_empty(struct ocfs2_super *osb, | 36 | int ocfs2_node_map_is_empty(struct ocfs2_super *osb, |
38 | struct ocfs2_node_map *map); | 37 | struct ocfs2_node_map *map); |
39 | void ocfs2_node_map_set_bit(struct ocfs2_super *osb, | 38 | void ocfs2_node_map_set_bit(struct ocfs2_super *osb, |
@@ -57,9 +56,5 @@ int ocfs2_recovery_map_set(struct ocfs2_super *osb, | |||
57 | int num); | 56 | int num); |
58 | void ocfs2_recovery_map_clear(struct ocfs2_super *osb, | 57 | void ocfs2_recovery_map_clear(struct ocfs2_super *osb, |
59 | int num); | 58 | int num); |
60 | /* returns 1 if bit is the only bit set in target, 0 otherwise */ | ||
61 | int ocfs2_node_map_is_only(struct ocfs2_super *osb, | ||
62 | struct ocfs2_node_map *target, | ||
63 | int bit); | ||
64 | 59 | ||
65 | #endif /* OCFS2_HEARTBEAT_H */ | 60 | #endif /* OCFS2_HEARTBEAT_H */ |
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index add1ffdc5c6c..ab83fd562429 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c | |||
@@ -120,9 +120,6 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb) | |||
120 | 120 | ||
121 | mlog_entry_void(); | 121 | mlog_entry_void(); |
122 | 122 | ||
123 | if (ocfs2_mount_local(osb)) | ||
124 | goto bail; | ||
125 | |||
126 | if (osb->local_alloc_size == 0) | 123 | if (osb->local_alloc_size == 0) |
127 | goto bail; | 124 | goto bail; |
128 | 125 | ||
@@ -588,8 +585,7 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, | |||
588 | while(bits_wanted--) | 585 | while(bits_wanted--) |
589 | ocfs2_set_bit(start++, bitmap); | 586 | ocfs2_set_bit(start++, bitmap); |
590 | 587 | ||
591 | alloc->id1.bitmap1.i_used = cpu_to_le32(*num_bits + | 588 | le32_add_cpu(&alloc->id1.bitmap1.i_used, *num_bits); |
592 | le32_to_cpu(alloc->id1.bitmap1.i_used)); | ||
593 | 589 | ||
594 | status = ocfs2_journal_dirty(handle, osb->local_alloc_bh); | 590 | status = ocfs2_journal_dirty(handle, osb->local_alloc_bh); |
595 | if (status < 0) { | 591 | if (status < 0) { |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 96ee899d6502..91a1bd67ac1d 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -314,9 +314,12 @@ static int proc_pid_schedstat(struct task_struct *task, char *buffer) | |||
314 | static int lstats_show_proc(struct seq_file *m, void *v) | 314 | static int lstats_show_proc(struct seq_file *m, void *v) |
315 | { | 315 | { |
316 | int i; | 316 | int i; |
317 | struct task_struct *task = m->private; | 317 | struct inode *inode = m->private; |
318 | seq_puts(m, "Latency Top version : v0.1\n"); | 318 | struct task_struct *task = get_proc_task(inode); |
319 | 319 | ||
320 | if (!task) | ||
321 | return -ESRCH; | ||
322 | seq_puts(m, "Latency Top version : v0.1\n"); | ||
320 | for (i = 0; i < 32; i++) { | 323 | for (i = 0; i < 32; i++) { |
321 | if (task->latency_record[i].backtrace[0]) { | 324 | if (task->latency_record[i].backtrace[0]) { |
322 | int q; | 325 | int q; |
@@ -341,32 +344,24 @@ static int lstats_show_proc(struct seq_file *m, void *v) | |||
341 | } | 344 | } |
342 | 345 | ||
343 | } | 346 | } |
347 | put_task_struct(task); | ||
344 | return 0; | 348 | return 0; |
345 | } | 349 | } |
346 | 350 | ||
347 | static int lstats_open(struct inode *inode, struct file *file) | 351 | static int lstats_open(struct inode *inode, struct file *file) |
348 | { | 352 | { |
349 | int ret; | 353 | return single_open(file, lstats_show_proc, inode); |
350 | struct seq_file *m; | ||
351 | struct task_struct *task = get_proc_task(inode); | ||
352 | |||
353 | ret = single_open(file, lstats_show_proc, NULL); | ||
354 | if (!ret) { | ||
355 | m = file->private_data; | ||
356 | m->private = task; | ||
357 | } | ||
358 | return ret; | ||
359 | } | 354 | } |
360 | 355 | ||
361 | static ssize_t lstats_write(struct file *file, const char __user *buf, | 356 | static ssize_t lstats_write(struct file *file, const char __user *buf, |
362 | size_t count, loff_t *offs) | 357 | size_t count, loff_t *offs) |
363 | { | 358 | { |
364 | struct seq_file *m; | 359 | struct task_struct *task = get_proc_task(file->f_dentry->d_inode); |
365 | struct task_struct *task; | ||
366 | 360 | ||
367 | m = file->private_data; | 361 | if (!task) |
368 | task = m->private; | 362 | return -ESRCH; |
369 | clear_all_latency_tracing(task); | 363 | clear_all_latency_tracing(task); |
364 | put_task_struct(task); | ||
370 | 365 | ||
371 | return count; | 366 | return count; |
372 | } | 367 | } |
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index 468805d40e2b..2d563979cb02 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/interrupt.h> | 32 | #include <linux/interrupt.h> |
33 | #include <linux/swap.h> | 33 | #include <linux/swap.h> |
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <linux/genhd.h> | ||
35 | #include <linux/smp.h> | 36 | #include <linux/smp.h> |
36 | #include <linux/signal.h> | 37 | #include <linux/signal.h> |
37 | #include <linux/module.h> | 38 | #include <linux/module.h> |
@@ -377,7 +378,6 @@ static int stram_read_proc(char *page, char **start, off_t off, | |||
377 | #endif | 378 | #endif |
378 | 379 | ||
379 | #ifdef CONFIG_BLOCK | 380 | #ifdef CONFIG_BLOCK |
380 | extern const struct seq_operations partitions_op; | ||
381 | static int partitions_open(struct inode *inode, struct file *file) | 381 | static int partitions_open(struct inode *inode, struct file *file) |
382 | { | 382 | { |
383 | return seq_open(file, &partitions_op); | 383 | return seq_open(file, &partitions_op); |
@@ -389,7 +389,6 @@ static const struct file_operations proc_partitions_operations = { | |||
389 | .release = seq_release, | 389 | .release = seq_release, |
390 | }; | 390 | }; |
391 | 391 | ||
392 | extern const struct seq_operations diskstats_op; | ||
393 | static int diskstats_open(struct inode *inode, struct file *file) | 392 | static int diskstats_open(struct inode *inode, struct file *file) |
394 | { | 393 | { |
395 | return seq_open(file, &diskstats_op); | 394 | return seq_open(file, &diskstats_op); |
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 6841452e0dea..393cc22c1717 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c | |||
@@ -2031,7 +2031,7 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, | |||
2031 | return -EXDEV; | 2031 | return -EXDEV; |
2032 | } | 2032 | } |
2033 | /* We must not pack tails for quota files on reiserfs for quota IO to work */ | 2033 | /* We must not pack tails for quota files on reiserfs for quota IO to work */ |
2034 | if (!REISERFS_I(nd.path.dentry->d_inode)->i_flags & i_nopack_mask) { | 2034 | if (!(REISERFS_I(nd.path.dentry->d_inode)->i_flags & i_nopack_mask)) { |
2035 | reiserfs_warning(sb, | 2035 | reiserfs_warning(sb, |
2036 | "reiserfs: Quota file must have tail packing disabled."); | 2036 | "reiserfs: Quota file must have tail packing disabled."); |
2037 | path_put(&nd.path); | 2037 | path_put(&nd.path); |
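
The reiserfs hunk is a pure C precedence fix: ! binds tighter than &, so "!x & mask" means "(!x) & mask". A stand-alone demonstration; the I_NOPACK value is made up for illustration, not the real i_nopack_mask:

#include <stdio.h>

#define I_NOPACK 0x04    /* illustrative bit */

static void check(unsigned int flags)
{
        printf("flags=%#x  buggy !flags & mask = %d   fixed !(flags & mask) = %d\n",
               flags, !flags & I_NOPACK, !(flags & I_NOPACK));
}

int main(void)
{
        check(0x04);   /* bit set:   fixed form says 0, no warning needed           */
        check(0x00);   /* bit clear: fixed form says 1; the buggy form still says 0, */
                       /* so the tail-packing warning could never have triggered     */
        return 0;
}
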
diff --git a/fs/splice.c b/fs/splice.c index 9b559ee711a8..0670c915cd35 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
@@ -1669,6 +1669,13 @@ static int link_pipe(struct pipe_inode_info *ipipe, | |||
1669 | i++; | 1669 | i++; |
1670 | } while (len); | 1670 | } while (len); |
1671 | 1671 | ||
1672 | /* | ||
1673 | * return EAGAIN if we have the potential of some data in the | ||
1674 | * future, otherwise just return 0 | ||
1675 | */ | ||
1676 | if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) | ||
1677 | ret = -EAGAIN; | ||
1678 | |||
1672 | inode_double_unlock(ipipe->inode, opipe->inode); | 1679 | inode_double_unlock(ipipe->inode, opipe->inode); |
1673 | 1680 | ||
1674 | /* | 1681 | /* |
@@ -1709,11 +1716,8 @@ static long do_tee(struct file *in, struct file *out, size_t len, | |||
1709 | ret = link_ipipe_prep(ipipe, flags); | 1716 | ret = link_ipipe_prep(ipipe, flags); |
1710 | if (!ret) { | 1717 | if (!ret) { |
1711 | ret = link_opipe_prep(opipe, flags); | 1718 | ret = link_opipe_prep(opipe, flags); |
1712 | if (!ret) { | 1719 | if (!ret) |
1713 | ret = link_pipe(ipipe, opipe, len, flags); | 1720 | ret = link_pipe(ipipe, opipe, len, flags); |
1714 | if (!ret && (flags & SPLICE_F_NONBLOCK)) | ||
1715 | ret = -EAGAIN; | ||
1716 | } | ||
1717 | } | 1721 | } |
1718 | } | 1722 | } |
1719 | 1723 | ||
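
With the splice.c change, the EAGAIN decision for a non-blocking tee() moves into link_pipe(): the caller sees -EAGAIN only when nothing could be duplicated but data may still arrive, and 0 once no writer is left. A rough user-space sketch of how a caller distinguishes the two (Linux-specific, error handling trimmed, and exact return values depend on kernel state and version):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int src[2], dst[2];
        char buf[8];
        ssize_t n;

        if (pipe(src) < 0 || pipe(dst) < 0)
                return 1;
        write(src[1], "hello", 5);                 /* seed the source pipe */

        n = tee(src[0], dst[1], 4096, SPLICE_F_NONBLOCK);
        printf("tee with data:         %zd\n", n); /* duplicates the 5 bytes */

        read(src[0], buf, sizeof(buf));            /* drain the source pipe */
        n = tee(src[0], dst[1], 4096, SPLICE_F_NONBLOCK);
        if (n < 0 && errno == EAGAIN)              /* writer still open: retry later */
                printf("tee on empty pipe:     EAGAIN\n");

        close(src[1]);                             /* no writer left */
        n = tee(src[0], dst[1], 4096, SPLICE_F_NONBLOCK);
        printf("tee after writer gone: %zd\n", n); /* 0: genuine end of data */
        return 0;
}
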
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 21dfc9da235e..8831d9518790 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
@@ -171,7 +171,7 @@ xfs_parseargs( | |||
171 | char *this_char, *value, *eov; | 171 | char *this_char, *value, *eov; |
172 | int dsunit, dswidth, vol_dsunit, vol_dswidth; | 172 | int dsunit, dswidth, vol_dsunit, vol_dswidth; |
173 | int iosize; | 173 | int iosize; |
174 | int ikeep = 0; | 174 | int dmapi_implies_ikeep = 1; |
175 | 175 | ||
176 | args->flags |= XFSMNT_BARRIER; | 176 | args->flags |= XFSMNT_BARRIER; |
177 | args->flags2 |= XFSMNT2_COMPAT_IOSIZE; | 177 | args->flags2 |= XFSMNT2_COMPAT_IOSIZE; |
@@ -302,10 +302,10 @@ xfs_parseargs( | |||
302 | } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) { | 302 | } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) { |
303 | args->flags &= ~XFSMNT_BARRIER; | 303 | args->flags &= ~XFSMNT_BARRIER; |
304 | } else if (!strcmp(this_char, MNTOPT_IKEEP)) { | 304 | } else if (!strcmp(this_char, MNTOPT_IKEEP)) { |
305 | ikeep = 1; | 305 | args->flags |= XFSMNT_IKEEP; |
306 | args->flags &= ~XFSMNT_IDELETE; | ||
307 | } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { | 306 | } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { |
308 | args->flags |= XFSMNT_IDELETE; | 307 | dmapi_implies_ikeep = 0; |
308 | args->flags &= ~XFSMNT_IKEEP; | ||
309 | } else if (!strcmp(this_char, MNTOPT_LARGEIO)) { | 309 | } else if (!strcmp(this_char, MNTOPT_LARGEIO)) { |
310 | args->flags2 &= ~XFSMNT2_COMPAT_IOSIZE; | 310 | args->flags2 &= ~XFSMNT2_COMPAT_IOSIZE; |
311 | } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) { | 311 | } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) { |
@@ -410,8 +410,8 @@ xfs_parseargs( | |||
410 | * Note that if "ikeep" or "noikeep" mount options are | 410 | * Note that if "ikeep" or "noikeep" mount options are |
411 | * supplied, then they are honored. | 411 | * supplied, then they are honored. |
412 | */ | 412 | */ |
413 | if (!(args->flags & XFSMNT_DMAPI) && !ikeep) | 413 | if ((args->flags & XFSMNT_DMAPI) && dmapi_implies_ikeep) |
414 | args->flags |= XFSMNT_IDELETE; | 414 | args->flags |= XFSMNT_IKEEP; |
415 | 415 | ||
416 | if ((args->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { | 416 | if ((args->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { |
417 | if (dsunit) { | 417 | if (dsunit) { |
@@ -446,6 +446,7 @@ xfs_showargs( | |||
446 | { | 446 | { |
447 | static struct proc_xfs_info xfs_info_set[] = { | 447 | static struct proc_xfs_info xfs_info_set[] = { |
448 | /* the few simple ones we can get from the mount struct */ | 448 | /* the few simple ones we can get from the mount struct */ |
449 | { XFS_MOUNT_IKEEP, "," MNTOPT_IKEEP }, | ||
449 | { XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC }, | 450 | { XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC }, |
450 | { XFS_MOUNT_INO64, "," MNTOPT_INO64 }, | 451 | { XFS_MOUNT_INO64, "," MNTOPT_INO64 }, |
451 | { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN }, | 452 | { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN }, |
@@ -461,7 +462,6 @@ xfs_showargs( | |||
461 | }; | 462 | }; |
462 | static struct proc_xfs_info xfs_info_unset[] = { | 463 | static struct proc_xfs_info xfs_info_unset[] = { |
463 | /* the few simple ones we can get from the mount struct */ | 464 | /* the few simple ones we can get from the mount struct */ |
464 | { XFS_MOUNT_IDELETE, "," MNTOPT_IKEEP }, | ||
465 | { XFS_MOUNT_COMPAT_IOSIZE, "," MNTOPT_LARGEIO }, | 465 | { XFS_MOUNT_COMPAT_IOSIZE, "," MNTOPT_LARGEIO }, |
466 | { XFS_MOUNT_BARRIER, "," MNTOPT_NOBARRIER }, | 466 | { XFS_MOUNT_BARRIER, "," MNTOPT_NOBARRIER }, |
467 | { XFS_MOUNT_SMALL_INUMS, "," MNTOPT_64BITINODE }, | 467 | { XFS_MOUNT_SMALL_INUMS, "," MNTOPT_64BITINODE }, |
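
The xfs_parseargs() change inverts the option's polarity: ikeep/noikeep now set or clear XFSMNT_IKEEP directly, and a DMAPI mount defaults to keeping inode clusters unless noikeep was given explicitly. A compressed, stand-alone sketch of that decision logic; MNT_IKEEP/MNT_DMAPI and the option strings are illustrative stand-ins, not the real mount code:

#include <stdio.h>
#include <string.h>

#define MNT_IKEEP  0x1
#define MNT_DMAPI  0x2

static unsigned parse(const char *opts[], int n)
{
        unsigned flags = 0;
        int dmapi_implies_ikeep = 1;    /* default: dmapi keeps inode clusters */

        for (int i = 0; i < n; i++) {
                if (!strcmp(opts[i], "ikeep"))
                        flags |= MNT_IKEEP;
                else if (!strcmp(opts[i], "noikeep")) {
                        dmapi_implies_ikeep = 0;
                        flags &= ~MNT_IKEEP;
                } else if (!strcmp(opts[i], "dmapi"))
                        flags |= MNT_DMAPI;
        }
        if ((flags & MNT_DMAPI) && dmapi_implies_ikeep)
                flags |= MNT_IKEEP;
        return flags;
}

int main(void)
{
        const char *a[] = { "dmapi" };              /* -> IKEEP set implicitly */
        const char *b[] = { "dmapi", "noikeep" };   /* -> explicit noikeep wins */
        printf("dmapi:         %#x\n", parse(a, 1));
        printf("dmapi,noikeep: %#x\n", parse(b, 2));
        return 0;
}
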
diff --git a/fs/xfs/xfs_bit.c b/fs/xfs/xfs_bit.c index 48228848f5ae..fab0b6d5a41b 100644 --- a/fs/xfs/xfs_bit.c +++ b/fs/xfs/xfs_bit.c | |||
@@ -25,6 +25,109 @@ | |||
25 | * XFS bit manipulation routines, used in non-realtime code. | 25 | * XFS bit manipulation routines, used in non-realtime code. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #ifndef HAVE_ARCH_HIGHBIT | ||
29 | /* | ||
30 | * Index of high bit number in byte, -1 for none set, 0..7 otherwise. | ||
31 | */ | ||
32 | static const char xfs_highbit[256] = { | ||
33 | -1, 0, 1, 1, 2, 2, 2, 2, /* 00 .. 07 */ | ||
34 | 3, 3, 3, 3, 3, 3, 3, 3, /* 08 .. 0f */ | ||
35 | 4, 4, 4, 4, 4, 4, 4, 4, /* 10 .. 17 */ | ||
36 | 4, 4, 4, 4, 4, 4, 4, 4, /* 18 .. 1f */ | ||
37 | 5, 5, 5, 5, 5, 5, 5, 5, /* 20 .. 27 */ | ||
38 | 5, 5, 5, 5, 5, 5, 5, 5, /* 28 .. 2f */ | ||
39 | 5, 5, 5, 5, 5, 5, 5, 5, /* 30 .. 37 */ | ||
40 | 5, 5, 5, 5, 5, 5, 5, 5, /* 38 .. 3f */ | ||
41 | 6, 6, 6, 6, 6, 6, 6, 6, /* 40 .. 47 */ | ||
42 | 6, 6, 6, 6, 6, 6, 6, 6, /* 48 .. 4f */ | ||
43 | 6, 6, 6, 6, 6, 6, 6, 6, /* 50 .. 57 */ | ||
44 | 6, 6, 6, 6, 6, 6, 6, 6, /* 58 .. 5f */ | ||
45 | 6, 6, 6, 6, 6, 6, 6, 6, /* 60 .. 67 */ | ||
46 | 6, 6, 6, 6, 6, 6, 6, 6, /* 68 .. 6f */ | ||
47 | 6, 6, 6, 6, 6, 6, 6, 6, /* 70 .. 77 */ | ||
48 | 6, 6, 6, 6, 6, 6, 6, 6, /* 78 .. 7f */ | ||
49 | 7, 7, 7, 7, 7, 7, 7, 7, /* 80 .. 87 */ | ||
50 | 7, 7, 7, 7, 7, 7, 7, 7, /* 88 .. 8f */ | ||
51 | 7, 7, 7, 7, 7, 7, 7, 7, /* 90 .. 97 */ | ||
52 | 7, 7, 7, 7, 7, 7, 7, 7, /* 98 .. 9f */ | ||
53 | 7, 7, 7, 7, 7, 7, 7, 7, /* a0 .. a7 */ | ||
54 | 7, 7, 7, 7, 7, 7, 7, 7, /* a8 .. af */ | ||
55 | 7, 7, 7, 7, 7, 7, 7, 7, /* b0 .. b7 */ | ||
56 | 7, 7, 7, 7, 7, 7, 7, 7, /* b8 .. bf */ | ||
57 | 7, 7, 7, 7, 7, 7, 7, 7, /* c0 .. c7 */ | ||
58 | 7, 7, 7, 7, 7, 7, 7, 7, /* c8 .. cf */ | ||
59 | 7, 7, 7, 7, 7, 7, 7, 7, /* d0 .. d7 */ | ||
60 | 7, 7, 7, 7, 7, 7, 7, 7, /* d8 .. df */ | ||
61 | 7, 7, 7, 7, 7, 7, 7, 7, /* e0 .. e7 */ | ||
62 | 7, 7, 7, 7, 7, 7, 7, 7, /* e8 .. ef */ | ||
63 | 7, 7, 7, 7, 7, 7, 7, 7, /* f0 .. f7 */ | ||
64 | 7, 7, 7, 7, 7, 7, 7, 7, /* f8 .. ff */ | ||
65 | }; | ||
66 | #endif | ||
67 | |||
68 | /* | ||
69 | * xfs_highbit32: get high bit set out of 32-bit argument, -1 if none set. | ||
70 | */ | ||
71 | inline int | ||
72 | xfs_highbit32( | ||
73 | __uint32_t v) | ||
74 | { | ||
75 | #ifdef HAVE_ARCH_HIGHBIT | ||
76 | return highbit32(v); | ||
77 | #else | ||
78 | int i; | ||
79 | |||
80 | if (v & 0xffff0000) | ||
81 | if (v & 0xff000000) | ||
82 | i = 24; | ||
83 | else | ||
84 | i = 16; | ||
85 | else if (v & 0x0000ffff) | ||
86 | if (v & 0x0000ff00) | ||
87 | i = 8; | ||
88 | else | ||
89 | i = 0; | ||
90 | else | ||
91 | return -1; | ||
92 | return i + xfs_highbit[(v >> i) & 0xff]; | ||
93 | #endif | ||
94 | } | ||
95 | |||
96 | /* | ||
97 | * xfs_lowbit64: get low bit set out of 64-bit argument, -1 if none set. | ||
98 | */ | ||
99 | int | ||
100 | xfs_lowbit64( | ||
101 | __uint64_t v) | ||
102 | { | ||
103 | __uint32_t w = (__uint32_t)v; | ||
104 | int n = 0; | ||
105 | |||
106 | if (w) { /* lower bits */ | ||
107 | n = ffs(w); | ||
108 | } else { /* upper bits */ | ||
109 | w = (__uint32_t)(v >> 32); | ||
110 | if (w && (n = ffs(w))) | ||
111 | n += 32; | ||
112 | } | ||
113 | return n - 1; | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * xfs_highbit64: get high bit set out of 64-bit argument, -1 if none set. | ||
118 | */ | ||
119 | int | ||
120 | xfs_highbit64( | ||
121 | __uint64_t v) | ||
122 | { | ||
123 | __uint32_t h = (__uint32_t)(v >> 32); | ||
124 | |||
125 | if (h) | ||
126 | return xfs_highbit32(h) + 32; | ||
127 | return xfs_highbit32((__uint32_t)v); | ||
128 | } | ||
129 | |||
130 | |||
28 | /* | 131 | /* |
29 | * Return whether bitmap is empty. | 132 | * Return whether bitmap is empty. |
30 | * Size is number of words in the bitmap, which is padded to word boundary | 133 | * Size is number of words in the bitmap, which is padded to word boundary |
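
The fallback xfs_highbit32() narrows the word to one byte with two range tests, then finishes with the 256-entry table. A stand-alone check of the same scheme against a slow reference; here the table is generated at startup instead of being spelled out:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static signed char highbit[256];        /* highbit[b] = top set bit of b, -1 if none */

static int highbit32(uint32_t v)
{
        int i;

        if (v & 0xffff0000)
                i = (v & 0xff000000) ? 24 : 16;
        else if (v & 0x0000ffff)
                i = (v & 0x0000ff00) ? 8 : 0;
        else
                return -1;
        return i + highbit[(v >> i) & 0xff];
}

static int highbit32_ref(uint32_t v)    /* slow reference: scan from the top */
{
        for (int bit = 31; bit >= 0; bit--)
                if (v & (1u << bit))
                        return bit;
        return -1;
}

int main(void)
{
        highbit[0] = -1;
        for (int b = 1; b < 256; b++)
                highbit[b] = highbit[b / 2] + 1;    /* each doubling adds one bit */

        uint32_t samples[] = { 0, 1, 2, 0x80, 0x100, 0xdeadbeef, 0x80000000u };
        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                assert(highbit32(samples[i]) == highbit32_ref(samples[i]));
        printf("table-based highbit32 matches the reference\n");
        return 0;
}
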
diff --git a/fs/xfs/xfs_bit.h b/fs/xfs/xfs_bit.h index 325a007dec91..082641a9782c 100644 --- a/fs/xfs/xfs_bit.h +++ b/fs/xfs/xfs_bit.h | |||
@@ -47,30 +47,13 @@ static inline __uint64_t xfs_mask64lo(int n) | |||
47 | } | 47 | } |
48 | 48 | ||
49 | /* Get high bit set out of 32-bit argument, -1 if none set */ | 49 | /* Get high bit set out of 32-bit argument, -1 if none set */ |
50 | static inline int xfs_highbit32(__uint32_t v) | 50 | extern int xfs_highbit32(__uint32_t v); |
51 | { | ||
52 | return fls(v) - 1; | ||
53 | } | ||
54 | |||
55 | /* Get high bit set out of 64-bit argument, -1 if none set */ | ||
56 | static inline int xfs_highbit64(__uint64_t v) | ||
57 | { | ||
58 | return fls64(v) - 1; | ||
59 | } | ||
60 | |||
61 | /* Get low bit set out of 32-bit argument, -1 if none set */ | ||
62 | static inline int xfs_lowbit32(__uint32_t v) | ||
63 | { | ||
64 | __uint32_t t = v; | ||
65 | return (t) ? find_first_bit((unsigned long *)&t, 32) : -1; | ||
66 | } | ||
67 | 51 | ||
68 | /* Get low bit set out of 64-bit argument, -1 if none set */ | 52 | /* Get low bit set out of 64-bit argument, -1 if none set */ |
69 | static inline int xfs_lowbit64(__uint64_t v) | 53 | extern int xfs_lowbit64(__uint64_t v); |
70 | { | 54 | |
71 | __uint64_t t = v; | 55 | /* Get high bit set out of 64-bit argument, -1 if none set */ |
72 | return (t) ? find_first_bit((unsigned long *)&t, 64) : -1; | 56 | extern int xfs_highbit64(__uint64_t); |
73 | } | ||
74 | 57 | ||
75 | /* Return whether bitmap is empty (1 == empty) */ | 58 | /* Return whether bitmap is empty (1 == empty) */ |
76 | extern int xfs_bitmap_empty(uint *map, uint size); | 59 | extern int xfs_bitmap_empty(uint *map, uint size); |
diff --git a/fs/xfs/xfs_clnt.h b/fs/xfs/xfs_clnt.h index d16c1b971074..d5d1e60ee224 100644 --- a/fs/xfs/xfs_clnt.h +++ b/fs/xfs/xfs_clnt.h | |||
@@ -86,7 +86,7 @@ struct xfs_mount_args { | |||
86 | #define XFSMNT_NOUUID 0x01000000 /* Ignore fs uuid */ | 86 | #define XFSMNT_NOUUID 0x01000000 /* Ignore fs uuid */ |
87 | #define XFSMNT_DMAPI 0x02000000 /* enable dmapi/xdsm */ | 87 | #define XFSMNT_DMAPI 0x02000000 /* enable dmapi/xdsm */ |
88 | #define XFSMNT_BARRIER 0x04000000 /* use write barriers */ | 88 | #define XFSMNT_BARRIER 0x04000000 /* use write barriers */ |
89 | #define XFSMNT_IDELETE 0x08000000 /* inode cluster delete */ | 89 | #define XFSMNT_IKEEP 0x08000000 /* keep inode clusters */ |
90 | #define XFSMNT_SWALLOC 0x10000000 /* turn on stripe width | 90 | #define XFSMNT_SWALLOC 0x10000000 /* turn on stripe width |
91 | * allocation */ | 91 | * allocation */ |
92 | #define XFSMNT_DIRSYNC 0x40000000 /* sync creat,link,unlink,rename | 92 | #define XFSMNT_DIRSYNC 0x40000000 /* sync creat,link,unlink,rename |
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index c5836b951d0c..db9d5fa600af 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c | |||
@@ -1053,7 +1053,7 @@ xfs_difree( | |||
1053 | /* | 1053 | /* |
1054 | * When an inode cluster is free, it becomes eligible for removal | 1054 | * When an inode cluster is free, it becomes eligible for removal |
1055 | */ | 1055 | */ |
1056 | if ((mp->m_flags & XFS_MOUNT_IDELETE) && | 1056 | if (!(mp->m_flags & XFS_MOUNT_IKEEP) && |
1057 | (rec.ir_freecount == XFS_IALLOC_INODES(mp))) { | 1057 | (rec.ir_freecount == XFS_IALLOC_INODES(mp))) { |
1058 | 1058 | ||
1059 | *delete = 1; | 1059 | *delete = 1; |
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index f7c620ec6e69..1d8a4728d847 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h | |||
@@ -366,7 +366,7 @@ typedef struct xfs_mount { | |||
366 | #define XFS_MOUNT_SMALL_INUMS (1ULL << 15) /* users wants 32bit inodes */ | 366 | #define XFS_MOUNT_SMALL_INUMS (1ULL << 15) /* users wants 32bit inodes */ |
367 | #define XFS_MOUNT_NOUUID (1ULL << 16) /* ignore uuid during mount */ | 367 | #define XFS_MOUNT_NOUUID (1ULL << 16) /* ignore uuid during mount */ |
368 | #define XFS_MOUNT_BARRIER (1ULL << 17) | 368 | #define XFS_MOUNT_BARRIER (1ULL << 17) |
369 | #define XFS_MOUNT_IDELETE (1ULL << 18) /* delete empty inode clusters*/ | 369 | #define XFS_MOUNT_IKEEP (1ULL << 18) /* keep empty inode clusters*/ |
370 | #define XFS_MOUNT_SWALLOC (1ULL << 19) /* turn on stripe width | 370 | #define XFS_MOUNT_SWALLOC (1ULL << 19) /* turn on stripe width |
371 | * allocation */ | 371 | * allocation */ |
372 | #define XFS_MOUNT_RDONLY (1ULL << 20) /* read-only fs */ | 372 | #define XFS_MOUNT_RDONLY (1ULL << 20) /* read-only fs */ |
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index ca83ddf72af4..47082c01872d 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c | |||
@@ -73,6 +73,18 @@ STATIC int xfs_rtmodify_summary(xfs_mount_t *, xfs_trans_t *, int, | |||
73 | */ | 73 | */ |
74 | 74 | ||
75 | /* | 75 | /* |
76 | * xfs_lowbit32: get low bit set out of 32-bit argument, -1 if none set. | ||
77 | */ | ||
78 | STATIC int | ||
79 | xfs_lowbit32( | ||
80 | __uint32_t v) | ||
81 | { | ||
82 | if (v) | ||
83 | return ffs(v) - 1; | ||
84 | return -1; | ||
85 | } | ||
86 | |||
87 | /* | ||
76 | * Allocate space to the bitmap or summary file, and zero it, for growfs. | 88 | * Allocate space to the bitmap or summary file, and zero it, for growfs. |
77 | */ | 89 | */ |
78 | STATIC int /* error */ | 90 | STATIC int /* error */ |
@@ -432,7 +444,6 @@ xfs_rtallocate_extent_near( | |||
432 | } | 444 | } |
433 | bbno = XFS_BITTOBLOCK(mp, bno); | 445 | bbno = XFS_BITTOBLOCK(mp, bno); |
434 | i = 0; | 446 | i = 0; |
435 | ASSERT(minlen != 0); | ||
436 | log2len = xfs_highbit32(minlen); | 447 | log2len = xfs_highbit32(minlen); |
437 | /* | 448 | /* |
438 | * Loop over all bitmap blocks (bbno + i is current block). | 449 | * Loop over all bitmap blocks (bbno + i is current block). |
@@ -601,8 +612,6 @@ xfs_rtallocate_extent_size( | |||
601 | xfs_suminfo_t sum; /* summary information for extents */ | 612 | xfs_suminfo_t sum; /* summary information for extents */ |
602 | 613 | ||
603 | ASSERT(minlen % prod == 0 && maxlen % prod == 0); | 614 | ASSERT(minlen % prod == 0 && maxlen % prod == 0); |
604 | ASSERT(maxlen != 0); | ||
605 | |||
606 | /* | 615 | /* |
607 | * Loop over all the levels starting with maxlen. | 616 | * Loop over all the levels starting with maxlen. |
608 | * At each level, look at all the bitmap blocks, to see if there | 617 | * At each level, look at all the bitmap blocks, to see if there |
@@ -660,9 +669,6 @@ xfs_rtallocate_extent_size( | |||
660 | *rtblock = NULLRTBLOCK; | 669 | *rtblock = NULLRTBLOCK; |
661 | return 0; | 670 | return 0; |
662 | } | 671 | } |
663 | ASSERT(minlen != 0); | ||
664 | ASSERT(maxlen != 0); | ||
665 | |||
666 | /* | 672 | /* |
667 | * Loop over sizes, from maxlen down to minlen. | 673 | * Loop over sizes, from maxlen down to minlen. |
668 | * This time, when we do the allocations, allow smaller ones | 674 | * This time, when we do the allocations, allow smaller ones |
@@ -1948,7 +1954,6 @@ xfs_growfs_rt( | |||
1948 | nsbp->sb_blocksize * nsbp->sb_rextsize); | 1954 | nsbp->sb_blocksize * nsbp->sb_rextsize); |
1949 | nsbp->sb_rextents = nsbp->sb_rblocks; | 1955 | nsbp->sb_rextents = nsbp->sb_rblocks; |
1950 | do_div(nsbp->sb_rextents, nsbp->sb_rextsize); | 1956 | do_div(nsbp->sb_rextents, nsbp->sb_rextsize); |
1951 | ASSERT(nsbp->sb_rextents != 0); | ||
1952 | nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents); | 1957 | nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents); |
1953 | nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1; | 1958 | nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1; |
1954 | nrsumsize = | 1959 | nrsumsize = |
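
The new private xfs_lowbit32() in xfs_rtalloc.c is just ffs() shifted to a 0-based bit index, with -1 for zero. A quick stand-alone check of those semantics against libc's ffs():

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <strings.h>     /* ffs() */

static int lowbit32(uint32_t v)
{
        if (v)
                return ffs(v) - 1;   /* ffs() is 1-based, so 0x1 -> 1 -> bit 0 */
        return -1;
}

int main(void)
{
        assert(lowbit32(0) == -1);
        assert(lowbit32(1) == 0);
        assert(lowbit32(0x80) == 7);
        assert(lowbit32(0xa0) == 5);          /* lowest of bits 5 and 7 */
        assert(lowbit32(0x80000000u) == 31);
        printf("lowbit32 checks pass\n");
        return 0;
}
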
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index 413587f02155..7321304a69cc 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c | |||
@@ -281,8 +281,8 @@ xfs_start_flags( | |||
281 | mp->m_readio_log = mp->m_writeio_log = ap->iosizelog; | 281 | mp->m_readio_log = mp->m_writeio_log = ap->iosizelog; |
282 | } | 282 | } |
283 | 283 | ||
284 | if (ap->flags & XFSMNT_IDELETE) | 284 | if (ap->flags & XFSMNT_IKEEP) |
285 | mp->m_flags |= XFS_MOUNT_IDELETE; | 285 | mp->m_flags |= XFS_MOUNT_IKEEP; |
286 | if (ap->flags & XFSMNT_DIRSYNC) | 286 | if (ap->flags & XFSMNT_DIRSYNC) |
287 | mp->m_flags |= XFS_MOUNT_DIRSYNC; | 287 | mp->m_flags |= XFS_MOUNT_DIRSYNC; |
288 | if (ap->flags & XFSMNT_ATTR2) | 288 | if (ap->flags & XFSMNT_ATTR2) |
diff --git a/include/asm-arm/arch-pxa/entry-macro.S b/include/asm-arm/arch-pxa/entry-macro.S index b7e730851461..c145bb01bc8f 100644 --- a/include/asm-arm/arch-pxa/entry-macro.S +++ b/include/asm-arm/arch-pxa/entry-macro.S | |||
@@ -35,7 +35,7 @@ | |||
35 | 1004: | 35 | 1004: |
36 | mrc p6, 0, \irqstat, c6, c0, 0 @ ICIP2 | 36 | mrc p6, 0, \irqstat, c6, c0, 0 @ ICIP2 |
37 | mrc p6, 0, \irqnr, c7, c0, 0 @ ICMR2 | 37 | mrc p6, 0, \irqnr, c7, c0, 0 @ ICMR2 |
38 | ands \irqstat, \irqstat, \irqnr | 38 | ands \irqnr, \irqstat, \irqnr |
39 | beq 1003f | 39 | beq 1003f |
40 | rsb \irqstat, \irqnr, #0 | 40 | rsb \irqstat, \irqnr, #0 |
41 | and \irqstat, \irqstat, \irqnr | 41 | and \irqstat, \irqstat, \irqnr |
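
With irqnr holding the masked pending bits again, the following rsb/and pair computes irqnr & -irqnr, i.e. it isolates the lowest set bit. The same idiom in C for reference; the register values below are invented, only the arithmetic matches the macro:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t icip2 = 0x00000448;     /* pretend pending bits: 3, 6, 10 */
        uint32_t icmr2 = 0xffffffff;     /* pretend all sources unmasked */

        uint32_t pending = icip2 & icmr2;       /* the fixed "ands irqnr, irqstat, irqnr" */
        uint32_t lowest  = pending & -pending;  /* "rsb" (negate) + "and": lowest set bit */

        printf("pending = %#010x\n", pending);
        printf("lowest  = %#010x\n", lowest);   /* 0x00000008: only bit 3 survives */
        return 0;
}
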
diff --git a/include/asm-arm/arch-pxa/pxa-regs.h b/include/asm-arm/arch-pxa/pxa-regs.h index ac175b4d10cb..2357a73340d4 100644 --- a/include/asm-arm/arch-pxa/pxa-regs.h +++ b/include/asm-arm/arch-pxa/pxa-regs.h | |||
@@ -520,6 +520,9 @@ | |||
520 | #define MCCR_FSRIE (1 << 1) /* FIFO Service Request Interrupt Enable */ | 520 | #define MCCR_FSRIE (1 << 1) /* FIFO Service Request Interrupt Enable */ |
521 | 521 | ||
522 | #define GCR __REG(0x4050000C) /* Global Control Register */ | 522 | #define GCR __REG(0x4050000C) /* Global Control Register */ |
523 | #ifdef CONFIG_PXA3xx | ||
524 | #define GCR_CLKBPB (1 << 31) /* Internal clock enable */ | ||
525 | #endif | ||
523 | #define GCR_nDMAEN (1 << 24) /* non DMA Enable */ | 526 | #define GCR_nDMAEN (1 << 24) /* non DMA Enable */ |
524 | #define GCR_CDONE_IE (1 << 19) /* Command Done Interrupt Enable */ | 527 | #define GCR_CDONE_IE (1 << 19) /* Command Done Interrupt Enable */ |
525 | #define GCR_SDONE_IE (1 << 18) /* Status Done Interrupt Enable */ | 528 | #define GCR_SDONE_IE (1 << 18) /* Status Done Interrupt Enable */ |
diff --git a/include/asm-arm/kexec.h b/include/asm-arm/kexec.h index 1ee17b6951d0..47fe34d692da 100644 --- a/include/asm-arm/kexec.h +++ b/include/asm-arm/kexec.h | |||
@@ -8,7 +8,7 @@ | |||
8 | /* Maximum address we can reach in physical address mode */ | 8 | /* Maximum address we can reach in physical address mode */ |
9 | #define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) | 9 | #define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) |
10 | /* Maximum address we can use for the control code buffer */ | 10 | /* Maximum address we can use for the control code buffer */ |
11 | #define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE | 11 | #define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) |
12 | 12 | ||
13 | #define KEXEC_CONTROL_CODE_SIZE 4096 | 13 | #define KEXEC_CONTROL_CODE_SIZE 4096 |
14 | 14 | ||
diff --git a/include/asm-arm/kprobes.h b/include/asm-arm/kprobes.h index 4e7bd32288ae..c042194d3ab5 100644 --- a/include/asm-arm/kprobes.h +++ b/include/asm-arm/kprobes.h | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/ptrace.h> | 20 | #include <linux/ptrace.h> |
21 | #include <linux/percpu.h> | 21 | #include <linux/percpu.h> |
22 | 22 | ||
23 | #define ARCH_SUPPORTS_KRETPROBES | ||
24 | #define __ARCH_WANT_KPROBES_INSN_SLOT | 23 | #define __ARCH_WANT_KPROBES_INSN_SLOT |
25 | #define MAX_INSN_SIZE 2 | 24 | #define MAX_INSN_SIZE 2 |
26 | #define MAX_STACK_SIZE 64 /* 32 would probably be OK */ | 25 | #define MAX_STACK_SIZE 64 /* 32 would probably be OK */ |
diff --git a/include/asm-arm/unaligned.h b/include/asm-arm/unaligned.h index 8431f6eed5c6..5db03cf3b905 100644 --- a/include/asm-arm/unaligned.h +++ b/include/asm-arm/unaligned.h | |||
@@ -40,16 +40,16 @@ extern int __bug_unaligned_x(const void *ptr); | |||
40 | */ | 40 | */ |
41 | 41 | ||
42 | #define __get_unaligned_2_le(__p) \ | 42 | #define __get_unaligned_2_le(__p) \ |
43 | (__p[0] | __p[1] << 8) | 43 | (unsigned int)(__p[0] | __p[1] << 8) |
44 | 44 | ||
45 | #define __get_unaligned_2_be(__p) \ | 45 | #define __get_unaligned_2_be(__p) \ |
46 | (__p[0] << 8 | __p[1]) | 46 | (unsigned int)(__p[0] << 8 | __p[1]) |
47 | 47 | ||
48 | #define __get_unaligned_4_le(__p) \ | 48 | #define __get_unaligned_4_le(__p) \ |
49 | (__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24) | 49 | (unsigned int)(__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24) |
50 | 50 | ||
51 | #define __get_unaligned_4_be(__p) \ | 51 | #define __get_unaligned_4_be(__p) \ |
52 | (__p[0] << 24 | __p[1] << 16 | __p[2] << 8 | __p[3]) | 52 | (unsigned int)(__p[0] << 24 | __p[1] << 16 | __p[2] << 8 | __p[3]) |
53 | 53 | ||
54 | #define __get_unaligned_8_le(__p) \ | 54 | #define __get_unaligned_8_le(__p) \ |
55 | ((unsigned long long)__get_unaligned_4_le((__p+4)) << 32 | \ | 55 | ((unsigned long long)__get_unaligned_4_le((__p+4)) << 32 | \ |
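
The (unsigned int) casts matter because the OR-of-shifts otherwise has type int; when byte 3 has its top bit set and the result feeds the 64-bit __get_unaligned_8_le(), the low half would be sign-extended into the high word. A stand-alone illustration of that failure mode:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const unsigned char p[4] = { 0xef, 0xbe, 0xad, 0xde };  /* 0xdeadbeef, little-endian */

        /* Without the cast the macro's result behaves like this signed int. */
        int32_t as_int = (int32_t)(p[0] | p[1] << 8 | p[2] << 16 | (uint32_t)p[3] << 24);

        unsigned long long sign_extended = (unsigned long long)as_int;
        unsigned long long zero_extended = (unsigned long long)(uint32_t)as_int;

        printf("sign-extended: %#llx\n", sign_extended);  /* 0xffffffffdeadbeef */
        printf("zero-extended: %#llx\n", zero_extended);  /* 0xdeadbeef */
        return 0;
}
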
diff --git a/include/asm-avr32/pgtable.h b/include/asm-avr32/pgtable.h index 018f6e2a0242..3ae7b548fce7 100644 --- a/include/asm-avr32/pgtable.h +++ b/include/asm-avr32/pgtable.h | |||
@@ -157,6 +157,7 @@ extern struct page *empty_zero_page; | |||
157 | #define _PAGE_S(x) _PAGE_NORMAL(x) | 157 | #define _PAGE_S(x) _PAGE_NORMAL(x) |
158 | 158 | ||
159 | #define PAGE_COPY _PAGE_P(PAGE_WRITE | PAGE_READ) | 159 | #define PAGE_COPY _PAGE_P(PAGE_WRITE | PAGE_READ) |
160 | #define PAGE_SHARED _PAGE_S(PAGE_WRITE | PAGE_READ) | ||
160 | 161 | ||
161 | #ifndef __ASSEMBLY__ | 162 | #ifndef __ASSEMBLY__ |
162 | /* | 163 | /* |
diff --git a/include/asm-blackfin/gptimers.h b/include/asm-blackfin/gptimers.h index 8265ea473d5b..4f318f1fd2d9 100644 --- a/include/asm-blackfin/gptimers.h +++ b/include/asm-blackfin/gptimers.h | |||
@@ -1,12 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm/bf5xx_timers.h | 2 | * gptimers.h - Blackfin General Purpose Timer structs/defines/prototypes |
3 | * | ||
4 | * This file contains the major Data structures and constants | ||
5 | * used for General Purpose Timer Implementation in BF5xx | ||
6 | * | 3 | * |
4 | * Copyright (c) 2005-2008 Analog Devices Inc. | ||
7 | * Copyright (C) 2005 John DeHority | 5 | * Copyright (C) 2005 John DeHority |
8 | * Copyright (C) 2006 Hella Aglaia GmbH (awe@aglaia-gmbh.de) | 6 | * Copyright (C) 2006 Hella Aglaia GmbH (awe@aglaia-gmbh.de) |
9 | * | 7 | * |
8 | * Licensed under the GPL-2. | ||
10 | */ | 9 | */ |
11 | 10 | ||
12 | #ifndef _BLACKFIN_TIMERS_H_ | 11 | #ifndef _BLACKFIN_TIMERS_H_ |
diff --git a/include/asm-blackfin/irq.h b/include/asm-blackfin/irq.h index 65480dab244e..86b67834354d 100644 --- a/include/asm-blackfin/irq.h +++ b/include/asm-blackfin/irq.h | |||
@@ -67,4 +67,6 @@ static __inline__ int irq_canonicalize(int irq) | |||
67 | #define NO_IRQ ((unsigned int)(-1)) | 67 | #define NO_IRQ ((unsigned int)(-1)) |
68 | #endif | 68 | #endif |
69 | 69 | ||
70 | #define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1)) | ||
71 | |||
70 | #endif /* _BFIN_IRQ_H_ */ | 72 | #endif /* _BFIN_IRQ_H_ */ |
diff --git a/include/asm-blackfin/mach-bf527/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf527/bfin_serial_5xx.h index 15dbc21eed8b..c0694ecd2ecd 100644 --- a/include/asm-blackfin/mach-bf527/bfin_serial_5xx.h +++ b/include/asm-blackfin/mach-bf527/bfin_serial_5xx.h | |||
@@ -23,7 +23,6 @@ | |||
23 | #define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH)) | 23 | #define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH)) |
24 | #define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR)) | 24 | #define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR)) |
25 | #define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR)) | 25 | #define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR)) |
26 | #define UART_GET_LSR(uart) bfin_read16(((uart)->port.membase + OFFSET_LSR)) | ||
27 | #define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL)) | 26 | #define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL)) |
28 | 27 | ||
29 | #define UART_PUT_CHAR(uart, v) bfin_write16(((uart)->port.membase + OFFSET_THR), v) | 28 | #define UART_PUT_CHAR(uart, v) bfin_write16(((uart)->port.membase + OFFSET_THR), v) |
@@ -58,6 +57,7 @@ | |||
58 | struct bfin_serial_port { | 57 | struct bfin_serial_port { |
59 | struct uart_port port; | 58 | struct uart_port port; |
60 | unsigned int old_status; | 59 | unsigned int old_status; |
60 | unsigned int lsr; | ||
61 | #ifdef CONFIG_SERIAL_BFIN_DMA | 61 | #ifdef CONFIG_SERIAL_BFIN_DMA |
62 | int tx_done; | 62 | int tx_done; |
63 | int tx_count; | 63 | int tx_count; |
@@ -67,15 +67,31 @@ struct bfin_serial_port { | |||
67 | unsigned int tx_dma_channel; | 67 | unsigned int tx_dma_channel; |
68 | unsigned int rx_dma_channel; | 68 | unsigned int rx_dma_channel; |
69 | struct work_struct tx_dma_workqueue; | 69 | struct work_struct tx_dma_workqueue; |
70 | #else | ||
71 | struct work_struct cts_workqueue; | ||
72 | #endif | 70 | #endif |
73 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | 71 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS |
72 | struct work_struct cts_workqueue; | ||
74 | int cts_pin; | 73 | int cts_pin; |
75 | int rts_pin; | 74 | int rts_pin; |
76 | #endif | 75 | #endif |
77 | }; | 76 | }; |
78 | 77 | ||
78 | /* The hardware clears the LSR bits upon read, so we need to cache | ||
79 | * some of the more fun bits in software so they don't get lost | ||
80 | * when checking the LSR in other code paths (TX). | ||
81 | */ | ||
82 | static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart) | ||
83 | { | ||
84 | unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR); | ||
85 | uart->lsr |= (lsr & (BI|FE|PE|OE)); | ||
86 | return lsr | uart->lsr; | ||
87 | } | ||
88 | |||
89 | static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart) | ||
90 | { | ||
91 | uart->lsr = 0; | ||
92 | bfin_write16(uart->port.membase + OFFSET_LSR, -1); | ||
93 | } | ||
94 | |||
79 | struct bfin_serial_port bfin_serial_ports[NR_PORTS]; | 95 | struct bfin_serial_port bfin_serial_ports[NR_PORTS]; |
80 | struct bfin_serial_res { | 96 | struct bfin_serial_res { |
81 | unsigned long uart_base_addr; | 97 | unsigned long uart_base_addr; |
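
The UART_GET_LSR()/UART_CLEAR_LSR() pair added above deals with a read-to-clear register: each hardware read wipes the error bits, so the driver accumulates them in uart->lsr until a code path explicitly clears them. A small simulation of the same pattern; the "hardware" is just a variable and the BI/FE/PE/OE values are illustrative:

#include <stdio.h>

#define BI 0x10
#define FE 0x08
#define PE 0x04
#define OE 0x02

static unsigned int hw_lsr;                     /* pretend register: read-to-clear */

static unsigned int hw_read_lsr(void)
{
        unsigned int v = hw_lsr;
        hw_lsr = 0;                             /* hardware clears on read */
        return v;
}

struct port { unsigned int lsr; };              /* software cache, like bfin_serial_port.lsr */

static unsigned int get_lsr(struct port *p)
{
        unsigned int lsr = hw_read_lsr();
        p->lsr |= lsr & (BI | FE | PE | OE);    /* remember sticky error bits */
        return lsr | p->lsr;
}

static void clear_lsr(struct port *p)
{
        p->lsr = 0;
        hw_read_lsr();                          /* discard whatever is latched */
}

int main(void)
{
        struct port p = { 0 };

        hw_lsr = OE;                            /* an overrun happens */
        printf("rx path sees: %#x\n", get_lsr(&p));   /* OE reported */
        printf("tx path sees: %#x\n", get_lsr(&p));   /* still OE, thanks to the cache */
        clear_lsr(&p);
        printf("after clear:  %#x\n", get_lsr(&p));   /* 0 */
        return 0;
}
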
diff --git a/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h index 7871d4313f49..b6f513bee56e 100644 --- a/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h +++ b/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h | |||
@@ -23,7 +23,6 @@ | |||
23 | #define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH)) | 23 | #define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH)) |
24 | #define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR)) | 24 | #define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR)) |
25 | #define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR)) | 25 | #define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR)) |
26 | #define UART_GET_LSR(uart) bfin_read16(((uart)->port.membase + OFFSET_LSR)) | ||
27 | #define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL)) | 26 | #define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL)) |
28 | 27 | ||
29 | #define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v) | 28 | #define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v) |
@@ -46,6 +45,7 @@ | |||
46 | struct bfin_serial_port { | 45 | struct bfin_serial_port { |
47 | struct uart_port port; | 46 | struct uart_port port; |
48 | unsigned int old_status; | 47 | unsigned int old_status; |
48 | unsigned int lsr; | ||
49 | #ifdef CONFIG_SERIAL_BFIN_DMA | 49 | #ifdef CONFIG_SERIAL_BFIN_DMA |
50 | int tx_done; | 50 | int tx_done; |
51 | int tx_count; | 51 | int tx_count; |
@@ -56,14 +56,34 @@ struct bfin_serial_port { | |||
56 | unsigned int rx_dma_channel; | 56 | unsigned int rx_dma_channel; |
57 | struct work_struct tx_dma_workqueue; | 57 | struct work_struct tx_dma_workqueue; |
58 | #else | 58 | #else |
59 | struct work_struct cts_workqueue; | 59 | # if ANOMALY_05000230 |
60 | unsigned int anomaly_threshold; | ||
61 | # endif | ||
60 | #endif | 62 | #endif |
61 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | 63 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS |
64 | struct work_struct cts_workqueue; | ||
62 | int cts_pin; | 65 | int cts_pin; |
63 | int rts_pin; | 66 | int rts_pin; |
64 | #endif | 67 | #endif |
65 | }; | 68 | }; |
66 | 69 | ||
70 | /* The hardware clears the LSR bits upon read, so we need to cache | ||
71 | * some of the more fun bits in software so they don't get lost | ||
72 | * when checking the LSR in other code paths (TX). | ||
73 | */ | ||
74 | static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart) | ||
75 | { | ||
76 | unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR); | ||
77 | uart->lsr |= (lsr & (BI|FE|PE|OE)); | ||
78 | return lsr | uart->lsr; | ||
79 | } | ||
80 | |||
81 | static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart) | ||
82 | { | ||
83 | uart->lsr = 0; | ||
84 | bfin_write16(uart->port.membase + OFFSET_LSR, -1); | ||
85 | } | ||
86 | |||
67 | struct bfin_serial_port bfin_serial_ports[NR_PORTS]; | 87 | struct bfin_serial_port bfin_serial_ports[NR_PORTS]; |
68 | struct bfin_serial_res { | 88 | struct bfin_serial_res { |
69 | unsigned long uart_base_addr; | 89 | unsigned long uart_base_addr; |
diff --git a/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h index 86e45c379838..8fc672d31057 100644 --- a/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h +++ b/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h | |||
@@ -23,7 +23,6 @@ | |||
23 | #define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH)) | 23 | #define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH)) |
24 | #define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR)) | 24 | #define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR)) |
25 | #define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR)) | 25 | #define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR)) |
26 | #define UART_GET_LSR(uart) bfin_read16(((uart)->port.membase + OFFSET_LSR)) | ||
27 | #define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL)) | 26 | #define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL)) |
28 | 27 | ||
29 | #define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v) | 28 | #define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v) |
@@ -58,6 +57,7 @@ | |||
58 | struct bfin_serial_port { | 57 | struct bfin_serial_port { |
59 | struct uart_port port; | 58 | struct uart_port port; |
60 | unsigned int old_status; | 59 | unsigned int old_status; |
60 | unsigned int lsr; | ||
61 | #ifdef CONFIG_SERIAL_BFIN_DMA | 61 | #ifdef CONFIG_SERIAL_BFIN_DMA |
62 | int tx_done; | 62 | int tx_done; |
63 | int tx_count; | 63 | int tx_count; |
@@ -67,15 +67,31 @@ struct bfin_serial_port { | |||
67 | unsigned int tx_dma_channel; | 67 | unsigned int tx_dma_channel; |
68 | unsigned int rx_dma_channel; | 68 | unsigned int rx_dma_channel; |
69 | struct work_struct tx_dma_workqueue; | 69 | struct work_struct tx_dma_workqueue; |
70 | #else | ||
71 | struct work_struct cts_workqueue; | ||
72 | #endif | 70 | #endif |
73 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | 71 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS |
72 | struct work_struct cts_workqueue; | ||
74 | int cts_pin; | 73 | int cts_pin; |
75 | int rts_pin; | 74 | int rts_pin; |
76 | #endif | 75 | #endif |
77 | }; | 76 | }; |
78 | 77 | ||
78 | /* The hardware clears the LSR bits upon read, so we need to cache | ||
79 | * some of the more fun bits in software so they don't get lost | ||
80 | * when checking the LSR in other code paths (TX). | ||
81 | */ | ||
82 | static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart) | ||
83 | { | ||
84 | unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR); | ||
85 | uart->lsr |= (lsr & (BI|FE|PE|OE)); | ||
86 | return lsr | uart->lsr; | ||
87 | } | ||
88 | |||
89 | static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart) | ||
90 | { | ||
91 | uart->lsr = 0; | ||
92 | bfin_write16(uart->port.membase + OFFSET_LSR, -1); | ||
93 | } | ||
94 | |||
79 | struct bfin_serial_port bfin_serial_ports[NR_PORTS]; | 95 | struct bfin_serial_port bfin_serial_ports[NR_PORTS]; |
80 | struct bfin_serial_res { | 96 | struct bfin_serial_res { |
81 | unsigned long uart_base_addr; | 97 | unsigned long uart_base_addr; |
diff --git a/include/asm-blackfin/mach-bf548/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf548/bfin_serial_5xx.h index 3770aa38ee9f..7e6339f62a50 100644 --- a/include/asm-blackfin/mach-bf548/bfin_serial_5xx.h +++ b/include/asm-blackfin/mach-bf548/bfin_serial_5xx.h | |||
@@ -24,6 +24,8 @@ | |||
24 | #define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR)) | 24 | #define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR)) |
25 | #define UART_GET_LSR(uart) bfin_read16(((uart)->port.membase + OFFSET_LSR)) | 25 | #define UART_GET_LSR(uart) bfin_read16(((uart)->port.membase + OFFSET_LSR)) |
26 | #define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL)) | 26 | #define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL)) |
27 | #define UART_GET_MSR(uart) bfin_read16(((uart)->port.membase + OFFSET_MSR)) | ||
28 | #define UART_GET_MCR(uart) bfin_read16(((uart)->port.membase + OFFSET_MCR)) | ||
27 | 29 | ||
28 | #define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v) | 30 | #define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v) |
29 | #define UART_PUT_DLL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLL),v) | 31 | #define UART_PUT_DLL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLL),v) |
@@ -32,7 +34,9 @@ | |||
32 | #define UART_PUT_DLH(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLH),v) | 34 | #define UART_PUT_DLH(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLH),v) |
33 | #define UART_PUT_LSR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LSR),v) | 35 | #define UART_PUT_LSR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LSR),v) |
34 | #define UART_PUT_LCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LCR),v) | 36 | #define UART_PUT_LCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LCR),v) |
37 | #define UART_CLEAR_LSR(uart) bfin_write16(((uart)->port.membase + OFFSET_LSR), -1) | ||
35 | #define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v) | 38 | #define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v) |
39 | #define UART_PUT_MCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_MCR),v) | ||
36 | 40 | ||
37 | #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS) | 41 | #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS) |
38 | # define CONFIG_SERIAL_BFIN_CTSRTS | 42 | # define CONFIG_SERIAL_BFIN_CTSRTS |
@@ -68,10 +72,9 @@ struct bfin_serial_port { | |||
68 | unsigned int tx_dma_channel; | 72 | unsigned int tx_dma_channel; |
69 | unsigned int rx_dma_channel; | 73 | unsigned int rx_dma_channel; |
70 | struct work_struct tx_dma_workqueue; | 74 | struct work_struct tx_dma_workqueue; |
71 | #else | ||
72 | struct work_struct cts_workqueue; | ||
73 | #endif | 75 | #endif |
74 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | 76 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS |
77 | struct work_struct cts_workqueue; | ||
75 | int cts_pin; | 78 | int cts_pin; |
76 | int rts_pin; | 79 | int rts_pin; |
77 | #endif | 80 | #endif |
diff --git a/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h index 7871d4313f49..b6f513bee56e 100644 --- a/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h +++ b/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h | |||
@@ -23,7 +23,6 @@ | |||
23 | #define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH)) | 23 | #define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH)) |
24 | #define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR)) | 24 | #define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR)) |
25 | #define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR)) | 25 | #define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR)) |
26 | #define UART_GET_LSR(uart) bfin_read16(((uart)->port.membase + OFFSET_LSR)) | ||
27 | #define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL)) | 26 | #define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL)) |
28 | 27 | ||
29 | #define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v) | 28 | #define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v) |
@@ -46,6 +45,7 @@ | |||
46 | struct bfin_serial_port { | 45 | struct bfin_serial_port { |
47 | struct uart_port port; | 46 | struct uart_port port; |
48 | unsigned int old_status; | 47 | unsigned int old_status; |
48 | unsigned int lsr; | ||
49 | #ifdef CONFIG_SERIAL_BFIN_DMA | 49 | #ifdef CONFIG_SERIAL_BFIN_DMA |
50 | int tx_done; | 50 | int tx_done; |
51 | int tx_count; | 51 | int tx_count; |
@@ -56,14 +56,34 @@ struct bfin_serial_port { | |||
56 | unsigned int rx_dma_channel; | 56 | unsigned int rx_dma_channel; |
57 | struct work_struct tx_dma_workqueue; | 57 | struct work_struct tx_dma_workqueue; |
58 | #else | 58 | #else |
59 | struct work_struct cts_workqueue; | 59 | # if ANOMALY_05000230 |
60 | unsigned int anomaly_threshold; | ||
61 | # endif | ||
60 | #endif | 62 | #endif |
61 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | 63 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS |
64 | struct work_struct cts_workqueue; | ||
62 | int cts_pin; | 65 | int cts_pin; |
63 | int rts_pin; | 66 | int rts_pin; |
64 | #endif | 67 | #endif |
65 | }; | 68 | }; |
66 | 69 | ||
70 | /* The hardware clears the LSR bits upon read, so we need to cache | ||
71 | * some of the more fun bits in software so they don't get lost | ||
72 | * when checking the LSR in other code paths (TX). | ||
73 | */ | ||
74 | static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart) | ||
75 | { | ||
76 | unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR); | ||
77 | uart->lsr |= (lsr & (BI|FE|PE|OE)); | ||
78 | return lsr | uart->lsr; | ||
79 | } | ||
80 | |||
81 | static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart) | ||
82 | { | ||
83 | uart->lsr = 0; | ||
84 | bfin_write16(uart->port.membase + OFFSET_LSR, -1); | ||
85 | } | ||
86 | |||
67 | struct bfin_serial_port bfin_serial_ports[NR_PORTS]; | 87 | struct bfin_serial_port bfin_serial_ports[NR_PORTS]; |
68 | struct bfin_serial_res { | 88 | struct bfin_serial_res { |
69 | unsigned long uart_base_addr; | 89 | unsigned long uart_base_addr; |
diff --git a/include/asm-blackfin/mach-bf561/blackfin.h b/include/asm-blackfin/mach-bf561/blackfin.h index 362617f93845..3a16df2c86d8 100644 --- a/include/asm-blackfin/mach-bf561/blackfin.h +++ b/include/asm-blackfin/mach-bf561/blackfin.h | |||
@@ -49,7 +49,8 @@ | |||
49 | #define bfin_read_FIO_INEN() bfin_read_FIO0_INEN() | 49 | #define bfin_read_FIO_INEN() bfin_read_FIO0_INEN() |
50 | #define bfin_write_FIO_INEN(val) bfin_write_FIO0_INEN(val) | 50 | #define bfin_write_FIO_INEN(val) bfin_write_FIO0_INEN(val) |
51 | 51 | ||
52 | 52 | #define SIC_IWR0 SICA_IWR0 | |
53 | #define SIC_IWR1 SICA_IWR1 | ||
53 | #define SIC_IAR0 SICA_IAR0 | 54 | #define SIC_IAR0 SICA_IAR0 |
54 | #define bfin_write_SIC_IMASK0 bfin_write_SICA_IMASK0 | 55 | #define bfin_write_SIC_IMASK0 bfin_write_SICA_IMASK0 |
55 | #define bfin_write_SIC_IMASK1 bfin_write_SICA_IMASK1 | 56 | #define bfin_write_SIC_IMASK1 bfin_write_SICA_IMASK1 |
diff --git a/include/asm-blackfin/mach-bf561/cdefBF561.h b/include/asm-blackfin/mach-bf561/cdefBF561.h index d667816486c0..1bc8d2f89ccc 100644 --- a/include/asm-blackfin/mach-bf561/cdefBF561.h +++ b/include/asm-blackfin/mach-bf561/cdefBF561.h | |||
@@ -559,6 +559,7 @@ static __inline__ void bfin_write_VR_CTL(unsigned int val) | |||
559 | #define bfin_write_PPI0_CONTROL(val) bfin_write16(PPI0_CONTROL,val) | 559 | #define bfin_write_PPI0_CONTROL(val) bfin_write16(PPI0_CONTROL,val) |
560 | #define bfin_read_PPI0_STATUS() bfin_read16(PPI0_STATUS) | 560 | #define bfin_read_PPI0_STATUS() bfin_read16(PPI0_STATUS) |
561 | #define bfin_write_PPI0_STATUS(val) bfin_write16(PPI0_STATUS,val) | 561 | #define bfin_write_PPI0_STATUS(val) bfin_write16(PPI0_STATUS,val) |
562 | #define bfin_clear_PPI0_STATUS() bfin_read_PPI0_STATUS() | ||
562 | #define bfin_read_PPI0_COUNT() bfin_read16(PPI0_COUNT) | 563 | #define bfin_read_PPI0_COUNT() bfin_read16(PPI0_COUNT) |
563 | #define bfin_write_PPI0_COUNT(val) bfin_write16(PPI0_COUNT,val) | 564 | #define bfin_write_PPI0_COUNT(val) bfin_write16(PPI0_COUNT,val) |
564 | #define bfin_read_PPI0_DELAY() bfin_read16(PPI0_DELAY) | 565 | #define bfin_read_PPI0_DELAY() bfin_read16(PPI0_DELAY) |
@@ -570,6 +571,7 @@ static __inline__ void bfin_write_VR_CTL(unsigned int val) | |||
570 | #define bfin_write_PPI1_CONTROL(val) bfin_write16(PPI1_CONTROL,val) | 571 | #define bfin_write_PPI1_CONTROL(val) bfin_write16(PPI1_CONTROL,val) |
571 | #define bfin_read_PPI1_STATUS() bfin_read16(PPI1_STATUS) | 572 | #define bfin_read_PPI1_STATUS() bfin_read16(PPI1_STATUS) |
572 | #define bfin_write_PPI1_STATUS(val) bfin_write16(PPI1_STATUS,val) | 573 | #define bfin_write_PPI1_STATUS(val) bfin_write16(PPI1_STATUS,val) |
574 | #define bfin_clear_PPI1_STATUS() bfin_read_PPI1_STATUS() | ||
573 | #define bfin_read_PPI1_COUNT() bfin_read16(PPI1_COUNT) | 575 | #define bfin_read_PPI1_COUNT() bfin_read16(PPI1_COUNT) |
574 | #define bfin_write_PPI1_COUNT(val) bfin_write16(PPI1_COUNT,val) | 576 | #define bfin_write_PPI1_COUNT(val) bfin_write16(PPI1_COUNT,val) |
575 | #define bfin_read_PPI1_DELAY() bfin_read16(PPI1_DELAY) | 577 | #define bfin_read_PPI1_DELAY() bfin_read16(PPI1_DELAY) |
diff --git a/include/asm-cris/uaccess.h b/include/asm-cris/uaccess.h index 69d48a2dc8e1..ea11eaf0e922 100644 --- a/include/asm-cris/uaccess.h +++ b/include/asm-cris/uaccess.h | |||
@@ -1,43 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Authors: Bjorn Wesen (bjornw@axis.com) | 2 | * Authors: Bjorn Wesen (bjornw@axis.com) |
3 | * Hans-Peter Nilsson (hp@axis.com) | 3 | * Hans-Peter Nilsson (hp@axis.com) |
4 | * | ||
5 | * $Log: uaccess.h,v $ | ||
6 | * Revision 1.8 2001/10/29 13:01:48 bjornw | ||
7 | * Removed unused variable tmp2 in strnlen_user | ||
8 | * | ||
9 | * Revision 1.7 2001/10/02 12:44:52 hp | ||
10 | * Add support for 64-bit put_user/get_user | ||
11 | * | ||
12 | * Revision 1.6 2001/10/01 14:51:17 bjornw | ||
13 | * Added register prefixes and removed underscores | ||
14 | * | ||
15 | * Revision 1.5 2000/10/25 03:33:21 hp | ||
16 | * - Provide implementation for everything else but get_user and put_user; | ||
17 | * copying inline to/from user for constant length 0..16, 20, 24, and | ||
18 | * clearing for 0..4, 8, 12, 16, 20, 24, strncpy_from_user and strnlen_user | ||
19 | * always inline. | ||
20 | * - Constraints for destination addr in get_user cannot be memory, only reg. | ||
21 | * - Correct labels for PC at expected fault points. | ||
22 | * - Nits with assembly code. | ||
23 | * - Don't use statement expressions without value; use "do {} while (0)". | ||
24 | * - Return correct values from __generic_... functions. | ||
25 | * | ||
26 | * Revision 1.4 2000/09/12 16:28:25 bjornw | ||
27 | * * Removed comments from the get/put user asm code | ||
28 | * * Constrains for destination addr in put_user cannot be memory, only reg | ||
29 | * | ||
30 | * Revision 1.3 2000/09/12 14:30:20 bjornw | ||
31 | * MAX_ADDR_USER does not exist anymore | ||
32 | * | ||
33 | * Revision 1.2 2000/07/13 15:52:48 bjornw | ||
34 | * New user-access functions | ||
35 | * | ||
36 | * Revision 1.1.1.1 2000/07/10 16:32:31 bjornw | ||
37 | * CRIS architecture, working draft | ||
38 | * | ||
39 | * | ||
40 | * | ||
41 | */ | 4 | */ |
42 | 5 | ||
43 | /* Asm:s have been tweaked (within the domain of correctness) to give | 6 | /* Asm:s have been tweaked (within the domain of correctness) to give |
@@ -209,9 +172,9 @@ extern long __get_user_bad(void); | |||
209 | /* More complex functions. Most are inline, but some call functions that | 172 | /* More complex functions. Most are inline, but some call functions that |
210 | live in lib/usercopy.c */ | 173 | live in lib/usercopy.c */ |
211 | 174 | ||
212 | extern unsigned long __copy_user(void *to, const void *from, unsigned long n); | 175 | extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n); |
213 | extern unsigned long __copy_user_zeroing(void *to, const void *from, unsigned long n); | 176 | extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n); |
214 | extern unsigned long __do_clear_user(void *to, unsigned long n); | 177 | extern unsigned long __do_clear_user(void __user *to, unsigned long n); |
215 | 178 | ||
216 | static inline unsigned long | 179 | static inline unsigned long |
217 | __generic_copy_to_user(void __user *to, const void *from, unsigned long n) | 180 | __generic_copy_to_user(void __user *to, const void *from, unsigned long n) |
@@ -253,7 +216,7 @@ strncpy_from_user(char *dst, const char __user *src, long count) | |||
253 | } | 216 | } |
254 | 217 | ||
255 | 218 | ||
256 | /* Note that if these expand awfully if made into switch constructs, so | 219 | /* Note that these expand awfully if made into switch constructs, so |
257 | don't do that. */ | 220 | don't do that. */ |
258 | 221 | ||
259 | static inline unsigned long | 222 | static inline unsigned long |
@@ -407,19 +370,21 @@ __constant_clear_user(void __user *to, unsigned long n) | |||
407 | */ | 370 | */ |
408 | 371 | ||
409 | static inline unsigned long | 372 | static inline unsigned long |
410 | __generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n) | 373 | __generic_copy_from_user_nocheck(void *to, const void __user *from, |
374 | unsigned long n) | ||
411 | { | 375 | { |
412 | return __copy_user_zeroing(to,from,n); | 376 | return __copy_user_zeroing(to,from,n); |
413 | } | 377 | } |
414 | 378 | ||
415 | static inline unsigned long | 379 | static inline unsigned long |
416 | __generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n) | 380 | __generic_copy_to_user_nocheck(void __user *to, const void *from, |
381 | unsigned long n) | ||
417 | { | 382 | { |
418 | return __copy_user(to,from,n); | 383 | return __copy_user(to,from,n); |
419 | } | 384 | } |
420 | 385 | ||
421 | static inline unsigned long | 386 | static inline unsigned long |
422 | __generic_clear_user_nocheck(void *to, unsigned long n) | 387 | __generic_clear_user_nocheck(void __user *to, unsigned long n) |
423 | { | 388 | { |
424 | return __do_clear_user(to,n); | 389 | return __do_clear_user(to,n); |
425 | } | 390 | } |
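
The prototype changes add __user so sparse can flag code that mixes kernel and user pointers; outside a sparse run the annotation expands to nothing. A stripped-down illustration of the annotation style, with a stub #define so it builds as ordinary user-space C (copy_to_user_sketch is not a real kernel function):

#include <stdio.h>
#include <string.h>

/* In the kernel this is an address_space attribute under sparse and empty
 * otherwise; stubbed out here so the example compiles on its own. */
#define __user

/* Same shape as the fixed CRIS prototypes: the destination is a user pointer. */
static unsigned long copy_to_user_sketch(void __user *to, const void *from, unsigned long n)
{
        /* A real implementation validates the range and handles faults;
         * this sketch just copies. */
        memcpy((void *)to, from, n);
        return 0;                        /* bytes NOT copied */
}

int main(void)
{
        char dst[8];
        unsigned long left = copy_to_user_sketch(dst, "abc", 4);
        printf("copied, %lu bytes left, dst=%s\n", left, dst);
        return 0;
}
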
diff --git a/include/asm-cris/unistd.h b/include/asm-cris/unistd.h index 007cb16a6b5b..76398ef87e9b 100644 --- a/include/asm-cris/unistd.h +++ b/include/asm-cris/unistd.h | |||
@@ -329,12 +329,12 @@ | |||
329 | #define __NR_timerfd_create 322 | 329 | #define __NR_timerfd_create 322 |
330 | #define __NR_eventfd 323 | 330 | #define __NR_eventfd 323 |
331 | #define __NR_fallocate 324 | 331 | #define __NR_fallocate 324 |
332 | #define __NR_timerfd_settime 315 | 332 | #define __NR_timerfd_settime 325 |
333 | #define __NR_timerfd_gettime 316 | 333 | #define __NR_timerfd_gettime 326 |
334 | 334 | ||
335 | #ifdef __KERNEL__ | 335 | #ifdef __KERNEL__ |
336 | 336 | ||
337 | #define NR_syscalls 325 | 337 | #define NR_syscalls 327 |
338 | 338 | ||
339 | #include <asm/arch/unistd.h> | 339 | #include <asm/arch/unistd.h> |
340 | 340 | ||
diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild index 4a1e48b9f403..eb24a3f47caa 100644 --- a/include/asm-ia64/Kbuild +++ b/include/asm-ia64/Kbuild | |||
@@ -3,7 +3,6 @@ include include/asm-generic/Kbuild.asm | |||
3 | header-y += break.h | 3 | header-y += break.h |
4 | header-y += fpu.h | 4 | header-y += fpu.h |
5 | header-y += fpswa.h | 5 | header-y += fpswa.h |
6 | header-y += gcc_intrin.h | ||
7 | header-y += ia64regs.h | 6 | header-y += ia64regs.h |
8 | header-y += intel_intrin.h | 7 | header-y += intel_intrin.h |
9 | header-y += intrinsics.h | 8 | header-y += intrinsics.h |
@@ -12,5 +11,6 @@ header-y += ptrace_offsets.h | |||
12 | header-y += rse.h | 11 | header-y += rse.h |
13 | header-y += ucontext.h | 12 | header-y += ucontext.h |
14 | 13 | ||
14 | unifdef-y += gcc_intrin.h | ||
15 | unifdef-y += perfmon.h | 15 | unifdef-y += perfmon.h |
16 | unifdef-y += ustack.h | 16 | unifdef-y += ustack.h |
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h index 7e6e3779670a..76366dc9c1a0 100644 --- a/include/asm-ia64/hw_irq.h +++ b/include/asm-ia64/hw_irq.h | |||
@@ -93,6 +93,9 @@ extern __u8 isa_irq_to_vector_map[16]; | |||
93 | struct irq_cfg { | 93 | struct irq_cfg { |
94 | ia64_vector vector; | 94 | ia64_vector vector; |
95 | cpumask_t domain; | 95 | cpumask_t domain; |
96 | cpumask_t old_domain; | ||
97 | unsigned move_cleanup_count; | ||
98 | u8 move_in_progress : 1; | ||
96 | }; | 99 | }; |
97 | extern spinlock_t vector_lock; | 100 | extern spinlock_t vector_lock; |
98 | extern struct irq_cfg irq_cfg[NR_IRQS]; | 101 | extern struct irq_cfg irq_cfg[NR_IRQS]; |
@@ -106,12 +109,19 @@ extern int assign_irq_vector (int irq); /* allocate a free vector */ | |||
106 | extern void free_irq_vector (int vector); | 109 | extern void free_irq_vector (int vector); |
107 | extern int reserve_irq_vector (int vector); | 110 | extern int reserve_irq_vector (int vector); |
108 | extern void __setup_vector_irq(int cpu); | 111 | extern void __setup_vector_irq(int cpu); |
109 | extern int reassign_irq_vector(int irq, int cpu); | ||
110 | extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect); | 112 | extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect); |
111 | extern void register_percpu_irq (ia64_vector vec, struct irqaction *action); | 113 | extern void register_percpu_irq (ia64_vector vec, struct irqaction *action); |
112 | extern int check_irq_used (int irq); | 114 | extern int check_irq_used (int irq); |
113 | extern void destroy_and_reserve_irq (unsigned int irq); | 115 | extern void destroy_and_reserve_irq (unsigned int irq); |
114 | 116 | ||
117 | #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)) | ||
118 | extern int irq_prepare_move(int irq, int cpu); | ||
119 | extern void irq_complete_move(unsigned int irq); | ||
120 | #else | ||
121 | static inline int irq_prepare_move(int irq, int cpu) { return 0; } | ||
122 | static inline void irq_complete_move(unsigned int irq) {} | ||
123 | #endif | ||
124 | |||
115 | static inline void ia64_resend_irq(unsigned int vector) | 125 | static inline void ia64_resend_irq(unsigned int vector) |
116 | { | 126 | { |
117 | platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0); | 127 | platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0); |
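
The hw_irq.h hunk provides real irq_prepare_move()/irq_complete_move() only for SMP GENERIC/DIG builds and no-op static inline stubs otherwise, so callers need no #ifdefs of their own. The general shape of that pattern, reduced to a stand-alone example with a made-up HAVE_IRQ_MIGRATION toggle:

#include <stdio.h>

/* #define HAVE_IRQ_MIGRATION 1 */      /* enable the "real" implementation */

#ifdef HAVE_IRQ_MIGRATION
extern int  irq_prepare_move(int irq, int cpu);   /* would live in a .c file */
extern void irq_complete_move(unsigned int irq);
#else
static inline int  irq_prepare_move(int irq, int cpu) { (void)irq; (void)cpu; return 0; }
static inline void irq_complete_move(unsigned int irq) { (void)irq; }
#endif

int main(void)
{
        /* The caller is identical either way -- no #ifdef at the call site. */
        if (irq_prepare_move(10, 2) == 0)
                printf("irq 10 may move to cpu 2\n");
        irq_complete_move(10);
        return 0;
}
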
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h index a93ce9ef07ff..8233b3a964c6 100644 --- a/include/asm-ia64/kprobes.h +++ b/include/asm-ia64/kprobes.h | |||
@@ -82,7 +82,6 @@ struct kprobe_ctlblk { | |||
82 | struct prev_kprobe prev_kprobe[ARCH_PREV_KPROBE_SZ]; | 82 | struct prev_kprobe prev_kprobe[ARCH_PREV_KPROBE_SZ]; |
83 | }; | 83 | }; |
84 | 84 | ||
85 | #define ARCH_SUPPORTS_KRETPROBES | ||
86 | #define kretprobe_blacklist_size 0 | 85 | #define kretprobe_blacklist_size 0 |
87 | 86 | ||
88 | #define SLOT0_OPCODE_SHIFT (37) | 87 | #define SLOT0_OPCODE_SHIFT (37) |
@@ -122,10 +121,6 @@ extern int kprobes_fault_handler(struct pt_regs *regs, int trapnr); | |||
122 | extern int kprobe_exceptions_notify(struct notifier_block *self, | 121 | extern int kprobe_exceptions_notify(struct notifier_block *self, |
123 | unsigned long val, void *data); | 122 | unsigned long val, void *data); |
124 | 123 | ||
125 | /* ia64 does not need this */ | ||
126 | static inline void jprobe_return(void) | ||
127 | { | ||
128 | } | ||
129 | extern void invalidate_stacked_regs(void); | 124 | extern void invalidate_stacked_regs(void); |
130 | extern void flush_register_stack(void); | 125 | extern void flush_register_stack(void); |
131 | extern void arch_remove_kprobe(struct kprobe *p); | 126 | extern void arch_remove_kprobe(struct kprobe *p); |
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h index 2251118894ae..f4904db3b057 100644 --- a/include/asm-ia64/sal.h +++ b/include/asm-ia64/sal.h | |||
@@ -807,6 +807,10 @@ static inline s64 | |||
807 | ia64_sal_physical_id_info(u16 *splid) | 807 | ia64_sal_physical_id_info(u16 *splid) |
808 | { | 808 | { |
809 | struct ia64_sal_retval isrv; | 809 | struct ia64_sal_retval isrv; |
810 | |||
811 | if (sal_revision < SAL_VERSION_CODE(3,2)) | ||
812 | return -1; | ||
813 | |||
810 | SAL_CALL(isrv, SAL_PHYSICAL_ID_INFO, 0, 0, 0, 0, 0, 0, 0); | 814 | SAL_CALL(isrv, SAL_PHYSICAL_ID_INFO, 0, 0, 0, 0, 0, 0, 0); |
811 | if (splid) | 815 | if (splid) |
812 | *splid = isrv.v0; | 816 | *splid = isrv.v0; |
diff --git a/include/asm-m68k/unistd.h b/include/asm-m68k/unistd.h index 87f77b119317..e72ba563f102 100644 --- a/include/asm-m68k/unistd.h +++ b/include/asm-m68k/unistd.h | |||
@@ -320,13 +320,15 @@ | |||
320 | #define __NR_epoll_pwait 315 | 320 | #define __NR_epoll_pwait 315 |
321 | #define __NR_utimensat 316 | 321 | #define __NR_utimensat 316 |
322 | #define __NR_signalfd 317 | 322 | #define __NR_signalfd 317 |
323 | #define __NR_timerfd 318 | 323 | #define __NR_timerfd_create 318 |
324 | #define __NR_eventfd 319 | 324 | #define __NR_eventfd 319 |
325 | #define __NR_fallocate 320 | 325 | #define __NR_fallocate 320 |
326 | #define __NR_timerfd_settime 321 | ||
327 | #define __NR_timerfd_gettime 322 | ||
326 | 328 | ||
327 | #ifdef __KERNEL__ | 329 | #ifdef __KERNEL__ |
328 | 330 | ||
329 | #define NR_syscalls 321 | 331 | #define NR_syscalls 323 |
330 | 332 | ||
331 | #define __ARCH_WANT_IPC_PARSE_VERSION | 333 | #define __ARCH_WANT_IPC_PARSE_VERSION |
332 | #define __ARCH_WANT_OLD_READDIR | 334 | #define __ARCH_WANT_OLD_READDIR |
diff --git a/include/asm-m68knommu/machdep.h b/include/asm-m68knommu/machdep.h index 1cf26d240d83..de9f47a51cc2 100644 --- a/include/asm-m68knommu/machdep.h +++ b/include/asm-m68knommu/machdep.h | |||
@@ -21,4 +21,6 @@ extern void (*mach_power_off)( void ); | |||
21 | 21 | ||
22 | extern void config_BSP(char *command, int len); | 22 | extern void config_BSP(char *command, int len); |
23 | 23 | ||
24 | extern void do_IRQ(int irq, struct pt_regs *fp); | ||
25 | |||
24 | #endif /* _M68KNOMMU_MACHDEP_H */ | 26 | #endif /* _M68KNOMMU_MACHDEP_H */ |
diff --git a/include/asm-m68knommu/unistd.h b/include/asm-m68knommu/unistd.h index 27c2f9bb4dbd..4ba98b9c5d79 100644 --- a/include/asm-m68knommu/unistd.h +++ b/include/asm-m68knommu/unistd.h | |||
@@ -321,13 +321,15 @@ | |||
321 | #define __NR_epoll_pwait 315 | 321 | #define __NR_epoll_pwait 315 |
322 | #define __NR_utimensat 316 | 322 | #define __NR_utimensat 316 |
323 | #define __NR_signalfd 317 | 323 | #define __NR_signalfd 317 |
324 | #define __NR_timerfd 318 | 324 | #define __NR_timerfd_create 318 |
325 | #define __NR_eventfd 319 | 325 | #define __NR_eventfd 319 |
326 | #define __NR_fallocate 320 | 326 | #define __NR_fallocate 320 |
327 | #define __NR_timerfd_settime 321 | ||
328 | #define __NR_timerfd_gettime 322 | ||
327 | 329 | ||
328 | #ifdef __KERNEL__ | 330 | #ifdef __KERNEL__ |
329 | 331 | ||
330 | #define NR_syscalls 321 | 332 | #define NR_syscalls 323 |
331 | 333 | ||
332 | #define __ARCH_WANT_IPC_PARSE_VERSION | 334 | #define __ARCH_WANT_IPC_PARSE_VERSION |
333 | #define __ARCH_WANT_OLD_READDIR | 335 | #define __ARCH_WANT_OLD_READDIR |
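Note on the two m68k unistd.h updates above: the arithmetic behind the NR_syscalls bump is simply that the two new entries make __NR_timerfd_gettime = 322 the highest syscall number, so the table size becomes 322 + 1 = 323; previously __NR_fallocate = 320 was the highest, giving the old 320 + 1 = 321.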
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h index afabad230dbb..d0e7701fa1f6 100644 --- a/include/asm-powerpc/kprobes.h +++ b/include/asm-powerpc/kprobes.h | |||
@@ -80,7 +80,6 @@ typedef unsigned int kprobe_opcode_t; | |||
80 | #define is_trap(instr) (IS_TW(instr) || IS_TWI(instr)) | 80 | #define is_trap(instr) (IS_TW(instr) || IS_TWI(instr)) |
81 | #endif | 81 | #endif |
82 | 82 | ||
83 | #define ARCH_SUPPORTS_KRETPROBES | ||
84 | #define flush_insn_slot(p) do { } while (0) | 83 | #define flush_insn_slot(p) do { } while (0) |
85 | #define kretprobe_blacklist_size 0 | 84 | #define kretprobe_blacklist_size 0 |
86 | 85 | ||
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h index 0d6238987df8..edc0cfd7f6e2 100644 --- a/include/asm-powerpc/reg.h +++ b/include/asm-powerpc/reg.h | |||
@@ -153,6 +153,9 @@ | |||
153 | #define CTRL_RUNLATCH 0x1 | 153 | #define CTRL_RUNLATCH 0x1 |
154 | #define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */ | 154 | #define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */ |
155 | #define DABR_TRANSLATION (1UL << 2) | 155 | #define DABR_TRANSLATION (1UL << 2) |
156 | #define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */ | ||
157 | #define DABRX_USER (1UL << 0) | ||
158 | #define DABRX_KERNEL (1UL << 1) | ||
156 | #define SPRN_DAR 0x013 /* Data Address Register */ | 159 | #define SPRN_DAR 0x013 /* Data Address Register */ |
157 | #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ | 160 | #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ |
158 | #define DSISR_NOHPTE 0x40000000 /* no translation found */ | 161 | #define DSISR_NOHPTE 0x40000000 /* no translation found */ |
diff --git a/include/asm-s390/kprobes.h b/include/asm-s390/kprobes.h index 948db3d0d05c..330f68caffe4 100644 --- a/include/asm-s390/kprobes.h +++ b/include/asm-s390/kprobes.h | |||
@@ -46,7 +46,6 @@ typedef u16 kprobe_opcode_t; | |||
46 | ? (MAX_STACK_SIZE) \ | 46 | ? (MAX_STACK_SIZE) \ |
47 | : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) | 47 | : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) |
48 | 48 | ||
49 | #define ARCH_SUPPORTS_KRETPROBES | ||
50 | #define kretprobe_blacklist_size 0 | 49 | #define kretprobe_blacklist_size 0 |
51 | 50 | ||
52 | #define KPROBE_SWAP_INST 0x10 | 51 | #define KPROBE_SWAP_INST 0x10 |
diff --git a/include/asm-sh/cpu-sh3/cache.h b/include/asm-sh/cpu-sh3/cache.h index 56bd838b7db4..bee2d81c56bf 100644 --- a/include/asm-sh/cpu-sh3/cache.h +++ b/include/asm-sh/cpu-sh3/cache.h | |||
@@ -35,7 +35,7 @@ | |||
35 | defined(CONFIG_CPU_SUBTYPE_SH7710) || \ | 35 | defined(CONFIG_CPU_SUBTYPE_SH7710) || \ |
36 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | 36 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ |
37 | defined(CONFIG_CPU_SUBTYPE_SH7721) | 37 | defined(CONFIG_CPU_SUBTYPE_SH7721) |
38 | #define CCR3 0xa40000b4 | 38 | #define CCR3_REG 0xa40000b4 |
39 | #define CCR_CACHE_16KB 0x00010000 | 39 | #define CCR_CACHE_16KB 0x00010000 |
40 | #define CCR_CACHE_32KB 0x00020000 | 40 | #define CCR_CACHE_32KB 0x00020000 |
41 | #endif | 41 | #endif |
diff --git a/include/asm-sh/entry-macros.S b/include/asm-sh/entry-macros.S index 500030eae7aa..2dab0b8d9454 100644 --- a/include/asm-sh/entry-macros.S +++ b/include/asm-sh/entry-macros.S | |||
@@ -12,7 +12,7 @@ | |||
12 | not r11, r11 | 12 | not r11, r11 |
13 | stc sr, r10 | 13 | stc sr, r10 |
14 | and r11, r10 | 14 | and r11, r10 |
15 | #ifdef CONFIG_HAS_SR_RB | 15 | #ifdef CONFIG_CPU_HAS_SR_RB |
16 | stc k_g_imask, r11 | 16 | stc k_g_imask, r11 |
17 | or r11, r10 | 17 | or r11, r10 |
18 | #endif | 18 | #endif |
@@ -20,7 +20,7 @@ | |||
20 | .endm | 20 | .endm |
21 | 21 | ||
22 | .macro get_current_thread_info, ti, tmp | 22 | .macro get_current_thread_info, ti, tmp |
23 | #ifdef CONFIG_HAS_SR_RB | 23 | #ifdef CONFIG_CPU_HAS_SR_RB |
24 | stc r7_bank, \ti | 24 | stc r7_bank, \ti |
25 | #else | 25 | #else |
26 | mov #((THREAD_SIZE - 1) >> 10) ^ 0xff, \tmp | 26 | mov #((THREAD_SIZE - 1) >> 10) ^ 0xff, \tmp |
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h index 45e47c159a6e..4e08210cd4c2 100644 --- a/include/asm-sparc/system.h +++ b/include/asm-sparc/system.h | |||
@@ -44,6 +44,8 @@ extern enum sparc_cpu sparc_cpu_model; | |||
44 | 44 | ||
45 | #define SUN4M_NCPUS 4 /* Architectural limit of sun4m. */ | 45 | #define SUN4M_NCPUS 4 /* Architectural limit of sun4m. */ |
46 | 46 | ||
47 | extern char reboot_command[]; | ||
48 | |||
47 | extern struct thread_info *current_set[NR_CPUS]; | 49 | extern struct thread_info *current_set[NR_CPUS]; |
48 | 50 | ||
49 | extern unsigned long empty_bad_page; | 51 | extern unsigned long empty_bad_page; |
diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h index 7237dd87663e..5879d71afdaa 100644 --- a/include/asm-sparc64/kprobes.h +++ b/include/asm-sparc64/kprobes.h | |||
@@ -14,8 +14,6 @@ typedef u32 kprobe_opcode_t; | |||
14 | 14 | ||
15 | #define arch_remove_kprobe(p) do {} while (0) | 15 | #define arch_remove_kprobe(p) do {} while (0) |
16 | 16 | ||
17 | #define ARCH_SUPPORTS_KRETPROBES | ||
18 | |||
19 | #define flush_insn_slot(p) \ | 17 | #define flush_insn_slot(p) \ |
20 | do { flushi(&(p)->ainsn.insn[0]); \ | 18 | do { flushi(&(p)->ainsn.insn[0]); \ |
21 | flushi(&(p)->ainsn.insn[1]); \ | 19 | flushi(&(p)->ainsn.insn[1]); \ |
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h index ed91a5d8d4f0..53eae091a171 100644 --- a/include/asm-sparc64/system.h +++ b/include/asm-sparc64/system.h | |||
@@ -30,6 +30,8 @@ enum sparc_cpu { | |||
30 | #define ARCH_SUN4C_SUN4 0 | 30 | #define ARCH_SUN4C_SUN4 0 |
31 | #define ARCH_SUN4 0 | 31 | #define ARCH_SUN4 0 |
32 | 32 | ||
33 | extern char reboot_command[]; | ||
34 | |||
33 | /* These are here in an effort to more fully work around Spitfire Errata | 35 | /* These are here in an effort to more fully work around Spitfire Errata |
34 | * #51. Essentially, if a memory barrier occurs soon after a mispredicted | 36 | * #51. Essentially, if a memory barrier occurs soon after a mispredicted |
35 | * branch, the chip can stop executing instructions until a trap occurs. | 37 | * branch, the chip can stop executing instructions until a trap occurs. |
diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h index cd9f894dd2d7..c9952ea9f698 100644 --- a/include/asm-x86/futex.h +++ b/include/asm-x86/futex.h | |||
@@ -102,6 +102,13 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
102 | static inline int | 102 | static inline int |
103 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | 103 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) |
104 | { | 104 | { |
105 | |||
106 | #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) | ||
107 | /* Real i386 machines have no cmpxchg instruction */ | ||
108 | if (boot_cpu_data.x86 == 3) | ||
109 | return -ENOSYS; | ||
110 | #endif | ||
111 | |||
105 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 112 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) |
106 | return -EFAULT; | 113 | return -EFAULT; |
107 | 114 | ||
diff --git a/include/asm-x86/kprobes.h b/include/asm-x86/kprobes.h index 143476a3cb52..61ad7b5d142e 100644 --- a/include/asm-x86/kprobes.h +++ b/include/asm-x86/kprobes.h | |||
@@ -42,7 +42,6 @@ typedef u8 kprobe_opcode_t; | |||
42 | : (((unsigned long)current_thread_info()) + THREAD_SIZE \ | 42 | : (((unsigned long)current_thread_info()) + THREAD_SIZE \ |
43 | - (unsigned long)(ADDR))) | 43 | - (unsigned long)(ADDR))) |
44 | 44 | ||
45 | #define ARCH_SUPPORTS_KRETPROBES | ||
46 | #define flush_insn_slot(p) do { } while (0) | 45 | #define flush_insn_slot(p) do { } while (0) |
47 | 46 | ||
48 | extern const int kretprobe_blacklist_size; | 47 | extern const int kretprobe_blacklist_size; |
diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h index 4d9367b72976..9b17571e9bc3 100644 --- a/include/asm-x86/lguest.h +++ b/include/asm-x86/lguest.h | |||
@@ -23,6 +23,17 @@ | |||
23 | /* Found in switcher.S */ | 23 | /* Found in switcher.S */ |
24 | extern unsigned long default_idt_entries[]; | 24 | extern unsigned long default_idt_entries[]; |
25 | 25 | ||
26 | /* Declarations for definitions in lguest_guest.S */ | ||
27 | extern char lguest_noirq_start[], lguest_noirq_end[]; | ||
28 | extern const char lgstart_cli[], lgend_cli[]; | ||
29 | extern const char lgstart_sti[], lgend_sti[]; | ||
30 | extern const char lgstart_popf[], lgend_popf[]; | ||
31 | extern const char lgstart_pushf[], lgend_pushf[]; | ||
32 | extern const char lgstart_iret[], lgend_iret[]; | ||
33 | |||
34 | extern void lguest_iret(void); | ||
35 | extern void lguest_init(void); | ||
36 | |||
26 | struct lguest_regs | 37 | struct lguest_regs |
27 | { | 38 | { |
28 | /* Manually saved part. */ | 39 | /* Manually saved part. */ |
diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h index fec025c7f58c..e3b2bce0aff8 100644 --- a/include/asm-x86/nops.h +++ b/include/asm-x86/nops.h | |||
@@ -3,17 +3,29 @@ | |||
3 | 3 | ||
4 | /* Define nops for use with alternative() */ | 4 | /* Define nops for use with alternative() */ |
5 | 5 | ||
6 | /* generic versions from gas */ | 6 | /* generic versions from gas |
7 | #define GENERIC_NOP1 ".byte 0x90\n" | 7 | 1: nop |
8 | #define GENERIC_NOP2 ".byte 0x89,0xf6\n" | 8 | 2: movl %esi,%esi |
9 | #define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n" | 9 | 3: leal 0x00(%esi),%esi |
10 | #define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n" | 10 | 4: leal 0x00(,%esi,1),%esi |
11 | #define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4 | 11 | 6: leal 0x00000000(%esi),%esi |
12 | #define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n" | 12 | 7: leal 0x00000000(,%esi,1),%esi |
13 | #define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" | 13 | */ |
14 | #define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7 | 14 | #define GENERIC_NOP1 ".byte 0x90\n" |
15 | #define GENERIC_NOP2 ".byte 0x89,0xf6\n" | ||
16 | #define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n" | ||
17 | #define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n" | ||
18 | #define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4 | ||
19 | #define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n" | ||
20 | #define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" | ||
21 | #define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7 | ||
15 | 22 | ||
16 | /* Opteron 64bit nops */ | 23 | /* Opteron 64bit nops |
24 | 1: nop | ||
25 | 2: osp nop | ||
26 | 3: osp osp nop | ||
27 | 4: osp osp osp nop | ||
28 | */ | ||
17 | #define K8_NOP1 GENERIC_NOP1 | 29 | #define K8_NOP1 GENERIC_NOP1 |
18 | #define K8_NOP2 ".byte 0x66,0x90\n" | 30 | #define K8_NOP2 ".byte 0x66,0x90\n" |
19 | #define K8_NOP3 ".byte 0x66,0x66,0x90\n" | 31 | #define K8_NOP3 ".byte 0x66,0x66,0x90\n" |
@@ -23,19 +35,35 @@ | |||
23 | #define K8_NOP7 K8_NOP4 K8_NOP3 | 35 | #define K8_NOP7 K8_NOP4 K8_NOP3 |
24 | #define K8_NOP8 K8_NOP4 K8_NOP4 | 36 | #define K8_NOP8 K8_NOP4 K8_NOP4 |
25 | 37 | ||
26 | /* K7 nops */ | 38 | /* K7 nops |
27 | /* uses eax dependencies (arbitrary choice) */ | 39 | uses eax dependencies (arbitrary choice) |
28 | #define K7_NOP1 GENERIC_NOP1 | 40 | 1: nop |
41 | 2: movl %eax,%eax | ||
42 | 3: leal (,%eax,1),%eax | ||
43 | 4: leal 0x00(,%eax,1),%eax | ||
44 | 6: leal 0x00000000(%eax),%eax | ||
45 | 7: leal 0x00000000(,%eax,1),%eax | ||
46 | */ | ||
47 | #define K7_NOP1 GENERIC_NOP1 | ||
29 | #define K7_NOP2 ".byte 0x8b,0xc0\n" | 48 | #define K7_NOP2 ".byte 0x8b,0xc0\n" |
30 | #define K7_NOP3 ".byte 0x8d,0x04,0x20\n" | 49 | #define K7_NOP3 ".byte 0x8d,0x04,0x20\n" |
31 | #define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n" | 50 | #define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n" |
32 | #define K7_NOP5 K7_NOP4 ASM_NOP1 | 51 | #define K7_NOP5 K7_NOP4 ASM_NOP1 |
33 | #define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n" | 52 | #define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n" |
34 | #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" | 53 | #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" |
35 | #define K7_NOP8 K7_NOP7 ASM_NOP1 | 54 | #define K7_NOP8 K7_NOP7 ASM_NOP1 |
36 | 55 | ||
37 | /* P6 nops */ | 56 | /* P6 nops |
38 | /* uses eax dependencies (Intel-recommended choice) */ | 57 | uses eax dependencies (Intel-recommended choice) |
58 | 1: nop | ||
59 | 2: osp nop | ||
60 | 3: nopl (%eax) | ||
61 | 4: nopl 0x00(%eax) | ||
62 | 5: nopl 0x00(%eax,%eax,1) | ||
63 | 6: osp nopl 0x00(%eax,%eax,1) | ||
64 | 7: nopl 0x00000000(%eax) | ||
65 | 8: nopl 0x00000000(%eax,%eax,1) | ||
66 | */ | ||
39 | #define P6_NOP1 GENERIC_NOP1 | 67 | #define P6_NOP1 GENERIC_NOP1 |
40 | #define P6_NOP2 ".byte 0x66,0x90\n" | 68 | #define P6_NOP2 ".byte 0x66,0x90\n" |
41 | #define P6_NOP3 ".byte 0x0f,0x1f,0x00\n" | 69 | #define P6_NOP3 ".byte 0x0f,0x1f,0x00\n" |
@@ -63,9 +91,7 @@ | |||
63 | #define ASM_NOP6 K7_NOP6 | 91 | #define ASM_NOP6 K7_NOP6 |
64 | #define ASM_NOP7 K7_NOP7 | 92 | #define ASM_NOP7 K7_NOP7 |
65 | #define ASM_NOP8 K7_NOP8 | 93 | #define ASM_NOP8 K7_NOP8 |
66 | #elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \ | 94 | #elif defined(CONFIG_X86_P6_NOP) |
67 | defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \ | ||
68 | defined(CONFIG_MCORE2) || defined(CONFIG_PENTIUM4) | ||
69 | #define ASM_NOP1 P6_NOP1 | 95 | #define ASM_NOP1 P6_NOP1 |
70 | #define ASM_NOP2 P6_NOP2 | 96 | #define ASM_NOP2 P6_NOP2 |
71 | #define ASM_NOP3 P6_NOP3 | 97 | #define ASM_NOP3 P6_NOP3 |
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h index f7393bc516ef..143546073b95 100644 --- a/include/asm-x86/page_64.h +++ b/include/asm-x86/page_64.h | |||
@@ -47,8 +47,12 @@ | |||
47 | #define __PHYSICAL_MASK_SHIFT 46 | 47 | #define __PHYSICAL_MASK_SHIFT 46 |
48 | #define __VIRTUAL_MASK_SHIFT 48 | 48 | #define __VIRTUAL_MASK_SHIFT 48 |
49 | 49 | ||
50 | #define KERNEL_TEXT_SIZE (40*1024*1024) | 50 | /* |
51 | #define KERNEL_TEXT_START _AC(0xffffffff80000000, UL) | 51 | * Kernel image size is limited to 128 MB (see level2_kernel_pgt in |
52 | * arch/x86/kernel/head_64.S), and it is mapped here: | ||
53 | */ | ||
54 | #define KERNEL_IMAGE_SIZE (128*1024*1024) | ||
55 | #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) | ||
52 | 56 | ||
53 | #ifndef __ASSEMBLY__ | 57 | #ifndef __ASSEMBLY__ |
54 | void clear_page(void *page); | 58 | void clear_page(void *page); |
diff --git a/include/asm-x86/ptrace-abi.h b/include/asm-x86/ptrace-abi.h index 81a8ee4c55fc..f224eb3c3157 100644 --- a/include/asm-x86/ptrace-abi.h +++ b/include/asm-x86/ptrace-abi.h | |||
@@ -89,13 +89,13 @@ | |||
89 | */ | 89 | */ |
90 | struct ptrace_bts_config { | 90 | struct ptrace_bts_config { |
91 | /* requested or actual size of BTS buffer in bytes */ | 91 | /* requested or actual size of BTS buffer in bytes */ |
92 | u32 size; | 92 | __u32 size; |
93 | /* bitmask of below flags */ | 93 | /* bitmask of below flags */ |
94 | u32 flags; | 94 | __u32 flags; |
95 | /* buffer overflow signal */ | 95 | /* buffer overflow signal */ |
96 | u32 signal; | 96 | __u32 signal; |
97 | /* actual size of bts_struct in bytes */ | 97 | /* actual size of bts_struct in bytes */ |
98 | u32 bts_size; | 98 | __u32 bts_size; |
99 | }; | 99 | }; |
100 | #endif | 100 | #endif |
101 | 101 | ||
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index aada32fffec2..994df3780007 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild | |||
@@ -61,6 +61,7 @@ header-y += efs_fs_sb.h | |||
61 | header-y += elf-fdpic.h | 61 | header-y += elf-fdpic.h |
62 | header-y += elf-em.h | 62 | header-y += elf-em.h |
63 | header-y += fadvise.h | 63 | header-y += fadvise.h |
64 | header-y += falloc.h | ||
64 | header-y += fd.h | 65 | header-y += fd.h |
65 | header-y += fdreg.h | 66 | header-y += fdreg.h |
66 | header-y += fib_rules.h | 67 | header-y += fib_rules.h |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 6fe67d1939c2..6f79d40dd3c0 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -216,8 +216,8 @@ struct request { | |||
216 | unsigned int cmd_len; | 216 | unsigned int cmd_len; |
217 | unsigned char cmd[BLK_MAX_CDB]; | 217 | unsigned char cmd[BLK_MAX_CDB]; |
218 | 218 | ||
219 | unsigned int raw_data_len; | ||
220 | unsigned int data_len; | 219 | unsigned int data_len; |
220 | unsigned int extra_len; /* length of alignment and padding */ | ||
221 | unsigned int sense_len; | 221 | unsigned int sense_len; |
222 | void *data; | 222 | void *data; |
223 | void *sense; | 223 | void *sense; |
@@ -362,6 +362,7 @@ struct request_queue | |||
362 | unsigned long seg_boundary_mask; | 362 | unsigned long seg_boundary_mask; |
363 | void *dma_drain_buffer; | 363 | void *dma_drain_buffer; |
364 | unsigned int dma_drain_size; | 364 | unsigned int dma_drain_size; |
365 | unsigned int dma_pad_mask; | ||
365 | unsigned int dma_alignment; | 366 | unsigned int dma_alignment; |
366 | 367 | ||
367 | struct blk_queue_tag *queue_tags; | 368 | struct blk_queue_tag *queue_tags; |
@@ -701,6 +702,7 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); | |||
701 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | 702 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); |
702 | extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); | 703 | extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); |
703 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); | 704 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); |
705 | extern void blk_queue_dma_pad(struct request_queue *, unsigned int); | ||
704 | extern int blk_queue_dma_drain(struct request_queue *q, | 706 | extern int blk_queue_dma_drain(struct request_queue *q, |
705 | dma_drain_needed_fn *dma_drain_needed, | 707 | dma_drain_needed_fn *dma_drain_needed, |
706 | void *buf, unsigned int size); | 708 | void *buf, unsigned int size); |
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index ac6aad98b607..1ddebfc52565 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h | |||
@@ -37,7 +37,7 @@ SUBSYS(cpuacct) | |||
37 | 37 | ||
38 | /* */ | 38 | /* */ |
39 | 39 | ||
40 | #ifdef CONFIG_CGROUP_MEM_CONT | 40 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR |
41 | SUBSYS(mem_cgroup) | 41 | SUBSYS(mem_cgroup) |
42 | #endif | 42 | #endif |
43 | 43 | ||
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index d0e17e1657dc..dcae0c8d97e6 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -138,6 +138,12 @@ extern void __chk_io_ptr(const volatile void __iomem *); | |||
138 | #define noinline | 138 | #define noinline |
139 | #endif | 139 | #endif |
140 | 140 | ||
141 | /* | ||
142 | * Rather than using noinline to prevent stack consumption, use | ||
143 | * noinline_for_stack instead, for documentation reasons. | ||
144 | */ | ||
145 | #define noinline_for_stack noinline | ||
146 | |||
141 | #ifndef __always_inline | 147 | #ifndef __always_inline |
142 | #define __always_inline inline | 148 | #define __always_inline inline |
143 | #endif | 149 | #endif |
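A quick illustration of the new annotation (the function and buffer below are hypothetical, not part of this patch): noinline_for_stack behaves exactly like noinline but documents that inlining is suppressed to keep a large frame out of the caller.

    #include <linux/compiler.h>
    #include <linux/string.h>

    static noinline_for_stack int checksum_line(const char *line)
    {
            char scratch[512];      /* large local buffer kept off the caller's frame */
            int i, sum = 0;

            strlcpy(scratch, line, sizeof(scratch));
            for (i = 0; scratch[i]; i++)
                    sum += scratch[i];
            return sum;
    }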
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index f592d6de3b97..7266124361b4 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h | |||
@@ -27,6 +27,11 @@ struct debugfs_blob_wrapper { | |||
27 | }; | 27 | }; |
28 | 28 | ||
29 | #if defined(CONFIG_DEBUG_FS) | 29 | #if defined(CONFIG_DEBUG_FS) |
30 | |||
31 | /* declared over in file.c */ | ||
32 | extern const struct file_operations debugfs_file_operations; | ||
33 | extern const struct inode_operations debugfs_link_operations; | ||
34 | |||
30 | struct dentry *debugfs_create_file(const char *name, mode_t mode, | 35 | struct dentry *debugfs_create_file(const char *name, mode_t mode, |
31 | struct dentry *parent, void *data, | 36 | struct dentry *parent, void *data, |
32 | const struct file_operations *fops); | 37 | const struct file_operations *fops); |
diff --git a/include/linux/delay.h b/include/linux/delay.h index 17ddb55430ae..54552d21296e 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h | |||
@@ -7,6 +7,8 @@ | |||
7 | * Delay routines, using a pre-computed "loops_per_jiffy" value. | 7 | * Delay routines, using a pre-computed "loops_per_jiffy" value. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/kernel.h> | ||
11 | |||
10 | extern unsigned long loops_per_jiffy; | 12 | extern unsigned long loops_per_jiffy; |
11 | 13 | ||
12 | #include <asm/delay.h> | 14 | #include <asm/delay.h> |
@@ -32,7 +34,11 @@ extern unsigned long loops_per_jiffy; | |||
32 | #endif | 34 | #endif |
33 | 35 | ||
34 | #ifndef ndelay | 36 | #ifndef ndelay |
35 | #define ndelay(x) udelay(((x)+999)/1000) | 37 | static inline void ndelay(unsigned long x) |
38 | { | ||
39 | udelay(DIV_ROUND_UP(x, 1000)); | ||
40 | } | ||
41 | #define ndelay(x) ndelay(x) | ||
36 | #endif | 42 | #endif |
37 | 43 | ||
38 | void calibrate_delay(void); | 44 | void calibrate_delay(void); |
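The macro-to-inline conversion above does not change the rounding: ndelay(1500) now expands to udelay(DIV_ROUND_UP(1500, 1000)) = udelay(2), the same result the old (1500 + 999) / 1000 expression produced. The inline gains argument type checking, and the self-referential #define keeps the surrounding "#ifndef ndelay" guard seeing the symbol as defined.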
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index acbb364674ff..261e43a4c873 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -366,7 +366,7 @@ __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) | |||
366 | */ | 366 | */ |
367 | static inline void dma_async_issue_pending(struct dma_chan *chan) | 367 | static inline void dma_async_issue_pending(struct dma_chan *chan) |
368 | { | 368 | { |
369 | return chan->device->device_issue_pending(chan); | 369 | chan->device->device_issue_pending(chan); |
370 | } | 370 | } |
371 | 371 | ||
372 | #define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan) | 372 | #define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan) |
diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h index 532d13adabc4..0a90e1c3a422 100644 --- a/include/linux/elfcore-compat.h +++ b/include/linux/elfcore-compat.h | |||
@@ -45,8 +45,8 @@ struct compat_elf_prpsinfo | |||
45 | char pr_zomb; | 45 | char pr_zomb; |
46 | char pr_nice; | 46 | char pr_nice; |
47 | compat_ulong_t pr_flag; | 47 | compat_ulong_t pr_flag; |
48 | compat_uid_t pr_uid; | 48 | __compat_uid_t pr_uid; |
49 | compat_gid_t pr_gid; | 49 | __compat_gid_t pr_gid; |
50 | compat_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; | 50 | compat_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; |
51 | char pr_fname[16]; | 51 | char pr_fname[16]; |
52 | char pr_psargs[ELF_PRARGSZ]; | 52 | char pr_psargs[ELF_PRARGSZ]; |
diff --git a/include/linux/ext4_fs_extents.h b/include/linux/ext4_fs_extents.h index 697da4bce6c5..1285c583b2d8 100644 --- a/include/linux/ext4_fs_extents.h +++ b/include/linux/ext4_fs_extents.h | |||
@@ -227,5 +227,6 @@ extern int ext4_ext_search_left(struct inode *, struct ext4_ext_path *, | |||
227 | ext4_lblk_t *, ext4_fsblk_t *); | 227 | ext4_lblk_t *, ext4_fsblk_t *); |
228 | extern int ext4_ext_search_right(struct inode *, struct ext4_ext_path *, | 228 | extern int ext4_ext_search_right(struct inode *, struct ext4_ext_path *, |
229 | ext4_lblk_t *, ext4_fsblk_t *); | 229 | ext4_lblk_t *, ext4_fsblk_t *); |
230 | extern void ext4_ext_drop_refs(struct ext4_ext_path *); | ||
230 | #endif /* _LINUX_EXT4_EXTENTS */ | 231 | #endif /* _LINUX_EXT4_EXTENTS */ |
231 | 232 | ||
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 09a3b18918c7..32c2ac49a070 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
@@ -18,11 +18,13 @@ | |||
18 | #define dev_to_disk(device) container_of(device, struct gendisk, dev) | 18 | #define dev_to_disk(device) container_of(device, struct gendisk, dev) |
19 | #define dev_to_part(device) container_of(device, struct hd_struct, dev) | 19 | #define dev_to_part(device) container_of(device, struct hd_struct, dev) |
20 | 20 | ||
21 | extern struct device_type disk_type; | ||
22 | extern struct device_type part_type; | 21 | extern struct device_type part_type; |
23 | extern struct kobject *block_depr; | 22 | extern struct kobject *block_depr; |
24 | extern struct class block_class; | 23 | extern struct class block_class; |
25 | 24 | ||
25 | extern const struct seq_operations partitions_op; | ||
26 | extern const struct seq_operations diskstats_op; | ||
27 | |||
26 | enum { | 28 | enum { |
27 | /* These three have identical behaviour; use the second one if DOS FDISK gets | 29 | /* These three have identical behaviour; use the second one if DOS FDISK gets |
28 | confused about extended/logical partitions starting past cylinder 1023. */ | 30 | confused about extended/logical partitions starting past cylinder 1023. */ |
@@ -556,7 +558,6 @@ extern struct gendisk *alloc_disk_node(int minors, int node_id); | |||
556 | extern struct gendisk *alloc_disk(int minors); | 558 | extern struct gendisk *alloc_disk(int minors); |
557 | extern struct kobject *get_disk(struct gendisk *disk); | 559 | extern struct kobject *get_disk(struct gendisk *disk); |
558 | extern void put_disk(struct gendisk *disk); | 560 | extern void put_disk(struct gendisk *disk); |
559 | extern void genhd_media_change_notify(struct gendisk *disk); | ||
560 | extern void blk_register_region(dev_t devt, unsigned long range, | 561 | extern void blk_register_region(dev_t devt, unsigned long range, |
561 | struct module *module, | 562 | struct module *module, |
562 | struct kobject *(*probe)(dev_t, int *, void *), | 563 | struct kobject *(*probe)(dev_t, int *, void *), |
diff --git a/include/linux/gpio.h b/include/linux/gpio.h new file mode 100644 index 000000000000..4987a84078ef --- /dev/null +++ b/include/linux/gpio.h | |||
@@ -0,0 +1,95 @@ | |||
1 | #ifndef __LINUX_GPIO_H | ||
2 | #define __LINUX_GPIO_H | ||
3 | |||
4 | /* see Documentation/gpio.txt */ | ||
5 | |||
6 | #ifdef CONFIG_GENERIC_GPIO | ||
7 | #include <asm/gpio.h> | ||
8 | |||
9 | #else | ||
10 | |||
11 | /* | ||
12 | * Some platforms don't support the GPIO programming interface. | ||
13 | * | ||
14 | * In case some driver uses it anyway (it should normally have | ||
15 | * depended on GENERIC_GPIO), these routines help the compiler | ||
16 | * optimize out much GPIO-related code ... or trigger a runtime | ||
17 | * warning when something is wrongly called. | ||
18 | */ | ||
19 | |||
20 | static inline int gpio_is_valid(int number) | ||
21 | { | ||
22 | return 0; | ||
23 | } | ||
24 | |||
25 | static inline int gpio_request(unsigned gpio, const char *label) | ||
26 | { | ||
27 | return -ENOSYS; | ||
28 | } | ||
29 | |||
30 | static inline void gpio_free(unsigned gpio) | ||
31 | { | ||
32 | /* GPIO can never have been requested */ | ||
33 | WARN_ON(1); | ||
34 | } | ||
35 | |||
36 | static inline int gpio_direction_input(unsigned gpio) | ||
37 | { | ||
38 | return -ENOSYS; | ||
39 | } | ||
40 | |||
41 | static inline int gpio_direction_output(unsigned gpio, int value) | ||
42 | { | ||
43 | return -ENOSYS; | ||
44 | } | ||
45 | |||
46 | static inline int gpio_get_value(unsigned gpio) | ||
47 | { | ||
48 | /* GPIO can never have been requested or set as {in,out}put */ | ||
49 | WARN_ON(1); | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | static inline void gpio_set_value(unsigned gpio, int value) | ||
54 | { | ||
55 | /* GPIO can never have been requested or set as output */ | ||
56 | WARN_ON(1); | ||
57 | } | ||
58 | |||
59 | static inline int gpio_cansleep(unsigned gpio) | ||
60 | { | ||
61 | /* GPIO can never have been requested or set as {in,out}put */ | ||
62 | WARN_ON(1); | ||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | static inline int gpio_get_value_cansleep(unsigned gpio) | ||
67 | { | ||
68 | /* GPIO can never have been requested or set as {in,out}put */ | ||
69 | WARN_ON(1); | ||
70 | return 0; | ||
71 | } | ||
72 | |||
73 | static inline void gpio_set_value_cansleep(unsigned gpio, int value) | ||
74 | { | ||
75 | /* GPIO can never have been requested or set as output */ | ||
76 | WARN_ON(1); | ||
77 | } | ||
78 | |||
79 | static inline int gpio_to_irq(unsigned gpio) | ||
80 | { | ||
81 | /* GPIO can never have been requested or set as input */ | ||
82 | WARN_ON(1); | ||
83 | return -EINVAL; | ||
84 | } | ||
85 | |||
86 | static inline int irq_to_gpio(unsigned irq) | ||
87 | { | ||
88 | /* irq can never have been returned from gpio_to_irq() */ | ||
89 | WARN_ON(1); | ||
90 | return -EINVAL; | ||
91 | } | ||
92 | |||
93 | #endif | ||
94 | |||
95 | #endif /* __LINUX_GPIO_H */ | ||
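A minimal, hypothetical driver fragment (GPIO number and label invented for illustration) showing the calls this new header either forwards to <asm/gpio.h> or, on platforms without GENERIC_GPIO, stubs out with -ENOSYS and WARN_ON:

    #include <linux/gpio.h>

    #define MYDEV_RESET_GPIO 42                     /* hypothetical GPIO line */

    static int mydev_pulse_reset(void)
    {
            int err;

            err = gpio_request(MYDEV_RESET_GPIO, "mydev-reset");
            if (err)
                    return err;                     /* -ENOSYS from the stubbed variant */

            err = gpio_direction_output(MYDEV_RESET_GPIO, 0);
            if (!err) {
                    gpio_set_value(MYDEV_RESET_GPIO, 1);
                    gpio_set_value(MYDEV_RESET_GPIO, 0);
            }

            gpio_free(MYDEV_RESET_GPIO);
            return err;
    }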
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 2961ec788046..49829988bfa0 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h | |||
@@ -109,6 +109,14 @@ static inline void account_system_vtime(struct task_struct *tsk) | |||
109 | } | 109 | } |
110 | #endif | 110 | #endif |
111 | 111 | ||
112 | #if defined(CONFIG_PREEMPT_RCU) && defined(CONFIG_NO_HZ) | ||
113 | extern void rcu_irq_enter(void); | ||
114 | extern void rcu_irq_exit(void); | ||
115 | #else | ||
116 | # define rcu_irq_enter() do { } while (0) | ||
117 | # define rcu_irq_exit() do { } while (0) | ||
118 | #endif /* CONFIG_PREEMPT_RCU && CONFIG_NO_HZ */ | ||
119 | |||
112 | /* | 120 | /* |
113 | * It is safe to do non-atomic ops on ->hardirq_context, | 121 | * It is safe to do non-atomic ops on ->hardirq_context, |
114 | * because NMI handlers may not preempt and the ops are | 122 | * because NMI handlers may not preempt and the ops are |
@@ -117,6 +125,7 @@ static inline void account_system_vtime(struct task_struct *tsk) | |||
117 | */ | 125 | */ |
118 | #define __irq_enter() \ | 126 | #define __irq_enter() \ |
119 | do { \ | 127 | do { \ |
128 | rcu_irq_enter(); \ | ||
120 | account_system_vtime(current); \ | 129 | account_system_vtime(current); \ |
121 | add_preempt_count(HARDIRQ_OFFSET); \ | 130 | add_preempt_count(HARDIRQ_OFFSET); \ |
122 | trace_hardirq_enter(); \ | 131 | trace_hardirq_enter(); \ |
@@ -135,6 +144,7 @@ extern void irq_enter(void); | |||
135 | trace_hardirq_exit(); \ | 144 | trace_hardirq_exit(); \ |
136 | account_system_vtime(current); \ | 145 | account_system_vtime(current); \ |
137 | sub_preempt_count(HARDIRQ_OFFSET); \ | 146 | sub_preempt_count(HARDIRQ_OFFSET); \ |
147 | rcu_irq_exit(); \ | ||
138 | } while (0) | 148 | } while (0) |
139 | 149 | ||
140 | /* | 150 | /* |
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h index 4dd4c04ff2f4..c975caf75385 100644 --- a/include/linux/iommu-helper.h +++ b/include/linux/iommu-helper.h | |||
@@ -1,3 +1,6 @@ | |||
1 | extern int iommu_is_span_boundary(unsigned int index, unsigned int nr, | ||
2 | unsigned long shift, | ||
3 | unsigned long boundary_size); | ||
1 | extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, | 4 | extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, |
2 | unsigned long start, unsigned int nr, | 5 | unsigned long start, unsigned int nr, |
3 | unsigned long shift, | 6 | unsigned long shift, |
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 4a6ce82ba039..0f28486f6360 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h | |||
@@ -125,11 +125,11 @@ struct jprobe { | |||
125 | DECLARE_PER_CPU(struct kprobe *, current_kprobe); | 125 | DECLARE_PER_CPU(struct kprobe *, current_kprobe); |
126 | DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | 126 | DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); |
127 | 127 | ||
128 | #ifdef ARCH_SUPPORTS_KRETPROBES | 128 | #ifdef CONFIG_KRETPROBES |
129 | extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, | 129 | extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, |
130 | struct pt_regs *regs); | 130 | struct pt_regs *regs); |
131 | extern int arch_trampoline_kprobe(struct kprobe *p); | 131 | extern int arch_trampoline_kprobe(struct kprobe *p); |
132 | #else /* ARCH_SUPPORTS_KRETPROBES */ | 132 | #else /* CONFIG_KRETPROBES */ |
133 | static inline void arch_prepare_kretprobe(struct kretprobe *rp, | 133 | static inline void arch_prepare_kretprobe(struct kretprobe *rp, |
134 | struct pt_regs *regs) | 134 | struct pt_regs *regs) |
135 | { | 135 | { |
@@ -138,7 +138,7 @@ static inline int arch_trampoline_kprobe(struct kprobe *p) | |||
138 | { | 138 | { |
139 | return 0; | 139 | return 0; |
140 | } | 140 | } |
141 | #endif /* ARCH_SUPPORTS_KRETPROBES */ | 141 | #endif /* CONFIG_KRETPROBES */ |
142 | /* | 142 | /* |
143 | * Function-return probe - | 143 | * Function-return probe - |
144 | * Note: | 144 | * Note: |
diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 4de4fd2d8607..c1ec04fd000d 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h | |||
@@ -221,6 +221,7 @@ struct kvm_vapic_addr { | |||
221 | * Get size for mmap(vcpu_fd) | 221 | * Get size for mmap(vcpu_fd) |
222 | */ | 222 | */ |
223 | #define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */ | 223 | #define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */ |
224 | #define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2) | ||
224 | 225 | ||
225 | /* | 226 | /* |
226 | * Extension capability list. | 227 | * Extension capability list. |
@@ -230,8 +231,8 @@ struct kvm_vapic_addr { | |||
230 | #define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2 | 231 | #define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2 |
231 | #define KVM_CAP_USER_MEMORY 3 | 232 | #define KVM_CAP_USER_MEMORY 3 |
232 | #define KVM_CAP_SET_TSS_ADDR 4 | 233 | #define KVM_CAP_SET_TSS_ADDR 4 |
233 | #define KVM_CAP_EXT_CPUID 5 | ||
234 | #define KVM_CAP_VAPIC 6 | 234 | #define KVM_CAP_VAPIC 6 |
235 | #define KVM_CAP_EXT_CPUID 7 | ||
235 | 236 | ||
236 | /* | 237 | /* |
237 | * ioctls for VM fds | 238 | * ioctls for VM fds |
@@ -249,7 +250,6 @@ struct kvm_vapic_addr { | |||
249 | #define KVM_CREATE_VCPU _IO(KVMIO, 0x41) | 250 | #define KVM_CREATE_VCPU _IO(KVMIO, 0x41) |
250 | #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) | 251 | #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) |
251 | #define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias) | 252 | #define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias) |
252 | #define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x48, struct kvm_cpuid2) | ||
253 | /* Device model IOC */ | 253 | /* Device model IOC */ |
254 | #define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60) | 254 | #define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60) |
255 | #define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level) | 255 | #define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level) |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ea4764b0a2f4..928b0d59e9ba 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -107,6 +107,7 @@ struct kvm_memory_slot { | |||
107 | struct kvm { | 107 | struct kvm { |
108 | struct mutex lock; /* protects the vcpus array and APIC accesses */ | 108 | struct mutex lock; /* protects the vcpus array and APIC accesses */ |
109 | spinlock_t mmu_lock; | 109 | spinlock_t mmu_lock; |
110 | struct rw_semaphore slots_lock; | ||
110 | struct mm_struct *mm; /* userspace tied to this vm */ | 111 | struct mm_struct *mm; /* userspace tied to this vm */ |
111 | int nmemslots; | 112 | int nmemslots; |
112 | struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS + | 113 | struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS + |
diff --git a/include/linux/maple.h b/include/linux/maple.h index 3f01e2bae1a1..d31e36ebb436 100644 --- a/include/linux/maple.h +++ b/include/linux/maple.h | |||
@@ -64,7 +64,6 @@ struct maple_driver { | |||
64 | int (*connect) (struct maple_device * dev); | 64 | int (*connect) (struct maple_device * dev); |
65 | void (*disconnect) (struct maple_device * dev); | 65 | void (*disconnect) (struct maple_device * dev); |
66 | struct device_driver drv; | 66 | struct device_driver drv; |
67 | int registered; | ||
68 | }; | 67 | }; |
69 | 68 | ||
70 | void maple_getcond_callback(struct maple_device *dev, | 69 | void maple_getcond_callback(struct maple_device *dev, |
diff --git a/include/linux/marker.h b/include/linux/marker.h index 5df879dc3776..430f6adf9762 100644 --- a/include/linux/marker.h +++ b/include/linux/marker.h | |||
@@ -104,10 +104,16 @@ static inline void marker_update_probe_range(struct marker *begin, | |||
104 | #define MARK_NOARGS " " | 104 | #define MARK_NOARGS " " |
105 | 105 | ||
106 | /* To be used for string format validity checking with gcc */ | 106 | /* To be used for string format validity checking with gcc */ |
107 | static inline void __printf(1, 2) __mark_check_format(const char *fmt, ...) | 107 | static inline void __printf(1, 2) ___mark_check_format(const char *fmt, ...) |
108 | { | 108 | { |
109 | } | 109 | } |
110 | 110 | ||
111 | #define __mark_check_format(format, args...) \ | ||
112 | do { \ | ||
113 | if (0) \ | ||
114 | ___mark_check_format(format, ## args); \ | ||
115 | } while (0) | ||
116 | |||
111 | extern marker_probe_func __mark_empty_function; | 117 | extern marker_probe_func __mark_empty_function; |
112 | 118 | ||
113 | extern void marker_probe_cb(const struct marker *mdata, | 119 | extern void marker_probe_cb(const struct marker *mdata, |
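The new wrapper is the usual "if (0)" trick: gcc still type-checks the format string against the arguments (via the __printf attribute on the renamed helper) but emits no code for the call. A generic sketch of the same idiom, with hypothetical names:

    static inline __attribute__((format(printf, 1, 2)))
    void check_fmt(const char *fmt, ...)
    {
    }

    #define trace_fmt(fmt, args...)                  \
            do {                                     \
                    if (0)                           \
                            check_fmt(fmt, ## args); \
            } while (0)

    /* trace_fmt("%d items", "oops") now draws a -Wformat warning,
     * yet no call to check_fmt() ever reaches the object code. */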
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 04075628cb9a..8b1c4295848b 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -25,18 +25,20 @@ struct page_cgroup; | |||
25 | struct page; | 25 | struct page; |
26 | struct mm_struct; | 26 | struct mm_struct; |
27 | 27 | ||
28 | #ifdef CONFIG_CGROUP_MEM_CONT | 28 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR |
29 | 29 | ||
30 | extern void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p); | 30 | extern void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p); |
31 | extern void mm_free_cgroup(struct mm_struct *mm); | 31 | extern void mm_free_cgroup(struct mm_struct *mm); |
32 | extern void page_assign_page_cgroup(struct page *page, | 32 | |
33 | struct page_cgroup *pc); | 33 | #define page_reset_bad_cgroup(page) ((page)->page_cgroup = 0) |
34 | |||
34 | extern struct page_cgroup *page_get_page_cgroup(struct page *page); | 35 | extern struct page_cgroup *page_get_page_cgroup(struct page *page); |
35 | extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm, | 36 | extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm, |
36 | gfp_t gfp_mask); | 37 | gfp_t gfp_mask); |
37 | extern void mem_cgroup_uncharge(struct page_cgroup *pc); | 38 | extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, |
39 | gfp_t gfp_mask); | ||
38 | extern void mem_cgroup_uncharge_page(struct page *page); | 40 | extern void mem_cgroup_uncharge_page(struct page *page); |
39 | extern void mem_cgroup_move_lists(struct page_cgroup *pc, bool active); | 41 | extern void mem_cgroup_move_lists(struct page *page, bool active); |
40 | extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, | 42 | extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, |
41 | struct list_head *dst, | 43 | struct list_head *dst, |
42 | unsigned long *scanned, int order, | 44 | unsigned long *scanned, int order, |
@@ -44,11 +46,9 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, | |||
44 | struct mem_cgroup *mem_cont, | 46 | struct mem_cgroup *mem_cont, |
45 | int active); | 47 | int active); |
46 | extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask); | 48 | extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask); |
47 | extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, | ||
48 | gfp_t gfp_mask); | ||
49 | int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem); | 49 | int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem); |
50 | 50 | ||
51 | #define vm_match_cgroup(mm, cgroup) \ | 51 | #define mm_match_cgroup(mm, cgroup) \ |
52 | ((cgroup) == rcu_dereference((mm)->mem_cgroup)) | 52 | ((cgroup) == rcu_dereference((mm)->mem_cgroup)) |
53 | 53 | ||
54 | extern int mem_cgroup_prepare_migration(struct page *page); | 54 | extern int mem_cgroup_prepare_migration(struct page *page); |
@@ -72,7 +72,7 @@ extern long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem, | |||
72 | extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem, | 72 | extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem, |
73 | struct zone *zone, int priority); | 73 | struct zone *zone, int priority); |
74 | 74 | ||
75 | #else /* CONFIG_CGROUP_MEM_CONT */ | 75 | #else /* CONFIG_CGROUP_MEM_RES_CTLR */ |
76 | static inline void mm_init_cgroup(struct mm_struct *mm, | 76 | static inline void mm_init_cgroup(struct mm_struct *mm, |
77 | struct task_struct *p) | 77 | struct task_struct *p) |
78 | { | 78 | { |
@@ -82,8 +82,7 @@ static inline void mm_free_cgroup(struct mm_struct *mm) | |||
82 | { | 82 | { |
83 | } | 83 | } |
84 | 84 | ||
85 | static inline void page_assign_page_cgroup(struct page *page, | 85 | static inline void page_reset_bad_cgroup(struct page *page) |
86 | struct page_cgroup *pc) | ||
87 | { | 86 | { |
88 | } | 87 | } |
89 | 88 | ||
@@ -92,33 +91,27 @@ static inline struct page_cgroup *page_get_page_cgroup(struct page *page) | |||
92 | return NULL; | 91 | return NULL; |
93 | } | 92 | } |
94 | 93 | ||
95 | static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm, | 94 | static inline int mem_cgroup_charge(struct page *page, |
96 | gfp_t gfp_mask) | 95 | struct mm_struct *mm, gfp_t gfp_mask) |
97 | { | 96 | { |
98 | return 0; | 97 | return 0; |
99 | } | 98 | } |
100 | 99 | ||
101 | static inline void mem_cgroup_uncharge(struct page_cgroup *pc) | 100 | static inline int mem_cgroup_cache_charge(struct page *page, |
101 | struct mm_struct *mm, gfp_t gfp_mask) | ||
102 | { | 102 | { |
103 | return 0; | ||
103 | } | 104 | } |
104 | 105 | ||
105 | static inline void mem_cgroup_uncharge_page(struct page *page) | 106 | static inline void mem_cgroup_uncharge_page(struct page *page) |
106 | { | 107 | { |
107 | } | 108 | } |
108 | 109 | ||
109 | static inline void mem_cgroup_move_lists(struct page_cgroup *pc, | 110 | static inline void mem_cgroup_move_lists(struct page *page, bool active) |
110 | bool active) | ||
111 | { | ||
112 | } | ||
113 | |||
114 | static inline int mem_cgroup_cache_charge(struct page *page, | ||
115 | struct mm_struct *mm, | ||
116 | gfp_t gfp_mask) | ||
117 | { | 111 | { |
118 | return 0; | ||
119 | } | 112 | } |
120 | 113 | ||
121 | static inline int vm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem) | 114 | static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem) |
122 | { | 115 | { |
123 | return 1; | 116 | return 1; |
124 | } | 117 | } |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index bfee0bd1d435..af190ceab971 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -64,10 +64,7 @@ struct page { | |||
64 | #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS | 64 | #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS |
65 | spinlock_t ptl; | 65 | spinlock_t ptl; |
66 | #endif | 66 | #endif |
67 | struct { | 67 | struct kmem_cache *slab; /* SLUB: Pointer to slab */ |
68 | struct kmem_cache *slab; /* SLUB: Pointer to slab */ | ||
69 | void *end; /* SLUB: end marker */ | ||
70 | }; | ||
71 | struct page *first_page; /* Compound tail pages */ | 68 | struct page *first_page; /* Compound tail pages */ |
72 | }; | 69 | }; |
73 | union { | 70 | union { |
@@ -91,7 +88,7 @@ struct page { | |||
91 | void *virtual; /* Kernel virtual address (NULL if | 88 | void *virtual; /* Kernel virtual address (NULL if |
92 | not kmapped, ie. highmem) */ | 89 | not kmapped, ie. highmem) */ |
93 | #endif /* WANT_PAGE_VIRTUAL */ | 90 | #endif /* WANT_PAGE_VIRTUAL */ |
94 | #ifdef CONFIG_CGROUP_MEM_CONT | 91 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR |
95 | unsigned long page_cgroup; | 92 | unsigned long page_cgroup; |
96 | #endif | 93 | #endif |
97 | }; | 94 | }; |
@@ -225,7 +222,7 @@ struct mm_struct { | |||
225 | /* aio bits */ | 222 | /* aio bits */ |
226 | rwlock_t ioctx_list_lock; | 223 | rwlock_t ioctx_list_lock; |
227 | struct kioctx *ioctx_list; | 224 | struct kioctx *ioctx_list; |
228 | #ifdef CONFIG_CGROUP_MEM_CONT | 225 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR |
229 | struct mem_cgroup *mem_cgroup; | 226 | struct mem_cgroup *mem_cgroup; |
230 | #endif | 227 | #endif |
231 | }; | 228 | }; |
diff --git a/include/linux/pci.h b/include/linux/pci.h index 87195b62de52..f3165e7ac431 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -389,6 +389,16 @@ struct pci_driver { | |||
389 | #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) | 389 | #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) |
390 | 390 | ||
391 | /** | 391 | /** |
392 | * DECLARE_PCI_DEVICE_TABLE - macro used to describe a pci device table | ||
393 | * @_table: device table name | ||
394 | * | ||
395 | * This macro is used to create a struct pci_device_id array (a device table) | ||
396 | * in a generic manner. | ||
397 | */ | ||
398 | #define DECLARE_PCI_DEVICE_TABLE(_table) \ | ||
399 | const struct pci_device_id _table[] __devinitconst | ||
400 | |||
401 | /** | ||
392 | * PCI_DEVICE - macro used to describe a specific pci device | 402 | * PCI_DEVICE - macro used to describe a specific pci device |
393 | * @vend: the 16 bit PCI Vendor ID | 403 | * @vend: the 16 bit PCI Vendor ID |
394 | * @dev: the 16 bit PCI Device ID | 404 | * @dev: the 16 bit PCI Device ID |
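Hypothetical usage of the new DECLARE_PCI_DEVICE_TABLE macro (the vendor/device IDs below are placeholders); it expands to a const, __devinitconst-annotated struct pci_device_id array:

    #include <linux/pci.h>
    #include <linux/module.h>

    static DECLARE_PCI_DEVICE_TABLE(mydrv_pci_tbl) = {
            { PCI_DEVICE(0x1234, 0x5678) },         /* placeholder IDs */
            { 0, }
    };
    MODULE_DEVICE_TABLE(pci, mydrv_pci_tbl);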
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h index e51b531cd0b2..47fbcba11850 100644 --- a/include/linux/raid/bitmap.h +++ b/include/linux/raid/bitmap.h | |||
@@ -235,6 +235,8 @@ struct bitmap { | |||
235 | 235 | ||
236 | unsigned long flags; | 236 | unsigned long flags; |
237 | 237 | ||
238 | int allclean; | ||
239 | |||
238 | unsigned long max_write_behind; /* write-behind mode */ | 240 | unsigned long max_write_behind; /* write-behind mode */ |
239 | atomic_t behind_writes; | 241 | atomic_t behind_writes; |
240 | 242 | ||
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 85a068bab625..7bb6d1abf71e 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h | |||
@@ -83,6 +83,7 @@ struct mdk_rdev_s | |||
83 | #define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */ | 83 | #define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */ |
84 | #define AllReserved 6 /* If whole device is reserved for | 84 | #define AllReserved 6 /* If whole device is reserved for |
85 | * one array */ | 85 | * one array */ |
86 | #define AutoDetected 7 /* added by auto-detect */ | ||
86 | 87 | ||
87 | int desc_nr; /* descriptor index in the superblock */ | 88 | int desc_nr; /* descriptor index in the superblock */ |
88 | int raid_disk; /* role of device in array */ | 89 | int raid_disk; /* role of device in array */ |
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h index 4d6624260b4c..b3dccd68629e 100644 --- a/include/linux/rcuclassic.h +++ b/include/linux/rcuclassic.h | |||
@@ -160,5 +160,8 @@ extern void rcu_restart_cpu(int cpu); | |||
160 | extern long rcu_batches_completed(void); | 160 | extern long rcu_batches_completed(void); |
161 | extern long rcu_batches_completed_bh(void); | 161 | extern long rcu_batches_completed_bh(void); |
162 | 162 | ||
163 | #define rcu_enter_nohz() do { } while (0) | ||
164 | #define rcu_exit_nohz() do { } while (0) | ||
165 | |||
163 | #endif /* __KERNEL__ */ | 166 | #endif /* __KERNEL__ */ |
164 | #endif /* __LINUX_RCUCLASSIC_H */ | 167 | #endif /* __LINUX_RCUCLASSIC_H */ |
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h index 60c2a033b19e..01152ed532c8 100644 --- a/include/linux/rcupreempt.h +++ b/include/linux/rcupreempt.h | |||
@@ -82,5 +82,27 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu); | |||
82 | 82 | ||
83 | struct softirq_action; | 83 | struct softirq_action; |
84 | 84 | ||
85 | #ifdef CONFIG_NO_HZ | ||
86 | DECLARE_PER_CPU(long, dynticks_progress_counter); | ||
87 | |||
88 | static inline void rcu_enter_nohz(void) | ||
89 | { | ||
90 | __get_cpu_var(dynticks_progress_counter)++; | ||
91 | WARN_ON(__get_cpu_var(dynticks_progress_counter) & 0x1); | ||
92 | mb(); | ||
93 | } | ||
94 | |||
95 | static inline void rcu_exit_nohz(void) | ||
96 | { | ||
97 | mb(); | ||
98 | __get_cpu_var(dynticks_progress_counter)++; | ||
99 | WARN_ON(!(__get_cpu_var(dynticks_progress_counter) & 0x1)); | ||
100 | } | ||
101 | |||
102 | #else /* CONFIG_NO_HZ */ | ||
103 | #define rcu_enter_nohz() do { } while (0) | ||
104 | #define rcu_exit_nohz() do { } while (0) | ||
105 | #endif /* CONFIG_NO_HZ */ | ||
106 | |||
85 | #endif /* __KERNEL__ */ | 107 | #endif /* __KERNEL__ */ |
86 | #endif /* __LINUX_RCUPREEMPT_H */ | 108 | #endif /* __LINUX_RCUPREEMPT_H */ |
diff --git a/include/linux/sched.h b/include/linux/sched.h index e217d188a102..9ae4030067a9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -242,6 +242,7 @@ struct task_struct; | |||
242 | 242 | ||
243 | extern void sched_init(void); | 243 | extern void sched_init(void); |
244 | extern void sched_init_smp(void); | 244 | extern void sched_init_smp(void); |
245 | extern asmlinkage void schedule_tail(struct task_struct *prev); | ||
245 | extern void init_idle(struct task_struct *idle, int cpu); | 246 | extern void init_idle(struct task_struct *idle, int cpu); |
246 | extern void init_idle_bootup_task(struct task_struct *idle); | 247 | extern void init_idle_bootup_task(struct task_struct *idle); |
247 | 248 | ||
@@ -1189,7 +1190,7 @@ struct task_struct { | |||
1189 | int softirq_context; | 1190 | int softirq_context; |
1190 | #endif | 1191 | #endif |
1191 | #ifdef CONFIG_LOCKDEP | 1192 | #ifdef CONFIG_LOCKDEP |
1192 | # define MAX_LOCK_DEPTH 30UL | 1193 | # define MAX_LOCK_DEPTH 48UL |
1193 | u64 curr_chain_key; | 1194 | u64 curr_chain_key; |
1194 | int lockdep_depth; | 1195 | int lockdep_depth; |
1195 | struct held_lock held_locks[MAX_LOCK_DEPTH]; | 1196 | struct held_lock held_locks[MAX_LOCK_DEPTH]; |
@@ -1541,10 +1542,6 @@ extern unsigned int sysctl_sched_child_runs_first; | |||
1541 | extern unsigned int sysctl_sched_features; | 1542 | extern unsigned int sysctl_sched_features; |
1542 | extern unsigned int sysctl_sched_migration_cost; | 1543 | extern unsigned int sysctl_sched_migration_cost; |
1543 | extern unsigned int sysctl_sched_nr_migrate; | 1544 | extern unsigned int sysctl_sched_nr_migrate; |
1544 | #if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP) | ||
1545 | extern unsigned int sysctl_sched_min_bal_int_shares; | ||
1546 | extern unsigned int sysctl_sched_max_bal_int_shares; | ||
1547 | #endif | ||
1548 | 1545 | ||
1549 | int sched_nr_latency_handler(struct ctl_table *table, int write, | 1546 | int sched_nr_latency_handler(struct ctl_table *table, int write, |
1550 | struct file *file, void __user *buffer, size_t *length, | 1547 | struct file *file, void __user *buffer, size_t *length, |
diff --git a/include/asm-sh/sci.h b/include/linux/serial_sci.h index 52e73660c129..893cc53486bc 100644 --- a/include/asm-sh/sci.h +++ b/include/linux/serial_sci.h | |||
@@ -1,12 +1,10 @@ | |||
1 | #ifndef __ASM_SH_SCI_H | 1 | #ifndef __LINUX_SERIAL_SCI_H |
2 | #define __ASM_SH_SCI_H | 2 | #define __LINUX_SERIAL_SCI_H |
3 | 3 | ||
4 | #include <linux/serial_core.h> | 4 | #include <linux/serial_core.h> |
5 | 5 | ||
6 | /* | 6 | /* |
7 | * Generic header for SuperH SCI(F) | 7 | * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts) |
8 | * | ||
9 | * Do not place SH-specific parts in here, sh64 and h8300 depend on this too. | ||
10 | */ | 8 | */ |
11 | 9 | ||
12 | /* Offsets into the sci_port->irqs array */ | 10 | /* Offsets into the sci_port->irqs array */ |
@@ -31,4 +29,4 @@ struct plat_sci_port { | |||
31 | 29 | ||
32 | int early_sci_setup(struct uart_port *port); | 30 | int early_sci_setup(struct uart_port *port); |
33 | 31 | ||
34 | #endif /* __ASM_SH_SCI_H */ | 32 | #endif /* __LINUX_SERIAL_SCI_H */ |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 57deecc79d52..b00c1c73eb0a 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -61,7 +61,7 @@ struct kmem_cache { | |||
61 | int size; /* The size of an object including meta data */ | 61 | int size; /* The size of an object including meta data */ |
62 | int objsize; /* The size of an object without meta data */ | 62 | int objsize; /* The size of an object without meta data */ |
63 | int offset; /* Free pointer offset. */ | 63 | int offset; /* Free pointer offset. */ |
64 | int order; | 64 | int order; /* Current preferred allocation order */ |
65 | 65 | ||
66 | /* | 66 | /* |
67 | * Avoid an extra cache line for UP, SMP and for the node local to | 67 | * Avoid an extra cache line for UP, SMP and for the node local to |
@@ -138,11 +138,11 @@ static __always_inline int kmalloc_index(size_t size) | |||
138 | if (size <= 512) return 9; | 138 | if (size <= 512) return 9; |
139 | if (size <= 1024) return 10; | 139 | if (size <= 1024) return 10; |
140 | if (size <= 2 * 1024) return 11; | 140 | if (size <= 2 * 1024) return 11; |
141 | if (size <= 4 * 1024) return 12; | ||
141 | /* | 142 | /* |
142 | * The following is only needed to support architectures with a larger page | 143 | * The following is only needed to support architectures with a larger page |
143 | * size than 4k. | 144 | * size than 4k. |
144 | */ | 145 | */ |
145 | if (size <= 4 * 1024) return 12; | ||
146 | if (size <= 8 * 1024) return 13; | 146 | if (size <= 8 * 1024) return 13; |
147 | if (size <= 16 * 1024) return 14; | 147 | if (size <= 16 * 1024) return 14; |
148 | if (size <= 32 * 1024) return 15; | 148 | if (size <= 32 * 1024) return 15; |
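Hoisting the 4 KB case above the large-page-size comment does not change the mapping itself: for example, kmalloc_index(3000) still falls past the 2048 check and returns 12, i.e. the 4096-byte kmalloc cache, on every architecture; only the placement of the check relative to the comment moved.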
diff --git a/include/linux/sm501-regs.h b/include/linux/sm501-regs.h index 64236b73c724..d53642d2d899 100644 --- a/include/linux/sm501-regs.h +++ b/include/linux/sm501-regs.h | |||
@@ -129,11 +129,14 @@ | |||
129 | 129 | ||
130 | #define SM501_DEVICEID_SM501 (0x05010000) | 130 | #define SM501_DEVICEID_SM501 (0x05010000) |
131 | #define SM501_DEVICEID_IDMASK (0xffff0000) | 131 | #define SM501_DEVICEID_IDMASK (0xffff0000) |
132 | #define SM501_DEVICEID_REVMASK (0x000000ff) | ||
132 | 133 | ||
133 | #define SM501_PLLCLOCK_COUNT (0x000064) | 134 | #define SM501_PLLCLOCK_COUNT (0x000064) |
134 | #define SM501_MISC_TIMING (0x000068) | 135 | #define SM501_MISC_TIMING (0x000068) |
135 | #define SM501_CURRENT_SDRAM_CLOCK (0x00006C) | 136 | #define SM501_CURRENT_SDRAM_CLOCK (0x00006C) |
136 | 137 | ||
138 | #define SM501_PROGRAMMABLE_PLL_CONTROL (0x000074) | ||
139 | |||
137 | /* GPIO base */ | 140 | /* GPIO base */ |
138 | #define SM501_GPIO (0x010000) | 141 | #define SM501_GPIO (0x010000) |
139 | #define SM501_GPIO_DATA_LOW (0x00) | 142 | #define SM501_GPIO_DATA_LOW (0x00) |
diff --git a/include/linux/sm501.h b/include/linux/sm501.h index 932a9efee8a5..bca134544700 100644 --- a/include/linux/sm501.h +++ b/include/linux/sm501.h | |||
@@ -24,7 +24,8 @@ extern int sm501_unit_power(struct device *dev, | |||
24 | extern unsigned long sm501_set_clock(struct device *dev, | 24 | extern unsigned long sm501_set_clock(struct device *dev, |
25 | int clksrc, unsigned long freq); | 25 | int clksrc, unsigned long freq); |
26 | 26 | ||
27 | extern unsigned long sm501_find_clock(int clksrc, unsigned long req_freq); | 27 | extern unsigned long sm501_find_clock(struct device *dev, |
28 | int clksrc, unsigned long req_freq); | ||
28 | 29 | ||
29 | /* sm501_misc_control | 30 | /* sm501_misc_control |
30 | * | 31 | * |
diff --git a/include/linux/usb.h b/include/linux/usb.h index 2372e2e6b527..583e0481dfa0 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
@@ -94,10 +94,9 @@ enum usb_interface_condition { | |||
94 | * @altsetting: array of interface structures, one for each alternate | 94 | * @altsetting: array of interface structures, one for each alternate |
95 | * setting that may be selected. Each one includes a set of | 95 | * setting that may be selected. Each one includes a set of |
96 | * endpoint configurations. They will be in no particular order. | 96 | * endpoint configurations. They will be in no particular order. |
97 | * @num_altsetting: number of altsettings defined. | ||
98 | * @cur_altsetting: the current altsetting. | 97 | * @cur_altsetting: the current altsetting. |
98 | * @num_altsetting: number of altsettings defined. | ||
99 | * @intf_assoc: interface association descriptor | 99 | * @intf_assoc: interface association descriptor |
100 | * @driver: the USB driver that is bound to this interface. | ||
101 | * @minor: the minor number assigned to this interface, if this | 100 | * @minor: the minor number assigned to this interface, if this |
102 | * interface is bound to a driver that uses the USB major number. | 101 | * interface is bound to a driver that uses the USB major number. |
103 | * If this interface does not use the USB major, this field should | 102 | * If this interface does not use the USB major, this field should |
@@ -781,8 +780,7 @@ static inline int usb_endpoint_is_isoc_out( | |||
781 | .idVendor = (vend), \ | 780 | .idVendor = (vend), \ |
782 | .idProduct = (prod) | 781 | .idProduct = (prod) |
783 | /** | 782 | /** |
784 | * USB_DEVICE_VER - macro used to describe a specific usb device with a | 783 | * USB_DEVICE_VER - describe a specific usb device with a version range |
785 | * version range | ||
786 | * @vend: the 16 bit USB Vendor ID | 784 | * @vend: the 16 bit USB Vendor ID |
787 | * @prod: the 16 bit USB Product ID | 785 | * @prod: the 16 bit USB Product ID |
788 | * @lo: the bcdDevice_lo value | 786 | * @lo: the bcdDevice_lo value |
@@ -799,8 +797,7 @@ static inline int usb_endpoint_is_isoc_out( | |||
799 | .bcdDevice_hi = (hi) | 797 | .bcdDevice_hi = (hi) |
800 | 798 | ||
801 | /** | 799 | /** |
802 | * USB_DEVICE_INTERFACE_PROTOCOL - macro used to describe a usb | 800 | * USB_DEVICE_INTERFACE_PROTOCOL - describe a usb device with a specific interface protocol |
803 | * device with a specific interface protocol | ||
804 | * @vend: the 16 bit USB Vendor ID | 801 | * @vend: the 16 bit USB Vendor ID |
805 | * @prod: the 16 bit USB Product ID | 802 | * @prod: the 16 bit USB Product ID |
806 | * @pr: bInterfaceProtocol value | 803 | * @pr: bInterfaceProtocol value |
@@ -846,8 +843,7 @@ static inline int usb_endpoint_is_isoc_out( | |||
846 | .bInterfaceProtocol = (pr) | 843 | .bInterfaceProtocol = (pr) |
847 | 844 | ||
848 | /** | 845 | /** |
849 | * USB_DEVICE_AND_INTERFACE_INFO - macro used to describe a specific usb device | 846 | * USB_DEVICE_AND_INTERFACE_INFO - describe a specific usb device with a class of usb interfaces |
850 | * with a class of usb interfaces | ||
851 | * @vend: the 16 bit USB Vendor ID | 847 | * @vend: the 16 bit USB Vendor ID |
852 | * @prod: the 16 bit USB Product ID | 848 | * @prod: the 16 bit USB Product ID |
853 | * @cl: bInterfaceClass value | 849 | * @cl: bInterfaceClass value |
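The shortened kerneldoc one-liners above describe match macros that fill in struct usb_device_id entries. A hedged sketch of a driver ID table using two of them (vendor/product numbers and class values are made up for illustration):

	#include <linux/module.h>
	#include <linux/usb.h>

	static const struct usb_device_id example_id_table[] = {
		/* match one device, but only within a bcdDevice version window */
		{ USB_DEVICE_VER(0x1234, 0x5678, 0x0100, 0x01ff) },
		/* match only when the device exposes this interface class/subclass/protocol */
		{ USB_DEVICE_AND_INTERFACE_INFO(0x1234, 0xabcd,
						USB_CLASS_VENDOR_SPEC, 0x01, 0x01) },
		{ }	/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, example_id_table);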
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 75370ec0923e..9f1b4b46151e 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
@@ -246,8 +246,7 @@ static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) | |||
246 | static inline void __dec_zone_page_state(struct page *page, | 246 | static inline void __dec_zone_page_state(struct page *page, |
247 | enum zone_stat_item item) | 247 | enum zone_stat_item item) |
248 | { | 248 | { |
249 | atomic_long_dec(&page_zone(page)->vm_stat[item]); | 249 | __dec_zone_state(page_zone(page), item); |
250 | atomic_long_dec(&vm_stat[item]); | ||
251 | } | 250 | } |
252 | 251 | ||
253 | /* | 252 | /* |
diff --git a/init/Kconfig b/init/Kconfig index f698a5af5007..074ac97f55e3 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -366,10 +366,29 @@ config RESOURCE_COUNTERS | |||
366 | infrastructure that works with cgroups | 366 | infrastructure that works with cgroups |
367 | depends on CGROUPS | 367 | depends on CGROUPS |
368 | 368 | ||
369 | config CGROUP_MEM_RES_CTLR | ||
370 | bool "Memory Resource Controller for Control Groups" | ||
371 | depends on CGROUPS && RESOURCE_COUNTERS | ||
372 | help | ||
373 | Provides a memory resource controller that manages both page cache and | ||
374 | RSS memory. | ||
375 | |||
376 | Note that setting this option increases fixed memory overhead | ||
377 | associated with each page of memory in the system by 4/8 bytes | ||
378 | and also increases cache misses because struct page on many 64bit | ||
379 | systems will not fit into a single cache line anymore. | ||
380 | |||
381 | Only enable when you're ok with these trade offs and really | ||
382 | sure you need the memory resource controller. | ||
383 | |||
369 | config SYSFS_DEPRECATED | 384 | config SYSFS_DEPRECATED |
385 | bool | ||
386 | |||
387 | config SYSFS_DEPRECATED_V2 | ||
370 | bool "Create deprecated sysfs files" | 388 | bool "Create deprecated sysfs files" |
371 | depends on SYSFS | 389 | depends on SYSFS |
372 | default y | 390 | default y |
391 | select SYSFS_DEPRECATED | ||
373 | help | 392 | help |
374 | This option creates deprecated symlinks such as the | 393 | This option creates deprecated symlinks such as the |
375 | "device"-link, the <subsystem>:<name>-link, and the | 394 | "device"-link, the <subsystem>:<name>-link, and the |
@@ -382,25 +401,11 @@ config SYSFS_DEPRECATED | |||
382 | 401 | ||
383 | If enabled, this option will also move any device structures | 402 | If enabled, this option will also move any device structures |
384 | that belong to a class, back into the /sys/class hierarchy, in | 403 | that belong to a class, back into the /sys/class hierarchy, in |
385 | order to support older versions of udev. | 404 | order to support older versions of udev and some userspace |
386 | 405 | programs. | |
387 | If you are using a distro that was released in 2006 or later, | ||
388 | it should be safe to say N here. | ||
389 | |||
390 | config CGROUP_MEM_CONT | ||
391 | bool "Memory controller for cgroups" | ||
392 | depends on CGROUPS && RESOURCE_COUNTERS | ||
393 | help | ||
394 | Provides a memory controller that manages both page cache and | ||
395 | RSS memory. | ||
396 | 406 | ||
397 | Note that setting this option increases fixed memory overhead | 407 | If you are using a distro with the most recent userspace |
398 | associated with each page of memory in the system by 4/8 bytes | 408 | packages, it should be safe to say N here. |
399 | and also increases cache misses because struct page on many 64bit | ||
400 | systems will not fit into a single cache line anymore. | ||
401 | |||
402 | Only enable when you're ok with these trade offs and really | ||
403 | sure you need the memory controller. | ||
404 | 409 | ||
405 | config PROC_PID_CPUSET | 410 | config PROC_PID_CPUSET |
406 | bool "Include legacy /proc/<pid>/cpuset file" | 411 | bool "Include legacy /proc/<pid>/cpuset file" |
diff --git a/init/main.c b/init/main.c index 8b1982082ad8..fbb0167c6b8a 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -254,7 +254,7 @@ early_param("quiet", quiet_kernel); | |||
254 | static int __init loglevel(char *str) | 254 | static int __init loglevel(char *str) |
255 | { | 255 | { |
256 | get_option(&str, &console_loglevel); | 256 | get_option(&str, &console_loglevel); |
257 | return 1; | 257 | return 0; |
258 | } | 258 | } |
259 | 259 | ||
260 | early_param("loglevel", loglevel); | 260 | early_param("loglevel", loglevel); |
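The loglevel hunk switches the handler to return 0 because early_param() callbacks are expected to report success with 0; a non-zero return makes the early parser warn about a malformed option (unlike __setup() handlers, which return 1 when they consume an argument). A hedged sketch of the convention with an invented parameter:

	#include <linux/init.h>
	#include <linux/kernel.h>

	static int example_threshold __initdata;

	/* Illustrative early_param handler: return 0 on success so the
	 * early parser does not flag the option as malformed. */
	static int __init example_threshold_setup(char *str)
	{
		get_option(&str, &example_threshold);
		return 0;
	}
	early_param("example_threshold", example_threshold_setup);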
diff --git a/kernel/audit.c b/kernel/audit.c index 2eeea9a14240..10c4930c2bbf 100644 --- a/kernel/audit.c +++ b/kernel/audit.c | |||
@@ -170,7 +170,9 @@ void audit_panic(const char *message) | |||
170 | printk(KERN_ERR "audit: %s\n", message); | 170 | printk(KERN_ERR "audit: %s\n", message); |
171 | break; | 171 | break; |
172 | case AUDIT_FAIL_PANIC: | 172 | case AUDIT_FAIL_PANIC: |
173 | panic("audit: %s\n", message); | 173 | /* test audit_pid since printk is always lossy, why bother? */ |
174 | if (audit_pid) | ||
175 | panic("audit: %s\n", message); | ||
174 | break; | 176 | break; |
175 | } | 177 | } |
176 | } | 178 | } |
@@ -352,6 +354,7 @@ static int kauditd_thread(void *dummy) | |||
352 | if (err < 0) { | 354 | if (err < 0) { |
353 | BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */ | 355 | BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */ |
354 | printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid); | 356 | printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid); |
357 | audit_log_lost("auditd disappeared\n"); | ||
355 | audit_pid = 0; | 358 | audit_pid = 0; |
356 | } | 359 | } |
357 | } else { | 360 | } else { |
@@ -1350,17 +1353,19 @@ void audit_log_end(struct audit_buffer *ab) | |||
1350 | if (!audit_rate_check()) { | 1353 | if (!audit_rate_check()) { |
1351 | audit_log_lost("rate limit exceeded"); | 1354 | audit_log_lost("rate limit exceeded"); |
1352 | } else { | 1355 | } else { |
1356 | struct nlmsghdr *nlh = nlmsg_hdr(ab->skb); | ||
1353 | if (audit_pid) { | 1357 | if (audit_pid) { |
1354 | struct nlmsghdr *nlh = nlmsg_hdr(ab->skb); | ||
1355 | nlh->nlmsg_len = ab->skb->len - NLMSG_SPACE(0); | 1358 | nlh->nlmsg_len = ab->skb->len - NLMSG_SPACE(0); |
1356 | skb_queue_tail(&audit_skb_queue, ab->skb); | 1359 | skb_queue_tail(&audit_skb_queue, ab->skb); |
1357 | ab->skb = NULL; | 1360 | ab->skb = NULL; |
1358 | wake_up_interruptible(&kauditd_wait); | 1361 | wake_up_interruptible(&kauditd_wait); |
1359 | } else if (printk_ratelimit()) { | 1362 | } else if (nlh->nlmsg_type != AUDIT_EOE) { |
1360 | struct nlmsghdr *nlh = nlmsg_hdr(ab->skb); | 1363 | if (printk_ratelimit()) { |
1361 | printk(KERN_NOTICE "type=%d %s\n", nlh->nlmsg_type, ab->skb->data + NLMSG_SPACE(0)); | 1364 | printk(KERN_NOTICE "type=%d %s\n", |
1362 | } else { | 1365 | nlh->nlmsg_type, |
1363 | audit_log_lost("printk limit exceeded\n"); | 1366 | ab->skb->data + NLMSG_SPACE(0)); |
1367 | } else | ||
1368 | audit_log_lost("printk limit exceeded\n"); | ||
1364 | } | 1369 | } |
1365 | } | 1370 | } |
1366 | audit_buffer_free(ab); | 1371 | audit_buffer_free(ab); |
diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 2087d6de67ea..782262e4107d 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c | |||
@@ -1070,7 +1070,7 @@ static int audit_log_single_execve_arg(struct audit_context *context, | |||
1070 | * so we can be sure nothing was lost. | 1070 | * so we can be sure nothing was lost. |
1071 | */ | 1071 | */ |
1072 | if ((i == 0) && (too_long)) | 1072 | if ((i == 0) && (too_long)) |
1073 | audit_log_format(*ab, "a%d_len=%ld ", arg_num, | 1073 | audit_log_format(*ab, "a%d_len=%zu ", arg_num, |
1074 | has_cntl ? 2*len : len); | 1074 | has_cntl ? 2*len : len); |
1075 | 1075 | ||
1076 | /* | 1076 | /* |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index d8abe996e009..e9c2fb01e89b 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -2232,7 +2232,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | |||
2232 | 2232 | ||
2233 | mutex_lock(&cgroup_mutex); | 2233 | mutex_lock(&cgroup_mutex); |
2234 | 2234 | ||
2235 | cgrp->flags = 0; | ||
2236 | INIT_LIST_HEAD(&cgrp->sibling); | 2235 | INIT_LIST_HEAD(&cgrp->sibling); |
2237 | INIT_LIST_HEAD(&cgrp->children); | 2236 | INIT_LIST_HEAD(&cgrp->children); |
2238 | INIT_LIST_HEAD(&cgrp->css_sets); | 2237 | INIT_LIST_HEAD(&cgrp->css_sets); |
@@ -2242,6 +2241,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | |||
2242 | cgrp->root = parent->root; | 2241 | cgrp->root = parent->root; |
2243 | cgrp->top_cgroup = parent->top_cgroup; | 2242 | cgrp->top_cgroup = parent->top_cgroup; |
2244 | 2243 | ||
2244 | if (notify_on_release(parent)) | ||
2245 | set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); | ||
2246 | |||
2245 | for_each_subsys(root, ss) { | 2247 | for_each_subsys(root, ss) { |
2246 | struct cgroup_subsys_state *css = ss->create(ss, cgrp); | 2248 | struct cgroup_subsys_state *css = ss->create(ss, cgrp); |
2247 | if (IS_ERR(css)) { | 2249 | if (IS_ERR(css)) { |
diff --git a/kernel/exit.c b/kernel/exit.c index 506a957b665a..cd20bf07e9e3 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -214,20 +214,19 @@ struct pid *session_of_pgrp(struct pid *pgrp) | |||
214 | static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task) | 214 | static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task) |
215 | { | 215 | { |
216 | struct task_struct *p; | 216 | struct task_struct *p; |
217 | int ret = 1; | ||
218 | 217 | ||
219 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { | 218 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { |
220 | if (p == ignored_task | 219 | if ((p == ignored_task) || |
221 | || p->exit_state | 220 | (p->exit_state && thread_group_empty(p)) || |
222 | || is_global_init(p->real_parent)) | 221 | is_global_init(p->real_parent)) |
223 | continue; | 222 | continue; |
223 | |||
224 | if (task_pgrp(p->real_parent) != pgrp && | 224 | if (task_pgrp(p->real_parent) != pgrp && |
225 | task_session(p->real_parent) == task_session(p)) { | 225 | task_session(p->real_parent) == task_session(p)) |
226 | ret = 0; | 226 | return 0; |
227 | break; | ||
228 | } | ||
229 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); | 227 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); |
230 | return ret; /* (sighing) "Often!" */ | 228 | |
229 | return 1; | ||
231 | } | 230 | } |
232 | 231 | ||
233 | int is_current_pgrp_orphaned(void) | 232 | int is_current_pgrp_orphaned(void) |
@@ -255,6 +254,37 @@ static int has_stopped_jobs(struct pid *pgrp) | |||
255 | return retval; | 254 | return retval; |
256 | } | 255 | } |
257 | 256 | ||
257 | /* | ||
258 | * Check to see if any process groups have become orphaned as | ||
259 | * a result of our exiting, and if they have any stopped jobs, | ||
260 | * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) | ||
261 | */ | ||
262 | static void | ||
263 | kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent) | ||
264 | { | ||
265 | struct pid *pgrp = task_pgrp(tsk); | ||
266 | struct task_struct *ignored_task = tsk; | ||
267 | |||
268 | if (!parent) | ||
269 | /* exit: our father is in a different pgrp than | ||
270 | * we are and we were the only connection outside. | ||
271 | */ | ||
272 | parent = tsk->real_parent; | ||
273 | else | ||
274 | /* reparent: our child is in a different pgrp than | ||
275 | * we are, and it was the only connection outside. | ||
276 | */ | ||
277 | ignored_task = NULL; | ||
278 | |||
279 | if (task_pgrp(parent) != pgrp && | ||
280 | task_session(parent) == task_session(tsk) && | ||
281 | will_become_orphaned_pgrp(pgrp, ignored_task) && | ||
282 | has_stopped_jobs(pgrp)) { | ||
283 | __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp); | ||
284 | __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp); | ||
285 | } | ||
286 | } | ||
287 | |||
258 | /** | 288 | /** |
259 | * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd | 289 | * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd |
260 | * | 290 | * |
@@ -635,22 +665,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced) | |||
635 | p->exit_signal != -1 && thread_group_empty(p)) | 665 | p->exit_signal != -1 && thread_group_empty(p)) |
636 | do_notify_parent(p, p->exit_signal); | 666 | do_notify_parent(p, p->exit_signal); |
637 | 667 | ||
638 | /* | 668 | kill_orphaned_pgrp(p, father); |
639 | * process group orphan check | ||
640 | * Case ii: Our child is in a different pgrp | ||
641 | * than we are, and it was the only connection | ||
642 | * outside, so the child pgrp is now orphaned. | ||
643 | */ | ||
644 | if ((task_pgrp(p) != task_pgrp(father)) && | ||
645 | (task_session(p) == task_session(father))) { | ||
646 | struct pid *pgrp = task_pgrp(p); | ||
647 | |||
648 | if (will_become_orphaned_pgrp(pgrp, NULL) && | ||
649 | has_stopped_jobs(pgrp)) { | ||
650 | __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp); | ||
651 | __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp); | ||
652 | } | ||
653 | } | ||
654 | } | 669 | } |
655 | 670 | ||
656 | /* | 671 | /* |
@@ -735,11 +750,9 @@ static void forget_original_parent(struct task_struct *father) | |||
735 | * Send signals to all our closest relatives so that they know | 750 | * Send signals to all our closest relatives so that they know |
736 | * to properly mourn us.. | 751 | * to properly mourn us.. |
737 | */ | 752 | */ |
738 | static void exit_notify(struct task_struct *tsk) | 753 | static void exit_notify(struct task_struct *tsk, int group_dead) |
739 | { | 754 | { |
740 | int state; | 755 | int state; |
741 | struct task_struct *t; | ||
742 | struct pid *pgrp; | ||
743 | 756 | ||
744 | /* | 757 | /* |
745 | * This does two things: | 758 | * This does two things: |
@@ -753,25 +766,8 @@ static void exit_notify(struct task_struct *tsk) | |||
753 | exit_task_namespaces(tsk); | 766 | exit_task_namespaces(tsk); |
754 | 767 | ||
755 | write_lock_irq(&tasklist_lock); | 768 | write_lock_irq(&tasklist_lock); |
756 | /* | 769 | if (group_dead) |
757 | * Check to see if any process groups have become orphaned | 770 | kill_orphaned_pgrp(tsk->group_leader, NULL); |
758 | * as a result of our exiting, and if they have any stopped | ||
759 | * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) | ||
760 | * | ||
761 | * Case i: Our father is in a different pgrp than we are | ||
762 | * and we were the only connection outside, so our pgrp | ||
763 | * is about to become orphaned. | ||
764 | */ | ||
765 | t = tsk->real_parent; | ||
766 | |||
767 | pgrp = task_pgrp(tsk); | ||
768 | if ((task_pgrp(t) != pgrp) && | ||
769 | (task_session(t) == task_session(tsk)) && | ||
770 | will_become_orphaned_pgrp(pgrp, tsk) && | ||
771 | has_stopped_jobs(pgrp)) { | ||
772 | __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp); | ||
773 | __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp); | ||
774 | } | ||
775 | 771 | ||
776 | /* Let father know we died | 772 | /* Let father know we died |
777 | * | 773 | * |
@@ -788,8 +784,8 @@ static void exit_notify(struct task_struct *tsk) | |||
788 | * the same after a fork. | 784 | * the same after a fork. |
789 | */ | 785 | */ |
790 | if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 && | 786 | if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 && |
791 | ( tsk->parent_exec_id != t->self_exec_id || | 787 | (tsk->parent_exec_id != tsk->real_parent->self_exec_id || |
792 | tsk->self_exec_id != tsk->parent_exec_id) | 788 | tsk->self_exec_id != tsk->parent_exec_id) |
793 | && !capable(CAP_KILL)) | 789 | && !capable(CAP_KILL)) |
794 | tsk->exit_signal = SIGCHLD; | 790 | tsk->exit_signal = SIGCHLD; |
795 | 791 | ||
@@ -986,7 +982,7 @@ NORET_TYPE void do_exit(long code) | |||
986 | module_put(tsk->binfmt->module); | 982 | module_put(tsk->binfmt->module); |
987 | 983 | ||
988 | proc_exit_connector(tsk); | 984 | proc_exit_connector(tsk); |
989 | exit_notify(tsk); | 985 | exit_notify(tsk, group_dead); |
990 | #ifdef CONFIG_NUMA | 986 | #ifdef CONFIG_NUMA |
991 | mpol_free(tsk->mempolicy); | 987 | mpol_free(tsk->mempolicy); |
992 | tsk->mempolicy = NULL; | 988 | tsk->mempolicy = NULL; |
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 7a86e6432338..fcfb580c3afc 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -498,27 +498,36 @@ static int __kprobes in_kprobes_functions(unsigned long addr) | |||
498 | return 0; | 498 | return 0; |
499 | } | 499 | } |
500 | 500 | ||
501 | /* | ||
502 | * If we have a symbol_name argument, look it up and add the offset field | ||
503 | * to it. This way, we can specify a relative address to a symbol. | ||
504 | */ | ||
505 | static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p) | ||
506 | { | ||
507 | kprobe_opcode_t *addr = p->addr; | ||
508 | if (p->symbol_name) { | ||
509 | if (addr) | ||
510 | return NULL; | ||
511 | kprobe_lookup_name(p->symbol_name, addr); | ||
512 | } | ||
513 | |||
514 | if (!addr) | ||
515 | return NULL; | ||
516 | return (kprobe_opcode_t *)(((char *)addr) + p->offset); | ||
517 | } | ||
518 | |||
501 | static int __kprobes __register_kprobe(struct kprobe *p, | 519 | static int __kprobes __register_kprobe(struct kprobe *p, |
502 | unsigned long called_from) | 520 | unsigned long called_from) |
503 | { | 521 | { |
504 | int ret = 0; | 522 | int ret = 0; |
505 | struct kprobe *old_p; | 523 | struct kprobe *old_p; |
506 | struct module *probed_mod; | 524 | struct module *probed_mod; |
525 | kprobe_opcode_t *addr; | ||
507 | 526 | ||
508 | /* | 527 | addr = kprobe_addr(p); |
509 | * If we have a symbol_name argument look it up, | 528 | if (!addr) |
510 | * and add it to the address. That way the addr | ||
511 | * field can either be global or relative to a symbol. | ||
512 | */ | ||
513 | if (p->symbol_name) { | ||
514 | if (p->addr) | ||
515 | return -EINVAL; | ||
516 | kprobe_lookup_name(p->symbol_name, p->addr); | ||
517 | } | ||
518 | |||
519 | if (!p->addr) | ||
520 | return -EINVAL; | 529 | return -EINVAL; |
521 | p->addr = (kprobe_opcode_t *)(((char *)p->addr)+ p->offset); | 530 | p->addr = addr; |
522 | 531 | ||
523 | if (!kernel_text_address((unsigned long) p->addr) || | 532 | if (!kernel_text_address((unsigned long) p->addr) || |
524 | in_kprobes_functions((unsigned long) p->addr)) | 533 | in_kprobes_functions((unsigned long) p->addr)) |
@@ -678,8 +687,7 @@ void __kprobes unregister_jprobe(struct jprobe *jp) | |||
678 | unregister_kprobe(&jp->kp); | 687 | unregister_kprobe(&jp->kp); |
679 | } | 688 | } |
680 | 689 | ||
681 | #ifdef ARCH_SUPPORTS_KRETPROBES | 690 | #ifdef CONFIG_KRETPROBES |
682 | |||
683 | /* | 691 | /* |
684 | * This kprobe pre_handler is registered with every kretprobe. When probe | 692 | * This kprobe pre_handler is registered with every kretprobe. When probe |
685 | * hits it will set up the return probe. | 693 | * hits it will set up the return probe. |
@@ -722,12 +730,12 @@ int __kprobes register_kretprobe(struct kretprobe *rp) | |||
722 | int ret = 0; | 730 | int ret = 0; |
723 | struct kretprobe_instance *inst; | 731 | struct kretprobe_instance *inst; |
724 | int i; | 732 | int i; |
725 | void *addr = rp->kp.addr; | 733 | void *addr; |
726 | 734 | ||
727 | if (kretprobe_blacklist_size) { | 735 | if (kretprobe_blacklist_size) { |
728 | if (addr == NULL) | 736 | addr = kprobe_addr(&rp->kp); |
729 | kprobe_lookup_name(rp->kp.symbol_name, addr); | 737 | if (!addr) |
730 | addr += rp->kp.offset; | 738 | return -EINVAL; |
731 | 739 | ||
732 | for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { | 740 | for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { |
733 | if (kretprobe_blacklist[i].addr == addr) | 741 | if (kretprobe_blacklist[i].addr == addr) |
@@ -769,8 +777,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp) | |||
769 | return ret; | 777 | return ret; |
770 | } | 778 | } |
771 | 779 | ||
772 | #else /* ARCH_SUPPORTS_KRETPROBES */ | 780 | #else /* CONFIG_KRETPROBES */ |
773 | |||
774 | int __kprobes register_kretprobe(struct kretprobe *rp) | 781 | int __kprobes register_kretprobe(struct kretprobe *rp) |
775 | { | 782 | { |
776 | return -ENOSYS; | 783 | return -ENOSYS; |
@@ -781,8 +788,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p, | |||
781 | { | 788 | { |
782 | return 0; | 789 | return 0; |
783 | } | 790 | } |
784 | 791 | #endif /* CONFIG_KRETPROBES */ | |
785 | #endif /* ARCH_SUPPORTS_KRETPROBES */ | ||
786 | 792 | ||
787 | void __kprobes unregister_kretprobe(struct kretprobe *rp) | 793 | void __kprobes unregister_kretprobe(struct kretprobe *rp) |
788 | { | 794 | { |
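With kprobe_addr() factored out above, register_kprobe() and register_kretprobe() resolve the probe location the same way: either an explicit address, or a symbol_name plus offset, never both. A hedged usage sketch of symbol-based registration (the probed symbol and handler are examples, not part of this patch):

	#include <linux/kprobes.h>

	static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		/* runs just before the probed instruction executes */
		return 0;
	}

	static struct kprobe example_kp = {
		.symbol_name	= "do_fork",	/* resolved via kprobe_lookup_name() */
		.offset		= 0,		/* optional offset from that symbol */
		.pre_handler	= example_pre_handler,
		/* .addr must stay NULL when symbol_name is set, otherwise
		 * __register_kprobe() now returns -EINVAL via kprobe_addr(). */
	};

	/* in module init:  ret = register_kprobe(&example_kp);  */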
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 3574379f4d62..81a4e4a3f087 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -779,6 +779,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
779 | * parallel walking of the hash-list safe: | 779 | * parallel walking of the hash-list safe: |
780 | */ | 780 | */ |
781 | list_add_tail_rcu(&class->hash_entry, hash_head); | 781 | list_add_tail_rcu(&class->hash_entry, hash_head); |
782 | /* | ||
783 | * Add it to the global list of classes: | ||
784 | */ | ||
785 | list_add_tail_rcu(&class->lock_entry, &all_lock_classes); | ||
782 | 786 | ||
783 | if (verbose(class)) { | 787 | if (verbose(class)) { |
784 | graph_unlock(); | 788 | graph_unlock(); |
@@ -2282,10 +2286,6 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, | |||
2282 | return 0; | 2286 | return 0; |
2283 | break; | 2287 | break; |
2284 | case LOCK_USED: | 2288 | case LOCK_USED: |
2285 | /* | ||
2286 | * Add it to the global list of classes: | ||
2287 | */ | ||
2288 | list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes); | ||
2289 | debug_atomic_dec(&nr_unused_locks); | 2289 | debug_atomic_dec(&nr_unused_locks); |
2290 | break; | 2290 | break; |
2291 | default: | 2291 | default: |
diff --git a/kernel/marker.c b/kernel/marker.c index 50effc01d9a2..48a4ea5afffd 100644 --- a/kernel/marker.c +++ b/kernel/marker.c | |||
@@ -698,14 +698,12 @@ int marker_probe_unregister(const char *name, | |||
698 | { | 698 | { |
699 | struct marker_entry *entry; | 699 | struct marker_entry *entry; |
700 | struct marker_probe_closure *old; | 700 | struct marker_probe_closure *old; |
701 | int ret = 0; | 701 | int ret = -ENOENT; |
702 | 702 | ||
703 | mutex_lock(&markers_mutex); | 703 | mutex_lock(&markers_mutex); |
704 | entry = get_marker(name); | 704 | entry = get_marker(name); |
705 | if (!entry) { | 705 | if (!entry) |
706 | ret = -ENOENT; | ||
707 | goto end; | 706 | goto end; |
708 | } | ||
709 | if (entry->rcu_pending) | 707 | if (entry->rcu_pending) |
710 | rcu_barrier(); | 708 | rcu_barrier(); |
711 | old = marker_entry_remove_probe(entry, probe, probe_private); | 709 | old = marker_entry_remove_probe(entry, probe, probe_private); |
@@ -713,12 +711,15 @@ int marker_probe_unregister(const char *name, | |||
713 | marker_update_probes(); /* may update entry */ | 711 | marker_update_probes(); /* may update entry */ |
714 | mutex_lock(&markers_mutex); | 712 | mutex_lock(&markers_mutex); |
715 | entry = get_marker(name); | 713 | entry = get_marker(name); |
714 | if (!entry) | ||
715 | goto end; | ||
716 | entry->oldptr = old; | 716 | entry->oldptr = old; |
717 | entry->rcu_pending = 1; | 717 | entry->rcu_pending = 1; |
718 | /* write rcu_pending before calling the RCU callback */ | 718 | /* write rcu_pending before calling the RCU callback */ |
719 | smp_wmb(); | 719 | smp_wmb(); |
720 | call_rcu(&entry->rcu, free_old_closure); | 720 | call_rcu(&entry->rcu, free_old_closure); |
721 | remove_marker(name); /* Ignore busy error message */ | 721 | remove_marker(name); /* Ignore busy error message */ |
722 | ret = 0; | ||
722 | end: | 723 | end: |
723 | mutex_unlock(&markers_mutex); | 724 | mutex_unlock(&markers_mutex); |
724 | return ret; | 725 | return ret; |
diff --git a/kernel/power/process.c b/kernel/power/process.c index 7c2118f9597f..f1d0b345c9ba 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
@@ -75,22 +75,15 @@ void refrigerator(void) | |||
75 | __set_current_state(save); | 75 | __set_current_state(save); |
76 | } | 76 | } |
77 | 77 | ||
78 | static void fake_signal_wake_up(struct task_struct *p, int resume) | 78 | static void fake_signal_wake_up(struct task_struct *p) |
79 | { | 79 | { |
80 | unsigned long flags; | 80 | unsigned long flags; |
81 | 81 | ||
82 | spin_lock_irqsave(&p->sighand->siglock, flags); | 82 | spin_lock_irqsave(&p->sighand->siglock, flags); |
83 | signal_wake_up(p, resume); | 83 | signal_wake_up(p, 0); |
84 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | 84 | spin_unlock_irqrestore(&p->sighand->siglock, flags); |
85 | } | 85 | } |
86 | 86 | ||
87 | static void send_fake_signal(struct task_struct *p) | ||
88 | { | ||
89 | if (task_is_stopped(p)) | ||
90 | force_sig_specific(SIGSTOP, p); | ||
91 | fake_signal_wake_up(p, task_is_stopped(p)); | ||
92 | } | ||
93 | |||
94 | static int has_mm(struct task_struct *p) | 87 | static int has_mm(struct task_struct *p) |
95 | { | 88 | { |
96 | return (p->mm && !(p->flags & PF_BORROWED_MM)); | 89 | return (p->mm && !(p->flags & PF_BORROWED_MM)); |
@@ -121,7 +114,7 @@ static int freeze_task(struct task_struct *p, int with_mm_only) | |||
121 | if (freezing(p)) { | 114 | if (freezing(p)) { |
122 | if (has_mm(p)) { | 115 | if (has_mm(p)) { |
123 | if (!signal_pending(p)) | 116 | if (!signal_pending(p)) |
124 | fake_signal_wake_up(p, 0); | 117 | fake_signal_wake_up(p); |
125 | } else { | 118 | } else { |
126 | if (with_mm_only) | 119 | if (with_mm_only) |
127 | ret = 0; | 120 | ret = 0; |
@@ -135,7 +128,7 @@ static int freeze_task(struct task_struct *p, int with_mm_only) | |||
135 | } else { | 128 | } else { |
136 | if (has_mm(p)) { | 129 | if (has_mm(p)) { |
137 | set_freeze_flag(p); | 130 | set_freeze_flag(p); |
138 | send_fake_signal(p); | 131 | fake_signal_wake_up(p); |
139 | } else { | 132 | } else { |
140 | if (with_mm_only) { | 133 | if (with_mm_only) { |
141 | ret = 0; | 134 | ret = 0; |
@@ -182,15 +175,17 @@ static int try_to_freeze_tasks(int freeze_user_space) | |||
182 | if (frozen(p) || !freezeable(p)) | 175 | if (frozen(p) || !freezeable(p)) |
183 | continue; | 176 | continue; |
184 | 177 | ||
185 | if (task_is_traced(p) && frozen(p->parent)) { | ||
186 | cancel_freezing(p); | ||
187 | continue; | ||
188 | } | ||
189 | |||
190 | if (!freeze_task(p, freeze_user_space)) | 178 | if (!freeze_task(p, freeze_user_space)) |
191 | continue; | 179 | continue; |
192 | 180 | ||
193 | if (!freezer_should_skip(p)) | 181 | /* |
182 | * Now that we've done set_freeze_flag, don't | ||
183 | * perturb a task in TASK_STOPPED or TASK_TRACED. | ||
184 | * It is "frozen enough". If the task does wake | ||
185 | * up, it will immediately call try_to_freeze. | ||
186 | */ | ||
187 | if (!task_is_stopped_or_traced(p) && | ||
188 | !freezer_should_skip(p)) | ||
194 | todo++; | 189 | todo++; |
195 | } while_each_thread(g, p); | 190 | } while_each_thread(g, p); |
196 | read_unlock(&tasklist_lock); | 191 | read_unlock(&tasklist_lock); |
diff --git a/kernel/printk.c b/kernel/printk.c index bee36100f110..9adc2a473e6e 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -666,7 +666,7 @@ asmlinkage int vprintk(const char *fmt, va_list args) | |||
666 | } | 666 | } |
667 | /* Emit the output into the temporary buffer */ | 667 | /* Emit the output into the temporary buffer */ |
668 | printed_len += vscnprintf(printk_buf + printed_len, | 668 | printed_len += vscnprintf(printk_buf + printed_len, |
669 | sizeof(printk_buf), fmt, args); | 669 | sizeof(printk_buf) - printed_len, fmt, args); |
670 | 670 | ||
671 | /* | 671 | /* |
672 | * Copy the output into log_buf. If the caller didn't provide | 672 | * Copy the output into log_buf. If the caller didn't provide |
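The printk.c change fixes a classic append-into-a-fixed-buffer bug: once printed_len bytes are already in printk_buf, the space offered to vscnprintf() has to shrink by the same amount, or a long message can run past the end of the buffer. A hedged, self-contained sketch of the pattern (buffer name and contents are illustrative):

	#include <linux/kernel.h>

	/* Kernel-context sketch of the append pattern fixed above. */
	static void append_sketch(void)
	{
		char buf[256];
		int len = 0;

		/* each call is only offered the space that is still left */
		len += scnprintf(buf + len, sizeof(buf) - len, "<%d>", 4);
		len += scnprintf(buf + len, sizeof(buf) - len, "%s\n", "example");

		/* Passing sizeof(buf) to the second call, as the old code did
		 * with printk_buf, would let it write past the end once len > 0. */
	}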
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c index 987cfb7ade89..e9517014b57c 100644 --- a/kernel/rcupreempt.c +++ b/kernel/rcupreempt.c | |||
@@ -23,6 +23,10 @@ | |||
23 | * to Suparna Bhattacharya for pushing me completely away | 23 | * to Suparna Bhattacharya for pushing me completely away |
24 | * from atomic instructions on the read side. | 24 | * from atomic instructions on the read side. |
25 | * | 25 | * |
26 | * - Added handling of Dynamic Ticks | ||
27 | * Copyright 2007 - Paul E. Mckenney <paulmck@us.ibm.com> | ||
28 | * - Steven Rostedt <srostedt@redhat.com> | ||
29 | * | ||
26 | * Papers: http://www.rdrop.com/users/paulmck/RCU | 30 | * Papers: http://www.rdrop.com/users/paulmck/RCU |
27 | * | 31 | * |
28 | * Design Document: http://lwn.net/Articles/253651/ | 32 | * Design Document: http://lwn.net/Articles/253651/ |
@@ -409,6 +413,212 @@ static void __rcu_advance_callbacks(struct rcu_data *rdp) | |||
409 | } | 413 | } |
410 | } | 414 | } |
411 | 415 | ||
416 | #ifdef CONFIG_NO_HZ | ||
417 | |||
418 | DEFINE_PER_CPU(long, dynticks_progress_counter) = 1; | ||
419 | static DEFINE_PER_CPU(long, rcu_dyntick_snapshot); | ||
420 | static DEFINE_PER_CPU(int, rcu_update_flag); | ||
421 | |||
422 | /** | ||
423 | * rcu_irq_enter - Called from Hard irq handlers and NMI/SMI. | ||
424 | * | ||
425 | * If the CPU was idle with dynamic ticks active, this updates the | ||
426 | * dynticks_progress_counter to let the RCU handling know that the | ||
427 | * CPU is active. | ||
428 | */ | ||
429 | void rcu_irq_enter(void) | ||
430 | { | ||
431 | int cpu = smp_processor_id(); | ||
432 | |||
433 | if (per_cpu(rcu_update_flag, cpu)) | ||
434 | per_cpu(rcu_update_flag, cpu)++; | ||
435 | |||
436 | /* | ||
437 | * Only update if we are coming from a stopped ticks mode | ||
438 | * (dynticks_progress_counter is even). | ||
439 | */ | ||
440 | if (!in_interrupt() && | ||
441 | (per_cpu(dynticks_progress_counter, cpu) & 0x1) == 0) { | ||
442 | /* | ||
443 | * The following might seem like we could have a race | ||
444 | * with NMI/SMIs. But this really isn't a problem. | ||
445 | * Here we do a read/modify/write, and the race happens | ||
446 | * when an NMI/SMI comes in after the read and before | ||
447 | * the write. But NMI/SMIs will increment this counter | ||
448 | * twice before returning, so the zero bit will not | ||
449 | * be corrupted by the NMI/SMI which is the most important | ||
450 | * part. | ||
451 | * | ||
452 | * The only thing is that we would bring back the counter | ||
453 | * to a position that it was in during the NMI/SMI. | ||
454 | * But the zero bit would be set, so the rest of the | ||
455 | * counter would again be ignored. | ||
456 | * | ||
457 | * On return from the IRQ, the counter may have the zero | ||
458 | * bit be 0 and the counter the same as the return from | ||
459 | * the NMI/SMI. If the state machine was so unlucky as to | ||
460 | * see that, it still doesn't matter, since all | ||
461 | * RCU read-side critical sections on this CPU would | ||
462 | * have already completed. | ||
463 | */ | ||
464 | per_cpu(dynticks_progress_counter, cpu)++; | ||
465 | /* | ||
466 | * The following memory barrier ensures that any | ||
467 | * rcu_read_lock() primitives in the irq handler | ||
468 | * are seen by other CPUs to follow the above | ||
469 | * increment to dynticks_progress_counter. This is | ||
470 | * required in order for other CPUs to correctly | ||
471 | * determine when it is safe to advance the RCU | ||
472 | * grace-period state machine. | ||
473 | */ | ||
474 | smp_mb(); /* see above block comment. */ | ||
475 | /* | ||
476 | * Since we can't determine the dynamic tick mode from | ||
477 | * the dynticks_progress_counter after this routine, | ||
478 | * we use a second flag to acknowledge that we came | ||
479 | * from an idle state with ticks stopped. | ||
480 | */ | ||
481 | per_cpu(rcu_update_flag, cpu)++; | ||
482 | /* | ||
483 | * If we take an NMI/SMI now, they will also increment | ||
484 | * the rcu_update_flag, and will not update the | ||
485 | * dynticks_progress_counter on exit. That is for | ||
486 | * this IRQ to do. | ||
487 | */ | ||
488 | } | ||
489 | } | ||
490 | |||
491 | /** | ||
492 | * rcu_irq_exit - Called from exiting Hard irq context. | ||
493 | * | ||
494 | * If the CPU was idle with dynamic ticks active, update the | ||
495 | * dynticks_progress_counter to let the RCU handling be | ||
496 | * aware that the CPU is going back to idle with no ticks. | ||
497 | */ | ||
498 | void rcu_irq_exit(void) | ||
499 | { | ||
500 | int cpu = smp_processor_id(); | ||
501 | |||
502 | /* | ||
503 | * rcu_update_flag is set if we interrupted the CPU | ||
504 | * when it was idle with ticks stopped. | ||
505 | * Once this occurs, we keep track of interrupt nesting | ||
506 | * because a NMI/SMI could also come in, and we still | ||
507 | * only want the IRQ that started the increment of the | ||
508 | * dynticks_progress_counter to be the one that modifies | ||
509 | * it on exit. | ||
510 | */ | ||
511 | if (per_cpu(rcu_update_flag, cpu)) { | ||
512 | if (--per_cpu(rcu_update_flag, cpu)) | ||
513 | return; | ||
514 | |||
515 | /* This must match the interrupt nesting */ | ||
516 | WARN_ON(in_interrupt()); | ||
517 | |||
518 | /* | ||
519 | * If an NMI/SMI happens now we are still | ||
520 | * protected by the dynticks_progress_counter being odd. | ||
521 | */ | ||
522 | |||
523 | /* | ||
524 | * The following memory barrier ensures that any | ||
525 | * rcu_read_unlock() primitives in the irq handler | ||
526 | * are seen by other CPUs to precede the following | ||
527 | * increment to dynticks_progress_counter. This | ||
528 | * is required in order for other CPUs to determine | ||
529 | * when it is safe to advance the RCU grace-period | ||
530 | * state machine. | ||
531 | */ | ||
532 | smp_mb(); /* see above block comment. */ | ||
533 | per_cpu(dynticks_progress_counter, cpu)++; | ||
534 | WARN_ON(per_cpu(dynticks_progress_counter, cpu) & 0x1); | ||
535 | } | ||
536 | } | ||
537 | |||
538 | static void dyntick_save_progress_counter(int cpu) | ||
539 | { | ||
540 | per_cpu(rcu_dyntick_snapshot, cpu) = | ||
541 | per_cpu(dynticks_progress_counter, cpu); | ||
542 | } | ||
543 | |||
544 | static inline int | ||
545 | rcu_try_flip_waitack_needed(int cpu) | ||
546 | { | ||
547 | long curr; | ||
548 | long snap; | ||
549 | |||
550 | curr = per_cpu(dynticks_progress_counter, cpu); | ||
551 | snap = per_cpu(rcu_dyntick_snapshot, cpu); | ||
552 | smp_mb(); /* force ordering with cpu entering/leaving dynticks. */ | ||
553 | |||
554 | /* | ||
555 | * If the CPU remained in dynticks mode for the entire time | ||
556 | * and didn't take any interrupts, NMIs, SMIs, or whatever, | ||
557 | * then it cannot be in the middle of an rcu_read_lock(), so | ||
558 | * the next rcu_read_lock() it executes must use the new value | ||
559 | * of the counter. So we can safely pretend that this CPU | ||
560 | * already acknowledged the counter. | ||
561 | */ | ||
562 | |||
563 | if ((curr == snap) && ((curr & 0x1) == 0)) | ||
564 | return 0; | ||
565 | |||
566 | /* | ||
567 | * If the CPU passed through or entered a dynticks idle phase with | ||
568 | * no active irq handlers, then, as above, we can safely pretend | ||
569 | * that this CPU already acknowledged the counter. | ||
570 | */ | ||
571 | |||
572 | if ((curr - snap) > 2 || (snap & 0x1) == 0) | ||
573 | return 0; | ||
574 | |||
575 | /* We need this CPU to explicitly acknowledge the counter flip. */ | ||
576 | |||
577 | return 1; | ||
578 | } | ||
579 | |||
580 | static inline int | ||
581 | rcu_try_flip_waitmb_needed(int cpu) | ||
582 | { | ||
583 | long curr; | ||
584 | long snap; | ||
585 | |||
586 | curr = per_cpu(dynticks_progress_counter, cpu); | ||
587 | snap = per_cpu(rcu_dyntick_snapshot, cpu); | ||
588 | smp_mb(); /* force ordering with cpu entering/leaving dynticks. */ | ||
589 | |||
590 | /* | ||
591 | * If the CPU remained in dynticks mode for the entire time | ||
592 | * and didn't take any interrupts, NMIs, SMIs, or whatever, | ||
593 | * then it cannot have executed an RCU read-side critical section | ||
594 | * during that time, so there is no need for it to execute a | ||
595 | * memory barrier. | ||
596 | */ | ||
597 | |||
598 | if ((curr == snap) && ((curr & 0x1) == 0)) | ||
599 | return 0; | ||
600 | |||
601 | /* | ||
602 | * If the CPU either entered or exited an outermost interrupt, | ||
603 | * SMI, NMI, or whatever handler, then we know that it executed | ||
604 | * a memory barrier when doing so. So we don't need another one. | ||
605 | */ | ||
606 | if (curr != snap) | ||
607 | return 0; | ||
608 | |||
609 | /* We need the CPU to execute a memory barrier. */ | ||
610 | |||
611 | return 1; | ||
612 | } | ||
613 | |||
614 | #else /* !CONFIG_NO_HZ */ | ||
615 | |||
616 | # define dyntick_save_progress_counter(cpu) do { } while (0) | ||
617 | # define rcu_try_flip_waitack_needed(cpu) (1) | ||
618 | # define rcu_try_flip_waitmb_needed(cpu) (1) | ||
619 | |||
620 | #endif /* CONFIG_NO_HZ */ | ||
621 | |||
412 | /* | 622 | /* |
413 | * Get here when RCU is idle. Decide whether we need to | 623 | * Get here when RCU is idle. Decide whether we need to |
414 | * move out of idle state, and return non-zero if so. | 624 | * move out of idle state, and return non-zero if so. |
@@ -447,8 +657,10 @@ rcu_try_flip_idle(void) | |||
447 | 657 | ||
448 | /* Now ask each CPU for acknowledgement of the flip. */ | 658 | /* Now ask each CPU for acknowledgement of the flip. */ |
449 | 659 | ||
450 | for_each_cpu_mask(cpu, rcu_cpu_online_map) | 660 | for_each_cpu_mask(cpu, rcu_cpu_online_map) { |
451 | per_cpu(rcu_flip_flag, cpu) = rcu_flipped; | 661 | per_cpu(rcu_flip_flag, cpu) = rcu_flipped; |
662 | dyntick_save_progress_counter(cpu); | ||
663 | } | ||
452 | 664 | ||
453 | return 1; | 665 | return 1; |
454 | } | 666 | } |
@@ -464,7 +676,8 @@ rcu_try_flip_waitack(void) | |||
464 | 676 | ||
465 | RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); | 677 | RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); |
466 | for_each_cpu_mask(cpu, rcu_cpu_online_map) | 678 | for_each_cpu_mask(cpu, rcu_cpu_online_map) |
467 | if (per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { | 679 | if (rcu_try_flip_waitack_needed(cpu) && |
680 | per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { | ||
468 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); | 681 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); |
469 | return 0; | 682 | return 0; |
470 | } | 683 | } |
@@ -509,8 +722,10 @@ rcu_try_flip_waitzero(void) | |||
509 | smp_mb(); /* ^^^^^^^^^^^^ */ | 722 | smp_mb(); /* ^^^^^^^^^^^^ */ |
510 | 723 | ||
511 | /* Call for a memory barrier from each CPU. */ | 724 | /* Call for a memory barrier from each CPU. */ |
512 | for_each_cpu_mask(cpu, rcu_cpu_online_map) | 725 | for_each_cpu_mask(cpu, rcu_cpu_online_map) { |
513 | per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; | 726 | per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; |
727 | dyntick_save_progress_counter(cpu); | ||
728 | } | ||
514 | 729 | ||
515 | RCU_TRACE_ME(rcupreempt_trace_try_flip_z2); | 730 | RCU_TRACE_ME(rcupreempt_trace_try_flip_z2); |
516 | return 1; | 731 | return 1; |
@@ -528,7 +743,8 @@ rcu_try_flip_waitmb(void) | |||
528 | 743 | ||
529 | RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); | 744 | RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); |
530 | for_each_cpu_mask(cpu, rcu_cpu_online_map) | 745 | for_each_cpu_mask(cpu, rcu_cpu_online_map) |
531 | if (per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { | 746 | if (rcu_try_flip_waitmb_needed(cpu) && |
747 | per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { | ||
532 | RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); | 748 | RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); |
533 | return 0; | 749 | return 0; |
534 | } | 750 | } |
@@ -702,8 +918,9 @@ void rcu_offline_cpu(int cpu) | |||
702 | * fix. | 918 | * fix. |
703 | */ | 919 | */ |
704 | 920 | ||
921 | local_irq_save(flags); | ||
705 | rdp = RCU_DATA_ME(); | 922 | rdp = RCU_DATA_ME(); |
706 | spin_lock_irqsave(&rdp->lock, flags); | 923 | spin_lock(&rdp->lock); |
707 | *rdp->nexttail = list; | 924 | *rdp->nexttail = list; |
708 | if (list) | 925 | if (list) |
709 | rdp->nexttail = tail; | 926 | rdp->nexttail = tail; |
@@ -735,9 +952,11 @@ static void rcu_process_callbacks(struct softirq_action *unused) | |||
735 | { | 952 | { |
736 | unsigned long flags; | 953 | unsigned long flags; |
737 | struct rcu_head *next, *list; | 954 | struct rcu_head *next, *list; |
738 | struct rcu_data *rdp = RCU_DATA_ME(); | 955 | struct rcu_data *rdp; |
739 | 956 | ||
740 | spin_lock_irqsave(&rdp->lock, flags); | 957 | local_irq_save(flags); |
958 | rdp = RCU_DATA_ME(); | ||
959 | spin_lock(&rdp->lock); | ||
741 | list = rdp->donelist; | 960 | list = rdp->donelist; |
742 | if (list == NULL) { | 961 | if (list == NULL) { |
743 | spin_unlock_irqrestore(&rdp->lock, flags); | 962 | spin_unlock_irqrestore(&rdp->lock, flags); |
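The dynticks hooks above keep a per-CPU counter that is incremented on each transition, so an even value means the CPU is in dyntick-idle and an odd value means an irq/NMI handler is running. A hedged, stand-alone sketch of the test rcu_try_flip_waitack_needed() applies to the snapshot taken at flip time (names shortened for illustration):

	/* Sketch: does this CPU still need to acknowledge the counter flip?
	 * "curr" is the CPU's current dynticks counter, "snap" the value that
	 * dyntick_save_progress_counter() recorded when the flip started. */
	static int needs_ack_sketch(long curr, long snap)
	{
		if (curr == snap && (curr & 0x1) == 0)
			return 0;	/* idle the whole time: no ack needed */
		if ((curr - snap) > 2 || (snap & 0x1) == 0)
			return 0;	/* passed through an idle phase meanwhile */
		return 1;		/* must explicitly acknowledge the flip */
	}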
diff --git a/kernel/res_counter.c b/kernel/res_counter.c index 16cbec2d5d60..efbfc0fc232f 100644 --- a/kernel/res_counter.c +++ b/kernel/res_counter.c | |||
@@ -113,6 +113,7 @@ ssize_t res_counter_write(struct res_counter *counter, int member, | |||
113 | 113 | ||
114 | ret = -EINVAL; | 114 | ret = -EINVAL; |
115 | 115 | ||
116 | strstrip(buf); | ||
116 | if (write_strategy) { | 117 | if (write_strategy) { |
117 | if (write_strategy(buf, &tmp)) { | 118 | if (write_strategy(buf, &tmp)) { |
118 | goto out_free; | 119 | goto out_free; |
diff --git a/kernel/sched.c b/kernel/sched.c index b387a8de26a5..dcd553cc4ee8 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -174,41 +174,6 @@ struct task_group { | |||
174 | struct sched_entity **se; | 174 | struct sched_entity **se; |
175 | /* runqueue "owned" by this group on each cpu */ | 175 | /* runqueue "owned" by this group on each cpu */ |
176 | struct cfs_rq **cfs_rq; | 176 | struct cfs_rq **cfs_rq; |
177 | |||
178 | /* | ||
179 | * shares assigned to a task group governs how much of cpu bandwidth | ||
180 | * is allocated to the group. The more shares a group has, the more is | ||
181 | * the cpu bandwidth allocated to it. | ||
182 | * | ||
183 | * For ex, lets say that there are three task groups, A, B and C which | ||
184 | * have been assigned shares 1000, 2000 and 3000 respectively. Then, | ||
185 | * cpu bandwidth allocated by the scheduler to task groups A, B and C | ||
186 | * should be: | ||
187 | * | ||
188 | * Bw(A) = 1000/(1000+2000+3000) * 100 = 16.66% | ||
189 | * Bw(B) = 2000/(1000+2000+3000) * 100 = 33.33% | ||
190 | * Bw(C) = 3000/(1000+2000+3000) * 100 = 50% | ||
191 | * | ||
192 | * The weight assigned to a task group's schedulable entities on every | ||
193 | * cpu (task_group.se[a_cpu]->load.weight) is derived from the task | ||
194 | * group's shares. For ex: lets say that task group A has been | ||
195 | * assigned shares of 1000 and there are two CPUs in a system. Then, | ||
196 | * | ||
197 | * tg_A->se[0]->load.weight = tg_A->se[1]->load.weight = 1000; | ||
198 | * | ||
199 | * Note: It's not necessary that each of a task's group schedulable | ||
200 | * entity have the same weight on all CPUs. If the group | ||
201 | * has 2 of its tasks on CPU0 and 1 task on CPU1, then a | ||
202 | * better distribution of weight could be: | ||
203 | * | ||
204 | * tg_A->se[0]->load.weight = 2/3 * 2000 = 1333 | ||
205 | * tg_A->se[1]->load.weight = 1/2 * 2000 = 667 | ||
206 | * | ||
207 | * rebalance_shares() is responsible for distributing the shares of a | ||
208 | * task groups like this among the group's schedulable entities across | ||
209 | * cpus. | ||
210 | * | ||
211 | */ | ||
212 | unsigned long shares; | 177 | unsigned long shares; |
213 | #endif | 178 | #endif |
214 | 179 | ||
@@ -250,22 +215,12 @@ static DEFINE_SPINLOCK(task_group_lock); | |||
250 | static DEFINE_MUTEX(doms_cur_mutex); | 215 | static DEFINE_MUTEX(doms_cur_mutex); |
251 | 216 | ||
252 | #ifdef CONFIG_FAIR_GROUP_SCHED | 217 | #ifdef CONFIG_FAIR_GROUP_SCHED |
253 | #ifdef CONFIG_SMP | ||
254 | /* kernel thread that runs rebalance_shares() periodically */ | ||
255 | static struct task_struct *lb_monitor_task; | ||
256 | static int load_balance_monitor(void *unused); | ||
257 | #endif | ||
258 | |||
259 | static void set_se_shares(struct sched_entity *se, unsigned long shares); | ||
260 | |||
261 | #ifdef CONFIG_USER_SCHED | 218 | #ifdef CONFIG_USER_SCHED |
262 | # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) | 219 | # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) |
263 | #else | 220 | #else |
264 | # define INIT_TASK_GROUP_LOAD NICE_0_LOAD | 221 | # define INIT_TASK_GROUP_LOAD NICE_0_LOAD |
265 | #endif | 222 | #endif |
266 | 223 | ||
267 | #define MIN_GROUP_SHARES 2 | ||
268 | |||
269 | static int init_task_group_load = INIT_TASK_GROUP_LOAD; | 224 | static int init_task_group_load = INIT_TASK_GROUP_LOAD; |
270 | #endif | 225 | #endif |
271 | 226 | ||
@@ -668,6 +623,8 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32; | |||
668 | */ | 623 | */ |
669 | unsigned int sysctl_sched_rt_period = 1000000; | 624 | unsigned int sysctl_sched_rt_period = 1000000; |
670 | 625 | ||
626 | static __read_mostly int scheduler_running; | ||
627 | |||
671 | /* | 628 | /* |
672 | * part of the period that we allow rt tasks to run in us. | 629 | * part of the period that we allow rt tasks to run in us. |
673 | * default: 0.95s | 630 | * default: 0.95s |
@@ -689,14 +646,16 @@ unsigned long long cpu_clock(int cpu) | |||
689 | unsigned long flags; | 646 | unsigned long flags; |
690 | struct rq *rq; | 647 | struct rq *rq; |
691 | 648 | ||
692 | local_irq_save(flags); | ||
693 | rq = cpu_rq(cpu); | ||
694 | /* | 649 | /* |
695 | * Only call sched_clock() if the scheduler has already been | 650 | * Only call sched_clock() if the scheduler has already been |
696 | * initialized (some code might call cpu_clock() very early): | 651 | * initialized (some code might call cpu_clock() very early): |
697 | */ | 652 | */ |
698 | if (rq->idle) | 653 | if (unlikely(!scheduler_running)) |
699 | update_rq_clock(rq); | 654 | return 0; |
655 | |||
656 | local_irq_save(flags); | ||
657 | rq = cpu_rq(cpu); | ||
658 | update_rq_clock(rq); | ||
700 | now = rq->clock; | 659 | now = rq->clock; |
701 | local_irq_restore(flags); | 660 | local_irq_restore(flags); |
702 | 661 | ||
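The reworked cpu_clock() now bails out with 0 until scheduler_running is set at the end of sched_init(), so very early callers get a harmless value instead of touching a runqueue that is not usable yet. A hedged, user-space sketch of that guard pattern (names and the clock source are illustrative):

	#include <time.h>

	static int subsystem_ready;	/* set once initialization has finished */

	static unsigned long long read_counter_sketch(void)
	{
		struct timespec ts;

		/* early callers get a safe default instead of touching
		 * state that has not been set up yet */
		if (!subsystem_ready)
			return 0;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
	}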
@@ -1241,16 +1200,6 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime); | |||
1241 | static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} | 1200 | static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} |
1242 | #endif | 1201 | #endif |
1243 | 1202 | ||
1244 | static inline void inc_cpu_load(struct rq *rq, unsigned long load) | ||
1245 | { | ||
1246 | update_load_add(&rq->load, load); | ||
1247 | } | ||
1248 | |||
1249 | static inline void dec_cpu_load(struct rq *rq, unsigned long load) | ||
1250 | { | ||
1251 | update_load_sub(&rq->load, load); | ||
1252 | } | ||
1253 | |||
1254 | #ifdef CONFIG_SMP | 1203 | #ifdef CONFIG_SMP |
1255 | static unsigned long source_load(int cpu, int type); | 1204 | static unsigned long source_load(int cpu, int type); |
1256 | static unsigned long target_load(int cpu, int type); | 1205 | static unsigned long target_load(int cpu, int type); |
@@ -1268,14 +1217,26 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); | |||
1268 | 1217 | ||
1269 | #define sched_class_highest (&rt_sched_class) | 1218 | #define sched_class_highest (&rt_sched_class) |
1270 | 1219 | ||
1271 | static void inc_nr_running(struct rq *rq) | 1220 | static inline void inc_load(struct rq *rq, const struct task_struct *p) |
1221 | { | ||
1222 | update_load_add(&rq->load, p->se.load.weight); | ||
1223 | } | ||
1224 | |||
1225 | static inline void dec_load(struct rq *rq, const struct task_struct *p) | ||
1226 | { | ||
1227 | update_load_sub(&rq->load, p->se.load.weight); | ||
1228 | } | ||
1229 | |||
1230 | static void inc_nr_running(struct task_struct *p, struct rq *rq) | ||
1272 | { | 1231 | { |
1273 | rq->nr_running++; | 1232 | rq->nr_running++; |
1233 | inc_load(rq, p); | ||
1274 | } | 1234 | } |
1275 | 1235 | ||
1276 | static void dec_nr_running(struct rq *rq) | 1236 | static void dec_nr_running(struct task_struct *p, struct rq *rq) |
1277 | { | 1237 | { |
1278 | rq->nr_running--; | 1238 | rq->nr_running--; |
1239 | dec_load(rq, p); | ||
1279 | } | 1240 | } |
1280 | 1241 | ||
1281 | static void set_load_weight(struct task_struct *p) | 1242 | static void set_load_weight(struct task_struct *p) |
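After this hunk the runqueue load is maintained alongside nr_running: a task's weight is added through inc_load() when it is counted in nr_running and removed through dec_load() when it leaves, while set_user_nice() below pairs the two explicitly because the weight itself changes in between. A hedged, stand-alone sketch of the invariant (the *_sketch types are illustrative, not kernel structures):

	struct task_sketch { unsigned long weight; };
	struct rq_sketch   { unsigned long nr_running; unsigned long load; };

	/* invariant: rq->load == sum of weights of all queued tasks */
	static void enqueue_sketch(struct rq_sketch *rq, struct task_sketch *p)
	{
		rq->nr_running++;
		rq->load += p->weight;		/* mirrors inc_load() */
	}

	static void dequeue_sketch(struct rq_sketch *rq, struct task_sketch *p)
	{
		rq->nr_running--;
		rq->load -= p->weight;		/* mirrors dec_load() */
	}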
@@ -1367,7 +1328,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup) | |||
1367 | rq->nr_uninterruptible--; | 1328 | rq->nr_uninterruptible--; |
1368 | 1329 | ||
1369 | enqueue_task(rq, p, wakeup); | 1330 | enqueue_task(rq, p, wakeup); |
1370 | inc_nr_running(rq); | 1331 | inc_nr_running(p, rq); |
1371 | } | 1332 | } |
1372 | 1333 | ||
1373 | /* | 1334 | /* |
@@ -1379,7 +1340,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep) | |||
1379 | rq->nr_uninterruptible++; | 1340 | rq->nr_uninterruptible++; |
1380 | 1341 | ||
1381 | dequeue_task(rq, p, sleep); | 1342 | dequeue_task(rq, p, sleep); |
1382 | dec_nr_running(rq); | 1343 | dec_nr_running(p, rq); |
1383 | } | 1344 | } |
1384 | 1345 | ||
1385 | /** | 1346 | /** |
@@ -2019,7 +1980,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
2019 | * management (if any): | 1980 | * management (if any): |
2020 | */ | 1981 | */ |
2021 | p->sched_class->task_new(rq, p); | 1982 | p->sched_class->task_new(rq, p); |
2022 | inc_nr_running(rq); | 1983 | inc_nr_running(p, rq); |
2023 | } | 1984 | } |
2024 | check_preempt_curr(rq, p); | 1985 | check_preempt_curr(rq, p); |
2025 | #ifdef CONFIG_SMP | 1986 | #ifdef CONFIG_SMP |
@@ -3885,7 +3846,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev) | |||
3885 | asmlinkage void __sched schedule(void) | 3846 | asmlinkage void __sched schedule(void) |
3886 | { | 3847 | { |
3887 | struct task_struct *prev, *next; | 3848 | struct task_struct *prev, *next; |
3888 | long *switch_count; | 3849 | unsigned long *switch_count; |
3889 | struct rq *rq; | 3850 | struct rq *rq; |
3890 | int cpu; | 3851 | int cpu; |
3891 | 3852 | ||
@@ -4358,8 +4319,10 @@ void set_user_nice(struct task_struct *p, long nice) | |||
4358 | goto out_unlock; | 4319 | goto out_unlock; |
4359 | } | 4320 | } |
4360 | on_rq = p->se.on_rq; | 4321 | on_rq = p->se.on_rq; |
4361 | if (on_rq) | 4322 | if (on_rq) { |
4362 | dequeue_task(rq, p, 0); | 4323 | dequeue_task(rq, p, 0); |
4324 | dec_load(rq, p); | ||
4325 | } | ||
4363 | 4326 | ||
4364 | p->static_prio = NICE_TO_PRIO(nice); | 4327 | p->static_prio = NICE_TO_PRIO(nice); |
4365 | set_load_weight(p); | 4328 | set_load_weight(p); |
@@ -4369,6 +4332,7 @@ void set_user_nice(struct task_struct *p, long nice) | |||
4369 | 4332 | ||
4370 | if (on_rq) { | 4333 | if (on_rq) { |
4371 | enqueue_task(rq, p, 0); | 4334 | enqueue_task(rq, p, 0); |
4335 | inc_load(rq, p); | ||
4372 | /* | 4336 | /* |
4373 | * If the task increased its priority or is running and | 4337 | * If the task increased its priority or is running and |
4374 | * lowered its priority, then reschedule its CPU: | 4338 | * lowered its priority, then reschedule its CPU: |
@@ -7083,21 +7047,6 @@ void __init sched_init_smp(void) | |||
7083 | if (set_cpus_allowed(current, non_isolated_cpus) < 0) | 7047 | if (set_cpus_allowed(current, non_isolated_cpus) < 0) |
7084 | BUG(); | 7048 | BUG(); |
7085 | sched_init_granularity(); | 7049 | sched_init_granularity(); |
7086 | |||
7087 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
7088 | if (nr_cpu_ids == 1) | ||
7089 | return; | ||
7090 | |||
7091 | lb_monitor_task = kthread_create(load_balance_monitor, NULL, | ||
7092 | "group_balance"); | ||
7093 | if (!IS_ERR(lb_monitor_task)) { | ||
7094 | lb_monitor_task->flags |= PF_NOFREEZE; | ||
7095 | wake_up_process(lb_monitor_task); | ||
7096 | } else { | ||
7097 | printk(KERN_ERR "Could not create load balance monitor thread" | ||
7098 | "(error = %ld) \n", PTR_ERR(lb_monitor_task)); | ||
7099 | } | ||
7100 | #endif | ||
7101 | } | 7050 | } |
7102 | #else | 7051 | #else |
7103 | void __init sched_init_smp(void) | 7052 | void __init sched_init_smp(void) |
@@ -7284,6 +7233,8 @@ void __init sched_init(void) | |||
7284 | * During early bootup we pretend to be a normal task: | 7233 | * During early bootup we pretend to be a normal task: |
7285 | */ | 7234 | */ |
7286 | current->sched_class = &fair_sched_class; | 7235 | current->sched_class = &fair_sched_class; |
7236 | |||
7237 | scheduler_running = 1; | ||
7287 | } | 7238 | } |
7288 | 7239 | ||
7289 | #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP | 7240 | #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP |
@@ -7418,157 +7369,6 @@ void set_curr_task(int cpu, struct task_struct *p) | |||
7418 | 7369 | ||
7419 | #ifdef CONFIG_GROUP_SCHED | 7370 | #ifdef CONFIG_GROUP_SCHED |
7420 | 7371 | ||
7421 | #if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP | ||
7422 | /* | ||
7423 | * distribute shares of all task groups among their schedulable entities, | ||
7424 | * to reflect load distribution across cpus. | ||
7425 | */ | ||
7426 | static int rebalance_shares(struct sched_domain *sd, int this_cpu) | ||
7427 | { | ||
7428 | struct cfs_rq *cfs_rq; | ||
7429 | struct rq *rq = cpu_rq(this_cpu); | ||
7430 | cpumask_t sdspan = sd->span; | ||
7431 | int balanced = 1; | ||
7432 | |||
7433 | /* Walk thr' all the task groups that we have */ | ||
7434 | for_each_leaf_cfs_rq(rq, cfs_rq) { | ||
7435 | int i; | ||
7436 | unsigned long total_load = 0, total_shares; | ||
7437 | struct task_group *tg = cfs_rq->tg; | ||
7438 | |||
7439 | /* Gather total task load of this group across cpus */ | ||
7440 | for_each_cpu_mask(i, sdspan) | ||
7441 | total_load += tg->cfs_rq[i]->load.weight; | ||
7442 | |||
7443 | /* Nothing to do if this group has no load */ | ||
7444 | if (!total_load) | ||
7445 | continue; | ||
7446 | |||
7447 | /* | ||
7448 | * tg->shares represents the number of cpu shares the task group | ||
7449 | * is eligible to hold on a single cpu. On N cpus, it is | ||
7450 | * eligible to hold (N * tg->shares) number of cpu shares. | ||
7451 | */ | ||
7452 | total_shares = tg->shares * cpus_weight(sdspan); | ||
7453 | |||
7454 | /* | ||
7455 | * redistribute total_shares across cpus as per the task load | ||
7456 | * distribution. | ||
7457 | */ | ||
7458 | for_each_cpu_mask(i, sdspan) { | ||
7459 | unsigned long local_load, local_shares; | ||
7460 | |||
7461 | local_load = tg->cfs_rq[i]->load.weight; | ||
7462 | local_shares = (local_load * total_shares) / total_load; | ||
7463 | if (!local_shares) | ||
7464 | local_shares = MIN_GROUP_SHARES; | ||
7465 | if (local_shares == tg->se[i]->load.weight) | ||
7466 | continue; | ||
7467 | |||
7468 | spin_lock_irq(&cpu_rq(i)->lock); | ||
7469 | set_se_shares(tg->se[i], local_shares); | ||
7470 | spin_unlock_irq(&cpu_rq(i)->lock); | ||
7471 | balanced = 0; | ||
7472 | } | ||
7473 | } | ||
7474 | |||
7475 | return balanced; | ||
7476 | } | ||
7477 | |||
7478 | /* | ||
7479 | * How frequently should we rebalance_shares() across cpus? | ||
7480 | * | ||
7481 | * The more frequently we rebalance shares, the more accurate is the fairness | ||
7482 | * of cpu bandwidth distribution between task groups. However higher frequency | ||
7483 | * also implies increased scheduling overhead. | ||
7484 | * | ||
7485 | * sysctl_sched_min_bal_int_shares represents the minimum interval between | ||
7486 | * consecutive calls to rebalance_shares() in the same sched domain. | ||
7487 | * | ||
7488 | * sysctl_sched_max_bal_int_shares represents the maximum interval between | ||
7489 | * consecutive calls to rebalance_shares() in the same sched domain. | ||
7490 | * | ||
7491 | * These settings allows for the appropriate trade-off between accuracy of | ||
7492 | * fairness and the associated overhead. | ||
7493 | * | ||
7494 | */ | ||
7495 | |||
7496 | /* default: 8ms, units: milliseconds */ | ||
7497 | const_debug unsigned int sysctl_sched_min_bal_int_shares = 8; | ||
7498 | |||
7499 | /* default: 128ms, units: milliseconds */ | ||
7500 | const_debug unsigned int sysctl_sched_max_bal_int_shares = 128; | ||
7501 | |||
7502 | /* kernel thread that runs rebalance_shares() periodically */ | ||
7503 | static int load_balance_monitor(void *unused) | ||
7504 | { | ||
7505 | unsigned int timeout = sysctl_sched_min_bal_int_shares; | ||
7506 | struct sched_param schedparm; | ||
7507 | int ret; | ||
7508 | |||
7509 | /* | ||
7510 | * We don't want this thread's execution to be limited by the shares | ||
7511 | * assigned to default group (init_task_group). Hence make it run | ||
7512 | * as a SCHED_RR RT task at the lowest priority. | ||
7513 | */ | ||
7514 | schedparm.sched_priority = 1; | ||
7515 | ret = sched_setscheduler(current, SCHED_RR, &schedparm); | ||
7516 | if (ret) | ||
7517 | printk(KERN_ERR "Couldn't set SCHED_RR policy for load balance" | ||
7518 | " monitor thread (error = %d) \n", ret); | ||
7519 | |||
7520 | while (!kthread_should_stop()) { | ||
7521 | int i, cpu, balanced = 1; | ||
7522 | |||
7523 | /* Prevent cpus going down or coming up */ | ||
7524 | get_online_cpus(); | ||
7525 | /* lockout changes to doms_cur[] array */ | ||
7526 | lock_doms_cur(); | ||
7527 | /* | ||
7528 | * Enter a rcu read-side critical section to safely walk rq->sd | ||
7529 | * chain on various cpus and to walk task group list | ||
7530 | * (rq->leaf_cfs_rq_list) in rebalance_shares(). | ||
7531 | */ | ||
7532 | rcu_read_lock(); | ||
7533 | |||
7534 | for (i = 0; i < ndoms_cur; i++) { | ||
7535 | cpumask_t cpumap = doms_cur[i]; | ||
7536 | struct sched_domain *sd = NULL, *sd_prev = NULL; | ||
7537 | |||
7538 | cpu = first_cpu(cpumap); | ||
7539 | |||
7540 | /* Find the highest domain at which to balance shares */ | ||
7541 | for_each_domain(cpu, sd) { | ||
7542 | if (!(sd->flags & SD_LOAD_BALANCE)) | ||
7543 | continue; | ||
7544 | sd_prev = sd; | ||
7545 | } | ||
7546 | |||
7547 | sd = sd_prev; | ||
7548 | /* sd == NULL? No load balance reqd in this domain */ | ||
7549 | if (!sd) | ||
7550 | continue; | ||
7551 | |||
7552 | balanced &= rebalance_shares(sd, cpu); | ||
7553 | } | ||
7554 | |||
7555 | rcu_read_unlock(); | ||
7556 | |||
7557 | unlock_doms_cur(); | ||
7558 | put_online_cpus(); | ||
7559 | |||
7560 | if (!balanced) | ||
7561 | timeout = sysctl_sched_min_bal_int_shares; | ||
7562 | else if (timeout < sysctl_sched_max_bal_int_shares) | ||
7563 | timeout *= 2; | ||
7564 | |||
7565 | msleep_interruptible(timeout); | ||
7566 | } | ||
7567 | |||
7568 | return 0; | ||
7569 | } | ||
7570 | #endif /* CONFIG_SMP */ | ||
7571 | |||
7572 | #ifdef CONFIG_FAIR_GROUP_SCHED | 7372 | #ifdef CONFIG_FAIR_GROUP_SCHED |
7573 | static void free_fair_sched_group(struct task_group *tg) | 7373 | static void free_fair_sched_group(struct task_group *tg) |
7574 | { | 7374 | { |
@@ -7835,29 +7635,25 @@ void sched_move_task(struct task_struct *tsk) | |||
7835 | } | 7635 | } |
7836 | 7636 | ||
7837 | #ifdef CONFIG_FAIR_GROUP_SCHED | 7637 | #ifdef CONFIG_FAIR_GROUP_SCHED |
7838 | /* rq->lock to be locked by caller */ | ||
7839 | static void set_se_shares(struct sched_entity *se, unsigned long shares) | 7638 | static void set_se_shares(struct sched_entity *se, unsigned long shares) |
7840 | { | 7639 | { |
7841 | struct cfs_rq *cfs_rq = se->cfs_rq; | 7640 | struct cfs_rq *cfs_rq = se->cfs_rq; |
7842 | struct rq *rq = cfs_rq->rq; | 7641 | struct rq *rq = cfs_rq->rq; |
7843 | int on_rq; | 7642 | int on_rq; |
7844 | 7643 | ||
7845 | if (!shares) | 7644 | spin_lock_irq(&rq->lock); |
7846 | shares = MIN_GROUP_SHARES; | ||
7847 | 7645 | ||
7848 | on_rq = se->on_rq; | 7646 | on_rq = se->on_rq; |
7849 | if (on_rq) { | 7647 | if (on_rq) |
7850 | dequeue_entity(cfs_rq, se, 0); | 7648 | dequeue_entity(cfs_rq, se, 0); |
7851 | dec_cpu_load(rq, se->load.weight); | ||
7852 | } | ||
7853 | 7649 | ||
7854 | se->load.weight = shares; | 7650 | se->load.weight = shares; |
7855 | se->load.inv_weight = div64_64((1ULL<<32), shares); | 7651 | se->load.inv_weight = div64_64((1ULL<<32), shares); |
7856 | 7652 | ||
7857 | if (on_rq) { | 7653 | if (on_rq) |
7858 | enqueue_entity(cfs_rq, se, 0); | 7654 | enqueue_entity(cfs_rq, se, 0); |
7859 | inc_cpu_load(rq, se->load.weight); | 7655 | |
7860 | } | 7656 | spin_unlock_irq(&rq->lock); |
7861 | } | 7657 | } |
7862 | 7658 | ||
7863 | static DEFINE_MUTEX(shares_mutex); | 7659 | static DEFINE_MUTEX(shares_mutex); |
@@ -7867,18 +7663,18 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares) | |||
7867 | int i; | 7663 | int i; |
7868 | unsigned long flags; | 7664 | unsigned long flags; |
7869 | 7665 | ||
7666 | /* | ||
7667 | * A weight of 0 or 1 can cause arithmetics problems. | ||
7668 | * (The default weight is 1024 - so there's no practical | ||
7669 | * limitation from this.) | ||
7670 | */ | ||
7671 | if (shares < 2) | ||
7672 | shares = 2; | ||
7673 | |||
7870 | mutex_lock(&shares_mutex); | 7674 | mutex_lock(&shares_mutex); |
7871 | if (tg->shares == shares) | 7675 | if (tg->shares == shares) |
7872 | goto done; | 7676 | goto done; |
7873 | 7677 | ||
7874 | if (shares < MIN_GROUP_SHARES) | ||
7875 | shares = MIN_GROUP_SHARES; | ||
7876 | |||
7877 | /* | ||
7878 | * Prevent any load balance activity (rebalance_shares, | ||
7879 | * load_balance_fair) from referring to this group first, | ||
7880 | * by taking it off the rq->leaf_cfs_rq_list on each cpu. | ||
7881 | */ | ||
7882 | spin_lock_irqsave(&task_group_lock, flags); | 7678 | spin_lock_irqsave(&task_group_lock, flags); |
7883 | for_each_possible_cpu(i) | 7679 | for_each_possible_cpu(i) |
7884 | unregister_fair_sched_group(tg, i); | 7680 | unregister_fair_sched_group(tg, i); |
@@ -7892,11 +7688,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares) | |||
7892 | * w/o tripping rebalance_share or load_balance_fair. | 7688 | * w/o tripping rebalance_share or load_balance_fair. |
7893 | */ | 7689 | */ |
7894 | tg->shares = shares; | 7690 | tg->shares = shares; |
7895 | for_each_possible_cpu(i) { | 7691 | for_each_possible_cpu(i) |
7896 | spin_lock_irq(&cpu_rq(i)->lock); | ||
7897 | set_se_shares(tg->se[i], shares); | 7692 | set_se_shares(tg->se[i], shares); |
7898 | spin_unlock_irq(&cpu_rq(i)->lock); | ||
7899 | } | ||
7900 | 7693 | ||
7901 | /* | 7694 | /* |
7902 | * Enable load balance activity on this group, by inserting it back on | 7695 | * Enable load balance activity on this group, by inserting it back on |
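The sched.c hunks above drop the group-shares rebalancing thread and its sysctls, take rq->lock inside set_se_shares() itself, and clamp group shares to a minimum of 2 because a weight of 0 or 1 breaks the reciprocal arithmetic. A minimal standalone sketch of that clamp plus the 32.32 fixed-point inverse weight seen in the hunk (illustrative types, not the kernel's struct load_weight):

	#include <stdint.h>
	#include <stdio.h>

	/* Standalone sketch of the share/inverse-weight update done in
	 * set_se_shares(); not the kernel implementation. */
	struct load_weight {
		unsigned long weight;
		uint64_t inv_weight;	/* 2^32 / weight, 32.32 fixed point */
	};

	static void set_shares(struct load_weight *lw, unsigned long shares)
	{
		/* A weight of 0 or 1 breaks the reciprocal arithmetic, so
		 * the patch clamps to 2 (the default group weight is 1024). */
		if (shares < 2)
			shares = 2;

		lw->weight = shares;
		lw->inv_weight = ((uint64_t)1 << 32) / shares;
	}

	int main(void)
	{
		struct load_weight lw;

		set_shares(&lw, 0);		/* clamped to 2 */
		printf("weight=%lu inv=%llu\n", lw.weight,
		       (unsigned long long)lw.inv_weight);

		set_shares(&lw, 1024);		/* default group weight */
		printf("weight=%lu inv=%llu\n", lw.weight,
		       (unsigned long long)lw.inv_weight);
		return 0;
	}

The kernel's div64_64() plays the role of the plain 64-bit division here.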
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 6c091d6e159d..3df4d46994ca 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -202,17 +202,12 @@ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq) | |||
202 | 202 | ||
203 | static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) | 203 | static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) |
204 | { | 204 | { |
205 | struct rb_node **link = &cfs_rq->tasks_timeline.rb_node; | 205 | struct rb_node *last = rb_last(&cfs_rq->tasks_timeline); |
206 | struct sched_entity *se = NULL; | ||
207 | struct rb_node *parent; | ||
208 | 206 | ||
209 | while (*link) { | 207 | if (!last) |
210 | parent = *link; | 208 | return NULL; |
211 | se = rb_entry(parent, struct sched_entity, run_node); | ||
212 | link = &parent->rb_right; | ||
213 | } | ||
214 | 209 | ||
215 | return se; | 210 | return rb_entry(last, struct sched_entity, run_node); |
216 | } | 211 | } |
217 | 212 | ||
218 | /************************************************************** | 213 | /************************************************************** |
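__pick_last_entity() above is reduced to rb_last() on the tasks timeline instead of hand-walking rb_right pointers. The same idea on a plain binary search tree, as a self-contained sketch (toy node type, not the kernel rbtree API):

	#include <stddef.h>
	#include <stdio.h>

	/* Illustrative tree node; the kernel uses struct rb_node/rb_last(). */
	struct node {
		int key;
		struct node *left, *right;
	};

	/* Equivalent of rb_last(): follow right children to the maximum. */
	static struct node *tree_last(struct node *root)
	{
		if (!root)
			return NULL;
		while (root->right)
			root = root->right;
		return root;
	}

	int main(void)
	{
		struct node c = { 3, NULL, NULL };
		struct node b = { 2, NULL, &c };
		struct node a = { 1, NULL, &b };

		printf("rightmost key = %d\n", tree_last(&a)->key);
		return 0;
	}

rb_last() already performs exactly this walk to the rightmost node, so the open-coded loop added nothing but duplication.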
@@ -732,8 +727,6 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se) | |||
732 | return se->parent; | 727 | return se->parent; |
733 | } | 728 | } |
734 | 729 | ||
735 | #define GROUP_IMBALANCE_PCT 20 | ||
736 | |||
737 | #else /* CONFIG_FAIR_GROUP_SCHED */ | 730 | #else /* CONFIG_FAIR_GROUP_SCHED */ |
738 | 731 | ||
739 | #define for_each_sched_entity(se) \ | 732 | #define for_each_sched_entity(se) \ |
@@ -824,26 +817,15 @@ hrtick_start_fair(struct rq *rq, struct task_struct *p) | |||
824 | static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup) | 817 | static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup) |
825 | { | 818 | { |
826 | struct cfs_rq *cfs_rq; | 819 | struct cfs_rq *cfs_rq; |
827 | struct sched_entity *se = &p->se, | 820 | struct sched_entity *se = &p->se; |
828 | *topse = NULL; /* Highest schedulable entity */ | ||
829 | int incload = 1; | ||
830 | 821 | ||
831 | for_each_sched_entity(se) { | 822 | for_each_sched_entity(se) { |
832 | topse = se; | 823 | if (se->on_rq) |
833 | if (se->on_rq) { | ||
834 | incload = 0; | ||
835 | break; | 824 | break; |
836 | } | ||
837 | cfs_rq = cfs_rq_of(se); | 825 | cfs_rq = cfs_rq_of(se); |
838 | enqueue_entity(cfs_rq, se, wakeup); | 826 | enqueue_entity(cfs_rq, se, wakeup); |
839 | wakeup = 1; | 827 | wakeup = 1; |
840 | } | 828 | } |
841 | /* Increment cpu load if we just enqueued the first task of a group on | ||
842 | * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs | ||
843 | * at the highest grouping level. | ||
844 | */ | ||
845 | if (incload) | ||
846 | inc_cpu_load(rq, topse->load.weight); | ||
847 | 829 | ||
848 | hrtick_start_fair(rq, rq->curr); | 830 | hrtick_start_fair(rq, rq->curr); |
849 | } | 831 | } |
@@ -856,28 +838,16 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup) | |||
856 | static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep) | 838 | static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep) |
857 | { | 839 | { |
858 | struct cfs_rq *cfs_rq; | 840 | struct cfs_rq *cfs_rq; |
859 | struct sched_entity *se = &p->se, | 841 | struct sched_entity *se = &p->se; |
860 | *topse = NULL; /* Highest schedulable entity */ | ||
861 | int decload = 1; | ||
862 | 842 | ||
863 | for_each_sched_entity(se) { | 843 | for_each_sched_entity(se) { |
864 | topse = se; | ||
865 | cfs_rq = cfs_rq_of(se); | 844 | cfs_rq = cfs_rq_of(se); |
866 | dequeue_entity(cfs_rq, se, sleep); | 845 | dequeue_entity(cfs_rq, se, sleep); |
867 | /* Don't dequeue parent if it has other entities besides us */ | 846 | /* Don't dequeue parent if it has other entities besides us */ |
868 | if (cfs_rq->load.weight) { | 847 | if (cfs_rq->load.weight) |
869 | if (parent_entity(se)) | ||
870 | decload = 0; | ||
871 | break; | 848 | break; |
872 | } | ||
873 | sleep = 1; | 849 | sleep = 1; |
874 | } | 850 | } |
875 | /* Decrement cpu load if we just dequeued the last task of a group on | ||
876 | * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs | ||
877 | * at the highest grouping level. | ||
878 | */ | ||
879 | if (decload) | ||
880 | dec_cpu_load(rq, topse->load.weight); | ||
881 | 851 | ||
882 | hrtick_start_fair(rq, rq->curr); | 852 | hrtick_start_fair(rq, rq->curr); |
883 | } | 853 | } |
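With group weight now folded into the ordinary cfs_rq load, enqueue_task_fair() and dequeue_task_fair() above no longer track a topmost entity or touch the cpu load counters; they simply walk up the entity hierarchy until they find a level that is already queued (enqueue) or a level whose queue still has weight (dequeue). A hedged sketch of that walk with toy stand-ins for sched_entity and cfs_rq:

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-ins for struct sched_entity / struct cfs_rq. */
	struct cfs_queue {
		unsigned int nr_queued;
	};

	struct entity {
		bool on_rq;
		struct cfs_queue *my_q;	/* queue this entity sits on */
		struct entity *parent;	/* group entity one level up, or NULL */
	};

	/* Walk up the hierarchy; stop once a level is already enqueued. */
	static void enqueue_hier(struct entity *se)
	{
		for (; se; se = se->parent) {
			if (se->on_rq)
				break;
			se->my_q->nr_queued++;	/* enqueue_entity() stand-in */
			se->on_rq = true;
		}
	}

	/* Walk up; stop once a level's queue still has other entities. */
	static void dequeue_hier(struct entity *se)
	{
		for (; se; se = se->parent) {
			se->my_q->nr_queued--;	/* dequeue_entity() stand-in */
			se->on_rq = false;
			if (se->my_q->nr_queued)
				break;
		}
	}

	int main(void)
	{
		struct cfs_queue root_q = { 0 }, group_q = { 0 };
		struct entity group = { false, &root_q, NULL };
		struct entity task  = { false, &group_q, &group };

		enqueue_hier(&task);
		printf("group enqueued: %d\n", group.on_rq);	/* 1 */
		dequeue_hier(&task);
		printf("group enqueued: %d\n", group.on_rq);	/* 0 */
		return 0;
	}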
@@ -1191,6 +1161,25 @@ static struct task_struct *load_balance_next_fair(void *arg) | |||
1191 | return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr); | 1161 | return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr); |
1192 | } | 1162 | } |
1193 | 1163 | ||
1164 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
1165 | static int cfs_rq_best_prio(struct cfs_rq *cfs_rq) | ||
1166 | { | ||
1167 | struct sched_entity *curr; | ||
1168 | struct task_struct *p; | ||
1169 | |||
1170 | if (!cfs_rq->nr_running || !first_fair(cfs_rq)) | ||
1171 | return MAX_PRIO; | ||
1172 | |||
1173 | curr = cfs_rq->curr; | ||
1174 | if (!curr) | ||
1175 | curr = __pick_next_entity(cfs_rq); | ||
1176 | |||
1177 | p = task_of(curr); | ||
1178 | |||
1179 | return p->prio; | ||
1180 | } | ||
1181 | #endif | ||
1182 | |||
1194 | static unsigned long | 1183 | static unsigned long |
1195 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 1184 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, |
1196 | unsigned long max_load_move, | 1185 | unsigned long max_load_move, |
@@ -1200,45 +1189,28 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
1200 | struct cfs_rq *busy_cfs_rq; | 1189 | struct cfs_rq *busy_cfs_rq; |
1201 | long rem_load_move = max_load_move; | 1190 | long rem_load_move = max_load_move; |
1202 | struct rq_iterator cfs_rq_iterator; | 1191 | struct rq_iterator cfs_rq_iterator; |
1203 | unsigned long load_moved; | ||
1204 | 1192 | ||
1205 | cfs_rq_iterator.start = load_balance_start_fair; | 1193 | cfs_rq_iterator.start = load_balance_start_fair; |
1206 | cfs_rq_iterator.next = load_balance_next_fair; | 1194 | cfs_rq_iterator.next = load_balance_next_fair; |
1207 | 1195 | ||
1208 | for_each_leaf_cfs_rq(busiest, busy_cfs_rq) { | 1196 | for_each_leaf_cfs_rq(busiest, busy_cfs_rq) { |
1209 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1197 | #ifdef CONFIG_FAIR_GROUP_SCHED |
1210 | struct cfs_rq *this_cfs_rq = busy_cfs_rq->tg->cfs_rq[this_cpu]; | 1198 | struct cfs_rq *this_cfs_rq; |
1211 | unsigned long maxload, task_load, group_weight; | 1199 | long imbalance; |
1212 | unsigned long thisload, per_task_load; | 1200 | unsigned long maxload; |
1213 | struct sched_entity *se = busy_cfs_rq->tg->se[busiest->cpu]; | ||
1214 | |||
1215 | task_load = busy_cfs_rq->load.weight; | ||
1216 | group_weight = se->load.weight; | ||
1217 | 1201 | ||
1218 | /* | 1202 | this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu); |
1219 | * 'group_weight' is contributed by tasks of total weight | ||
1220 | * 'task_load'. To move 'rem_load_move' worth of weight only, | ||
1221 | * we need to move a maximum task load of: | ||
1222 | * | ||
1223 | * maxload = (remload / group_weight) * task_load; | ||
1224 | */ | ||
1225 | maxload = (rem_load_move * task_load) / group_weight; | ||
1226 | 1203 | ||
1227 | if (!maxload || !task_load) | 1204 | imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight; |
1205 | /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */ | ||
1206 | if (imbalance <= 0) | ||
1228 | continue; | 1207 | continue; |
1229 | 1208 | ||
1230 | per_task_load = task_load / busy_cfs_rq->nr_running; | 1209 | /* Don't pull more than imbalance/2 */ |
1231 | /* | 1210 | imbalance /= 2; |
1232 | * balance_tasks will try to forcibly move atleast one task if | 1211 | maxload = min(rem_load_move, imbalance); |
1233 | * possible (because of SCHED_LOAD_SCALE_FUZZ). Avoid that if | ||
1234 | * maxload is less than GROUP_IMBALANCE_FUZZ% the per_task_load. | ||
1235 | */ | ||
1236 | if (100 * maxload < GROUP_IMBALANCE_PCT * per_task_load) | ||
1237 | continue; | ||
1238 | 1212 | ||
1239 | /* Disable priority-based load balance */ | 1213 | *this_best_prio = cfs_rq_best_prio(this_cfs_rq); |
1240 | *this_best_prio = 0; | ||
1241 | thisload = this_cfs_rq->load.weight; | ||
1242 | #else | 1214 | #else |
1243 | # define maxload rem_load_move | 1215 | # define maxload rem_load_move |
1244 | #endif | 1216 | #endif |
@@ -1247,33 +1219,11 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
1247 | * load_balance_[start|next]_fair iterators | 1219 | * load_balance_[start|next]_fair iterators |
1248 | */ | 1220 | */ |
1249 | cfs_rq_iterator.arg = busy_cfs_rq; | 1221 | cfs_rq_iterator.arg = busy_cfs_rq; |
1250 | load_moved = balance_tasks(this_rq, this_cpu, busiest, | 1222 | rem_load_move -= balance_tasks(this_rq, this_cpu, busiest, |
1251 | maxload, sd, idle, all_pinned, | 1223 | maxload, sd, idle, all_pinned, |
1252 | this_best_prio, | 1224 | this_best_prio, |
1253 | &cfs_rq_iterator); | 1225 | &cfs_rq_iterator); |
1254 | 1226 | ||
1255 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
1256 | /* | ||
1257 | * load_moved holds the task load that was moved. The | ||
1258 | * effective (group) weight moved would be: | ||
1259 | * load_moved_eff = load_moved/task_load * group_weight; | ||
1260 | */ | ||
1261 | load_moved = (group_weight * load_moved) / task_load; | ||
1262 | |||
1263 | /* Adjust shares on both cpus to reflect load_moved */ | ||
1264 | group_weight -= load_moved; | ||
1265 | set_se_shares(se, group_weight); | ||
1266 | |||
1267 | se = busy_cfs_rq->tg->se[this_cpu]; | ||
1268 | if (!thisload) | ||
1269 | group_weight = load_moved; | ||
1270 | else | ||
1271 | group_weight = se->load.weight + load_moved; | ||
1272 | set_se_shares(se, group_weight); | ||
1273 | #endif | ||
1274 | |||
1275 | rem_load_move -= load_moved; | ||
1276 | |||
1277 | if (rem_load_move <= 0) | 1227 | if (rem_load_move <= 0) |
1278 | break; | 1228 | break; |
1279 | } | 1229 | } |
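The group-scheduling branch of load_balance_fair() above becomes a plain weight comparison: pull nothing if the destination cfs_rq already carries at least as much load as the busy one, otherwise pull at most half the gap, capped by the load still to be moved, and let cfs_rq_best_prio() seed this_best_prio instead of disabling priority-based balancing. The maxload arithmetic as a standalone sketch:

	#include <stdio.h>

	/* How much weight to try to pull from 'busiest' toward 'this', as
	 * in the reworked load_balance_fair() hunk above (a sketch, not
	 * kernel code). */
	static unsigned long group_maxload(unsigned long busiest_weight,
					   unsigned long this_weight,
					   unsigned long rem_load_move)
	{
		long imbalance = (long)busiest_weight - (long)this_weight;

		if (imbalance <= 0)	/* destination already has more load */
			return 0;

		imbalance /= 2;		/* never pull more than half the gap */
		return rem_load_move < (unsigned long)imbalance ?
		       rem_load_move : (unsigned long)imbalance;
	}

	int main(void)
	{
		printf("%lu\n", group_maxload(3072, 1024, 4096)); /* 1024 */
		printf("%lu\n", group_maxload(1024, 2048, 4096)); /* 0 */
		return 0;
	}

Halving the gap keeps the two queues from overshooting past each other on the next balance pass.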
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index f54792b175b2..76e828517541 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -393,8 +393,6 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup) | |||
393 | */ | 393 | */ |
394 | for_each_sched_rt_entity(rt_se) | 394 | for_each_sched_rt_entity(rt_se) |
395 | enqueue_rt_entity(rt_se); | 395 | enqueue_rt_entity(rt_se); |
396 | |||
397 | inc_cpu_load(rq, p->se.load.weight); | ||
398 | } | 396 | } |
399 | 397 | ||
400 | static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep) | 398 | static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep) |
@@ -414,8 +412,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep) | |||
414 | if (rt_rq && rt_rq->rt_nr_running) | 412 | if (rt_rq && rt_rq->rt_nr_running) |
415 | enqueue_rt_entity(rt_se); | 413 | enqueue_rt_entity(rt_se); |
416 | } | 414 | } |
417 | |||
418 | dec_cpu_load(rq, p->se.load.weight); | ||
419 | } | 415 | } |
420 | 416 | ||
421 | /* | 417 | /* |
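The sched_rt.c hunks remove the per-class inc_cpu_load()/dec_cpu_load() calls, matching the sched.c change earlier in this patch where the caller pairs enqueue_task() with inc_load(); load accounting presumably lives in one generic place now rather than in each scheduling class. A hedged sketch of that pairing (names mirror the hunks, bodies are illustrative only):

	#include <stdio.h>

	/* Hedged sketch: the per-class hook only queues the task; the
	 * caller accounts rq load once via inc_load(), as in the
	 * set_user_nice() hunk above.  Not kernel code. */
	struct task { unsigned long weight; };
	struct rq   { unsigned long load; };

	static void enqueue_task(struct rq *rq, struct task *p)
	{
		(void)rq; (void)p;	/* class-specific queueing goes here */
	}

	static void inc_load(struct rq *rq, struct task *p)
	{
		rq->load += p->weight;
	}

	static void activate(struct rq *rq, struct task *p)
	{
		enqueue_task(rq, p);
		inc_load(rq, p);	/* accounted once, for every class */
	}

	int main(void)
	{
		struct rq rq = { 0 };
		struct task p = { 1024 };

		activate(&rq, &p);
		printf("rq load = %lu\n", rq.load);
		return 0;
	}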
diff --git a/kernel/signal.c b/kernel/signal.c index 84917fe507f7..6af1210092c3 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -1623,7 +1623,6 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) | |||
1623 | /* Let the debugger run. */ | 1623 | /* Let the debugger run. */ |
1624 | __set_current_state(TASK_TRACED); | 1624 | __set_current_state(TASK_TRACED); |
1625 | spin_unlock_irq(¤t->sighand->siglock); | 1625 | spin_unlock_irq(¤t->sighand->siglock); |
1626 | try_to_freeze(); | ||
1627 | read_lock(&tasklist_lock); | 1626 | read_lock(&tasklist_lock); |
1628 | if (!unlikely(killed) && may_ptrace_stop()) { | 1627 | if (!unlikely(killed) && may_ptrace_stop()) { |
1629 | do_notify_parent_cldstop(current, CLD_TRAPPED); | 1628 | do_notify_parent_cldstop(current, CLD_TRAPPED); |
@@ -1641,6 +1640,13 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) | |||
1641 | } | 1640 | } |
1642 | 1641 | ||
1643 | /* | 1642 | /* |
1643 | * While in TASK_TRACED, we were considered "frozen enough". | ||
1644 | * Now that we woke up, it's crucial if we're supposed to be | ||
1645 | * frozen that we freeze now before running anything substantial. | ||
1646 | */ | ||
1647 | try_to_freeze(); | ||
1648 | |||
1649 | /* | ||
1644 | * We are back. Now reacquire the siglock before touching | 1650 | * We are back. Now reacquire the siglock before touching |
1645 | * last_siginfo, so that we are sure to have synchronized with | 1651 | * last_siginfo, so that we are sure to have synchronized with |
1646 | * any signal-sending on another CPU that wants to examine it. | 1652 | * any signal-sending on another CPU that wants to examine it. |
@@ -1757,9 +1763,15 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, | |||
1757 | sigset_t *mask = ¤t->blocked; | 1763 | sigset_t *mask = ¤t->blocked; |
1758 | int signr = 0; | 1764 | int signr = 0; |
1759 | 1765 | ||
1766 | relock: | ||
1767 | /* | ||
1768 | * We'll jump back here after any time we were stopped in TASK_STOPPED. | ||
1769 | * While in TASK_STOPPED, we were considered "frozen enough". | ||
1770 | * Now that we woke up, it's crucial if we're supposed to be | ||
1771 | * frozen that we freeze now before running anything substantial. | ||
1772 | */ | ||
1760 | try_to_freeze(); | 1773 | try_to_freeze(); |
1761 | 1774 | ||
1762 | relock: | ||
1763 | spin_lock_irq(¤t->sighand->siglock); | 1775 | spin_lock_irq(¤t->sighand->siglock); |
1764 | for (;;) { | 1776 | for (;;) { |
1765 | struct k_sigaction *ka; | 1777 | struct k_sigaction *ka; |
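Both signal.c hunks move try_to_freeze() so it runs after the task wakes from TASK_TRACED or TASK_STOPPED but before it does anything substantial, and the relock: label is hoisted above the freeze so every trip back through the stopped path re-checks the freezer. A minimal standalone sketch of that ordering, with plain flags standing in for the freezer and the stop/wake cycle (illustrative only):

	#include <stdbool.h>
	#include <stdio.h>

	/* Plain flags stand in for the freezer and the ptrace stop/wake
	 * cycle; try_to_freeze() and the siglock are kernel primitives,
	 * so everything here is illustrative only. */
	static bool freezing;			/* "system is suspending" */
	static bool froze_after_wakeup;

	static void try_to_freeze(void)
	{
		if (freezing) {
			froze_after_wakeup = true; /* froze before real work */
			freezing = false;
		}
	}

	static int get_signal(void)
	{
		int stops = 0;

	relock:
		/* We may arrive here straight after waking from a stopped
		 * state, so freeze before taking locks or delivering. */
		try_to_freeze();

		if (stops++ == 0) {
			/* Pretend we stopped for the debugger, a suspend
			 * started, and we were woken back up. */
			freezing = true;
			goto relock;
		}
		return 0;
	}

	int main(void)
	{
		get_signal();
		printf("froze right after wakeup: %s\n",
		       froze_after_wakeup ? "yes" : "no");
		return 0;
	}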
diff --git a/kernel/softirq.c b/kernel/softirq.c index 5b3aea5f471e..31e9f2a47928 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -313,6 +313,7 @@ void irq_exit(void) | |||
313 | /* Make sure that timer wheel updates are propagated */ | 313 | /* Make sure that timer wheel updates are propagated */ |
314 | if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched()) | 314 | if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched()) |
315 | tick_nohz_stop_sched_tick(); | 315 | tick_nohz_stop_sched_tick(); |
316 | rcu_irq_exit(); | ||
316 | #endif | 317 | #endif |
317 | preempt_enable_no_resched(); | 318 | preempt_enable_no_resched(); |
318 | } | 319 | } |
diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 7c2da88db4ed..01b6522fd92b 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c | |||
@@ -216,26 +216,27 @@ static int watchdog(void *__bind_cpu) | |||
216 | /* initialize timestamp */ | 216 | /* initialize timestamp */ |
217 | touch_softlockup_watchdog(); | 217 | touch_softlockup_watchdog(); |
218 | 218 | ||
219 | set_current_state(TASK_INTERRUPTIBLE); | ||
219 | /* | 220 | /* |
220 | * Run briefly once per second to reset the softlockup timestamp. | 221 | * Run briefly once per second to reset the softlockup timestamp. |
221 | * If this gets delayed for more than 60 seconds then the | 222 | * If this gets delayed for more than 60 seconds then the |
222 | * debug-printout triggers in softlockup_tick(). | 223 | * debug-printout triggers in softlockup_tick(). |
223 | */ | 224 | */ |
224 | while (!kthread_should_stop()) { | 225 | while (!kthread_should_stop()) { |
225 | set_current_state(TASK_INTERRUPTIBLE); | ||
226 | touch_softlockup_watchdog(); | 226 | touch_softlockup_watchdog(); |
227 | schedule(); | 227 | schedule(); |
228 | 228 | ||
229 | if (kthread_should_stop()) | 229 | if (kthread_should_stop()) |
230 | break; | 230 | break; |
231 | 231 | ||
232 | if (this_cpu != check_cpu) | 232 | if (this_cpu == check_cpu) { |
233 | continue; | 233 | if (sysctl_hung_task_timeout_secs) |
234 | 234 | check_hung_uninterruptible_tasks(this_cpu); | |
235 | if (sysctl_hung_task_timeout_secs) | 235 | } |
236 | check_hung_uninterruptible_tasks(this_cpu); | ||
237 | 236 | ||
237 | set_current_state(TASK_INTERRUPTIBLE); | ||
238 | } | 238 | } |
239 | __set_current_state(TASK_RUNNING); | ||
239 | 240 | ||
240 | return 0; | 241 | return 0; |
241 | } | 242 | } |
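The watchdog loop above is reshaped into the usual kthread sleep pattern: mark the task TASK_INTERRUPTIBLE before the final condition check and schedule(), so a wake-up that lands in between is not lost, and restore TASK_RUNNING on the way out. A toy user-space model of why the ordering matters (set_current_state() and schedule() are modelled with flags; not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy model: mark ourselves sleeping *before* checking for work,
	 * so a wake-up arriving in between is not lost. */
	enum state { RUNNING, INTERRUPTIBLE };

	static enum state cur_state = RUNNING;
	static bool wake_pending;

	static void set_current_state(enum state s) { cur_state = s; }

	static void wake_up(void) { wake_pending = true; cur_state = RUNNING; }

	static bool schedule(void)
	{
		/* Only really sleeps if nobody woke us since we marked
		 * ourselves INTERRUPTIBLE. */
		if (cur_state == INTERRUPTIBLE && !wake_pending) {
			printf("sleeping\n");
			return true;
		}
		wake_pending = false;
		printf("wake-up was pending, not sleeping\n");
		return false;
	}

	int main(void)
	{
		set_current_state(INTERRUPTIBLE); /* before the check */
		wake_up();                        /* racing wake-up */
		schedule();                       /* not lost: keep running */
		set_current_state(RUNNING);
		return 0;
	}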
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 8b7e95411795..b2a2d6889bab 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -311,24 +311,6 @@ static struct ctl_table kern_table[] = { | |||
311 | .mode = 0644, | 311 | .mode = 0644, |
312 | .proc_handler = &proc_dointvec, | 312 | .proc_handler = &proc_dointvec, |
313 | }, | 313 | }, |
314 | #if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP) | ||
315 | { | ||
316 | .ctl_name = CTL_UNNUMBERED, | ||
317 | .procname = "sched_min_bal_int_shares", | ||
318 | .data = &sysctl_sched_min_bal_int_shares, | ||
319 | .maxlen = sizeof(unsigned int), | ||
320 | .mode = 0644, | ||
321 | .proc_handler = &proc_dointvec, | ||
322 | }, | ||
323 | { | ||
324 | .ctl_name = CTL_UNNUMBERED, | ||
325 | .procname = "sched_max_bal_int_shares", | ||
326 | .data = &sysctl_sched_max_bal_int_shares, | ||
327 | .maxlen = sizeof(unsigned int), | ||
328 | .mode = 0644, | ||
329 | .proc_handler = &proc_dointvec, | ||
330 | }, | ||
331 | #endif | ||
332 | #endif | 314 | #endif |
333 | { | 315 | { |
334 | .ctl_name = CTL_UNNUMBERED, | 316 | .ctl_name = CTL_UNNUMBERED, |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index fa9bb73dbdb4..2968298f8f36 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -282,6 +282,7 @@ void tick_nohz_stop_sched_tick(void) | |||
282 | ts->idle_tick = ts->sched_timer.expires; | 282 | ts->idle_tick = ts->sched_timer.expires; |
283 | ts->tick_stopped = 1; | 283 | ts->tick_stopped = 1; |
284 | ts->idle_jiffies = last_jiffies; | 284 | ts->idle_jiffies = last_jiffies; |
285 | rcu_enter_nohz(); | ||
285 | } | 286 | } |
286 | 287 | ||
287 | /* | 288 | /* |
@@ -375,6 +376,8 @@ void tick_nohz_restart_sched_tick(void) | |||
375 | return; | 376 | return; |
376 | } | 377 | } |
377 | 378 | ||
379 | rcu_exit_nohz(); | ||
380 | |||
378 | /* Update jiffies first */ | 381 | /* Update jiffies first */ |
379 | select_nohz_load_balancer(0); | 382 | select_nohz_load_balancer(0); |
380 | now = ktime_get(); | 383 | now = ktime_get(); |
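Together with the rcu_irq_exit() call added to irq_exit() in the softirq.c hunk, the tick-sched.c changes bracket the tickless idle window with rcu_enter_nohz()/rcu_exit_nohz(), so the RCU implementation (presumably the preemptible-RCU work elsewhere in this series) can tell when a CPU stops taking scheduler ticks. A hedged sketch of the bracketing with a dynticks-style counter (even means quiescent, odd means active; illustrative, not the kernel's counters):

	#include <stdio.h>

	/* Illustrative nohz bracketing: an even counter means "this CPU
	 * may be tickless / quiescent", odd means "actively running
	 * RCU-visible code".  Mimics the dynticks idea only. */
	static unsigned int dynticks = 1;	/* start "active" (odd) */

	static void rcu_enter_nohz(void) { dynticks++; /* becomes even */ }
	static void rcu_exit_nohz(void)  { dynticks++; /* becomes odd  */ }

	static void tick_nohz_stop_sched_tick(void)
	{
		/* ... stop the periodic tick ... */
		rcu_enter_nohz();	/* tell RCU this CPU is going quiet */
	}

	static void tick_nohz_restart_sched_tick(void)
	{
		rcu_exit_nohz();	/* CPU is noisy again before working */
		/* ... restart the periodic tick ... */
	}

	int main(void)
	{
		tick_nohz_stop_sched_tick();
		printf("idle, dynticks=%u (even => quiescent)\n", dynticks);
		tick_nohz_restart_sched_tick();
		printf("busy, dynticks=%u (odd  => active)\n", dynticks);
		return 0;
	}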
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c index 495575a59ca6..a3b8d4c3f77a 100644 --- a/lib/iommu-helper.c +++ b/lib/iommu-helper.c | |||
@@ -40,10 +40,12 @@ static inline void set_bit_area(unsigned long *map, unsigned long i, | |||
40 | } | 40 | } |
41 | } | 41 | } |
42 | 42 | ||
43 | static inline int is_span_boundary(unsigned int index, unsigned int nr, | 43 | int iommu_is_span_boundary(unsigned int index, unsigned int nr, |
44 | unsigned long shift, | 44 | unsigned long shift, |
45 | unsigned long boundary_size) | 45 | unsigned long boundary_size) |
46 | { | 46 | { |
47 | BUG_ON(!is_power_of_2(boundary_size)); | ||
48 | |||
47 | shift = (shift + index) & (boundary_size - 1); | 49 | shift = (shift + index) & (boundary_size - 1); |
48 | return shift + nr > boundary_size; | 50 | return shift + nr > boundary_size; |
49 | } | 51 | } |
@@ -57,7 +59,7 @@ unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, | |||
57 | again: | 59 | again: |
58 | index = find_next_zero_area(map, size, start, nr, align_mask); | 60 | index = find_next_zero_area(map, size, start, nr, align_mask); |
59 | if (index != -1) { | 61 | if (index != -1) { |
60 | if (is_span_boundary(index, nr, shift, boundary_size)) { | 62 | if (iommu_is_span_boundary(index, nr, shift, boundary_size)) { |
61 | /* we could do more effectively */ | 63 | /* we could do more effectively */ |
62 | start = index + 1; | 64 | start = index + 1; |
63 | goto again; | 65 | goto again; |
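is_span_boundary() is renamed and exported as iommu_is_span_boundary(), and it now BUG()s on a non-power-of-two boundary_size because the wrap-around test relies on masking with boundary_size - 1. The arithmetic as a standalone sketch:

	#include <assert.h>
	#include <stdio.h>

	/* Same arithmetic as iommu_is_span_boundary(): does an allocation
	 * of 'nr' slots starting at 'index' (offset by 'shift') cross a
	 * boundary_size-aligned boundary?  boundary_size must be a power
	 * of 2 because the wrap-around is computed with a mask. */
	static int span_boundary(unsigned int index, unsigned int nr,
				 unsigned long shift,
				 unsigned long boundary_size)
	{
		assert((boundary_size & (boundary_size - 1)) == 0);

		shift = (shift + index) & (boundary_size - 1);
		return shift + nr > boundary_size;
	}

	int main(void)
	{
		/* 4 slots starting at offset 62 cross a 64-slot boundary... */
		printf("%d\n", span_boundary(62, 4, 0, 64));	/* 1 */
		/* ...but 2 slots starting there just fit. */
		printf("%d\n", span_boundary(62, 2, 0, 64));	/* 0 */
		return 0;
	}

Exporting the helper lets callers outside lib/ reuse the check instead of duplicating it, which is presumably the point of the rename.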
diff --git a/lib/kobject.c b/lib/kobject.c index d784daeb8571..0d03252f87a8 100644 --- a/lib/kobject.c +++ b/lib/kobject.c | |||
@@ -153,6 +153,10 @@ static void kobject_init_internal(struct kobject *kobj) | |||
153 | return; | 153 | return; |
154 | kref_init(&kobj->kref); | 154 | kref_init(&kobj->kref); |
155 | INIT_LIST_HEAD(&kobj->entry); | 155 | INIT_LIST_HEAD(&kobj->entry); |
156 | kobj->state_in_sysfs = 0; | ||
157 | kobj->state_add_uevent_sent = 0; | ||
158 | kobj->state_remove_uevent_sent = 0; | ||
159 | kobj->state_initialized = 1; | ||
156 | } | 160 | } |
157 | 161 | ||
158 | 162 | ||
@@ -289,13 +293,8 @@ void kobject_init(struct kobject *kobj, struct kobj_type *ktype) | |||
289 | dump_stack(); | 293 | dump_stack(); |
290 | } | 294 | } |
291 | 295 | ||
292 | kref_init(&kobj->kref); | 296 | kobject_init_internal(kobj); |
293 | INIT_LIST_HEAD(&kobj->entry); | ||
294 | kobj->ktype = ktype; | 297 | kobj->ktype = ktype; |
295 | kobj->state_in_sysfs = 0; | ||
296 | kobj->state_add_uevent_sent = 0; | ||
297 | kobj->state_remove_uevent_sent = 0; | ||
298 | kobj->state_initialized = 1; | ||
299 | return; | 298 | return; |
300 | 299 | ||
301 | error: | 300 | error: |
diff --git a/mm/Makefile b/mm/Makefile index 9f117bab5322..a5b0dd93427a 100644 --- a/mm/Makefile +++ b/mm/Makefile | |||
@@ -32,5 +32,5 @@ obj-$(CONFIG_FS_XIP) += filemap_xip.o | |||
32 | obj-$(CONFIG_MIGRATION) += migrate.o | 32 | obj-$(CONFIG_MIGRATION) += migrate.o |
33 | obj-$(CONFIG_SMP) += allocpercpu.o | 33 | obj-$(CONFIG_SMP) += allocpercpu.o |
34 | obj-$(CONFIG_QUICKLIST) += quicklist.o | 34 | obj-$(CONFIG_QUICKLIST) += quicklist.o |
35 | obj-$(CONFIG_CGROUP_MEM_CONT) += memcontrol.o | 35 | obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o |
36 | 36 | ||
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c index 7e58322b7134..b0012e27fea8 100644 --- a/mm/allocpercpu.c +++ b/mm/allocpercpu.c | |||
@@ -6,6 +6,10 @@ | |||
6 | #include <linux/mm.h> | 6 | #include <linux/mm.h> |
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | 8 | ||
9 | #ifndef cache_line_size | ||
10 | #define cache_line_size() L1_CACHE_BYTES | ||
11 | #endif | ||
12 | |||
9 | /** | 13 | /** |
10 | * percpu_depopulate - depopulate per-cpu data for given cpu | 14 | * percpu_depopulate - depopulate per-cpu data for given cpu |
11 | * @__pdata: per-cpu data to depopulate | 15 | * @__pdata: per-cpu data to depopulate |
@@ -52,6 +56,11 @@ void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu) | |||
52 | struct percpu_data *pdata = __percpu_disguise(__pdata); | 56 | struct percpu_data *pdata = __percpu_disguise(__pdata); |
53 | int node = cpu_to_node(cpu); | 57 | int node = cpu_to_node(cpu); |
54 | 58 | ||
59 | /* | ||
60 | * We should make sure each CPU gets private memory. | ||
61 | */ | ||
62 | size = roundup(size, cache_line_size()); | ||
63 | |||
55 | BUG_ON(pdata->ptrs[cpu]); | 64 | BUG_ON(pdata->ptrs[cpu]); |
56 | if (node_online(node)) | 65 | if (node_online(node)) |
57 | pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node); | 66 | pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node); |
@@ -98,7 +107,11 @@ EXPORT_SYMBOL_GPL(__percpu_populate_mask); | |||
98 | */ | 107 | */ |
99 | void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) | 108 | void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) |
100 | { | 109 | { |
101 | void *pdata = kzalloc(nr_cpu_ids * sizeof(void *), gfp); | 110 | /* |
111 | * We allocate whole cache lines to avoid false sharing | ||
112 | */ | ||
113 | size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size()); | ||
114 | void *pdata = kzalloc(sz, gfp); | ||
102 | void *__pdata = __percpu_disguise(pdata); | 115 | void *__pdata = __percpu_disguise(pdata); |
103 | 116 | ||
104 | if (unlikely(!pdata)) | 117 | if (unlikely(!pdata)) |
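Both allocpercpu paths now round sizes up to a whole cache line so per-cpu objects never share a line and write traffic from one CPU cannot invalidate another CPU's copy (false sharing). The roundup as a standalone sketch, assuming 64-byte lines where the kernel would ask cache_line_size():

	#include <stdio.h>

	/* roundup() as used above: round 'size' up to a multiple of the
	 * cache line.  64 bytes is assumed here; the kernel falls back
	 * to L1_CACHE_BYTES when cache_line_size() is not defined. */
	#define CACHE_LINE 64UL

	static unsigned long roundup_line(unsigned long size)
	{
		return ((size + CACHE_LINE - 1) / CACHE_LINE) * CACHE_LINE;
	}

	int main(void)
	{
		/* A 12-byte per-cpu object still consumes a full line per
		 * CPU, so two CPUs never write to the same line. */
		printf("%lu -> %lu\n", 12UL, roundup_line(12));  /* 64  */
		printf("%lu -> %lu\n", 64UL, roundup_line(64));  /* 64  */
		printf("%lu -> %lu\n", 65UL, roundup_line(65));  /* 128 */
		return 0;
	}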
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 89e6286a7f57..dcacc811e70e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -71,7 +71,25 @@ static void enqueue_huge_page(struct page *page) | |||
71 | free_huge_pages_node[nid]++; | 71 | free_huge_pages_node[nid]++; |
72 | } | 72 | } |
73 | 73 | ||
74 | static struct page *dequeue_huge_page(struct vm_area_struct *vma, | 74 | static struct page *dequeue_huge_page(void) |
75 | { | ||
76 | int nid; | ||
77 | struct page *page = NULL; | ||
78 | |||
79 | for (nid = 0; nid < MAX_NUMNODES; ++nid) { | ||
80 | if (!list_empty(&hugepage_freelists[nid])) { | ||
81 | page = list_entry(hugepage_freelists[nid].next, | ||
82 | struct page, lru); | ||
83 | list_del(&page->lru); | ||
84 | free_huge_pages--; | ||
85 | free_huge_pages_node[nid]--; | ||
86 | break; | ||
87 | } | ||
88 | } | ||
89 | return page; | ||
90 | } | ||
91 | |||
92 | static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma, | ||
75 | unsigned long address) | 93 | unsigned long address) |
76 | { | 94 | { |
77 | int nid; | 95 | int nid; |
@@ -296,8 +314,10 @@ static int gather_surplus_pages(int delta) | |||
296 | int needed, allocated; | 314 | int needed, allocated; |
297 | 315 | ||
298 | needed = (resv_huge_pages + delta) - free_huge_pages; | 316 | needed = (resv_huge_pages + delta) - free_huge_pages; |
299 | if (needed <= 0) | 317 | if (needed <= 0) { |
318 | resv_huge_pages += delta; | ||
300 | return 0; | 319 | return 0; |
320 | } | ||
301 | 321 | ||
302 | allocated = 0; | 322 | allocated = 0; |
303 | INIT_LIST_HEAD(&surplus_list); | 323 | INIT_LIST_HEAD(&surplus_list); |
@@ -335,9 +355,12 @@ retry: | |||
335 | * The surplus_list now contains _at_least_ the number of extra pages | 355 | * The surplus_list now contains _at_least_ the number of extra pages |
336 | * needed to accomodate the reservation. Add the appropriate number | 356 | * needed to accomodate the reservation. Add the appropriate number |
337 | * of pages to the hugetlb pool and free the extras back to the buddy | 357 | * of pages to the hugetlb pool and free the extras back to the buddy |
338 | * allocator. | 358 | * allocator. Commit the entire reservation here to prevent another |
359 | * process from stealing the pages as they are added to the pool but | ||
360 | * before they are reserved. | ||
339 | */ | 361 | */ |
340 | needed += allocated; | 362 | needed += allocated; |
363 | resv_huge_pages += delta; | ||
341 | ret = 0; | 364 | ret = 0; |
342 | free: | 365 | free: |
343 | list_for_each_entry_safe(page, tmp, &surplus_list, lru) { | 366 | list_for_each_entry_safe(page, tmp, &surplus_list, lru) { |
@@ -371,6 +394,9 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages) | |||
371 | struct page *page; | 394 | struct page *page; |
372 | unsigned long nr_pages; | 395 | unsigned long nr_pages; |
373 | 396 | ||
397 | /* Uncommit the reservation */ | ||
398 | resv_huge_pages -= unused_resv_pages; | ||
399 | |||
374 | nr_pages = min(unused_resv_pages, surplus_huge_pages); | 400 | nr_pages = min(unused_resv_pages, surplus_huge_pages); |
375 | 401 | ||
376 | while (nr_pages) { | 402 | while (nr_pages) { |
@@ -402,7 +428,7 @@ static struct page *alloc_huge_page_shared(struct vm_area_struct *vma, | |||
402 | struct page *page; | 428 | struct page *page; |
403 | 429 | ||
404 | spin_lock(&hugetlb_lock); | 430 | spin_lock(&hugetlb_lock); |
405 | page = dequeue_huge_page(vma, addr); | 431 | page = dequeue_huge_page_vma(vma, addr); |
406 | spin_unlock(&hugetlb_lock); | 432 | spin_unlock(&hugetlb_lock); |
407 | return page ? page : ERR_PTR(-VM_FAULT_OOM); | 433 | return page ? page : ERR_PTR(-VM_FAULT_OOM); |
408 | } | 434 | } |
@@ -417,7 +443,7 @@ static struct page *alloc_huge_page_private(struct vm_area_struct *vma, | |||
417 | 443 | ||
418 | spin_lock(&hugetlb_lock); | 444 | spin_lock(&hugetlb_lock); |
419 | if (free_huge_pages > resv_huge_pages) | 445 | if (free_huge_pages > resv_huge_pages) |
420 | page = dequeue_huge_page(vma, addr); | 446 | page = dequeue_huge_page_vma(vma, addr); |
421 | spin_unlock(&hugetlb_lock); | 447 | spin_unlock(&hugetlb_lock); |
422 | if (!page) { | 448 | if (!page) { |
423 | page = alloc_buddy_huge_page(vma, addr); | 449 | page = alloc_buddy_huge_page(vma, addr); |
@@ -570,7 +596,7 @@ static unsigned long set_max_huge_pages(unsigned long count) | |||
570 | min_count = max(count, min_count); | 596 | min_count = max(count, min_count); |
571 | try_to_free_low(min_count); | 597 | try_to_free_low(min_count); |
572 | while (min_count < persistent_huge_pages) { | 598 | while (min_count < persistent_huge_pages) { |
573 | struct page *page = dequeue_huge_page(NULL, 0); | 599 | struct page *page = dequeue_huge_page(); |
574 | if (!page) | 600 | if (!page) |
575 | break; | 601 | break; |
576 | update_and_free_page(page); | 602 | update_and_free_page(page); |
@@ -1205,12 +1231,13 @@ static int hugetlb_acct_memory(long delta) | |||
1205 | if (gather_surplus_pages(delta) < 0) | 1231 | if (gather_surplus_pages(delta) < 0) |
1206 | goto out; | 1232 | goto out; |
1207 | 1233 | ||
1208 | if (delta > cpuset_mems_nr(free_huge_pages_node)) | 1234 | if (delta > cpuset_mems_nr(free_huge_pages_node)) { |
1235 | return_unused_surplus_pages(delta); | ||
1209 | goto out; | 1236 | goto out; |
1237 | } | ||
1210 | } | 1238 | } |
1211 | 1239 | ||
1212 | ret = 0; | 1240 | ret = 0; |
1213 | resv_huge_pages += delta; | ||
1214 | if (delta < 0) | 1241 | if (delta < 0) |
1215 | return_unused_surplus_pages((unsigned long) -delta); | 1242 | return_unused_surplus_pages((unsigned long) -delta); |
1216 | 1243 | ||
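The hugetlb hunks commit the reservation (resv_huge_pages += delta) inside gather_surplus_pages() itself, while the freshly allocated surplus pages are still being added to the pool, and make return_unused_surplus_pages() uncommit it; this closes the window in which another allocator could grab the new pages before the reservation was recorded. A toy model of the commit/uncommit bookkeeping (counters mirror the names above, locking elided):

	#include <stdio.h>

	/* Toy model: commit the reservation in the same step that adds
	 * pages to the pool, so no other allocator consumes them first. */
	static long free_huge_pages;
	static long resv_huge_pages;

	static int reserve(long delta)
	{
		long needed = (resv_huge_pages + delta) - free_huge_pages;

		if (needed <= 0) {
			resv_huge_pages += delta; /* enough free pages */
			return 0;
		}

		/* ...allocate 'needed' surplus pages from the buddy... */
		free_huge_pages += needed;
		resv_huge_pages += delta;	/* commit before anyone races */
		return 0;
	}

	static void unreserve(long delta)
	{
		resv_huge_pages -= delta;	/* uncommit */
		/* ...return unneeded surplus pages to the buddy... */
	}

	int main(void)
	{
		free_huge_pages = 2;
		reserve(5);
		printf("free=%ld resv=%ld\n", free_huge_pages, resv_huge_pages);
		unreserve(5);
		printf("free=%ld resv=%ld\n", free_huge_pages, resv_huge_pages);
		return 0;
	}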
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 631002d085d1..8b9f6cae938e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -137,14 +137,21 @@ struct mem_cgroup { | |||
137 | */ | 137 | */ |
138 | struct mem_cgroup_stat stat; | 138 | struct mem_cgroup_stat stat; |
139 | }; | 139 | }; |
140 | static struct mem_cgroup init_mem_cgroup; | ||
140 | 141 | ||
141 | /* | 142 | /* |
142 | * We use the lower bit of the page->page_cgroup pointer as a bit spin | 143 | * We use the lower bit of the page->page_cgroup pointer as a bit spin |
143 | * lock. We need to ensure that page->page_cgroup is atleast two | 144 | * lock. We need to ensure that page->page_cgroup is at least two |
144 | * byte aligned (based on comments from Nick Piggin) | 145 | * byte aligned (based on comments from Nick Piggin). But since |
146 | * bit_spin_lock doesn't actually set that lock bit in a non-debug | ||
147 | * uniprocessor kernel, we should avoid setting it here too. | ||
145 | */ | 148 | */ |
146 | #define PAGE_CGROUP_LOCK_BIT 0x0 | 149 | #define PAGE_CGROUP_LOCK_BIT 0x0 |
147 | #define PAGE_CGROUP_LOCK (1 << PAGE_CGROUP_LOCK_BIT) | 150 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
151 | #define PAGE_CGROUP_LOCK (1 << PAGE_CGROUP_LOCK_BIT) | ||
152 | #else | ||
153 | #define PAGE_CGROUP_LOCK 0x0 | ||
154 | #endif | ||
148 | 155 | ||
149 | /* | 156 | /* |
150 | * A page_cgroup page is associated with every page descriptor. The | 157 | * A page_cgroup page is associated with every page descriptor. The |
@@ -154,37 +161,27 @@ struct page_cgroup { | |||
154 | struct list_head lru; /* per cgroup LRU list */ | 161 | struct list_head lru; /* per cgroup LRU list */ |
155 | struct page *page; | 162 | struct page *page; |
156 | struct mem_cgroup *mem_cgroup; | 163 | struct mem_cgroup *mem_cgroup; |
157 | atomic_t ref_cnt; /* Helpful when pages move b/w */ | 164 | int ref_cnt; /* cached, mapped, migrating */ |
158 | /* mapped and cached states */ | 165 | int flags; |
159 | int flags; | ||
160 | }; | 166 | }; |
161 | #define PAGE_CGROUP_FLAG_CACHE (0x1) /* charged as cache */ | 167 | #define PAGE_CGROUP_FLAG_CACHE (0x1) /* charged as cache */ |
162 | #define PAGE_CGROUP_FLAG_ACTIVE (0x2) /* page is active in this cgroup */ | 168 | #define PAGE_CGROUP_FLAG_ACTIVE (0x2) /* page is active in this cgroup */ |
163 | 169 | ||
164 | static inline int page_cgroup_nid(struct page_cgroup *pc) | 170 | static int page_cgroup_nid(struct page_cgroup *pc) |
165 | { | 171 | { |
166 | return page_to_nid(pc->page); | 172 | return page_to_nid(pc->page); |
167 | } | 173 | } |
168 | 174 | ||
169 | static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc) | 175 | static enum zone_type page_cgroup_zid(struct page_cgroup *pc) |
170 | { | 176 | { |
171 | return page_zonenum(pc->page); | 177 | return page_zonenum(pc->page); |
172 | } | 178 | } |
173 | 179 | ||
174 | enum { | ||
175 | MEM_CGROUP_TYPE_UNSPEC = 0, | ||
176 | MEM_CGROUP_TYPE_MAPPED, | ||
177 | MEM_CGROUP_TYPE_CACHED, | ||
178 | MEM_CGROUP_TYPE_ALL, | ||
179 | MEM_CGROUP_TYPE_MAX, | ||
180 | }; | ||
181 | |||
182 | enum charge_type { | 180 | enum charge_type { |
183 | MEM_CGROUP_CHARGE_TYPE_CACHE = 0, | 181 | MEM_CGROUP_CHARGE_TYPE_CACHE = 0, |
184 | MEM_CGROUP_CHARGE_TYPE_MAPPED, | 182 | MEM_CGROUP_CHARGE_TYPE_MAPPED, |
185 | }; | 183 | }; |
186 | 184 | ||
187 | |||
188 | /* | 185 | /* |
189 | * Always modified under lru lock. Then, not necessary to preempt_disable() | 186 | * Always modified under lru lock. Then, not necessary to preempt_disable() |
190 | */ | 187 | */ |
@@ -193,23 +190,21 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags, | |||
193 | { | 190 | { |
194 | int val = (charge)? 1 : -1; | 191 | int val = (charge)? 1 : -1; |
195 | struct mem_cgroup_stat *stat = &mem->stat; | 192 | struct mem_cgroup_stat *stat = &mem->stat; |
196 | VM_BUG_ON(!irqs_disabled()); | ||
197 | 193 | ||
194 | VM_BUG_ON(!irqs_disabled()); | ||
198 | if (flags & PAGE_CGROUP_FLAG_CACHE) | 195 | if (flags & PAGE_CGROUP_FLAG_CACHE) |
199 | __mem_cgroup_stat_add_safe(stat, | 196 | __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val); |
200 | MEM_CGROUP_STAT_CACHE, val); | ||
201 | else | 197 | else |
202 | __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val); | 198 | __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val); |
203 | } | 199 | } |
204 | 200 | ||
205 | static inline struct mem_cgroup_per_zone * | 201 | static struct mem_cgroup_per_zone * |
206 | mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid) | 202 | mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid) |
207 | { | 203 | { |
208 | BUG_ON(!mem->info.nodeinfo[nid]); | ||
209 | return &mem->info.nodeinfo[nid]->zoneinfo[zid]; | 204 | return &mem->info.nodeinfo[nid]->zoneinfo[zid]; |
210 | } | 205 | } |
211 | 206 | ||
212 | static inline struct mem_cgroup_per_zone * | 207 | static struct mem_cgroup_per_zone * |
213 | page_cgroup_zoneinfo(struct page_cgroup *pc) | 208 | page_cgroup_zoneinfo(struct page_cgroup *pc) |
214 | { | 209 | { |
215 | struct mem_cgroup *mem = pc->mem_cgroup; | 210 | struct mem_cgroup *mem = pc->mem_cgroup; |
@@ -234,18 +229,14 @@ static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem, | |||
234 | return total; | 229 | return total; |
235 | } | 230 | } |
236 | 231 | ||
237 | static struct mem_cgroup init_mem_cgroup; | 232 | static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) |
238 | |||
239 | static inline | ||
240 | struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) | ||
241 | { | 233 | { |
242 | return container_of(cgroup_subsys_state(cont, | 234 | return container_of(cgroup_subsys_state(cont, |
243 | mem_cgroup_subsys_id), struct mem_cgroup, | 235 | mem_cgroup_subsys_id), struct mem_cgroup, |
244 | css); | 236 | css); |
245 | } | 237 | } |
246 | 238 | ||
247 | static inline | 239 | static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) |
248 | struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) | ||
249 | { | 240 | { |
250 | return container_of(task_subsys_state(p, mem_cgroup_subsys_id), | 241 | return container_of(task_subsys_state(p, mem_cgroup_subsys_id), |
251 | struct mem_cgroup, css); | 242 | struct mem_cgroup, css); |
@@ -267,81 +258,33 @@ void mm_free_cgroup(struct mm_struct *mm) | |||
267 | 258 | ||
268 | static inline int page_cgroup_locked(struct page *page) | 259 | static inline int page_cgroup_locked(struct page *page) |
269 | { | 260 | { |
270 | return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, | 261 | return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); |
271 | &page->page_cgroup); | ||
272 | } | 262 | } |
273 | 263 | ||
274 | void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc) | 264 | static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc) |
275 | { | 265 | { |
276 | int locked; | 266 | VM_BUG_ON(!page_cgroup_locked(page)); |
277 | 267 | page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK); | |
278 | /* | ||
279 | * While resetting the page_cgroup we might not hold the | ||
280 | * page_cgroup lock. free_hot_cold_page() is an example | ||
281 | * of such a scenario | ||
282 | */ | ||
283 | if (pc) | ||
284 | VM_BUG_ON(!page_cgroup_locked(page)); | ||
285 | locked = (page->page_cgroup & PAGE_CGROUP_LOCK); | ||
286 | page->page_cgroup = ((unsigned long)pc | locked); | ||
287 | } | 268 | } |
288 | 269 | ||
289 | struct page_cgroup *page_get_page_cgroup(struct page *page) | 270 | struct page_cgroup *page_get_page_cgroup(struct page *page) |
290 | { | 271 | { |
291 | return (struct page_cgroup *) | 272 | return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK); |
292 | (page->page_cgroup & ~PAGE_CGROUP_LOCK); | ||
293 | } | 273 | } |
294 | 274 | ||
295 | static void __always_inline lock_page_cgroup(struct page *page) | 275 | static void lock_page_cgroup(struct page *page) |
296 | { | 276 | { |
297 | bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); | 277 | bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); |
298 | VM_BUG_ON(!page_cgroup_locked(page)); | ||
299 | } | ||
300 | |||
301 | static void __always_inline unlock_page_cgroup(struct page *page) | ||
302 | { | ||
303 | bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); | ||
304 | } | 278 | } |
305 | 279 | ||
306 | /* | 280 | static int try_lock_page_cgroup(struct page *page) |
307 | * Tie new page_cgroup to struct page under lock_page_cgroup() | ||
308 | * This can fail if the page has been tied to a page_cgroup. | ||
309 | * If success, returns 0. | ||
310 | */ | ||
311 | static int page_cgroup_assign_new_page_cgroup(struct page *page, | ||
312 | struct page_cgroup *pc) | ||
313 | { | 281 | { |
314 | int ret = 0; | 282 | return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); |
315 | |||
316 | lock_page_cgroup(page); | ||
317 | if (!page_get_page_cgroup(page)) | ||
318 | page_assign_page_cgroup(page, pc); | ||
319 | else /* A page is tied to other pc. */ | ||
320 | ret = 1; | ||
321 | unlock_page_cgroup(page); | ||
322 | return ret; | ||
323 | } | 283 | } |
324 | 284 | ||
325 | /* | 285 | static void unlock_page_cgroup(struct page *page) |
326 | * Clear page->page_cgroup member under lock_page_cgroup(). | ||
327 | * If given "pc" value is different from one page->page_cgroup, | ||
328 | * page->cgroup is not cleared. | ||
329 | * Returns a value of page->page_cgroup at lock taken. | ||
330 | * A can can detect failure of clearing by following | ||
331 | * clear_page_cgroup(page, pc) == pc | ||
332 | */ | ||
333 | |||
334 | static struct page_cgroup *clear_page_cgroup(struct page *page, | ||
335 | struct page_cgroup *pc) | ||
336 | { | 286 | { |
337 | struct page_cgroup *ret; | 287 | bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); |
338 | /* lock and clear */ | ||
339 | lock_page_cgroup(page); | ||
340 | ret = page_get_page_cgroup(page); | ||
341 | if (likely(ret == pc)) | ||
342 | page_assign_page_cgroup(page, NULL); | ||
343 | unlock_page_cgroup(page); | ||
344 | return ret; | ||
345 | } | 288 | } |
346 | 289 | ||
347 | static void __mem_cgroup_remove_list(struct page_cgroup *pc) | 290 | static void __mem_cgroup_remove_list(struct page_cgroup *pc) |
@@ -399,7 +342,7 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem) | |||
399 | int ret; | 342 | int ret; |
400 | 343 | ||
401 | task_lock(task); | 344 | task_lock(task); |
402 | ret = task->mm && vm_match_cgroup(task->mm, mem); | 345 | ret = task->mm && mm_match_cgroup(task->mm, mem); |
403 | task_unlock(task); | 346 | task_unlock(task); |
404 | return ret; | 347 | return ret; |
405 | } | 348 | } |
@@ -407,18 +350,30 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem) | |||
407 | /* | 350 | /* |
408 | * This routine assumes that the appropriate zone's lru lock is already held | 351 | * This routine assumes that the appropriate zone's lru lock is already held |
409 | */ | 352 | */ |
410 | void mem_cgroup_move_lists(struct page_cgroup *pc, bool active) | 353 | void mem_cgroup_move_lists(struct page *page, bool active) |
411 | { | 354 | { |
355 | struct page_cgroup *pc; | ||
412 | struct mem_cgroup_per_zone *mz; | 356 | struct mem_cgroup_per_zone *mz; |
413 | unsigned long flags; | 357 | unsigned long flags; |
414 | 358 | ||
415 | if (!pc) | 359 | /* |
360 | * We cannot lock_page_cgroup while holding zone's lru_lock, | ||
361 | * because other holders of lock_page_cgroup can be interrupted | ||
362 | * with an attempt to rotate_reclaimable_page. But we cannot | ||
363 | * safely get to page_cgroup without it, so just try_lock it: | ||
364 | * mem_cgroup_isolate_pages allows for page left on wrong list. | ||
365 | */ | ||
366 | if (!try_lock_page_cgroup(page)) | ||
416 | return; | 367 | return; |
417 | 368 | ||
418 | mz = page_cgroup_zoneinfo(pc); | 369 | pc = page_get_page_cgroup(page); |
419 | spin_lock_irqsave(&mz->lru_lock, flags); | 370 | if (pc) { |
420 | __mem_cgroup_move_lists(pc, active); | 371 | mz = page_cgroup_zoneinfo(pc); |
421 | spin_unlock_irqrestore(&mz->lru_lock, flags); | 372 | spin_lock_irqsave(&mz->lru_lock, flags); |
373 | __mem_cgroup_move_lists(pc, active); | ||
374 | spin_unlock_irqrestore(&mz->lru_lock, flags); | ||
375 | } | ||
376 | unlock_page_cgroup(page); | ||
422 | } | 377 | } |
423 | 378 | ||
424 | /* | 379 | /* |
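mem_cgroup_move_lists() now takes the page rather than a page_cgroup and only try-locks the page_cgroup bit, because taking lock_page_cgroup() unconditionally while the zone lru_lock is held can deadlock against a lock holder that is interrupted into rotate_reclaimable_page(); losing the trylock merely leaves a page on the wrong LRU list for a while, which mem_cgroup_isolate_pages tolerates. The try-lock-or-skip pattern as a hedged user-space sketch (a pthread mutex stands in for the bit-spinlock):

	#include <pthread.h>
	#include <stdio.h>

	/* Try-lock-or-skip: when the locking order would otherwise
	 * invert, give up and tolerate slightly stale state instead of
	 * risking deadlock.  Illustrative only. */
	static pthread_mutex_t page_cgroup_lock = PTHREAD_MUTEX_INITIALIZER;

	static void move_lists(int active)
	{
		if (pthread_mutex_trylock(&page_cgroup_lock) != 0) {
			/* Someone else holds it; skip, the isolate path
			 * copes with a page on the wrong LRU list. */
			printf("busy, skipped\n");
			return;
		}
		printf("moved to %s list\n", active ? "active" : "inactive");
		pthread_mutex_unlock(&page_cgroup_lock);
	}

	int main(void)
	{
		move_lists(1);

		pthread_mutex_lock(&page_cgroup_lock);	/* simulate contention */
		move_lists(0);				/* skipped */
		pthread_mutex_unlock(&page_cgroup_lock);
		return 0;
	}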
@@ -437,6 +392,7 @@ int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem) | |||
437 | rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS); | 392 | rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS); |
438 | return (int)((rss * 100L) / total); | 393 | return (int)((rss * 100L) / total); |
439 | } | 394 | } |
395 | |||
440 | /* | 396 | /* |
441 | * This function is called from vmscan.c. In page reclaiming loop. balance | 397 | * This function is called from vmscan.c. In page reclaiming loop. balance |
442 | * between active and inactive list is calculated. For memory controller | 398 | * between active and inactive list is calculated. For memory controller |
@@ -500,7 +456,6 @@ long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem, | |||
500 | struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid); | 456 | struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid); |
501 | 457 | ||
502 | nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE); | 458 | nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE); |
503 | |||
504 | return (nr_inactive >> priority); | 459 | return (nr_inactive >> priority); |
505 | } | 460 | } |
506 | 461 | ||
@@ -586,26 +541,21 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, | |||
586 | * with it | 541 | * with it |
587 | */ | 542 | */ |
588 | retry: | 543 | retry: |
589 | if (page) { | 544 | lock_page_cgroup(page); |
590 | lock_page_cgroup(page); | 545 | pc = page_get_page_cgroup(page); |
591 | pc = page_get_page_cgroup(page); | 546 | /* |
592 | /* | 547 | * The page_cgroup exists and |
593 | * The page_cgroup exists and | 548 | * the page has already been accounted. |
594 | * the page has already been accounted. | 549 | */ |
595 | */ | 550 | if (pc) { |
596 | if (pc) { | 551 | VM_BUG_ON(pc->page != page); |
597 | if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) { | 552 | VM_BUG_ON(pc->ref_cnt <= 0); |
598 | /* this page is under being uncharged ? */ | 553 | |
599 | unlock_page_cgroup(page); | 554 | pc->ref_cnt++; |
600 | cpu_relax(); | ||
601 | goto retry; | ||
602 | } else { | ||
603 | unlock_page_cgroup(page); | ||
604 | goto done; | ||
605 | } | ||
606 | } | ||
607 | unlock_page_cgroup(page); | 555 | unlock_page_cgroup(page); |
556 | goto done; | ||
608 | } | 557 | } |
558 | unlock_page_cgroup(page); | ||
609 | 559 | ||
610 | pc = kzalloc(sizeof(struct page_cgroup), gfp_mask); | 560 | pc = kzalloc(sizeof(struct page_cgroup), gfp_mask); |
611 | if (pc == NULL) | 561 | if (pc == NULL) |
@@ -623,16 +573,11 @@ retry: | |||
623 | rcu_read_lock(); | 573 | rcu_read_lock(); |
624 | mem = rcu_dereference(mm->mem_cgroup); | 574 | mem = rcu_dereference(mm->mem_cgroup); |
625 | /* | 575 | /* |
626 | * For every charge from the cgroup, increment reference | 576 | * For every charge from the cgroup, increment reference count |
627 | * count | ||
628 | */ | 577 | */ |
629 | css_get(&mem->css); | 578 | css_get(&mem->css); |
630 | rcu_read_unlock(); | 579 | rcu_read_unlock(); |
631 | 580 | ||
632 | /* | ||
633 | * If we created the page_cgroup, we should free it on exceeding | ||
634 | * the cgroup limit. | ||
635 | */ | ||
636 | while (res_counter_charge(&mem->res, PAGE_SIZE)) { | 581 | while (res_counter_charge(&mem->res, PAGE_SIZE)) { |
637 | if (!(gfp_mask & __GFP_WAIT)) | 582 | if (!(gfp_mask & __GFP_WAIT)) |
638 | goto out; | 583 | goto out; |
@@ -641,12 +586,12 @@ retry: | |||
641 | continue; | 586 | continue; |
642 | 587 | ||
643 | /* | 588 | /* |
644 | * try_to_free_mem_cgroup_pages() might not give us a full | 589 | * try_to_free_mem_cgroup_pages() might not give us a full |
645 | * picture of reclaim. Some pages are reclaimed and might be | 590 | * picture of reclaim. Some pages are reclaimed and might be |
646 | * moved to swap cache or just unmapped from the cgroup. | 591 | * moved to swap cache or just unmapped from the cgroup. |
647 | * Check the limit again to see if the reclaim reduced the | 592 | * Check the limit again to see if the reclaim reduced the |
648 | * current usage of the cgroup before giving up | 593 | * current usage of the cgroup before giving up |
649 | */ | 594 | */ |
650 | if (res_counter_check_under_limit(&mem->res)) | 595 | if (res_counter_check_under_limit(&mem->res)) |
651 | continue; | 596 | continue; |
652 | 597 | ||
@@ -657,14 +602,16 @@ retry: | |||
657 | congestion_wait(WRITE, HZ/10); | 602 | congestion_wait(WRITE, HZ/10); |
658 | } | 603 | } |
659 | 604 | ||
660 | atomic_set(&pc->ref_cnt, 1); | 605 | pc->ref_cnt = 1; |
661 | pc->mem_cgroup = mem; | 606 | pc->mem_cgroup = mem; |
662 | pc->page = page; | 607 | pc->page = page; |
663 | pc->flags = PAGE_CGROUP_FLAG_ACTIVE; | 608 | pc->flags = PAGE_CGROUP_FLAG_ACTIVE; |
664 | if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE) | 609 | if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE) |
665 | pc->flags |= PAGE_CGROUP_FLAG_CACHE; | 610 | pc->flags |= PAGE_CGROUP_FLAG_CACHE; |
666 | 611 | ||
667 | if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) { | 612 | lock_page_cgroup(page); |
613 | if (page_get_page_cgroup(page)) { | ||
614 | unlock_page_cgroup(page); | ||
668 | /* | 615 | /* |
669 | * Another charge has been added to this page already. | 616 | * Another charge has been added to this page already. |
670 | * We take lock_page_cgroup(page) again and read | 617 | * We take lock_page_cgroup(page) again and read |
@@ -673,17 +620,16 @@ retry: | |||
673 | res_counter_uncharge(&mem->res, PAGE_SIZE); | 620 | res_counter_uncharge(&mem->res, PAGE_SIZE); |
674 | css_put(&mem->css); | 621 | css_put(&mem->css); |
675 | kfree(pc); | 622 | kfree(pc); |
676 | if (!page) | ||
677 | goto done; | ||
678 | goto retry; | 623 | goto retry; |
679 | } | 624 | } |
625 | page_assign_page_cgroup(page, pc); | ||
680 | 626 | ||
681 | mz = page_cgroup_zoneinfo(pc); | 627 | mz = page_cgroup_zoneinfo(pc); |
682 | spin_lock_irqsave(&mz->lru_lock, flags); | 628 | spin_lock_irqsave(&mz->lru_lock, flags); |
683 | /* Update statistics vector */ | ||
684 | __mem_cgroup_add_list(pc); | 629 | __mem_cgroup_add_list(pc); |
685 | spin_unlock_irqrestore(&mz->lru_lock, flags); | 630 | spin_unlock_irqrestore(&mz->lru_lock, flags); |
686 | 631 | ||
632 | unlock_page_cgroup(page); | ||
687 | done: | 633 | done: |
688 | return 0; | 634 | return 0; |
689 | out: | 635 | out: |
@@ -693,70 +639,61 @@ err: | |||
693 | return -ENOMEM; | 639 | return -ENOMEM; |
694 | } | 640 | } |
695 | 641 | ||
696 | int mem_cgroup_charge(struct page *page, struct mm_struct *mm, | 642 | int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) |
697 | gfp_t gfp_mask) | ||
698 | { | 643 | { |
699 | return mem_cgroup_charge_common(page, mm, gfp_mask, | 644 | return mem_cgroup_charge_common(page, mm, gfp_mask, |
700 | MEM_CGROUP_CHARGE_TYPE_MAPPED); | 645 | MEM_CGROUP_CHARGE_TYPE_MAPPED); |
701 | } | 646 | } |
702 | 647 | ||
703 | /* | ||
704 | * See if the cached pages should be charged at all? | ||
705 | */ | ||
706 | int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, | 648 | int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, |
707 | gfp_t gfp_mask) | 649 | gfp_t gfp_mask) |
708 | { | 650 | { |
709 | int ret = 0; | ||
710 | if (!mm) | 651 | if (!mm) |
711 | mm = &init_mm; | 652 | mm = &init_mm; |
712 | 653 | return mem_cgroup_charge_common(page, mm, gfp_mask, | |
713 | ret = mem_cgroup_charge_common(page, mm, gfp_mask, | ||
714 | MEM_CGROUP_CHARGE_TYPE_CACHE); | 654 | MEM_CGROUP_CHARGE_TYPE_CACHE); |
715 | return ret; | ||
716 | } | 655 | } |
717 | 656 | ||
718 | /* | 657 | /* |
719 | * Uncharging is always a welcome operation, we never complain, simply | 658 | * Uncharging is always a welcome operation, we never complain, simply |
720 | * uncharge. This routine should be called with lock_page_cgroup held | 659 | * uncharge. |
721 | */ | 660 | */ |
722 | void mem_cgroup_uncharge(struct page_cgroup *pc) | 661 | void mem_cgroup_uncharge_page(struct page *page) |
723 | { | 662 | { |
663 | struct page_cgroup *pc; | ||
724 | struct mem_cgroup *mem; | 664 | struct mem_cgroup *mem; |
725 | struct mem_cgroup_per_zone *mz; | 665 | struct mem_cgroup_per_zone *mz; |
726 | struct page *page; | ||
727 | unsigned long flags; | 666 | unsigned long flags; |
728 | 667 | ||
729 | /* | 668 | /* |
730 | * Check if our page_cgroup is valid | 669 | * Check if our page_cgroup is valid |
731 | */ | 670 | */ |
671 | lock_page_cgroup(page); | ||
672 | pc = page_get_page_cgroup(page); | ||
732 | if (!pc) | 673 | if (!pc) |
733 | return; | 674 | goto unlock; |
734 | 675 | ||
735 | if (atomic_dec_and_test(&pc->ref_cnt)) { | 676 | VM_BUG_ON(pc->page != page); |
736 | page = pc->page; | 677 | VM_BUG_ON(pc->ref_cnt <= 0); |
678 | |||
679 | if (--(pc->ref_cnt) == 0) { | ||
737 | mz = page_cgroup_zoneinfo(pc); | 680 | mz = page_cgroup_zoneinfo(pc); |
738 | /* | 681 | spin_lock_irqsave(&mz->lru_lock, flags); |
739 | * get page->cgroup and clear it under lock. | 682 | __mem_cgroup_remove_list(pc); |
740 | * force_empty can drop page->cgroup without checking refcnt. | 683 | spin_unlock_irqrestore(&mz->lru_lock, flags); |
741 | */ | 684 | |
685 | page_assign_page_cgroup(page, NULL); | ||
742 | unlock_page_cgroup(page); | 686 | unlock_page_cgroup(page); |
743 | if (clear_page_cgroup(page, pc) == pc) { | 687 | |
744 | mem = pc->mem_cgroup; | 688 | mem = pc->mem_cgroup; |
745 | css_put(&mem->css); | 689 | res_counter_uncharge(&mem->res, PAGE_SIZE); |
746 | res_counter_uncharge(&mem->res, PAGE_SIZE); | 690 | css_put(&mem->css); |
747 | spin_lock_irqsave(&mz->lru_lock, flags); | 691 | |
748 | __mem_cgroup_remove_list(pc); | 692 | kfree(pc); |
749 | spin_unlock_irqrestore(&mz->lru_lock, flags); | 693 | return; |
750 | kfree(pc); | ||
751 | } | ||
752 | lock_page_cgroup(page); | ||
753 | } | 694 | } |
754 | } | ||
755 | 695 | ||
756 | void mem_cgroup_uncharge_page(struct page *page) | 696 | unlock: |
757 | { | ||
758 | lock_page_cgroup(page); | ||
759 | mem_cgroup_uncharge(page_get_page_cgroup(page)); | ||
760 | unlock_page_cgroup(page); | 697 | unlock_page_cgroup(page); |
761 | } | 698 | } |
762 | 699 | ||
@@ -764,63 +701,59 @@ void mem_cgroup_uncharge_page(struct page *page) | |||
764 | * Returns non-zero if a page (under migration) has valid page_cgroup member. | 701 | * Returns non-zero if a page (under migration) has valid page_cgroup member. |
765 | * Refcnt of page_cgroup is incremented. | 702 | * Refcnt of page_cgroup is incremented. |
766 | */ | 703 | */ |
767 | |||
768 | int mem_cgroup_prepare_migration(struct page *page) | 704 | int mem_cgroup_prepare_migration(struct page *page) |
769 | { | 705 | { |
770 | struct page_cgroup *pc; | 706 | struct page_cgroup *pc; |
771 | int ret = 0; | 707 | |
772 | lock_page_cgroup(page); | 708 | lock_page_cgroup(page); |
773 | pc = page_get_page_cgroup(page); | 709 | pc = page_get_page_cgroup(page); |
774 | if (pc && atomic_inc_not_zero(&pc->ref_cnt)) | 710 | if (pc) |
775 | ret = 1; | 711 | pc->ref_cnt++; |
776 | unlock_page_cgroup(page); | 712 | unlock_page_cgroup(page); |
777 | return ret; | 713 | return pc != NULL; |
778 | } | 714 | } |
779 | 715 | ||
780 | void mem_cgroup_end_migration(struct page *page) | 716 | void mem_cgroup_end_migration(struct page *page) |
781 | { | 717 | { |
782 | struct page_cgroup *pc; | 718 | mem_cgroup_uncharge_page(page); |
783 | |||
784 | lock_page_cgroup(page); | ||
785 | pc = page_get_page_cgroup(page); | ||
786 | mem_cgroup_uncharge(pc); | ||
787 | unlock_page_cgroup(page); | ||
788 | } | 719 | } |
720 | |||
789 | /* | 721 | /* |
790 | * We know both *page* and *newpage* are now not-on-LRU and Pg_locked. | 722 | * We know both *page* and *newpage* are now not-on-LRU and PG_locked. |
791 | * And no race with uncharge() routines because page_cgroup for *page* | 723 | * And no race with uncharge() routines because page_cgroup for *page* |
792 | * has extra one reference by mem_cgroup_prepare_migration. | 724 | * has extra one reference by mem_cgroup_prepare_migration. |
793 | */ | 725 | */ |
794 | |||
795 | void mem_cgroup_page_migration(struct page *page, struct page *newpage) | 726 | void mem_cgroup_page_migration(struct page *page, struct page *newpage) |
796 | { | 727 | { |
797 | struct page_cgroup *pc; | 728 | struct page_cgroup *pc; |
798 | struct mem_cgroup *mem; | ||
799 | unsigned long flags; | ||
800 | struct mem_cgroup_per_zone *mz; | 729 | struct mem_cgroup_per_zone *mz; |
801 | retry: | 730 | unsigned long flags; |
731 | |||
732 | lock_page_cgroup(page); | ||
802 | pc = page_get_page_cgroup(page); | 733 | pc = page_get_page_cgroup(page); |
803 | if (!pc) | 734 | if (!pc) { |
735 | unlock_page_cgroup(page); | ||
804 | return; | 736 | return; |
805 | mem = pc->mem_cgroup; | 737 | } |
738 | |||
806 | mz = page_cgroup_zoneinfo(pc); | 739 | mz = page_cgroup_zoneinfo(pc); |
807 | if (clear_page_cgroup(page, pc) != pc) | ||
808 | goto retry; | ||
809 | spin_lock_irqsave(&mz->lru_lock, flags); | 740 | spin_lock_irqsave(&mz->lru_lock, flags); |
810 | |||
811 | __mem_cgroup_remove_list(pc); | 741 | __mem_cgroup_remove_list(pc); |
812 | spin_unlock_irqrestore(&mz->lru_lock, flags); | 742 | spin_unlock_irqrestore(&mz->lru_lock, flags); |
813 | 743 | ||
744 | page_assign_page_cgroup(page, NULL); | ||
745 | unlock_page_cgroup(page); | ||
746 | |||
814 | pc->page = newpage; | 747 | pc->page = newpage; |
815 | lock_page_cgroup(newpage); | 748 | lock_page_cgroup(newpage); |
816 | page_assign_page_cgroup(newpage, pc); | 749 | page_assign_page_cgroup(newpage, pc); |
817 | unlock_page_cgroup(newpage); | ||
818 | 750 | ||
819 | mz = page_cgroup_zoneinfo(pc); | 751 | mz = page_cgroup_zoneinfo(pc); |
820 | spin_lock_irqsave(&mz->lru_lock, flags); | 752 | spin_lock_irqsave(&mz->lru_lock, flags); |
821 | __mem_cgroup_add_list(pc); | 753 | __mem_cgroup_add_list(pc); |
822 | spin_unlock_irqrestore(&mz->lru_lock, flags); | 754 | spin_unlock_irqrestore(&mz->lru_lock, flags); |
823 | return; | 755 | |
756 | unlock_page_cgroup(newpage); | ||
824 | } | 757 | } |
825 | 758 | ||
826 | /* | 759 | /* |
@@ -829,14 +762,13 @@ retry: | |||
829 | * *And* this routine doesn't reclaim page itself, just removes page_cgroup. | 762 | * *And* this routine doesn't reclaim page itself, just removes page_cgroup. |
830 | */ | 763 | */ |
831 | #define FORCE_UNCHARGE_BATCH (128) | 764 | #define FORCE_UNCHARGE_BATCH (128) |
832 | static void | 765 | static void mem_cgroup_force_empty_list(struct mem_cgroup *mem, |
833 | mem_cgroup_force_empty_list(struct mem_cgroup *mem, | ||
834 | struct mem_cgroup_per_zone *mz, | 766 | struct mem_cgroup_per_zone *mz, |
835 | int active) | 767 | int active) |
836 | { | 768 | { |
837 | struct page_cgroup *pc; | 769 | struct page_cgroup *pc; |
838 | struct page *page; | 770 | struct page *page; |
839 | int count; | 771 | int count = FORCE_UNCHARGE_BATCH; |
840 | unsigned long flags; | 772 | unsigned long flags; |
841 | struct list_head *list; | 773 | struct list_head *list; |
842 | 774 | ||
@@ -845,46 +777,36 @@ mem_cgroup_force_empty_list(struct mem_cgroup *mem, | |||
845 | else | 777 | else |
846 | list = &mz->inactive_list; | 778 | list = &mz->inactive_list; |
847 | 779 | ||
848 | if (list_empty(list)) | ||
849 | return; | ||
850 | retry: | ||
851 | count = FORCE_UNCHARGE_BATCH; | ||
852 | spin_lock_irqsave(&mz->lru_lock, flags); | 780 | spin_lock_irqsave(&mz->lru_lock, flags); |
853 | 781 | while (!list_empty(list)) { | |
854 | while (--count && !list_empty(list)) { | ||
855 | pc = list_entry(list->prev, struct page_cgroup, lru); | 782 | pc = list_entry(list->prev, struct page_cgroup, lru); |
856 | page = pc->page; | 783 | page = pc->page; |
857 | /* Avoid race with charge */ | 784 | get_page(page); |
858 | atomic_set(&pc->ref_cnt, 0); | 785 | spin_unlock_irqrestore(&mz->lru_lock, flags); |
859 | if (clear_page_cgroup(page, pc) == pc) { | 786 | mem_cgroup_uncharge_page(page); |
860 | css_put(&mem->css); | 787 | put_page(page); |
861 | res_counter_uncharge(&mem->res, PAGE_SIZE); | 788 | if (--count <= 0) { |
862 | __mem_cgroup_remove_list(pc); | 789 | count = FORCE_UNCHARGE_BATCH; |
863 | kfree(pc); | 790 | cond_resched(); |
864 | } else /* being uncharged ? ...do relax */ | 791 | } |
865 | break; | 792 | spin_lock_irqsave(&mz->lru_lock, flags); |
866 | } | 793 | } |
867 | spin_unlock_irqrestore(&mz->lru_lock, flags); | 794 | spin_unlock_irqrestore(&mz->lru_lock, flags); |
868 | if (!list_empty(list)) { | ||
869 | cond_resched(); | ||
870 | goto retry; | ||
871 | } | ||
872 | return; | ||
873 | } | 795 | } |
874 | 796 | ||
875 | /* | 797 | /* |
876 | * make mem_cgroup's charge to be 0 if there is no task. | 798 | * make mem_cgroup's charge to be 0 if there is no task. |
877 | * This enables deleting this mem_cgroup. | 799 | * This enables deleting this mem_cgroup. |
878 | */ | 800 | */ |
879 | 801 | static int mem_cgroup_force_empty(struct mem_cgroup *mem) | |
880 | int mem_cgroup_force_empty(struct mem_cgroup *mem) | ||
881 | { | 802 | { |
882 | int ret = -EBUSY; | 803 | int ret = -EBUSY; |
883 | int node, zid; | 804 | int node, zid; |
805 | |||
884 | css_get(&mem->css); | 806 | css_get(&mem->css); |
885 | /* | 807 | /* |
886 | * page reclaim code (kswapd etc..) will move pages between | 808 | * page reclaim code (kswapd etc..) will move pages between |
887 | * active_list <-> inactive_list while we don't take a lock. | 809 | * active_list <-> inactive_list while we don't take a lock. |
888 | * So, we have to do loop here until all lists are empty. | 810 | * So, we have to do loop here until all lists are empty. |
889 | */ | 811 | */ |
890 | while (mem->res.usage > 0) { | 812 | while (mem->res.usage > 0) { |
@@ -906,9 +828,7 @@ out: | |||
906 | return ret; | 828 | return ret; |
907 | } | 829 | } |
908 | 830 | ||
909 | 831 | static int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp) | |
910 | |||
911 | int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp) | ||
912 | { | 832 | { |
913 | *tmp = memparse(buf, &buf); | 833 | *tmp = memparse(buf, &buf); |
914 | if (*buf != '\0') | 834 | if (*buf != '\0') |
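
mem_cgroup_write_strategy() above leans on memparse() to turn strings such as "4M" into a byte count. Below is a stand-alone userspace sketch of that suffix handling, not the kernel's memparse() itself, just an analogue with the same shift-per-suffix idea:

#include <stdio.h>
#include <stdlib.h>

/* Simplified userspace analogue of memparse(): parse "4M", "512k", ... */
static unsigned long long parse_size(const char *buf, char **retptr)
{
	unsigned long long val = strtoull(buf, retptr, 0);

	switch (**retptr) {
	case 'G': case 'g':
		val <<= 10;
		/* fall through */
	case 'M': case 'm':
		val <<= 10;
		/* fall through */
	case 'K': case 'k':
		val <<= 10;
		(*retptr)++;
		break;
	}
	return val;
}

int main(void)
{
	char *end;
	unsigned long long limit = parse_size("4M", &end);

	/* Mirror the *buf != '\0' check in mem_cgroup_write_strategy(). */
	if (*end != '\0')
		return 1;
	printf("%llu bytes\n", limit);	/* prints 4194304 */
	return 0;
}
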
@@ -945,8 +865,7 @@ static ssize_t mem_force_empty_write(struct cgroup *cont, | |||
945 | size_t nbytes, loff_t *ppos) | 865 | size_t nbytes, loff_t *ppos) |
946 | { | 866 | { |
947 | struct mem_cgroup *mem = mem_cgroup_from_cont(cont); | 867 | struct mem_cgroup *mem = mem_cgroup_from_cont(cont); |
948 | int ret; | 868 | int ret = mem_cgroup_force_empty(mem); |
949 | ret = mem_cgroup_force_empty(mem); | ||
950 | if (!ret) | 869 | if (!ret) |
951 | ret = nbytes; | 870 | ret = nbytes; |
952 | return ret; | 871 | return ret; |
@@ -955,7 +874,6 @@ static ssize_t mem_force_empty_write(struct cgroup *cont, | |||
955 | /* | 874 | /* |
956 | * Note: This should be removed if cgroup supports write-only file. | 875 | * Note: This should be removed if cgroup supports write-only file. |
957 | */ | 876 | */ |
958 | |||
959 | static ssize_t mem_force_empty_read(struct cgroup *cont, | 877 | static ssize_t mem_force_empty_read(struct cgroup *cont, |
960 | struct cftype *cft, | 878 | struct cftype *cft, |
961 | struct file *file, char __user *userbuf, | 879 | struct file *file, char __user *userbuf, |
@@ -964,7 +882,6 @@ static ssize_t mem_force_empty_read(struct cgroup *cont, | |||
964 | return -EINVAL; | 882 | return -EINVAL; |
965 | } | 883 | } |
966 | 884 | ||
967 | |||
968 | static const struct mem_cgroup_stat_desc { | 885 | static const struct mem_cgroup_stat_desc { |
969 | const char *msg; | 886 | const char *msg; |
970 | u64 unit; | 887 | u64 unit; |
@@ -1017,8 +934,6 @@ static int mem_control_stat_open(struct inode *unused, struct file *file) | |||
1017 | return single_open(file, mem_control_stat_show, cont); | 934 | return single_open(file, mem_control_stat_show, cont); |
1018 | } | 935 | } |
1019 | 936 | ||
1020 | |||
1021 | |||
1022 | static struct cftype mem_cgroup_files[] = { | 937 | static struct cftype mem_cgroup_files[] = { |
1023 | { | 938 | { |
1024 | .name = "usage_in_bytes", | 939 | .name = "usage_in_bytes", |
@@ -1084,9 +999,6 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) | |||
1084 | kfree(mem->info.nodeinfo[node]); | 999 | kfree(mem->info.nodeinfo[node]); |
1085 | } | 1000 | } |
1086 | 1001 | ||
1087 | |||
1088 | static struct mem_cgroup init_mem_cgroup; | ||
1089 | |||
1090 | static struct cgroup_subsys_state * | 1002 | static struct cgroup_subsys_state * |
1091 | mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) | 1003 | mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) |
1092 | { | 1004 | { |
@@ -1176,7 +1088,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss, | |||
1176 | 1088 | ||
1177 | out: | 1089 | out: |
1178 | mmput(mm); | 1090 | mmput(mm); |
1179 | return; | ||
1180 | } | 1091 | } |
1181 | 1092 | ||
1182 | struct cgroup_subsys mem_cgroup_subsys = { | 1093 | struct cgroup_subsys mem_cgroup_subsys = { |
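
The memcontrol.c hunks above drop the atomic ref_cnt in favour of a plain integer that is only ever read or written while lock_page_cgroup() is held. Here is a minimal userspace sketch of that pattern, with a pthread mutex standing in for the per-page lock; the names below are illustrative, not the kernel API:

#include <pthread.h>
#include <stdlib.h>

struct page_ref {
	pthread_mutex_t lock;	/* stands in for lock_page_cgroup() */
	int ref_cnt;		/* plain int: only touched under the lock */
};

static void ref_get(struct page_ref *pr)
{
	pthread_mutex_lock(&pr->lock);
	pr->ref_cnt++;			/* cf. mem_cgroup_prepare_migration() */
	pthread_mutex_unlock(&pr->lock);
}

/* Returns 1 when the last reference was dropped and the object freed. */
static int ref_put(struct page_ref *pr)
{
	int last;

	pthread_mutex_lock(&pr->lock);
	last = (--pr->ref_cnt == 0);	/* cf. mem_cgroup_uncharge_page() */
	pthread_mutex_unlock(&pr->lock);
	if (last)
		free(pr);
	return last;
}

int main(void)
{
	struct page_ref *pr = malloc(sizeof(*pr));

	if (!pr)
		return 1;
	pthread_mutex_init(&pr->lock, NULL);
	pr->ref_cnt = 1;	/* initial charge */
	ref_get(pr);		/* extra reference, e.g. for migration */
	ref_put(pr);
	ref_put(pr);		/* last put frees the structure */
	return 0;
}
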
diff --git a/mm/memory.c b/mm/memory.c index ce3c9e4492d8..0d14d1e58a5f 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -1711,7 +1711,7 @@ unlock: | |||
1711 | } | 1711 | } |
1712 | return ret; | 1712 | return ret; |
1713 | oom_free_new: | 1713 | oom_free_new: |
1714 | __free_page(new_page); | 1714 | page_cache_release(new_page); |
1715 | oom: | 1715 | oom: |
1716 | if (old_page) | 1716 | if (old_page) |
1717 | page_cache_release(old_page); | 1717 | page_cache_release(old_page); |
@@ -2093,12 +2093,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2093 | unlock_page(page); | 2093 | unlock_page(page); |
2094 | 2094 | ||
2095 | if (write_access) { | 2095 | if (write_access) { |
2096 | /* XXX: We could OR the do_wp_page code with this one? */ | 2096 | ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); |
2097 | if (do_wp_page(mm, vma, address, | 2097 | if (ret & VM_FAULT_ERROR) |
2098 | page_table, pmd, ptl, pte) & VM_FAULT_OOM) { | 2098 | ret &= VM_FAULT_ERROR; |
2099 | mem_cgroup_uncharge_page(page); | ||
2100 | ret = VM_FAULT_OOM; | ||
2101 | } | ||
2102 | goto out; | 2099 | goto out; |
2103 | } | 2100 | } |
2104 | 2101 | ||
@@ -2163,7 +2160,7 @@ release: | |||
2163 | page_cache_release(page); | 2160 | page_cache_release(page); |
2164 | goto unlock; | 2161 | goto unlock; |
2165 | oom_free_page: | 2162 | oom_free_page: |
2166 | __free_page(page); | 2163 | page_cache_release(page); |
2167 | oom: | 2164 | oom: |
2168 | return VM_FAULT_OOM; | 2165 | return VM_FAULT_OOM; |
2169 | } | 2166 | } |
diff --git a/mm/migrate.c b/mm/migrate.c index a73504ff5ab9..4e0eccca5e26 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -153,11 +153,6 @@ static void remove_migration_pte(struct vm_area_struct *vma, | |||
153 | return; | 153 | return; |
154 | } | 154 | } |
155 | 155 | ||
156 | if (mem_cgroup_charge(new, mm, GFP_KERNEL)) { | ||
157 | pte_unmap(ptep); | ||
158 | return; | ||
159 | } | ||
160 | |||
161 | ptl = pte_lockptr(mm, pmd); | 156 | ptl = pte_lockptr(mm, pmd); |
162 | spin_lock(ptl); | 157 | spin_lock(ptl); |
163 | pte = *ptep; | 158 | pte = *ptep; |
@@ -169,6 +164,20 @@ static void remove_migration_pte(struct vm_area_struct *vma, | |||
169 | if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old) | 164 | if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old) |
170 | goto out; | 165 | goto out; |
171 | 166 | ||
167 | /* | ||
168 | * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge. | ||
169 | * Failure is not an option here: we're now expected to remove every | ||
170 | * migration pte, and will cause crashes otherwise. Normally this | ||
171 | * is not an issue: mem_cgroup_prepare_migration bumped up the old | ||
172 | * page_cgroup count for safety, that's now attached to the new page, | ||
173 | * so this charge should just be another incrementation of the count, | ||
174 | * to keep in balance with rmap.c's mem_cgroup_uncharging. But if | ||
175 | * there's been a force_empty, those reference counts may no longer | ||
176 | * be reliable, and this charge can actually fail: oh well, we don't | ||
177 | * make the situation any worse by proceeding as if it had succeeded. | ||
178 | */ | ||
179 | mem_cgroup_charge(new, mm, GFP_ATOMIC); | ||
180 | |||
172 | get_page(new); | 181 | get_page(new); |
173 | pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); | 182 | pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); |
174 | if (is_write_migration_entry(entry)) | 183 | if (is_write_migration_entry(entry)) |
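
The comment added above argues that the GFP_ATOMIC charge during migration is best effort: the pte must be re-established even if the charge fails. A tiny stand-alone analogue of that reasoning, with hypothetical names rather than kernel interfaces:

#include <stdio.h>

static long usage, limit = 2;

/* Best-effort counter: may refuse, like the GFP_ATOMIC mem_cgroup_charge. */
static int try_charge(void)
{
	if (usage >= limit)
		return -1;
	usage++;
	return 0;
}

/* The operation itself must complete, so a failed charge is ignored. */
static void install_pte(int pfn)
{
	(void)try_charge();	/* cf. remove_migration_pte() dropping the result */
	printf("pte %d installed, usage=%ld/%ld\n", pfn, usage, limit);
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		install_pte(i);
	return 0;
}
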
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 4194b9db0104..44b2da11bf43 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -412,7 +412,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, | |||
412 | return oom_kill_task(p); | 412 | return oom_kill_task(p); |
413 | } | 413 | } |
414 | 414 | ||
415 | #ifdef CONFIG_CGROUP_MEM_CONT | 415 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR |
416 | void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask) | 416 | void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask) |
417 | { | 417 | { |
418 | unsigned long points = 0; | 418 | unsigned long points = 0; |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8896e874a67d..402a504f1228 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/swap.h> | 19 | #include <linux/swap.h> |
20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
21 | #include <linux/pagemap.h> | 21 | #include <linux/pagemap.h> |
22 | #include <linux/jiffies.h> | ||
22 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
23 | #include <linux/compiler.h> | 24 | #include <linux/compiler.h> |
24 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
@@ -221,13 +222,19 @@ static inline int bad_range(struct zone *zone, struct page *page) | |||
221 | 222 | ||
222 | static void bad_page(struct page *page) | 223 | static void bad_page(struct page *page) |
223 | { | 224 | { |
224 | printk(KERN_EMERG "Bad page state in process '%s'\n" | 225 | void *pc = page_get_page_cgroup(page); |
225 | KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" | 226 | |
226 | KERN_EMERG "Trying to fix it up, but a reboot is needed\n" | 227 | printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG |
227 | KERN_EMERG "Backtrace:\n", | 228 | "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n", |
228 | current->comm, page, (int)(2*sizeof(unsigned long)), | 229 | current->comm, page, (int)(2*sizeof(unsigned long)), |
229 | (unsigned long)page->flags, page->mapping, | 230 | (unsigned long)page->flags, page->mapping, |
230 | page_mapcount(page), page_count(page)); | 231 | page_mapcount(page), page_count(page)); |
232 | if (pc) { | ||
233 | printk(KERN_EMERG "cgroup:%p\n", pc); | ||
234 | page_reset_bad_cgroup(page); | ||
235 | } | ||
236 | printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n" | ||
237 | KERN_EMERG "Backtrace:\n"); | ||
231 | dump_stack(); | 238 | dump_stack(); |
232 | page->flags &= ~(1 << PG_lru | | 239 | page->flags &= ~(1 << PG_lru | |
233 | 1 << PG_private | | 240 | 1 << PG_private | |
@@ -453,6 +460,7 @@ static inline int free_pages_check(struct page *page) | |||
453 | { | 460 | { |
454 | if (unlikely(page_mapcount(page) | | 461 | if (unlikely(page_mapcount(page) | |
455 | (page->mapping != NULL) | | 462 | (page->mapping != NULL) | |
463 | (page_get_page_cgroup(page) != NULL) | | ||
456 | (page_count(page) != 0) | | 464 | (page_count(page) != 0) | |
457 | (page->flags & ( | 465 | (page->flags & ( |
458 | 1 << PG_lru | | 466 | 1 << PG_lru | |
@@ -602,6 +610,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) | |||
602 | { | 610 | { |
603 | if (unlikely(page_mapcount(page) | | 611 | if (unlikely(page_mapcount(page) | |
604 | (page->mapping != NULL) | | 612 | (page->mapping != NULL) | |
613 | (page_get_page_cgroup(page) != NULL) | | ||
605 | (page_count(page) != 0) | | 614 | (page_count(page) != 0) | |
606 | (page->flags & ( | 615 | (page->flags & ( |
607 | 1 << PG_lru | | 616 | 1 << PG_lru | |
@@ -988,7 +997,6 @@ static void free_hot_cold_page(struct page *page, int cold) | |||
988 | 997 | ||
989 | if (!PageHighMem(page)) | 998 | if (!PageHighMem(page)) |
990 | debug_check_no_locks_freed(page_address(page), PAGE_SIZE); | 999 | debug_check_no_locks_freed(page_address(page), PAGE_SIZE); |
991 | VM_BUG_ON(page_get_page_cgroup(page)); | ||
992 | arch_free_page(page, 0); | 1000 | arch_free_page(page, 0); |
993 | kernel_map_pages(page, 1, 0); | 1001 | kernel_map_pages(page, 1, 0); |
994 | 1002 | ||
@@ -1276,7 +1284,7 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) | |||
1276 | if (!zlc) | 1284 | if (!zlc) |
1277 | return NULL; | 1285 | return NULL; |
1278 | 1286 | ||
1279 | if (jiffies - zlc->last_full_zap > 1 * HZ) { | 1287 | if (time_after(jiffies, zlc->last_full_zap + HZ)) { |
1280 | bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); | 1288 | bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); |
1281 | zlc->last_full_zap = jiffies; | 1289 | zlc->last_full_zap = jiffies; |
1282 | } | 1290 | } |
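
time_after() in the zlc_setup() hunk compares jiffies through a signed difference, so a timestamp that happens to sit slightly in the future (for example, updated concurrently just after the local snapshot was taken) is not mistaken for one that expired long ago, which the raw unsigned subtraction it replaces would do. A small stand-alone illustration, with HZ and the sample values chosen only for the demo:

#include <stdio.h>

#define HZ 1000UL

/* Kernel-style helper: true if a is after b, modulo wraparound. */
#define time_after(a, b)  ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long now = 50;
	unsigned long last_full_zap = 100;	/* updated just after 'now' was read */

	/* Raw unsigned subtraction wraps and claims over HZ ticks have passed. */
	printf("raw:        %d\n", (now - last_full_zap) > HZ);		/* 1 */

	/* Signed-difference comparison says the deadline has not passed yet. */
	printf("time_after: %d\n", time_after(now, last_full_zap + HZ));	/* 0 */
	return 0;
}
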
@@ -2527,7 +2535,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, | |||
2527 | set_page_links(page, zone, nid, pfn); | 2535 | set_page_links(page, zone, nid, pfn); |
2528 | init_page_count(page); | 2536 | init_page_count(page); |
2529 | reset_page_mapcount(page); | 2537 | reset_page_mapcount(page); |
2530 | page_assign_page_cgroup(page, NULL); | ||
2531 | SetPageReserved(page); | 2538 | SetPageReserved(page); |
2532 | 2539 | ||
2533 | /* | 2540 | /* |
@@ -321,7 +321,7 @@ static int page_referenced_anon(struct page *page, | |||
321 | * counting on behalf of references from different | 321 | * counting on behalf of references from different |
322 | * cgroups | 322 | * cgroups |
323 | */ | 323 | */ |
324 | if (mem_cont && !vm_match_cgroup(vma->vm_mm, mem_cont)) | 324 | if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) |
325 | continue; | 325 | continue; |
326 | referenced += page_referenced_one(page, vma, &mapcount); | 326 | referenced += page_referenced_one(page, vma, &mapcount); |
327 | if (!mapcount) | 327 | if (!mapcount) |
@@ -382,7 +382,7 @@ static int page_referenced_file(struct page *page, | |||
382 | * counting on behalf of references from different | 382 | * counting on behalf of references from different |
383 | * cgroups | 383 | * cgroups |
384 | */ | 384 | */ |
385 | if (mem_cont && !vm_match_cgroup(vma->vm_mm, mem_cont)) | 385 | if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) |
386 | continue; | 386 | continue; |
387 | if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE)) | 387 | if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE)) |
388 | == (VM_LOCKED|VM_MAYSHARE)) { | 388 | == (VM_LOCKED|VM_MAYSHARE)) { |
diff --git a/mm/shmem.c b/mm/shmem.c index 90b576cbc06e..3372bc579e89 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -1370,14 +1370,17 @@ repeat: | |||
1370 | shmem_swp_unmap(entry); | 1370 | shmem_swp_unmap(entry); |
1371 | spin_unlock(&info->lock); | 1371 | spin_unlock(&info->lock); |
1372 | unlock_page(swappage); | 1372 | unlock_page(swappage); |
1373 | page_cache_release(swappage); | ||
1374 | if (error == -ENOMEM) { | 1373 | if (error == -ENOMEM) { |
1375 | /* allow reclaim from this memory cgroup */ | 1374 | /* allow reclaim from this memory cgroup */ |
1376 | error = mem_cgroup_cache_charge(NULL, | 1375 | error = mem_cgroup_cache_charge(swappage, |
1377 | current->mm, gfp & ~__GFP_HIGHMEM); | 1376 | current->mm, gfp & ~__GFP_HIGHMEM); |
1378 | if (error) | 1377 | if (error) { |
1378 | page_cache_release(swappage); | ||
1379 | goto failed; | 1379 | goto failed; |
1380 | } | ||
1381 | mem_cgroup_uncharge_page(swappage); | ||
1380 | } | 1382 | } |
1383 | page_cache_release(swappage); | ||
1381 | goto repeat; | 1384 | goto repeat; |
1382 | } | 1385 | } |
1383 | } else if (sgp == SGP_READ && !filepage) { | 1386 | } else if (sgp == SGP_READ && !filepage) { |
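
The shmem.c change above charges the real swappage on -ENOMEM purely to push the memory cgroup into reclaim, uncharges it again, and retries. A generic stand-alone sketch of that nudge-then-retry shape; the reclaim trigger here is hypothetical, not the memcg API:

#include <errno.h>
#include <stdio.h>

static int attempts;

/* Hypothetical operation that fails with -ENOMEM until room is made. */
static int add_to_cache(void)
{
	return (++attempts < 3) ? -ENOMEM : 0;
}

/* Hypothetical stand-in for "charge to force reclaim, then uncharge". */
static void nudge_reclaim(void)
{
	printf("reclaim nudged (attempt %d)\n", attempts);
}

int main(void)
{
	int err;

	while ((err = add_to_cache()) == -ENOMEM)
		nudge_reclaim();	/* cf. charge + uncharge of swappage */

	printf("done, err=%d after %d attempts\n", err, attempts);
	return err ? 1 : 0;
}
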
@@ -291,32 +291,16 @@ static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu) | |||
291 | #endif | 291 | #endif |
292 | } | 292 | } |
293 | 293 | ||
294 | /* | 294 | /* Verify that a pointer has an address that is valid within a slab page */ |
295 | * The end pointer in a slab is special. It points to the first object in the | ||
296 | * slab but has bit 0 set to mark it. | ||
297 | * | ||
298 | * Note that SLUB relies on page_mapping returning NULL for pages with bit 0 | ||
299 | * in the mapping set. | ||
300 | */ | ||
301 | static inline int is_end(void *addr) | ||
302 | { | ||
303 | return (unsigned long)addr & PAGE_MAPPING_ANON; | ||
304 | } | ||
305 | |||
306 | static void *slab_address(struct page *page) | ||
307 | { | ||
308 | return page->end - PAGE_MAPPING_ANON; | ||
309 | } | ||
310 | |||
311 | static inline int check_valid_pointer(struct kmem_cache *s, | 295 | static inline int check_valid_pointer(struct kmem_cache *s, |
312 | struct page *page, const void *object) | 296 | struct page *page, const void *object) |
313 | { | 297 | { |
314 | void *base; | 298 | void *base; |
315 | 299 | ||
316 | if (object == page->end) | 300 | if (!object) |
317 | return 1; | 301 | return 1; |
318 | 302 | ||
319 | base = slab_address(page); | 303 | base = page_address(page); |
320 | if (object < base || object >= base + s->objects * s->size || | 304 | if (object < base || object >= base + s->objects * s->size || |
321 | (object - base) % s->size) { | 305 | (object - base) % s->size) { |
322 | return 0; | 306 | return 0; |
@@ -349,8 +333,7 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) | |||
349 | 333 | ||
350 | /* Scan freelist */ | 334 | /* Scan freelist */ |
351 | #define for_each_free_object(__p, __s, __free) \ | 335 | #define for_each_free_object(__p, __s, __free) \ |
352 | for (__p = (__free); (__p) != page->end; __p = get_freepointer((__s),\ | 336 | for (__p = (__free); __p; __p = get_freepointer((__s), __p)) |
353 | __p)) | ||
354 | 337 | ||
355 | /* Determine object index from a given position */ | 338 | /* Determine object index from a given position */ |
356 | static inline int slab_index(void *p, struct kmem_cache *s, void *addr) | 339 | static inline int slab_index(void *p, struct kmem_cache *s, void *addr) |
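
With page->end gone, a SLUB freelist is a plain singly linked chain threaded through the free objects themselves and terminated by NULL, which is exactly what the reworked for_each_free_object() walks. A stand-alone miniature of that layout; the object size and the slab backing array are invented for the example:

#include <stdio.h>
#include <string.h>

#define OBJ_SIZE  32
#define NR_OBJS   4

static unsigned char slab[NR_OBJS * OBJ_SIZE];	/* stand-in for one slab page */

/* The next-free pointer lives in the first word of each free object. */
static void *get_freepointer(void *object)
{
	void *next;

	memcpy(&next, object, sizeof(next));
	return next;
}

static void set_freepointer(void *object, void *next)
{
	memcpy(object, &next, sizeof(next));
}

int main(void)
{
	void *freelist = NULL;
	void *p;
	int i;

	/* Build the chain back to front; the last link is NULL, not page->end. */
	for (i = NR_OBJS - 1; i >= 0; i--) {
		set_freepointer(&slab[i * OBJ_SIZE], freelist);
		freelist = &slab[i * OBJ_SIZE];
	}

	/* Equivalent of: for_each_free_object(p, s, freelist) */
	for (p = freelist; p; p = get_freepointer(p))
		printf("free object at offset %td\n", (unsigned char *)p - slab);

	return 0;
}
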
@@ -502,7 +485,7 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...) | |||
502 | static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) | 485 | static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) |
503 | { | 486 | { |
504 | unsigned int off; /* Offset of last byte */ | 487 | unsigned int off; /* Offset of last byte */ |
505 | u8 *addr = slab_address(page); | 488 | u8 *addr = page_address(page); |
506 | 489 | ||
507 | print_tracking(s, p); | 490 | print_tracking(s, p); |
508 | 491 | ||
@@ -637,7 +620,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page, | |||
637 | * A. Free pointer (if we cannot overwrite object on free) | 620 | * A. Free pointer (if we cannot overwrite object on free) |
638 | * B. Tracking data for SLAB_STORE_USER | 621 | * B. Tracking data for SLAB_STORE_USER |
639 | * C. Padding to reach required alignment boundary or at minimum | 622 | * C. Padding to reach required alignment boundary or at minimum |
640 | * one word if debuggin is on to be able to detect writes | 623 | * one word if debugging is on to be able to detect writes |
641 | * before the word boundary. | 624 | * before the word boundary. |
642 | * | 625 | * |
643 | * Padding is done using 0x5a (POISON_INUSE) | 626 | * Padding is done using 0x5a (POISON_INUSE) |
@@ -680,7 +663,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page) | |||
680 | if (!(s->flags & SLAB_POISON)) | 663 | if (!(s->flags & SLAB_POISON)) |
681 | return 1; | 664 | return 1; |
682 | 665 | ||
683 | start = slab_address(page); | 666 | start = page_address(page); |
684 | end = start + (PAGE_SIZE << s->order); | 667 | end = start + (PAGE_SIZE << s->order); |
685 | length = s->objects * s->size; | 668 | length = s->objects * s->size; |
686 | remainder = end - (start + length); | 669 | remainder = end - (start + length); |
@@ -748,7 +731,7 @@ static int check_object(struct kmem_cache *s, struct page *page, | |||
748 | * of the free objects in this slab. May cause | 731 | * of the free objects in this slab. May cause |
749 | * another error because the object count is now wrong. | 732 | * another error because the object count is now wrong. |
750 | */ | 733 | */ |
751 | set_freepointer(s, p, page->end); | 734 | set_freepointer(s, p, NULL); |
752 | return 0; | 735 | return 0; |
753 | } | 736 | } |
754 | return 1; | 737 | return 1; |
@@ -782,18 +765,18 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search) | |||
782 | void *fp = page->freelist; | 765 | void *fp = page->freelist; |
783 | void *object = NULL; | 766 | void *object = NULL; |
784 | 767 | ||
785 | while (fp != page->end && nr <= s->objects) { | 768 | while (fp && nr <= s->objects) { |
786 | if (fp == search) | 769 | if (fp == search) |
787 | return 1; | 770 | return 1; |
788 | if (!check_valid_pointer(s, page, fp)) { | 771 | if (!check_valid_pointer(s, page, fp)) { |
789 | if (object) { | 772 | if (object) { |
790 | object_err(s, page, object, | 773 | object_err(s, page, object, |
791 | "Freechain corrupt"); | 774 | "Freechain corrupt"); |
792 | set_freepointer(s, object, page->end); | 775 | set_freepointer(s, object, NULL); |
793 | break; | 776 | break; |
794 | } else { | 777 | } else { |
795 | slab_err(s, page, "Freepointer corrupt"); | 778 | slab_err(s, page, "Freepointer corrupt"); |
796 | page->freelist = page->end; | 779 | page->freelist = NULL; |
797 | page->inuse = s->objects; | 780 | page->inuse = s->objects; |
798 | slab_fix(s, "Freelist cleared"); | 781 | slab_fix(s, "Freelist cleared"); |
799 | return 0; | 782 | return 0; |
@@ -870,7 +853,7 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page, | |||
870 | if (!check_slab(s, page)) | 853 | if (!check_slab(s, page)) |
871 | goto bad; | 854 | goto bad; |
872 | 855 | ||
873 | if (object && !on_freelist(s, page, object)) { | 856 | if (!on_freelist(s, page, object)) { |
874 | object_err(s, page, object, "Object already allocated"); | 857 | object_err(s, page, object, "Object already allocated"); |
875 | goto bad; | 858 | goto bad; |
876 | } | 859 | } |
@@ -880,7 +863,7 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page, | |||
880 | goto bad; | 863 | goto bad; |
881 | } | 864 | } |
882 | 865 | ||
883 | if (object && !check_object(s, page, object, 0)) | 866 | if (!check_object(s, page, object, 0)) |
884 | goto bad; | 867 | goto bad; |
885 | 868 | ||
886 | /* Success perform special debug activities for allocs */ | 869 | /* Success perform special debug activities for allocs */ |
@@ -899,7 +882,7 @@ bad: | |||
899 | */ | 882 | */ |
900 | slab_fix(s, "Marking all objects used"); | 883 | slab_fix(s, "Marking all objects used"); |
901 | page->inuse = s->objects; | 884 | page->inuse = s->objects; |
902 | page->freelist = page->end; | 885 | page->freelist = NULL; |
903 | } | 886 | } |
904 | return 0; | 887 | return 0; |
905 | } | 888 | } |
@@ -939,7 +922,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page, | |||
939 | } | 922 | } |
940 | 923 | ||
941 | /* Special debug activities for freeing objects */ | 924 | /* Special debug activities for freeing objects */ |
942 | if (!SlabFrozen(page) && page->freelist == page->end) | 925 | if (!SlabFrozen(page) && !page->freelist) |
943 | remove_full(s, page); | 926 | remove_full(s, page); |
944 | if (s->flags & SLAB_STORE_USER) | 927 | if (s->flags & SLAB_STORE_USER) |
945 | set_track(s, object, TRACK_FREE, addr); | 928 | set_track(s, object, TRACK_FREE, addr); |
@@ -1015,30 +998,11 @@ static unsigned long kmem_cache_flags(unsigned long objsize, | |||
1015 | void (*ctor)(struct kmem_cache *, void *)) | 998 | void (*ctor)(struct kmem_cache *, void *)) |
1016 | { | 999 | { |
1017 | /* | 1000 | /* |
1018 | * The page->offset field is only 16 bit wide. This is an offset | 1001 | * Enable debugging if selected on the kernel commandline. |
1019 | * in units of words from the beginning of an object. If the slab | ||
1020 | * size is bigger then we cannot move the free pointer behind the | ||
1021 | * object anymore. | ||
1022 | * | ||
1023 | * On 32 bit platforms the limit is 256k. On 64bit platforms | ||
1024 | * the limit is 512k. | ||
1025 | * | ||
1026 | * Debugging or ctor may create a need to move the free | ||
1027 | * pointer. Fail if this happens. | ||
1028 | */ | 1002 | */ |
1029 | if (objsize >= 65535 * sizeof(void *)) { | 1003 | if (slub_debug && (!slub_debug_slabs || |
1030 | BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON | | 1004 | strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0)) |
1031 | SLAB_STORE_USER | SLAB_DESTROY_BY_RCU)); | 1005 | flags |= slub_debug; |
1032 | BUG_ON(ctor); | ||
1033 | } else { | ||
1034 | /* | ||
1035 | * Enable debugging if selected on the kernel commandline. | ||
1036 | */ | ||
1037 | if (slub_debug && (!slub_debug_slabs || | ||
1038 | strncmp(slub_debug_slabs, name, | ||
1039 | strlen(slub_debug_slabs)) == 0)) | ||
1040 | flags |= slub_debug; | ||
1041 | } | ||
1042 | 1006 | ||
1043 | return flags; | 1007 | return flags; |
1044 | } | 1008 | } |
@@ -1124,7 +1088,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) | |||
1124 | SetSlabDebug(page); | 1088 | SetSlabDebug(page); |
1125 | 1089 | ||
1126 | start = page_address(page); | 1090 | start = page_address(page); |
1127 | page->end = start + 1; | ||
1128 | 1091 | ||
1129 | if (unlikely(s->flags & SLAB_POISON)) | 1092 | if (unlikely(s->flags & SLAB_POISON)) |
1130 | memset(start, POISON_INUSE, PAGE_SIZE << s->order); | 1093 | memset(start, POISON_INUSE, PAGE_SIZE << s->order); |
@@ -1136,7 +1099,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) | |||
1136 | last = p; | 1099 | last = p; |
1137 | } | 1100 | } |
1138 | setup_object(s, page, last); | 1101 | setup_object(s, page, last); |
1139 | set_freepointer(s, last, page->end); | 1102 | set_freepointer(s, last, NULL); |
1140 | 1103 | ||
1141 | page->freelist = start; | 1104 | page->freelist = start; |
1142 | page->inuse = 0; | 1105 | page->inuse = 0; |
@@ -1152,7 +1115,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page) | |||
1152 | void *p; | 1115 | void *p; |
1153 | 1116 | ||
1154 | slab_pad_check(s, page); | 1117 | slab_pad_check(s, page); |
1155 | for_each_object(p, s, slab_address(page)) | 1118 | for_each_object(p, s, page_address(page)) |
1156 | check_object(s, page, p, 0); | 1119 | check_object(s, page, p, 0); |
1157 | ClearSlabDebug(page); | 1120 | ClearSlabDebug(page); |
1158 | } | 1121 | } |
@@ -1162,7 +1125,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page) | |||
1162 | NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, | 1125 | NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, |
1163 | -pages); | 1126 | -pages); |
1164 | 1127 | ||
1165 | page->mapping = NULL; | ||
1166 | __free_pages(page, s->order); | 1128 | __free_pages(page, s->order); |
1167 | } | 1129 | } |
1168 | 1130 | ||
@@ -1307,7 +1269,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) | |||
1307 | * may return off node objects because partial slabs are obtained | 1269 | * may return off node objects because partial slabs are obtained |
1308 | * from other nodes and filled up. | 1270 | * from other nodes and filled up. |
1309 | * | 1271 | * |
1310 | * If /sys/slab/xx/defrag_ratio is set to 100 (which makes | 1272 | * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes |
1311 | * defrag_ratio = 1000) then every (well almost) allocation will | 1273 | * defrag_ratio = 1000) then every (well almost) allocation will |
1312 | * first attempt to defrag slab caches on other nodes. This means | 1274 | * first attempt to defrag slab caches on other nodes. This means |
1313 | * scanning over all nodes to look for partial slabs which may be | 1275 | * scanning over all nodes to look for partial slabs which may be |
@@ -1366,7 +1328,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) | |||
1366 | ClearSlabFrozen(page); | 1328 | ClearSlabFrozen(page); |
1367 | if (page->inuse) { | 1329 | if (page->inuse) { |
1368 | 1330 | ||
1369 | if (page->freelist != page->end) { | 1331 | if (page->freelist) { |
1370 | add_partial(n, page, tail); | 1332 | add_partial(n, page, tail); |
1371 | stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD); | 1333 | stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD); |
1372 | } else { | 1334 | } else { |
@@ -1382,9 +1344,11 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) | |||
1382 | * Adding an empty slab to the partial slabs in order | 1344 | * Adding an empty slab to the partial slabs in order |
1383 | * to avoid page allocator overhead. This slab needs | 1345 | * to avoid page allocator overhead. This slab needs |
1384 | * to come after the other slabs with objects in | 1346 | * to come after the other slabs with objects in |
1385 | * order to fill them up. That way the size of the | 1347 | * so that the others get filled first. That way the |
1386 | * partial list stays small. kmem_cache_shrink can | 1348 | * size of the partial list stays small. |
1387 | * reclaim empty slabs from the partial list. | 1349 | * |
1350 | * kmem_cache_shrink can reclaim any empty slabs from the | ||
1351 | * partial list. | ||
1388 | */ | 1352 | */ |
1389 | add_partial(n, page, 1); | 1353 | add_partial(n, page, 1); |
1390 | slab_unlock(page); | 1354 | slab_unlock(page); |
@@ -1407,15 +1371,11 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) | |||
1407 | if (c->freelist) | 1371 | if (c->freelist) |
1408 | stat(c, DEACTIVATE_REMOTE_FREES); | 1372 | stat(c, DEACTIVATE_REMOTE_FREES); |
1409 | /* | 1373 | /* |
1410 | * Merge cpu freelist into freelist. Typically we get here | 1374 | * Merge cpu freelist into slab freelist. Typically we get here |
1411 | * because both freelists are empty. So this is unlikely | 1375 | * because both freelists are empty. So this is unlikely |
1412 | * to occur. | 1376 | * to occur. |
1413 | * | ||
1414 | * We need to use _is_end here because deactivate slab may | ||
1415 | * be called for a debug slab. Then c->freelist may contain | ||
1416 | * a dummy pointer. | ||
1417 | */ | 1377 | */ |
1418 | while (unlikely(!is_end(c->freelist))) { | 1378 | while (unlikely(c->freelist)) { |
1419 | void **object; | 1379 | void **object; |
1420 | 1380 | ||
1421 | tail = 0; /* Hot objects. Put the slab first */ | 1381 | tail = 0; /* Hot objects. Put the slab first */ |
@@ -1442,6 +1402,7 @@ static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) | |||
1442 | 1402 | ||
1443 | /* | 1403 | /* |
1444 | * Flush cpu slab. | 1404 | * Flush cpu slab. |
1405 | * | ||
1445 | * Called from IPI handler with interrupts disabled. | 1406 | * Called from IPI handler with interrupts disabled. |
1446 | */ | 1407 | */ |
1447 | static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) | 1408 | static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) |
@@ -1500,7 +1461,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node) | |||
1500 | * rest of the freelist to the lockless freelist. | 1461 | * rest of the freelist to the lockless freelist. |
1501 | * | 1462 | * |
1502 | * And if we were unable to get a new slab from the partial slab lists then | 1463 | * And if we were unable to get a new slab from the partial slab lists then |
1503 | * we need to allocate a new slab. This is slowest path since we may sleep. | 1464 | * we need to allocate a new slab. This is the slowest path since it involves |
1465 | * a call to the page allocator and the setup of a new slab. | ||
1504 | */ | 1466 | */ |
1505 | static void *__slab_alloc(struct kmem_cache *s, | 1467 | static void *__slab_alloc(struct kmem_cache *s, |
1506 | gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c) | 1468 | gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c) |
@@ -1514,18 +1476,19 @@ static void *__slab_alloc(struct kmem_cache *s, | |||
1514 | slab_lock(c->page); | 1476 | slab_lock(c->page); |
1515 | if (unlikely(!node_match(c, node))) | 1477 | if (unlikely(!node_match(c, node))) |
1516 | goto another_slab; | 1478 | goto another_slab; |
1479 | |||
1517 | stat(c, ALLOC_REFILL); | 1480 | stat(c, ALLOC_REFILL); |
1481 | |||
1518 | load_freelist: | 1482 | load_freelist: |
1519 | object = c->page->freelist; | 1483 | object = c->page->freelist; |
1520 | if (unlikely(object == c->page->end)) | 1484 | if (unlikely(!object)) |
1521 | goto another_slab; | 1485 | goto another_slab; |
1522 | if (unlikely(SlabDebug(c->page))) | 1486 | if (unlikely(SlabDebug(c->page))) |
1523 | goto debug; | 1487 | goto debug; |
1524 | 1488 | ||
1525 | object = c->page->freelist; | ||
1526 | c->freelist = object[c->offset]; | 1489 | c->freelist = object[c->offset]; |
1527 | c->page->inuse = s->objects; | 1490 | c->page->inuse = s->objects; |
1528 | c->page->freelist = c->page->end; | 1491 | c->page->freelist = NULL; |
1529 | c->node = page_to_nid(c->page); | 1492 | c->node = page_to_nid(c->page); |
1530 | unlock_out: | 1493 | unlock_out: |
1531 | slab_unlock(c->page); | 1494 | slab_unlock(c->page); |
@@ -1578,7 +1541,6 @@ new_slab: | |||
1578 | 1541 | ||
1579 | return NULL; | 1542 | return NULL; |
1580 | debug: | 1543 | debug: |
1581 | object = c->page->freelist; | ||
1582 | if (!alloc_debug_processing(s, c->page, object, addr)) | 1544 | if (!alloc_debug_processing(s, c->page, object, addr)) |
1583 | goto another_slab; | 1545 | goto another_slab; |
1584 | 1546 | ||
@@ -1607,7 +1569,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, | |||
1607 | 1569 | ||
1608 | local_irq_save(flags); | 1570 | local_irq_save(flags); |
1609 | c = get_cpu_slab(s, smp_processor_id()); | 1571 | c = get_cpu_slab(s, smp_processor_id()); |
1610 | if (unlikely(is_end(c->freelist) || !node_match(c, node))) | 1572 | if (unlikely(!c->freelist || !node_match(c, node))) |
1611 | 1573 | ||
1612 | object = __slab_alloc(s, gfpflags, node, addr, c); | 1574 | object = __slab_alloc(s, gfpflags, node, addr, c); |
1613 | 1575 | ||
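
slab_alloc() above keys its fast path on c->freelist being non-NULL: pop locally when possible, fall into __slab_alloc() to refill otherwise. Below is a deliberately rough userspace caricature of that two-level shape; the fixed-size batch and malloc() backing are stand-ins, not how SLUB actually refills:

#include <stdio.h>
#include <stdlib.h>

#define BATCH 4

static void *local_freelist[BATCH];	/* stands in for c->freelist */
static int local_nr;

/* Slow path: refill the local cache from the backing allocator. */
static void *slow_alloc(size_t size)
{
	for (local_nr = 0; local_nr < BATCH; local_nr++)
		local_freelist[local_nr] = malloc(size);
	return local_freelist[--local_nr];	/* hand one object back */
}

/* Fast path: pop from the local cache when it is non-empty. */
static void *fast_alloc(size_t size)
{
	if (local_nr == 0)			/* cf. !c->freelist */
		return slow_alloc(size);	/* cf. __slab_alloc() */
	return local_freelist[--local_nr];
}

int main(void)
{
	/* Objects are intentionally never freed in this toy. */
	for (int i = 0; i < 6; i++)
		printf("object %d at %p\n", i, fast_alloc(32));
	return 0;
}
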
@@ -1659,6 +1621,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, | |||
1659 | 1621 | ||
1660 | if (unlikely(SlabDebug(page))) | 1622 | if (unlikely(SlabDebug(page))) |
1661 | goto debug; | 1623 | goto debug; |
1624 | |||
1662 | checks_ok: | 1625 | checks_ok: |
1663 | prior = object[offset] = page->freelist; | 1626 | prior = object[offset] = page->freelist; |
1664 | page->freelist = object; | 1627 | page->freelist = object; |
@@ -1673,11 +1636,10 @@ checks_ok: | |||
1673 | goto slab_empty; | 1636 | goto slab_empty; |
1674 | 1637 | ||
1675 | /* | 1638 | /* |
1676 | * Objects left in the slab. If it | 1639 | * Objects left in the slab. If it was not on the partial list before |
1677 | * was not on the partial list before | ||
1678 | * then add it. | 1640 | * then add it. |
1679 | */ | 1641 | */ |
1680 | if (unlikely(prior == page->end)) { | 1642 | if (unlikely(!prior)) { |
1681 | add_partial(get_node(s, page_to_nid(page)), page, 1); | 1643 | add_partial(get_node(s, page_to_nid(page)), page, 1); |
1682 | stat(c, FREE_ADD_PARTIAL); | 1644 | stat(c, FREE_ADD_PARTIAL); |
1683 | } | 1645 | } |
@@ -1687,7 +1649,7 @@ out_unlock: | |||
1687 | return; | 1649 | return; |
1688 | 1650 | ||
1689 | slab_empty: | 1651 | slab_empty: |
1690 | if (prior != page->end) { | 1652 | if (prior) { |
1691 | /* | 1653 | /* |
1692 | * Slab still on the partial list. | 1654 | * Slab still on the partial list. |
1693 | */ | 1655 | */ |
@@ -1724,8 +1686,8 @@ static __always_inline void slab_free(struct kmem_cache *s, | |||
1724 | unsigned long flags; | 1686 | unsigned long flags; |
1725 | 1687 | ||
1726 | local_irq_save(flags); | 1688 | local_irq_save(flags); |
1727 | debug_check_no_locks_freed(object, s->objsize); | ||
1728 | c = get_cpu_slab(s, smp_processor_id()); | 1689 | c = get_cpu_slab(s, smp_processor_id()); |
1690 | debug_check_no_locks_freed(object, c->objsize); | ||
1729 | if (likely(page == c->page && c->node >= 0)) { | 1691 | if (likely(page == c->page && c->node >= 0)) { |
1730 | object[c->offset] = c->freelist; | 1692 | object[c->offset] = c->freelist; |
1731 | c->freelist = object; | 1693 | c->freelist = object; |
@@ -1888,13 +1850,11 @@ static unsigned long calculate_alignment(unsigned long flags, | |||
1888 | unsigned long align, unsigned long size) | 1850 | unsigned long align, unsigned long size) |
1889 | { | 1851 | { |
1890 | /* | 1852 | /* |
1891 | * If the user wants hardware cache aligned objects then | 1853 | * If the user wants hardware cache aligned objects then follow that |
1892 | * follow that suggestion if the object is sufficiently | 1854 | * suggestion if the object is sufficiently large. |
1893 | * large. | ||
1894 | * | 1855 | * |
1895 | * The hardware cache alignment cannot override the | 1856 | * The hardware cache alignment cannot override the specified |
1896 | * specified alignment though. If that is greater | 1857 | * alignment though. If that is greater then use it. |
1897 | * then use it. | ||
1898 | */ | 1858 | */ |
1899 | if ((flags & SLAB_HWCACHE_ALIGN) && | 1859 | if ((flags & SLAB_HWCACHE_ALIGN) && |
1900 | size > cache_line_size() / 2) | 1860 | size > cache_line_size() / 2) |
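
The reflowed comment above states the rule: honour SLAB_HWCACHE_ALIGN only for objects big enough to benefit, and never let it lower an explicitly larger alignment. A simplified stand-alone rendering of that decision, with the cache-line size and flag value assumed for the example rather than taken from the kernel:

#include <stdio.h>

#define SLAB_HWCACHE_ALIGN  0x1UL
#define CACHE_LINE_SIZE     64UL	/* assumed for the example */

static unsigned long calc_align(unsigned long flags, unsigned long align,
				unsigned long size)
{
	/* Cache-line align only objects that fill at least half a line, */
	/* and only when that does not lower an explicitly larger alignment. */
	if ((flags & SLAB_HWCACHE_ALIGN) && size > CACHE_LINE_SIZE / 2 &&
	    align < CACHE_LINE_SIZE)
		align = CACHE_LINE_SIZE;

	/* Never go below one word. */
	if (align < sizeof(void *))
		align = sizeof(void *);
	return align;
}

int main(void)
{
	printf("%lu\n", calc_align(SLAB_HWCACHE_ALIGN, 8, 200));	/* 64 */
	printf("%lu\n", calc_align(SLAB_HWCACHE_ALIGN, 8, 24));	/* 8 */
	printf("%lu\n", calc_align(0, 128, 200));			/* 128 */
	return 0;
}
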
@@ -1910,7 +1870,7 @@ static void init_kmem_cache_cpu(struct kmem_cache *s, | |||
1910 | struct kmem_cache_cpu *c) | 1870 | struct kmem_cache_cpu *c) |
1911 | { | 1871 | { |
1912 | c->page = NULL; | 1872 | c->page = NULL; |
1913 | c->freelist = (void *)PAGE_MAPPING_ANON; | 1873 | c->freelist = NULL; |
1914 | c->node = 0; | 1874 | c->node = 0; |
1915 | c->offset = s->offset / sizeof(void *); | 1875 | c->offset = s->offset / sizeof(void *); |
1916 | c->objsize = s->objsize; | 1876 | c->objsize = s->objsize; |
@@ -2092,6 +2052,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags, | |||
2092 | #endif | 2052 | #endif |
2093 | init_kmem_cache_node(n); | 2053 | init_kmem_cache_node(n); |
2094 | atomic_long_inc(&n->nr_slabs); | 2054 | atomic_long_inc(&n->nr_slabs); |
2055 | |||
2095 | /* | 2056 | /* |
2096 | * lockdep requires consistent irq usage for each lock | 2057 | * lockdep requires consistent irq usage for each lock |
2097 | * so even though there cannot be a race this early in | 2058 | * so even though there cannot be a race this early in |
@@ -2173,6 +2134,14 @@ static int calculate_sizes(struct kmem_cache *s) | |||
2173 | unsigned long align = s->align; | 2134 | unsigned long align = s->align; |
2174 | 2135 | ||
2175 | /* | 2136 | /* |
2137 | * Round up object size to the next word boundary. We can only | ||
2138 | * place the free pointer at word boundaries and this determines | ||
2139 | * the possible location of the free pointer. | ||
2140 | */ | ||
2141 | size = ALIGN(size, sizeof(void *)); | ||
2142 | |||
2143 | #ifdef CONFIG_SLUB_DEBUG | ||
2144 | /* | ||
2176 | * Determine if we can poison the object itself. If the user of | 2145 | * Determine if we can poison the object itself. If the user of |
2177 | * the slab may touch the object after free or before allocation | 2146 | * the slab may touch the object after free or before allocation |
2178 | * then we should never poison the object itself. | 2147 | * then we should never poison the object itself. |
@@ -2183,14 +2152,7 @@ static int calculate_sizes(struct kmem_cache *s) | |||
2183 | else | 2152 | else |
2184 | s->flags &= ~__OBJECT_POISON; | 2153 | s->flags &= ~__OBJECT_POISON; |
2185 | 2154 | ||
2186 | /* | ||
2187 | * Round up object size to the next word boundary. We can only | ||
2188 | * place the free pointer at word boundaries and this determines | ||
2189 | * the possible location of the free pointer. | ||
2190 | */ | ||
2191 | size = ALIGN(size, sizeof(void *)); | ||
2192 | 2155 | ||
2193 | #ifdef CONFIG_SLUB_DEBUG | ||
2194 | /* | 2156 | /* |
2195 | * If we are Redzoning then check if there is some space between the | 2157 | * If we are Redzoning then check if there is some space between the |
2196 | * end of the object and the free pointer. If not then add an | 2158 | * end of the object and the free pointer. If not then add an |
@@ -2343,7 +2305,7 @@ int kmem_ptr_validate(struct kmem_cache *s, const void *object) | |||
2343 | /* | 2305 | /* |
2344 | * We could also check if the object is on the slabs freelist. | 2306 | * We could also check if the object is on the slabs freelist. |
2345 | * But this would be too expensive and it seems that the main | 2307 | * But this would be too expensive and it seems that the main |
2346 | * purpose of kmem_ptr_valid is to check if the object belongs | 2308 | * purpose of kmem_ptr_valid() is to check if the object belongs |
2347 | * to a certain slab. | 2309 | * to a certain slab. |
2348 | */ | 2310 | */ |
2349 | return 1; | 2311 | return 1; |
@@ -2630,13 +2592,24 @@ void *__kmalloc(size_t size, gfp_t flags) | |||
2630 | } | 2592 | } |
2631 | EXPORT_SYMBOL(__kmalloc); | 2593 | EXPORT_SYMBOL(__kmalloc); |
2632 | 2594 | ||
2595 | static void *kmalloc_large_node(size_t size, gfp_t flags, int node) | ||
2596 | { | ||
2597 | struct page *page = alloc_pages_node(node, flags | __GFP_COMP, | ||
2598 | get_order(size)); | ||
2599 | |||
2600 | if (page) | ||
2601 | return page_address(page); | ||
2602 | else | ||
2603 | return NULL; | ||
2604 | } | ||
2605 | |||
2633 | #ifdef CONFIG_NUMA | 2606 | #ifdef CONFIG_NUMA |
2634 | void *__kmalloc_node(size_t size, gfp_t flags, int node) | 2607 | void *__kmalloc_node(size_t size, gfp_t flags, int node) |
2635 | { | 2608 | { |
2636 | struct kmem_cache *s; | 2609 | struct kmem_cache *s; |
2637 | 2610 | ||
2638 | if (unlikely(size > PAGE_SIZE)) | 2611 | if (unlikely(size > PAGE_SIZE)) |
2639 | return kmalloc_large(size, flags); | 2612 | return kmalloc_large_node(size, flags, node); |
2640 | 2613 | ||
2641 | s = get_slab(size, flags); | 2614 | s = get_slab(size, flags); |
2642 | 2615 | ||
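
kmalloc_large_node() above routes over-PAGE_SIZE requests straight to the page allocator rather than through a kmalloc cache. A userspace analogue of the same size-based dispatch; the threshold check and both helpers are stand-ins, not kernel calls:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

/* Stand-in for kmalloc(): small requests from the heap allocator... */
static void *small_alloc(size_t size)
{
	return malloc(size);
}

/* ...large requests as whole pages, like kmalloc_large_node(). */
static void *large_alloc(size_t size)
{
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}

static void *my_alloc(size_t size)
{
	long page_size = sysconf(_SC_PAGESIZE);

	if (size > (size_t)page_size)
		return large_alloc(size);
	return small_alloc(size);
}

int main(void)
{
	void *a = my_alloc(64);		/* heap path */
	void *b = my_alloc(1 << 20);	/* page path */

	printf("small=%p large=%p\n", a, b);
	free(a);
	munmap(b, 1 << 20);
	return 0;
}
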
@@ -2653,19 +2626,17 @@ size_t ksize(const void *object) | |||
2653 | struct page *page; | 2626 | struct page *page; |
2654 | struct kmem_cache *s; | 2627 | struct kmem_cache *s; |
2655 | 2628 | ||
2656 | BUG_ON(!object); | ||
2657 | if (unlikely(object == ZERO_SIZE_PTR)) | 2629 | if (unlikely(object == ZERO_SIZE_PTR)) |
2658 | return 0; | 2630 | return 0; |
2659 | 2631 | ||
2660 | page = virt_to_head_page(object); | 2632 | page = virt_to_head_page(object); |
2661 | BUG_ON(!page); | ||
2662 | 2633 | ||
2663 | if (unlikely(!PageSlab(page))) | 2634 | if (unlikely(!PageSlab(page))) |
2664 | return PAGE_SIZE << compound_order(page); | 2635 | return PAGE_SIZE << compound_order(page); |
2665 | 2636 | ||
2666 | s = page->slab; | 2637 | s = page->slab; |
2667 | BUG_ON(!s); | ||
2668 | 2638 | ||
2639 | #ifdef CONFIG_SLUB_DEBUG | ||
2669 | /* | 2640 | /* |
2670 | * Debugging requires use of the padding between object | 2641 | * Debugging requires use of the padding between object |
2671 | * and whatever may come after it. | 2642 | * and whatever may come after it. |
@@ -2673,6 +2644,7 @@ size_t ksize(const void *object) | |||
2673 | if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) | 2644 | if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) |
2674 | return s->objsize; | 2645 | return s->objsize; |
2675 | 2646 | ||
2647 | #endif | ||
2676 | /* | 2648 | /* |
2677 | * If we have the need to store the freelist pointer | 2649 | * If we have the need to store the freelist pointer |
2678 | * back there or track user information then we can | 2650 | * back there or track user information then we can |
@@ -2680,7 +2652,6 @@ size_t ksize(const void *object) | |||
2680 | */ | 2652 | */ |
2681 | if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) | 2653 | if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) |
2682 | return s->inuse; | 2654 | return s->inuse; |
2683 | |||
2684 | /* | 2655 | /* |
2685 | * Else we can use all the padding etc for the allocation | 2656 | * Else we can use all the padding etc for the allocation |
2686 | */ | 2657 | */ |
@@ -2957,7 +2928,7 @@ void __init kmem_cache_init(void) | |||
2957 | /* | 2928 | /* |
2958 | * Patch up the size_index table if we have strange large alignment | 2929 | * Patch up the size_index table if we have strange large alignment |
2959 | * requirements for the kmalloc array. This is only the case for | 2930 | * requirements for the kmalloc array. This is only the case for |
2960 | * mips it seems. The standard arches will not generate any code here. | 2931 | * MIPS it seems. The standard arches will not generate any code here. |
2961 | * | 2932 | * |
2962 | * Largest permitted alignment is 256 bytes due to the way we | 2933 | * Largest permitted alignment is 256 bytes due to the way we |
2963 | * handle the index determination for the smaller caches. | 2934 | * handle the index determination for the smaller caches. |
@@ -2986,7 +2957,6 @@ void __init kmem_cache_init(void) | |||
2986 | kmem_size = sizeof(struct kmem_cache); | 2957 | kmem_size = sizeof(struct kmem_cache); |
2987 | #endif | 2958 | #endif |
2988 | 2959 | ||
2989 | |||
2990 | printk(KERN_INFO | 2960 | printk(KERN_INFO |
2991 | "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," | 2961 | "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," |
2992 | " CPUs=%d, Nodes=%d\n", | 2962 | " CPUs=%d, Nodes=%d\n", |
@@ -3083,12 +3053,15 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, | |||
3083 | */ | 3053 | */ |
3084 | for_each_online_cpu(cpu) | 3054 | for_each_online_cpu(cpu) |
3085 | get_cpu_slab(s, cpu)->objsize = s->objsize; | 3055 | get_cpu_slab(s, cpu)->objsize = s->objsize; |
3056 | |||
3086 | s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); | 3057 | s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); |
3087 | up_write(&slub_lock); | 3058 | up_write(&slub_lock); |
3059 | |||
3088 | if (sysfs_slab_alias(s, name)) | 3060 | if (sysfs_slab_alias(s, name)) |
3089 | goto err; | 3061 | goto err; |
3090 | return s; | 3062 | return s; |
3091 | } | 3063 | } |
3064 | |||
3092 | s = kmalloc(kmem_size, GFP_KERNEL); | 3065 | s = kmalloc(kmem_size, GFP_KERNEL); |
3093 | if (s) { | 3066 | if (s) { |
3094 | if (kmem_cache_open(s, GFP_KERNEL, name, | 3067 | if (kmem_cache_open(s, GFP_KERNEL, name, |
@@ -3184,7 +3157,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, | |||
3184 | struct kmem_cache *s; | 3157 | struct kmem_cache *s; |
3185 | 3158 | ||
3186 | if (unlikely(size > PAGE_SIZE)) | 3159 | if (unlikely(size > PAGE_SIZE)) |
3187 | return kmalloc_large(size, gfpflags); | 3160 | return kmalloc_large_node(size, gfpflags, node); |
3188 | 3161 | ||
3189 | s = get_slab(size, gfpflags); | 3162 | s = get_slab(size, gfpflags); |
3190 | 3163 | ||
@@ -3199,7 +3172,7 @@ static int validate_slab(struct kmem_cache *s, struct page *page, | |||
3199 | unsigned long *map) | 3172 | unsigned long *map) |
3200 | { | 3173 | { |
3201 | void *p; | 3174 | void *p; |
3202 | void *addr = slab_address(page); | 3175 | void *addr = page_address(page); |
3203 | 3176 | ||
3204 | if (!check_slab(s, page) || | 3177 | if (!check_slab(s, page) || |
3205 | !on_freelist(s, page, NULL)) | 3178 | !on_freelist(s, page, NULL)) |
@@ -3482,7 +3455,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s, | |||
3482 | static void process_slab(struct loc_track *t, struct kmem_cache *s, | 3455 | static void process_slab(struct loc_track *t, struct kmem_cache *s, |
3483 | struct page *page, enum track_item alloc) | 3456 | struct page *page, enum track_item alloc) |
3484 | { | 3457 | { |
3485 | void *addr = slab_address(page); | 3458 | void *addr = page_address(page); |
3486 | DECLARE_BITMAP(map, s->objects); | 3459 | DECLARE_BITMAP(map, s->objects); |
3487 | void *p; | 3460 | void *p; |
3488 | 3461 | ||
@@ -3591,8 +3564,8 @@ enum slab_stat_type { | |||
3591 | #define SO_CPU (1 << SL_CPU) | 3564 | #define SO_CPU (1 << SL_CPU) |
3592 | #define SO_OBJECTS (1 << SL_OBJECTS) | 3565 | #define SO_OBJECTS (1 << SL_OBJECTS) |
3593 | 3566 | ||
3594 | static unsigned long slab_objects(struct kmem_cache *s, | 3567 | static ssize_t show_slab_objects(struct kmem_cache *s, |
3595 | char *buf, unsigned long flags) | 3568 | char *buf, unsigned long flags) |
3596 | { | 3569 | { |
3597 | unsigned long total = 0; | 3570 | unsigned long total = 0; |
3598 | int cpu; | 3571 | int cpu; |
@@ -3602,6 +3575,8 @@ static unsigned long slab_objects(struct kmem_cache *s, | |||
3602 | unsigned long *per_cpu; | 3575 | unsigned long *per_cpu; |
3603 | 3576 | ||
3604 | nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); | 3577 | nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); |
3578 | if (!nodes) | ||
3579 | return -ENOMEM; | ||
3605 | per_cpu = nodes + nr_node_ids; | 3580 | per_cpu = nodes + nr_node_ids; |
3606 | 3581 | ||
3607 | for_each_possible_cpu(cpu) { | 3582 | for_each_possible_cpu(cpu) { |
@@ -3754,25 +3729,25 @@ SLAB_ATTR_RO(aliases); | |||
3754 | 3729 | ||
3755 | static ssize_t slabs_show(struct kmem_cache *s, char *buf) | 3730 | static ssize_t slabs_show(struct kmem_cache *s, char *buf) |
3756 | { | 3731 | { |
3757 | return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU); | 3732 | return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU); |
3758 | } | 3733 | } |
3759 | SLAB_ATTR_RO(slabs); | 3734 | SLAB_ATTR_RO(slabs); |
3760 | 3735 | ||
3761 | static ssize_t partial_show(struct kmem_cache *s, char *buf) | 3736 | static ssize_t partial_show(struct kmem_cache *s, char *buf) |
3762 | { | 3737 | { |
3763 | return slab_objects(s, buf, SO_PARTIAL); | 3738 | return show_slab_objects(s, buf, SO_PARTIAL); |
3764 | } | 3739 | } |
3765 | SLAB_ATTR_RO(partial); | 3740 | SLAB_ATTR_RO(partial); |
3766 | 3741 | ||
3767 | static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) | 3742 | static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) |
3768 | { | 3743 | { |
3769 | return slab_objects(s, buf, SO_CPU); | 3744 | return show_slab_objects(s, buf, SO_CPU); |
3770 | } | 3745 | } |
3771 | SLAB_ATTR_RO(cpu_slabs); | 3746 | SLAB_ATTR_RO(cpu_slabs); |
3772 | 3747 | ||
3773 | static ssize_t objects_show(struct kmem_cache *s, char *buf) | 3748 | static ssize_t objects_show(struct kmem_cache *s, char *buf) |
3774 | { | 3749 | { |
3775 | return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS); | 3750 | return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS); |
3776 | } | 3751 | } |
3777 | SLAB_ATTR_RO(objects); | 3752 | SLAB_ATTR_RO(objects); |
3778 | 3753 | ||
@@ -3971,7 +3946,6 @@ SLAB_ATTR(remote_node_defrag_ratio); | |||
3971 | #endif | 3946 | #endif |
3972 | 3947 | ||
3973 | #ifdef CONFIG_SLUB_STATS | 3948 | #ifdef CONFIG_SLUB_STATS |
3974 | |||
3975 | static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) | 3949 | static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) |
3976 | { | 3950 | { |
3977 | unsigned long sum = 0; | 3951 | unsigned long sum = 0; |
@@ -4155,8 +4129,8 @@ static struct kset *slab_kset; | |||
4155 | #define ID_STR_LENGTH 64 | 4129 | #define ID_STR_LENGTH 64 |
4156 | 4130 | ||
4157 | /* Create a unique string id for a slab cache: | 4131 | /* Create a unique string id for a slab cache: |
4158 | * format | 4132 | * |
4159 | * :[flags-]size:[memory address of kmemcache] | 4133 | * Format :[flags-]size |
4160 | */ | 4134 | */ |
4161 | static char *create_unique_id(struct kmem_cache *s) | 4135 | static char *create_unique_id(struct kmem_cache *s) |
4162 | { | 4136 | { |
@@ -176,7 +176,7 @@ void activate_page(struct page *page) | |||
176 | SetPageActive(page); | 176 | SetPageActive(page); |
177 | add_page_to_active_list(zone, page); | 177 | add_page_to_active_list(zone, page); |
178 | __count_vm_event(PGACTIVATE); | 178 | __count_vm_event(PGACTIVATE); |
179 | mem_cgroup_move_lists(page_get_page_cgroup(page), true); | 179 | mem_cgroup_move_lists(page, true); |
180 | } | 180 | } |
181 | spin_unlock_irq(&zone->lru_lock); | 181 | spin_unlock_irq(&zone->lru_lock); |
182 | } | 182 | } |
diff --git a/mm/truncate.c b/mm/truncate.c index c35c49e54fb6..7d20ce41ecf5 100644 --- a/mm/truncate.c +++ b/mm/truncate.c | |||
@@ -134,8 +134,7 @@ invalidate_complete_page(struct address_space *mapping, struct page *page) | |||
134 | } | 134 | } |
135 | 135 | ||
136 | /** | 136 | /** |
137 | * truncate_inode_pages - truncate range of pages specified by start and | 137 | * truncate_inode_pages - truncate range of pages specified by start & end byte offsets |
138 | * end byte offsets | ||
139 | * @mapping: mapping to truncate | 138 | * @mapping: mapping to truncate |
140 | * @lstart: offset from which to truncate | 139 | * @lstart: offset from which to truncate |
141 | * @lend: offset to which to truncate | 140 | * @lend: offset to which to truncate |
diff --git a/mm/vmscan.c b/mm/vmscan.c index a26dabd62fed..45711585684e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -126,7 +126,7 @@ long vm_total_pages; /* The total number of pages which the VM controls */ | |||
126 | static LIST_HEAD(shrinker_list); | 126 | static LIST_HEAD(shrinker_list); |
127 | static DECLARE_RWSEM(shrinker_rwsem); | 127 | static DECLARE_RWSEM(shrinker_rwsem); |
128 | 128 | ||
129 | #ifdef CONFIG_CGROUP_MEM_CONT | 129 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR |
130 | #define scan_global_lru(sc) (!(sc)->mem_cgroup) | 130 | #define scan_global_lru(sc) (!(sc)->mem_cgroup) |
131 | #else | 131 | #else |
132 | #define scan_global_lru(sc) (1) | 132 | #define scan_global_lru(sc) (1) |
@@ -1128,7 +1128,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, | |||
1128 | ClearPageActive(page); | 1128 | ClearPageActive(page); |
1129 | 1129 | ||
1130 | list_move(&page->lru, &zone->inactive_list); | 1130 | list_move(&page->lru, &zone->inactive_list); |
1131 | mem_cgroup_move_lists(page_get_page_cgroup(page), false); | 1131 | mem_cgroup_move_lists(page, false); |
1132 | pgmoved++; | 1132 | pgmoved++; |
1133 | if (!pagevec_add(&pvec, page)) { | 1133 | if (!pagevec_add(&pvec, page)) { |
1134 | __mod_zone_page_state(zone, NR_INACTIVE, pgmoved); | 1134 | __mod_zone_page_state(zone, NR_INACTIVE, pgmoved); |
@@ -1156,8 +1156,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, | |||
1156 | VM_BUG_ON(PageLRU(page)); | 1156 | VM_BUG_ON(PageLRU(page)); |
1157 | SetPageLRU(page); | 1157 | SetPageLRU(page); |
1158 | VM_BUG_ON(!PageActive(page)); | 1158 | VM_BUG_ON(!PageActive(page)); |
1159 | |||
1159 | list_move(&page->lru, &zone->active_list); | 1160 | list_move(&page->lru, &zone->active_list); |
1160 | mem_cgroup_move_lists(page_get_page_cgroup(page), true); | 1161 | mem_cgroup_move_lists(page, true); |
1161 | pgmoved++; | 1162 | pgmoved++; |
1162 | if (!pagevec_add(&pvec, page)) { | 1163 | if (!pagevec_add(&pvec, page)) { |
1163 | __mod_zone_page_state(zone, NR_ACTIVE, pgmoved); | 1164 | __mod_zone_page_state(zone, NR_ACTIVE, pgmoved); |
@@ -1427,7 +1428,7 @@ unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask) | |||
1427 | return do_try_to_free_pages(zones, gfp_mask, &sc); | 1428 | return do_try_to_free_pages(zones, gfp_mask, &sc); |
1428 | } | 1429 | } |
1429 | 1430 | ||
1430 | #ifdef CONFIG_CGROUP_MEM_CONT | 1431 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR |
1431 | 1432 | ||
1432 | unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, | 1433 | unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, |
1433 | gfp_t gfp_mask) | 1434 | gfp_t gfp_mask) |
diff --git a/samples/Kconfig b/samples/Kconfig index 74d97cc24787..e1fb471cc501 100644 --- a/samples/Kconfig +++ b/samples/Kconfig | |||
@@ -22,5 +22,16 @@ config SAMPLE_KOBJECT | |||
22 | 22 | ||
23 | If in doubt, say "N" here. | 23 | If in doubt, say "N" here. |
24 | 24 | ||
25 | config SAMPLE_KPROBES | ||
26 | tristate "Build kprobes examples -- loadable modules only" | ||
27 | depends on KPROBES && m | ||
28 | help | ||
29 | This builds several kprobes example modules. | ||
30 | |||
31 | config SAMPLE_KRETPROBES | ||
32 | tristate "Build kretprobes example -- loadable modules only" | ||
33 | default m | ||
34 | depends on SAMPLE_KPROBES && KRETPROBES | ||
35 | |||
25 | endif # SAMPLES | 36 | endif # SAMPLES |
26 | 37 | ||
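The new SAMPLE_KPROBES/SAMPLE_KRETPROBES options are module-only ("depends on ... m"), so they need a modular kernel with kprobes support. A minimal .config sketch (not part of the patch, and assuming an architecture that provides kprobes and return probes) would be:

CONFIG_MODULES=y
CONFIG_KPROBES=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_KPROBES=m
CONFIG_SAMPLE_KRETPROBES=m

KRETPROBES itself is derived from KPROBES on architectures that implement return probes, so it does not need to be set by hand.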
diff --git a/samples/Makefile b/samples/Makefile index 8652d0f268ad..2e02575f7794 100644 --- a/samples/Makefile +++ b/samples/Makefile | |||
@@ -1,3 +1,3 @@ | |||
1 | # Makefile for Linux samples code | 1 | # Makefile for Linux samples code |
2 | 2 | ||
3 | obj-$(CONFIG_SAMPLES) += markers/ kobject/ | 3 | obj-$(CONFIG_SAMPLES) += markers/ kobject/ kprobes/ |
diff --git a/samples/kprobes/Makefile b/samples/kprobes/Makefile new file mode 100644 index 000000000000..68739bc4fc6a --- /dev/null +++ b/samples/kprobes/Makefile | |||
@@ -0,0 +1,5 @@ | |||
1 | # builds the kprobes example kernel modules; | ||
2 | # then to use one (as root): insmod <module_name.ko> | ||
3 | |||
4 | obj-$(CONFIG_SAMPLE_KPROBES) += kprobe_example.o jprobe_example.o | ||
5 | obj-$(CONFIG_SAMPLE_KRETPROBES) += kretprobe_example.o | ||
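The obj-$(CONFIG_SAMPLE_*) lines above hook the samples into the normal kbuild run. For quick experiments the same sources can also be built out of tree; the Makefile below is a sketch under the assumption that the three example files are copied into a standalone directory (it is not part of this patch):

# Hypothetical external-module build of the kprobes samples
obj-m := kprobe_example.o jprobe_example.o kretprobe_example.o

KDIR ?= /lib/modules/$(shell uname -r)/build

modules:
	$(MAKE) -C $(KDIR) M=$(CURDIR) modules

clean:
	$(MAKE) -C $(KDIR) M=$(CURDIR) clean

Either way, loading works as the comment above says: insmod <module_name.ko> as root, with the probe output landing in the kernel log.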
diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c new file mode 100644 index 000000000000..b7541355b92b --- /dev/null +++ b/samples/kprobes/jprobe_example.c | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * Here's a sample kernel module showing the use of jprobes to dump | ||
3 | * the arguments of do_fork(). | ||
4 | * | ||
5 | * For more information on theory of operation of jprobes, see | ||
6 | * Documentation/kprobes.txt | ||
7 | * | ||
8 | * Build and insert the kernel module as done in the kprobe example. | ||
9 | * You will see the trace data in /var/log/messages and on the | ||
10 | * console whenever do_fork() is invoked to create a new process. | ||
11 | * (Some messages may be suppressed if syslogd is configured to | ||
12 | * eliminate duplicate messages.) | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/kprobes.h> | ||
18 | |||
19 | /* | ||
20 | * Jumper probe for do_fork. | ||
21 | * Mirror principle enables access to arguments of the probed routine | ||
22 | * from the probe handler. | ||
23 | */ | ||
24 | |||
25 | /* Proxy routine having the same arguments as actual do_fork() routine */ | ||
26 | static long jdo_fork(unsigned long clone_flags, unsigned long stack_start, | ||
27 | struct pt_regs *regs, unsigned long stack_size, | ||
28 | int __user *parent_tidptr, int __user *child_tidptr) | ||
29 | { | ||
30 | printk(KERN_INFO "jprobe: clone_flags = 0x%lx, stack_size = 0x%lx," | ||
31 | " regs = 0x%p\n", | ||
32 | clone_flags, stack_size, regs); | ||
33 | |||
34 | /* Always end with a call to jprobe_return(). */ | ||
35 | jprobe_return(); | ||
36 | return 0; | ||
37 | } | ||
38 | |||
39 | static struct jprobe my_jprobe = { | ||
40 | .entry = jdo_fork, | ||
41 | .kp = { | ||
42 | .symbol_name = "do_fork", | ||
43 | }, | ||
44 | }; | ||
45 | |||
46 | static int __init jprobe_init(void) | ||
47 | { | ||
48 | int ret; | ||
49 | |||
50 | ret = register_jprobe(&my_jprobe); | ||
51 | if (ret < 0) { | ||
52 | printk(KERN_INFO "register_jprobe failed, returned %d\n", ret); | ||
53 | return -1; | ||
54 | } | ||
55 | printk(KERN_INFO "Planted jprobe at %p, handler addr %p\n", | ||
56 | my_jprobe.kp.addr, my_jprobe.entry); | ||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | static void __exit jprobe_exit(void) | ||
61 | { | ||
62 | unregister_jprobe(&my_jprobe); | ||
63 | printk(KERN_INFO "jprobe at %p unregistered\n", my_jprobe.kp.addr); | ||
64 | } | ||
65 | |||
66 | module_init(jprobe_init) | ||
67 | module_exit(jprobe_exit) | ||
68 | MODULE_LICENSE("GPL"); | ||
diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c new file mode 100644 index 000000000000..a681998a871c --- /dev/null +++ b/samples/kprobes/kprobe_example.c | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * NOTE: This example works on x86 and powerpc. | ||
3 | * Here's a sample kernel module showing the use of kprobes to dump a | ||
4 | * stack trace and selected registers when do_fork() is called. | ||
5 | * | ||
6 | * For more information on theory of operation of kprobes, see | ||
7 | * Documentation/kprobes.txt | ||
8 | * | ||
9 | * You will see the trace data in /var/log/messages and on the console | ||
10 | * whenever do_fork() is invoked to create a new process. | ||
11 | */ | ||
12 | |||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/kprobes.h> | ||
16 | |||
17 | /* For each probe you need to allocate a kprobe structure */ | ||
18 | static struct kprobe kp = { | ||
19 | .symbol_name = "do_fork", | ||
20 | }; | ||
21 | |||
22 | /* kprobe pre_handler: called just before the probed instruction is executed */ | ||
23 | static int handler_pre(struct kprobe *p, struct pt_regs *regs) | ||
24 | { | ||
25 | #ifdef CONFIG_X86 | ||
26 | printk(KERN_INFO "pre_handler: p->addr = 0x%p, ip = %lx," | ||
27 | " flags = 0x%lx\n", | ||
28 | p->addr, regs->ip, regs->flags); | ||
29 | #endif | ||
30 | #ifdef CONFIG_PPC | ||
31 | printk(KERN_INFO "pre_handler: p->addr = 0x%p, nip = 0x%lx," | ||
32 | " msr = 0x%lx\n", | ||
33 | p->addr, regs->nip, regs->msr); | ||
34 | #endif | ||
35 | |||
36 | /* A dump_stack() here will give a stack backtrace */ | ||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | /* kprobe post_handler: called after the probed instruction is executed */ | ||
41 | static void handler_post(struct kprobe *p, struct pt_regs *regs, | ||
42 | unsigned long flags) | ||
43 | { | ||
44 | #ifdef CONFIG_X86 | ||
45 | printk(KERN_INFO "post_handler: p->addr = 0x%p, flags = 0x%lx\n", | ||
46 | p->addr, regs->flags); | ||
47 | #endif | ||
48 | #ifdef CONFIG_PPC | ||
49 | printk(KERN_INFO "post_handler: p->addr = 0x%p, msr = 0x%lx\n", | ||
50 | p->addr, regs->msr); | ||
51 | #endif | ||
52 | } | ||
53 | |||
54 | /* | ||
55 | * fault_handler: this is called if an exception is generated for any | ||
56 | * instruction within the pre- or post-handler, or when Kprobes | ||
57 | * single-steps the probed instruction. | ||
58 | */ | ||
59 | static int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr) | ||
60 | { | ||
61 | printk(KERN_INFO "fault_handler: p->addr = 0x%p, trap #%dn", | ||
62 | p->addr, trapnr); | ||
63 | /* Return 0 because we don't handle the fault. */ | ||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | static int __init kprobe_init(void) | ||
68 | { | ||
69 | int ret; | ||
70 | kp.pre_handler = handler_pre; | ||
71 | kp.post_handler = handler_post; | ||
72 | kp.fault_handler = handler_fault; | ||
73 | |||
74 | ret = register_kprobe(&kp); | ||
75 | if (ret < 0) { | ||
76 | printk(KERN_INFO "register_kprobe failed, returned %d\n", ret); | ||
77 | return ret; | ||
78 | } | ||
79 | printk(KERN_INFO "Planted kprobe at %p\n", kp.addr); | ||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | static void __exit kprobe_exit(void) | ||
84 | { | ||
85 | unregister_kprobe(&kp); | ||
86 | printk(KERN_INFO "kprobe at %p unregistered\n", kp.addr); | ||
87 | } | ||
88 | |||
89 | module_init(kprobe_init) | ||
90 | module_exit(kprobe_exit) | ||
91 | MODULE_LICENSE("GPL"); | ||
diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c new file mode 100644 index 000000000000..4e764b317d61 --- /dev/null +++ b/samples/kprobes/kretprobe_example.c | |||
@@ -0,0 +1,106 @@ | |||
1 | /* | ||
2 | * kretprobe_example.c | ||
3 | * | ||
4 | * Here's a sample kernel module showing the use of return probes to | ||
5 | * report the return value and total time taken for probed function | ||
6 | * to run. | ||
7 | * | ||
8 | * usage: insmod kretprobe_example.ko func=<func_name> | ||
9 | * | ||
10 | * If no func_name is specified, do_fork is instrumented. | ||
11 | * | ||
12 | * For more information on theory of operation of kretprobes, see | ||
13 | * Documentation/kprobes.txt | ||
14 | * | ||
15 | * Build and insert the kernel module as done in the kprobe example. | ||
16 | * You will see the trace data in /var/log/messages and on the console | ||
17 | * whenever the probed function returns. (Some messages may be suppressed | ||
18 | * if syslogd is configured to eliminate duplicate messages.) | ||
19 | */ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/kprobes.h> | ||
24 | #include <linux/ktime.h> | ||
25 | #include <linux/limits.h> | ||
26 | |||
27 | static char func_name[NAME_MAX] = "do_fork"; | ||
28 | module_param_string(func, func_name, NAME_MAX, S_IRUGO); | ||
29 | MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the" | ||
30 | " function's execution time"); | ||
31 | |||
32 | /* per-instance private data */ | ||
33 | struct my_data { | ||
34 | ktime_t entry_stamp; | ||
35 | }; | ||
36 | |||
37 | /* Here we use the entry_handler to timestamp function entry */ | ||
38 | static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs) | ||
39 | { | ||
40 | struct my_data *data; | ||
41 | |||
42 | if (!current->mm) | ||
43 | return 1; /* Skip kernel threads */ | ||
44 | |||
45 | data = (struct my_data *)ri->data; | ||
46 | data->entry_stamp = ktime_get(); | ||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | /* | ||
51 | * Return-probe handler: Log the return value and duration. Duration may turn | ||
52 | * out to be zero consistently, depending upon the granularity of time | ||
53 | * accounting on the platform. | ||
54 | */ | ||
55 | static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs) | ||
56 | { | ||
57 | int retval = regs_return_value(regs); | ||
58 | struct my_data *data = (struct my_data *)ri->data; | ||
59 | s64 delta; | ||
60 | ktime_t now; | ||
61 | |||
62 | now = ktime_get(); | ||
63 | delta = ktime_to_ns(ktime_sub(now, data->entry_stamp)); | ||
64 | printk(KERN_INFO "%s returned %d and took %lld ns to execute\n", | ||
65 | func_name, retval, (long long)delta); | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static struct kretprobe my_kretprobe = { | ||
70 | .handler = ret_handler, | ||
71 | .entry_handler = entry_handler, | ||
72 | .data_size = sizeof(struct my_data), | ||
73 | /* Probe up to 20 instances concurrently. */ | ||
74 | .maxactive = 20, | ||
75 | }; | ||
76 | |||
77 | static int __init kretprobe_init(void) | ||
78 | { | ||
79 | int ret; | ||
80 | |||
81 | my_kretprobe.kp.symbol_name = func_name; | ||
82 | ret = register_kretprobe(&my_kretprobe); | ||
83 | if (ret < 0) { | ||
84 | printk(KERN_INFO "register_kretprobe failed, returned %d\n", | ||
85 | ret); | ||
86 | return -1; | ||
87 | } | ||
88 | printk(KERN_INFO "Planted return probe at %s: %p\n", | ||
89 | my_kretprobe.kp.symbol_name, my_kretprobe.kp.addr); | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static void __exit kretprobe_exit(void) | ||
94 | { | ||
95 | unregister_kretprobe(&my_kretprobe); | ||
96 | printk(KERN_INFO "kretprobe at %p unregistered\n", | ||
97 | my_kretprobe.kp.addr); | ||
98 | |||
99 | /* nmissed > 0 suggests that maxactive was set too low. */ | ||
100 | printk(KERN_INFO "Missed probing %d instances of %s\n", | ||
101 | my_kretprobe.nmissed, my_kretprobe.kp.symbol_name); | ||
102 | } | ||
103 | |||
104 | module_init(kretprobe_init) | ||
105 | module_exit(kretprobe_exit) | ||
106 | MODULE_LICENSE("GPL"); | ||
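Note that .maxactive = 20 above caps how many do_fork() returns can be tracked at once; the nmissed report in the exit handler shows when that cap was too small. As a variation (a sketch only, not part of the sample; it reuses the handlers, my_data and func_name defined above and additionally includes <linux/cpumask.h>), the budget could be scaled with the CPU count instead of being hard-coded:

static int __init kretprobe_init(void)
{
	int ret;

	my_kretprobe.kp.symbol_name = func_name;
	/* Assumption: two in-flight instances per possible CPU are enough;
	 * raise this if the exit handler keeps reporting missed probes. */
	my_kretprobe.maxactive = 2 * num_possible_cpus();

	ret = register_kretprobe(&my_kretprobe);
	if (ret < 0) {
		printk(KERN_INFO "register_kretprobe failed, returned %d\n",
			ret);
		return -1;
	}
	printk(KERN_INFO "Planted return probe at %s: %p\n",
		my_kretprobe.kp.symbol_name, my_kretprobe.kp.addr);
	return 0;
}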
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 2086a856400a..2a7cef9726e4 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -9,7 +9,7 @@ use strict; | |||
9 | my $P = $0; | 9 | my $P = $0; |
10 | $P =~ s@.*/@@g; | 10 | $P =~ s@.*/@@g; |
11 | 11 | ||
12 | my $V = '0.14'; | 12 | my $V = '0.15'; |
13 | 13 | ||
14 | use Getopt::Long qw(:config no_auto_abbrev); | 14 | use Getopt::Long qw(:config no_auto_abbrev); |
15 | 15 | ||
@@ -105,8 +105,7 @@ our $Sparse = qr{ | |||
105 | __iomem| | 105 | __iomem| |
106 | __must_check| | 106 | __must_check| |
107 | __init_refok| | 107 | __init_refok| |
108 | __kprobes| | 108 | __kprobes |
109 | fastcall | ||
110 | }x; | 109 | }x; |
111 | our $Attribute = qr{ | 110 | our $Attribute = qr{ |
112 | const| | 111 | const| |
@@ -158,7 +157,10 @@ sub build_types { | |||
158 | \b | 157 | \b |
159 | (?:const\s+)? | 158 | (?:const\s+)? |
160 | (?:unsigned\s+)? | 159 | (?:unsigned\s+)? |
161 | $all | 160 | (?: |
161 | $all| | ||
162 | (?:typeof|__typeof__)\s*\(\s*\**\s*$Ident\s*\) | ||
163 | ) | ||
162 | (?:\s+$Sparse|\s+const)* | 164 | (?:\s+$Sparse|\s+const)* |
163 | \b | 165 | \b |
164 | }x; | 166 | }x; |
@@ -362,6 +364,7 @@ sub ctx_statement_block { | |||
362 | 364 | ||
363 | my $type = ''; | 365 | my $type = ''; |
364 | my $level = 0; | 366 | my $level = 0; |
367 | my $p; | ||
365 | my $c; | 368 | my $c; |
366 | my $len = 0; | 369 | my $len = 0; |
367 | 370 | ||
@@ -386,6 +389,7 @@ sub ctx_statement_block { | |||
386 | last; | 389 | last; |
387 | } | 390 | } |
388 | } | 391 | } |
392 | $p = $c; | ||
389 | $c = substr($blk, $off, 1); | 393 | $c = substr($blk, $off, 1); |
390 | $remainder = substr($blk, $off); | 394 | $remainder = substr($blk, $off); |
391 | 395 | ||
@@ -397,8 +401,9 @@ sub ctx_statement_block { | |||
397 | } | 401 | } |
398 | 402 | ||
399 | # An else is really a conditional as long as its not else if | 403 | # An else is really a conditional as long as its not else if |
400 | if ($level == 0 && $remainder =~ /(\s+else)(?:\s|{)/ && | 404 | if ($level == 0 && (!defined($p) || $p =~ /(?:\s|\})/) && |
401 | $remainder !~ /\s+else\s+if\b/) { | 405 | $remainder =~ /(else)(?:\s|{)/ && |
406 | $remainder !~ /else\s+if\b/) { | ||
402 | $coff = $off + length($1); | 407 | $coff = $off + length($1); |
403 | } | 408 | } |
404 | 409 | ||
@@ -445,21 +450,73 @@ sub ctx_statement_block { | |||
445 | $line, $remain + 1, $off - $loff + 1, $level); | 450 | $line, $remain + 1, $off - $loff + 1, $level); |
446 | } | 451 | } |
447 | 452 | ||
453 | sub statement_lines { | ||
454 | my ($stmt) = @_; | ||
455 | |||
456 | # Strip the diff line prefixes and rip blank lines at start and end. | ||
457 | $stmt =~ s/(^|\n)./$1/g; | ||
458 | $stmt =~ s/^\s*//; | ||
459 | $stmt =~ s/\s*$//; | ||
460 | |||
461 | my @stmt_lines = ($stmt =~ /\n/g); | ||
462 | |||
463 | return $#stmt_lines + 2; | ||
464 | } | ||
465 | |||
466 | sub statement_rawlines { | ||
467 | my ($stmt) = @_; | ||
468 | |||
469 | my @stmt_lines = ($stmt =~ /\n/g); | ||
470 | |||
471 | return $#stmt_lines + 2; | ||
472 | } | ||
473 | |||
474 | sub statement_block_size { | ||
475 | my ($stmt) = @_; | ||
476 | |||
477 | $stmt =~ s/(^|\n)./$1/g; | ||
478 | $stmt =~ s/^\s*{//; | ||
479 | $stmt =~ s/}\s*$//; | ||
480 | $stmt =~ s/^\s*//; | ||
481 | $stmt =~ s/\s*$//; | ||
482 | |||
483 | my @stmt_lines = ($stmt =~ /\n/g); | ||
484 | my @stmt_statements = ($stmt =~ /;/g); | ||
485 | |||
486 | my $stmt_lines = $#stmt_lines + 2; | ||
487 | my $stmt_statements = $#stmt_statements + 1; | ||
488 | |||
489 | if ($stmt_lines > $stmt_statements) { | ||
490 | return $stmt_lines; | ||
491 | } else { | ||
492 | return $stmt_statements; | ||
493 | } | ||
494 | } | ||
495 | |||
448 | sub ctx_statement_full { | 496 | sub ctx_statement_full { |
449 | my ($linenr, $remain, $off) = @_; | 497 | my ($linenr, $remain, $off) = @_; |
450 | my ($statement, $condition, $level); | 498 | my ($statement, $condition, $level); |
451 | 499 | ||
452 | my (@chunks); | 500 | my (@chunks); |
453 | 501 | ||
502 | # Grab the first conditional/block pair. | ||
454 | ($statement, $condition, $linenr, $remain, $off, $level) = | 503 | ($statement, $condition, $linenr, $remain, $off, $level) = |
455 | ctx_statement_block($linenr, $remain, $off); | 504 | ctx_statement_block($linenr, $remain, $off); |
456 | #print "F: c<$condition> s<$statement>\n"; | 505 | #print "F: c<$condition> s<$statement>\n"; |
506 | push(@chunks, [ $condition, $statement ]); | ||
507 | if (!($remain > 0 && $condition =~ /^\s*(?:\n[+-])?\s*(?:if|else|do)\b/s)) { | ||
508 | return ($level, $linenr, @chunks); | ||
509 | } | ||
510 | |||
511 | # Pull in the following conditional/block pairs and see if they | ||
512 | # could continue the statement. | ||
457 | for (;;) { | 513 | for (;;) { |
458 | push(@chunks, [ $condition, $statement ]); | ||
459 | last if (!($remain > 0 && $condition =~ /^.\s*(?:if|else|do)/)); | ||
460 | ($statement, $condition, $linenr, $remain, $off, $level) = | 514 | ($statement, $condition, $linenr, $remain, $off, $level) = |
461 | ctx_statement_block($linenr, $remain, $off); | 515 | ctx_statement_block($linenr, $remain, $off); |
462 | #print "C: c<$condition> s<$statement>\n"; | 516 | #print "C: c<$condition> s<$statement> remain<$remain>\n"; |
517 | last if (!($remain > 0 && $condition =~ /^\s*(?:\n[+-])?\s*(?:else|do)\b/s)); | ||
518 | #print "C: push\n"; | ||
519 | push(@chunks, [ $condition, $statement ]); | ||
463 | } | 520 | } |
464 | 521 | ||
465 | return ($level, $linenr, @chunks); | 522 | return ($level, $linenr, @chunks); |
@@ -593,13 +650,13 @@ sub cat_vet { | |||
593 | } | 650 | } |
594 | 651 | ||
595 | my $av_preprocessor = 0; | 652 | my $av_preprocessor = 0; |
596 | my $av_paren = 0; | 653 | my $av_pending; |
597 | my @av_paren_type; | 654 | my @av_paren_type; |
598 | 655 | ||
599 | sub annotate_reset { | 656 | sub annotate_reset { |
600 | $av_preprocessor = 0; | 657 | $av_preprocessor = 0; |
601 | $av_paren = 0; | 658 | $av_pending = '_'; |
602 | @av_paren_type = (); | 659 | @av_paren_type = ('E'); |
603 | } | 660 | } |
604 | 661 | ||
605 | sub annotate_values { | 662 | sub annotate_values { |
@@ -611,12 +668,13 @@ sub annotate_values { | |||
611 | print "$stream\n" if ($dbg_values > 1); | 668 | print "$stream\n" if ($dbg_values > 1); |
612 | 669 | ||
613 | while (length($cur)) { | 670 | while (length($cur)) { |
614 | print " <$type> " if ($dbg_values > 1); | 671 | print " <" . join('', @av_paren_type) . |
672 | "> <$type> " if ($dbg_values > 1); | ||
615 | if ($cur =~ /^(\s+)/o) { | 673 | if ($cur =~ /^(\s+)/o) { |
616 | print "WS($1)\n" if ($dbg_values > 1); | 674 | print "WS($1)\n" if ($dbg_values > 1); |
617 | if ($1 =~ /\n/ && $av_preprocessor) { | 675 | if ($1 =~ /\n/ && $av_preprocessor) { |
676 | $type = pop(@av_paren_type); | ||
618 | $av_preprocessor = 0; | 677 | $av_preprocessor = 0; |
619 | $type = 'N'; | ||
620 | } | 678 | } |
621 | 679 | ||
622 | } elsif ($cur =~ /^($Type)/) { | 680 | } elsif ($cur =~ /^($Type)/) { |
@@ -626,11 +684,33 @@ sub annotate_values { | |||
626 | } elsif ($cur =~ /^(#\s*define\s*$Ident)(\(?)/o) { | 684 | } elsif ($cur =~ /^(#\s*define\s*$Ident)(\(?)/o) { |
627 | print "DEFINE($1)\n" if ($dbg_values > 1); | 685 | print "DEFINE($1)\n" if ($dbg_values > 1); |
628 | $av_preprocessor = 1; | 686 | $av_preprocessor = 1; |
629 | $av_paren_type[$av_paren] = 'N'; | 687 | $av_pending = 'N'; |
630 | 688 | ||
631 | } elsif ($cur =~ /^(#\s*(?:ifdef|ifndef|if|else|elif|endif))/o) { | 689 | } elsif ($cur =~ /^(#\s*(?:ifdef|ifndef|if))/o) { |
632 | print "PRE($1)\n" if ($dbg_values > 1); | 690 | print "PRE_START($1)\n" if ($dbg_values > 1); |
633 | $av_preprocessor = 1; | 691 | $av_preprocessor = 1; |
692 | |||
693 | push(@av_paren_type, $type); | ||
694 | push(@av_paren_type, $type); | ||
695 | $type = 'N'; | ||
696 | |||
697 | } elsif ($cur =~ /^(#\s*(?:else|elif))/o) { | ||
698 | print "PRE_RESTART($1)\n" if ($dbg_values > 1); | ||
699 | $av_preprocessor = 1; | ||
700 | |||
701 | push(@av_paren_type, $av_paren_type[$#av_paren_type]); | ||
702 | |||
703 | $type = 'N'; | ||
704 | |||
705 | } elsif ($cur =~ /^(#\s*(?:endif))/o) { | ||
706 | print "PRE_END($1)\n" if ($dbg_values > 1); | ||
707 | |||
708 | $av_preprocessor = 1; | ||
709 | |||
710 | # Assume all arms of the conditional end as this | ||
711 | # one does, and continue as if the #endif was not here. | ||
712 | pop(@av_paren_type); | ||
713 | push(@av_paren_type, $type); | ||
634 | $type = 'N'; | 714 | $type = 'N'; |
635 | 715 | ||
636 | } elsif ($cur =~ /^(\\\n)/o) { | 716 | } elsif ($cur =~ /^(\\\n)/o) { |
@@ -639,13 +719,13 @@ sub annotate_values { | |||
639 | } elsif ($cur =~ /^(sizeof)\s*(\()?/o) { | 719 | } elsif ($cur =~ /^(sizeof)\s*(\()?/o) { |
640 | print "SIZEOF($1)\n" if ($dbg_values > 1); | 720 | print "SIZEOF($1)\n" if ($dbg_values > 1); |
641 | if (defined $2) { | 721 | if (defined $2) { |
642 | $av_paren_type[$av_paren] = 'V'; | 722 | $av_pending = 'V'; |
643 | } | 723 | } |
644 | $type = 'N'; | 724 | $type = 'N'; |
645 | 725 | ||
646 | } elsif ($cur =~ /^(if|while|typeof|__typeof__|for)\b/o) { | 726 | } elsif ($cur =~ /^(if|while|typeof|__typeof__|for)\b/o) { |
647 | print "COND($1)\n" if ($dbg_values > 1); | 727 | print "COND($1)\n" if ($dbg_values > 1); |
648 | $av_paren_type[$av_paren] = 'N'; | 728 | $av_pending = 'N'; |
649 | $type = 'N'; | 729 | $type = 'N'; |
650 | 730 | ||
651 | } elsif ($cur =~/^(return|case|else)/o) { | 731 | } elsif ($cur =~/^(return|case|else)/o) { |
@@ -654,14 +734,14 @@ sub annotate_values { | |||
654 | 734 | ||
655 | } elsif ($cur =~ /^(\()/o) { | 735 | } elsif ($cur =~ /^(\()/o) { |
656 | print "PAREN('$1')\n" if ($dbg_values > 1); | 736 | print "PAREN('$1')\n" if ($dbg_values > 1); |
657 | $av_paren++; | 737 | push(@av_paren_type, $av_pending); |
738 | $av_pending = '_'; | ||
658 | $type = 'N'; | 739 | $type = 'N'; |
659 | 740 | ||
660 | } elsif ($cur =~ /^(\))/o) { | 741 | } elsif ($cur =~ /^(\))/o) { |
661 | $av_paren-- if ($av_paren > 0); | 742 | my $new_type = pop(@av_paren_type); |
662 | if (defined $av_paren_type[$av_paren]) { | 743 | if ($new_type ne '_') { |
663 | $type = $av_paren_type[$av_paren]; | 744 | $type = $new_type; |
664 | undef $av_paren_type[$av_paren]; | ||
665 | print "PAREN('$1') -> $type\n" | 745 | print "PAREN('$1') -> $type\n" |
666 | if ($dbg_values > 1); | 746 | if ($dbg_values > 1); |
667 | } else { | 747 | } else { |
@@ -670,7 +750,7 @@ sub annotate_values { | |||
670 | 750 | ||
671 | } elsif ($cur =~ /^($Ident)\(/o) { | 751 | } elsif ($cur =~ /^($Ident)\(/o) { |
672 | print "FUNC($1)\n" if ($dbg_values > 1); | 752 | print "FUNC($1)\n" if ($dbg_values > 1); |
673 | $av_paren_type[$av_paren] = 'V'; | 753 | $av_pending = 'V'; |
674 | 754 | ||
675 | } elsif ($cur =~ /^($Ident|$Constant)/o) { | 755 | } elsif ($cur =~ /^($Ident|$Constant)/o) { |
676 | print "IDENT($1)\n" if ($dbg_values > 1); | 756 | print "IDENT($1)\n" if ($dbg_values > 1); |
@@ -680,11 +760,11 @@ sub annotate_values { | |||
680 | print "ASSIGN($1)\n" if ($dbg_values > 1); | 760 | print "ASSIGN($1)\n" if ($dbg_values > 1); |
681 | $type = 'N'; | 761 | $type = 'N'; |
682 | 762 | ||
683 | } elsif ($cur =~/^(;)/) { | 763 | } elsif ($cur =~/^(;|{|})/) { |
684 | print "END($1)\n" if ($dbg_values > 1); | 764 | print "END($1)\n" if ($dbg_values > 1); |
685 | $type = 'E'; | 765 | $type = 'E'; |
686 | 766 | ||
687 | } elsif ($cur =~ /^(;|{|}|\?|:|\[)/o) { | 767 | } elsif ($cur =~ /^(;|\?|:|\[)/o) { |
688 | print "CLOSE($1)\n" if ($dbg_values > 1); | 768 | print "CLOSE($1)\n" if ($dbg_values > 1); |
689 | $type = 'N'; | 769 | $type = 'N'; |
690 | 770 | ||
@@ -988,7 +1068,7 @@ sub process { | |||
988 | } | 1068 | } |
989 | 1069 | ||
990 | # check for RCS/CVS revision markers | 1070 | # check for RCS/CVS revision markers |
991 | if ($rawline =~ /\$(Revision|Log|Id)(?:\$|)/) { | 1071 | if ($rawline =~ /^\+.*\$(Revision|Log|Id)(?:\$|)/) { |
992 | WARN("CVS style keyword markers, these will _not_ be updated\n". $herecurr); | 1072 | WARN("CVS style keyword markers, these will _not_ be updated\n". $herecurr); |
993 | } | 1073 | } |
994 | 1074 | ||
@@ -999,41 +1079,44 @@ sub process { | |||
999 | 1079 | ||
1000 | # Check for potential 'bare' types | 1080 | # Check for potential 'bare' types |
1001 | if ($realcnt) { | 1081 | if ($realcnt) { |
1082 | my ($s, $c) = ctx_statement_block($linenr, $realcnt, 0); | ||
1083 | $s =~ s/\n./ /g; | ||
1084 | $s =~ s/{.*$//; | ||
1085 | |||
1002 | # Ignore goto labels. | 1086 | # Ignore goto labels. |
1003 | if ($line =~ /$Ident:\*$/) { | 1087 | if ($s =~ /$Ident:\*$/) { |
1004 | 1088 | ||
1005 | # Ignore functions being called | 1089 | # Ignore functions being called |
1006 | } elsif ($line =~ /^.\s*$Ident\s*\(/) { | 1090 | } elsif ($s =~ /^.\s*$Ident\s*\(/) { |
1007 | 1091 | ||
1008 | # definitions in global scope can only start with types | 1092 | # definitions in global scope can only start with types |
1009 | } elsif ($line =~ /^.(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?($Ident)\b/) { | 1093 | } elsif ($s =~ /^.(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?($Ident)\b/) { |
1010 | possible($1, $line); | 1094 | possible($1, $s); |
1011 | 1095 | ||
1012 | # declarations always start with types | 1096 | # declarations always start with types |
1013 | } elsif ($prev_values eq 'E' && $line =~ /^.\s*(?:$Storage\s+)?(?:const\s+)?($Ident)\b(:?\s+$Sparse)?\s*\**\s*$Ident\s*(?:;|=|,)/) { | 1097 | } elsif ($prev_values eq 'E' && $s =~ /^.\s*(?:$Storage\s+)?(?:const\s+)?($Ident)\b(:?\s+$Sparse)?\s*\**\s*$Ident\s*(?:;|=|,)/) { |
1014 | possible($1); | 1098 | possible($1, $s); |
1015 | } | 1099 | } |
1016 | 1100 | ||
1017 | # any (foo ... *) is a pointer cast, and foo is a type | 1101 | # any (foo ... *) is a pointer cast, and foo is a type |
1018 | while ($line =~ /\(($Ident)(?:\s+$Sparse)*\s*\*+\s*\)/g) { | 1102 | while ($s =~ /\(($Ident)(?:\s+$Sparse)*\s*\*+\s*\)/g) { |
1019 | possible($1, $line); | 1103 | possible($1, $s); |
1020 | } | 1104 | } |
1021 | 1105 | ||
1022 | # Check for any sort of function declaration. | 1106 | # Check for any sort of function declaration. |
1023 | # int foo(something bar, other baz); | 1107 | # int foo(something bar, other baz); |
1024 | # void (*store_gdt)(x86_descr_ptr *); | 1108 | # void (*store_gdt)(x86_descr_ptr *); |
1025 | if ($prev_values eq 'E' && $line =~ /^(.(?:typedef\s*)?(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*(?:\b$Ident|\(\*\s*$Ident\))\s*)\(/) { | 1109 | if ($prev_values eq 'E' && $s =~ /^(.(?:typedef\s*)?(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*(?:\b$Ident|\(\*\s*$Ident\))\s*)\(/) { |
1026 | my ($name_len) = length($1); | 1110 | my ($name_len) = length($1); |
1027 | my ($level, @ctx) = ctx_statement_level($linenr, $realcnt, $name_len); | ||
1028 | my $ctx = join("\n", @ctx); | ||
1029 | 1111 | ||
1030 | $ctx =~ s/\n.//; | 1112 | my $ctx = $s; |
1031 | substr($ctx, 0, $name_len + 1) = ''; | 1113 | substr($ctx, 0, $name_len + 1) = ''; |
1032 | $ctx =~ s/\)[^\)]*$//; | 1114 | $ctx =~ s/\)[^\)]*$//; |
1115 | |||
1033 | for my $arg (split(/\s*,\s*/, $ctx)) { | 1116 | for my $arg (split(/\s*,\s*/, $ctx)) { |
1034 | if ($arg =~ /^(?:const\s+)?($Ident)(?:\s+$Sparse)*\s*\**\s*(:?\b$Ident)?$/ || $arg =~ /^($Ident)$/) { | 1117 | if ($arg =~ /^(?:const\s+)?($Ident)(?:\s+$Sparse)*\s*\**\s*(:?\b$Ident)?$/ || $arg =~ /^($Ident)$/) { |
1035 | 1118 | ||
1036 | possible($1, $line); | 1119 | possible($1, $s); |
1037 | } | 1120 | } |
1038 | } | 1121 | } |
1039 | } | 1122 | } |
@@ -1100,8 +1183,8 @@ sub process { | |||
1100 | $curr_values = $prev_values . $curr_values; | 1183 | $curr_values = $prev_values . $curr_values; |
1101 | if ($dbg_values) { | 1184 | if ($dbg_values) { |
1102 | my $outline = $opline; $outline =~ s/\t/ /g; | 1185 | my $outline = $opline; $outline =~ s/\t/ /g; |
1103 | warn "--> .$outline\n"; | 1186 | print "$linenr > .$outline\n"; |
1104 | warn "--> $curr_values\n"; | 1187 | print "$linenr > $curr_values\n"; |
1105 | } | 1188 | } |
1106 | $prev_values = substr($curr_values, -1); | 1189 | $prev_values = substr($curr_values, -1); |
1107 | 1190 | ||
@@ -1148,7 +1231,9 @@ sub process { | |||
1148 | if (($prevline !~ /^}/) && | 1231 | if (($prevline !~ /^}/) && |
1149 | ($prevline !~ /^\+}/) && | 1232 | ($prevline !~ /^\+}/) && |
1150 | ($prevline !~ /^ }/) && | 1233 | ($prevline !~ /^ }/) && |
1151 | ($prevline !~ /\b\Q$name\E(?:\s+$Attribute)?\s*(?:;|=)/)) { | 1234 | ($prevline !~ /^.DECLARE_$Ident\(\Q$name\E\)/) && |
1235 | ($prevline !~ /^.LIST_HEAD\(\Q$name\E\)/) && | ||
1236 | ($prevline !~ /\b\Q$name\E(?:\s+$Attribute)?\s*(?:;|=|\[)/)) { | ||
1152 | WARN("EXPORT_SYMBOL(foo); should immediately follow its function/variable\n" . $herecurr); | 1237 | WARN("EXPORT_SYMBOL(foo); should immediately follow its function/variable\n" . $herecurr); |
1153 | } | 1238 | } |
1154 | } | 1239 | } |
@@ -1266,7 +1351,7 @@ sub process { | |||
1266 | =>|->|<<|>>|<|>|=|!|~| | 1351 | =>|->|<<|>>|<|>|=|!|~| |
1267 | &&|\|\||,|\^|\+\+|--|&|\||\+|-|\*|\/|% | 1352 | &&|\|\||,|\^|\+\+|--|&|\||\+|-|\*|\/|% |
1268 | }x; | 1353 | }x; |
1269 | my @elements = split(/($;+|$ops|;)/, $opline); | 1354 | my @elements = split(/($ops|;)/, $opline); |
1270 | my $off = 0; | 1355 | my $off = 0; |
1271 | 1356 | ||
1272 | my $blank = copy_spacing($opline); | 1357 | my $blank = copy_spacing($opline); |
@@ -1277,6 +1362,7 @@ sub process { | |||
1277 | my $a = ''; | 1362 | my $a = ''; |
1278 | $a = 'V' if ($elements[$n] ne ''); | 1363 | $a = 'V' if ($elements[$n] ne ''); |
1279 | $a = 'W' if ($elements[$n] =~ /\s$/); | 1364 | $a = 'W' if ($elements[$n] =~ /\s$/); |
1365 | $a = 'C' if ($elements[$n] =~ /$;$/); | ||
1280 | $a = 'B' if ($elements[$n] =~ /(\[|\()$/); | 1366 | $a = 'B' if ($elements[$n] =~ /(\[|\()$/); |
1281 | $a = 'O' if ($elements[$n] eq ''); | 1367 | $a = 'O' if ($elements[$n] eq ''); |
1282 | $a = 'E' if ($elements[$n] eq '' && $n == 0); | 1368 | $a = 'E' if ($elements[$n] eq '' && $n == 0); |
@@ -1287,6 +1373,7 @@ sub process { | |||
1287 | if (defined $elements[$n + 2]) { | 1373 | if (defined $elements[$n + 2]) { |
1288 | $c = 'V' if ($elements[$n + 2] ne ''); | 1374 | $c = 'V' if ($elements[$n + 2] ne ''); |
1289 | $c = 'W' if ($elements[$n + 2] =~ /^\s/); | 1375 | $c = 'W' if ($elements[$n + 2] =~ /^\s/); |
1376 | $c = 'C' if ($elements[$n + 2] =~ /^$;/); | ||
1290 | $c = 'B' if ($elements[$n + 2] =~ /^(\)|\]|;)/); | 1377 | $c = 'B' if ($elements[$n + 2] =~ /^(\)|\]|;)/); |
1291 | $c = 'O' if ($elements[$n + 2] eq ''); | 1378 | $c = 'O' if ($elements[$n + 2] eq ''); |
1292 | $c = 'E' if ($elements[$n + 2] =~ /\s*\\$/); | 1379 | $c = 'E' if ($elements[$n + 2] =~ /\s*\\$/); |
@@ -1330,13 +1417,13 @@ sub process { | |||
1330 | if ($op_type ne 'V' && | 1417 | if ($op_type ne 'V' && |
1331 | $ca =~ /\s$/ && $cc =~ /^\s*,/) { | 1418 | $ca =~ /\s$/ && $cc =~ /^\s*,/) { |
1332 | 1419 | ||
1333 | # Ignore comments | 1420 | # # Ignore comments |
1334 | } elsif ($op =~ /^$;+$/) { | 1421 | # } elsif ($op =~ /^$;+$/) { |
1335 | 1422 | ||
1336 | # ; should have either the end of line or a space or \ after it | 1423 | # ; should have either the end of line or a space or \ after it |
1337 | } elsif ($op eq ';') { | 1424 | } elsif ($op eq ';') { |
1338 | if ($ctx !~ /.x[WEB]/ && $cc !~ /^\\/ && | 1425 | if ($ctx !~ /.x[WEBC]/ && |
1339 | $cc !~ /^;/) { | 1426 | $cc !~ /^\\/ && $cc !~ /^;/) { |
1340 | ERROR("need space after that '$op' $at\n" . $hereptr); | 1427 | ERROR("need space after that '$op' $at\n" . $hereptr); |
1341 | } | 1428 | } |
1342 | 1429 | ||
@@ -1351,7 +1438,7 @@ sub process { | |||
1351 | 1438 | ||
1352 | # , must have a space on the right. | 1439 | # , must have a space on the right. |
1353 | } elsif ($op eq ',') { | 1440 | } elsif ($op eq ',') { |
1354 | if ($ctx !~ /.xW|.xE/ && $cc !~ /^}/) { | 1441 | if ($ctx !~ /.x[WEC]/ && $cc !~ /^}/) { |
1355 | ERROR("need space after that '$op' $at\n" . $hereptr); | 1442 | ERROR("need space after that '$op' $at\n" . $hereptr); |
1356 | } | 1443 | } |
1357 | 1444 | ||
@@ -1364,7 +1451,7 @@ sub process { | |||
1364 | # unary operator, or a cast | 1451 | # unary operator, or a cast |
1365 | } elsif ($op eq '!' || $op eq '~' || | 1452 | } elsif ($op eq '!' || $op eq '~' || |
1366 | ($is_unary && ($op eq '*' || $op eq '-' || $op eq '&'))) { | 1453 | ($is_unary && ($op eq '*' || $op eq '-' || $op eq '&'))) { |
1367 | if ($ctx !~ /[WEB]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) { | 1454 | if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) { |
1368 | ERROR("need space before that '$op' $at\n" . $hereptr); | 1455 | ERROR("need space before that '$op' $at\n" . $hereptr); |
1369 | } | 1456 | } |
1370 | if ($ctx =~ /.xW/) { | 1457 | if ($ctx =~ /.xW/) { |
@@ -1373,7 +1460,7 @@ sub process { | |||
1373 | 1460 | ||
1374 | # unary ++ and unary -- are allowed no space on one side. | 1461 | # unary ++ and unary -- are allowed no space on one side. |
1375 | } elsif ($op eq '++' or $op eq '--') { | 1462 | } elsif ($op eq '++' or $op eq '--') { |
1376 | if ($ctx !~ /[WOB]x[^W]/ && $ctx !~ /[^W]x[WOBE]/) { | 1463 | if ($ctx !~ /[WOBC]x[^W]/ && $ctx !~ /[^W]x[WOBEC]/) { |
1377 | ERROR("need space one side of that '$op' $at\n" . $hereptr); | 1464 | ERROR("need space one side of that '$op' $at\n" . $hereptr); |
1378 | } | 1465 | } |
1379 | if ($ctx =~ /WxB/ || ($ctx =~ /Wx./ && $cc =~ /^;/)) { | 1466 | if ($ctx =~ /WxB/ || ($ctx =~ /Wx./ && $cc =~ /^;/)) { |
@@ -1387,13 +1474,13 @@ sub process { | |||
1387 | $op eq '*' or $op eq '/' or | 1474 | $op eq '*' or $op eq '/' or |
1388 | $op eq '%') | 1475 | $op eq '%') |
1389 | { | 1476 | { |
1390 | if ($ctx !~ /VxV|WxW|VxE|WxE|VxO/) { | 1477 | if ($ctx !~ /VxV|WxW|VxE|WxE|VxO|Cx.|.xC/) { |
1391 | ERROR("need consistent spacing around '$op' $at\n" . | 1478 | ERROR("need consistent spacing around '$op' $at\n" . |
1392 | $hereptr); | 1479 | $hereptr); |
1393 | } | 1480 | } |
1394 | 1481 | ||
1395 | # All the others need spaces both sides. | 1482 | # All the others need spaces both sides. |
1396 | } elsif ($ctx !~ /[EW]x[WE]/) { | 1483 | } elsif ($ctx !~ /[EWC]x[CWE]/) { |
1397 | # Ignore email addresses <foo@bar> | 1484 | # Ignore email addresses <foo@bar> |
1398 | if (!($op eq '<' && $cb =~ /$;\S+\@\S+>/) && | 1485 | if (!($op eq '<' && $cb =~ /$;\S+\@\S+>/) && |
1399 | !($op eq '>' && $cb =~ /<\S+\@\S+$;/)) { | 1486 | !($op eq '>' && $cb =~ /<\S+\@\S+$;/)) { |
@@ -1551,7 +1638,7 @@ sub process { | |||
1551 | 1638 | ||
1552 | # multi-statement macros should be enclosed in a do while loop, grab the | 1639 | # multi-statement macros should be enclosed in a do while loop, grab the |
1553 | # first statement and ensure its the whole macro if its not enclosed | 1640 | # first statement and ensure its the whole macro if its not enclosed |
1554 | # in a known goot container | 1641 | # in a known good container |
1555 | if ($prevline =~ /\#define.*\\/ && | 1642 | if ($prevline =~ /\#define.*\\/ && |
1556 | $prevline !~/(?:do\s+{|\(\{|\{)/ && | 1643 | $prevline !~/(?:do\s+{|\(\{|\{)/ && |
1557 | $line !~ /(?:do\s+{|\(\{|\{)/ && | 1644 | $line !~ /(?:do\s+{|\(\{|\{)/ && |
@@ -1599,84 +1686,95 @@ sub process { | |||
1599 | # check for redundant bracing round if etc | 1686 | # check for redundant bracing round if etc |
1600 | if ($line =~ /(^.*)\bif\b/ && $1 !~ /else\s*$/) { | 1687 | if ($line =~ /(^.*)\bif\b/ && $1 !~ /else\s*$/) { |
1601 | my ($level, $endln, @chunks) = | 1688 | my ($level, $endln, @chunks) = |
1602 | ctx_statement_full($linenr, $realcnt, 0); | 1689 | ctx_statement_full($linenr, $realcnt, 1); |
1603 | #print "chunks<$#chunks> linenr<$linenr> endln<$endln> level<$level>\n"; | 1690 | #print "chunks<$#chunks> linenr<$linenr> endln<$endln> level<$level>\n"; |
1604 | if ($#chunks > 1 && $level == 0) { | 1691 | #print "APW: <<$chunks[1][0]>><<$chunks[1][1]>>\n"; |
1692 | if ($#chunks > 0 && $level == 0) { | ||
1605 | my $allowed = 0; | 1693 | my $allowed = 0; |
1606 | my $seen = 0; | 1694 | my $seen = 0; |
1695 | my $herectx = $here . "\n"; | ||
1696 | my $ln = $linenr - 1; | ||
1607 | for my $chunk (@chunks) { | 1697 | for my $chunk (@chunks) { |
1608 | my ($cond, $block) = @{$chunk}; | 1698 | my ($cond, $block) = @{$chunk}; |
1609 | 1699 | ||
1700 | $herectx .= "$rawlines[$ln]\n[...]\n"; | ||
1701 | $ln += statement_rawlines($block) - 1; | ||
1702 | |||
1610 | substr($block, 0, length($cond)) = ''; | 1703 | substr($block, 0, length($cond)) = ''; |
1611 | 1704 | ||
1612 | $seen++ if ($block =~ /^\s*{/); | 1705 | $seen++ if ($block =~ /^\s*{/); |
1613 | 1706 | ||
1614 | $block =~ s/(^|\n)./$1/g; | 1707 | #print "cond<$cond> block<$block> allowed<$allowed>\n"; |
1615 | $block =~ s/^\s*{//; | 1708 | if (statement_lines($cond) > 1) { |
1616 | $block =~ s/}\s*$//; | 1709 | #print "APW: ALLOWED: cond<$cond>\n"; |
1617 | $block =~ s/^\s*//; | ||
1618 | $block =~ s/\s*$//; | ||
1619 | |||
1620 | my @lines = ($block =~ /\n/g); | ||
1621 | my @statements = ($block =~ /;/g); | ||
1622 | |||
1623 | #print "cond<$cond> block<$block> lines<" . scalar(@lines) . "> statements<" . scalar(@statements) . "> seen<$seen> allowed<$allowed>\n"; | ||
1624 | if (scalar(@lines) != 0) { | ||
1625 | $allowed = 1; | 1710 | $allowed = 1; |
1626 | } | 1711 | } |
1627 | if ($block =~/\b(?:if|for|while)\b/) { | 1712 | if ($block =~/\b(?:if|for|while)\b/) { |
1713 | #print "APW: ALLOWED: block<$block>\n"; | ||
1628 | $allowed = 1; | 1714 | $allowed = 1; |
1629 | } | 1715 | } |
1630 | if (scalar(@statements) > 1) { | 1716 | if (statement_block_size($block) > 1) { |
1717 | #print "APW: ALLOWED: lines block<$block>\n"; | ||
1631 | $allowed = 1; | 1718 | $allowed = 1; |
1632 | } | 1719 | } |
1633 | } | 1720 | } |
1634 | if ($seen && !$allowed) { | 1721 | if ($seen && !$allowed) { |
1635 | WARN("braces {} are not necessary for any arm of this statement\n" . $herecurr); | 1722 | WARN("braces {} are not necessary for any arm of this statement\n" . $herectx); |
1636 | $suppress_ifbraces = $endln; | ||
1637 | } | 1723 | } |
1724 | # Either way we have looked over this whole | ||
1725 | # statement and said what needs to be said. | ||
1726 | $suppress_ifbraces = $endln; | ||
1638 | } | 1727 | } |
1639 | } | 1728 | } |
1640 | if ($linenr > $suppress_ifbraces && | 1729 | if ($linenr > $suppress_ifbraces && |
1641 | $line =~ /\b(if|while|for|else)\b/) { | 1730 | $line =~ /\b(if|while|for|else)\b/) { |
1642 | # Locate the end of the opening statement. | 1731 | my ($level, $endln, @chunks) = |
1643 | my @control = ctx_statement($linenr, $realcnt, 0); | 1732 | ctx_statement_full($linenr, $realcnt, $-[0]); |
1644 | my $nr = $linenr + (scalar(@control) - 1); | 1733 | |
1645 | my $cnt = $realcnt - (scalar(@control) - 1); | 1734 | my $allowed = 0; |
1646 | 1735 | ||
1647 | my $off = $realcnt - $cnt; | 1736 | # Check the pre-context. |
1648 | #print "$off: line<$line>end<" . $lines[$nr - 1] . ">\n"; | 1737 | if (substr($line, 0, $-[0]) =~ /(\}\s*)$/) { |
1649 | 1738 | #print "APW: ALLOWED: pre<$1>\n"; | |
1650 | # If this is is a braced statement group check it | 1739 | $allowed = 1; |
1651 | if ($lines[$nr - 1] =~ /{\s*$/) { | 1740 | } |
1652 | my ($lvl, @block) = ctx_block_level($nr, $cnt); | 1741 | # Check the condition. |
1653 | 1742 | my ($cond, $block) = @{$chunks[0]}; | |
1654 | my $stmt = join("\n", @block); | 1743 | if (defined $cond) { |
1655 | # Drop the diff line leader. | 1744 | substr($block, 0, length($cond)) = ''; |
1656 | $stmt =~ s/\n./\n/g; | 1745 | } |
1657 | # Drop the code outside the block. | 1746 | if (statement_lines($cond) > 1) { |
1658 | $stmt =~ s/(^[^{]*){\s*//; | 1747 | #print "APW: ALLOWED: cond<$cond>\n"; |
1659 | my $before = $1; | 1748 | $allowed = 1; |
1660 | $stmt =~ s/\s*}([^}]*$)//; | 1749 | } |
1661 | my $after = $1; | 1750 | if ($block =~/\b(?:if|for|while)\b/) { |
1662 | 1751 | #print "APW: ALLOWED: block<$block>\n"; | |
1663 | #print "block<" . join(' ', @block) . "><" . scalar(@block) . ">\n"; | 1752 | $allowed = 1; |
1664 | #print "before<$before> stmt<$stmt> after<$after>\n\n"; | 1753 | } |
1665 | 1754 | if (statement_block_size($block) > 1) { | |
1666 | # Count the newlines, if there is only one | 1755 | #print "APW: ALLOWED: lines block<$block>\n"; |
1667 | # then the block should not have {}'s. | 1756 | $allowed = 1; |
1668 | my @lines = ($stmt =~ /\n/g); | 1757 | } |
1669 | my @statements = ($stmt =~ /;/g); | 1758 | # Check the post-context. |
1670 | #print "lines<" . scalar(@lines) . ">\n"; | 1759 | if (defined $chunks[1]) { |
1671 | #print "statements<" . scalar(@statements) . ">\n"; | 1760 | my ($cond, $block) = @{$chunks[1]}; |
1672 | if ($lvl == 0 && scalar(@lines) == 0 && | 1761 | if (defined $cond) { |
1673 | scalar(@statements) < 2 && | 1762 | substr($block, 0, length($cond)) = ''; |
1674 | $stmt !~ /{/ && $stmt !~ /\bif\b/ && | 1763 | } |
1675 | $before !~ /}/ && $after !~ /{/) { | 1764 | if ($block =~ /^\s*\{/) { |
1676 | my $herectx = "$here\n" . join("\n", @control, @block[1 .. $#block]) . "\n"; | 1765 | #print "APW: ALLOWED: chunk-1 block<$block>\n"; |
1677 | shift(@block); | 1766 | $allowed = 1; |
1678 | WARN("braces {} are not necessary for single statement blocks\n" . $herectx); | 1767 | } |
1768 | } | ||
1769 | if ($level == 0 && $block =~ /^\s*\{/ && !$allowed) { | ||
1770 | my $herectx = $here . "\n"; | ||
1771 | my $end = $linenr + statement_rawlines($block) - 1; | ||
1772 | |||
1773 | for (my $ln = $linenr - 1; $ln < $end; $ln++) { | ||
1774 | $herectx .= $rawlines[$ln] . "\n"; | ||
1679 | } | 1775 | } |
1776 | |||
1777 | WARN("braces {} are not necessary for single statement blocks\n" . $herectx); | ||
1680 | } | 1778 | } |
1681 | } | 1779 | } |
1682 | 1780 | ||
@@ -1828,15 +1926,6 @@ sub process { | |||
1828 | print "are false positives report them to the maintainer, see\n"; | 1926 | print "are false positives report them to the maintainer, see\n"; |
1829 | print "CHECKPATCH in MAINTAINERS.\n"; | 1927 | print "CHECKPATCH in MAINTAINERS.\n"; |
1830 | } | 1928 | } |
1831 | print <<EOL if ($file == 1 && $quiet == 0); | ||
1832 | |||
1833 | WARNING: Using --file mode. Please do not send patches to linux-kernel | ||
1834 | that change whole existing files if you did not significantly change most | ||
1835 | of the the file for other reasons anyways or just wrote the file newly | ||
1836 | from scratch. Pure code style patches have a significant cost in a | ||
1837 | quickly changing code base like Linux because they cause rejects | ||
1838 | with other changes. | ||
1839 | EOL | ||
1840 | 1929 | ||
1841 | return $clean; | 1930 | return $clean; |
1842 | } | 1931 | } |
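The reworked brace checks above no longer count lines and semicolons inline; they call the new statement_lines()/statement_rawlines()/statement_block_size() helpers on each conditional/block pair returned by ctx_statement_full(). The standalone Perl sketch below (made-up input, not part of checkpatch.pl) mirrors two of those helpers to show what they return for a two-line condition and a two-statement block; both results are greater than 1, which is exactly the case where the checks leave the braces alone:

#!/usr/bin/perl -w
# Sketch only: mirrors checkpatch's statement_lines()/statement_block_size()
# on hand-made input.  Each line carries a one-character diff prefix
# ('+', '-' or ' ') which is stripped before counting.
use strict;

sub statement_lines {
	my ($stmt) = @_;

	$stmt =~ s/(^|\n)./$1/g;	# drop the diff prefix from every line
	$stmt =~ s/^\s*//;
	$stmt =~ s/\s*$//;

	my @stmt_lines = ($stmt =~ /\n/g);

	return $#stmt_lines + 2;	# newline count + 1 == number of lines
}

sub statement_block_size {
	my ($stmt) = @_;

	$stmt =~ s/(^|\n)./$1/g;
	$stmt =~ s/^\s*{//;
	$stmt =~ s/}\s*$//;
	$stmt =~ s/^\s*//;
	$stmt =~ s/\s*$//;

	my @stmt_lines = ($stmt =~ /\n/g);
	my @stmt_statements = ($stmt =~ /;/g);

	my $lines = $#stmt_lines + 2;
	my $statements = $#stmt_statements + 1;

	return $lines > $statements ? $lines : $statements;
}

my $cond  = "+\tif (foo &&\n+\t    bar)";
my $block = "+\t{\n+\t\tdo_one();\n+\t\tdo_two();\n+\t}";

printf "condition spans %d lines\n", statement_lines($cond);	# prints 2
printf "block size is %d\n", statement_block_size($block);	# prints 2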
diff --git a/sound/isa/sb/sb8_main.c b/sound/isa/sb/sb8_main.c index 6304c3a89ba0..fe03bb820532 100644 --- a/sound/isa/sb/sb8_main.c +++ b/sound/isa/sb/sb8_main.c | |||
@@ -277,7 +277,7 @@ static int snd_sb8_capture_prepare(struct snd_pcm_substream *substream) | |||
277 | } else { | 277 | } else { |
278 | snd_sbdsp_command(chip, 256 - runtime->rate_den); | 278 | snd_sbdsp_command(chip, 256 - runtime->rate_den); |
279 | } | 279 | } |
280 | if (chip->capture_format != SB_DSP_OUTPUT) { | 280 | if (chip->capture_format != SB_DSP_INPUT) { |
281 | count--; | 281 | count--; |
282 | snd_sbdsp_command(chip, SB_DSP_BLOCK_SIZE); | 282 | snd_sbdsp_command(chip, SB_DSP_BLOCK_SIZE); |
283 | snd_sbdsp_command(chip, count & 0xff); | 283 | snd_sbdsp_command(chip, count & 0xff); |
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index 19f08846d6fc..c8649282c2cf 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c | |||
@@ -1778,9 +1778,9 @@ static hda_nid_t ad1988_capsrc_nids[3] = { | |||
1778 | static struct hda_input_mux ad1988_6stack_capture_source = { | 1778 | static struct hda_input_mux ad1988_6stack_capture_source = { |
1779 | .num_items = 5, | 1779 | .num_items = 5, |
1780 | .items = { | 1780 | .items = { |
1781 | { "Front Mic", 0x0 }, | 1781 | { "Front Mic", 0x1 }, /* port-B */ |
1782 | { "Line", 0x1 }, | 1782 | { "Line", 0x2 }, /* port-C */ |
1783 | { "Mic", 0x4 }, | 1783 | { "Mic", 0x4 }, /* port-E */ |
1784 | { "CD", 0x5 }, | 1784 | { "CD", 0x5 }, |
1785 | { "Mix", 0x9 }, | 1785 | { "Mix", 0x9 }, |
1786 | }, | 1786 | }, |
@@ -1789,7 +1789,7 @@ static struct hda_input_mux ad1988_6stack_capture_source = { | |||
1789 | static struct hda_input_mux ad1988_laptop_capture_source = { | 1789 | static struct hda_input_mux ad1988_laptop_capture_source = { |
1790 | .num_items = 3, | 1790 | .num_items = 3, |
1791 | .items = { | 1791 | .items = { |
1792 | { "Mic/Line", 0x0 }, | 1792 | { "Mic/Line", 0x1 }, /* port-B */ |
1793 | { "CD", 0x5 }, | 1793 | { "CD", 0x5 }, |
1794 | { "Mix", 0x9 }, | 1794 | { "Mix", 0x9 }, |
1795 | }, | 1795 | }, |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index f7cd3a804b11..7206b30cbf94 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -1230,6 +1230,11 @@ static struct hda_verb cxt5047_toshiba_init_verbs[] = { | |||
1230 | static struct hda_verb cxt5047_hp_init_verbs[] = { | 1230 | static struct hda_verb cxt5047_hp_init_verbs[] = { |
1231 | /* pin sensing on HP jack */ | 1231 | /* pin sensing on HP jack */ |
1232 | {0x13, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT}, | 1232 | {0x13, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT}, |
1233 | /* 0x13 is actually shared by both HP and speaker; | ||
1234 | * setting the connection to 0 (=0x19) makes the master volume control | ||
1235 | * work, mysteriously... | ||
1236 | */ | ||
1237 | {0x13, AC_VERB_SET_CONNECT_SEL, 0x0}, | ||
1233 | /* Record selector: Ext Mic */ | 1238 | /* Record selector: Ext Mic */ |
1234 | {0x12, AC_VERB_SET_CONNECT_SEL,0x03}, | 1239 | {0x12, AC_VERB_SET_CONNECT_SEL,0x03}, |
1235 | {0x19, AC_VERB_SET_AMP_GAIN_MUTE, | 1240 | {0x19, AC_VERB_SET_AMP_GAIN_MUTE, |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 777f8c01ca7a..33282f9c01c7 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -3973,8 +3973,8 @@ static struct snd_kcontrol_new alc260_fujitsu_mixer[] = { | |||
3973 | ALC_PIN_MODE("Mic/Line Jack Mode", 0x12, ALC_PIN_DIR_IN), | 3973 | ALC_PIN_MODE("Mic/Line Jack Mode", 0x12, ALC_PIN_DIR_IN), |
3974 | HDA_CODEC_VOLUME("Beep Playback Volume", 0x07, 0x05, HDA_INPUT), | 3974 | HDA_CODEC_VOLUME("Beep Playback Volume", 0x07, 0x05, HDA_INPUT), |
3975 | HDA_CODEC_MUTE("Beep Playback Switch", 0x07, 0x05, HDA_INPUT), | 3975 | HDA_CODEC_MUTE("Beep Playback Switch", 0x07, 0x05, HDA_INPUT), |
3976 | HDA_CODEC_VOLUME("Internal Speaker Playback Volume", 0x09, 0x0, HDA_OUTPUT), | 3976 | HDA_CODEC_VOLUME("Speaker Playback Volume", 0x09, 0x0, HDA_OUTPUT), |
3977 | HDA_BIND_MUTE("Internal Speaker Playback Switch", 0x09, 2, HDA_INPUT), | 3977 | HDA_BIND_MUTE("Speaker Playback Switch", 0x09, 2, HDA_INPUT), |
3978 | { } /* end */ | 3978 | { } /* end */ |
3979 | }; | 3979 | }; |
3980 | 3980 | ||
@@ -4005,9 +4005,9 @@ static struct snd_kcontrol_new alc260_acer_mixer[] = { | |||
4005 | HDA_CODEC_VOLUME("Master Playback Volume", 0x08, 0x0, HDA_OUTPUT), | 4005 | HDA_CODEC_VOLUME("Master Playback Volume", 0x08, 0x0, HDA_OUTPUT), |
4006 | HDA_BIND_MUTE("Master Playback Switch", 0x08, 2, HDA_INPUT), | 4006 | HDA_BIND_MUTE("Master Playback Switch", 0x08, 2, HDA_INPUT), |
4007 | ALC_PIN_MODE("Headphone Jack Mode", 0x0f, ALC_PIN_DIR_INOUT), | 4007 | ALC_PIN_MODE("Headphone Jack Mode", 0x0f, ALC_PIN_DIR_INOUT), |
4008 | HDA_CODEC_VOLUME_MONO("Mono Speaker Playback Volume", 0x0a, 1, 0x0, | 4008 | HDA_CODEC_VOLUME_MONO("Speaker Playback Volume", 0x0a, 1, 0x0, |
4009 | HDA_OUTPUT), | 4009 | HDA_OUTPUT), |
4010 | HDA_BIND_MUTE_MONO("Mono Speaker Playback Switch", 0x0a, 1, 2, | 4010 | HDA_BIND_MUTE_MONO("Speaker Playback Switch", 0x0a, 1, 2, |
4011 | HDA_INPUT), | 4011 | HDA_INPUT), |
4012 | HDA_CODEC_VOLUME("CD Playback Volume", 0x07, 0x04, HDA_INPUT), | 4012 | HDA_CODEC_VOLUME("CD Playback Volume", 0x07, 0x04, HDA_INPUT), |
4013 | HDA_CODEC_MUTE("CD Playback Switch", 0x07, 0x04, HDA_INPUT), | 4013 | HDA_CODEC_MUTE("CD Playback Switch", 0x07, 0x04, HDA_INPUT), |
@@ -7639,6 +7639,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = { | |||
7639 | SND_PCI_QUIRK(0x17aa, 0x3bfc, "Lenovo NB0763", ALC883_LENOVO_NB0763), | 7639 | SND_PCI_QUIRK(0x17aa, 0x3bfc, "Lenovo NB0763", ALC883_LENOVO_NB0763), |
7640 | SND_PCI_QUIRK(0x17aa, 0x3bfd, "Lenovo NB0763", ALC883_LENOVO_NB0763), | 7640 | SND_PCI_QUIRK(0x17aa, 0x3bfd, "Lenovo NB0763", ALC883_LENOVO_NB0763), |
7641 | SND_PCI_QUIRK(0x17c0, 0x4071, "MEDION MD2", ALC883_MEDION_MD2), | 7641 | SND_PCI_QUIRK(0x17c0, 0x4071, "MEDION MD2", ALC883_MEDION_MD2), |
7642 | SND_PCI_QUIRK(0x17f2, 0x5000, "Albatron KI690-AM2", ALC883_6ST_DIG), | ||
7642 | SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66), | 7643 | SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66), |
7643 | SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch), | 7644 | SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch), |
7644 | {} | 7645 | {} |
@@ -8102,7 +8103,7 @@ static struct snd_kcontrol_new alc262_base_mixer[] = { | |||
8102 | HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), | 8103 | HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), |
8103 | HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), | 8104 | HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), |
8104 | /* HDA_CODEC_VOLUME("PC Beep Playback Volume", 0x0b, 0x05, HDA_INPUT), | 8105 | /* HDA_CODEC_VOLUME("PC Beep Playback Volume", 0x0b, 0x05, HDA_INPUT), |
8105 | HDA_CODEC_MUTE("PC Beelp Playback Switch", 0x0b, 0x05, HDA_INPUT), */ | 8106 | HDA_CODEC_MUTE("PC Beep Playback Switch", 0x0b, 0x05, HDA_INPUT), */ |
8106 | HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT), | 8107 | HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT), |
8107 | HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT), | 8108 | HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT), |
8108 | HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x0e, 2, 0x0, HDA_OUTPUT), | 8109 | HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x0e, 2, 0x0, HDA_OUTPUT), |
@@ -8124,7 +8125,7 @@ static struct snd_kcontrol_new alc262_hippo1_mixer[] = { | |||
8124 | HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), | 8125 | HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), |
8125 | HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), | 8126 | HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), |
8126 | /* HDA_CODEC_VOLUME("PC Beep Playback Volume", 0x0b, 0x05, HDA_INPUT), | 8127 | /* HDA_CODEC_VOLUME("PC Beep Playback Volume", 0x0b, 0x05, HDA_INPUT), |
8127 | HDA_CODEC_MUTE("PC Beelp Playback Switch", 0x0b, 0x05, HDA_INPUT), */ | 8128 | HDA_CODEC_MUTE("PC Beep Playback Switch", 0x0b, 0x05, HDA_INPUT), */ |
8128 | /*HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT),*/ | 8129 | /*HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT),*/ |
8129 | HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT), | 8130 | HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT), |
8130 | { } /* end */ | 8131 | { } /* end */ |
@@ -9238,6 +9239,7 @@ static struct snd_pci_quirk alc262_cfg_tbl[] = { | |||
9238 | SND_PCI_QUIRK(0x104d, 0x900e, "Sony ASSAMD", ALC262_SONY_ASSAMD), | 9239 | SND_PCI_QUIRK(0x104d, 0x900e, "Sony ASSAMD", ALC262_SONY_ASSAMD), |
9239 | SND_PCI_QUIRK(0x104d, 0x9015, "Sony 0x9015", ALC262_SONY_ASSAMD), | 9240 | SND_PCI_QUIRK(0x104d, 0x9015, "Sony 0x9015", ALC262_SONY_ASSAMD), |
9240 | SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu", ALC262_FUJITSU), | 9241 | SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu", ALC262_FUJITSU), |
9242 | SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FUJITSU), | ||
9241 | SND_PCI_QUIRK(0x144d, 0xc032, "Samsung Q1 Ultra", ALC262_ULTRA), | 9243 | SND_PCI_QUIRK(0x144d, 0xc032, "Samsung Q1 Ultra", ALC262_ULTRA), |
9242 | SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_BENQ_ED8), | 9244 | SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_BENQ_ED8), |
9243 | SND_PCI_QUIRK(0x17ff, 0x058d, "Benq T31-16", ALC262_BENQ_T31), | 9245 | SND_PCI_QUIRK(0x17ff, 0x058d, "Benq T31-16", ALC262_BENQ_T31), |
@@ -12993,8 +12995,8 @@ static struct snd_kcontrol_new alc662_lenovo_101e_mixer[] = { | |||
12993 | static struct snd_kcontrol_new alc662_eeepc_p701_mixer[] = { | 12995 | static struct snd_kcontrol_new alc662_eeepc_p701_mixer[] = { |
12994 | HDA_CODEC_MUTE("Speaker Playback Switch", 0x14, 0x0, HDA_OUTPUT), | 12996 | HDA_CODEC_MUTE("Speaker Playback Switch", 0x14, 0x0, HDA_OUTPUT), |
12995 | 12997 | ||
12996 | HDA_CODEC_VOLUME("LineOut Playback Volume", 0x02, 0x0, HDA_OUTPUT), | 12998 | HDA_CODEC_VOLUME("Line-Out Playback Volume", 0x02, 0x0, HDA_OUTPUT), |
12997 | HDA_CODEC_MUTE("LineOut Playback Switch", 0x1b, 0x0, HDA_OUTPUT), | 12999 | HDA_CODEC_MUTE("Line-Out Playback Switch", 0x1b, 0x0, HDA_OUTPUT), |
12998 | 13000 | ||
12999 | HDA_CODEC_VOLUME("e-Mic Boost", 0x18, 0, HDA_INPUT), | 13001 | HDA_CODEC_VOLUME("e-Mic Boost", 0x18, 0, HDA_INPUT), |
13000 | HDA_CODEC_VOLUME("e-Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), | 13002 | HDA_CODEC_VOLUME("e-Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), |
@@ -13007,8 +13009,8 @@ static struct snd_kcontrol_new alc662_eeepc_p701_mixer[] = { | |||
13007 | }; | 13009 | }; |
13008 | 13010 | ||
13009 | static struct snd_kcontrol_new alc662_eeepc_ep20_mixer[] = { | 13011 | static struct snd_kcontrol_new alc662_eeepc_ep20_mixer[] = { |
13010 | HDA_CODEC_VOLUME("LineOut Playback Volume", 0x02, 0x0, HDA_OUTPUT), | 13012 | HDA_CODEC_VOLUME("Line-Out Playback Volume", 0x02, 0x0, HDA_OUTPUT), |
13011 | HDA_CODEC_MUTE("LineOut Playback Switch", 0x14, 0x0, HDA_OUTPUT), | 13013 | HDA_CODEC_MUTE("Line-Out Playback Switch", 0x14, 0x0, HDA_OUTPUT), |
13012 | HDA_CODEC_VOLUME("Surround Playback Volume", 0x03, 0x0, HDA_OUTPUT), | 13014 | HDA_CODEC_VOLUME("Surround Playback Volume", 0x03, 0x0, HDA_OUTPUT), |
13013 | HDA_BIND_MUTE("Surround Playback Switch", 0x03, 2, HDA_INPUT), | 13015 | HDA_BIND_MUTE("Surround Playback Switch", 0x03, 2, HDA_INPUT), |
13014 | HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x04, 1, 0x0, HDA_OUTPUT), | 13016 | HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x04, 1, 0x0, HDA_OUTPUT), |
diff --git a/sound/pci/ice1712/phase.c b/sound/pci/ice1712/phase.c index 9ab4a9f383cb..5a158b73dcaa 100644 --- a/sound/pci/ice1712/phase.c +++ b/sound/pci/ice1712/phase.c | |||
@@ -51,7 +51,7 @@ | |||
51 | struct phase28_spec { | 51 | struct phase28_spec { |
52 | unsigned short master[2]; | 52 | unsigned short master[2]; |
53 | unsigned short vol[8]; | 53 | unsigned short vol[8]; |
54 | } phase28; | 54 | }; |
55 | 55 | ||
56 | /* WM8770 registers */ | 56 | /* WM8770 registers */ |
57 | #define WM_DAC_ATTEN 0x00 /* DAC1-8 analog attenuation */ | 57 | #define WM_DAC_ATTEN 0x00 /* DAC1-8 analog attenuation */ |
diff --git a/sound/pci/ice1712/revo.c b/sound/pci/ice1712/revo.c index ddd5fc8d4fe1..301bf929acd9 100644 --- a/sound/pci/ice1712/revo.c +++ b/sound/pci/ice1712/revo.c | |||
@@ -36,7 +36,7 @@ | |||
36 | struct revo51_spec { | 36 | struct revo51_spec { |
37 | struct snd_i2c_device *dev; | 37 | struct snd_i2c_device *dev; |
38 | struct snd_pt2258 *pt2258; | 38 | struct snd_pt2258 *pt2258; |
39 | } revo51; | 39 | }; |
40 | 40 | ||
41 | static void revo_i2s_mclk_changed(struct snd_ice1712 *ice) | 41 | static void revo_i2s_mclk_changed(struct snd_ice1712 *ice) |
42 | { | 42 | { |
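The phase.c and revo.c hunks above both drop a stray identifier after the closing brace of a private-spec struct. In C, an identifier in that position does not name the type; it defines an object of that type, so the old code silently created an unused file-scope variable alongside the type declaration. A minimal sketch of the difference, using hypothetical type names rather than the drivers' own:

/* Illustration only -- hypothetical types, not taken from the patch. */
struct spec_with_stray {
        unsigned short vol[8];
} stray;                /* declares the type AND defines an unused variable */

struct spec_type_only {
        unsigned short vol[8];
};                      /* declares the type only, which is all the driver needs */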
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c index 061072c7db03..c52abd0bf22e 100644 --- a/sound/pci/intel8x0.c +++ b/sound/pci/intel8x0.c | |||
@@ -1708,6 +1708,12 @@ static struct ac97_pcm ac97_pcm_defs[] __devinitdata = { | |||
1708 | }; | 1708 | }; |
1709 | 1709 | ||
1710 | static struct ac97_quirk ac97_quirks[] __devinitdata = { | 1710 | static struct ac97_quirk ac97_quirks[] __devinitdata = { |
1711 | { | ||
1712 | .subvendor = 0x0e11, | ||
1713 | .subdevice = 0x000e, | ||
1714 | .name = "Compaq Deskpro EN", /* AD1885 */ | ||
1715 | .type = AC97_TUNE_HP_ONLY | ||
1716 | }, | ||
1711 | { | 1717 | { |
1712 | .subvendor = 0x0e11, | 1718 | .subvendor = 0x0e11, |
1713 | .subdevice = 0x008a, | 1719 | .subdevice = 0x008a, |
@@ -1740,6 +1746,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = { | |||
1740 | }, | 1746 | }, |
1741 | { | 1747 | { |
1742 | .subvendor = 0x1025, | 1748 | .subvendor = 0x1025, |
1749 | .subdevice = 0x0082, | ||
1750 | .name = "Acer Travelmate 2310", | ||
1751 | .type = AC97_TUNE_HP_ONLY | ||
1752 | }, | ||
1753 | { | ||
1754 | .subvendor = 0x1025, | ||
1743 | .subdevice = 0x0083, | 1755 | .subdevice = 0x0083, |
1744 | .name = "Acer Aspire 3003LCi", | 1756 | .name = "Acer Aspire 3003LCi", |
1745 | .type = AC97_TUNE_HP_ONLY | 1757 | .type = AC97_TUNE_HP_ONLY |
diff --git a/sound/pci/oxygen/hifier.c b/sound/pci/oxygen/hifier.c index 3ea1f05228a1..666f69a3312e 100644 --- a/sound/pci/oxygen/hifier.c +++ b/sound/pci/oxygen/hifier.c | |||
@@ -150,6 +150,7 @@ static const struct oxygen_model model_hifier = { | |||
150 | .shortname = "C-Media CMI8787", | 150 | .shortname = "C-Media CMI8787", |
151 | .longname = "C-Media Oxygen HD Audio", | 151 | .longname = "C-Media Oxygen HD Audio", |
152 | .chip = "CMI8788", | 152 | .chip = "CMI8788", |
153 | .owner = THIS_MODULE, | ||
153 | .init = hifier_init, | 154 | .init = hifier_init, |
154 | .control_filter = hifier_control_filter, | 155 | .control_filter = hifier_control_filter, |
155 | .mixer_init = hifier_mixer_init, | 156 | .mixer_init = hifier_mixer_init, |
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c index 40e92f5cd69c..d163397b85cc 100644 --- a/sound/pci/oxygen/virtuoso.c +++ b/sound/pci/oxygen/virtuoso.c | |||
@@ -389,6 +389,7 @@ static const struct oxygen_model model_xonar = { | |||
389 | .shortname = "Asus AV200", | 389 | .shortname = "Asus AV200", |
390 | .longname = "Asus Virtuoso 200", | 390 | .longname = "Asus Virtuoso 200", |
391 | .chip = "AV200", | 391 | .chip = "AV200", |
392 | .owner = THIS_MODULE, | ||
392 | .init = xonar_init, | 393 | .init = xonar_init, |
393 | .control_filter = xonar_control_filter, | 394 | .control_filter = xonar_control_filter, |
394 | .mixer_init = xonar_mixer_init, | 395 | .mixer_init = xonar_mixer_init, |
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c index 710e0287ef8c..569ecaca0e8b 100644 --- a/sound/soc/codecs/tlv320aic3x.c +++ b/sound/soc/codecs/tlv320aic3x.c | |||
@@ -681,8 +681,8 @@ static const struct aic3x_rate_divs aic3x_divs[] = { | |||
681 | {22579200, 48000, 48000, 0x0, 8, 7075}, | 681 | {22579200, 48000, 48000, 0x0, 8, 7075}, |
682 | {33868800, 48000, 48000, 0x0, 5, 8049}, | 682 | {33868800, 48000, 48000, 0x0, 5, 8049}, |
683 | /* 64k */ | 683 | /* 64k */ |
684 | {22579200, 96000, 96000, 0x1, 8, 7075}, | 684 | {22579200, 64000, 96000, 0x1, 8, 7075}, |
685 | {33868800, 96000, 96000, 0x1, 5, 8049}, | 685 | {33868800, 64000, 96000, 0x1, 5, 8049}, |
686 | /* 88.2k */ | 686 | /* 88.2k */ |
687 | {22579200, 88200, 88200, 0x0, 8, 0}, | 687 | {22579200, 88200, 88200, 0x0, 8, 0}, |
688 | {33868800, 88200, 88200, 0x0, 5, 3333}, | 688 | {33868800, 88200, 88200, 0x0, 5, 3333}, |
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c index 590baea3c4c3..524f7450804f 100644 --- a/sound/soc/codecs/wm9712.c +++ b/sound/soc/codecs/wm9712.c | |||
@@ -176,7 +176,8 @@ static int wm9712_add_controls(struct snd_soc_codec *codec) | |||
176 | * the codec only has a single control that is shared by both channels. | 176 | * the codec only has a single control that is shared by both channels. |
177 | * This makes it impossible to determine the audio path. | 177 | * This makes it impossible to determine the audio path. |
178 | */ | 178 | */ |
179 | static int mixer_event (struct snd_soc_dapm_widget *w, int event) | 179 | static int mixer_event(struct snd_soc_dapm_widget *w, |
180 | struct snd_kcontrol *k, int event) | ||
180 | { | 181 | { |
181 | u16 l, r, beep, line, phone, mic, pcm, aux; | 182 | u16 l, r, beep, line, phone, mic, pcm, aux; |
182 | 183 | ||
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c index 3f34e531bebf..1a70a6ac98ce 100644 --- a/sound/soc/pxa/corgi.c +++ b/sound/soc/pxa/corgi.c | |||
@@ -215,7 +215,8 @@ static int corgi_set_spk(struct snd_kcontrol *kcontrol, | |||
215 | return 1; | 215 | return 1; |
216 | } | 216 | } |
217 | 217 | ||
218 | static int corgi_amp_event(struct snd_soc_dapm_widget *w, int event) | 218 | static int corgi_amp_event(struct snd_soc_dapm_widget *w, |
219 | struct snd_kcontrol *k, int event) | ||
219 | { | 220 | { |
220 | if (SND_SOC_DAPM_EVENT_ON(event)) | 221 | if (SND_SOC_DAPM_EVENT_ON(event)) |
221 | set_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_APM_ON); | 222 | set_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_APM_ON); |
@@ -225,7 +226,8 @@ static int corgi_amp_event(struct snd_soc_dapm_widget *w, int event) | |||
225 | return 0; | 226 | return 0; |
226 | } | 227 | } |
227 | 228 | ||
228 | static int corgi_mic_event(struct snd_soc_dapm_widget *w, int event) | 229 | static int corgi_mic_event(struct snd_soc_dapm_widget *w, |
230 | struct snd_kcontrol *k, int event) | ||
229 | { | 231 | { |
230 | if (SND_SOC_DAPM_EVENT_ON(event)) | 232 | if (SND_SOC_DAPM_EVENT_ON(event)) |
231 | set_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_MIC_BIAS); | 233 | set_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_MIC_BIAS); |
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c index 5ae59bd309a3..4fbf8bba9627 100644 --- a/sound/soc/pxa/poodle.c +++ b/sound/soc/pxa/poodle.c | |||
@@ -196,7 +196,8 @@ static int poodle_set_spk(struct snd_kcontrol *kcontrol, | |||
196 | return 1; | 196 | return 1; |
197 | } | 197 | } |
198 | 198 | ||
199 | static int poodle_amp_event(struct snd_soc_dapm_widget *w, int event) | 199 | static int poodle_amp_event(struct snd_soc_dapm_widget *w, |
200 | struct snd_kcontrol *k, int event) | ||
200 | { | 201 | { |
201 | if (SND_SOC_DAPM_EVENT_ON(event)) | 202 | if (SND_SOC_DAPM_EVENT_ON(event)) |
202 | locomo_gpio_write(&poodle_locomo_device.dev, | 203 | locomo_gpio_write(&poodle_locomo_device.dev, |
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c index d56709e15435..ecca39033fcc 100644 --- a/sound/soc/pxa/spitz.c +++ b/sound/soc/pxa/spitz.c | |||
@@ -215,7 +215,8 @@ static int spitz_set_spk(struct snd_kcontrol *kcontrol, | |||
215 | return 1; | 215 | return 1; |
216 | } | 216 | } |
217 | 217 | ||
218 | static int spitz_mic_bias(struct snd_soc_dapm_widget *w, int event) | 218 | static int spitz_mic_bias(struct snd_soc_dapm_widget *w, |
219 | struct snd_kcontrol *k, int event) | ||
219 | { | 220 | { |
220 | if (machine_is_borzoi() || machine_is_spitz()) { | 221 | if (machine_is_borzoi() || machine_is_spitz()) { |
221 | if (SND_SOC_DAPM_EVENT_ON(event)) | 222 | if (SND_SOC_DAPM_EVENT_ON(event)) |
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c index e4d40b528ca4..7346d7e5d066 100644 --- a/sound/soc/pxa/tosa.c +++ b/sound/soc/pxa/tosa.c | |||
@@ -135,7 +135,8 @@ static int tosa_set_spk(struct snd_kcontrol *kcontrol, | |||
135 | } | 135 | } |
136 | 136 | ||
137 | /* tosa dapm event handlers */ | 137 | /* tosa dapm event handlers */ |
138 | static int tosa_hp_event(struct snd_soc_dapm_widget *w, int event) | 138 | static int tosa_hp_event(struct snd_soc_dapm_widget *w, |
139 | struct snd_kcontrol *k, int event) | ||
139 | { | 140 | { |
140 | if (SND_SOC_DAPM_EVENT_ON(event)) | 141 | if (SND_SOC_DAPM_EVENT_ON(event)) |
141 | set_tc6393_gpio(&tc6393_device.dev,TOSA_TC6393_L_MUTE); | 142 | set_tc6393_gpio(&tc6393_device.dev,TOSA_TC6393_L_MUTE); |
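The wm9712 codec driver and the Corgi, Poodle, Spitz and Tosa machine drivers above all widen their DAPM event callbacks from (widget, event) to (widget, kcontrol, event). A minimal sketch of a handler using the new signature, assuming a hypothetical board amplifier helper (the handler and helper names are not from the patch; the existing handlers simply ignore the extra kcontrol argument):

/* Sketch only: DAPM event handler with the widened three-argument signature.
 * example_board_amp_enable() is a hypothetical stand-in for a GPIO helper. */
static int example_amp_event(struct snd_soc_dapm_widget *w,
                             struct snd_kcontrol *k, int event)
{
        if (SND_SOC_DAPM_EVENT_ON(event))
                example_board_amp_enable(1);    /* amp on when the widget powers up */
        else
                example_board_amp_enable(0);    /* amp off otherwise */
        return 0;
}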
diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c index 8fa935665702..675672f313be 100644 --- a/sound/usb/usbaudio.c +++ b/sound/usb/usbaudio.c | |||
@@ -479,6 +479,33 @@ static int retire_playback_sync_urb_hs(struct snd_usb_substream *subs, | |||
479 | return 0; | 479 | return 0; |
480 | } | 480 | } |
481 | 481 | ||
482 | /* | ||
483 | * process after E-Mu 0202/0404 high speed playback sync complete | ||
484 | * | ||
485 | * These devices return the number of samples per packet instead of the number | ||
486 | * of samples per microframe. | ||
487 | */ | ||
488 | static int retire_playback_sync_urb_hs_emu(struct snd_usb_substream *subs, | ||
489 | struct snd_pcm_runtime *runtime, | ||
490 | struct urb *urb) | ||
491 | { | ||
492 | unsigned int f; | ||
493 | unsigned long flags; | ||
494 | |||
495 | if (urb->iso_frame_desc[0].status == 0 && | ||
496 | urb->iso_frame_desc[0].actual_length == 4) { | ||
497 | f = combine_quad((u8*)urb->transfer_buffer) & 0x0fffffff; | ||
498 | f >>= subs->datainterval; | ||
499 | if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) { | ||
500 | spin_lock_irqsave(&subs->lock, flags); | ||
501 | subs->freqm = f; | ||
502 | spin_unlock_irqrestore(&subs->lock, flags); | ||
503 | } | ||
504 | } | ||
505 | |||
506 | return 0; | ||
507 | } | ||
508 | |||
482 | /* determine the number of frames in the next packet */ | 509 | /* determine the number of frames in the next packet */ |
483 | static int snd_usb_audio_next_packet_size(struct snd_usb_substream *subs) | 510 | static int snd_usb_audio_next_packet_size(struct snd_usb_substream *subs) |
484 | { | 511 | { |
@@ -2219,10 +2246,17 @@ static void init_substream(struct snd_usb_stream *as, int stream, struct audiofo | |||
2219 | subs->stream = as; | 2246 | subs->stream = as; |
2220 | subs->direction = stream; | 2247 | subs->direction = stream; |
2221 | subs->dev = as->chip->dev; | 2248 | subs->dev = as->chip->dev; |
2222 | if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) | 2249 | if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) { |
2223 | subs->ops = audio_urb_ops[stream]; | 2250 | subs->ops = audio_urb_ops[stream]; |
2224 | else | 2251 | } else { |
2225 | subs->ops = audio_urb_ops_high_speed[stream]; | 2252 | subs->ops = audio_urb_ops_high_speed[stream]; |
2253 | switch (as->chip->usb_id) { | ||
2254 | case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */ | ||
2255 | case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */ | ||
2256 | subs->ops.retire_sync = retire_playback_sync_urb_hs_emu; | ||
2257 | break; | ||
2258 | } | ||
2259 | } | ||
2226 | snd_pcm_set_ops(as->pcm, stream, | 2260 | snd_pcm_set_ops(as->pcm, stream, |
2227 | stream == SNDRV_PCM_STREAM_PLAYBACK ? | 2261 | stream == SNDRV_PCM_STREAM_PLAYBACK ? |
2228 | &snd_usb_playback_ops : &snd_usb_capture_ops); | 2262 | &snd_usb_playback_ops : &snd_usb_capture_ops); |
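The new E-Mu sync handler converts the device's samples-per-packet feedback into the per-microframe figure the rest of the driver compares against freqn/freqmax, by shifting right by the endpoint's data interval (a packet spans 2^datainterval microframes at high speed). A worked illustration with made-up numbers, ignoring the feedback value's fixed-point representation for clarity:

/* Illustration only -- hypothetical values, not measured from the hardware. */
static unsigned int emu_feedback_example(void)
{
        unsigned int reported = 96;     /* samples per packet, as the device reports */
        unsigned int datainterval = 3;  /* a packet spans 2^3 = 8 microframes */

        return reported >> datainterval;        /* 96 / 8 = 12 samples per microframe */
}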
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index 317f8e211cd2..4232fd75dd20 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c | |||
@@ -211,6 +211,10 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq) | |||
211 | case IOAPIC_LOWEST_PRIORITY: | 211 | case IOAPIC_LOWEST_PRIORITY: |
212 | vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector, | 212 | vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector, |
213 | deliver_bitmask); | 213 | deliver_bitmask); |
214 | #ifdef CONFIG_X86 | ||
215 | if (irq == 0) | ||
216 | vcpu = ioapic->kvm->vcpus[0]; | ||
217 | #endif | ||
214 | if (vcpu != NULL) | 218 | if (vcpu != NULL) |
215 | ioapic_inj_irq(ioapic, vcpu, vector, | 219 | ioapic_inj_irq(ioapic, vcpu, vector, |
216 | trig_mode, delivery_mode); | 220 | trig_mode, delivery_mode); |
@@ -220,6 +224,10 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq) | |||
220 | deliver_bitmask, vector, IOAPIC_LOWEST_PRIORITY); | 224 | deliver_bitmask, vector, IOAPIC_LOWEST_PRIORITY); |
221 | break; | 225 | break; |
222 | case IOAPIC_FIXED: | 226 | case IOAPIC_FIXED: |
227 | #ifdef CONFIG_X86 | ||
228 | if (irq == 0) | ||
229 | deliver_bitmask = 1; | ||
230 | #endif | ||
223 | for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) { | 231 | for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) { |
224 | if (!(deliver_bitmask & (1 << vcpu_id))) | 232 | if (!(deliver_bitmask & (1 << vcpu_id))) |
225 | continue; | 233 | continue; |
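For the fixed-delivery case the x86-only override forces deliver_bitmask to 1 when irq is 0, so only bit 0 is set and the delivery loop that follows injects the timer interrupt into vcpu 0 alone. A standalone illustration of that bit test (ordinary userspace C, not kernel code):

/* Illustration only: with the mask forced to 1, the test used in
 * ioapic_deliver() passes solely for vcpu_id 0. */
#include <stdio.h>

int main(void)
{
        unsigned long deliver_bitmask = 1;      /* x86 IRQ 0 override: bit 0 only */
        int vcpu_id;

        for (vcpu_id = 0; vcpu_id < 4; vcpu_id++)
                if (deliver_bitmask & (1UL << vcpu_id))
                        printf("deliver to vcpu %d\n", vcpu_id);  /* vcpu 0 only */
        return 0;
}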
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 32fbf8006969..b2e12893e3f4 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -169,6 +169,7 @@ static struct kvm *kvm_create_vm(void) | |||
169 | kvm_io_bus_init(&kvm->pio_bus); | 169 | kvm_io_bus_init(&kvm->pio_bus); |
170 | mutex_init(&kvm->lock); | 170 | mutex_init(&kvm->lock); |
171 | kvm_io_bus_init(&kvm->mmio_bus); | 171 | kvm_io_bus_init(&kvm->mmio_bus); |
172 | init_rwsem(&kvm->slots_lock); | ||
172 | spin_lock(&kvm_lock); | 173 | spin_lock(&kvm_lock); |
173 | list_add(&kvm->vm_list, &vm_list); | 174 | list_add(&kvm->vm_list, &vm_list); |
174 | spin_unlock(&kvm_lock); | 175 | spin_unlock(&kvm_lock); |
@@ -339,9 +340,9 @@ int kvm_set_memory_region(struct kvm *kvm, | |||
339 | { | 340 | { |
340 | int r; | 341 | int r; |
341 | 342 | ||
342 | down_write(¤t->mm->mmap_sem); | 343 | down_write(&kvm->slots_lock); |
343 | r = __kvm_set_memory_region(kvm, mem, user_alloc); | 344 | r = __kvm_set_memory_region(kvm, mem, user_alloc); |
344 | up_write(¤t->mm->mmap_sem); | 345 | up_write(&kvm->slots_lock); |
345 | return r; | 346 | return r; |
346 | } | 347 | } |
347 | EXPORT_SYMBOL_GPL(kvm_set_memory_region); | 348 | EXPORT_SYMBOL_GPL(kvm_set_memory_region); |
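kvm_set_memory_region() now serializes slot updates on the VM's own slots_lock rwsem, initialized in kvm_create_vm(), instead of piggybacking on the caller's mmap_sem. A minimal sketch of the intended writer/reader pattern, assuming lookups take the semaphore in shared mode (the reader-side conversion is not part of these hunks; function names are illustrative):

/* Sketch only: per-VM slots_lock usage. */
static void example_update_slots(struct kvm *kvm)
{
        down_write(&kvm->slots_lock);   /* exclusive: the slot layout may change */
        /* ... modify the memory slot array ... */
        up_write(&kvm->slots_lock);
}

static void example_lookup_slot(struct kvm *kvm)
{
        down_read(&kvm->slots_lock);    /* shared: layout stays stable while we look */
        /* ... translate a guest address against the slots ... */
        up_read(&kvm->slots_lock);
}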