diff options
38 files changed, 405 insertions, 182 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index b959659c5df..b3f35e5f9c9 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt | |||
| @@ -603,3 +603,19 @@ Why: The adm9240, w83792d and w83793 hardware monitoring drivers have | |||
| 603 | Who: Jean Delvare <khali@linux-fr.org> | 603 | Who: Jean Delvare <khali@linux-fr.org> |
| 604 | 604 | ||
| 605 | ---------------------------- | 605 | ---------------------------- |
| 606 | |||
| 607 | What: noswapaccount kernel command line parameter | ||
| 608 | When: 2.6.40 | ||
| 609 | Why: The original implementation of memsw feature enabled by | ||
| 610 | CONFIG_CGROUP_MEM_RES_CTLR_SWAP could be disabled by the noswapaccount | ||
| 611 | kernel parameter (introduced in 2.6.29-rc1). Later on, this decision | ||
| 612 | turned out to be not ideal because we cannot have the feature compiled | ||
| 613 | in and disabled by default and let only interested to enable it | ||
| 614 | (e.g. general distribution kernels might need it). Therefore we have | ||
| 615 | added swapaccount[=0|1] parameter (introduced in 2.6.37) which provides | ||
| 616 | the both possibilities. If we remove noswapaccount we will have | ||
| 617 | less command line parameters with the same functionality and we | ||
| 618 | can also cleanup the parameter handling a bit (). | ||
| 619 | Who: Michal Hocko <mhocko@suse.cz> | ||
| 620 | |||
| 621 | ---------------------------- | ||
diff --git a/Documentation/scheduler/sched-stats.txt b/Documentation/scheduler/sched-stats.txt index 01e69404ee5..1cd5d51bc76 100644 --- a/Documentation/scheduler/sched-stats.txt +++ b/Documentation/scheduler/sched-stats.txt | |||
| @@ -1,3 +1,7 @@ | |||
| 1 | Version 15 of schedstats dropped counters for some sched_yield: | ||
| 2 | yld_exp_empty, yld_act_empty and yld_both_empty. Otherwise, it is | ||
| 3 | identical to version 14. | ||
| 4 | |||
| 1 | Version 14 of schedstats includes support for sched_domains, which hit the | 5 | Version 14 of schedstats includes support for sched_domains, which hit the |
| 2 | mainline kernel in 2.6.20 although it is identical to the stats from version | 6 | mainline kernel in 2.6.20 although it is identical to the stats from version |
| 3 | 12 which was in the kernel from 2.6.13-2.6.19 (version 13 never saw a kernel | 7 | 12 which was in the kernel from 2.6.13-2.6.19 (version 13 never saw a kernel |
| @@ -28,32 +32,25 @@ to write their own scripts, the fields are described here. | |||
| 28 | 32 | ||
| 29 | CPU statistics | 33 | CPU statistics |
| 30 | -------------- | 34 | -------------- |
| 31 | cpu<N> 1 2 3 4 5 6 7 8 9 10 11 12 | 35 | cpu<N> 1 2 3 4 5 6 7 8 9 |
| 32 | |||
| 33 | NOTE: In the sched_yield() statistics, the active queue is considered empty | ||
| 34 | if it has only one process in it, since obviously the process calling | ||
| 35 | sched_yield() is that process. | ||
| 36 | 36 | ||
| 37 | First four fields are sched_yield() statistics: | 37 | First field is a sched_yield() statistic: |
| 38 | 1) # of times both the active and the expired queue were empty | 38 | 1) # of times sched_yield() was called |
| 39 | 2) # of times just the active queue was empty | ||
| 40 | 3) # of times just the expired queue was empty | ||
| 41 | 4) # of times sched_yield() was called | ||
| 42 | 39 | ||
| 43 | Next three are schedule() statistics: | 40 | Next three are schedule() statistics: |
| 44 | 5) # of times we switched to the expired queue and reused it | 41 | 2) # of times we switched to the expired queue and reused it |
| 45 | 6) # of times schedule() was called | 42 | 3) # of times schedule() was called |
| 46 | 7) # of times schedule() left the processor idle | 43 | 4) # of times schedule() left the processor idle |
| 47 | 44 | ||
| 48 | Next two are try_to_wake_up() statistics: | 45 | Next two are try_to_wake_up() statistics: |
| 49 | 8) # of times try_to_wake_up() was called | 46 | 5) # of times try_to_wake_up() was called |
| 50 | 9) # of times try_to_wake_up() was called to wake up the local cpu | 47 | 6) # of times try_to_wake_up() was called to wake up the local cpu |
| 51 | 48 | ||
| 52 | Next three are statistics describing scheduling latency: | 49 | Next three are statistics describing scheduling latency: |
| 53 | 10) sum of all time spent running by tasks on this processor (in jiffies) | 50 | 7) sum of all time spent running by tasks on this processor (in jiffies) |
| 54 | 11) sum of all time spent waiting to run by tasks on this processor (in | 51 | 8) sum of all time spent waiting to run by tasks on this processor (in |
| 55 | jiffies) | 52 | jiffies) |
| 56 | 12) # of timeslices run on this cpu | 53 | 9) # of timeslices run on this cpu |
| 57 | 54 | ||
| 58 | 55 | ||
| 59 | Domain statistics | 56 | Domain statistics |
diff --git a/MAINTAINERS b/MAINTAINERS index 445537d46e7..9511bff301c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -978,6 +978,8 @@ S: Maintained | |||
| 978 | F: arch/arm/plat-samsung/ | 978 | F: arch/arm/plat-samsung/ |
| 979 | F: arch/arm/plat-s3c24xx/ | 979 | F: arch/arm/plat-s3c24xx/ |
| 980 | F: arch/arm/plat-s5p/ | 980 | F: arch/arm/plat-s5p/ |
| 981 | F: drivers/*/*s3c2410* | ||
| 982 | F: drivers/*/*/*s3c2410* | ||
| 981 | 983 | ||
| 982 | ARM/S3C2410 ARM ARCHITECTURE | 984 | ARM/S3C2410 ARM ARCHITECTURE |
| 983 | M: Ben Dooks <ben-linux@fluff.org> | 985 | M: Ben Dooks <ben-linux@fluff.org> |
| @@ -5614,18 +5616,20 @@ F: include/linux/sfi*.h | |||
| 5614 | 5616 | ||
| 5615 | SIMTEC EB110ATX (Chalice CATS) | 5617 | SIMTEC EB110ATX (Chalice CATS) |
| 5616 | P: Ben Dooks | 5618 | P: Ben Dooks |
| 5617 | M: Vincent Sanders <support@simtec.co.uk> | 5619 | P: Vincent Sanders <vince@simtec.co.uk> |
| 5620 | M: Simtec Linux Team <linux@simtec.co.uk> | ||
| 5618 | W: http://www.simtec.co.uk/products/EB110ATX/ | 5621 | W: http://www.simtec.co.uk/products/EB110ATX/ |
| 5619 | S: Supported | 5622 | S: Supported |
| 5620 | 5623 | ||
| 5621 | SIMTEC EB2410ITX (BAST) | 5624 | SIMTEC EB2410ITX (BAST) |
| 5622 | P: Ben Dooks | 5625 | P: Ben Dooks |
| 5623 | M: Vincent Sanders <support@simtec.co.uk> | 5626 | P: Vincent Sanders <vince@simtec.co.uk> |
| 5627 | M: Simtec Linux Team <linux@simtec.co.uk> | ||
| 5624 | W: http://www.simtec.co.uk/products/EB2410ITX/ | 5628 | W: http://www.simtec.co.uk/products/EB2410ITX/ |
| 5625 | S: Supported | 5629 | S: Supported |
| 5626 | F: arch/arm/mach-s3c2410/ | 5630 | F: arch/arm/mach-s3c2410/mach-bast.c |
| 5627 | F: drivers/*/*s3c2410* | 5631 | F: arch/arm/mach-s3c2410/bast-ide.c |
| 5628 | F: drivers/*/*/*s3c2410* | 5632 | F: arch/arm/mach-s3c2410/bast-irq.c |
| 5629 | 5633 | ||
| 5630 | TI DAVINCI MACHINE SUPPORT | 5634 | TI DAVINCI MACHINE SUPPORT |
| 5631 | M: Kevin Hilman <khilman@deeprootsystems.com> | 5635 | M: Kevin Hilman <khilman@deeprootsystems.com> |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index ff19efdf6fe..636bcb81d06 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
| @@ -406,7 +406,7 @@ config QDIO | |||
| 406 | If unsure, say Y. | 406 | If unsure, say Y. |
| 407 | 407 | ||
| 408 | config CHSC_SCH | 408 | config CHSC_SCH |
| 409 | def_tristate y | 409 | def_tristate m |
| 410 | prompt "Support for CHSC subchannels" | 410 | prompt "Support for CHSC subchannels" |
| 411 | help | 411 | help |
| 412 | This driver allows usage of CHSC subchannels. A CHSC subchannel | 412 | This driver allows usage of CHSC subchannels. A CHSC subchannel |
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h index 405cc97c624..7e1f7762062 100644 --- a/arch/s390/include/asm/cacheflush.h +++ b/arch/s390/include/asm/cacheflush.h | |||
| @@ -1,29 +1,8 @@ | |||
| 1 | #ifndef _S390_CACHEFLUSH_H | 1 | #ifndef _S390_CACHEFLUSH_H |
| 2 | #define _S390_CACHEFLUSH_H | 2 | #define _S390_CACHEFLUSH_H |
| 3 | 3 | ||
| 4 | /* Keep includes the same across arches. */ | ||
| 5 | #include <linux/mm.h> | ||
| 6 | |||
| 7 | /* Caches aren't brain-dead on the s390. */ | 4 | /* Caches aren't brain-dead on the s390. */ |
| 8 | #define flush_cache_all() do { } while (0) | 5 | #include <asm-generic/cacheflush.h> |
| 9 | #define flush_cache_mm(mm) do { } while (0) | ||
| 10 | #define flush_cache_dup_mm(mm) do { } while (0) | ||
| 11 | #define flush_cache_range(vma, start, end) do { } while (0) | ||
| 12 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) | ||
| 13 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 | ||
| 14 | #define flush_dcache_page(page) do { } while (0) | ||
| 15 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | ||
| 16 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | ||
| 17 | #define flush_icache_range(start, end) do { } while (0) | ||
| 18 | #define flush_icache_page(vma,pg) do { } while (0) | ||
| 19 | #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) | ||
| 20 | #define flush_cache_vmap(start, end) do { } while (0) | ||
| 21 | #define flush_cache_vunmap(start, end) do { } while (0) | ||
| 22 | |||
| 23 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | ||
| 24 | memcpy(dst, src, len) | ||
| 25 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | ||
| 26 | memcpy(dst, src, len) | ||
| 27 | 6 | ||
| 28 | #ifdef CONFIG_DEBUG_PAGEALLOC | 7 | #ifdef CONFIG_DEBUG_PAGEALLOC |
| 29 | void kernel_map_pages(struct page *page, int numpages, int enable); | 8 | void kernel_map_pages(struct page *page, int numpages, int enable); |
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index f1f644f2240..9074a54c4d1 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | */ | 22 | */ |
| 23 | 23 | ||
| 24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
| 25 | #include <linux/pagemap.h> | ||
| 25 | #include <linux/swap.h> | 26 | #include <linux/swap.h> |
| 26 | #include <asm/processor.h> | 27 | #include <asm/processor.h> |
| 27 | #include <asm/pgalloc.h> | 28 | #include <asm/pgalloc.h> |
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c index 07deaeee14c..a6c4f7ed24a 100644 --- a/arch/s390/lib/uaccess_std.c +++ b/arch/s390/lib/uaccess_std.c | |||
| @@ -125,9 +125,9 @@ static size_t copy_in_user_std(size_t size, void __user *to, | |||
| 125 | unsigned long tmp1; | 125 | unsigned long tmp1; |
| 126 | 126 | ||
| 127 | asm volatile( | 127 | asm volatile( |
| 128 | " sacf 256\n" | ||
| 128 | " "AHI" %0,-1\n" | 129 | " "AHI" %0,-1\n" |
| 129 | " jo 5f\n" | 130 | " jo 5f\n" |
| 130 | " sacf 256\n" | ||
| 131 | " bras %3,3f\n" | 131 | " bras %3,3f\n" |
| 132 | "0:"AHI" %0,257\n" | 132 | "0:"AHI" %0,257\n" |
| 133 | "1: mvc 0(1,%1),0(%2)\n" | 133 | "1: mvc 0(1,%1),0(%2)\n" |
| @@ -142,9 +142,8 @@ static size_t copy_in_user_std(size_t size, void __user *to, | |||
| 142 | "3:"AHI" %0,-256\n" | 142 | "3:"AHI" %0,-256\n" |
| 143 | " jnm 2b\n" | 143 | " jnm 2b\n" |
| 144 | "4: ex %0,1b-0b(%3)\n" | 144 | "4: ex %0,1b-0b(%3)\n" |
| 145 | " sacf 0\n" | ||
| 146 | "5: "SLR" %0,%0\n" | 145 | "5: "SLR" %0,%0\n" |
| 147 | "6:\n" | 146 | "6: sacf 0\n" |
| 148 | EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) | 147 | EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) |
| 149 | : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1) | 148 | : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1) |
| 150 | : : "cc", "memory"); | 149 | : : "cc", "memory"); |
| @@ -156,9 +155,9 @@ static size_t clear_user_std(size_t size, void __user *to) | |||
| 156 | unsigned long tmp1, tmp2; | 155 | unsigned long tmp1, tmp2; |
| 157 | 156 | ||
| 158 | asm volatile( | 157 | asm volatile( |
| 158 | " sacf 256\n" | ||
| 159 | " "AHI" %0,-1\n" | 159 | " "AHI" %0,-1\n" |
| 160 | " jo 5f\n" | 160 | " jo 5f\n" |
| 161 | " sacf 256\n" | ||
| 162 | " bras %3,3f\n" | 161 | " bras %3,3f\n" |
| 163 | " xc 0(1,%1),0(%1)\n" | 162 | " xc 0(1,%1),0(%1)\n" |
| 164 | "0:"AHI" %0,257\n" | 163 | "0:"AHI" %0,257\n" |
| @@ -178,9 +177,8 @@ static size_t clear_user_std(size_t size, void __user *to) | |||
| 178 | "3:"AHI" %0,-256\n" | 177 | "3:"AHI" %0,-256\n" |
| 179 | " jnm 2b\n" | 178 | " jnm 2b\n" |
| 180 | "4: ex %0,0(%3)\n" | 179 | "4: ex %0,0(%3)\n" |
| 181 | " sacf 0\n" | ||
| 182 | "5: "SLR" %0,%0\n" | 180 | "5: "SLR" %0,%0\n" |
| 183 | "6:\n" | 181 | "6: sacf 0\n" |
| 184 | EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) | 182 | EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) |
| 185 | : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2) | 183 | : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2) |
| 186 | : : "cc", "memory"); | 184 | : : "cc", "memory"); |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 0c719c61972..e1850c28cd6 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
| @@ -336,7 +336,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) | |||
| 336 | page->flags ^= bits; | 336 | page->flags ^= bits; |
| 337 | if (page->flags & FRAG_MASK) { | 337 | if (page->flags & FRAG_MASK) { |
| 338 | /* Page now has some free pgtable fragments. */ | 338 | /* Page now has some free pgtable fragments. */ |
| 339 | list_move(&page->lru, &mm->context.pgtable_list); | 339 | if (!list_empty(&page->lru)) |
| 340 | list_move(&page->lru, &mm->context.pgtable_list); | ||
| 340 | page = NULL; | 341 | page = NULL; |
| 341 | } else | 342 | } else |
| 342 | /* All fragments of the 4K page have been freed. */ | 343 | /* All fragments of the 4K page have been freed. */ |
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index e56b9bfbabd..f7a0993c1e7 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c | |||
| @@ -682,7 +682,7 @@ static int p4_validate_raw_event(struct perf_event *event) | |||
| 682 | * if an event is shared accross the logical threads | 682 | * if an event is shared accross the logical threads |
| 683 | * the user needs special permissions to be able to use it | 683 | * the user needs special permissions to be able to use it |
| 684 | */ | 684 | */ |
| 685 | if (p4_event_bind_map[v].shared) { | 685 | if (p4_ht_active() && p4_event_bind_map[v].shared) { |
| 686 | if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) | 686 | if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) |
| 687 | return -EACCES; | 687 | return -EACCES; |
| 688 | } | 688 | } |
| @@ -727,7 +727,8 @@ static int p4_hw_config(struct perf_event *event) | |||
| 727 | event->hw.config = p4_set_ht_bit(event->hw.config); | 727 | event->hw.config = p4_set_ht_bit(event->hw.config); |
| 728 | 728 | ||
| 729 | if (event->attr.type == PERF_TYPE_RAW) { | 729 | if (event->attr.type == PERF_TYPE_RAW) { |
| 730 | 730 | struct p4_event_bind *bind; | |
| 731 | unsigned int esel; | ||
| 731 | /* | 732 | /* |
| 732 | * Clear bits we reserve to be managed by kernel itself | 733 | * Clear bits we reserve to be managed by kernel itself |
| 733 | * and never allowed from a user space | 734 | * and never allowed from a user space |
| @@ -743,6 +744,13 @@ static int p4_hw_config(struct perf_event *event) | |||
| 743 | * bits since we keep additional info here (for cache events and etc) | 744 | * bits since we keep additional info here (for cache events and etc) |
| 744 | */ | 745 | */ |
| 745 | event->hw.config |= event->attr.config; | 746 | event->hw.config |= event->attr.config; |
| 747 | bind = p4_config_get_bind(event->attr.config); | ||
| 748 | if (!bind) { | ||
| 749 | rc = -EINVAL; | ||
| 750 | goto out; | ||
| 751 | } | ||
| 752 | esel = P4_OPCODE_ESEL(bind->opcode); | ||
| 753 | event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel)); | ||
| 746 | } | 754 | } |
| 747 | 755 | ||
| 748 | rc = x86_setup_perfctr(event); | 756 | rc = x86_setup_perfctr(event); |
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c index f011c5d9dea..1c5cc65ea1e 100644 --- a/drivers/media/rc/ir-lirc-codec.c +++ b/drivers/media/rc/ir-lirc-codec.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | /* ir-lirc-codec.c - ir-core to classic lirc interface bridge | 1 | /* ir-lirc-codec.c - rc-core to classic lirc interface bridge |
| 2 | * | 2 | * |
| 3 | * Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com> | 3 | * Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com> |
| 4 | * | 4 | * |
| @@ -47,6 +47,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev) | |||
| 47 | /* Carrier reports */ | 47 | /* Carrier reports */ |
| 48 | if (ev.carrier_report) { | 48 | if (ev.carrier_report) { |
| 49 | sample = LIRC_FREQUENCY(ev.carrier); | 49 | sample = LIRC_FREQUENCY(ev.carrier); |
| 50 | IR_dprintk(2, "carrier report (freq: %d)\n", sample); | ||
| 50 | 51 | ||
| 51 | /* Packet end */ | 52 | /* Packet end */ |
| 52 | } else if (ev.timeout) { | 53 | } else if (ev.timeout) { |
| @@ -62,6 +63,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev) | |||
| 62 | return 0; | 63 | return 0; |
| 63 | 64 | ||
| 64 | sample = LIRC_TIMEOUT(ev.duration / 1000); | 65 | sample = LIRC_TIMEOUT(ev.duration / 1000); |
| 66 | IR_dprintk(2, "timeout report (duration: %d)\n", sample); | ||
| 65 | 67 | ||
| 66 | /* Normal sample */ | 68 | /* Normal sample */ |
| 67 | } else { | 69 | } else { |
| @@ -85,6 +87,8 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev) | |||
| 85 | 87 | ||
| 86 | sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) : | 88 | sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) : |
| 87 | LIRC_SPACE(ev.duration / 1000); | 89 | LIRC_SPACE(ev.duration / 1000); |
| 90 | IR_dprintk(2, "delivering %uus %s to lirc_dev\n", | ||
| 91 | TO_US(ev.duration), TO_STR(ev.pulse)); | ||
| 88 | } | 92 | } |
| 89 | 93 | ||
| 90 | lirc_buffer_write(dev->raw->lirc.drv->rbuf, | 94 | lirc_buffer_write(dev->raw->lirc.drv->rbuf, |
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c index 3bf3337875d..2f5dc0622b9 100644 --- a/drivers/media/rc/keymaps/rc-rc6-mce.c +++ b/drivers/media/rc/keymaps/rc-rc6-mce.c | |||
| @@ -3,6 +3,9 @@ | |||
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com> | 4 | * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com> |
| 5 | * | 5 | * |
| 6 | * See http://mediacenterguides.com/book/export/html/31 for details on | ||
| 7 | * key mappings. | ||
| 8 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License as published by | 10 | * it under the terms of the GNU General Public License as published by |
| 8 | * the Free Software Foundation; either version 2 of the License, or | 11 | * the Free Software Foundation; either version 2 of the License, or |
| @@ -60,6 +63,9 @@ static struct rc_map_table rc6_mce[] = { | |||
| 60 | { 0x800f0426, KEY_EPG }, /* Guide */ | 63 | { 0x800f0426, KEY_EPG }, /* Guide */ |
| 61 | { 0x800f0427, KEY_ZOOM }, /* Aspect */ | 64 | { 0x800f0427, KEY_ZOOM }, /* Aspect */ |
| 62 | 65 | ||
| 66 | { 0x800f0432, KEY_MODE }, /* Visualization */ | ||
| 67 | { 0x800f0433, KEY_PRESENTATION }, /* Slide Show */ | ||
| 68 | { 0x800f0434, KEY_EJECTCD }, | ||
| 63 | { 0x800f043a, KEY_BRIGHTNESSUP }, | 69 | { 0x800f043a, KEY_BRIGHTNESSUP }, |
| 64 | 70 | ||
| 65 | { 0x800f0446, KEY_TV }, | 71 | { 0x800f0446, KEY_TV }, |
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index 079353e5d55..6df0a498064 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c | |||
| @@ -816,7 +816,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index) | |||
| 816 | switch (ir->buf_in[index]) { | 816 | switch (ir->buf_in[index]) { |
| 817 | /* 2-byte return value commands */ | 817 | /* 2-byte return value commands */ |
| 818 | case MCE_CMD_S_TIMEOUT: | 818 | case MCE_CMD_S_TIMEOUT: |
| 819 | ir->rc->timeout = MS_TO_NS((hi << 8 | lo) / 2); | 819 | ir->rc->timeout = US_TO_NS((hi << 8 | lo) / 2); |
| 820 | break; | 820 | break; |
| 821 | 821 | ||
| 822 | /* 1-byte return value commands */ | 822 | /* 1-byte return value commands */ |
| @@ -855,9 +855,10 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len) | |||
| 855 | break; | 855 | break; |
| 856 | case PARSE_IRDATA: | 856 | case PARSE_IRDATA: |
| 857 | ir->rem--; | 857 | ir->rem--; |
| 858 | init_ir_raw_event(&rawir); | ||
| 858 | rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0); | 859 | rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0); |
| 859 | rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK) | 860 | rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK) |
| 860 | * MS_TO_US(MCE_TIME_UNIT); | 861 | * US_TO_NS(MCE_TIME_UNIT); |
| 861 | 862 | ||
| 862 | dev_dbg(ir->dev, "Storing %s with duration %d\n", | 863 | dev_dbg(ir->dev, "Storing %s with duration %d\n", |
| 863 | rawir.pulse ? "pulse" : "space", | 864 | rawir.pulse ? "pulse" : "space", |
| @@ -883,6 +884,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len) | |||
| 883 | i, ir->rem + 1, false); | 884 | i, ir->rem + 1, false); |
| 884 | if (ir->rem) | 885 | if (ir->rem) |
| 885 | ir->parser_state = PARSE_IRDATA; | 886 | ir->parser_state = PARSE_IRDATA; |
| 887 | else | ||
| 888 | ir_raw_event_reset(ir->rc); | ||
| 886 | break; | 889 | break; |
| 887 | } | 890 | } |
| 888 | 891 | ||
| @@ -1060,7 +1063,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir) | |||
| 1060 | rc->priv = ir; | 1063 | rc->priv = ir; |
| 1061 | rc->driver_type = RC_DRIVER_IR_RAW; | 1064 | rc->driver_type = RC_DRIVER_IR_RAW; |
| 1062 | rc->allowed_protos = RC_TYPE_ALL; | 1065 | rc->allowed_protos = RC_TYPE_ALL; |
| 1063 | rc->timeout = MS_TO_NS(1000); | 1066 | rc->timeout = US_TO_NS(1000); |
| 1064 | if (!ir->flags.no_tx) { | 1067 | if (!ir->flags.no_tx) { |
| 1065 | rc->s_tx_mask = mceusb_set_tx_mask; | 1068 | rc->s_tx_mask = mceusb_set_tx_mask; |
| 1066 | rc->s_tx_carrier = mceusb_set_tx_carrier; | 1069 | rc->s_tx_carrier = mceusb_set_tx_carrier; |
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c index dd4caf8ef80..273d9d67479 100644 --- a/drivers/media/rc/nuvoton-cir.c +++ b/drivers/media/rc/nuvoton-cir.c | |||
| @@ -460,7 +460,7 @@ static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt) | |||
| 460 | return 0; | 460 | return 0; |
| 461 | } | 461 | } |
| 462 | 462 | ||
| 463 | carrier = (count * 1000000) / duration; | 463 | carrier = MS_TO_NS(count) / duration; |
| 464 | 464 | ||
| 465 | if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER)) | 465 | if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER)) |
| 466 | nvt_dbg("WTF? Carrier frequency out of range!"); | 466 | nvt_dbg("WTF? Carrier frequency out of range!"); |
| @@ -612,8 +612,8 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt) | |||
| 612 | sample = nvt->buf[i]; | 612 | sample = nvt->buf[i]; |
| 613 | 613 | ||
| 614 | rawir.pulse = ((sample & BUF_PULSE_BIT) != 0); | 614 | rawir.pulse = ((sample & BUF_PULSE_BIT) != 0); |
| 615 | rawir.duration = (sample & BUF_LEN_MASK) | 615 | rawir.duration = US_TO_NS((sample & BUF_LEN_MASK) |
| 616 | * SAMPLE_PERIOD * 1000; | 616 | * SAMPLE_PERIOD); |
| 617 | 617 | ||
| 618 | if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) { | 618 | if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) { |
| 619 | if (nvt->rawir.pulse == rawir.pulse) | 619 | if (nvt->rawir.pulse == rawir.pulse) |
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c index 6e2911c2abf..e435d94c077 100644 --- a/drivers/media/rc/streamzap.c +++ b/drivers/media/rc/streamzap.c | |||
| @@ -164,7 +164,7 @@ static void sz_push_full_pulse(struct streamzap_ir *sz, | |||
| 164 | sz->signal_start.tv_usec - | 164 | sz->signal_start.tv_usec - |
| 165 | sz->signal_last.tv_usec); | 165 | sz->signal_last.tv_usec); |
| 166 | rawir.duration -= sz->sum; | 166 | rawir.duration -= sz->sum; |
| 167 | rawir.duration *= 1000; | 167 | rawir.duration = US_TO_NS(rawir.duration); |
| 168 | rawir.duration &= IR_MAX_DURATION; | 168 | rawir.duration &= IR_MAX_DURATION; |
| 169 | } | 169 | } |
| 170 | sz_push(sz, rawir); | 170 | sz_push(sz, rawir); |
| @@ -177,7 +177,7 @@ static void sz_push_full_pulse(struct streamzap_ir *sz, | |||
| 177 | rawir.duration = ((int) value) * SZ_RESOLUTION; | 177 | rawir.duration = ((int) value) * SZ_RESOLUTION; |
| 178 | rawir.duration += SZ_RESOLUTION / 2; | 178 | rawir.duration += SZ_RESOLUTION / 2; |
| 179 | sz->sum += rawir.duration; | 179 | sz->sum += rawir.duration; |
| 180 | rawir.duration *= 1000; | 180 | rawir.duration = US_TO_NS(rawir.duration); |
| 181 | rawir.duration &= IR_MAX_DURATION; | 181 | rawir.duration &= IR_MAX_DURATION; |
| 182 | sz_push(sz, rawir); | 182 | sz_push(sz, rawir); |
| 183 | } | 183 | } |
| @@ -197,7 +197,7 @@ static void sz_push_full_space(struct streamzap_ir *sz, | |||
| 197 | rawir.duration = ((int) value) * SZ_RESOLUTION; | 197 | rawir.duration = ((int) value) * SZ_RESOLUTION; |
| 198 | rawir.duration += SZ_RESOLUTION / 2; | 198 | rawir.duration += SZ_RESOLUTION / 2; |
| 199 | sz->sum += rawir.duration; | 199 | sz->sum += rawir.duration; |
| 200 | rawir.duration *= 1000; | 200 | rawir.duration = US_TO_NS(rawir.duration); |
| 201 | sz_push(sz, rawir); | 201 | sz_push(sz, rawir); |
| 202 | } | 202 | } |
| 203 | 203 | ||
| @@ -273,6 +273,7 @@ static void streamzap_callback(struct urb *urb) | |||
| 273 | if (sz->timeout_enabled) | 273 | if (sz->timeout_enabled) |
| 274 | sz_push(sz, rawir); | 274 | sz_push(sz, rawir); |
| 275 | ir_raw_event_handle(sz->rdev); | 275 | ir_raw_event_handle(sz->rdev); |
| 276 | ir_raw_event_reset(sz->rdev); | ||
| 276 | } else { | 277 | } else { |
| 277 | sz_push_full_space(sz, sz->buf_in[i]); | 278 | sz_push_full_space(sz, sz->buf_in[i]); |
| 278 | } | 279 | } |
| @@ -290,6 +291,7 @@ static void streamzap_callback(struct urb *urb) | |||
| 290 | } | 291 | } |
| 291 | } | 292 | } |
| 292 | 293 | ||
| 294 | ir_raw_event_handle(sz->rdev); | ||
| 293 | usb_submit_urb(urb, GFP_ATOMIC); | 295 | usb_submit_urb(urb, GFP_ATOMIC); |
| 294 | 296 | ||
| 295 | return; | 297 | return; |
| @@ -430,13 +432,13 @@ static int __devinit streamzap_probe(struct usb_interface *intf, | |||
| 430 | sz->decoder_state = PulseSpace; | 432 | sz->decoder_state = PulseSpace; |
| 431 | /* FIXME: don't yet have a way to set this */ | 433 | /* FIXME: don't yet have a way to set this */ |
| 432 | sz->timeout_enabled = true; | 434 | sz->timeout_enabled = true; |
| 433 | sz->rdev->timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) & | 435 | sz->rdev->timeout = ((US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION) & |
| 434 | IR_MAX_DURATION) | 0x03000000); | 436 | IR_MAX_DURATION) | 0x03000000); |
| 435 | #if 0 | 437 | #if 0 |
| 436 | /* not yet supported, depends on patches from maxim */ | 438 | /* not yet supported, depends on patches from maxim */ |
| 437 | /* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */ | 439 | /* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */ |
| 438 | sz->min_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000; | 440 | sz->min_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION); |
| 439 | sz->max_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000; | 441 | sz->max_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION); |
| 440 | #endif | 442 | #endif |
| 441 | 443 | ||
| 442 | do_gettimeofday(&sz->signal_start); | 444 | do_gettimeofday(&sz->signal_start); |
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c index 865216e9362..47236a58bf3 100644 --- a/drivers/media/video/gspca/zc3xx.c +++ b/drivers/media/video/gspca/zc3xx.c | |||
| @@ -5793,7 +5793,7 @@ static void usb_exchange(struct gspca_dev *gspca_dev, | |||
| 5793 | break; | 5793 | break; |
| 5794 | default: | 5794 | default: |
| 5795 | /* case 0xdd: * delay */ | 5795 | /* case 0xdd: * delay */ |
| 5796 | msleep(action->val / 64 + 10); | 5796 | msleep(action->idx); |
| 5797 | break; | 5797 | break; |
| 5798 | } | 5798 | } |
| 5799 | action++; | 5799 | action++; |
| @@ -5830,7 +5830,7 @@ static void setmatrix(struct gspca_dev *gspca_dev) | |||
| 5830 | [SENSOR_GC0305] = gc0305_matrix, | 5830 | [SENSOR_GC0305] = gc0305_matrix, |
| 5831 | [SENSOR_HDCS2020b] = NULL, | 5831 | [SENSOR_HDCS2020b] = NULL, |
| 5832 | [SENSOR_HV7131B] = NULL, | 5832 | [SENSOR_HV7131B] = NULL, |
| 5833 | [SENSOR_HV7131R] = NULL, | 5833 | [SENSOR_HV7131R] = po2030_matrix, |
| 5834 | [SENSOR_ICM105A] = po2030_matrix, | 5834 | [SENSOR_ICM105A] = po2030_matrix, |
| 5835 | [SENSOR_MC501CB] = NULL, | 5835 | [SENSOR_MC501CB] = NULL, |
| 5836 | [SENSOR_MT9V111_1] = gc0305_matrix, | 5836 | [SENSOR_MT9V111_1] = gc0305_matrix, |
| @@ -5936,6 +5936,7 @@ static void setquality(struct gspca_dev *gspca_dev) | |||
| 5936 | case SENSOR_ADCM2700: | 5936 | case SENSOR_ADCM2700: |
| 5937 | case SENSOR_GC0305: | 5937 | case SENSOR_GC0305: |
| 5938 | case SENSOR_HV7131B: | 5938 | case SENSOR_HV7131B: |
| 5939 | case SENSOR_HV7131R: | ||
| 5939 | case SENSOR_OV7620: | 5940 | case SENSOR_OV7620: |
| 5940 | case SENSOR_PAS202B: | 5941 | case SENSOR_PAS202B: |
| 5941 | case SENSOR_PO2030: | 5942 | case SENSOR_PO2030: |
| @@ -6108,11 +6109,13 @@ static void send_unknown(struct gspca_dev *gspca_dev, int sensor) | |||
| 6108 | reg_w(gspca_dev, 0x02, 0x003b); | 6109 | reg_w(gspca_dev, 0x02, 0x003b); |
| 6109 | reg_w(gspca_dev, 0x00, 0x0038); | 6110 | reg_w(gspca_dev, 0x00, 0x0038); |
| 6110 | break; | 6111 | break; |
| 6112 | case SENSOR_HV7131R: | ||
| 6111 | case SENSOR_PAS202B: | 6113 | case SENSOR_PAS202B: |
| 6112 | reg_w(gspca_dev, 0x03, 0x003b); | 6114 | reg_w(gspca_dev, 0x03, 0x003b); |
| 6113 | reg_w(gspca_dev, 0x0c, 0x003a); | 6115 | reg_w(gspca_dev, 0x0c, 0x003a); |
| 6114 | reg_w(gspca_dev, 0x0b, 0x0039); | 6116 | reg_w(gspca_dev, 0x0b, 0x0039); |
| 6115 | reg_w(gspca_dev, 0x0b, 0x0038); | 6117 | if (sensor == SENSOR_PAS202B) |
| 6118 | reg_w(gspca_dev, 0x0b, 0x0038); | ||
| 6116 | break; | 6119 | break; |
| 6117 | } | 6120 | } |
| 6118 | } | 6121 | } |
| @@ -6704,10 +6707,13 @@ static int sd_start(struct gspca_dev *gspca_dev) | |||
| 6704 | reg_w(gspca_dev, 0x02, 0x003b); | 6707 | reg_w(gspca_dev, 0x02, 0x003b); |
| 6705 | reg_w(gspca_dev, 0x00, 0x0038); | 6708 | reg_w(gspca_dev, 0x00, 0x0038); |
| 6706 | break; | 6709 | break; |
| 6710 | case SENSOR_HV7131R: | ||
| 6707 | case SENSOR_PAS202B: | 6711 | case SENSOR_PAS202B: |
| 6708 | reg_w(gspca_dev, 0x03, 0x003b); | 6712 | reg_w(gspca_dev, 0x03, 0x003b); |
| 6709 | reg_w(gspca_dev, 0x0c, 0x003a); | 6713 | reg_w(gspca_dev, 0x0c, 0x003a); |
| 6710 | reg_w(gspca_dev, 0x0b, 0x0039); | 6714 | reg_w(gspca_dev, 0x0b, 0x0039); |
| 6715 | if (sd->sensor == SENSOR_HV7131R) | ||
| 6716 | reg_w(gspca_dev, 0x50, ZC3XX_R11D_GLOBALGAIN); | ||
| 6711 | break; | 6717 | break; |
| 6712 | } | 6718 | } |
| 6713 | 6719 | ||
| @@ -6720,6 +6726,7 @@ static int sd_start(struct gspca_dev *gspca_dev) | |||
| 6720 | break; | 6726 | break; |
| 6721 | case SENSOR_PAS202B: | 6727 | case SENSOR_PAS202B: |
| 6722 | case SENSOR_GC0305: | 6728 | case SENSOR_GC0305: |
| 6729 | case SENSOR_HV7131R: | ||
| 6723 | case SENSOR_TAS5130C: | 6730 | case SENSOR_TAS5130C: |
| 6724 | reg_r(gspca_dev, 0x0008); | 6731 | reg_r(gspca_dev, 0x0008); |
| 6725 | /* fall thru */ | 6732 | /* fall thru */ |
| @@ -6760,6 +6767,12 @@ static int sd_start(struct gspca_dev *gspca_dev) | |||
| 6760 | /* ms-win + */ | 6767 | /* ms-win + */ |
| 6761 | reg_w(gspca_dev, 0x40, 0x0117); | 6768 | reg_w(gspca_dev, 0x40, 0x0117); |
| 6762 | break; | 6769 | break; |
| 6770 | case SENSOR_HV7131R: | ||
| 6771 | i2c_write(gspca_dev, 0x25, 0x04, 0x00); /* exposure */ | ||
| 6772 | i2c_write(gspca_dev, 0x26, 0x93, 0x00); | ||
| 6773 | i2c_write(gspca_dev, 0x27, 0xe0, 0x00); | ||
| 6774 | reg_w(gspca_dev, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN); | ||
| 6775 | break; | ||
| 6763 | case SENSOR_GC0305: | 6776 | case SENSOR_GC0305: |
| 6764 | case SENSOR_TAS5130C: | 6777 | case SENSOR_TAS5130C: |
| 6765 | reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */ | 6778 | reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */ |
| @@ -6808,9 +6821,17 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, | |||
| 6808 | { | 6821 | { |
| 6809 | struct sd *sd = (struct sd *) gspca_dev; | 6822 | struct sd *sd = (struct sd *) gspca_dev; |
| 6810 | 6823 | ||
| 6811 | if (data[0] == 0xff && data[1] == 0xd8) { /* start of frame */ | 6824 | /* check the JPEG end of frame */ |
| 6825 | if (len >= 3 | ||
| 6826 | && data[len - 3] == 0xff && data[len - 2] == 0xd9) { | ||
| 6827 | /* FIXME: what does the last byte mean? */ | ||
| 6812 | gspca_frame_add(gspca_dev, LAST_PACKET, | 6828 | gspca_frame_add(gspca_dev, LAST_PACKET, |
| 6813 | NULL, 0); | 6829 | data, len - 1); |
| 6830 | return; | ||
| 6831 | } | ||
| 6832 | |||
| 6833 | /* check the JPEG start of a frame */ | ||
| 6834 | if (data[0] == 0xff && data[1] == 0xd8) { | ||
| 6814 | /* put the JPEG header in the new frame */ | 6835 | /* put the JPEG header in the new frame */ |
| 6815 | gspca_frame_add(gspca_dev, FIRST_PACKET, | 6836 | gspca_frame_add(gspca_dev, FIRST_PACKET, |
| 6816 | sd->jpeg_hdr, JPEG_HDR_SZ); | 6837 | sd->jpeg_hdr, JPEG_HDR_SZ); |
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c index a6572e5ae36..a27d93b503a 100644 --- a/drivers/media/video/hdpvr/hdpvr-core.c +++ b/drivers/media/video/hdpvr/hdpvr-core.c | |||
| @@ -283,6 +283,7 @@ static int hdpvr_probe(struct usb_interface *interface, | |||
| 283 | struct hdpvr_device *dev; | 283 | struct hdpvr_device *dev; |
| 284 | struct usb_host_interface *iface_desc; | 284 | struct usb_host_interface *iface_desc; |
| 285 | struct usb_endpoint_descriptor *endpoint; | 285 | struct usb_endpoint_descriptor *endpoint; |
| 286 | struct i2c_client *client; | ||
| 286 | size_t buffer_size; | 287 | size_t buffer_size; |
| 287 | int i; | 288 | int i; |
| 288 | int retval = -ENOMEM; | 289 | int retval = -ENOMEM; |
| @@ -381,13 +382,21 @@ static int hdpvr_probe(struct usb_interface *interface, | |||
| 381 | #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) | 382 | #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) |
| 382 | retval = hdpvr_register_i2c_adapter(dev); | 383 | retval = hdpvr_register_i2c_adapter(dev); |
| 383 | if (retval < 0) { | 384 | if (retval < 0) { |
| 384 | v4l2_err(&dev->v4l2_dev, "registering i2c adapter failed\n"); | 385 | v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n"); |
| 385 | goto error; | 386 | goto error; |
| 386 | } | 387 | } |
| 387 | 388 | ||
| 388 | retval = hdpvr_register_i2c_ir(dev); | 389 | client = hdpvr_register_ir_rx_i2c(dev); |
| 389 | if (retval < 0) | 390 | if (!client) { |
| 390 | v4l2_err(&dev->v4l2_dev, "registering i2c IR devices failed\n"); | 391 | v4l2_err(&dev->v4l2_dev, "i2c IR RX device register failed\n"); |
| 392 | goto reg_fail; | ||
| 393 | } | ||
| 394 | |||
| 395 | client = hdpvr_register_ir_tx_i2c(dev); | ||
| 396 | if (!client) { | ||
| 397 | v4l2_err(&dev->v4l2_dev, "i2c IR TX device register failed\n"); | ||
| 398 | goto reg_fail; | ||
| 399 | } | ||
| 391 | #endif | 400 | #endif |
| 392 | 401 | ||
| 393 | /* let the user know what node this device is now attached to */ | 402 | /* let the user know what node this device is now attached to */ |
| @@ -395,6 +404,10 @@ static int hdpvr_probe(struct usb_interface *interface, | |||
| 395 | video_device_node_name(dev->video_dev)); | 404 | video_device_node_name(dev->video_dev)); |
| 396 | return 0; | 405 | return 0; |
| 397 | 406 | ||
| 407 | reg_fail: | ||
| 408 | #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) | ||
| 409 | i2c_del_adapter(&dev->i2c_adapter); | ||
| 410 | #endif | ||
| 398 | error: | 411 | error: |
| 399 | if (dev) { | 412 | if (dev) { |
| 400 | /* Destroy single thread */ | 413 | /* Destroy single thread */ |
| @@ -424,6 +437,9 @@ static void hdpvr_disconnect(struct usb_interface *interface) | |||
| 424 | mutex_lock(&dev->io_mutex); | 437 | mutex_lock(&dev->io_mutex); |
| 425 | hdpvr_cancel_queue(dev); | 438 | hdpvr_cancel_queue(dev); |
| 426 | mutex_unlock(&dev->io_mutex); | 439 | mutex_unlock(&dev->io_mutex); |
| 440 | #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) | ||
| 441 | i2c_del_adapter(&dev->i2c_adapter); | ||
| 442 | #endif | ||
| 427 | video_unregister_device(dev->video_dev); | 443 | video_unregister_device(dev->video_dev); |
| 428 | atomic_dec(&dev_nr); | 444 | atomic_dec(&dev_nr); |
| 429 | } | 445 | } |
diff --git a/drivers/media/video/hdpvr/hdpvr-i2c.c b/drivers/media/video/hdpvr/hdpvr-i2c.c index 89b71faeaac..e53fa55d56a 100644 --- a/drivers/media/video/hdpvr/hdpvr-i2c.c +++ b/drivers/media/video/hdpvr/hdpvr-i2c.c | |||
| @@ -31,26 +31,34 @@ | |||
| 31 | #define Z8F0811_IR_RX_I2C_ADDR 0x71 | 31 | #define Z8F0811_IR_RX_I2C_ADDR 0x71 |
| 32 | 32 | ||
| 33 | 33 | ||
| 34 | static struct i2c_board_info hdpvr_i2c_board_info = { | 34 | struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev) |
| 35 | I2C_BOARD_INFO("ir_tx_z8f0811_hdpvr", Z8F0811_IR_TX_I2C_ADDR), | 35 | { |
| 36 | I2C_BOARD_INFO("ir_rx_z8f0811_hdpvr", Z8F0811_IR_RX_I2C_ADDR), | 36 | struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data; |
| 37 | }; | 37 | struct i2c_board_info hdpvr_ir_tx_i2c_board_info = { |
| 38 | I2C_BOARD_INFO("ir_tx_z8f0811_hdpvr", Z8F0811_IR_TX_I2C_ADDR), | ||
| 39 | }; | ||
| 40 | |||
| 41 | init_data->name = "HD-PVR"; | ||
| 42 | hdpvr_ir_tx_i2c_board_info.platform_data = init_data; | ||
| 38 | 43 | ||
| 39 | int hdpvr_register_i2c_ir(struct hdpvr_device *dev) | 44 | return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_tx_i2c_board_info); |
| 45 | } | ||
| 46 | |||
| 47 | struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev) | ||
| 40 | { | 48 | { |
| 41 | struct i2c_client *c; | ||
| 42 | struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data; | 49 | struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data; |
| 50 | struct i2c_board_info hdpvr_ir_rx_i2c_board_info = { | ||
| 51 | I2C_BOARD_INFO("ir_rx_z8f0811_hdpvr", Z8F0811_IR_RX_I2C_ADDR), | ||
| 52 | }; | ||
| 43 | 53 | ||
| 44 | /* Our default information for ir-kbd-i2c.c to use */ | 54 | /* Our default information for ir-kbd-i2c.c to use */ |
| 45 | init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW; | 55 | init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW; |
| 46 | init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR; | 56 | init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR; |
| 47 | init_data->type = RC_TYPE_RC5; | 57 | init_data->type = RC_TYPE_RC5; |
| 48 | init_data->name = "HD PVR"; | 58 | init_data->name = "HD-PVR"; |
| 49 | hdpvr_i2c_board_info.platform_data = init_data; | 59 | hdpvr_ir_rx_i2c_board_info.platform_data = init_data; |
| 50 | |||
| 51 | c = i2c_new_device(&dev->i2c_adapter, &hdpvr_i2c_board_info); | ||
| 52 | 60 | ||
| 53 | return (c == NULL) ? -ENODEV : 0; | 61 | return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_rx_i2c_board_info); |
| 54 | } | 62 | } |
| 55 | 63 | ||
| 56 | static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus, | 64 | static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus, |
diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h index ee74e3be9a6..072f23c570f 100644 --- a/drivers/media/video/hdpvr/hdpvr.h +++ b/drivers/media/video/hdpvr/hdpvr.h | |||
| @@ -313,7 +313,8 @@ int hdpvr_cancel_queue(struct hdpvr_device *dev); | |||
| 313 | /* i2c adapter registration */ | 313 | /* i2c adapter registration */ |
| 314 | int hdpvr_register_i2c_adapter(struct hdpvr_device *dev); | 314 | int hdpvr_register_i2c_adapter(struct hdpvr_device *dev); |
| 315 | 315 | ||
| 316 | int hdpvr_register_i2c_ir(struct hdpvr_device *dev); | 316 | struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev); |
| 317 | struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev); | ||
| 317 | 318 | ||
| 318 | /*========================================================================*/ | 319 | /*========================================================================*/ |
| 319 | /* buffer management */ | 320 | /* buffer management */ |
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c index d2b20ad383a..a221ad68b33 100644 --- a/drivers/media/video/ir-kbd-i2c.c +++ b/drivers/media/video/ir-kbd-i2c.c | |||
| @@ -128,6 +128,19 @@ static int get_key_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw) | |||
| 128 | 128 | ||
| 129 | static int get_key_haup_xvr(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw) | 129 | static int get_key_haup_xvr(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw) |
| 130 | { | 130 | { |
| 131 | int ret; | ||
| 132 | unsigned char buf[1] = { 0 }; | ||
| 133 | |||
| 134 | /* | ||
| 135 | * This is the same apparent "are you ready?" poll command observed | ||
| 136 | * watching Windows driver traffic and implemented in lirc_zilog. With | ||
| 137 | * this added, we get far saner remote behavior with z8 chips on usb | ||
| 138 | * connected devices, even with the default polling interval of 100ms. | ||
| 139 | */ | ||
| 140 | ret = i2c_master_send(ir->c, buf, 1); | ||
| 141 | if (ret != 1) | ||
| 142 | return (ret < 0) ? ret : -EINVAL; | ||
| 143 | |||
| 131 | return get_key_haup_common (ir, ir_key, ir_raw, 6, 3); | 144 | return get_key_haup_common (ir, ir_key, ir_raw, 6, 3); |
| 132 | } | 145 | } |
| 133 | 146 | ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c index ccc884948f3..451ecd485f9 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c +++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c | |||
| @@ -597,7 +597,6 @@ static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw) | |||
| 597 | init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR; | 597 | init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR; |
| 598 | init_data->type = RC_TYPE_RC5; | 598 | init_data->type = RC_TYPE_RC5; |
| 599 | init_data->name = hdw->hdw_desc->description; | 599 | init_data->name = hdw->hdw_desc->description; |
| 600 | init_data->polling_interval = 260; /* ms From lirc_zilog */ | ||
| 601 | /* IR Receiver */ | 600 | /* IR Receiver */ |
| 602 | info.addr = 0x71; | 601 | info.addr = 0x71; |
| 603 | info.platform_data = init_data; | 602 | info.platform_data = init_data; |
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c index f35459d1f42..0db90922ee9 100644 --- a/drivers/media/video/saa7115.c +++ b/drivers/media/video/saa7115.c | |||
| @@ -1565,7 +1565,7 @@ static int saa711x_probe(struct i2c_client *client, | |||
| 1565 | chip_id = name[5]; | 1565 | chip_id = name[5]; |
| 1566 | 1566 | ||
| 1567 | /* Check whether this chip is part of the saa711x series */ | 1567 | /* Check whether this chip is part of the saa711x series */ |
| 1568 | if (memcmp(name, "1f711", 5)) { | 1568 | if (memcmp(name + 1, "f711", 4)) { |
| 1569 | v4l_dbg(1, debug, client, "chip found @ 0x%x (ID %s) does not match a known saa711x chip.\n", | 1569 | v4l_dbg(1, debug, client, "chip found @ 0x%x (ID %s) does not match a known saa711x chip.\n", |
| 1570 | client->addr << 1, name); | 1570 | client->addr << 1, name); |
| 1571 | return -ENODEV; | 1571 | return -ENODEV; |
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index 4155805dcdf..2b771f18d1a 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c | |||
| @@ -319,6 +319,9 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) | |||
| 319 | 319 | ||
| 320 | private = (struct dasd_eckd_private *) device->private; | 320 | private = (struct dasd_eckd_private *) device->private; |
| 321 | lcu = private->lcu; | 321 | lcu = private->lcu; |
| 322 | /* nothing to do if already disconnected */ | ||
| 323 | if (!lcu) | ||
| 324 | return; | ||
| 322 | device->discipline->get_uid(device, &uid); | 325 | device->discipline->get_uid(device, &uid); |
| 323 | spin_lock_irqsave(&lcu->lock, flags); | 326 | spin_lock_irqsave(&lcu->lock, flags); |
| 324 | list_del_init(&device->alias_list); | 327 | list_del_init(&device->alias_list); |
| @@ -680,6 +683,9 @@ int dasd_alias_remove_device(struct dasd_device *device) | |||
| 680 | 683 | ||
| 681 | private = (struct dasd_eckd_private *) device->private; | 684 | private = (struct dasd_eckd_private *) device->private; |
| 682 | lcu = private->lcu; | 685 | lcu = private->lcu; |
| 686 | /* nothing to do if already removed */ | ||
| 687 | if (!lcu) | ||
| 688 | return 0; | ||
| 683 | spin_lock_irqsave(&lcu->lock, flags); | 689 | spin_lock_irqsave(&lcu->lock, flags); |
| 684 | _remove_device_from_lcu(lcu, device); | 690 | _remove_device_from_lcu(lcu, device); |
| 685 | spin_unlock_irqrestore(&lcu->lock, flags); | 691 | spin_unlock_irqrestore(&lcu->lock, flags); |
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index e9fff2b9bce..5640c89cd9d 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
| @@ -476,7 +476,7 @@ static inline void inbound_primed(struct qdio_q *q, int count) | |||
| 476 | static int get_inbound_buffer_frontier(struct qdio_q *q) | 476 | static int get_inbound_buffer_frontier(struct qdio_q *q) |
| 477 | { | 477 | { |
| 478 | int count, stop; | 478 | int count, stop; |
| 479 | unsigned char state; | 479 | unsigned char state = 0; |
| 480 | 480 | ||
| 481 | /* | 481 | /* |
| 482 | * Don't check 128 buffers, as otherwise qdio_inbound_q_moved | 482 | * Don't check 128 buffers, as otherwise qdio_inbound_q_moved |
| @@ -643,7 +643,7 @@ void qdio_inbound_processing(unsigned long data) | |||
| 643 | static int get_outbound_buffer_frontier(struct qdio_q *q) | 643 | static int get_outbound_buffer_frontier(struct qdio_q *q) |
| 644 | { | 644 | { |
| 645 | int count, stop; | 645 | int count, stop; |
| 646 | unsigned char state; | 646 | unsigned char state = 0; |
| 647 | 647 | ||
| 648 | if (need_siga_sync(q)) | 648 | if (need_siga_sync(q)) |
| 649 | if (((queue_type(q) != QDIO_IQDIO_QFMT) && | 649 | if (((queue_type(q) != QDIO_IQDIO_QFMT) && |
diff --git a/drivers/staging/lirc/lirc_zilog.c b/drivers/staging/lirc/lirc_zilog.c index 3fe5f416019..0aad0d7a74a 100644 --- a/drivers/staging/lirc/lirc_zilog.c +++ b/drivers/staging/lirc/lirc_zilog.c | |||
| @@ -495,7 +495,7 @@ static int send_data_block(struct IR_tx *tx, unsigned char *data_block) | |||
| 495 | /* send boot data to the IR TX device */ | 495 | /* send boot data to the IR TX device */ |
| 496 | static int send_boot_data(struct IR_tx *tx) | 496 | static int send_boot_data(struct IR_tx *tx) |
| 497 | { | 497 | { |
| 498 | int ret; | 498 | int ret, i; |
| 499 | unsigned char buf[4]; | 499 | unsigned char buf[4]; |
| 500 | 500 | ||
| 501 | /* send the boot block */ | 501 | /* send the boot block */ |
| @@ -503,7 +503,7 @@ static int send_boot_data(struct IR_tx *tx) | |||
| 503 | if (ret != 0) | 503 | if (ret != 0) |
| 504 | return ret; | 504 | return ret; |
| 505 | 505 | ||
| 506 | /* kick it off? */ | 506 | /* Hit the go button to activate the new boot data */ |
| 507 | buf[0] = 0x00; | 507 | buf[0] = 0x00; |
| 508 | buf[1] = 0x20; | 508 | buf[1] = 0x20; |
| 509 | ret = i2c_master_send(tx->c, buf, 2); | 509 | ret = i2c_master_send(tx->c, buf, 2); |
| @@ -511,7 +511,19 @@ static int send_boot_data(struct IR_tx *tx) | |||
| 511 | zilog_error("i2c_master_send failed with %d\n", ret); | 511 | zilog_error("i2c_master_send failed with %d\n", ret); |
| 512 | return ret < 0 ? ret : -EFAULT; | 512 | return ret < 0 ? ret : -EFAULT; |
| 513 | } | 513 | } |
| 514 | ret = i2c_master_send(tx->c, buf, 1); | 514 | |
| 515 | /* | ||
| 516 | * Wait for the Zilog to settle after hitting "go" following the boot block upload. | ||
| 517 | * Without this delay, the HD-PVR and HVR-1950 both return an -EIO | ||
| 518 | * upon attempting to get firmware revision, and tx probe thus fails. | ||
| 519 | */ | ||
| 520 | for (i = 0; i < 10; i++) { | ||
| 521 | ret = i2c_master_send(tx->c, buf, 1); | ||
| 522 | if (ret == 1) | ||
| 523 | break; | ||
| 524 | udelay(100); | ||
| 525 | } | ||
| 526 | |||
| 515 | if (ret != 1) { | 527 | if (ret != 1) { |
| 516 | zilog_error("i2c_master_send failed with %d\n", ret); | 528 | zilog_error("i2c_master_send failed with %d\n", ret); |
| 517 | return ret < 0 ? ret : -EFAULT; | 529 | return ret < 0 ? ret : -EFAULT; |
| @@ -523,8 +535,8 @@ static int send_boot_data(struct IR_tx *tx) | |||
| 523 | zilog_error("i2c_master_recv failed with %d\n", ret); | 535 | zilog_error("i2c_master_recv failed with %d\n", ret); |
| 524 | return 0; | 536 | return 0; |
| 525 | } | 537 | } |
| 526 | if (buf[0] != 0x80) { | 538 | if ((buf[0] != 0x80) && (buf[0] != 0xa0)) { |
| 527 | zilog_error("unexpected IR TX response: %02x\n", buf[0]); | 539 | zilog_error("unexpected IR TX init response: %02x\n", buf[0]); |
| 528 | return 0; | 540 | return 0; |
| 529 | } | 541 | } |
| 530 | zilog_notify("Zilog/Hauppauge IR blaster firmware version " | 542 | zilog_notify("Zilog/Hauppauge IR blaster firmware version " |
| @@ -827,7 +839,15 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key) | |||
| 827 | zilog_error("i2c_master_send failed with %d\n", ret); | 839 | zilog_error("i2c_master_send failed with %d\n", ret); |
| 828 | return ret < 0 ? ret : -EFAULT; | 840 | return ret < 0 ? ret : -EFAULT; |
| 829 | } | 841 | } |
| 830 | ret = i2c_master_send(tx->c, buf, 1); | 842 | |
| 843 | /* Give the z8 a moment to process data block */ | ||
| 844 | for (i = 0; i < 10; i++) { | ||
| 845 | ret = i2c_master_send(tx->c, buf, 1); | ||
| 846 | if (ret == 1) | ||
| 847 | break; | ||
| 848 | udelay(100); | ||
| 849 | } | ||
| 850 | |||
| 831 | if (ret != 1) { | 851 | if (ret != 1) { |
| 832 | zilog_error("i2c_master_send failed with %d\n", ret); | 852 | zilog_error("i2c_master_send failed with %d\n", ret); |
| 833 | return ret < 0 ? ret : -EFAULT; | 853 | return ret < 0 ? ret : -EFAULT; |
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index cc8a9b7d606..267d0ada454 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
| @@ -1114,6 +1114,17 @@ static int ep_send_events(struct eventpoll *ep, | |||
| 1114 | return ep_scan_ready_list(ep, ep_send_events_proc, &esed); | 1114 | return ep_scan_ready_list(ep, ep_send_events_proc, &esed); |
| 1115 | } | 1115 | } |
| 1116 | 1116 | ||
| 1117 | static inline struct timespec ep_set_mstimeout(long ms) | ||
| 1118 | { | ||
| 1119 | struct timespec now, ts = { | ||
| 1120 | .tv_sec = ms / MSEC_PER_SEC, | ||
| 1121 | .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC), | ||
| 1122 | }; | ||
| 1123 | |||
| 1124 | ktime_get_ts(&now); | ||
| 1125 | return timespec_add_safe(now, ts); | ||
| 1126 | } | ||
| 1127 | |||
| 1117 | static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, | 1128 | static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, |
| 1118 | int maxevents, long timeout) | 1129 | int maxevents, long timeout) |
| 1119 | { | 1130 | { |
| @@ -1121,12 +1132,11 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, | |||
| 1121 | unsigned long flags; | 1132 | unsigned long flags; |
| 1122 | long slack; | 1133 | long slack; |
| 1123 | wait_queue_t wait; | 1134 | wait_queue_t wait; |
| 1124 | struct timespec end_time; | ||
| 1125 | ktime_t expires, *to = NULL; | 1135 | ktime_t expires, *to = NULL; |
| 1126 | 1136 | ||
| 1127 | if (timeout > 0) { | 1137 | if (timeout > 0) { |
| 1128 | ktime_get_ts(&end_time); | 1138 | struct timespec end_time = ep_set_mstimeout(timeout); |
| 1129 | timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC); | 1139 | |
| 1130 | slack = select_estimate_accuracy(&end_time); | 1140 | slack = select_estimate_accuracy(&end_time); |
| 1131 | to = &expires; | 1141 | to = &expires; |
| 1132 | *to = timespec_to_ktime(end_time); | 1142 | *to = timespec_to_ktime(end_time); |
| @@ -120,7 +120,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library) | |||
| 120 | goto out; | 120 | goto out; |
| 121 | 121 | ||
| 122 | file = do_filp_open(AT_FDCWD, tmp, | 122 | file = do_filp_open(AT_FDCWD, tmp, |
| 123 | O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0, | 123 | O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0, |
| 124 | MAY_READ | MAY_EXEC | MAY_OPEN); | 124 | MAY_READ | MAY_EXEC | MAY_OPEN); |
| 125 | putname(tmp); | 125 | putname(tmp); |
| 126 | error = PTR_ERR(file); | 126 | error = PTR_ERR(file); |
| @@ -723,7 +723,7 @@ struct file *open_exec(const char *name) | |||
| 723 | int err; | 723 | int err; |
| 724 | 724 | ||
| 725 | file = do_filp_open(AT_FDCWD, name, | 725 | file = do_filp_open(AT_FDCWD, name, |
| 726 | O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0, | 726 | O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0, |
| 727 | MAY_EXEC | MAY_OPEN); | 727 | MAY_EXEC | MAY_OPEN); |
| 728 | if (IS_ERR(file)) | 728 | if (IS_ERR(file)) |
| 729 | goto out; | 729 | goto out; |
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index 42685424817..a7555238c41 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c | |||
| @@ -1030,7 +1030,6 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino) | |||
| 1030 | memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data)); | 1030 | memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data)); |
| 1031 | } | 1031 | } |
| 1032 | 1032 | ||
| 1033 | inode->i_mapping->backing_dev_info = sb->s_bdi; | ||
| 1034 | if (S_ISREG(inode->i_mode)) { | 1033 | if (S_ISREG(inode->i_mode)) { |
| 1035 | inode->i_op = &exofs_file_inode_operations; | 1034 | inode->i_op = &exofs_file_inode_operations; |
| 1036 | inode->i_fop = &exofs_file_operations; | 1035 | inode->i_fop = &exofs_file_operations; |
| @@ -1131,7 +1130,6 @@ struct inode *exofs_new_inode(struct inode *dir, int mode) | |||
| 1131 | 1130 | ||
| 1132 | sbi = sb->s_fs_info; | 1131 | sbi = sb->s_fs_info; |
| 1133 | 1132 | ||
| 1134 | inode->i_mapping->backing_dev_info = sb->s_bdi; | ||
| 1135 | sb->s_dirt = 1; | 1133 | sb->s_dirt = 1; |
| 1136 | inode_init_owner(inode, dir, mode); | 1134 | inode_init_owner(inode, dir, mode); |
| 1137 | inode->i_ino = sbi->s_nextid++; | 1135 | inode->i_ino = sbi->s_nextid++; |
diff --git a/fs/fcntl.c b/fs/fcntl.c index ecc8b3954ed..cb1026181bd 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
| @@ -815,7 +815,7 @@ static int __init fcntl_init(void) | |||
| 815 | __O_SYNC | O_DSYNC | FASYNC | | 815 | __O_SYNC | O_DSYNC | FASYNC | |
| 816 | O_DIRECT | O_LARGEFILE | O_DIRECTORY | | 816 | O_DIRECT | O_LARGEFILE | O_DIRECTORY | |
| 817 | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | | 817 | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | |
| 818 | FMODE_EXEC | 818 | __FMODE_EXEC |
| 819 | )); | 819 | )); |
| 820 | 820 | ||
| 821 | fasync_cache = kmem_cache_create("fasync_cache", | 821 | fasync_cache = kmem_cache_create("fasync_cache", |
diff --git a/fs/ioctl.c b/fs/ioctl.c index a59635e295f..1eebeb72b20 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c | |||
| @@ -273,6 +273,13 @@ int __generic_block_fiemap(struct inode *inode, | |||
| 273 | len = isize; | 273 | len = isize; |
| 274 | } | 274 | } |
| 275 | 275 | ||
| 276 | /* | ||
| 277 | * Some filesystems can't deal with being asked to map less than | ||
| 278 | * blocksize, so make sure our len is at least block length. | ||
| 279 | */ | ||
| 280 | if (logical_to_blk(inode, len) == 0) | ||
| 281 | len = blk_to_logical(inode, 1); | ||
| 282 | |||
| 276 | start_blk = logical_to_blk(inode, start); | 283 | start_blk = logical_to_blk(inode, start); |
| 277 | last_blk = logical_to_blk(inode, start + len - 1); | 284 | last_blk = logical_to_blk(inode, start + len - 1); |
| 278 | 285 | ||
diff --git a/include/linux/fs.h b/include/linux/fs.h index 32b38cd829d..bd3215940c3 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -2555,9 +2555,12 @@ int proc_nr_inodes(struct ctl_table *table, int write, | |||
| 2555 | void __user *buffer, size_t *lenp, loff_t *ppos); | 2555 | void __user *buffer, size_t *lenp, loff_t *ppos); |
| 2556 | int __init get_filesystem_list(char *buf); | 2556 | int __init get_filesystem_list(char *buf); |
| 2557 | 2557 | ||
| 2558 | #define __FMODE_EXEC ((__force int) FMODE_EXEC) | ||
| 2559 | #define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY) | ||
| 2560 | |||
| 2558 | #define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE]) | 2561 | #define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE]) |
| 2559 | #define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \ | 2562 | #define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \ |
| 2560 | (flag & FMODE_NONOTIFY))) | 2563 | (flag & __FMODE_NONOTIFY))) |
| 2561 | 2564 | ||
| 2562 | #endif /* __KERNEL__ */ | 2565 | #endif /* __KERNEL__ */ |
| 2563 | #endif /* _LINUX_FS_H */ | 2566 | #endif /* _LINUX_FS_H */ |
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index fcb9884df61..a5930cb6614 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h | |||
| @@ -182,6 +182,26 @@ static inline bool res_counter_check_under_limit(struct res_counter *cnt) | |||
| 182 | return ret; | 182 | return ret; |
| 183 | } | 183 | } |
| 184 | 184 | ||
| 185 | /** | ||
| 186 | * res_counter_check_margin - check if the counter allows charging | ||
| 187 | * @cnt: the resource counter to check | ||
| 188 | * @bytes: the number of bytes to check the remaining space against | ||
| 189 | * | ||
| 190 | * Returns a boolean value on whether the counter can be charged | ||
| 191 | * @bytes or whether this would exceed the limit. | ||
| 192 | */ | ||
| 193 | static inline bool res_counter_check_margin(struct res_counter *cnt, | ||
| 194 | unsigned long bytes) | ||
| 195 | { | ||
| 196 | bool ret; | ||
| 197 | unsigned long flags; | ||
| 198 | |||
| 199 | spin_lock_irqsave(&cnt->lock, flags); | ||
| 200 | ret = cnt->limit - cnt->usage >= bytes; | ||
| 201 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
| 202 | return ret; | ||
| 203 | } | ||
| 204 | |||
| 185 | static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt) | 205 | static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt) |
| 186 | { | 206 | { |
| 187 | bool ret; | 207 | bool ret; |
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 126a302c481..999835b6112 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
| @@ -1901,11 +1901,12 @@ static void __perf_event_read(void *info) | |||
| 1901 | return; | 1901 | return; |
| 1902 | 1902 | ||
| 1903 | raw_spin_lock(&ctx->lock); | 1903 | raw_spin_lock(&ctx->lock); |
| 1904 | update_context_time(ctx); | 1904 | if (ctx->is_active) |
| 1905 | update_context_time(ctx); | ||
| 1905 | update_event_times(event); | 1906 | update_event_times(event); |
| 1907 | if (event->state == PERF_EVENT_STATE_ACTIVE) | ||
| 1908 | event->pmu->read(event); | ||
| 1906 | raw_spin_unlock(&ctx->lock); | 1909 | raw_spin_unlock(&ctx->lock); |
| 1907 | |||
| 1908 | event->pmu->read(event); | ||
| 1909 | } | 1910 | } |
| 1910 | 1911 | ||
| 1911 | static inline u64 perf_event_count(struct perf_event *event) | 1912 | static inline u64 perf_event_count(struct perf_event *event) |
| @@ -1999,8 +2000,7 @@ static int alloc_callchain_buffers(void) | |||
| 1999 | * accessed from NMI. Use a temporary manual per cpu allocation | 2000 | * accessed from NMI. Use a temporary manual per cpu allocation |
| 2000 | * until that gets sorted out. | 2001 | * until that gets sorted out. |
| 2001 | */ | 2002 | */ |
| 2002 | size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) * | 2003 | size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]); |
| 2003 | num_possible_cpus(); | ||
| 2004 | 2004 | ||
| 2005 | entries = kzalloc(size, GFP_KERNEL); | 2005 | entries = kzalloc(size, GFP_KERNEL); |
| 2006 | if (!entries) | 2006 | if (!entries) |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index c914ec747ca..ad6267714c8 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
| @@ -625,7 +625,7 @@ static void update_curr_rt(struct rq *rq) | |||
| 625 | struct rt_rq *rt_rq = rt_rq_of_se(rt_se); | 625 | struct rt_rq *rt_rq = rt_rq_of_se(rt_se); |
| 626 | u64 delta_exec; | 626 | u64 delta_exec; |
| 627 | 627 | ||
| 628 | if (!task_has_rt_policy(curr)) | 628 | if (curr->sched_class != &rt_sched_class) |
| 629 | return; | 629 | return; |
| 630 | 630 | ||
| 631 | delta_exec = rq->clock_task - curr->se.exec_start; | 631 | delta_exec = rq->clock_task - curr->se.exec_start; |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index d7ebdf4cea9..f37f974aa81 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
| @@ -27,7 +27,7 @@ | |||
| 27 | #include <asm/irq_regs.h> | 27 | #include <asm/irq_regs.h> |
| 28 | #include <linux/perf_event.h> | 28 | #include <linux/perf_event.h> |
| 29 | 29 | ||
| 30 | int watchdog_enabled; | 30 | int watchdog_enabled = 1; |
| 31 | int __read_mostly softlockup_thresh = 60; | 31 | int __read_mostly softlockup_thresh = 60; |
| 32 | 32 | ||
| 33 | static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); | 33 | static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); |
| @@ -43,9 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); | |||
| 43 | static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); | 43 | static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); |
| 44 | #endif | 44 | #endif |
| 45 | 45 | ||
| 46 | static int no_watchdog; | ||
| 47 | |||
| 48 | |||
| 49 | /* boot commands */ | 46 | /* boot commands */ |
| 50 | /* | 47 | /* |
| 51 | * Should we panic when a soft-lockup or hard-lockup occurs: | 48 | * Should we panic when a soft-lockup or hard-lockup occurs: |
| @@ -58,7 +55,7 @@ static int __init hardlockup_panic_setup(char *str) | |||
| 58 | if (!strncmp(str, "panic", 5)) | 55 | if (!strncmp(str, "panic", 5)) |
| 59 | hardlockup_panic = 1; | 56 | hardlockup_panic = 1; |
| 60 | else if (!strncmp(str, "0", 1)) | 57 | else if (!strncmp(str, "0", 1)) |
| 61 | no_watchdog = 1; | 58 | watchdog_enabled = 0; |
| 62 | return 1; | 59 | return 1; |
| 63 | } | 60 | } |
| 64 | __setup("nmi_watchdog=", hardlockup_panic_setup); | 61 | __setup("nmi_watchdog=", hardlockup_panic_setup); |
| @@ -77,7 +74,7 @@ __setup("softlockup_panic=", softlockup_panic_setup); | |||
| 77 | 74 | ||
| 78 | static int __init nowatchdog_setup(char *str) | 75 | static int __init nowatchdog_setup(char *str) |
| 79 | { | 76 | { |
| 80 | no_watchdog = 1; | 77 | watchdog_enabled = 0; |
| 81 | return 1; | 78 | return 1; |
| 82 | } | 79 | } |
| 83 | __setup("nowatchdog", nowatchdog_setup); | 80 | __setup("nowatchdog", nowatchdog_setup); |
| @@ -85,7 +82,7 @@ __setup("nowatchdog", nowatchdog_setup); | |||
| 85 | /* deprecated */ | 82 | /* deprecated */ |
| 86 | static int __init nosoftlockup_setup(char *str) | 83 | static int __init nosoftlockup_setup(char *str) |
| 87 | { | 84 | { |
| 88 | no_watchdog = 1; | 85 | watchdog_enabled = 0; |
| 89 | return 1; | 86 | return 1; |
| 90 | } | 87 | } |
| 91 | __setup("nosoftlockup", nosoftlockup_setup); | 88 | __setup("nosoftlockup", nosoftlockup_setup); |
| @@ -432,9 +429,6 @@ static int watchdog_enable(int cpu) | |||
| 432 | wake_up_process(p); | 429 | wake_up_process(p); |
| 433 | } | 430 | } |
| 434 | 431 | ||
| 435 | /* if any cpu succeeds, watchdog is considered enabled for the system */ | ||
| 436 | watchdog_enabled = 1; | ||
| 437 | |||
| 438 | return 0; | 432 | return 0; |
| 439 | } | 433 | } |
| 440 | 434 | ||
| @@ -462,12 +456,16 @@ static void watchdog_disable(int cpu) | |||
| 462 | static void watchdog_enable_all_cpus(void) | 456 | static void watchdog_enable_all_cpus(void) |
| 463 | { | 457 | { |
| 464 | int cpu; | 458 | int cpu; |
| 465 | int result = 0; | 459 | |
| 460 | watchdog_enabled = 0; | ||
| 466 | 461 | ||
| 467 | for_each_online_cpu(cpu) | 462 | for_each_online_cpu(cpu) |
| 468 | result += watchdog_enable(cpu); | 463 | if (!watchdog_enable(cpu)) |
| 464 | /* if any cpu succeeds, watchdog is considered | ||
| 465 | enabled for the system */ | ||
| 466 | watchdog_enabled = 1; | ||
| 469 | 467 | ||
| 470 | if (result) | 468 | if (!watchdog_enabled) |
| 471 | printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n"); | 469 | printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n"); |
| 472 | 470 | ||
| 473 | } | 471 | } |
| @@ -476,9 +474,6 @@ static void watchdog_disable_all_cpus(void) | |||
| 476 | { | 474 | { |
| 477 | int cpu; | 475 | int cpu; |
| 478 | 476 | ||
| 479 | if (no_watchdog) | ||
| 480 | return; | ||
| 481 | |||
| 482 | for_each_online_cpu(cpu) | 477 | for_each_online_cpu(cpu) |
| 483 | watchdog_disable(cpu); | 478 | watchdog_disable(cpu); |
| 484 | 479 | ||
| @@ -498,10 +493,12 @@ int proc_dowatchdog_enabled(struct ctl_table *table, int write, | |||
| 498 | { | 493 | { |
| 499 | proc_dointvec(table, write, buffer, length, ppos); | 494 | proc_dointvec(table, write, buffer, length, ppos); |
| 500 | 495 | ||
| 501 | if (watchdog_enabled) | 496 | if (write) { |
| 502 | watchdog_enable_all_cpus(); | 497 | if (watchdog_enabled) |
| 503 | else | 498 | watchdog_enable_all_cpus(); |
| 504 | watchdog_disable_all_cpus(); | 499 | else |
| 500 | watchdog_disable_all_cpus(); | ||
| 501 | } | ||
| 505 | return 0; | 502 | return 0; |
| 506 | } | 503 | } |
| 507 | 504 | ||
| @@ -530,7 +527,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
| 530 | break; | 527 | break; |
| 531 | case CPU_ONLINE: | 528 | case CPU_ONLINE: |
| 532 | case CPU_ONLINE_FROZEN: | 529 | case CPU_ONLINE_FROZEN: |
| 533 | err = watchdog_enable(hotcpu); | 530 | if (watchdog_enabled) |
| 531 | err = watchdog_enable(hotcpu); | ||
| 534 | break; | 532 | break; |
| 535 | #ifdef CONFIG_HOTPLUG_CPU | 533 | #ifdef CONFIG_HOTPLUG_CPU |
| 536 | case CPU_UP_CANCELED: | 534 | case CPU_UP_CANCELED: |
| @@ -555,9 +553,6 @@ void __init lockup_detector_init(void) | |||
| 555 | void *cpu = (void *)(long)smp_processor_id(); | 553 | void *cpu = (void *)(long)smp_processor_id(); |
| 556 | int err; | 554 | int err; |
| 557 | 555 | ||
| 558 | if (no_watchdog) | ||
| 559 | return; | ||
| 560 | |||
| 561 | err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); | 556 | err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); |
| 562 | WARN_ON(notifier_to_errno(err)); | 557 | WARN_ON(notifier_to_errno(err)); |
| 563 | 558 | ||
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e187454d82f..b6c1ce3c53b 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -1162,7 +1162,12 @@ static void __split_huge_page_refcount(struct page *page) | |||
| 1162 | /* after clearing PageTail the gup refcount can be released */ | 1162 | /* after clearing PageTail the gup refcount can be released */ |
| 1163 | smp_mb(); | 1163 | smp_mb(); |
| 1164 | 1164 | ||
| 1165 | page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; | 1165 | /* |
| 1166 | * retain hwpoison flag of the poisoned tail page: | ||
| 1167 | * fix for the unsuitable process killed on Guest Machine(KVM) | ||
| 1168 | * by the memory-failure. | ||
| 1169 | */ | ||
| 1170 | page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON; | ||
| 1166 | page_tail->flags |= (page->flags & | 1171 | page_tail->flags |= (page->flags & |
| 1167 | ((1L << PG_referenced) | | 1172 | ((1L << PG_referenced) | |
| 1168 | (1L << PG_swapbacked) | | 1173 | (1L << PG_swapbacked) | |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3878cfe399d..da53a252b25 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -612,8 +612,10 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, | |||
| 612 | /* pagein of a big page is an event. So, ignore page size */ | 612 | /* pagein of a big page is an event. So, ignore page size */ |
| 613 | if (nr_pages > 0) | 613 | if (nr_pages > 0) |
| 614 | __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]); | 614 | __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]); |
| 615 | else | 615 | else { |
| 616 | __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]); | 616 | __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]); |
| 617 | nr_pages = -nr_pages; /* for event */ | ||
| 618 | } | ||
| 617 | 619 | ||
| 618 | __this_cpu_add(mem->stat->count[MEM_CGROUP_EVENTS], nr_pages); | 620 | __this_cpu_add(mem->stat->count[MEM_CGROUP_EVENTS], nr_pages); |
| 619 | 621 | ||
| @@ -1111,6 +1113,23 @@ static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem) | |||
| 1111 | return false; | 1113 | return false; |
| 1112 | } | 1114 | } |
| 1113 | 1115 | ||
| 1116 | /** | ||
| 1117 | * mem_cgroup_check_margin - check if the memory cgroup allows charging | ||
| 1118 | * @mem: memory cgroup to check | ||
| 1119 | * @bytes: the number of bytes the caller intends to charge | ||
| 1120 | * | ||
| 1121 | * Returns a boolean value on whether @mem can be charged @bytes or | ||
| 1122 | * whether this would exceed the limit. | ||
| 1123 | */ | ||
| 1124 | static bool mem_cgroup_check_margin(struct mem_cgroup *mem, unsigned long bytes) | ||
| 1125 | { | ||
| 1126 | if (!res_counter_check_margin(&mem->res, bytes)) | ||
| 1127 | return false; | ||
| 1128 | if (do_swap_account && !res_counter_check_margin(&mem->memsw, bytes)) | ||
| 1129 | return false; | ||
| 1130 | return true; | ||
| 1131 | } | ||
| 1132 | |||
| 1114 | static unsigned int get_swappiness(struct mem_cgroup *memcg) | 1133 | static unsigned int get_swappiness(struct mem_cgroup *memcg) |
| 1115 | { | 1134 | { |
| 1116 | struct cgroup *cgrp = memcg->css.cgroup; | 1135 | struct cgroup *cgrp = memcg->css.cgroup; |
| @@ -1837,23 +1856,34 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask, | |||
| 1837 | flags |= MEM_CGROUP_RECLAIM_NOSWAP; | 1856 | flags |= MEM_CGROUP_RECLAIM_NOSWAP; |
| 1838 | } else | 1857 | } else |
| 1839 | mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); | 1858 | mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); |
| 1840 | 1859 | /* | |
| 1841 | if (csize > PAGE_SIZE) /* change csize and retry */ | 1860 | * csize can be either a huge page (HPAGE_SIZE), a batch of |
| 1861 | * regular pages (CHARGE_SIZE), or a single regular page | ||
| 1862 | * (PAGE_SIZE). | ||
| 1863 | * | ||
| 1864 | * Never reclaim on behalf of optional batching, retry with a | ||
| 1865 | * single page instead. | ||
| 1866 | */ | ||
| 1867 | if (csize == CHARGE_SIZE) | ||
| 1842 | return CHARGE_RETRY; | 1868 | return CHARGE_RETRY; |
| 1843 | 1869 | ||
| 1844 | if (!(gfp_mask & __GFP_WAIT)) | 1870 | if (!(gfp_mask & __GFP_WAIT)) |
| 1845 | return CHARGE_WOULDBLOCK; | 1871 | return CHARGE_WOULDBLOCK; |
| 1846 | 1872 | ||
| 1847 | ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL, | 1873 | ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL, |
| 1848 | gfp_mask, flags); | 1874 | gfp_mask, flags); |
| 1875 | if (mem_cgroup_check_margin(mem_over_limit, csize)) | ||
| 1876 | return CHARGE_RETRY; | ||
| 1849 | /* | 1877 | /* |
| 1850 | * try_to_free_mem_cgroup_pages() might not give us a full | 1878 | * Even though the limit is exceeded at this point, reclaim |
| 1851 | * picture of reclaim. Some pages are reclaimed and might be | 1879 | * may have been able to free some pages. Retry the charge |
| 1852 | * moved to swap cache or just unmapped from the cgroup. | 1880 | * before killing the task. |
| 1853 | * Check the limit again to see if the reclaim reduced the | 1881 | * |
| 1854 | * current usage of the cgroup before giving up | 1882 | * Only for regular pages, though: huge pages are rather |
| 1883 | * unlikely to succeed so close to the limit, and we fall back | ||
| 1884 | * to regular pages anyway in case of failure. | ||
| 1855 | */ | 1885 | */ |
| 1856 | if (ret || mem_cgroup_check_under_limit(mem_over_limit)) | 1886 | if (csize == PAGE_SIZE && ret) |
| 1857 | return CHARGE_RETRY; | 1887 | return CHARGE_RETRY; |
| 1858 | 1888 | ||
| 1859 | /* | 1889 | /* |
| @@ -2323,13 +2353,19 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, | |||
| 2323 | gfp_t gfp_mask, enum charge_type ctype) | 2353 | gfp_t gfp_mask, enum charge_type ctype) |
| 2324 | { | 2354 | { |
| 2325 | struct mem_cgroup *mem = NULL; | 2355 | struct mem_cgroup *mem = NULL; |
| 2356 | int page_size = PAGE_SIZE; | ||
| 2326 | struct page_cgroup *pc; | 2357 | struct page_cgroup *pc; |
| 2358 | bool oom = true; | ||
| 2327 | int ret; | 2359 | int ret; |
| 2328 | int page_size = PAGE_SIZE; | ||
| 2329 | 2360 | ||
| 2330 | if (PageTransHuge(page)) { | 2361 | if (PageTransHuge(page)) { |
| 2331 | page_size <<= compound_order(page); | 2362 | page_size <<= compound_order(page); |
| 2332 | VM_BUG_ON(!PageTransHuge(page)); | 2363 | VM_BUG_ON(!PageTransHuge(page)); |
| 2364 | /* | ||
| 2365 | * Never OOM-kill a process for a huge page. The | ||
| 2366 | * fault handler will fall back to regular pages. | ||
| 2367 | */ | ||
| 2368 | oom = false; | ||
| 2333 | } | 2369 | } |
| 2334 | 2370 | ||
| 2335 | pc = lookup_page_cgroup(page); | 2371 | pc = lookup_page_cgroup(page); |
| @@ -2338,7 +2374,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, | |||
| 2338 | return 0; | 2374 | return 0; |
| 2339 | prefetchw(pc); | 2375 | prefetchw(pc); |
| 2340 | 2376 | ||
| 2341 | ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page_size); | 2377 | ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, oom, page_size); |
| 2342 | if (ret || !mem) | 2378 | if (ret || !mem) |
| 2343 | return ret; | 2379 | return ret; |
| 2344 | 2380 | ||
| @@ -5024,9 +5060,9 @@ struct cgroup_subsys mem_cgroup_subsys = { | |||
| 5024 | static int __init enable_swap_account(char *s) | 5060 | static int __init enable_swap_account(char *s) |
| 5025 | { | 5061 | { |
| 5026 | /* consider enabled if no parameter or 1 is given */ | 5062 | /* consider enabled if no parameter or 1 is given */ |
| 5027 | if (!s || !strcmp(s, "1")) | 5063 | if (!(*s) || !strcmp(s, "=1")) |
| 5028 | really_do_swap_account = 1; | 5064 | really_do_swap_account = 1; |
| 5029 | else if (!strcmp(s, "0")) | 5065 | else if (!strcmp(s, "=0")) |
| 5030 | really_do_swap_account = 0; | 5066 | really_do_swap_account = 0; |
| 5031 | return 1; | 5067 | return 1; |
| 5032 | } | 5068 | } |
| @@ -5034,7 +5070,8 @@ __setup("swapaccount", enable_swap_account); | |||
| 5034 | 5070 | ||
| 5035 | static int __init disable_swap_account(char *s) | 5071 | static int __init disable_swap_account(char *s) |
| 5036 | { | 5072 | { |
| 5037 | enable_swap_account("0"); | 5073 | printk_once("noswapaccount is deprecated and will be removed in 2.6.40. Use swapaccount=0 instead\n"); |
| 5074 | enable_swap_account("=0"); | ||
| 5038 | return 1; | 5075 | return 1; |
| 5039 | } | 5076 | } |
| 5040 | __setup("noswapaccount", disable_swap_account); | 5077 | __setup("noswapaccount", disable_swap_account); |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 548fbd70f02..0207c2f6f8b 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
| @@ -233,8 +233,8 @@ void shake_page(struct page *p, int access) | |||
| 233 | } | 233 | } |
| 234 | 234 | ||
| 235 | /* | 235 | /* |
| 236 | * Only all shrink_slab here (which would also | 236 | * Only call shrink_slab here (which would also shrink other caches) if |
| 237 | * shrink other caches) if access is not potentially fatal. | 237 | * access is not potentially fatal. |
| 238 | */ | 238 | */ |
| 239 | if (access) { | 239 | if (access) { |
| 240 | int nr; | 240 | int nr; |
| @@ -386,8 +386,6 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill, | |||
| 386 | struct task_struct *tsk; | 386 | struct task_struct *tsk; |
| 387 | struct anon_vma *av; | 387 | struct anon_vma *av; |
| 388 | 388 | ||
| 389 | if (!PageHuge(page) && unlikely(split_huge_page(page))) | ||
| 390 | return; | ||
| 391 | read_lock(&tasklist_lock); | 389 | read_lock(&tasklist_lock); |
| 392 | av = page_lock_anon_vma(page); | 390 | av = page_lock_anon_vma(page); |
| 393 | if (av == NULL) /* Not actually mapped anymore */ | 391 | if (av == NULL) /* Not actually mapped anymore */ |
| @@ -856,6 +854,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
| 856 | int ret; | 854 | int ret; |
| 857 | int kill = 1; | 855 | int kill = 1; |
| 858 | struct page *hpage = compound_head(p); | 856 | struct page *hpage = compound_head(p); |
| 857 | struct page *ppage; | ||
| 859 | 858 | ||
| 860 | if (PageReserved(p) || PageSlab(p)) | 859 | if (PageReserved(p) || PageSlab(p)) |
| 861 | return SWAP_SUCCESS; | 860 | return SWAP_SUCCESS; |
| @@ -897,6 +896,44 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
| 897 | } | 896 | } |
| 898 | 897 | ||
| 899 | /* | 898 | /* |
| 899 | * ppage: poisoned page | ||
| 900 | * if p is regular page(4k page) | ||
| 901 | * ppage == real poisoned page; | ||
| 902 | * else p is hugetlb or THP, ppage == head page. | ||
| 903 | */ | ||
| 904 | ppage = hpage; | ||
| 905 | |||
| 906 | if (PageTransHuge(hpage)) { | ||
| 907 | /* | ||
| 908 | * Verify that this isn't a hugetlbfs head page, the check for | ||
| 909 | * PageAnon is just for avoid tripping a split_huge_page | ||
| 910 | * internal debug check, as split_huge_page refuses to deal with | ||
| 911 | * anything that isn't an anon page. PageAnon can't go away fro | ||
| 912 | * under us because we hold a refcount on the hpage, without a | ||
| 913 | * refcount on the hpage. split_huge_page can't be safely called | ||
| 914 | * in the first place, having a refcount on the tail isn't | ||
| 915 | * enough * to be safe. | ||
| 916 | */ | ||
| 917 | if (!PageHuge(hpage) && PageAnon(hpage)) { | ||
| 918 | if (unlikely(split_huge_page(hpage))) { | ||
| 919 | /* | ||
| 920 | * FIXME: if splitting THP is failed, it is | ||
| 921 | * better to stop the following operation rather | ||
| 922 | * than causing panic by unmapping. System might | ||
| 923 | * survive if the page is freed later. | ||
| 924 | */ | ||
| 925 | printk(KERN_INFO | ||
| 926 | "MCE %#lx: failed to split THP\n", pfn); | ||
| 927 | |||
| 928 | BUG_ON(!PageHWPoison(p)); | ||
| 929 | return SWAP_FAIL; | ||
| 930 | } | ||
| 931 | /* THP is split, so ppage should be the real poisoned page. */ | ||
| 932 | ppage = p; | ||
| 933 | } | ||
| 934 | } | ||
| 935 | |||
| 936 | /* | ||
| 900 | * First collect all the processes that have the page | 937 | * First collect all the processes that have the page |
| 901 | * mapped in dirty form. This has to be done before try_to_unmap, | 938 | * mapped in dirty form. This has to be done before try_to_unmap, |
| 902 | * because ttu takes the rmap data structures down. | 939 | * because ttu takes the rmap data structures down. |
| @@ -905,12 +942,18 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
| 905 | * there's nothing that can be done. | 942 | * there's nothing that can be done. |
| 906 | */ | 943 | */ |
| 907 | if (kill) | 944 | if (kill) |
| 908 | collect_procs(hpage, &tokill); | 945 | collect_procs(ppage, &tokill); |
| 946 | |||
| 947 | if (hpage != ppage) | ||
| 948 | lock_page_nosync(ppage); | ||
| 909 | 949 | ||
| 910 | ret = try_to_unmap(hpage, ttu); | 950 | ret = try_to_unmap(ppage, ttu); |
| 911 | if (ret != SWAP_SUCCESS) | 951 | if (ret != SWAP_SUCCESS) |
| 912 | printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", | 952 | printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", |
| 913 | pfn, page_mapcount(hpage)); | 953 | pfn, page_mapcount(ppage)); |
| 954 | |||
| 955 | if (hpage != ppage) | ||
| 956 | unlock_page(ppage); | ||
| 914 | 957 | ||
| 915 | /* | 958 | /* |
| 916 | * Now that the dirty bit has been propagated to the | 959 | * Now that the dirty bit has been propagated to the |
| @@ -921,7 +964,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
| 921 | * use a more force-full uncatchable kill to prevent | 964 | * use a more force-full uncatchable kill to prevent |
| 922 | * any accesses to the poisoned memory. | 965 | * any accesses to the poisoned memory. |
| 923 | */ | 966 | */ |
| 924 | kill_procs_ao(&tokill, !!PageDirty(hpage), trapno, | 967 | kill_procs_ao(&tokill, !!PageDirty(ppage), trapno, |
| 925 | ret != SWAP_SUCCESS, p, pfn); | 968 | ret != SWAP_SUCCESS, p, pfn); |
| 926 | 969 | ||
| 927 | return ret; | 970 | return ret; |
| @@ -1022,19 +1065,22 @@ int __memory_failure(unsigned long pfn, int trapno, int flags) | |||
| 1022 | * The check (unnecessarily) ignores LRU pages being isolated and | 1065 | * The check (unnecessarily) ignores LRU pages being isolated and |
| 1023 | * walked by the page reclaim code, however that's not a big loss. | 1066 | * walked by the page reclaim code, however that's not a big loss. |
| 1024 | */ | 1067 | */ |
| 1025 | if (!PageLRU(p) && !PageHuge(p)) | 1068 | if (!PageHuge(p) && !PageTransCompound(p)) { |
| 1026 | shake_page(p, 0); | 1069 | if (!PageLRU(p)) |
| 1027 | if (!PageLRU(p) && !PageHuge(p)) { | 1070 | shake_page(p, 0); |
| 1028 | /* | 1071 | if (!PageLRU(p)) { |
| 1029 | * shake_page could have turned it free. | 1072 | /* |
| 1030 | */ | 1073 | * shake_page could have turned it free. |
| 1031 | if (is_free_buddy_page(p)) { | 1074 | */ |
| 1032 | action_result(pfn, "free buddy, 2nd try", DELAYED); | 1075 | if (is_free_buddy_page(p)) { |
| 1033 | return 0; | 1076 | action_result(pfn, "free buddy, 2nd try", |
| 1077 | DELAYED); | ||
| 1078 | return 0; | ||
| 1079 | } | ||
| 1080 | action_result(pfn, "non LRU", IGNORED); | ||
| 1081 | put_page(p); | ||
| 1082 | return -EBUSY; | ||
| 1034 | } | 1083 | } |
| 1035 | action_result(pfn, "non LRU", IGNORED); | ||
| 1036 | put_page(p); | ||
| 1037 | return -EBUSY; | ||
| 1038 | } | 1084 | } |
| 1039 | 1085 | ||
| 1040 | /* | 1086 | /* |
| @@ -1064,7 +1110,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags) | |||
| 1064 | * For error on the tail page, we should set PG_hwpoison | 1110 | * For error on the tail page, we should set PG_hwpoison |
| 1065 | * on the head page to show that the hugepage is hwpoisoned | 1111 | * on the head page to show that the hugepage is hwpoisoned |
| 1066 | */ | 1112 | */ |
| 1067 | if (PageTail(p) && TestSetPageHWPoison(hpage)) { | 1113 | if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) { |
| 1068 | action_result(pfn, "hugepage already hardware poisoned", | 1114 | action_result(pfn, "hugepage already hardware poisoned", |
| 1069 | IGNORED); | 1115 | IGNORED); |
| 1070 | unlock_page(hpage); | 1116 | unlock_page(hpage); |
| @@ -1295,7 +1341,10 @@ static int soft_offline_huge_page(struct page *page, int flags) | |||
| 1295 | ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0, | 1341 | ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0, |
| 1296 | true); | 1342 | true); |
| 1297 | if (ret) { | 1343 | if (ret) { |
| 1298 | putback_lru_pages(&pagelist); | 1344 | struct page *page1, *page2; |
| 1345 | list_for_each_entry_safe(page1, page2, &pagelist, lru) | ||
| 1346 | put_page(page1); | ||
| 1347 | |||
| 1299 | pr_debug("soft offline: %#lx: migration failed %d, type %lx\n", | 1348 | pr_debug("soft offline: %#lx: migration failed %d, type %lx\n", |
| 1300 | pfn, ret, page->flags); | 1349 | pfn, ret, page->flags); |
| 1301 | if (ret > 0) | 1350 | if (ret > 0) |
| @@ -1419,6 +1468,7 @@ int soft_offline_page(struct page *page, int flags) | |||
| 1419 | ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, | 1468 | ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, |
| 1420 | 0, true); | 1469 | 0, true); |
| 1421 | if (ret) { | 1470 | if (ret) { |
| 1471 | putback_lru_pages(&pagelist); | ||
| 1422 | pr_info("soft offline: %#lx: migration failed %d, type %lx\n", | 1472 | pr_info("soft offline: %#lx: migration failed %d, type %lx\n", |
| 1423 | pfn, ret, page->flags); | 1473 | pfn, ret, page->flags); |
| 1424 | if (ret > 0) | 1474 | if (ret > 0) |
diff --git a/mm/migrate.c b/mm/migrate.c index 9f29a3b7aac..76611525380 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
| @@ -772,6 +772,7 @@ uncharge: | |||
| 772 | unlock: | 772 | unlock: |
| 773 | unlock_page(page); | 773 | unlock_page(page); |
| 774 | 774 | ||
| 775 | move_newpage: | ||
| 775 | if (rc != -EAGAIN) { | 776 | if (rc != -EAGAIN) { |
| 776 | /* | 777 | /* |
| 777 | * A page that has been migrated has all references | 778 | * A page that has been migrated has all references |
| @@ -785,8 +786,6 @@ unlock: | |||
| 785 | putback_lru_page(page); | 786 | putback_lru_page(page); |
| 786 | } | 787 | } |
| 787 | 788 | ||
| 788 | move_newpage: | ||
| 789 | |||
| 790 | /* | 789 | /* |
| 791 | * Move the new page to the LRU. If migration was not successful | 790 | * Move the new page to the LRU. If migration was not successful |
| 792 | * then this will free the page. | 791 | * then this will free the page. |
| @@ -981,10 +980,6 @@ int migrate_huge_pages(struct list_head *from, | |||
| 981 | } | 980 | } |
| 982 | rc = 0; | 981 | rc = 0; |
| 983 | out: | 982 | out: |
| 984 | |||
| 985 | list_for_each_entry_safe(page, page2, from, lru) | ||
| 986 | put_page(page); | ||
| 987 | |||
| 988 | if (rc) | 983 | if (rc) |
| 989 | return rc; | 984 | return rc; |
| 990 | 985 | ||
