95 files changed, 1050 insertions, 962 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 8511d3532c27..d8362cf9909e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -577,9 +577,6 @@ and is between 256 and 4096 characters. It is defined in the file
 			a memory unit (amount[KMG]). See also
 			Documentation/kdump/kdump.txt for a example.
 
-	cs4232=		[HW,OSS]
-			Format: <io>,<irq>,<dma>,<dma2>,<mpuio>,<mpuirq>
-
 	cs89x0_dma=	[HW,NET]
 			Format: <dma>
 
@@ -732,10 +729,6 @@ and is between 256 and 4096 characters. It is defined in the file
 			Default value is 0.
 			Value can be changed at runtime via /selinux/enforce.
 
-	es1371=		[HW,OSS]
-			Format: <spdif>,[<nomix>,[<amplifier>]]
-			See also header of sound/oss/es1371.c.
-
 	ether=		[HW,NET] Ethernet cards parameters
 			This option is obsoleted by the "netdev=" option, which
 			has equivalent usage. See its documentation for details.
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 64eb1100eec1..0f5d26bea80f 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -349,6 +349,7 @@ STAC92HD73*
 STAC92HD83*
 ===========
   ref		Reference board
+  mic-ref	Reference board with power managment for ports
 
 STAC9872
 ========
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index e02a359d2aa5..02b47a603fc8 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -3,6 +3,9 @@
 
 /*
  * Copyright 1992, Linus Torvalds.
+ *
+ * Note: inlines with more than a single statement should be marked
+ * __always_inline to avoid problems with older gcc's inlining heuristics.
  */
 
 #ifndef _LINUX_BITOPS_H
@@ -53,7 +56,8 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline void
+set_bit(unsigned int nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "orb %1,%0"
@@ -90,7 +94,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+clear_bit(int nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "andb %1,%0"
@@ -204,7 +209,8 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
  *
  * This is the same as test_and_set_bit on x86.
  */
-static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
+static __always_inline int
+test_and_set_bit_lock(int nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }
@@ -300,7 +306,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 	return oldbit;
 }
 
-static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr % BITS_PER_LONG)) &
 		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
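The new header comment is the point of this hunk: a multi-statement inline can be left out-of-line by older gcc's size heuristics, so the helpers are forced inline explicitly. A minimal, compilable sketch of the pattern (plain userspace C, not taken from the kernel tree; the helper name is made up):

/* Sketch only: forcing inlining with the GCC/Clang attribute so a small
 * multi-statement helper never depends on the compiler's heuristics. */
#include <stdio.h>

#define my_always_inline inline __attribute__((always_inline))

static my_always_inline int set_flag(unsigned int nr, unsigned long *word)
{
	unsigned long old = *word;	/* more than one statement, */
	*word |= 1UL << nr;		/* so do not rely on size heuristics: */
	return (old >> nr) & 1;		/* force the inline instead */
}

int main(void)
{
	unsigned long flags = 0;
	printf("old bit %d, word now %#lx\n", set_flag(3, &flags), flags);
	return 0;
}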
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 05cfed4485fa..1dbbdf4be9b4 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -99,7 +99,6 @@ extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
  * A boot-time mapping is currently limited to at most 16 pages.
  */
 extern void early_ioremap_init(void);
-extern void early_ioremap_clear(void);
 extern void early_ioremap_reset(void);
 extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
 extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index cb58643947b9..358acc59ae04 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -202,6 +202,35 @@
 #define MSR_IA32_THERM_STATUS		0x0000019c
 #define MSR_IA32_MISC_ENABLE		0x000001a0
 
+/* MISC_ENABLE bits: architectural */
+#define MSR_IA32_MISC_ENABLE_FAST_STRING	(1ULL << 0)
+#define MSR_IA32_MISC_ENABLE_TCC		(1ULL << 1)
+#define MSR_IA32_MISC_ENABLE_EMON		(1ULL << 7)
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL	(1ULL << 11)
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL	(1ULL << 12)
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP	(1ULL << 16)
+#define MSR_IA32_MISC_ENABLE_MWAIT		(1ULL << 18)
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID	(1ULL << 22)
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE	(1ULL << 23)
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE		(1ULL << 34)
+
+/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT		(1ULL << 2)
+#define MSR_IA32_MISC_ENABLE_TM1		(1ULL << 3)
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE	(1ULL << 4)
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE	(1ULL << 6)
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK	(1ULL << 8)
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE	(1ULL << 9)
+#define MSR_IA32_MISC_ENABLE_FERR		(1ULL << 10)
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX	(1ULL << 10)
+#define MSR_IA32_MISC_ENABLE_TM2		(1ULL << 13)
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE	(1ULL << 19)
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK	(1ULL << 20)
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT	(1ULL << 24)
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE	(1ULL << 37)
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE	(1ULL << 38)
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE	(1ULL << 39)
+
 /* Intel Model 6 */
 #define MSR_P6_EVNTSEL0			0x00000186
 #define MSR_P6_EVNTSEL1			0x00000187
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index cb7c151a8bff..dd14c54ac718 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -42,6 +42,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 
 static inline void pte_free(struct mm_struct *mm, struct page *pte)
 {
+	pgtable_page_dtor(pte);
 	__free_page(pte);
 }
 
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 9c6797c3e56c..c0b0bda754ee 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -40,7 +40,7 @@ asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
 			     struct old_sigaction __user *);
 asmlinkage int sys_sigaltstack(unsigned long);
 asmlinkage unsigned long sys_sigreturn(unsigned long);
-asmlinkage int sys_rt_sigreturn(struct pt_regs);
+asmlinkage int sys_rt_sigreturn(unsigned long);
 
 /* kernel/ioport.c */
 asmlinkage long sys_iopl(unsigned long);
diff --git a/arch/x86/include/asm/timex.h b/arch/x86/include/asm/timex.h
index 1287dc1347d6..b5c9d45c981f 100644
--- a/arch/x86/include/asm/timex.h
+++ b/arch/x86/include/asm/timex.h
@@ -1,18 +1,13 @@
-/* x86 architecture timex specifications */
 #ifndef _ASM_X86_TIMEX_H
 #define _ASM_X86_TIMEX_H
 
 #include <asm/processor.h>
 #include <asm/tsc.h>
 
-#ifdef CONFIG_X86_ELAN
-# define PIT_TICK_RATE 1189200 /* AMD Elan has different frequency! */
-#elif defined(CONFIG_X86_RDC321X)
-# define PIT_TICK_RATE 1041667 /* Underlying HZ for R8610 */
-#else
-# define PIT_TICK_RATE 1193182 /* Underlying HZ */
-#endif
-#define CLOCK_TICK_RATE PIT_TICK_RATE
+/* The PIT ticks at this frequency (in HZ): */
+#define PIT_TICK_RATE		1193182
+
+#define CLOCK_TICK_RATE		PIT_TICK_RATE
 
 #define ARCH_HAS_READ_CURRENT_TIMER
 
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 0f830e4f5675..4b6df2469fe3 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -895,6 +895,10 @@ void disable_local_APIC(void)
 {
 	unsigned int value;
 
+	/* APIC hasn't been mapped yet */
+	if (!apic_phys)
+		return;
+
 	clear_local_APIC();
 
 	/*
@@ -1833,6 +1837,11 @@ void __cpuinit generic_processor_info(int apicid, int version)
 	num_processors++;
 	cpu = cpumask_next_zero(-1, cpu_present_mask);
 
+	if (version != apic_version[boot_cpu_physical_apicid])
+		WARN_ONCE(1,
+			"ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n",
+			apic_version[boot_cpu_physical_apicid], cpu, version);
+
 	physid_set(apicid, phys_cpu_present_map);
 	if (apicid == boot_cpu_physical_apicid) {
 		/*
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 6f11e029e8c5..4b1c319d30c3 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -145,13 +145,14 @@ typedef union {
 
 struct drv_cmd {
 	unsigned int type;
-	cpumask_var_t mask;
+	const struct cpumask *mask;
 	drv_addr_union addr;
 	u32 val;
 };
 
-static void do_drv_read(struct drv_cmd *cmd)
+static long do_drv_read(void *_cmd)
 {
+	struct drv_cmd *cmd = _cmd;
 	u32 h;
 
 	switch (cmd->type) {
@@ -166,10 +167,12 @@ static void do_drv_read(struct drv_cmd *cmd)
 	default:
 		break;
 	}
+	return 0;
 }
 
-static void do_drv_write(struct drv_cmd *cmd)
+static long do_drv_write(void *_cmd)
 {
+	struct drv_cmd *cmd = _cmd;
 	u32 lo, hi;
 
 	switch (cmd->type) {
@@ -186,30 +189,23 @@ static void do_drv_write(struct drv_cmd *cmd)
 	default:
 		break;
 	}
+	return 0;
 }
 
 static void drv_read(struct drv_cmd *cmd)
 {
-	cpumask_t saved_mask = current->cpus_allowed;
 	cmd->val = 0;
 
-	set_cpus_allowed_ptr(current, cmd->mask);
-	do_drv_read(cmd);
-	set_cpus_allowed_ptr(current, &saved_mask);
+	work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
 }
 
 static void drv_write(struct drv_cmd *cmd)
 {
-	cpumask_t saved_mask = current->cpus_allowed;
 	unsigned int i;
 
 	for_each_cpu(i, cmd->mask) {
-		set_cpus_allowed_ptr(current, cpumask_of(i));
-		do_drv_write(cmd);
+		work_on_cpu(i, do_drv_write, cmd);
 	}
-
-	set_cpus_allowed_ptr(current, &saved_mask);
-	return;
 }
 
 static u32 get_cur_val(const struct cpumask *mask)
@@ -235,8 +231,7 @@ static u32 get_cur_val(const struct cpumask *mask)
 		return 0;
 	}
 
-	cpumask_copy(cmd.mask, mask);
-
+	cmd.mask = mask;
 	drv_read(&cmd);
 
 	dprintk("get_cur_val = %u\n", cmd.val);
@@ -368,7 +363,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 	return freq;
 }
 
-static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq,
+static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
 				struct acpi_cpufreq_data *data)
 {
 	unsigned int cur_freq;
@@ -403,9 +398,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		return -ENODEV;
 	}
 
-	if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL)))
-		return -ENOMEM;
-
 	perf = data->acpi_data;
 	result = cpufreq_frequency_table_target(policy,
 						data->freq_table,
@@ -450,9 +442,9 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 
 	/* cpufreq holds the hotplug lock, so we are safe from here on */
 	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
-		cpumask_and(cmd.mask, cpu_online_mask, policy->cpus);
+		cmd.mask = policy->cpus;
 	else
-		cpumask_copy(cmd.mask, cpumask_of(policy->cpu));
+		cmd.mask = cpumask_of(policy->cpu);
 
 	freqs.old = perf->states[perf->state].core_frequency * 1000;
 	freqs.new = data->freq_table[next_state].frequency;
@@ -479,7 +471,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	perf->state = next_perf_state;
 
 out:
-	free_cpumask_var(cmd.mask);
 	return result;
 }
 
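The conversion above replaces the set_cpus_allowed_ptr() migration dance with work_on_cpu(), which queues the callback on the target CPU's workqueue and sleeps until it returns. A hedged sketch of the calling pattern, written as a trivial module (the module, struct and callback names are made up for illustration, not part of the patch):

/* Sketch only: the work_on_cpu() pattern used above. */
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

struct rd_request {
	int cpu_seen;
};

static long do_read(void *arg)
{
	struct rd_request *req = arg;

	/* Runs on the CPU handed to work_on_cpu(), in process context. */
	req->cpu_seen = smp_processor_id();
	return 0;
}

static int __init wkcpu_demo_init(void)
{
	struct rd_request req = { .cpu_seen = -1 };

	/* May sleep: not for atomic context or with spinlocks held. */
	work_on_cpu(0, do_read, &req);
	pr_info("callback ran on CPU %d\n", req.cpu_seen);
	return 0;
}

static void __exit wkcpu_demo_exit(void)
{
}

module_init(wkcpu_demo_init);
module_exit(wkcpu_demo_exit);
MODULE_LICENSE("GPL");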
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8ea6929e974c..549f2ada55f5 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -29,6 +29,19 @@
 
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
+	/* Unmask CPUID levels if masked: */
+	if (c->x86 == 6 && c->x86_model >= 15) {
+		u64 misc_enable;
+
+		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+
+		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
+			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
+			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+			c->cpuid_level = cpuid_eax(0);
+		}
+	}
+
 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
 	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index b59ddcc88cd8..0c0a455fe95c 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -33,11 +33,13 @@ u64 mtrr_tom2;
 struct mtrr_state_type mtrr_state = {};
 EXPORT_SYMBOL_GPL(mtrr_state);
 
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "mtrr."
-
-static int mtrr_show;
-module_param_named(show, mtrr_show, bool, 0);
+static int __initdata mtrr_show;
+static int __init mtrr_debug(char *opt)
+{
+	mtrr_show = 1;
+	return 0;
+}
+early_param("mtrr.show", mtrr_debug);
 
 /*
  * Returns the effective MTRR type for the region
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index cd759ad90690..64d5ad0b8add 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -628,11 +628,12 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 
 	switch (action & 0xf) {
 	case CPU_ONLINE:
-		INIT_DELAYED_WORK(&work.work, hpet_work);
+		INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
 		init_completion(&work.complete);
 		/* FIXME: add schedule_work_on() */
 		schedule_delayed_work_on(cpu, &work.work, 0);
 		wait_for_completion(&work.complete);
+		destroy_timer_on_stack(&work.work.timer);
 		break;
 	case CPU_DEAD:
 		if (hdev) {
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 55c46074eba0..01161077a49c 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -136,7 +136,7 @@ static void __init setup_cpu_pda_map(void)
 #ifdef CONFIG_X86_64
 
 /* correctly size the local cpu masks */
-static void setup_cpu_local_masks(void)
+static void __init setup_cpu_local_masks(void)
 {
 	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
 	alloc_bootmem_cpumask_var(&cpu_callin_mask);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 89bb7668041d..df0587f24c54 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -632,9 +632,16 @@ badframe:
 }
 
 #ifdef CONFIG_X86_32
-asmlinkage int sys_rt_sigreturn(struct pt_regs regs)
+/*
+ * Note: do not pass in pt_regs directly as with tail-call optimization
+ * GCC will incorrectly stomp on the caller's frame and corrupt user-space
+ * register state:
+ */
+asmlinkage int sys_rt_sigreturn(unsigned long __unused)
 {
-	return do_rt_sigreturn(&regs);
+	struct pt_regs *regs = (struct pt_regs *)&__unused;
+
+	return do_rt_sigreturn(regs);
 }
 #else /* !CONFIG_X86_32 */
 asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index f885023167e0..6812b829ed83 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -200,6 +200,7 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
 				destination_timeouts = 0;
 			}
 		}
+		cpu_relax();
 	}
 	return FLUSH_COMPLETE;
 }
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 23206ba16874..1d3302cc2ddf 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -858,7 +858,7 @@ void __init vmi_init(void)
 #endif
 }
 
-void vmi_activate(void)
+void __init vmi_activate(void)
 {
 	unsigned long flags;
 
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 4a20b2f9a381..7c8ca91bb9ec 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -56,7 +56,7 @@ do { \
 		"	jmp 2b\n" \
 		".previous\n" \
 		_ASM_EXTABLE(0b,3b) \
-		: "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \
+		: "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \
 		  "=&D" (__d2) \
 		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
 		: "memory"); \
@@ -218,7 +218,7 @@ long strnlen_user(const char __user *s, long n)
 		"  .align 4\n"
 		"  .long 0b,2b\n"
 		".previous"
-		:"=r" (n), "=D" (s), "=a" (res), "=c" (tmp)
+		:"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
 		:"0" (n), "1" (s), "2" (0), "3" (mask)
 		:"cc");
 	return res & mask;
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 64d6c84e6353..ec13cb5f17ed 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -32,7 +32,7 @@ do { \
 		"	jmp 2b\n" \
 		".previous\n" \
 		_ASM_EXTABLE(0b,3b) \
-		: "=r"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \
+		: "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \
 		  "=&D" (__d2) \
 		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
 		: "memory"); \
@@ -86,7 +86,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
 		".previous\n"
 		_ASM_EXTABLE(0b,3b)
 		_ASM_EXTABLE(1b,2b)
-		: [size8] "=c"(size), [dst] "=&D" (__d0)
+		: [size8] "=&c"(size), [dst] "=&D" (__d0)
 		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
 		  [zero] "r" (0UL), [eight] "r" (8UL));
 	return size;
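Both usercopy files get the same one-character fix: "&" marks an output operand as earlyclobber, telling GCC the asm may write that output before it has finished consuming the inputs, so the output must not be allocated to the same register as any input. A small standalone x86-64 illustration of the constraint (not kernel code; the helper is made up):

/* Sketch only: "=&r" forces the output into its own register even though
 * inputs are still live when the first instruction writes it. */
#include <stdio.h>

static unsigned long scale5_add(unsigned long a, unsigned long b)
{
	unsigned long t;

	asm("leaq (%[a],%[a],4), %[t]\n\t"	/* t = a * 5: writes t early   */
	    "addq %[b], %[t]"			/* ...while input b is still read */
	    : [t] "=&r" (t)			/* earlyclobber output          */
	    : [a] "r" (a), [b] "r" (b)
	    : "cc");
	return t;
}

int main(void)
{
	printf("%lu\n", scale5_add(6, 7));	/* prints 37 */
	return 0;
}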
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 88f1b10de3be..2cef05074413 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -138,6 +138,47 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	return pte_offset_kernel(pmd, 0);
 }
 
+static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+					   unsigned long vaddr, pte_t *lastpte)
+{
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * Something (early fixmap) may already have put a pte
+	 * page here, which causes the page table allocation
+	 * to become nonlinear. Attempt to fix it, and if it
+	 * is still nonlinear then we have to bug.
+	 */
+	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
+	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
+
+	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
+	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
+	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
+	    && ((__pa(pte) >> PAGE_SHIFT) < table_start
+		|| (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
+		pte_t *newpte;
+		int i;
+
+		BUG_ON(after_init_bootmem);
+		newpte = alloc_low_page();
+		for (i = 0; i < PTRS_PER_PTE; i++)
+			set_pte(newpte + i, pte[i]);
+
+		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
+		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
+		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
+		__flush_tlb_all();
+
+		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
+		pte = newpte;
+	}
+	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
+	       && vaddr > fix_to_virt(FIX_KMAP_END)
+	       && lastpte && lastpte + PTRS_PER_PTE != pte);
+#endif
+	return pte;
+}
+
 /*
  * This function initializes a certain range of kernel virtual memory
  * with new bootmem page tables, everywhere page tables are missing in
@@ -154,6 +195,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 	unsigned long vaddr;
 	pgd_t *pgd;
 	pmd_t *pmd;
+	pte_t *pte = NULL;
 
 	vaddr = start;
 	pgd_idx = pgd_index(vaddr);
@@ -165,7 +207,8 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 		pmd = pmd + pmd_index(vaddr);
 		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
 							pmd++, pmd_idx++) {
-			one_page_table_init(pmd);
+			pte = page_table_kmap_check(one_page_table_init(pmd),
+						    pmd, vaddr, pte);
 
 			vaddr += PMD_SIZE;
 		}
@@ -508,7 +551,6 @@ static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
 	 * Fixed mappings, only the page table structure has to be
 	 * created - mappings will be set by set_fixmap():
 	 */
-	early_ioremap_clear();
 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
 	page_table_range_init(vaddr, end, pgd_base);
@@ -801,7 +843,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
 		tables += PAGE_ALIGN(ptes * sizeof(pte_t));
 
 	/* for fixmap */
-	tables += PAGE_SIZE * 2;
+	tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));
 
 	/*
 	 * RED-PEN putting page tables only on node 0 could
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 23f68e77ad1f..e6d36b490250 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -596,7 +596,7 @@ static void __init init_gbpages(void)
 		direct_gbpages = 0;
 }
 
-static unsigned long __init kernel_physical_mapping_init(unsigned long start,
+static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
 						unsigned long end,
 						unsigned long page_size_mask)
 {
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index d0151d8ce452..ca53224fc56c 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -17,6 +17,7 @@
  */
 
 #include <asm/iomap.h>
+#include <asm/pat.h>
 #include <linux/module.h>
 
 /* Map 'pfn' using fixed map 'type' and protections 'prot'
@@ -29,6 +30,15 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 
 	pagefault_disable();
 
+	/*
+	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
+	 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
+	 * MTRR is UC or WC.  UC_MINUS gets the real intention, of the
+	 * user, which is "WC if the MTRR is WC, UC if you can't do that."
+	 */
+	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
+		prot = PAGE_KERNEL_UC_MINUS;
+
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, prot));
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index bd85d42819e1..af750ab973b6 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -557,34 +557,9 @@ void __init early_ioremap_init(void)
 	}
 }
 
-void __init early_ioremap_clear(void)
-{
-	pmd_t *pmd;
-
-	if (early_ioremap_debug)
-		printk(KERN_INFO "early_ioremap_clear()\n");
-
-	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-	pmd_clear(pmd);
-	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
-	__flush_tlb_all();
-}
-
 void __init early_ioremap_reset(void)
 {
-	enum fixed_addresses idx;
-	unsigned long addr, phys;
-	pte_t *pte;
-
 	after_paging_init = 1;
-	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
-		addr = fix_to_virt(idx);
-		pte = early_ioremap_pte(addr);
-		if (pte_present(*pte)) {
-			phys = pte_val(*pte) & PAGE_MASK;
-			set_fixmap(idx, phys);
-		}
-	}
 }
 
 static void __init __early_set_fixmap(enum fixed_addresses idx,
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e89d24815f26..84ba74820ad6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -534,6 +534,36 @@ out_unlock:
 	return 0;
 }
 
+static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
+			       int primary)
+{
+	/*
+	 * Ignore all non primary paths.
+	 */
+	if (!primary)
+		return 0;
+
+	/*
+	 * Ignore the NULL PTE for kernel identity mapping, as it is expected
+	 * to have holes.
+	 * Also set numpages to '1' indicating that we processed cpa req for
+	 * one virtual address page and its pfn. TBD: numpages can be set based
+	 * on the initial value and the level returned by lookup_address().
+	 */
+	if (within(vaddr, PAGE_OFFSET,
+		   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
+		cpa->numpages = 1;
+		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
+		return 0;
+	} else {
+		WARN(1, KERN_WARNING "CPA: called for zero pte. "
+		     "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
+		     *cpa->vaddr);
+
+		return -EFAULT;
+	}
+}
+
 static int __change_page_attr(struct cpa_data *cpa, int primary)
 {
 	unsigned long address;
@@ -549,17 +579,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
-		return 0;
+		return __cpa_process_fault(cpa, address, primary);
 
 	old_pte = *kpte;
-	if (!pte_val(old_pte)) {
-		if (!primary)
-			return 0;
-		WARN(1, KERN_WARNING "CPA: called for zero pte. "
-		     "vaddr = %lx cpa->vaddr = %lx\n", address,
-		     *cpa->vaddr);
-		return -EINVAL;
-	}
+	if (!pte_val(old_pte))
+		return __cpa_process_fault(cpa, address, primary);
 
 	if (level == PG_LEVEL_4K) {
 		pte_t new_pte;
@@ -657,12 +681,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
 	vaddr = *cpa->vaddr;
 
 	if (!(within(vaddr, PAGE_OFFSET,
-		    PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT))
-#ifdef CONFIG_X86_64
-		|| within(vaddr, PAGE_OFFSET + (1UL<<32),
-		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))
-#endif
-	)) {
+		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
 
 		alias_cpa = *cpa;
 		temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 8b08fb955274..7b61036427df 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -333,11 +333,23 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 					      req_type & _PAGE_CACHE_MASK);
 	}
 
-	is_range_ram = pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return reserve_ram_pages_type(start, end, req_type, new_type);
-	else if (is_range_ram < 0)
-		return -EINVAL;
+	if (new_type)
+		*new_type = actual_type;
+
+	/*
+	 * For legacy reasons, some parts of the physical address range in the
+	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
+	 * the e820 tables). So we will track the memory attributes of this
+	 * legacy 1MB region using the linear memtype_list always.
+	 */
+	if (end >= ISA_END_ADDRESS) {
+		is_range_ram = pagerange_is_ram(start, end);
+		if (is_range_ram == 1)
+			return reserve_ram_pages_type(start, end, req_type,
+						      new_type);
+		else if (is_range_ram < 0)
+			return -EINVAL;
+	}
 
 	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
@@ -347,9 +359,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	new->end	= end;
 	new->type	= actual_type;
 
-	if (new_type)
-		*new_type = actual_type;
-
 	spin_lock(&memtype_lock);
 
 	if (cached_entry && start >= cached_start)
@@ -437,11 +446,19 @@ int free_memtype(u64 start, u64 end)
 	if (is_ISA_range(start, end - 1))
 		return 0;
 
-	is_range_ram = pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return free_ram_pages_type(start, end);
-	else if (is_range_ram < 0)
-		return -EINVAL;
+	/*
+	 * For legacy reasons, some parts of the physical address range in the
+	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
+	 * the e820 tables). So we will track the memory attributes of this
+	 * legacy 1MB region using the linear memtype_list always.
+	 */
+	if (end >= ISA_END_ADDRESS) {
+		is_range_ram = pagerange_is_ram(start, end);
+		if (is_range_ram == 1)
+			return free_ram_pages_type(start, end);
+		else if (is_range_ram < 0)
+			return -EINVAL;
+	}
 
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 96316fd47233..33fbeb664f08 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3427,7 +3427,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 		ret = i915_gem_init_phys_object(dev, id,
 						obj->size);
 		if (ret) {
-			DRM_ERROR("failed to init phys object %d size: %d\n", id, obj->size);
+			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
 			goto out;
 		}
 	}
diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c
index a5ba820d69bb..a638e952d67a 100644
--- a/drivers/ide/falconide.c
+++ b/drivers/ide/falconide.c
@@ -82,7 +82,7 @@ static const struct ide_tp_ops falconide_tp_ops = {
 
 static const struct ide_port_info falconide_port_info = {
 	.tp_ops			= &falconide_tp_ops,
-	.host_flags		= IDE_HFLAG_NO_DMA,
+	.host_flags		= IDE_HFLAG_NO_DMA | IDE_HFLAG_SERIALIZE,
 };
 
 static void __init falconide_setup_ports(hw_regs_t *hw)
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 312127ea443a..0db1ed9f5fc2 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -649,7 +649,8 @@ static int ide_register_port(ide_hwif_t *hwif)
 	/* register with global device tree */
 	dev_set_name(&hwif->gendev, hwif->name);
 	hwif->gendev.driver_data = hwif;
-	hwif->gendev.parent = hwif->dev;
+	if (hwif->gendev.parent == NULL)
+		hwif->gendev.parent = hwif->dev;
 	hwif->gendev.release = hwif_release_dev;
 
 	ret = device_register(&hwif->gendev);
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index a7ac490c9ae3..f38aac78044c 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -346,7 +346,8 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
 {
 	struct clk *clk;
 	struct resource *mem, *irq;
-	unsigned long base, rate;
+	void __iomem *base;
+	unsigned long rate;
 	int i, rc;
 	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
 
@@ -382,11 +383,13 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
 	base = IO_ADDRESS(mem->start);
 
 	/* Configure the Palm Chip controller */
-	palm_bk3710_chipinit((void __iomem *)base);
+	palm_bk3710_chipinit(base);
 
 	for (i = 0; i < IDE_NR_PORTS - 2; i++)
-		hw.io_ports_array[i] = base + IDE_PALM_ATA_PRI_REG_OFFSET + i;
-	hw.io_ports.ctl_addr = base + IDE_PALM_ATA_PRI_CTL_OFFSET;
+		hw.io_ports_array[i] = (unsigned long)
+				(base + IDE_PALM_ATA_PRI_REG_OFFSET + i);
+	hw.io_ports.ctl_addr = (unsigned long)
+				(base + IDE_PALM_ATA_PRI_CTL_OFFSET);
 	hw.irq = irq->start;
 	hw.dev = &pdev->dev;
 	hw.chipset = ide_palm3710;
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 2e03b6d796d3..e76d715e4342 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -393,16 +393,21 @@ oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
 	return;
 
 fail:
+	entry->event = NULL;
 	cpu_buf->sample_lost_overflow++;
 }
 
 int oprofile_add_data(struct op_entry *entry, unsigned long val)
 {
+	if (!entry->event)
+		return 0;
 	return op_cpu_buffer_add_data(entry, val);
 }
 
 int oprofile_write_commit(struct op_entry *entry)
 {
+	if (!entry->event)
+		return -EINVAL;
 	return op_cpu_buffer_write_commit(entry);
 }
 
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 63f81c44846a..272995d20293 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -66,6 +66,13 @@ static inline void op_cpu_buffer_reset(int cpu)
 	cpu_buf->last_task = NULL;
 }
 
+/*
+ * op_cpu_buffer_add_data() and op_cpu_buffer_write_commit() may be
+ * called only if op_cpu_buffer_write_reserve() did not return NULL or
+ * entry->event != NULL, otherwise entry->size or entry->event will be
+ * used uninitialized.
+ */
+
 struct op_sample
 *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
 int op_cpu_buffer_write_commit(struct op_entry *entry);
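The comment added above states the contract the cpu_buffer.c hunk enforces: only add data or commit if the reserve step actually produced a sample. A hedged sketch of a caller honouring that contract (the calling function and the meaning of the size argument as "number of data words" are assumptions for illustration, not taken from the patch):

/* Sketch only: reserve, add, commit -- and bail out if reserve failed. */
#include "cpu_buffer.h"

static void demo_log_values(struct op_entry *entry, unsigned long *vals, int n)
{
	struct op_sample *sample;
	int i;

	sample = op_cpu_buffer_write_reserve(entry, n);
	if (!sample)
		return;		/* entry->event is NULL: do not add or commit */

	for (i = 0; i < n; i++)
		op_cpu_buffer_add_data(entry, vals[i]);

	op_cpu_buffer_write_commit(entry);
}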
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 8dc7109d61b7..2ba8f95516a0 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -298,6 +298,14 @@ static int decrease_reservation(unsigned long nr_pages)
 		frame_list[i] = pfn_to_mfn(pfn);
 
 		scrub_page(page);
+
+		if (!PageHighMem(page)) {
+			ret = HYPERVISOR_update_va_mapping(
+				(unsigned long)__va(pfn << PAGE_SHIFT),
+				__pte_ma(0), 0);
+			BUG_ON(ret);
+		}
+
 	}
 
 	/* Ensure that ballooned highmem pages don't have kmaps. */
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
index 875a4c59c594..a9592d981b10 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenfs/xenbus.c
@@ -291,7 +291,7 @@ static void watch_fired(struct xenbus_watch *watch,
 static int xenbus_write_transaction(unsigned msg_type,
				    struct xenbus_file_priv *u)
 {
-	int rc, ret;
+	int rc;
 	void *reply;
 	struct xenbus_transaction_holder *trans = NULL;
 	LIST_HEAD(staging_q);
@@ -326,15 +326,14 @@ static int xenbus_write_transaction(unsigned msg_type,
 	}
 
 	mutex_lock(&u->reply_mutex);
-	ret = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
-	if (!ret)
-		ret = queue_reply(&staging_q, reply, u->u.msg.len);
-	if (!ret) {
+	rc = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
+	if (!rc)
+		rc = queue_reply(&staging_q, reply, u->u.msg.len);
+	if (!rc) {
 		list_splice_tail(&staging_q, &u->read_buffers);
 		wake_up(&u->read_waitq);
 	} else {
 		queue_cleanup(&staging_q);
-		rc = ret;
 	}
 	mutex_unlock(&u->reply_mutex);
 
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index e0c7ada08a1f..ba76b68c52ff 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -281,7 +281,8 @@ __releases(&fc->lock)
 		fc->blocked = 0;
 		wake_up_all(&fc->blocked_waitq);
 	}
-	if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
+	if (fc->num_background == FUSE_CONGESTION_THRESHOLD &&
+	    fc->connected) {
 		clear_bdi_congested(&fc->bdi, READ);
 		clear_bdi_congested(&fc->bdi, WRITE);
 	}
@@ -825,16 +826,21 @@ static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
 			    struct fuse_copy_state *cs)
 {
 	struct fuse_notify_poll_wakeup_out outarg;
-	int err;
+	int err = -EINVAL;
 
 	if (size != sizeof(outarg))
-		return -EINVAL;
+		goto err;
 
 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
 	if (err)
-		return err;
+		goto err;
 
+	fuse_copy_finish(cs);
 	return fuse_notify_poll_wakeup(fc, &outarg);
+
+err:
+	fuse_copy_finish(cs);
+	return err;
 }
 
 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
@@ -845,6 +851,7 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
 		return fuse_notify_poll(fc, size, cs);
 
 	default:
+		fuse_copy_finish(cs);
 		return -EINVAL;
 	}
 }
@@ -923,7 +930,6 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
 	 */
 	if (!oh.unique) {
 		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs);
-		fuse_copy_finish(&cs);
 		return err ? err : nbytes;
 	}
 
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index e8162646a9b5..d9fdb7cec538 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -54,7 +54,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
 		ff->reserved_req = fuse_request_alloc();
 		if (!ff->reserved_req) {
 			kfree(ff);
-			ff = NULL;
+			return NULL;
 		} else {
 			INIT_LIST_HEAD(&ff->write_entry);
 			atomic_set(&ff->count, 0);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 47c96fdca1ac..459b73dd45e1 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
| @@ -292,6 +292,7 @@ static void fuse_put_super(struct super_block *sb) | |||
| 292 | list_del(&fc->entry); | 292 | list_del(&fc->entry); |
| 293 | fuse_ctl_remove_conn(fc); | 293 | fuse_ctl_remove_conn(fc); |
| 294 | mutex_unlock(&fuse_mutex); | 294 | mutex_unlock(&fuse_mutex); |
| 295 | bdi_destroy(&fc->bdi); | ||
| 295 | fuse_conn_put(fc); | 296 | fuse_conn_put(fc); |
| 296 | } | 297 | } |
| 297 | 298 | ||
| @@ -532,7 +533,6 @@ void fuse_conn_put(struct fuse_conn *fc) | |||
| 532 | if (fc->destroy_req) | 533 | if (fc->destroy_req) |
| 533 | fuse_request_free(fc->destroy_req); | 534 | fuse_request_free(fc->destroy_req); |
| 534 | mutex_destroy(&fc->inst_mutex); | 535 | mutex_destroy(&fc->inst_mutex); |
| 535 | bdi_destroy(&fc->bdi); | ||
| 536 | fc->release(fc); | 536 | fc->release(fc); |
| 537 | } | 537 | } |
| 538 | } | 538 | } |
| @@ -805,16 +805,18 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) | |||
| 805 | int err; | 805 | int err; |
| 806 | int is_bdev = sb->s_bdev != NULL; | 806 | int is_bdev = sb->s_bdev != NULL; |
| 807 | 807 | ||
| 808 | err = -EINVAL; | ||
| 808 | if (sb->s_flags & MS_MANDLOCK) | 809 | if (sb->s_flags & MS_MANDLOCK) |
| 809 | return -EINVAL; | 810 | goto err; |
| 810 | 811 | ||
| 811 | if (!parse_fuse_opt((char *) data, &d, is_bdev)) | 812 | if (!parse_fuse_opt((char *) data, &d, is_bdev)) |
| 812 | return -EINVAL; | 813 | goto err; |
| 813 | 814 | ||
| 814 | if (is_bdev) { | 815 | if (is_bdev) { |
| 815 | #ifdef CONFIG_BLOCK | 816 | #ifdef CONFIG_BLOCK |
| 817 | err = -EINVAL; | ||
| 816 | if (!sb_set_blocksize(sb, d.blksize)) | 818 | if (!sb_set_blocksize(sb, d.blksize)) |
| 817 | return -EINVAL; | 819 | goto err; |
| 818 | #endif | 820 | #endif |
| 819 | } else { | 821 | } else { |
| 820 | sb->s_blocksize = PAGE_CACHE_SIZE; | 822 | sb->s_blocksize = PAGE_CACHE_SIZE; |
| @@ -826,20 +828,22 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) | |||
| 826 | sb->s_export_op = &fuse_export_operations; | 828 | sb->s_export_op = &fuse_export_operations; |
| 827 | 829 | ||
| 828 | file = fget(d.fd); | 830 | file = fget(d.fd); |
| 831 | err = -EINVAL; | ||
| 829 | if (!file) | 832 | if (!file) |
| 830 | return -EINVAL; | 833 | goto err; |
| 831 | 834 | ||
| 832 | if (file->f_op != &fuse_dev_operations) | 835 | if (file->f_op != &fuse_dev_operations) |
| 833 | return -EINVAL; | 836 | goto err_fput; |
| 834 | 837 | ||
| 835 | fc = kmalloc(sizeof(*fc), GFP_KERNEL); | 838 | fc = kmalloc(sizeof(*fc), GFP_KERNEL); |
| 839 | err = -ENOMEM; | ||
| 836 | if (!fc) | 840 | if (!fc) |
| 837 | return -ENOMEM; | 841 | goto err_fput; |
| 838 | 842 | ||
| 839 | err = fuse_conn_init(fc, sb); | 843 | err = fuse_conn_init(fc, sb); |
| 840 | if (err) { | 844 | if (err) { |
| 841 | kfree(fc); | 845 | kfree(fc); |
| 842 | return err; | 846 | goto err_fput; |
| 843 | } | 847 | } |
| 844 | 848 | ||
| 845 | fc->release = fuse_free_conn; | 849 | fc->release = fuse_free_conn; |
| @@ -854,12 +858,12 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) | |||
| 854 | err = -ENOMEM; | 858 | err = -ENOMEM; |
| 855 | root = fuse_get_root_inode(sb, d.rootmode); | 859 | root = fuse_get_root_inode(sb, d.rootmode); |
| 856 | if (!root) | 860 | if (!root) |
| 857 | goto err; | 861 | goto err_put_conn; |
| 858 | 862 | ||
| 859 | root_dentry = d_alloc_root(root); | 863 | root_dentry = d_alloc_root(root); |
| 860 | if (!root_dentry) { | 864 | if (!root_dentry) { |
| 861 | iput(root); | 865 | iput(root); |
| 862 | goto err; | 866 | goto err_put_conn; |
| 863 | } | 867 | } |
| 864 | 868 | ||
| 865 | init_req = fuse_request_alloc(); | 869 | init_req = fuse_request_alloc(); |
| @@ -903,9 +907,11 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) | |||
| 903 | fuse_request_free(init_req); | 907 | fuse_request_free(init_req); |
| 904 | err_put_root: | 908 | err_put_root: |
| 905 | dput(root_dentry); | 909 | dput(root_dentry); |
| 906 | err: | 910 | err_put_conn: |
| 907 | fput(file); | ||
| 908 | fuse_conn_put(fc); | 911 | fuse_conn_put(fc); |
| 912 | err_fput: | ||
| 913 | fput(file); | ||
| 914 | err: | ||
| 909 | return err; | 915 | return err; |
| 910 | } | 916 | } |
| 911 | 917 | ||
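The fuse_fill_super() changes above replace scattered early returns with a single chain of labelled cleanup targets, so each failure point only undoes what was acquired before it and the labels sit in reverse acquisition order. A minimal standalone sketch of that idiom follows; the resources (a FILE and a malloc'd buffer) and the function name are hypothetical stand-ins, not the real fuse objects:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Sketch of the cascading-goto cleanup pattern used in fuse_fill_super():
 * set err before each step, jump to a label that unwinds everything
 * acquired so far, and keep the labels in reverse acquisition order. */
static int fill_example(const char *path)
{
	FILE *f;
	char *buf;
	int err;

	err = -EINVAL;
	if (!path)
		goto err;			/* nothing acquired yet */

	f = fopen(path, "r");
	err = -ENOENT;
	if (!f)
		goto err;

	buf = malloc(4096);
	err = -ENOMEM;
	if (!buf)
		goto err_fclose;		/* undo fopen() only */

	err = -EIO;
	if (fread(buf, 1, 4096, f) == 0 && ferror(f))
		goto err_free;			/* undo malloc(), then fopen() */

	free(buf);
	fclose(f);
	return 0;

err_free:
	free(buf);
err_fclose:
	fclose(f);
err:
	return err;
}

int main(void)
{
	printf("fill_example(NULL) = %d\n", fill_example(NULL));
	return 0;
}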
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index d53a1838d6e8..bed766e435b5 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c | |||
| @@ -427,10 +427,61 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait) | |||
| 427 | return ret; | 427 | return ret; |
| 428 | } | 428 | } |
| 429 | 429 | ||
| 430 | /* | ||
| 431 | * Get an inotify_kernel_event if one exists and is small | ||
| 432 | * enough to fit in "count". Return an error pointer if | ||
| 433 | * the buffer is not large enough. | ||
| 434 | * | ||
| 435 | * Called with the device ev_mutex held. | ||
| 436 | */ | ||
| 437 | static struct inotify_kernel_event *get_one_event(struct inotify_device *dev, | ||
| 438 | size_t count) | ||
| 439 | { | ||
| 440 | size_t event_size = sizeof(struct inotify_event); | ||
| 441 | struct inotify_kernel_event *kevent; | ||
| 442 | |||
| 443 | if (list_empty(&dev->events)) | ||
| 444 | return NULL; | ||
| 445 | |||
| 446 | kevent = inotify_dev_get_event(dev); | ||
| 447 | if (kevent->name) | ||
| 448 | event_size += kevent->event.len; | ||
| 449 | |||
| 450 | if (event_size > count) | ||
| 451 | return ERR_PTR(-EINVAL); | ||
| 452 | |||
| 453 | remove_kevent(dev, kevent); | ||
| 454 | return kevent; | ||
| 455 | } | ||
| 456 | |||
| 457 | /* | ||
| 458 | * Copy an event to user space, returning how much we copied. | ||
| 459 | * | ||
| 460 | * We already checked that the event size is smaller than the | ||
| 461 | * buffer we had in "get_one_event()" above. | ||
| 462 | */ | ||
| 463 | static ssize_t copy_event_to_user(struct inotify_kernel_event *kevent, | ||
| 464 | char __user *buf) | ||
| 465 | { | ||
| 466 | size_t event_size = sizeof(struct inotify_event); | ||
| 467 | |||
| 468 | if (copy_to_user(buf, &kevent->event, event_size)) | ||
| 469 | return -EFAULT; | ||
| 470 | |||
| 471 | if (kevent->name) { | ||
| 472 | buf += event_size; | ||
| 473 | |||
| 474 | if (copy_to_user(buf, kevent->name, kevent->event.len)) | ||
| 475 | return -EFAULT; | ||
| 476 | |||
| 477 | event_size += kevent->event.len; | ||
| 478 | } | ||
| 479 | return event_size; | ||
| 480 | } | ||
| 481 | |||
| 430 | static ssize_t inotify_read(struct file *file, char __user *buf, | 482 | static ssize_t inotify_read(struct file *file, char __user *buf, |
| 431 | size_t count, loff_t *pos) | 483 | size_t count, loff_t *pos) |
| 432 | { | 484 | { |
| 433 | size_t event_size = sizeof (struct inotify_event); | ||
| 434 | struct inotify_device *dev; | 485 | struct inotify_device *dev; |
| 435 | char __user *start; | 486 | char __user *start; |
| 436 | int ret; | 487 | int ret; |
| @@ -440,81 +491,43 @@ static ssize_t inotify_read(struct file *file, char __user *buf, | |||
| 440 | dev = file->private_data; | 491 | dev = file->private_data; |
| 441 | 492 | ||
| 442 | while (1) { | 493 | while (1) { |
| 494 | struct inotify_kernel_event *kevent; | ||
| 443 | 495 | ||
| 444 | prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); | 496 | prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); |
| 445 | 497 | ||
| 446 | mutex_lock(&dev->ev_mutex); | 498 | mutex_lock(&dev->ev_mutex); |
| 447 | if (!list_empty(&dev->events)) { | 499 | kevent = get_one_event(dev, count); |
| 448 | ret = 0; | ||
| 449 | break; | ||
| 450 | } | ||
| 451 | mutex_unlock(&dev->ev_mutex); | 500 | mutex_unlock(&dev->ev_mutex); |
| 452 | 501 | ||
| 453 | if (file->f_flags & O_NONBLOCK) { | 502 | if (kevent) { |
| 454 | ret = -EAGAIN; | 503 | ret = PTR_ERR(kevent); |
| 455 | break; | 504 | if (IS_ERR(kevent)) |
| 456 | } | 505 | break; |
| 457 | 506 | ret = copy_event_to_user(kevent, buf); | |
| 458 | if (signal_pending(current)) { | 507 | free_kevent(kevent); |
| 459 | ret = -EINTR; | 508 | if (ret < 0) |
| 460 | break; | 509 | break; |
| 510 | buf += ret; | ||
| 511 | count -= ret; | ||
| 512 | continue; | ||
| 461 | } | 513 | } |
| 462 | 514 | ||
| 463 | schedule(); | 515 | ret = -EAGAIN; |
| 464 | } | 516 | if (file->f_flags & O_NONBLOCK) |
| 465 | |||
| 466 | finish_wait(&dev->wq, &wait); | ||
| 467 | if (ret) | ||
| 468 | return ret; | ||
| 469 | |||
| 470 | while (1) { | ||
| 471 | struct inotify_kernel_event *kevent; | ||
| 472 | |||
| 473 | ret = buf - start; | ||
| 474 | if (list_empty(&dev->events)) | ||
| 475 | break; | 517 | break; |
| 476 | 518 | ret = -EINTR; | |
| 477 | kevent = inotify_dev_get_event(dev); | 519 | if (signal_pending(current)) |
| 478 | if (event_size + kevent->event.len > count) { | ||
| 479 | if (ret == 0 && count > 0) { | ||
| 480 | /* | ||
| 481 | * could not get a single event because we | ||
| 482 | * didn't have enough buffer space. | ||
| 483 | */ | ||
| 484 | ret = -EINVAL; | ||
| 485 | } | ||
| 486 | break; | 520 | break; |
| 487 | } | ||
| 488 | remove_kevent(dev, kevent); | ||
| 489 | 521 | ||
| 490 | /* | 522 | if (start != buf) |
| 491 | * Must perform the copy_to_user outside the mutex in order | ||
| 492 | * to avoid a lock order reversal with mmap_sem. | ||
| 493 | */ | ||
| 494 | mutex_unlock(&dev->ev_mutex); | ||
| 495 | |||
| 496 | if (copy_to_user(buf, &kevent->event, event_size)) { | ||
| 497 | ret = -EFAULT; | ||
| 498 | break; | 523 | break; |
| 499 | } | ||
| 500 | buf += event_size; | ||
| 501 | count -= event_size; | ||
| 502 | |||
| 503 | if (kevent->name) { | ||
| 504 | if (copy_to_user(buf, kevent->name, kevent->event.len)){ | ||
| 505 | ret = -EFAULT; | ||
| 506 | break; | ||
| 507 | } | ||
| 508 | buf += kevent->event.len; | ||
| 509 | count -= kevent->event.len; | ||
| 510 | } | ||
| 511 | |||
| 512 | free_kevent(kevent); | ||
| 513 | 524 | ||
| 514 | mutex_lock(&dev->ev_mutex); | 525 | schedule(); |
| 515 | } | 526 | } |
| 516 | mutex_unlock(&dev->ev_mutex); | ||
| 517 | 527 | ||
| 528 | finish_wait(&dev->wq, &wait); | ||
| 529 | if (start != buf && ret != -EFAULT) | ||
| 530 | ret = buf - start; | ||
| 518 | return ret; | 531 | return ret; |
| 519 | } | 532 | } |
| 520 | 533 | ||
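The refactored inotify_read() keeps the existing user-visible contract: a read() whose buffer cannot hold even the next queued event fails with EINVAL, otherwise as many whole events as fit are returned back to back. A small userspace sketch of consuming events under that contract (watching /tmp is an arbitrary choice for illustration):

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	/* Sized for at least one event with a maximal name, so read()
	 * never fails with EINVAL for "buffer too small". */
	char buf[sizeof(struct inotify_event) + NAME_MAX + 1];
	int fd = inotify_init();
	ssize_t len;
	char *p;

	if (fd < 0 || inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE) < 0) {
		perror("inotify");
		return 1;
	}

	len = read(fd, buf, sizeof(buf));	/* blocks until events arrive */
	if (len < 0) {
		perror("read");			/* EINVAL here would mean the buffer was too small */
		return 1;
	}

	/* Each record is a struct inotify_event followed by 'len' name bytes. */
	for (p = buf; p < buf + len; ) {
		struct inotify_event *ev = (struct inotify_event *)p;

		printf("wd=%d mask=%#x name=%s\n", ev->wd, ev->mask,
		       ev->len ? ev->name : "");
		p += sizeof(*ev) + ev->len;
	}

	close(fd);
	return 0;
}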
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig index 3f53dd101f99..29228f5899cd 100644 --- a/fs/xfs/Kconfig +++ b/fs/xfs/Kconfig | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | config XFS_FS | 1 | config XFS_FS |
| 2 | tristate "XFS filesystem support" | 2 | tristate "XFS filesystem support" |
| 3 | depends on BLOCK | 3 | depends on BLOCK |
| 4 | select EXPORTFS | ||
| 4 | help | 5 | help |
| 5 | XFS is a high performance journaling filesystem which originated | 6 | XFS is a high performance journaling filesystem which originated |
| 6 | on the SGI IRIX platform. It is completely multi-threaded, can | 7 | on the SGI IRIX platform. It is completely multi-threaded, can |
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index e5be1e0be802..4bd112313f33 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c | |||
| @@ -50,12 +50,14 @@ | |||
| 50 | #include "xfs_vnodeops.h" | 50 | #include "xfs_vnodeops.h" |
| 51 | #include "xfs_quota.h" | 51 | #include "xfs_quota.h" |
| 52 | #include "xfs_inode_item.h" | 52 | #include "xfs_inode_item.h" |
| 53 | #include "xfs_export.h" | ||
| 53 | 54 | ||
| 54 | #include <linux/capability.h> | 55 | #include <linux/capability.h> |
| 55 | #include <linux/dcache.h> | 56 | #include <linux/dcache.h> |
| 56 | #include <linux/mount.h> | 57 | #include <linux/mount.h> |
| 57 | #include <linux/namei.h> | 58 | #include <linux/namei.h> |
| 58 | #include <linux/pagemap.h> | 59 | #include <linux/pagemap.h> |
| 60 | #include <linux/exportfs.h> | ||
| 59 | 61 | ||
| 60 | /* | 62 | /* |
| 61 | * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to | 63 | * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to |
| @@ -164,97 +166,69 @@ xfs_find_handle( | |||
| 164 | return 0; | 166 | return 0; |
| 165 | } | 167 | } |
| 166 | 168 | ||
| 167 | |||
| 168 | /* | 169 | /* |
| 169 | * Convert userspace handle data into inode. | 170 | * No need to do permission checks on the various pathname components |
| 170 | * | 171 | * as the handle operations are privileged. |
| 171 | * We use the fact that all the fsop_handlereq ioctl calls have a data | ||
| 172 | * structure argument whose first component is always a xfs_fsop_handlereq_t, | ||
| 173 | * so we can pass that sub structure into this handy, shared routine. | ||
| 174 | * | ||
| 175 | * If no error, caller must always iput the returned inode. | ||
| 176 | */ | 172 | */ |
| 177 | STATIC int | 173 | STATIC int |
| 178 | xfs_vget_fsop_handlereq( | 174 | xfs_handle_acceptable( |
| 179 | xfs_mount_t *mp, | 175 | void *context, |
| 180 | struct inode *parinode, /* parent inode pointer */ | 176 | struct dentry *dentry) |
| 181 | xfs_fsop_handlereq_t *hreq, | 177 | { |
| 182 | struct inode **inode) | 178 | return 1; |
| 179 | } | ||
| 180 | |||
| 181 | /* | ||
| 182 | * Convert userspace handle data into a dentry. | ||
| 183 | */ | ||
| 184 | struct dentry * | ||
| 185 | xfs_handle_to_dentry( | ||
| 186 | struct file *parfilp, | ||
| 187 | void __user *uhandle, | ||
| 188 | u32 hlen) | ||
| 183 | { | 189 | { |
| 184 | void __user *hanp; | ||
| 185 | size_t hlen; | ||
| 186 | xfs_fid_t *xfid; | ||
| 187 | xfs_handle_t *handlep; | ||
| 188 | xfs_handle_t handle; | 190 | xfs_handle_t handle; |
| 189 | xfs_inode_t *ip; | 191 | struct xfs_fid64 fid; |
| 190 | xfs_ino_t ino; | ||
| 191 | __u32 igen; | ||
| 192 | int error; | ||
| 193 | 192 | ||
| 194 | /* | 193 | /* |
| 195 | * Only allow handle opens under a directory. | 194 | * Only allow handle opens under a directory. |
| 196 | */ | 195 | */ |
| 197 | if (!S_ISDIR(parinode->i_mode)) | 196 | if (!S_ISDIR(parfilp->f_path.dentry->d_inode->i_mode)) |
| 198 | return XFS_ERROR(ENOTDIR); | 197 | return ERR_PTR(-ENOTDIR); |
| 199 | 198 | ||
| 200 | hanp = hreq->ihandle; | 199 | if (hlen != sizeof(xfs_handle_t)) |
| 201 | hlen = hreq->ihandlen; | 200 | return ERR_PTR(-EINVAL); |
| 202 | handlep = &handle; | 201 | if (copy_from_user(&handle, uhandle, hlen)) |
| 203 | 202 | return ERR_PTR(-EFAULT); | |
| 204 | if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep)) | 203 | if (handle.ha_fid.fid_len != |
| 205 | return XFS_ERROR(EINVAL); | 204 | sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len)) |
| 206 | if (copy_from_user(handlep, hanp, hlen)) | 205 | return ERR_PTR(-EINVAL); |
| 207 | return XFS_ERROR(EFAULT); | 206 | |
| 208 | if (hlen < sizeof(*handlep)) | 207 | memset(&fid, 0, sizeof(struct fid)); |
| 209 | memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen); | 208 | fid.ino = handle.ha_fid.fid_ino; |
| 210 | if (hlen > sizeof(handlep->ha_fsid)) { | 209 | fid.gen = handle.ha_fid.fid_gen; |
| 211 | if (handlep->ha_fid.fid_len != | 210 | |
| 212 | (hlen - sizeof(handlep->ha_fsid) - | 211 | return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3, |
| 213 | sizeof(handlep->ha_fid.fid_len)) || | 212 | FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG, |
| 214 | handlep->ha_fid.fid_pad) | 213 | xfs_handle_acceptable, NULL); |
| 215 | return XFS_ERROR(EINVAL); | 214 | } |
| 216 | } | ||
| 217 | |||
| 218 | /* | ||
| 219 | * Crack the handle, obtain the inode # & generation # | ||
| 220 | */ | ||
| 221 | xfid = (struct xfs_fid *)&handlep->ha_fid; | ||
| 222 | if (xfid->fid_len == sizeof(*xfid) - sizeof(xfid->fid_len)) { | ||
| 223 | ino = xfid->fid_ino; | ||
| 224 | igen = xfid->fid_gen; | ||
| 225 | } else { | ||
| 226 | return XFS_ERROR(EINVAL); | ||
| 227 | } | ||
| 228 | |||
| 229 | /* | ||
| 230 | * Get the XFS inode, building a Linux inode to go with it. | ||
| 231 | */ | ||
| 232 | error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); | ||
| 233 | if (error) | ||
| 234 | return error; | ||
| 235 | if (ip == NULL) | ||
| 236 | return XFS_ERROR(EIO); | ||
| 237 | if (ip->i_d.di_gen != igen) { | ||
| 238 | xfs_iput_new(ip, XFS_ILOCK_SHARED); | ||
| 239 | return XFS_ERROR(ENOENT); | ||
| 240 | } | ||
| 241 | |||
| 242 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||
| 243 | 215 | ||
| 244 | *inode = VFS_I(ip); | 216 | STATIC struct dentry * |
| 245 | return 0; | 217 | xfs_handlereq_to_dentry( |
| 218 | struct file *parfilp, | ||
| 219 | xfs_fsop_handlereq_t *hreq) | ||
| 220 | { | ||
| 221 | return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen); | ||
| 246 | } | 222 | } |
| 247 | 223 | ||
| 248 | int | 224 | int |
| 249 | xfs_open_by_handle( | 225 | xfs_open_by_handle( |
| 250 | xfs_mount_t *mp, | ||
| 251 | xfs_fsop_handlereq_t *hreq, | ||
| 252 | struct file *parfilp, | 226 | struct file *parfilp, |
| 253 | struct inode *parinode) | 227 | xfs_fsop_handlereq_t *hreq) |
| 254 | { | 228 | { |
| 255 | const struct cred *cred = current_cred(); | 229 | const struct cred *cred = current_cred(); |
| 256 | int error; | 230 | int error; |
| 257 | int new_fd; | 231 | int fd; |
| 258 | int permflag; | 232 | int permflag; |
| 259 | struct file *filp; | 233 | struct file *filp; |
| 260 | struct inode *inode; | 234 | struct inode *inode; |
| @@ -263,19 +237,21 @@ xfs_open_by_handle( | |||
| 263 | if (!capable(CAP_SYS_ADMIN)) | 237 | if (!capable(CAP_SYS_ADMIN)) |
| 264 | return -XFS_ERROR(EPERM); | 238 | return -XFS_ERROR(EPERM); |
| 265 | 239 | ||
| 266 | error = xfs_vget_fsop_handlereq(mp, parinode, hreq, &inode); | 240 | dentry = xfs_handlereq_to_dentry(parfilp, hreq); |
| 267 | if (error) | 241 | if (IS_ERR(dentry)) |
| 268 | return -error; | 242 | return PTR_ERR(dentry); |
| 243 | inode = dentry->d_inode; | ||
| 269 | 244 | ||
| 270 | /* Restrict xfs_open_by_handle to directories & regular files. */ | 245 | /* Restrict xfs_open_by_handle to directories & regular files. */ |
| 271 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) { | 246 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) { |
| 272 | iput(inode); | 247 | error = -XFS_ERROR(EPERM); |
| 273 | return -XFS_ERROR(EINVAL); | 248 | goto out_dput; |
| 274 | } | 249 | } |
| 275 | 250 | ||
| 276 | #if BITS_PER_LONG != 32 | 251 | #if BITS_PER_LONG != 32 |
| 277 | hreq->oflags |= O_LARGEFILE; | 252 | hreq->oflags |= O_LARGEFILE; |
| 278 | #endif | 253 | #endif |
| 254 | |||
| 279 | /* Put open permission in namei format. */ | 255 | /* Put open permission in namei format. */ |
| 280 | permflag = hreq->oflags; | 256 | permflag = hreq->oflags; |
| 281 | if ((permflag+1) & O_ACCMODE) | 257 | if ((permflag+1) & O_ACCMODE) |
| @@ -285,50 +261,45 @@ xfs_open_by_handle( | |||
| 285 | 261 | ||
| 286 | if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) && | 262 | if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) && |
| 287 | (permflag & FMODE_WRITE) && IS_APPEND(inode)) { | 263 | (permflag & FMODE_WRITE) && IS_APPEND(inode)) { |
| 288 | iput(inode); | 264 | error = -XFS_ERROR(EPERM); |
| 289 | return -XFS_ERROR(EPERM); | 265 | goto out_dput; |
| 290 | } | 266 | } |
| 291 | 267 | ||
| 292 | if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) { | 268 | if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) { |
| 293 | iput(inode); | 269 | error = -XFS_ERROR(EACCES); |
| 294 | return -XFS_ERROR(EACCES); | 270 | goto out_dput; |
| 295 | } | 271 | } |
| 296 | 272 | ||
| 297 | /* Can't write directories. */ | 273 | /* Can't write directories. */ |
| 298 | if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) { | 274 | if (S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) { |
| 299 | iput(inode); | 275 | error = -XFS_ERROR(EISDIR); |
| 300 | return -XFS_ERROR(EISDIR); | 276 | goto out_dput; |
| 301 | } | 277 | } |
| 302 | 278 | ||
| 303 | if ((new_fd = get_unused_fd()) < 0) { | 279 | fd = get_unused_fd(); |
| 304 | iput(inode); | 280 | if (fd < 0) { |
| 305 | return new_fd; | 281 | error = fd; |
| 282 | goto out_dput; | ||
| 306 | } | 283 | } |
| 307 | 284 | ||
| 308 | dentry = d_obtain_alias(inode); | 285 | filp = dentry_open(dentry, mntget(parfilp->f_path.mnt), |
| 309 | if (IS_ERR(dentry)) { | 286 | hreq->oflags, cred); |
| 310 | put_unused_fd(new_fd); | ||
| 311 | return PTR_ERR(dentry); | ||
| 312 | } | ||
| 313 | |||
| 314 | /* Ensure umount returns EBUSY on umounts while this file is open. */ | ||
| 315 | mntget(parfilp->f_path.mnt); | ||
| 316 | |||
| 317 | /* Create file pointer. */ | ||
| 318 | filp = dentry_open(dentry, parfilp->f_path.mnt, hreq->oflags, cred); | ||
| 319 | if (IS_ERR(filp)) { | 287 | if (IS_ERR(filp)) { |
| 320 | put_unused_fd(new_fd); | 288 | put_unused_fd(fd); |
| 321 | return -XFS_ERROR(-PTR_ERR(filp)); | 289 | return PTR_ERR(filp); |
| 322 | } | 290 | } |
| 323 | 291 | ||
| 324 | if (inode->i_mode & S_IFREG) { | 292 | if (inode->i_mode & S_IFREG) { |
| 325 | /* invisible operation should not change atime */ | ||
| 326 | filp->f_flags |= O_NOATIME; | 293 | filp->f_flags |= O_NOATIME; |
| 327 | filp->f_mode |= FMODE_NOCMTIME; | 294 | filp->f_mode |= FMODE_NOCMTIME; |
| 328 | } | 295 | } |
| 329 | 296 | ||
| 330 | fd_install(new_fd, filp); | 297 | fd_install(fd, filp); |
| 331 | return new_fd; | 298 | return fd; |
| 299 | |||
| 300 | out_dput: | ||
| 301 | dput(dentry); | ||
| 302 | return error; | ||
| 332 | } | 303 | } |
| 333 | 304 | ||
| 334 | /* | 305 | /* |
| @@ -359,11 +330,10 @@ do_readlink( | |||
| 359 | 330 | ||
| 360 | int | 331 | int |
| 361 | xfs_readlink_by_handle( | 332 | xfs_readlink_by_handle( |
| 362 | xfs_mount_t *mp, | 333 | struct file *parfilp, |
| 363 | xfs_fsop_handlereq_t *hreq, | 334 | xfs_fsop_handlereq_t *hreq) |
| 364 | struct inode *parinode) | ||
| 365 | { | 335 | { |
| 366 | struct inode *inode; | 336 | struct dentry *dentry; |
| 367 | __u32 olen; | 337 | __u32 olen; |
| 368 | void *link; | 338 | void *link; |
| 369 | int error; | 339 | int error; |
| @@ -371,26 +341,28 @@ xfs_readlink_by_handle( | |||
| 371 | if (!capable(CAP_SYS_ADMIN)) | 341 | if (!capable(CAP_SYS_ADMIN)) |
| 372 | return -XFS_ERROR(EPERM); | 342 | return -XFS_ERROR(EPERM); |
| 373 | 343 | ||
| 374 | error = xfs_vget_fsop_handlereq(mp, parinode, hreq, &inode); | 344 | dentry = xfs_handlereq_to_dentry(parfilp, hreq); |
| 375 | if (error) | 345 | if (IS_ERR(dentry)) |
| 376 | return -error; | 346 | return PTR_ERR(dentry); |
| 377 | 347 | ||
| 378 | /* Restrict this handle operation to symlinks only. */ | 348 | /* Restrict this handle operation to symlinks only. */ |
| 379 | if (!S_ISLNK(inode->i_mode)) { | 349 | if (!S_ISLNK(dentry->d_inode->i_mode)) { |
| 380 | error = -XFS_ERROR(EINVAL); | 350 | error = -XFS_ERROR(EINVAL); |
| 381 | goto out_iput; | 351 | goto out_dput; |
| 382 | } | 352 | } |
| 383 | 353 | ||
| 384 | if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) { | 354 | if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) { |
| 385 | error = -XFS_ERROR(EFAULT); | 355 | error = -XFS_ERROR(EFAULT); |
| 386 | goto out_iput; | 356 | goto out_dput; |
| 387 | } | 357 | } |
| 388 | 358 | ||
| 389 | link = kmalloc(MAXPATHLEN+1, GFP_KERNEL); | 359 | link = kmalloc(MAXPATHLEN+1, GFP_KERNEL); |
| 390 | if (!link) | 360 | if (!link) { |
| 391 | goto out_iput; | 361 | error = -XFS_ERROR(ENOMEM); |
| 362 | goto out_dput; | ||
| 363 | } | ||
| 392 | 364 | ||
| 393 | error = -xfs_readlink(XFS_I(inode), link); | 365 | error = -xfs_readlink(XFS_I(dentry->d_inode), link); |
| 394 | if (error) | 366 | if (error) |
| 395 | goto out_kfree; | 367 | goto out_kfree; |
| 396 | error = do_readlink(hreq->ohandle, olen, link); | 368 | error = do_readlink(hreq->ohandle, olen, link); |
| @@ -399,32 +371,31 @@ xfs_readlink_by_handle( | |||
| 399 | 371 | ||
| 400 | out_kfree: | 372 | out_kfree: |
| 401 | kfree(link); | 373 | kfree(link); |
| 402 | out_iput: | 374 | out_dput: |
| 403 | iput(inode); | 375 | dput(dentry); |
| 404 | return error; | 376 | return error; |
| 405 | } | 377 | } |
| 406 | 378 | ||
| 407 | STATIC int | 379 | STATIC int |
| 408 | xfs_fssetdm_by_handle( | 380 | xfs_fssetdm_by_handle( |
| 409 | xfs_mount_t *mp, | 381 | struct file *parfilp, |
| 410 | void __user *arg, | 382 | void __user *arg) |
| 411 | struct inode *parinode) | ||
| 412 | { | 383 | { |
| 413 | int error; | 384 | int error; |
| 414 | struct fsdmidata fsd; | 385 | struct fsdmidata fsd; |
| 415 | xfs_fsop_setdm_handlereq_t dmhreq; | 386 | xfs_fsop_setdm_handlereq_t dmhreq; |
| 416 | struct inode *inode; | 387 | struct dentry *dentry; |
| 417 | 388 | ||
| 418 | if (!capable(CAP_MKNOD)) | 389 | if (!capable(CAP_MKNOD)) |
| 419 | return -XFS_ERROR(EPERM); | 390 | return -XFS_ERROR(EPERM); |
| 420 | if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t))) | 391 | if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t))) |
| 421 | return -XFS_ERROR(EFAULT); | 392 | return -XFS_ERROR(EFAULT); |
| 422 | 393 | ||
| 423 | error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &inode); | 394 | dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq); |
| 424 | if (error) | 395 | if (IS_ERR(dentry)) |
| 425 | return -error; | 396 | return PTR_ERR(dentry); |
| 426 | 397 | ||
| 427 | if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) { | 398 | if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { |
| 428 | error = -XFS_ERROR(EPERM); | 399 | error = -XFS_ERROR(EPERM); |
| 429 | goto out; | 400 | goto out; |
| 430 | } | 401 | } |
| @@ -434,24 +405,23 @@ xfs_fssetdm_by_handle( | |||
| 434 | goto out; | 405 | goto out; |
| 435 | } | 406 | } |
| 436 | 407 | ||
| 437 | error = -xfs_set_dmattrs(XFS_I(inode), fsd.fsd_dmevmask, | 408 | error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, |
| 438 | fsd.fsd_dmstate); | 409 | fsd.fsd_dmstate); |
| 439 | 410 | ||
| 440 | out: | 411 | out: |
| 441 | iput(inode); | 412 | dput(dentry); |
| 442 | return error; | 413 | return error; |
| 443 | } | 414 | } |
| 444 | 415 | ||
| 445 | STATIC int | 416 | STATIC int |
| 446 | xfs_attrlist_by_handle( | 417 | xfs_attrlist_by_handle( |
| 447 | xfs_mount_t *mp, | 418 | struct file *parfilp, |
| 448 | void __user *arg, | 419 | void __user *arg) |
| 449 | struct inode *parinode) | ||
| 450 | { | 420 | { |
| 451 | int error; | 421 | int error = -ENOMEM; |
| 452 | attrlist_cursor_kern_t *cursor; | 422 | attrlist_cursor_kern_t *cursor; |
| 453 | xfs_fsop_attrlist_handlereq_t al_hreq; | 423 | xfs_fsop_attrlist_handlereq_t al_hreq; |
| 454 | struct inode *inode; | 424 | struct dentry *dentry; |
| 455 | char *kbuf; | 425 | char *kbuf; |
| 456 | 426 | ||
| 457 | if (!capable(CAP_SYS_ADMIN)) | 427 | if (!capable(CAP_SYS_ADMIN)) |
| @@ -467,16 +437,16 @@ xfs_attrlist_by_handle( | |||
| 467 | if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) | 437 | if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) |
| 468 | return -XFS_ERROR(EINVAL); | 438 | return -XFS_ERROR(EINVAL); |
| 469 | 439 | ||
| 470 | error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq, &inode); | 440 | dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq); |
| 471 | if (error) | 441 | if (IS_ERR(dentry)) |
| 472 | goto out; | 442 | return PTR_ERR(dentry); |
| 473 | 443 | ||
| 474 | kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL); | 444 | kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL); |
| 475 | if (!kbuf) | 445 | if (!kbuf) |
| 476 | goto out_vn_rele; | 446 | goto out_dput; |
| 477 | 447 | ||
| 478 | cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; | 448 | cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; |
| 479 | error = xfs_attr_list(XFS_I(inode), kbuf, al_hreq.buflen, | 449 | error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, |
| 480 | al_hreq.flags, cursor); | 450 | al_hreq.flags, cursor); |
| 481 | if (error) | 451 | if (error) |
| 482 | goto out_kfree; | 452 | goto out_kfree; |
| @@ -486,10 +456,9 @@ xfs_attrlist_by_handle( | |||
| 486 | 456 | ||
| 487 | out_kfree: | 457 | out_kfree: |
| 488 | kfree(kbuf); | 458 | kfree(kbuf); |
| 489 | out_vn_rele: | 459 | out_dput: |
| 490 | iput(inode); | 460 | dput(dentry); |
| 491 | out: | 461 | return error; |
| 492 | return -error; | ||
| 493 | } | 462 | } |
| 494 | 463 | ||
| 495 | int | 464 | int |
| @@ -564,15 +533,13 @@ xfs_attrmulti_attr_remove( | |||
| 564 | 533 | ||
| 565 | STATIC int | 534 | STATIC int |
| 566 | xfs_attrmulti_by_handle( | 535 | xfs_attrmulti_by_handle( |
| 567 | xfs_mount_t *mp, | ||
| 568 | void __user *arg, | ||
| 569 | struct file *parfilp, | 536 | struct file *parfilp, |
| 570 | struct inode *parinode) | 537 | void __user *arg) |
| 571 | { | 538 | { |
| 572 | int error; | 539 | int error; |
| 573 | xfs_attr_multiop_t *ops; | 540 | xfs_attr_multiop_t *ops; |
| 574 | xfs_fsop_attrmulti_handlereq_t am_hreq; | 541 | xfs_fsop_attrmulti_handlereq_t am_hreq; |
| 575 | struct inode *inode; | 542 | struct dentry *dentry; |
| 576 | unsigned int i, size; | 543 | unsigned int i, size; |
| 577 | char *attr_name; | 544 | char *attr_name; |
| 578 | 545 | ||
| @@ -581,19 +548,19 @@ xfs_attrmulti_by_handle( | |||
| 581 | if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t))) | 548 | if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t))) |
| 582 | return -XFS_ERROR(EFAULT); | 549 | return -XFS_ERROR(EFAULT); |
| 583 | 550 | ||
| 584 | error = xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &inode); | 551 | dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq); |
| 585 | if (error) | 552 | if (IS_ERR(dentry)) |
| 586 | goto out; | 553 | return PTR_ERR(dentry); |
| 587 | 554 | ||
| 588 | error = E2BIG; | 555 | error = E2BIG; |
| 589 | size = am_hreq.opcount * sizeof(xfs_attr_multiop_t); | 556 | size = am_hreq.opcount * sizeof(xfs_attr_multiop_t); |
| 590 | if (!size || size > 16 * PAGE_SIZE) | 557 | if (!size || size > 16 * PAGE_SIZE) |
| 591 | goto out_vn_rele; | 558 | goto out_dput; |
| 592 | 559 | ||
| 593 | error = ENOMEM; | 560 | error = ENOMEM; |
| 594 | ops = kmalloc(size, GFP_KERNEL); | 561 | ops = kmalloc(size, GFP_KERNEL); |
| 595 | if (!ops) | 562 | if (!ops) |
| 596 | goto out_vn_rele; | 563 | goto out_dput; |
| 597 | 564 | ||
| 598 | error = EFAULT; | 565 | error = EFAULT; |
| 599 | if (copy_from_user(ops, am_hreq.ops, size)) | 566 | if (copy_from_user(ops, am_hreq.ops, size)) |
| @@ -615,25 +582,28 @@ xfs_attrmulti_by_handle( | |||
| 615 | 582 | ||
| 616 | switch (ops[i].am_opcode) { | 583 | switch (ops[i].am_opcode) { |
| 617 | case ATTR_OP_GET: | 584 | case ATTR_OP_GET: |
| 618 | ops[i].am_error = xfs_attrmulti_attr_get(inode, | 585 | ops[i].am_error = xfs_attrmulti_attr_get( |
| 619 | attr_name, ops[i].am_attrvalue, | 586 | dentry->d_inode, attr_name, |
| 620 | &ops[i].am_length, ops[i].am_flags); | 587 | ops[i].am_attrvalue, &ops[i].am_length, |
| 588 | ops[i].am_flags); | ||
| 621 | break; | 589 | break; |
| 622 | case ATTR_OP_SET: | 590 | case ATTR_OP_SET: |
| 623 | ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); | 591 | ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); |
| 624 | if (ops[i].am_error) | 592 | if (ops[i].am_error) |
| 625 | break; | 593 | break; |
| 626 | ops[i].am_error = xfs_attrmulti_attr_set(inode, | 594 | ops[i].am_error = xfs_attrmulti_attr_set( |
| 627 | attr_name, ops[i].am_attrvalue, | 595 | dentry->d_inode, attr_name, |
| 628 | ops[i].am_length, ops[i].am_flags); | 596 | ops[i].am_attrvalue, ops[i].am_length, |
| 597 | ops[i].am_flags); | ||
| 629 | mnt_drop_write(parfilp->f_path.mnt); | 598 | mnt_drop_write(parfilp->f_path.mnt); |
| 630 | break; | 599 | break; |
| 631 | case ATTR_OP_REMOVE: | 600 | case ATTR_OP_REMOVE: |
| 632 | ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); | 601 | ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); |
| 633 | if (ops[i].am_error) | 602 | if (ops[i].am_error) |
| 634 | break; | 603 | break; |
| 635 | ops[i].am_error = xfs_attrmulti_attr_remove(inode, | 604 | ops[i].am_error = xfs_attrmulti_attr_remove( |
| 636 | attr_name, ops[i].am_flags); | 605 | dentry->d_inode, attr_name, |
| 606 | ops[i].am_flags); | ||
| 637 | mnt_drop_write(parfilp->f_path.mnt); | 607 | mnt_drop_write(parfilp->f_path.mnt); |
| 638 | break; | 608 | break; |
| 639 | default: | 609 | default: |
| @@ -647,9 +617,8 @@ xfs_attrmulti_by_handle( | |||
| 647 | kfree(attr_name); | 617 | kfree(attr_name); |
| 648 | out_kfree_ops: | 618 | out_kfree_ops: |
| 649 | kfree(ops); | 619 | kfree(ops); |
| 650 | out_vn_rele: | 620 | out_dput: |
| 651 | iput(inode); | 621 | dput(dentry); |
| 652 | out: | ||
| 653 | return -error; | 622 | return -error; |
| 654 | } | 623 | } |
| 655 | 624 | ||
| @@ -1440,23 +1409,23 @@ xfs_file_ioctl( | |||
| 1440 | 1409 | ||
| 1441 | if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) | 1410 | if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) |
| 1442 | return -XFS_ERROR(EFAULT); | 1411 | return -XFS_ERROR(EFAULT); |
| 1443 | return xfs_open_by_handle(mp, &hreq, filp, inode); | 1412 | return xfs_open_by_handle(filp, &hreq); |
| 1444 | } | 1413 | } |
| 1445 | case XFS_IOC_FSSETDM_BY_HANDLE: | 1414 | case XFS_IOC_FSSETDM_BY_HANDLE: |
| 1446 | return xfs_fssetdm_by_handle(mp, arg, inode); | 1415 | return xfs_fssetdm_by_handle(filp, arg); |
| 1447 | 1416 | ||
| 1448 | case XFS_IOC_READLINK_BY_HANDLE: { | 1417 | case XFS_IOC_READLINK_BY_HANDLE: { |
| 1449 | xfs_fsop_handlereq_t hreq; | 1418 | xfs_fsop_handlereq_t hreq; |
| 1450 | 1419 | ||
| 1451 | if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) | 1420 | if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) |
| 1452 | return -XFS_ERROR(EFAULT); | 1421 | return -XFS_ERROR(EFAULT); |
| 1453 | return xfs_readlink_by_handle(mp, &hreq, inode); | 1422 | return xfs_readlink_by_handle(filp, &hreq); |
| 1454 | } | 1423 | } |
| 1455 | case XFS_IOC_ATTRLIST_BY_HANDLE: | 1424 | case XFS_IOC_ATTRLIST_BY_HANDLE: |
| 1456 | return xfs_attrlist_by_handle(mp, arg, inode); | 1425 | return xfs_attrlist_by_handle(filp, arg); |
| 1457 | 1426 | ||
| 1458 | case XFS_IOC_ATTRMULTI_BY_HANDLE: | 1427 | case XFS_IOC_ATTRMULTI_BY_HANDLE: |
| 1459 | return xfs_attrmulti_by_handle(mp, arg, filp, inode); | 1428 | return xfs_attrmulti_by_handle(filp, arg); |
| 1460 | 1429 | ||
| 1461 | case XFS_IOC_SWAPEXT: { | 1430 | case XFS_IOC_SWAPEXT: { |
| 1462 | struct xfs_swapext sxp; | 1431 | struct xfs_swapext sxp; |
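The converted handle helpers above return either a dentry or an encoded error in the same pointer, so each caller collapses to the IS_ERR()/PTR_ERR() pattern instead of a separate error output parameter. A standalone sketch of how that encoding works, mirroring the kernel's ERR_PTR/IS_ERR/PTR_ERR from <linux/err.h>; the lookup() function and struct object here are hypothetical:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Same idea as <linux/err.h>: small negative errno values are folded into
 * the pointer itself, so one return value carries either a valid object
 * or an error code. */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct object { const char *name; };

/* Hypothetical lookup: returns an object or an ERR_PTR-encoded errno. */
static struct object *lookup(const char *name)
{
	static struct object obj = { "root" };

	if (!name)
		return ERR_PTR(-EINVAL);
	if (strcmp(name, "root") != 0)
		return ERR_PTR(-ENOENT);
	return &obj;
}

int main(void)
{
	struct object *o = lookup("missing");

	if (IS_ERR(o))
		printf("lookup failed: %ld\n", PTR_ERR(o));	/* prints -2 (ENOENT) */

	o = lookup("root");
	if (!IS_ERR(o))
		printf("found %s\n", o->name);
	return 0;
}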
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.h b/fs/xfs/linux-2.6/xfs_ioctl.h index 8c16bf2d7e03..7bd7c6afc1eb 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.h +++ b/fs/xfs/linux-2.6/xfs_ioctl.h | |||
| @@ -34,16 +34,13 @@ xfs_find_handle( | |||
| 34 | 34 | ||
| 35 | extern int | 35 | extern int |
| 36 | xfs_open_by_handle( | 36 | xfs_open_by_handle( |
| 37 | xfs_mount_t *mp, | ||
| 38 | xfs_fsop_handlereq_t *hreq, | ||
| 39 | struct file *parfilp, | 37 | struct file *parfilp, |
| 40 | struct inode *parinode); | 38 | xfs_fsop_handlereq_t *hreq); |
| 41 | 39 | ||
| 42 | extern int | 40 | extern int |
| 43 | xfs_readlink_by_handle( | 41 | xfs_readlink_by_handle( |
| 44 | xfs_mount_t *mp, | 42 | struct file *parfilp, |
| 45 | xfs_fsop_handlereq_t *hreq, | 43 | xfs_fsop_handlereq_t *hreq); |
| 46 | struct inode *parinode); | ||
| 47 | 44 | ||
| 48 | extern int | 45 | extern int |
| 49 | xfs_attrmulti_attr_get( | 46 | xfs_attrmulti_attr_get( |
| @@ -67,6 +64,12 @@ xfs_attrmulti_attr_remove( | |||
| 67 | char *name, | 64 | char *name, |
| 68 | __uint32_t flags); | 65 | __uint32_t flags); |
| 69 | 66 | ||
| 67 | extern struct dentry * | ||
| 68 | xfs_handle_to_dentry( | ||
| 69 | struct file *parfilp, | ||
| 70 | void __user *uhandle, | ||
| 71 | u32 hlen); | ||
| 72 | |||
| 70 | extern long | 73 | extern long |
| 71 | xfs_file_ioctl( | 74 | xfs_file_ioctl( |
| 72 | struct file *filp, | 75 | struct file *filp, |
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index 50903ad3182e..c70c4e3db790 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | */ | 17 | */ |
| 18 | #include <linux/compat.h> | 18 | #include <linux/compat.h> |
| 19 | #include <linux/ioctl.h> | 19 | #include <linux/ioctl.h> |
| 20 | #include <linux/mount.h> | ||
| 20 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
| 21 | #include "xfs.h" | 22 | #include "xfs.h" |
| 22 | #include "xfs_fs.h" | 23 | #include "xfs_fs.h" |
| @@ -340,96 +341,24 @@ xfs_compat_handlereq_copyin( | |||
| 340 | return 0; | 341 | return 0; |
| 341 | } | 342 | } |
| 342 | 343 | ||
| 343 | /* | 344 | STATIC struct dentry * |
| 344 | * Convert userspace handle data into inode. | 345 | xfs_compat_handlereq_to_dentry( |
| 345 | * | 346 | struct file *parfilp, |
| 346 | * We use the fact that all the fsop_handlereq ioctl calls have a data | 347 | compat_xfs_fsop_handlereq_t *hreq) |
| 347 | * structure argument whose first component is always a xfs_fsop_handlereq_t, | ||
| 348 | * so we can pass that sub structure into this handy, shared routine. | ||
| 349 | * | ||
| 350 | * If no error, caller must always iput the returned inode. | ||
| 351 | */ | ||
| 352 | STATIC int | ||
| 353 | xfs_vget_fsop_handlereq_compat( | ||
| 354 | xfs_mount_t *mp, | ||
| 355 | struct inode *parinode, /* parent inode pointer */ | ||
| 356 | compat_xfs_fsop_handlereq_t *hreq, | ||
| 357 | struct inode **inode) | ||
| 358 | { | 348 | { |
| 359 | void __user *hanp; | 349 | return xfs_handle_to_dentry(parfilp, |
| 360 | size_t hlen; | 350 | compat_ptr(hreq->ihandle), hreq->ihandlen); |
| 361 | xfs_fid_t *xfid; | ||
| 362 | xfs_handle_t *handlep; | ||
| 363 | xfs_handle_t handle; | ||
| 364 | xfs_inode_t *ip; | ||
| 365 | xfs_ino_t ino; | ||
| 366 | __u32 igen; | ||
| 367 | int error; | ||
| 368 | |||
| 369 | /* | ||
| 370 | * Only allow handle opens under a directory. | ||
| 371 | */ | ||
| 372 | if (!S_ISDIR(parinode->i_mode)) | ||
| 373 | return XFS_ERROR(ENOTDIR); | ||
| 374 | |||
| 375 | hanp = compat_ptr(hreq->ihandle); | ||
| 376 | hlen = hreq->ihandlen; | ||
| 377 | handlep = &handle; | ||
| 378 | |||
| 379 | if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep)) | ||
| 380 | return XFS_ERROR(EINVAL); | ||
| 381 | if (copy_from_user(handlep, hanp, hlen)) | ||
| 382 | return XFS_ERROR(EFAULT); | ||
| 383 | if (hlen < sizeof(*handlep)) | ||
| 384 | memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen); | ||
| 385 | if (hlen > sizeof(handlep->ha_fsid)) { | ||
| 386 | if (handlep->ha_fid.fid_len != | ||
| 387 | (hlen - sizeof(handlep->ha_fsid) - | ||
| 388 | sizeof(handlep->ha_fid.fid_len)) || | ||
| 389 | handlep->ha_fid.fid_pad) | ||
| 390 | return XFS_ERROR(EINVAL); | ||
| 391 | } | ||
| 392 | |||
| 393 | /* | ||
| 394 | * Crack the handle, obtain the inode # & generation # | ||
| 395 | */ | ||
| 396 | xfid = (struct xfs_fid *)&handlep->ha_fid; | ||
| 397 | if (xfid->fid_len == sizeof(*xfid) - sizeof(xfid->fid_len)) { | ||
| 398 | ino = xfid->fid_ino; | ||
| 399 | igen = xfid->fid_gen; | ||
| 400 | } else { | ||
| 401 | return XFS_ERROR(EINVAL); | ||
| 402 | } | ||
| 403 | |||
| 404 | /* | ||
| 405 | * Get the XFS inode, building a Linux inode to go with it. | ||
| 406 | */ | ||
| 407 | error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); | ||
| 408 | if (error) | ||
| 409 | return error; | ||
| 410 | if (ip == NULL) | ||
| 411 | return XFS_ERROR(EIO); | ||
| 412 | if (ip->i_d.di_gen != igen) { | ||
| 413 | xfs_iput_new(ip, XFS_ILOCK_SHARED); | ||
| 414 | return XFS_ERROR(ENOENT); | ||
| 415 | } | ||
| 416 | |||
| 417 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||
| 418 | |||
| 419 | *inode = VFS_I(ip); | ||
| 420 | return 0; | ||
| 421 | } | 351 | } |
| 422 | 352 | ||
| 423 | STATIC int | 353 | STATIC int |
| 424 | xfs_compat_attrlist_by_handle( | 354 | xfs_compat_attrlist_by_handle( |
| 425 | xfs_mount_t *mp, | 355 | struct file *parfilp, |
| 426 | void __user *arg, | 356 | void __user *arg) |
| 427 | struct inode *parinode) | ||
| 428 | { | 357 | { |
| 429 | int error; | 358 | int error; |
| 430 | attrlist_cursor_kern_t *cursor; | 359 | attrlist_cursor_kern_t *cursor; |
| 431 | compat_xfs_fsop_attrlist_handlereq_t al_hreq; | 360 | compat_xfs_fsop_attrlist_handlereq_t al_hreq; |
| 432 | struct inode *inode; | 361 | struct dentry *dentry; |
| 433 | char *kbuf; | 362 | char *kbuf; |
| 434 | 363 | ||
| 435 | if (!capable(CAP_SYS_ADMIN)) | 364 | if (!capable(CAP_SYS_ADMIN)) |
| @@ -446,17 +375,17 @@ xfs_compat_attrlist_by_handle( | |||
| 446 | if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) | 375 | if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) |
| 447 | return -XFS_ERROR(EINVAL); | 376 | return -XFS_ERROR(EINVAL); |
| 448 | 377 | ||
| 449 | error = xfs_vget_fsop_handlereq_compat(mp, parinode, &al_hreq.hreq, | 378 | dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq); |
| 450 | &inode); | 379 | if (IS_ERR(dentry)) |
| 451 | if (error) | 380 | return PTR_ERR(dentry); |
| 452 | goto out; | ||
| 453 | 381 | ||
| 382 | error = -ENOMEM; | ||
| 454 | kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL); | 383 | kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL); |
| 455 | if (!kbuf) | 384 | if (!kbuf) |
| 456 | goto out_vn_rele; | 385 | goto out_dput; |
| 457 | 386 | ||
| 458 | cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; | 387 | cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; |
| 459 | error = xfs_attr_list(XFS_I(inode), kbuf, al_hreq.buflen, | 388 | error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, |
| 460 | al_hreq.flags, cursor); | 389 | al_hreq.flags, cursor); |
| 461 | if (error) | 390 | if (error) |
| 462 | goto out_kfree; | 391 | goto out_kfree; |
| @@ -466,22 +395,20 @@ xfs_compat_attrlist_by_handle( | |||
| 466 | 395 | ||
| 467 | out_kfree: | 396 | out_kfree: |
| 468 | kfree(kbuf); | 397 | kfree(kbuf); |
| 469 | out_vn_rele: | 398 | out_dput: |
| 470 | iput(inode); | 399 | dput(dentry); |
| 471 | out: | 400 | return error; |
| 472 | return -error; | ||
| 473 | } | 401 | } |
| 474 | 402 | ||
| 475 | STATIC int | 403 | STATIC int |
| 476 | xfs_compat_attrmulti_by_handle( | 404 | xfs_compat_attrmulti_by_handle( |
| 477 | xfs_mount_t *mp, | 405 | struct file *parfilp, |
| 478 | void __user *arg, | 406 | void __user *arg) |
| 479 | struct inode *parinode) | ||
| 480 | { | 407 | { |
| 481 | int error; | 408 | int error; |
| 482 | compat_xfs_attr_multiop_t *ops; | 409 | compat_xfs_attr_multiop_t *ops; |
| 483 | compat_xfs_fsop_attrmulti_handlereq_t am_hreq; | 410 | compat_xfs_fsop_attrmulti_handlereq_t am_hreq; |
| 484 | struct inode *inode; | 411 | struct dentry *dentry; |
| 485 | unsigned int i, size; | 412 | unsigned int i, size; |
| 486 | char *attr_name; | 413 | char *attr_name; |
| 487 | 414 | ||
| @@ -491,20 +418,19 @@ xfs_compat_attrmulti_by_handle( | |||
| 491 | sizeof(compat_xfs_fsop_attrmulti_handlereq_t))) | 418 | sizeof(compat_xfs_fsop_attrmulti_handlereq_t))) |
| 492 | return -XFS_ERROR(EFAULT); | 419 | return -XFS_ERROR(EFAULT); |
| 493 | 420 | ||
| 494 | error = xfs_vget_fsop_handlereq_compat(mp, parinode, &am_hreq.hreq, | 421 | dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq); |
| 495 | &inode); | 422 | if (IS_ERR(dentry)) |
| 496 | if (error) | 423 | return PTR_ERR(dentry); |
| 497 | goto out; | ||
| 498 | 424 | ||
| 499 | error = E2BIG; | 425 | error = E2BIG; |
| 500 | size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t); | 426 | size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t); |
| 501 | if (!size || size > 16 * PAGE_SIZE) | 427 | if (!size || size > 16 * PAGE_SIZE) |
| 502 | goto out_vn_rele; | 428 | goto out_dput; |
| 503 | 429 | ||
| 504 | error = ENOMEM; | 430 | error = ENOMEM; |
| 505 | ops = kmalloc(size, GFP_KERNEL); | 431 | ops = kmalloc(size, GFP_KERNEL); |
| 506 | if (!ops) | 432 | if (!ops) |
| 507 | goto out_vn_rele; | 433 | goto out_dput; |
| 508 | 434 | ||
| 509 | error = EFAULT; | 435 | error = EFAULT; |
| 510 | if (copy_from_user(ops, compat_ptr(am_hreq.ops), size)) | 436 | if (copy_from_user(ops, compat_ptr(am_hreq.ops), size)) |
| @@ -527,20 +453,29 @@ xfs_compat_attrmulti_by_handle( | |||
| 527 | 453 | ||
| 528 | switch (ops[i].am_opcode) { | 454 | switch (ops[i].am_opcode) { |
| 529 | case ATTR_OP_GET: | 455 | case ATTR_OP_GET: |
| 530 | ops[i].am_error = xfs_attrmulti_attr_get(inode, | 456 | ops[i].am_error = xfs_attrmulti_attr_get( |
| 531 | attr_name, | 457 | dentry->d_inode, attr_name, |
| 532 | compat_ptr(ops[i].am_attrvalue), | 458 | compat_ptr(ops[i].am_attrvalue), |
| 533 | &ops[i].am_length, ops[i].am_flags); | 459 | &ops[i].am_length, ops[i].am_flags); |
| 534 | break; | 460 | break; |
| 535 | case ATTR_OP_SET: | 461 | case ATTR_OP_SET: |
| 536 | ops[i].am_error = xfs_attrmulti_attr_set(inode, | 462 | ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); |
| 537 | attr_name, | 463 | if (ops[i].am_error) |
| 464 | break; | ||
| 465 | ops[i].am_error = xfs_attrmulti_attr_set( | ||
| 466 | dentry->d_inode, attr_name, | ||
| 538 | compat_ptr(ops[i].am_attrvalue), | 467 | compat_ptr(ops[i].am_attrvalue), |
| 539 | ops[i].am_length, ops[i].am_flags); | 468 | ops[i].am_length, ops[i].am_flags); |
| 469 | mnt_drop_write(parfilp->f_path.mnt); | ||
| 540 | break; | 470 | break; |
| 541 | case ATTR_OP_REMOVE: | 471 | case ATTR_OP_REMOVE: |
| 542 | ops[i].am_error = xfs_attrmulti_attr_remove(inode, | 472 | ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); |
| 543 | attr_name, ops[i].am_flags); | 473 | if (ops[i].am_error) |
| 474 | break; | ||
| 475 | ops[i].am_error = xfs_attrmulti_attr_remove( | ||
| 476 | dentry->d_inode, attr_name, | ||
| 477 | ops[i].am_flags); | ||
| 478 | mnt_drop_write(parfilp->f_path.mnt); | ||
| 544 | break; | 479 | break; |
| 545 | default: | 480 | default: |
| 546 | ops[i].am_error = EINVAL; | 481 | ops[i].am_error = EINVAL; |
| @@ -553,22 +488,20 @@ xfs_compat_attrmulti_by_handle( | |||
| 553 | kfree(attr_name); | 488 | kfree(attr_name); |
| 554 | out_kfree_ops: | 489 | out_kfree_ops: |
| 555 | kfree(ops); | 490 | kfree(ops); |
| 556 | out_vn_rele: | 491 | out_dput: |
| 557 | iput(inode); | 492 | dput(dentry); |
| 558 | out: | ||
| 559 | return -error; | 493 | return -error; |
| 560 | } | 494 | } |
| 561 | 495 | ||
| 562 | STATIC int | 496 | STATIC int |
| 563 | xfs_compat_fssetdm_by_handle( | 497 | xfs_compat_fssetdm_by_handle( |
| 564 | xfs_mount_t *mp, | 498 | struct file *parfilp, |
| 565 | void __user *arg, | 499 | void __user *arg) |
| 566 | struct inode *parinode) | ||
| 567 | { | 500 | { |
| 568 | int error; | 501 | int error; |
| 569 | struct fsdmidata fsd; | 502 | struct fsdmidata fsd; |
| 570 | compat_xfs_fsop_setdm_handlereq_t dmhreq; | 503 | compat_xfs_fsop_setdm_handlereq_t dmhreq; |
| 571 | struct inode *inode; | 504 | struct dentry *dentry; |
| 572 | 505 | ||
| 573 | if (!capable(CAP_MKNOD)) | 506 | if (!capable(CAP_MKNOD)) |
| 574 | return -XFS_ERROR(EPERM); | 507 | return -XFS_ERROR(EPERM); |
| @@ -576,12 +509,11 @@ xfs_compat_fssetdm_by_handle( | |||
| 576 | sizeof(compat_xfs_fsop_setdm_handlereq_t))) | 509 | sizeof(compat_xfs_fsop_setdm_handlereq_t))) |
| 577 | return -XFS_ERROR(EFAULT); | 510 | return -XFS_ERROR(EFAULT); |
| 578 | 511 | ||
| 579 | error = xfs_vget_fsop_handlereq_compat(mp, parinode, &dmhreq.hreq, | 512 | dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq); |
| 580 | &inode); | 513 | if (IS_ERR(dentry)) |
| 581 | if (error) | 514 | return PTR_ERR(dentry); |
| 582 | return -error; | ||
| 583 | 515 | ||
| 584 | if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) { | 516 | if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { |
| 585 | error = -XFS_ERROR(EPERM); | 517 | error = -XFS_ERROR(EPERM); |
| 586 | goto out; | 518 | goto out; |
| 587 | } | 519 | } |
| @@ -591,11 +523,11 @@ xfs_compat_fssetdm_by_handle( | |||
| 591 | goto out; | 523 | goto out; |
| 592 | } | 524 | } |
| 593 | 525 | ||
| 594 | error = -xfs_set_dmattrs(XFS_I(inode), fsd.fsd_dmevmask, | 526 | error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, |
| 595 | fsd.fsd_dmstate); | 527 | fsd.fsd_dmstate); |
| 596 | 528 | ||
| 597 | out: | 529 | out: |
| 598 | iput(inode); | 530 | dput(dentry); |
| 599 | return error; | 531 | return error; |
| 600 | } | 532 | } |
| 601 | 533 | ||
| @@ -722,21 +654,21 @@ xfs_file_compat_ioctl( | |||
| 722 | 654 | ||
| 723 | if (xfs_compat_handlereq_copyin(&hreq, arg)) | 655 | if (xfs_compat_handlereq_copyin(&hreq, arg)) |
| 724 | return -XFS_ERROR(EFAULT); | 656 | return -XFS_ERROR(EFAULT); |
| 725 | return xfs_open_by_handle(mp, &hreq, filp, inode); | 657 | return xfs_open_by_handle(filp, &hreq); |
| 726 | } | 658 | } |
| 727 | case XFS_IOC_READLINK_BY_HANDLE_32: { | 659 | case XFS_IOC_READLINK_BY_HANDLE_32: { |
| 728 | struct xfs_fsop_handlereq hreq; | 660 | struct xfs_fsop_handlereq hreq; |
| 729 | 661 | ||
| 730 | if (xfs_compat_handlereq_copyin(&hreq, arg)) | 662 | if (xfs_compat_handlereq_copyin(&hreq, arg)) |
| 731 | return -XFS_ERROR(EFAULT); | 663 | return -XFS_ERROR(EFAULT); |
| 732 | return xfs_readlink_by_handle(mp, &hreq, inode); | 664 | return xfs_readlink_by_handle(filp, &hreq); |
| 733 | } | 665 | } |
| 734 | case XFS_IOC_ATTRLIST_BY_HANDLE_32: | 666 | case XFS_IOC_ATTRLIST_BY_HANDLE_32: |
| 735 | return xfs_compat_attrlist_by_handle(mp, arg, inode); | 667 | return xfs_compat_attrlist_by_handle(filp, arg); |
| 736 | case XFS_IOC_ATTRMULTI_BY_HANDLE_32: | 668 | case XFS_IOC_ATTRMULTI_BY_HANDLE_32: |
| 737 | return xfs_compat_attrmulti_by_handle(mp, arg, inode); | 669 | return xfs_compat_attrmulti_by_handle(filp, arg); |
| 738 | case XFS_IOC_FSSETDM_BY_HANDLE_32: | 670 | case XFS_IOC_FSSETDM_BY_HANDLE_32: |
| 739 | return xfs_compat_fssetdm_by_handle(mp, arg, inode); | 671 | return xfs_compat_fssetdm_by_handle(filp, arg); |
| 740 | default: | 672 | default: |
| 741 | return -XFS_ERROR(ENOIOCTLCMD); | 673 | return -XFS_ERROR(ENOIOCTLCMD); |
| 742 | } | 674 | } |
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 95a971080368..c71e226da7f5 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
| @@ -1197,6 +1197,7 @@ xfs_fs_remount( | |||
| 1197 | struct xfs_mount *mp = XFS_M(sb); | 1197 | struct xfs_mount *mp = XFS_M(sb); |
| 1198 | substring_t args[MAX_OPT_ARGS]; | 1198 | substring_t args[MAX_OPT_ARGS]; |
| 1199 | char *p; | 1199 | char *p; |
| 1200 | int error; | ||
| 1200 | 1201 | ||
| 1201 | while ((p = strsep(&options, ",")) != NULL) { | 1202 | while ((p = strsep(&options, ",")) != NULL) { |
| 1202 | int token; | 1203 | int token; |
| @@ -1247,11 +1248,25 @@ xfs_fs_remount( | |||
| 1247 | } | 1248 | } |
| 1248 | } | 1249 | } |
| 1249 | 1250 | ||
| 1250 | /* rw/ro -> rw */ | 1251 | /* ro -> rw */ |
| 1251 | if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) { | 1252 | if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) { |
| 1252 | mp->m_flags &= ~XFS_MOUNT_RDONLY; | 1253 | mp->m_flags &= ~XFS_MOUNT_RDONLY; |
| 1253 | if (mp->m_flags & XFS_MOUNT_BARRIER) | 1254 | if (mp->m_flags & XFS_MOUNT_BARRIER) |
| 1254 | xfs_mountfs_check_barriers(mp); | 1255 | xfs_mountfs_check_barriers(mp); |
| 1256 | |||
| 1257 | /* | ||
| 1258 | * If this is the first remount to writeable state we | ||
| 1259 | * might have some superblock changes to update. | ||
| 1260 | */ | ||
| 1261 | if (mp->m_update_flags) { | ||
| 1262 | error = xfs_mount_log_sb(mp, mp->m_update_flags); | ||
| 1263 | if (error) { | ||
| 1264 | cmn_err(CE_WARN, | ||
| 1265 | "XFS: failed to write sb changes"); | ||
| 1266 | return error; | ||
| 1267 | } | ||
| 1268 | mp->m_update_flags = 0; | ||
| 1269 | } | ||
| 1255 | } | 1270 | } |
| 1256 | 1271 | ||
| 1257 | /* rw -> ro */ | 1272 | /* rw -> ro */ |
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index 591ca6602bfb..6543c0b29753 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c | |||
| @@ -73,6 +73,8 @@ int xfs_dqreq_num; | |||
| 73 | int xfs_dqerror_mod = 33; | 73 | int xfs_dqerror_mod = 33; |
| 74 | #endif | 74 | #endif |
| 75 | 75 | ||
| 76 | static struct lock_class_key xfs_dquot_other_class; | ||
| 77 | |||
| 76 | /* | 78 | /* |
| 77 | * Allocate and initialize a dquot. We don't always allocate fresh memory; | 79 | * Allocate and initialize a dquot. We don't always allocate fresh memory; |
| 78 | * we try to reclaim a free dquot if the number of incore dquots are above | 80 | * we try to reclaim a free dquot if the number of incore dquots are above |
| @@ -139,7 +141,15 @@ xfs_qm_dqinit( | |||
| 139 | ASSERT(dqp->q_trace); | 141 | ASSERT(dqp->q_trace); |
| 140 | xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT"); | 142 | xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT"); |
| 141 | #endif | 143 | #endif |
| 142 | } | 144 | } |
| 145 | |||
| 146 | /* | ||
| 147 | * In either case we need to make sure group quotas have a different | ||
| 148 | * lock class than user quotas, to make sure lockdep knows we can | ||
| 149 | * hold locks of one of each at the same time. | ||

| 150 | */ | ||
| 151 | if (!(type & XFS_DQ_USER)) | ||
| 152 | lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class); | ||
| 143 | 153 | ||
| 144 | /* | 154 | /* |
| 145 | * log item gets initialized later | 155 | * log item gets initialized later |
| @@ -421,7 +431,7 @@ xfs_qm_dqalloc( | |||
| 421 | /* | 431 | /* |
| 422 | * Initialize the bmap freelist prior to calling bmapi code. | 432 | * Initialize the bmap freelist prior to calling bmapi code. |
| 423 | */ | 433 | */ |
| 424 | XFS_BMAP_INIT(&flist, &firstblock); | 434 | xfs_bmap_init(&flist, &firstblock); |
| 425 | xfs_ilock(quotip, XFS_ILOCK_EXCL); | 435 | xfs_ilock(quotip, XFS_ILOCK_EXCL); |
| 426 | /* | 436 | /* |
| 427 | * Return if this type of quotas is turned off while we didn't | 437 | * Return if this type of quotas is turned off while we didn't |
| @@ -1383,6 +1393,12 @@ xfs_dqunlock_nonotify( | |||
| 1383 | mutex_unlock(&(dqp->q_qlock)); | 1393 | mutex_unlock(&(dqp->q_qlock)); |
| 1384 | } | 1394 | } |
| 1385 | 1395 | ||
| 1396 | /* | ||
| 1397 | * Lock two xfs_dquot structures. | ||
| 1398 | * | ||
| 1399 | * To avoid deadlocks we always lock the quota structure with | ||
| 1400 | * the lower id first. | ||
| 1401 | */ | ||
| 1386 | void | 1402 | void |
| 1387 | xfs_dqlock2( | 1403 | xfs_dqlock2( |
| 1388 | xfs_dquot_t *d1, | 1404 | xfs_dquot_t *d1, |
| @@ -1392,18 +1408,16 @@ xfs_dqlock2( | |||
| 1392 | ASSERT(d1 != d2); | 1408 | ASSERT(d1 != d2); |
| 1393 | if (be32_to_cpu(d1->q_core.d_id) > | 1409 | if (be32_to_cpu(d1->q_core.d_id) > |
| 1394 | be32_to_cpu(d2->q_core.d_id)) { | 1410 | be32_to_cpu(d2->q_core.d_id)) { |
| 1395 | xfs_dqlock(d2); | 1411 | mutex_lock(&d2->q_qlock); |
| 1396 | xfs_dqlock(d1); | 1412 | mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED); |
| 1397 | } else { | 1413 | } else { |
| 1398 | xfs_dqlock(d1); | 1414 | mutex_lock(&d1->q_qlock); |
| 1399 | xfs_dqlock(d2); | 1415 | mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED); |
| 1400 | } | ||
| 1401 | } else { | ||
| 1402 | if (d1) { | ||
| 1403 | xfs_dqlock(d1); | ||
| 1404 | } else if (d2) { | ||
| 1405 | xfs_dqlock(d2); | ||
| 1406 | } | 1416 | } |
| 1417 | } else if (d1) { | ||
| 1418 | mutex_lock(&d1->q_qlock); | ||
| 1419 | } else if (d2) { | ||
| 1420 | mutex_lock(&d2->q_qlock); | ||
| 1407 | } | 1421 | } |
| 1408 | } | 1422 | } |
| 1409 | 1423 | ||
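xfs_dqlock2() above avoids an AB-BA deadlock by always taking the dquot with the lower id first, and tells lockdep about the second acquisition via mutex_lock_nested() with XFS_QLOCK_NESTED. A minimal pthread sketch of the same ordering rule; the dquot-like struct and ids are made up for illustration, and since userspace has no lockdep only the ordering half is shown:

#include <pthread.h>
#include <stdio.h>

/* Stand-in for a dquot: an id plus its lock. */
struct dq {
	unsigned int	id;
	pthread_mutex_t	lock;
};

/* Always lock the structure with the lower id first, so two threads
 * locking the same pair in opposite argument order cannot deadlock.
 * This mirrors the rule documented for xfs_dqlock2(). */
static void dq_lock2(struct dq *a, struct dq *b)
{
	if (a->id > b->id) {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);	/* the "nested" acquisition */
	} else {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	}
}

static void dq_unlock2(struct dq *a, struct dq *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct dq user  = { 500, PTHREAD_MUTEX_INITIALIZER };
	struct dq group = { 100, PTHREAD_MUTEX_INITIALIZER };

	/* Either argument order ends up taking id 100 before id 500. */
	dq_lock2(&user, &group);
	printf("locked id %u then id %u\n", group.id, user.id);
	dq_unlock2(&user, &group);
	return 0;
}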
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h index 7e455337e2ba..d443e93b4331 100644 --- a/fs/xfs/quota/xfs_dquot.h +++ b/fs/xfs/quota/xfs_dquot.h | |||
| @@ -97,6 +97,16 @@ typedef struct xfs_dquot { | |||
| 97 | #define dq_hashlist q_lists.dqm_hashlist | 97 | #define dq_hashlist q_lists.dqm_hashlist |
| 98 | #define dq_flags q_lists.dqm_flags | 98 | #define dq_flags q_lists.dqm_flags |
| 99 | 99 | ||
| 100 | /* | ||
| 101 | * Lock hierarchy for q_qlock: | ||
| 102 | * XFS_QLOCK_NORMAL is the implicit default, | ||
| 103 | * XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2 | ||
| 104 | */ | ||
| 105 | enum { | ||
| 106 | XFS_QLOCK_NORMAL = 0, | ||
| 107 | XFS_QLOCK_NESTED, | ||
| 108 | }; | ||
| 109 | |||
| 100 | #define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++) | 110 | #define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++) |
| 101 | 111 | ||
| 102 | #ifdef DEBUG | 112 | #ifdef DEBUG |
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 6b13960cf318..7a2beb64314f 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c | |||
| @@ -1070,6 +1070,13 @@ xfs_qm_sync( | |||
| 1070 | return 0; | 1070 | return 0; |
| 1071 | } | 1071 | } |
| 1072 | 1072 | ||
| 1073 | /* | ||
| 1074 | * The hash chains and the mplist use the same xfs_dqhash structure as | ||
| 1075 | * their list head, but we can take the mplist qh_lock and one of the | ||
| 1076 | * hash qh_locks at the same time without any problem as they aren't | ||
| 1077 | * related. | ||
| 1078 | */ | ||
| 1079 | static struct lock_class_key xfs_quota_mplist_class; | ||
| 1073 | 1080 | ||
| 1074 | /* | 1081 | /* |
| 1075 | * This initializes all the quota information that's kept in the | 1082 | * This initializes all the quota information that's kept in the |
| @@ -1105,6 +1112,8 @@ xfs_qm_init_quotainfo( | |||
| 1105 | } | 1112 | } |
| 1106 | 1113 | ||
| 1107 | xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0); | 1114 | xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0); |
| 1115 | lockdep_set_class(&qinf->qi_dqlist.qh_lock, &xfs_quota_mplist_class); | ||
| 1116 | |||
| 1108 | qinf->qi_dqreclaims = 0; | 1117 | qinf->qi_dqreclaims = 0; |
| 1109 | 1118 | ||
| 1110 | /* mutex used to serialize quotaoffs */ | 1119 | /* mutex used to serialize quotaoffs */ |
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index d3b3cf742999..143d63ecb20a 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h | |||
| @@ -244,8 +244,8 @@ typedef struct xfs_perag | |||
| 244 | #define XFS_AG_CHECK_DADDR(mp,d,len) \ | 244 | #define XFS_AG_CHECK_DADDR(mp,d,len) \ |
| 245 | ((len) == 1 ? \ | 245 | ((len) == 1 ? \ |
| 246 | ASSERT((d) == XFS_SB_DADDR || \ | 246 | ASSERT((d) == XFS_SB_DADDR || \ |
| 247 | XFS_DADDR_TO_AGBNO(mp, d) != XFS_SB_DADDR) : \ | 247 | xfs_daddr_to_agbno(mp, d) != XFS_SB_DADDR) : \ |
| 248 | ASSERT(XFS_DADDR_TO_AGNO(mp, d) == \ | 248 | ASSERT(xfs_daddr_to_agno(mp, d) == \ |
| 249 | XFS_DADDR_TO_AGNO(mp, (d) + (len) - 1))) | 249 | xfs_daddr_to_agno(mp, (d) + (len) - 1))) |
| 250 | 250 | ||
| 251 | #endif /* __XFS_AG_H__ */ | 251 | #endif /* __XFS_AG_H__ */ |
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 733cb75a8c5d..c10c3a292d30 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c | |||
| @@ -115,7 +115,7 @@ xfs_allocbt_free_block( | |||
| 115 | xfs_agblock_t bno; | 115 | xfs_agblock_t bno; |
| 116 | int error; | 116 | int error; |
| 117 | 117 | ||
| 118 | bno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(bp)); | 118 | bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp)); |
| 119 | error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1); | 119 | error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1); |
| 120 | if (error) | 120 | if (error) |
| 121 | return error; | 121 | return error; |
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index f7cdc28aff41..5fde1654b430 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c | |||
| @@ -374,7 +374,7 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, | |||
| 374 | * It won't fit in the shortform, transform to a leaf block. | 374 | * It won't fit in the shortform, transform to a leaf block. |
| 375 | * GROT: another possible req'mt for a double-split btree op. | 375 | * GROT: another possible req'mt for a double-split btree op. |
| 376 | */ | 376 | */ |
| 377 | XFS_BMAP_INIT(args.flist, args.firstblock); | 377 | xfs_bmap_init(args.flist, args.firstblock); |
| 378 | error = xfs_attr_shortform_to_leaf(&args); | 378 | error = xfs_attr_shortform_to_leaf(&args); |
| 379 | if (!error) { | 379 | if (!error) { |
| 380 | error = xfs_bmap_finish(&args.trans, args.flist, | 380 | error = xfs_bmap_finish(&args.trans, args.flist, |
| @@ -956,7 +956,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) | |||
| 956 | * Commit that transaction so that the node_addname() call | 956 | * Commit that transaction so that the node_addname() call |
| 957 | * can manage its own transactions. | 957 | * can manage its own transactions. |
| 958 | */ | 958 | */ |
| 959 | XFS_BMAP_INIT(args->flist, args->firstblock); | 959 | xfs_bmap_init(args->flist, args->firstblock); |
| 960 | error = xfs_attr_leaf_to_node(args); | 960 | error = xfs_attr_leaf_to_node(args); |
| 961 | if (!error) { | 961 | if (!error) { |
| 962 | error = xfs_bmap_finish(&args->trans, args->flist, | 962 | error = xfs_bmap_finish(&args->trans, args->flist, |
| @@ -1057,7 +1057,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) | |||
| 1057 | * If the result is small enough, shrink it all into the inode. | 1057 | * If the result is small enough, shrink it all into the inode. |
| 1058 | */ | 1058 | */ |
| 1059 | if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { | 1059 | if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { |
| 1060 | XFS_BMAP_INIT(args->flist, args->firstblock); | 1060 | xfs_bmap_init(args->flist, args->firstblock); |
| 1061 | error = xfs_attr_leaf_to_shortform(bp, args, forkoff); | 1061 | error = xfs_attr_leaf_to_shortform(bp, args, forkoff); |
| 1062 | /* bp is gone due to xfs_da_shrink_inode */ | 1062 | /* bp is gone due to xfs_da_shrink_inode */ |
| 1063 | if (!error) { | 1063 | if (!error) { |
| @@ -1135,7 +1135,7 @@ xfs_attr_leaf_removename(xfs_da_args_t *args) | |||
| 1135 | * If the result is small enough, shrink it all into the inode. | 1135 | * If the result is small enough, shrink it all into the inode. |
| 1136 | */ | 1136 | */ |
| 1137 | if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { | 1137 | if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { |
| 1138 | XFS_BMAP_INIT(args->flist, args->firstblock); | 1138 | xfs_bmap_init(args->flist, args->firstblock); |
| 1139 | error = xfs_attr_leaf_to_shortform(bp, args, forkoff); | 1139 | error = xfs_attr_leaf_to_shortform(bp, args, forkoff); |
| 1140 | /* bp is gone due to xfs_da_shrink_inode */ | 1140 | /* bp is gone due to xfs_da_shrink_inode */ |
| 1141 | if (!error) { | 1141 | if (!error) { |
| @@ -1290,7 +1290,7 @@ restart: | |||
| 1290 | * have been a b-tree. | 1290 | * have been a b-tree. |
| 1291 | */ | 1291 | */ |
| 1292 | xfs_da_state_free(state); | 1292 | xfs_da_state_free(state); |
| 1293 | XFS_BMAP_INIT(args->flist, args->firstblock); | 1293 | xfs_bmap_init(args->flist, args->firstblock); |
| 1294 | error = xfs_attr_leaf_to_node(args); | 1294 | error = xfs_attr_leaf_to_node(args); |
| 1295 | if (!error) { | 1295 | if (!error) { |
| 1296 | error = xfs_bmap_finish(&args->trans, | 1296 | error = xfs_bmap_finish(&args->trans, |
| @@ -1331,7 +1331,7 @@ restart: | |||
| 1331 | * in the index/blkno/rmtblkno/rmtblkcnt fields and | 1331 | * in the index/blkno/rmtblkno/rmtblkcnt fields and |
| 1332 | * in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields. | 1332 | * in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields. |
| 1333 | */ | 1333 | */ |
| 1334 | XFS_BMAP_INIT(args->flist, args->firstblock); | 1334 | xfs_bmap_init(args->flist, args->firstblock); |
| 1335 | error = xfs_da_split(state); | 1335 | error = xfs_da_split(state); |
| 1336 | if (!error) { | 1336 | if (!error) { |
| 1337 | error = xfs_bmap_finish(&args->trans, args->flist, | 1337 | error = xfs_bmap_finish(&args->trans, args->flist, |
| @@ -1443,7 +1443,7 @@ restart: | |||
| 1443 | * Check to see if the tree needs to be collapsed. | 1443 | * Check to see if the tree needs to be collapsed. |
| 1444 | */ | 1444 | */ |
| 1445 | if (retval && (state->path.active > 1)) { | 1445 | if (retval && (state->path.active > 1)) { |
| 1446 | XFS_BMAP_INIT(args->flist, args->firstblock); | 1446 | xfs_bmap_init(args->flist, args->firstblock); |
| 1447 | error = xfs_da_join(state); | 1447 | error = xfs_da_join(state); |
| 1448 | if (!error) { | 1448 | if (!error) { |
| 1449 | error = xfs_bmap_finish(&args->trans, | 1449 | error = xfs_bmap_finish(&args->trans, |
| @@ -1579,7 +1579,7 @@ xfs_attr_node_removename(xfs_da_args_t *args) | |||
| 1579 | * Check to see if the tree needs to be collapsed. | 1579 | * Check to see if the tree needs to be collapsed. |
| 1580 | */ | 1580 | */ |
| 1581 | if (retval && (state->path.active > 1)) { | 1581 | if (retval && (state->path.active > 1)) { |
| 1582 | XFS_BMAP_INIT(args->flist, args->firstblock); | 1582 | xfs_bmap_init(args->flist, args->firstblock); |
| 1583 | error = xfs_da_join(state); | 1583 | error = xfs_da_join(state); |
| 1584 | if (!error) { | 1584 | if (!error) { |
| 1585 | error = xfs_bmap_finish(&args->trans, args->flist, | 1585 | error = xfs_bmap_finish(&args->trans, args->flist, |
| @@ -1630,7 +1630,7 @@ xfs_attr_node_removename(xfs_da_args_t *args) | |||
| 1630 | == XFS_ATTR_LEAF_MAGIC); | 1630 | == XFS_ATTR_LEAF_MAGIC); |
| 1631 | 1631 | ||
| 1632 | if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { | 1632 | if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { |
| 1633 | XFS_BMAP_INIT(args->flist, args->firstblock); | 1633 | xfs_bmap_init(args->flist, args->firstblock); |
| 1634 | error = xfs_attr_leaf_to_shortform(bp, args, forkoff); | 1634 | error = xfs_attr_leaf_to_shortform(bp, args, forkoff); |
| 1635 | /* bp is gone due to xfs_da_shrink_inode */ | 1635 | /* bp is gone due to xfs_da_shrink_inode */ |
| 1636 | if (!error) { | 1636 | if (!error) { |
| @@ -2069,7 +2069,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) | |||
| 2069 | /* | 2069 | /* |
| 2070 | * Allocate a single extent, up to the size of the value. | 2070 | * Allocate a single extent, up to the size of the value. |
| 2071 | */ | 2071 | */ |
| 2072 | XFS_BMAP_INIT(args->flist, args->firstblock); | 2072 | xfs_bmap_init(args->flist, args->firstblock); |
| 2073 | nmap = 1; | 2073 | nmap = 1; |
| 2074 | error = xfs_bmapi(args->trans, dp, (xfs_fileoff_t)lblkno, | 2074 | error = xfs_bmapi(args->trans, dp, (xfs_fileoff_t)lblkno, |
| 2075 | blkcnt, | 2075 | blkcnt, |
| @@ -2123,7 +2123,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) | |||
| 2123 | /* | 2123 | /* |
| 2124 | * Try to remember where we decided to put the value. | 2124 | * Try to remember where we decided to put the value. |
| 2125 | */ | 2125 | */ |
| 2126 | XFS_BMAP_INIT(args->flist, args->firstblock); | 2126 | xfs_bmap_init(args->flist, args->firstblock); |
| 2127 | nmap = 1; | 2127 | nmap = 1; |
| 2128 | error = xfs_bmapi(NULL, dp, (xfs_fileoff_t)lblkno, | 2128 | error = xfs_bmapi(NULL, dp, (xfs_fileoff_t)lblkno, |
| 2129 | args->rmtblkcnt, | 2129 | args->rmtblkcnt, |
| @@ -2188,7 +2188,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args) | |||
| 2188 | /* | 2188 | /* |
| 2189 | * Try to remember where we decided to put the value. | 2189 | * Try to remember where we decided to put the value. |
| 2190 | */ | 2190 | */ |
| 2191 | XFS_BMAP_INIT(args->flist, args->firstblock); | 2191 | xfs_bmap_init(args->flist, args->firstblock); |
| 2192 | nmap = 1; | 2192 | nmap = 1; |
| 2193 | error = xfs_bmapi(NULL, args->dp, (xfs_fileoff_t)lblkno, | 2193 | error = xfs_bmapi(NULL, args->dp, (xfs_fileoff_t)lblkno, |
| 2194 | args->rmtblkcnt, | 2194 | args->rmtblkcnt, |
| @@ -2229,7 +2229,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args) | |||
| 2229 | blkcnt = args->rmtblkcnt; | 2229 | blkcnt = args->rmtblkcnt; |
| 2230 | done = 0; | 2230 | done = 0; |
| 2231 | while (!done) { | 2231 | while (!done) { |
| 2232 | XFS_BMAP_INIT(args->flist, args->firstblock); | 2232 | xfs_bmap_init(args->flist, args->firstblock); |
| 2233 | error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt, | 2233 | error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt, |
| 2234 | XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, | 2234 | XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, |
| 2235 | 1, args->firstblock, args->flist, | 2235 | 1, args->firstblock, args->flist, |
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 138308e70d14..c852cd65aaea 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
| @@ -595,9 +595,9 @@ xfs_bmap_add_extent( | |||
| 595 | xfs_iext_insert(ifp, 0, 1, new); | 595 | xfs_iext_insert(ifp, 0, 1, new); |
| 596 | ASSERT(cur == NULL); | 596 | ASSERT(cur == NULL); |
| 597 | ifp->if_lastex = 0; | 597 | ifp->if_lastex = 0; |
| 598 | if (!ISNULLSTARTBLOCK(new->br_startblock)) { | 598 | if (!isnullstartblock(new->br_startblock)) { |
| 599 | XFS_IFORK_NEXT_SET(ip, whichfork, 1); | 599 | XFS_IFORK_NEXT_SET(ip, whichfork, 1); |
| 600 | logflags = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork); | 600 | logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); |
| 601 | } else | 601 | } else |
| 602 | logflags = 0; | 602 | logflags = 0; |
| 603 | /* DELTA: single new extent */ | 603 | /* DELTA: single new extent */ |
| @@ -613,7 +613,7 @@ xfs_bmap_add_extent( | |||
| 613 | /* | 613 | /* |
| 614 | * Any kind of new delayed allocation goes here. | 614 | * Any kind of new delayed allocation goes here. |
| 615 | */ | 615 | */ |
| 616 | else if (ISNULLSTARTBLOCK(new->br_startblock)) { | 616 | else if (isnullstartblock(new->br_startblock)) { |
| 617 | if (cur) | 617 | if (cur) |
| 618 | ASSERT((cur->bc_private.b.flags & | 618 | ASSERT((cur->bc_private.b.flags & |
| 619 | XFS_BTCUR_BPRV_WASDEL) == 0); | 619 | XFS_BTCUR_BPRV_WASDEL) == 0); |
| @@ -644,11 +644,11 @@ xfs_bmap_add_extent( | |||
| 644 | * in a delayed or unwritten allocation with a real one, or | 644 | * in a delayed or unwritten allocation with a real one, or |
| 645 | * converting real back to unwritten. | 645 | * converting real back to unwritten. |
| 646 | */ | 646 | */ |
| 647 | if (!ISNULLSTARTBLOCK(new->br_startblock) && | 647 | if (!isnullstartblock(new->br_startblock) && |
| 648 | new->br_startoff + new->br_blockcount > prev.br_startoff) { | 648 | new->br_startoff + new->br_blockcount > prev.br_startoff) { |
| 649 | if (prev.br_state != XFS_EXT_UNWRITTEN && | 649 | if (prev.br_state != XFS_EXT_UNWRITTEN && |
| 650 | ISNULLSTARTBLOCK(prev.br_startblock)) { | 650 | isnullstartblock(prev.br_startblock)) { |
| 651 | da_old = STARTBLOCKVAL(prev.br_startblock); | 651 | da_old = startblockval(prev.br_startblock); |
| 652 | if (cur) | 652 | if (cur) |
| 653 | ASSERT(cur->bc_private.b.flags & | 653 | ASSERT(cur->bc_private.b.flags & |
| 654 | XFS_BTCUR_BPRV_WASDEL); | 654 | XFS_BTCUR_BPRV_WASDEL); |
| @@ -803,7 +803,7 @@ xfs_bmap_add_extent_delay_real( | |||
| 803 | */ | 803 | */ |
| 804 | if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { | 804 | if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { |
| 805 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); | 805 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); |
| 806 | STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock)); | 806 | STATE_SET(LEFT_DELAY, isnullstartblock(LEFT.br_startblock)); |
| 807 | } | 807 | } |
| 808 | STATE_SET(LEFT_CONTIG, | 808 | STATE_SET(LEFT_CONTIG, |
| 809 | STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && | 809 | STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && |
| @@ -820,7 +820,7 @@ xfs_bmap_add_extent_delay_real( | |||
| 820 | idx < | 820 | idx < |
| 821 | ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { | 821 | ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { |
| 822 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); | 822 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); |
| 823 | STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock)); | 823 | STATE_SET(RIGHT_DELAY, isnullstartblock(RIGHT.br_startblock)); |
| 824 | } | 824 | } |
| 825 | STATE_SET(RIGHT_CONTIG, | 825 | STATE_SET(RIGHT_CONTIG, |
| 826 | STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && | 826 | STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && |
| @@ -1019,8 +1019,8 @@ xfs_bmap_add_extent_delay_real( | |||
| 1019 | goto done; | 1019 | goto done; |
| 1020 | } | 1020 | } |
| 1021 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), | 1021 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), |
| 1022 | STARTBLOCKVAL(PREV.br_startblock)); | 1022 | startblockval(PREV.br_startblock)); |
| 1023 | xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); | 1023 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
| 1024 | XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK); | 1024 | XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK); |
| 1025 | *dnew = temp; | 1025 | *dnew = temp; |
| 1026 | /* DELTA: The boundary between two in-core extents moved. */ | 1026 | /* DELTA: The boundary between two in-core extents moved. */ |
| @@ -1067,10 +1067,10 @@ xfs_bmap_add_extent_delay_real( | |||
| 1067 | goto done; | 1067 | goto done; |
| 1068 | } | 1068 | } |
| 1069 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), | 1069 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), |
| 1070 | STARTBLOCKVAL(PREV.br_startblock) - | 1070 | startblockval(PREV.br_startblock) - |
| 1071 | (cur ? cur->bc_private.b.allocated : 0)); | 1071 | (cur ? cur->bc_private.b.allocated : 0)); |
| 1072 | ep = xfs_iext_get_ext(ifp, idx + 1); | 1072 | ep = xfs_iext_get_ext(ifp, idx + 1); |
| 1073 | xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); | 1073 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
| 1074 | XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx + 1, XFS_DATA_FORK); | 1074 | XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx + 1, XFS_DATA_FORK); |
| 1075 | *dnew = temp; | 1075 | *dnew = temp; |
| 1076 | /* DELTA: One in-core extent is split in two. */ | 1076 | /* DELTA: One in-core extent is split in two. */ |
| @@ -1110,8 +1110,8 @@ xfs_bmap_add_extent_delay_real( | |||
| 1110 | goto done; | 1110 | goto done; |
| 1111 | } | 1111 | } |
| 1112 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), | 1112 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), |
| 1113 | STARTBLOCKVAL(PREV.br_startblock)); | 1113 | startblockval(PREV.br_startblock)); |
| 1114 | xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); | 1114 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
| 1115 | XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK); | 1115 | XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK); |
| 1116 | *dnew = temp; | 1116 | *dnew = temp; |
| 1117 | /* DELTA: The boundary between two in-core extents moved. */ | 1117 | /* DELTA: The boundary between two in-core extents moved. */ |
| @@ -1157,10 +1157,10 @@ xfs_bmap_add_extent_delay_real( | |||
| 1157 | goto done; | 1157 | goto done; |
| 1158 | } | 1158 | } |
| 1159 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), | 1159 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), |
| 1160 | STARTBLOCKVAL(PREV.br_startblock) - | 1160 | startblockval(PREV.br_startblock) - |
| 1161 | (cur ? cur->bc_private.b.allocated : 0)); | 1161 | (cur ? cur->bc_private.b.allocated : 0)); |
| 1162 | ep = xfs_iext_get_ext(ifp, idx); | 1162 | ep = xfs_iext_get_ext(ifp, idx); |
| 1163 | xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); | 1163 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
| 1164 | XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK); | 1164 | XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK); |
| 1165 | *dnew = temp; | 1165 | *dnew = temp; |
| 1166 | /* DELTA: One in-core extent is split in two. */ | 1166 | /* DELTA: One in-core extent is split in two. */ |
| @@ -1213,7 +1213,7 @@ xfs_bmap_add_extent_delay_real( | |||
| 1213 | } | 1213 | } |
| 1214 | temp = xfs_bmap_worst_indlen(ip, temp); | 1214 | temp = xfs_bmap_worst_indlen(ip, temp); |
| 1215 | temp2 = xfs_bmap_worst_indlen(ip, temp2); | 1215 | temp2 = xfs_bmap_worst_indlen(ip, temp2); |
| 1216 | diff = (int)(temp + temp2 - STARTBLOCKVAL(PREV.br_startblock) - | 1216 | diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) - |
| 1217 | (cur ? cur->bc_private.b.allocated : 0)); | 1217 | (cur ? cur->bc_private.b.allocated : 0)); |
| 1218 | if (diff > 0 && | 1218 | if (diff > 0 && |
| 1219 | xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) { | 1219 | xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) { |
| @@ -1241,11 +1241,11 @@ xfs_bmap_add_extent_delay_real( | |||
| 1241 | } | 1241 | } |
| 1242 | } | 1242 | } |
| 1243 | ep = xfs_iext_get_ext(ifp, idx); | 1243 | ep = xfs_iext_get_ext(ifp, idx); |
| 1244 | xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); | 1244 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
| 1245 | XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK); | 1245 | XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK); |
| 1246 | XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx + 2, XFS_DATA_FORK); | 1246 | XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx + 2, XFS_DATA_FORK); |
| 1247 | xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2), | 1247 | xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2), |
| 1248 | NULLSTARTBLOCK((int)temp2)); | 1248 | nullstartblock((int)temp2)); |
| 1249 | XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx + 2, XFS_DATA_FORK); | 1249 | XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx + 2, XFS_DATA_FORK); |
| 1250 | *dnew = temp + temp2; | 1250 | *dnew = temp + temp2; |
| 1251 | /* DELTA: One in-core extent is split in three. */ | 1251 | /* DELTA: One in-core extent is split in three. */ |
| @@ -1365,7 +1365,7 @@ xfs_bmap_add_extent_unwritten_real( | |||
| 1365 | */ | 1365 | */ |
| 1366 | if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { | 1366 | if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { |
| 1367 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); | 1367 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); |
| 1368 | STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock)); | 1368 | STATE_SET(LEFT_DELAY, isnullstartblock(LEFT.br_startblock)); |
| 1369 | } | 1369 | } |
| 1370 | STATE_SET(LEFT_CONTIG, | 1370 | STATE_SET(LEFT_CONTIG, |
| 1371 | STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && | 1371 | STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && |
| @@ -1382,7 +1382,7 @@ xfs_bmap_add_extent_unwritten_real( | |||
| 1382 | idx < | 1382 | idx < |
| 1383 | ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { | 1383 | ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { |
| 1384 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); | 1384 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); |
| 1385 | STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock)); | 1385 | STATE_SET(RIGHT_DELAY, isnullstartblock(RIGHT.br_startblock)); |
| 1386 | } | 1386 | } |
| 1387 | STATE_SET(RIGHT_CONTIG, | 1387 | STATE_SET(RIGHT_CONTIG, |
| 1388 | STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && | 1388 | STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && |
| @@ -1889,13 +1889,13 @@ xfs_bmap_add_extent_hole_delay( | |||
| 1889 | ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); | 1889 | ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); |
| 1890 | ep = xfs_iext_get_ext(ifp, idx); | 1890 | ep = xfs_iext_get_ext(ifp, idx); |
| 1891 | state = 0; | 1891 | state = 0; |
| 1892 | ASSERT(ISNULLSTARTBLOCK(new->br_startblock)); | 1892 | ASSERT(isnullstartblock(new->br_startblock)); |
| 1893 | /* | 1893 | /* |
| 1894 | * Check and set flags if this segment has a left neighbor | 1894 | * Check and set flags if this segment has a left neighbor |
| 1895 | */ | 1895 | */ |
| 1896 | if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { | 1896 | if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { |
| 1897 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); | 1897 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); |
| 1898 | STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock)); | 1898 | STATE_SET(LEFT_DELAY, isnullstartblock(left.br_startblock)); |
| 1899 | } | 1899 | } |
| 1900 | /* | 1900 | /* |
| 1901 | * Check and set flags if the current (right) segment exists. | 1901 | * Check and set flags if the current (right) segment exists. |
| @@ -1905,7 +1905,7 @@ xfs_bmap_add_extent_hole_delay( | |||
| 1905 | idx < | 1905 | idx < |
| 1906 | ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) { | 1906 | ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) { |
| 1907 | xfs_bmbt_get_all(ep, &right); | 1907 | xfs_bmbt_get_all(ep, &right); |
| 1908 | STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock)); | 1908 | STATE_SET(RIGHT_DELAY, isnullstartblock(right.br_startblock)); |
| 1909 | } | 1909 | } |
| 1910 | /* | 1910 | /* |
| 1911 | * Set contiguity flags on the left and right neighbors. | 1911 | * Set contiguity flags on the left and right neighbors. |
| @@ -1938,12 +1938,12 @@ xfs_bmap_add_extent_hole_delay( | |||
| 1938 | XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1, | 1938 | XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1, |
| 1939 | XFS_DATA_FORK); | 1939 | XFS_DATA_FORK); |
| 1940 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); | 1940 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); |
| 1941 | oldlen = STARTBLOCKVAL(left.br_startblock) + | 1941 | oldlen = startblockval(left.br_startblock) + |
| 1942 | STARTBLOCKVAL(new->br_startblock) + | 1942 | startblockval(new->br_startblock) + |
| 1943 | STARTBLOCKVAL(right.br_startblock); | 1943 | startblockval(right.br_startblock); |
| 1944 | newlen = xfs_bmap_worst_indlen(ip, temp); | 1944 | newlen = xfs_bmap_worst_indlen(ip, temp); |
| 1945 | xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), | 1945 | xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), |
| 1946 | NULLSTARTBLOCK((int)newlen)); | 1946 | nullstartblock((int)newlen)); |
| 1947 | XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1, | 1947 | XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1, |
| 1948 | XFS_DATA_FORK); | 1948 | XFS_DATA_FORK); |
| 1949 | XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, XFS_DATA_FORK); | 1949 | XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, XFS_DATA_FORK); |
| @@ -1964,11 +1964,11 @@ xfs_bmap_add_extent_hole_delay( | |||
| 1964 | XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1, | 1964 | XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1, |
| 1965 | XFS_DATA_FORK); | 1965 | XFS_DATA_FORK); |
| 1966 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); | 1966 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); |
| 1967 | oldlen = STARTBLOCKVAL(left.br_startblock) + | 1967 | oldlen = startblockval(left.br_startblock) + |
| 1968 | STARTBLOCKVAL(new->br_startblock); | 1968 | startblockval(new->br_startblock); |
| 1969 | newlen = xfs_bmap_worst_indlen(ip, temp); | 1969 | newlen = xfs_bmap_worst_indlen(ip, temp); |
| 1970 | xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), | 1970 | xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), |
| 1971 | NULLSTARTBLOCK((int)newlen)); | 1971 | nullstartblock((int)newlen)); |
| 1972 | XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, | 1972 | XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, |
| 1973 | XFS_DATA_FORK); | 1973 | XFS_DATA_FORK); |
| 1974 | ip->i_df.if_lastex = idx - 1; | 1974 | ip->i_df.if_lastex = idx - 1; |
| @@ -1985,11 +1985,11 @@ xfs_bmap_add_extent_hole_delay( | |||
| 1985 | */ | 1985 | */ |
| 1986 | XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, XFS_DATA_FORK); | 1986 | XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, XFS_DATA_FORK); |
| 1987 | temp = new->br_blockcount + right.br_blockcount; | 1987 | temp = new->br_blockcount + right.br_blockcount; |
| 1988 | oldlen = STARTBLOCKVAL(new->br_startblock) + | 1988 | oldlen = startblockval(new->br_startblock) + |
| 1989 | STARTBLOCKVAL(right.br_startblock); | 1989 | startblockval(right.br_startblock); |
| 1990 | newlen = xfs_bmap_worst_indlen(ip, temp); | 1990 | newlen = xfs_bmap_worst_indlen(ip, temp); |
| 1991 | xfs_bmbt_set_allf(ep, new->br_startoff, | 1991 | xfs_bmbt_set_allf(ep, new->br_startoff, |
| 1992 | NULLSTARTBLOCK((int)newlen), temp, right.br_state); | 1992 | nullstartblock((int)newlen), temp, right.br_state); |
| 1993 | XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, XFS_DATA_FORK); | 1993 | XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, XFS_DATA_FORK); |
| 1994 | ip->i_df.if_lastex = idx; | 1994 | ip->i_df.if_lastex = idx; |
| 1995 | /* DELTA: One in-core extent grew into a hole. */ | 1995 | /* DELTA: One in-core extent grew into a hole. */ |
| @@ -2085,7 +2085,7 @@ xfs_bmap_add_extent_hole_real( | |||
| 2085 | */ | 2085 | */ |
| 2086 | if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { | 2086 | if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { |
| 2087 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); | 2087 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); |
| 2088 | STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock)); | 2088 | STATE_SET(LEFT_DELAY, isnullstartblock(left.br_startblock)); |
| 2089 | } | 2089 | } |
| 2090 | /* | 2090 | /* |
| 2091 | * Check and set flags if this segment has a current value. | 2091 | * Check and set flags if this segment has a current value. |
| @@ -2095,7 +2095,7 @@ xfs_bmap_add_extent_hole_real( | |||
| 2095 | idx < | 2095 | idx < |
| 2096 | ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) { | 2096 | ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) { |
| 2097 | xfs_bmbt_get_all(ep, &right); | 2097 | xfs_bmbt_get_all(ep, &right); |
| 2098 | STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock)); | 2098 | STATE_SET(RIGHT_DELAY, isnullstartblock(right.br_startblock)); |
| 2099 | } | 2099 | } |
| 2100 | /* | 2100 | /* |
| 2101 | * We're inserting a real allocation between "left" and "right". | 2101 | * We're inserting a real allocation between "left" and "right". |
| @@ -2143,7 +2143,7 @@ xfs_bmap_add_extent_hole_real( | |||
| 2143 | XFS_IFORK_NEXT_SET(ip, whichfork, | 2143 | XFS_IFORK_NEXT_SET(ip, whichfork, |
| 2144 | XFS_IFORK_NEXTENTS(ip, whichfork) - 1); | 2144 | XFS_IFORK_NEXTENTS(ip, whichfork) - 1); |
| 2145 | if (cur == NULL) { | 2145 | if (cur == NULL) { |
| 2146 | rval = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork); | 2146 | rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); |
| 2147 | } else { | 2147 | } else { |
| 2148 | rval = XFS_ILOG_CORE; | 2148 | rval = XFS_ILOG_CORE; |
| 2149 | if ((error = xfs_bmbt_lookup_eq(cur, | 2149 | if ((error = xfs_bmbt_lookup_eq(cur, |
| @@ -2185,7 +2185,7 @@ xfs_bmap_add_extent_hole_real( | |||
| 2185 | XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, whichfork); | 2185 | XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, whichfork); |
| 2186 | ifp->if_lastex = idx - 1; | 2186 | ifp->if_lastex = idx - 1; |
| 2187 | if (cur == NULL) { | 2187 | if (cur == NULL) { |
| 2188 | rval = XFS_ILOG_FEXT(whichfork); | 2188 | rval = xfs_ilog_fext(whichfork); |
| 2189 | } else { | 2189 | } else { |
| 2190 | rval = 0; | 2190 | rval = 0; |
| 2191 | if ((error = xfs_bmbt_lookup_eq(cur, | 2191 | if ((error = xfs_bmbt_lookup_eq(cur, |
| @@ -2220,7 +2220,7 @@ xfs_bmap_add_extent_hole_real( | |||
| 2220 | XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, whichfork); | 2220 | XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, whichfork); |
| 2221 | ifp->if_lastex = idx; | 2221 | ifp->if_lastex = idx; |
| 2222 | if (cur == NULL) { | 2222 | if (cur == NULL) { |
| 2223 | rval = XFS_ILOG_FEXT(whichfork); | 2223 | rval = xfs_ilog_fext(whichfork); |
| 2224 | } else { | 2224 | } else { |
| 2225 | rval = 0; | 2225 | rval = 0; |
| 2226 | if ((error = xfs_bmbt_lookup_eq(cur, | 2226 | if ((error = xfs_bmbt_lookup_eq(cur, |
| @@ -2254,7 +2254,7 @@ xfs_bmap_add_extent_hole_real( | |||
| 2254 | XFS_IFORK_NEXT_SET(ip, whichfork, | 2254 | XFS_IFORK_NEXT_SET(ip, whichfork, |
| 2255 | XFS_IFORK_NEXTENTS(ip, whichfork) + 1); | 2255 | XFS_IFORK_NEXTENTS(ip, whichfork) + 1); |
| 2256 | if (cur == NULL) { | 2256 | if (cur == NULL) { |
| 2257 | rval = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork); | 2257 | rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); |
| 2258 | } else { | 2258 | } else { |
| 2259 | rval = XFS_ILOG_CORE; | 2259 | rval = XFS_ILOG_CORE; |
| 2260 | if ((error = xfs_bmbt_lookup_eq(cur, | 2260 | if ((error = xfs_bmbt_lookup_eq(cur, |
| @@ -2482,7 +2482,7 @@ xfs_bmap_adjacent( | |||
| 2482 | * try to use it's last block as our starting point. | 2482 | * try to use it's last block as our starting point. |
| 2483 | */ | 2483 | */ |
| 2484 | if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF && | 2484 | if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF && |
| 2485 | !ISNULLSTARTBLOCK(ap->prevp->br_startblock) && | 2485 | !isnullstartblock(ap->prevp->br_startblock) && |
| 2486 | ISVALID(ap->prevp->br_startblock + ap->prevp->br_blockcount, | 2486 | ISVALID(ap->prevp->br_startblock + ap->prevp->br_blockcount, |
| 2487 | ap->prevp->br_startblock)) { | 2487 | ap->prevp->br_startblock)) { |
| 2488 | ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount; | 2488 | ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount; |
| @@ -2511,7 +2511,7 @@ xfs_bmap_adjacent( | |||
| 2511 | * start block based on it. | 2511 | * start block based on it. |
| 2512 | */ | 2512 | */ |
| 2513 | if (ap->prevp->br_startoff != NULLFILEOFF && | 2513 | if (ap->prevp->br_startoff != NULLFILEOFF && |
| 2514 | !ISNULLSTARTBLOCK(ap->prevp->br_startblock) && | 2514 | !isnullstartblock(ap->prevp->br_startblock) && |
| 2515 | (prevbno = ap->prevp->br_startblock + | 2515 | (prevbno = ap->prevp->br_startblock + |
| 2516 | ap->prevp->br_blockcount) && | 2516 | ap->prevp->br_blockcount) && |
| 2517 | ISVALID(prevbno, ap->prevp->br_startblock)) { | 2517 | ISVALID(prevbno, ap->prevp->br_startblock)) { |
| @@ -2552,7 +2552,7 @@ xfs_bmap_adjacent( | |||
| 2552 | * If there's a following (right) block, select a requested | 2552 | * If there's a following (right) block, select a requested |
| 2553 | * start block based on it. | 2553 | * start block based on it. |
| 2554 | */ | 2554 | */ |
| 2555 | if (!ISNULLSTARTBLOCK(ap->gotp->br_startblock)) { | 2555 | if (!isnullstartblock(ap->gotp->br_startblock)) { |
| 2556 | /* | 2556 | /* |
| 2557 | * Calculate gap to start of next block. | 2557 | * Calculate gap to start of next block. |
| 2558 | */ | 2558 | */ |
| @@ -3082,7 +3082,7 @@ xfs_bmap_btree_to_extents( | |||
| 3082 | ASSERT(ifp->if_broot == NULL); | 3082 | ASSERT(ifp->if_broot == NULL); |
| 3083 | ASSERT((ifp->if_flags & XFS_IFBROOT) == 0); | 3083 | ASSERT((ifp->if_flags & XFS_IFBROOT) == 0); |
| 3084 | XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); | 3084 | XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); |
| 3085 | *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork); | 3085 | *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); |
| 3086 | return 0; | 3086 | return 0; |
| 3087 | } | 3087 | } |
| 3088 | 3088 | ||
| @@ -3136,8 +3136,8 @@ xfs_bmap_del_extent( | |||
| 3136 | del_endoff = del->br_startoff + del->br_blockcount; | 3136 | del_endoff = del->br_startoff + del->br_blockcount; |
| 3137 | got_endoff = got.br_startoff + got.br_blockcount; | 3137 | got_endoff = got.br_startoff + got.br_blockcount; |
| 3138 | ASSERT(got_endoff >= del_endoff); | 3138 | ASSERT(got_endoff >= del_endoff); |
| 3139 | delay = ISNULLSTARTBLOCK(got.br_startblock); | 3139 | delay = isnullstartblock(got.br_startblock); |
| 3140 | ASSERT(ISNULLSTARTBLOCK(del->br_startblock) == delay); | 3140 | ASSERT(isnullstartblock(del->br_startblock) == delay); |
| 3141 | flags = 0; | 3141 | flags = 0; |
| 3142 | qfield = 0; | 3142 | qfield = 0; |
| 3143 | error = 0; | 3143 | error = 0; |
| @@ -3189,7 +3189,7 @@ xfs_bmap_del_extent( | |||
| 3189 | } | 3189 | } |
| 3190 | da_old = da_new = 0; | 3190 | da_old = da_new = 0; |
| 3191 | } else { | 3191 | } else { |
| 3192 | da_old = STARTBLOCKVAL(got.br_startblock); | 3192 | da_old = startblockval(got.br_startblock); |
| 3193 | da_new = 0; | 3193 | da_new = 0; |
| 3194 | nblks = 0; | 3194 | nblks = 0; |
| 3195 | do_fx = 0; | 3195 | do_fx = 0; |
| @@ -3213,7 +3213,7 @@ xfs_bmap_del_extent( | |||
| 3213 | XFS_IFORK_NEXTENTS(ip, whichfork) - 1); | 3213 | XFS_IFORK_NEXTENTS(ip, whichfork) - 1); |
| 3214 | flags |= XFS_ILOG_CORE; | 3214 | flags |= XFS_ILOG_CORE; |
| 3215 | if (!cur) { | 3215 | if (!cur) { |
| 3216 | flags |= XFS_ILOG_FEXT(whichfork); | 3216 | flags |= xfs_ilog_fext(whichfork); |
| 3217 | break; | 3217 | break; |
| 3218 | } | 3218 | } |
| 3219 | if ((error = xfs_btree_delete(cur, &i))) | 3219 | if ((error = xfs_btree_delete(cur, &i))) |
| @@ -3233,7 +3233,7 @@ xfs_bmap_del_extent( | |||
| 3233 | if (delay) { | 3233 | if (delay) { |
| 3234 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), | 3234 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), |
| 3235 | da_old); | 3235 | da_old); |
| 3236 | xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); | 3236 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
| 3237 | XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, | 3237 | XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, |
| 3238 | whichfork); | 3238 | whichfork); |
| 3239 | da_new = temp; | 3239 | da_new = temp; |
| @@ -3242,7 +3242,7 @@ xfs_bmap_del_extent( | |||
| 3242 | xfs_bmbt_set_startblock(ep, del_endblock); | 3242 | xfs_bmbt_set_startblock(ep, del_endblock); |
| 3243 | XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, whichfork); | 3243 | XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, whichfork); |
| 3244 | if (!cur) { | 3244 | if (!cur) { |
| 3245 | flags |= XFS_ILOG_FEXT(whichfork); | 3245 | flags |= xfs_ilog_fext(whichfork); |
| 3246 | break; | 3246 | break; |
| 3247 | } | 3247 | } |
| 3248 | if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock, | 3248 | if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock, |
| @@ -3262,7 +3262,7 @@ xfs_bmap_del_extent( | |||
| 3262 | if (delay) { | 3262 | if (delay) { |
| 3263 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), | 3263 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), |
| 3264 | da_old); | 3264 | da_old); |
| 3265 | xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); | 3265 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
| 3266 | XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, | 3266 | XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, |
| 3267 | whichfork); | 3267 | whichfork); |
| 3268 | da_new = temp; | 3268 | da_new = temp; |
| @@ -3270,7 +3270,7 @@ xfs_bmap_del_extent( | |||
| 3270 | } | 3270 | } |
| 3271 | XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, whichfork); | 3271 | XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, whichfork); |
| 3272 | if (!cur) { | 3272 | if (!cur) { |
| 3273 | flags |= XFS_ILOG_FEXT(whichfork); | 3273 | flags |= xfs_ilog_fext(whichfork); |
| 3274 | break; | 3274 | break; |
| 3275 | } | 3275 | } |
| 3276 | if ((error = xfs_bmbt_update(cur, got.br_startoff, | 3276 | if ((error = xfs_bmbt_update(cur, got.br_startoff, |
| @@ -3345,22 +3345,22 @@ xfs_bmap_del_extent( | |||
| 3345 | } | 3345 | } |
| 3346 | XFS_WANT_CORRUPTED_GOTO(i == 1, done); | 3346 | XFS_WANT_CORRUPTED_GOTO(i == 1, done); |
| 3347 | } else | 3347 | } else |
| 3348 | flags |= XFS_ILOG_FEXT(whichfork); | 3348 | flags |= xfs_ilog_fext(whichfork); |
| 3349 | XFS_IFORK_NEXT_SET(ip, whichfork, | 3349 | XFS_IFORK_NEXT_SET(ip, whichfork, |
| 3350 | XFS_IFORK_NEXTENTS(ip, whichfork) + 1); | 3350 | XFS_IFORK_NEXTENTS(ip, whichfork) + 1); |
| 3351 | } else { | 3351 | } else { |
| 3352 | ASSERT(whichfork == XFS_DATA_FORK); | 3352 | ASSERT(whichfork == XFS_DATA_FORK); |
| 3353 | temp = xfs_bmap_worst_indlen(ip, temp); | 3353 | temp = xfs_bmap_worst_indlen(ip, temp); |
| 3354 | xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); | 3354 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
| 3355 | temp2 = xfs_bmap_worst_indlen(ip, temp2); | 3355 | temp2 = xfs_bmap_worst_indlen(ip, temp2); |
| 3356 | new.br_startblock = NULLSTARTBLOCK((int)temp2); | 3356 | new.br_startblock = nullstartblock((int)temp2); |
| 3357 | da_new = temp + temp2; | 3357 | da_new = temp + temp2; |
| 3358 | while (da_new > da_old) { | 3358 | while (da_new > da_old) { |
| 3359 | if (temp) { | 3359 | if (temp) { |
| 3360 | temp--; | 3360 | temp--; |
| 3361 | da_new--; | 3361 | da_new--; |
| 3362 | xfs_bmbt_set_startblock(ep, | 3362 | xfs_bmbt_set_startblock(ep, |
| 3363 | NULLSTARTBLOCK((int)temp)); | 3363 | nullstartblock((int)temp)); |
| 3364 | } | 3364 | } |
| 3365 | if (da_new == da_old) | 3365 | if (da_new == da_old) |
| 3366 | break; | 3366 | break; |
| @@ -3368,7 +3368,7 @@ xfs_bmap_del_extent( | |||
| 3368 | temp2--; | 3368 | temp2--; |
| 3369 | da_new--; | 3369 | da_new--; |
| 3370 | new.br_startblock = | 3370 | new.br_startblock = |
| 3371 | NULLSTARTBLOCK((int)temp2); | 3371 | nullstartblock((int)temp2); |
| 3372 | } | 3372 | } |
| 3373 | } | 3373 | } |
| 3374 | } | 3374 | } |
| @@ -3545,7 +3545,7 @@ xfs_bmap_extents_to_btree( | |||
| 3545 | nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); | 3545 | nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
| 3546 | for (cnt = i = 0; i < nextents; i++) { | 3546 | for (cnt = i = 0; i < nextents; i++) { |
| 3547 | ep = xfs_iext_get_ext(ifp, i); | 3547 | ep = xfs_iext_get_ext(ifp, i); |
| 3548 | if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep))) { | 3548 | if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) { |
| 3549 | arp->l0 = cpu_to_be64(ep->l0); | 3549 | arp->l0 = cpu_to_be64(ep->l0); |
| 3550 | arp->l1 = cpu_to_be64(ep->l1); | 3550 | arp->l1 = cpu_to_be64(ep->l1); |
| 3551 | arp++; cnt++; | 3551 | arp++; cnt++; |
| @@ -3572,7 +3572,7 @@ xfs_bmap_extents_to_btree( | |||
| 3572 | xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs)); | 3572 | xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs)); |
| 3573 | ASSERT(*curp == NULL); | 3573 | ASSERT(*curp == NULL); |
| 3574 | *curp = cur; | 3574 | *curp = cur; |
| 3575 | *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FBROOT(whichfork); | 3575 | *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork); |
| 3576 | return 0; | 3576 | return 0; |
| 3577 | } | 3577 | } |
| 3578 | 3578 | ||
| @@ -3676,7 +3676,7 @@ xfs_bmap_local_to_extents( | |||
| 3676 | ip->i_d.di_nblocks = 1; | 3676 | ip->i_d.di_nblocks = 1; |
| 3677 | XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip, | 3677 | XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip, |
| 3678 | XFS_TRANS_DQ_BCOUNT, 1L); | 3678 | XFS_TRANS_DQ_BCOUNT, 1L); |
| 3679 | flags |= XFS_ILOG_FEXT(whichfork); | 3679 | flags |= xfs_ilog_fext(whichfork); |
| 3680 | } else { | 3680 | } else { |
| 3681 | ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0); | 3681 | ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0); |
| 3682 | xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork); | 3682 | xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork); |
| @@ -4082,7 +4082,7 @@ xfs_bmap_add_attrfork( | |||
| 4082 | XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); | 4082 | XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); |
| 4083 | ip->i_afp->if_flags = XFS_IFEXTENTS; | 4083 | ip->i_afp->if_flags = XFS_IFEXTENTS; |
| 4084 | logflags = 0; | 4084 | logflags = 0; |
| 4085 | XFS_BMAP_INIT(&flist, &firstblock); | 4085 | xfs_bmap_init(&flist, &firstblock); |
| 4086 | switch (ip->i_d.di_format) { | 4086 | switch (ip->i_d.di_format) { |
| 4087 | case XFS_DINODE_FMT_LOCAL: | 4087 | case XFS_DINODE_FMT_LOCAL: |
| 4088 | error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist, | 4088 | error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist, |
| @@ -4162,7 +4162,7 @@ xfs_bmap_add_free( | |||
| 4162 | ASSERT(bno != NULLFSBLOCK); | 4162 | ASSERT(bno != NULLFSBLOCK); |
| 4163 | ASSERT(len > 0); | 4163 | ASSERT(len > 0); |
| 4164 | ASSERT(len <= MAXEXTLEN); | 4164 | ASSERT(len <= MAXEXTLEN); |
| 4165 | ASSERT(!ISNULLSTARTBLOCK(bno)); | 4165 | ASSERT(!isnullstartblock(bno)); |
| 4166 | agno = XFS_FSB_TO_AGNO(mp, bno); | 4166 | agno = XFS_FSB_TO_AGNO(mp, bno); |
| 4167 | agbno = XFS_FSB_TO_AGBNO(mp, bno); | 4167 | agbno = XFS_FSB_TO_AGBNO(mp, bno); |
| 4168 | ASSERT(agno < mp->m_sb.sb_agcount); | 4168 | ASSERT(agno < mp->m_sb.sb_agcount); |
| @@ -4909,7 +4909,7 @@ xfs_bmapi( | |||
| 4909 | got.br_startoff = end; | 4909 | got.br_startoff = end; |
| 4910 | inhole = eof || got.br_startoff > bno; | 4910 | inhole = eof || got.br_startoff > bno; |
| 4911 | wasdelay = wr && !inhole && !(flags & XFS_BMAPI_DELAY) && | 4911 | wasdelay = wr && !inhole && !(flags & XFS_BMAPI_DELAY) && |
| 4912 | ISNULLSTARTBLOCK(got.br_startblock); | 4912 | isnullstartblock(got.br_startblock); |
| 4913 | /* | 4913 | /* |
| 4914 | * First, deal with the hole before the allocated space | 4914 | * First, deal with the hole before the allocated space |
| 4915 | * that we found, if any. | 4915 | * that we found, if any. |
| @@ -5028,7 +5028,7 @@ xfs_bmapi( | |||
| 5028 | } | 5028 | } |
| 5029 | 5029 | ||
| 5030 | ip->i_delayed_blks += alen; | 5030 | ip->i_delayed_blks += alen; |
| 5031 | abno = NULLSTARTBLOCK(indlen); | 5031 | abno = nullstartblock(indlen); |
| 5032 | } else { | 5032 | } else { |
| 5033 | /* | 5033 | /* |
| 5034 | * If first time, allocate and fill in | 5034 | * If first time, allocate and fill in |
| @@ -5144,8 +5144,8 @@ xfs_bmapi( | |||
| 5144 | aoff + alen); | 5144 | aoff + alen); |
| 5145 | #ifdef DEBUG | 5145 | #ifdef DEBUG |
| 5146 | if (flags & XFS_BMAPI_DELAY) { | 5146 | if (flags & XFS_BMAPI_DELAY) { |
| 5147 | ASSERT(ISNULLSTARTBLOCK(got.br_startblock)); | 5147 | ASSERT(isnullstartblock(got.br_startblock)); |
| 5148 | ASSERT(STARTBLOCKVAL(got.br_startblock) > 0); | 5148 | ASSERT(startblockval(got.br_startblock) > 0); |
| 5149 | } | 5149 | } |
| 5150 | ASSERT(got.br_state == XFS_EXT_NORM || | 5150 | ASSERT(got.br_state == XFS_EXT_NORM || |
| 5151 | got.br_state == XFS_EXT_UNWRITTEN); | 5151 | got.br_state == XFS_EXT_UNWRITTEN); |
| @@ -5179,7 +5179,7 @@ xfs_bmapi( | |||
| 5179 | ASSERT((bno >= obno) || (n == 0)); | 5179 | ASSERT((bno >= obno) || (n == 0)); |
| 5180 | ASSERT(bno < end); | 5180 | ASSERT(bno < end); |
| 5181 | mval->br_startoff = bno; | 5181 | mval->br_startoff = bno; |
| 5182 | if (ISNULLSTARTBLOCK(got.br_startblock)) { | 5182 | if (isnullstartblock(got.br_startblock)) { |
| 5183 | ASSERT(!wr || (flags & XFS_BMAPI_DELAY)); | 5183 | ASSERT(!wr || (flags & XFS_BMAPI_DELAY)); |
| 5184 | mval->br_startblock = DELAYSTARTBLOCK; | 5184 | mval->br_startblock = DELAYSTARTBLOCK; |
| 5185 | } else | 5185 | } else |
| @@ -5201,7 +5201,7 @@ xfs_bmapi( | |||
| 5201 | ASSERT(mval->br_blockcount <= len); | 5201 | ASSERT(mval->br_blockcount <= len); |
| 5202 | } else { | 5202 | } else { |
| 5203 | *mval = got; | 5203 | *mval = got; |
| 5204 | if (ISNULLSTARTBLOCK(mval->br_startblock)) { | 5204 | if (isnullstartblock(mval->br_startblock)) { |
| 5205 | ASSERT(!wr || (flags & XFS_BMAPI_DELAY)); | 5205 | ASSERT(!wr || (flags & XFS_BMAPI_DELAY)); |
| 5206 | mval->br_startblock = DELAYSTARTBLOCK; | 5206 | mval->br_startblock = DELAYSTARTBLOCK; |
| 5207 | } | 5207 | } |
| @@ -5329,12 +5329,12 @@ error0: | |||
| 5329 | * Log everything. Do this after conversion, there's no point in | 5329 | * Log everything. Do this after conversion, there's no point in |
| 5330 | * logging the extent records if we've converted to btree format. | 5330 | * logging the extent records if we've converted to btree format. |
| 5331 | */ | 5331 | */ |
| 5332 | if ((logflags & XFS_ILOG_FEXT(whichfork)) && | 5332 | if ((logflags & xfs_ilog_fext(whichfork)) && |
| 5333 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) | 5333 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) |
| 5334 | logflags &= ~XFS_ILOG_FEXT(whichfork); | 5334 | logflags &= ~xfs_ilog_fext(whichfork); |
| 5335 | else if ((logflags & XFS_ILOG_FBROOT(whichfork)) && | 5335 | else if ((logflags & xfs_ilog_fbroot(whichfork)) && |
| 5336 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) | 5336 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) |
| 5337 | logflags &= ~XFS_ILOG_FBROOT(whichfork); | 5337 | logflags &= ~xfs_ilog_fbroot(whichfork); |
| 5338 | /* | 5338 | /* |
| 5339 | * Log whatever the flags say, even if error. Otherwise we might miss | 5339 | * Log whatever the flags say, even if error. Otherwise we might miss |
| 5340 | * detecting a case where the data is changed, there's an error, | 5340 | * detecting a case where the data is changed, there's an error, |
| @@ -5411,7 +5411,7 @@ xfs_bmapi_single( | |||
| 5411 | *fsb = NULLFSBLOCK; | 5411 | *fsb = NULLFSBLOCK; |
| 5412 | return 0; | 5412 | return 0; |
| 5413 | } | 5413 | } |
| 5414 | ASSERT(!ISNULLSTARTBLOCK(got.br_startblock)); | 5414 | ASSERT(!isnullstartblock(got.br_startblock)); |
| 5415 | ASSERT(bno < got.br_startoff + got.br_blockcount); | 5415 | ASSERT(bno < got.br_startoff + got.br_blockcount); |
| 5416 | *fsb = got.br_startblock + (bno - got.br_startoff); | 5416 | *fsb = got.br_startblock + (bno - got.br_startoff); |
| 5417 | ifp->if_lastex = lastx; | 5417 | ifp->if_lastex = lastx; |
| @@ -5543,7 +5543,7 @@ xfs_bunmapi( | |||
| 5543 | */ | 5543 | */ |
| 5544 | ASSERT(ep != NULL); | 5544 | ASSERT(ep != NULL); |
| 5545 | del = got; | 5545 | del = got; |
| 5546 | wasdel = ISNULLSTARTBLOCK(del.br_startblock); | 5546 | wasdel = isnullstartblock(del.br_startblock); |
| 5547 | if (got.br_startoff < start) { | 5547 | if (got.br_startoff < start) { |
| 5548 | del.br_startoff = start; | 5548 | del.br_startoff = start; |
| 5549 | del.br_blockcount -= start - got.br_startoff; | 5549 | del.br_blockcount -= start - got.br_startoff; |
| @@ -5638,7 +5638,7 @@ xfs_bunmapi( | |||
| 5638 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, | 5638 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, |
| 5639 | lastx - 1), &prev); | 5639 | lastx - 1), &prev); |
| 5640 | ASSERT(prev.br_state == XFS_EXT_NORM); | 5640 | ASSERT(prev.br_state == XFS_EXT_NORM); |
| 5641 | ASSERT(!ISNULLSTARTBLOCK(prev.br_startblock)); | 5641 | ASSERT(!isnullstartblock(prev.br_startblock)); |
| 5642 | ASSERT(del.br_startblock == | 5642 | ASSERT(del.br_startblock == |
| 5643 | prev.br_startblock + prev.br_blockcount); | 5643 | prev.br_startblock + prev.br_blockcount); |
| 5644 | if (prev.br_startoff < start) { | 5644 | if (prev.br_startoff < start) { |
| @@ -5666,7 +5666,7 @@ xfs_bunmapi( | |||
| 5666 | } | 5666 | } |
| 5667 | } | 5667 | } |
| 5668 | if (wasdel) { | 5668 | if (wasdel) { |
| 5669 | ASSERT(STARTBLOCKVAL(del.br_startblock) > 0); | 5669 | ASSERT(startblockval(del.br_startblock) > 0); |
| 5670 | /* Update realtime/data freespace, unreserve quota */ | 5670 | /* Update realtime/data freespace, unreserve quota */ |
| 5671 | if (isrt) { | 5671 | if (isrt) { |
| 5672 | xfs_filblks_t rtexts; | 5672 | xfs_filblks_t rtexts; |
| @@ -5782,12 +5782,12 @@ error0: | |||
| 5782 | * Log everything. Do this after conversion, there's no point in | 5782 | * Log everything. Do this after conversion, there's no point in |
| 5783 | * logging the extent records if we've converted to btree format. | 5783 | * logging the extent records if we've converted to btree format. |
| 5784 | */ | 5784 | */ |
| 5785 | if ((logflags & XFS_ILOG_FEXT(whichfork)) && | 5785 | if ((logflags & xfs_ilog_fext(whichfork)) && |
| 5786 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) | 5786 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) |
| 5787 | logflags &= ~XFS_ILOG_FEXT(whichfork); | 5787 | logflags &= ~xfs_ilog_fext(whichfork); |
| 5788 | else if ((logflags & XFS_ILOG_FBROOT(whichfork)) && | 5788 | else if ((logflags & xfs_ilog_fbroot(whichfork)) && |
| 5789 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) | 5789 | XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) |
| 5790 | logflags &= ~XFS_ILOG_FBROOT(whichfork); | 5790 | logflags &= ~xfs_ilog_fbroot(whichfork); |
| 5791 | /* | 5791 | /* |
| 5792 | * Log inode even in the error case, if the transaction | 5792 | * Log inode even in the error case, if the transaction |
| 5793 | * is dirty we'll need to shut down the filesystem. | 5793 | * is dirty we'll need to shut down the filesystem. |
| @@ -5838,7 +5838,7 @@ xfs_getbmapx_fix_eof_hole( | |||
| 5838 | if (startblock == DELAYSTARTBLOCK) | 5838 | if (startblock == DELAYSTARTBLOCK) |
| 5839 | out->bmv_block = -2; | 5839 | out->bmv_block = -2; |
| 5840 | else | 5840 | else |
| 5841 | out->bmv_block = XFS_FSB_TO_DB(ip, startblock); | 5841 | out->bmv_block = xfs_fsb_to_db(ip, startblock); |
| 5842 | fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset); | 5842 | fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset); |
| 5843 | ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); | 5843 | ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); |
| 5844 | if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) && | 5844 | if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) && |
| @@ -5979,7 +5979,7 @@ xfs_getbmap( | |||
| 5979 | if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1) | 5979 | if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1) |
| 5980 | nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1; | 5980 | nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1; |
| 5981 | 5981 | ||
| 5982 | bmapi_flags = XFS_BMAPI_AFLAG(whichfork) | | 5982 | bmapi_flags = xfs_bmapi_aflag(whichfork) | |
| 5983 | ((iflags & BMV_IF_PREALLOC) ? 0 : XFS_BMAPI_IGSTATE); | 5983 | ((iflags & BMV_IF_PREALLOC) ? 0 : XFS_BMAPI_IGSTATE); |
| 5984 | 5984 | ||
| 5985 | /* | 5985 | /* |
| @@ -6098,7 +6098,7 @@ xfs_bmap_isaeof( | |||
| 6098 | */ | 6098 | */ |
| 6099 | *aeof = (off >= s.br_startoff && | 6099 | *aeof = (off >= s.br_startoff && |
| 6100 | off < s.br_startoff + s.br_blockcount && | 6100 | off < s.br_startoff + s.br_blockcount && |
| 6101 | ISNULLSTARTBLOCK(s.br_startblock)) || | 6101 | isnullstartblock(s.br_startblock)) || |
| 6102 | off >= s.br_startoff + s.br_blockcount; | 6102 | off >= s.br_startoff + s.br_blockcount; |
| 6103 | return 0; | 6103 | return 0; |
| 6104 | } | 6104 | } |
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h index 284571c05ed0..be2979d88d32 100644 --- a/fs/xfs/xfs_bmap.h +++ b/fs/xfs/xfs_bmap.h | |||
| @@ -95,7 +95,6 @@ typedef struct xfs_bmap_free | |||
| 95 | /* need write cache flushing and no */ | 95 | /* need write cache flushing and no */ |
| 96 | /* additional allocation alignments */ | 96 | /* additional allocation alignments */ |
| 97 | 97 | ||
| 98 | #define XFS_BMAPI_AFLAG(w) xfs_bmapi_aflag(w) | ||
| 99 | static inline int xfs_bmapi_aflag(int w) | 98 | static inline int xfs_bmapi_aflag(int w) |
| 100 | { | 99 | { |
| 101 | return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0); | 100 | return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0); |
| @@ -107,7 +106,6 @@ static inline int xfs_bmapi_aflag(int w) | |||
| 107 | #define DELAYSTARTBLOCK ((xfs_fsblock_t)-1LL) | 106 | #define DELAYSTARTBLOCK ((xfs_fsblock_t)-1LL) |
| 108 | #define HOLESTARTBLOCK ((xfs_fsblock_t)-2LL) | 107 | #define HOLESTARTBLOCK ((xfs_fsblock_t)-2LL) |
| 109 | 108 | ||
| 110 | #define XFS_BMAP_INIT(flp,fbp) xfs_bmap_init(flp,fbp) | ||
| 111 | static inline void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp) | 109 | static inline void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp) |
| 112 | { | 110 | { |
| 113 | ((flp)->xbf_first = NULL, (flp)->xbf_count = 0, \ | 111 | ((flp)->xbf_first = NULL, (flp)->xbf_count = 0, \ |
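The two xfs_bmap.h hunks above drop the XFS_BMAPI_AFLAG and XFS_BMAP_INIT compatibility macros now that every caller (see the xfs_attr.c and xfs_bmap.c hunks earlier) uses the lowercase inline functions directly. A hedged illustration of a converted call site, with placeholder local names:

	/* Sketch only: flist/firstblock are placeholder locals. The inline
	 * resets the free list to empty, exactly what the old macro body did. */
	xfs_bmap_free_t		flist;
	xfs_fsblock_t		firstblock;

	xfs_bmap_init(&flist, &firstblock);	/* was: XFS_BMAP_INIT(&flist, &firstblock) */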
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index ba6b08c2fb02..0760d352586f 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c | |||
| @@ -121,7 +121,7 @@ __xfs_bmbt_get_all( | |||
| 121 | 121 | ||
| 122 | b = (((xfs_dfsbno_t)l0 & xfs_mask64lo(9)) << 43) | | 122 | b = (((xfs_dfsbno_t)l0 & xfs_mask64lo(9)) << 43) | |
| 123 | (((xfs_dfsbno_t)l1) >> 21); | 123 | (((xfs_dfsbno_t)l1) >> 21); |
| 124 | ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b)); | 124 | ASSERT((b >> 32) == 0 || isnulldstartblock(b)); |
| 125 | s->br_startblock = (xfs_fsblock_t)b; | 125 | s->br_startblock = (xfs_fsblock_t)b; |
| 126 | } | 126 | } |
| 127 | #else /* !DEBUG */ | 127 | #else /* !DEBUG */ |
| @@ -172,7 +172,7 @@ xfs_bmbt_get_startblock( | |||
| 172 | 172 | ||
| 173 | b = (((xfs_dfsbno_t)r->l0 & xfs_mask64lo(9)) << 43) | | 173 | b = (((xfs_dfsbno_t)r->l0 & xfs_mask64lo(9)) << 43) | |
| 174 | (((xfs_dfsbno_t)r->l1) >> 21); | 174 | (((xfs_dfsbno_t)r->l1) >> 21); |
| 175 | ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b)); | 175 | ASSERT((b >> 32) == 0 || isnulldstartblock(b)); |
| 176 | return (xfs_fsblock_t)b; | 176 | return (xfs_fsblock_t)b; |
| 177 | #else /* !DEBUG */ | 177 | #else /* !DEBUG */ |
| 178 | return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21); | 178 | return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21); |
| @@ -261,7 +261,7 @@ xfs_bmbt_set_allf( | |||
| 261 | ((xfs_bmbt_rec_base_t)blockcount & | 261 | ((xfs_bmbt_rec_base_t)blockcount & |
| 262 | (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); | 262 | (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); |
| 263 | #else /* !XFS_BIG_BLKNOS */ | 263 | #else /* !XFS_BIG_BLKNOS */ |
| 264 | if (ISNULLSTARTBLOCK(startblock)) { | 264 | if (isnullstartblock(startblock)) { |
| 265 | r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | | 265 | r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | |
| 266 | ((xfs_bmbt_rec_base_t)startoff << 9) | | 266 | ((xfs_bmbt_rec_base_t)startoff << 9) | |
| 267 | (xfs_bmbt_rec_base_t)xfs_mask64lo(9); | 267 | (xfs_bmbt_rec_base_t)xfs_mask64lo(9); |
| @@ -321,7 +321,7 @@ xfs_bmbt_disk_set_allf( | |||
| 321 | ((xfs_bmbt_rec_base_t)blockcount & | 321 | ((xfs_bmbt_rec_base_t)blockcount & |
| 322 | (xfs_bmbt_rec_base_t)xfs_mask64lo(21))); | 322 | (xfs_bmbt_rec_base_t)xfs_mask64lo(21))); |
| 323 | #else /* !XFS_BIG_BLKNOS */ | 323 | #else /* !XFS_BIG_BLKNOS */ |
| 324 | if (ISNULLSTARTBLOCK(startblock)) { | 324 | if (isnullstartblock(startblock)) { |
| 325 | r->l0 = cpu_to_be64( | 325 | r->l0 = cpu_to_be64( |
| 326 | ((xfs_bmbt_rec_base_t)extent_flag << 63) | | 326 | ((xfs_bmbt_rec_base_t)extent_flag << 63) | |
| 327 | ((xfs_bmbt_rec_base_t)startoff << 9) | | 327 | ((xfs_bmbt_rec_base_t)startoff << 9) | |
| @@ -382,7 +382,7 @@ xfs_bmbt_set_startblock( | |||
| 382 | r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) | | 382 | r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) | |
| 383 | (xfs_bmbt_rec_base_t)(v << 21); | 383 | (xfs_bmbt_rec_base_t)(v << 21); |
| 384 | #else /* !XFS_BIG_BLKNOS */ | 384 | #else /* !XFS_BIG_BLKNOS */ |
| 385 | if (ISNULLSTARTBLOCK(v)) { | 385 | if (isnullstartblock(v)) { |
| 386 | r->l0 |= (xfs_bmbt_rec_base_t)xfs_mask64lo(9); | 386 | r->l0 |= (xfs_bmbt_rec_base_t)xfs_mask64lo(9); |
| 387 | r->l1 = (xfs_bmbt_rec_base_t)xfs_mask64hi(11) | | 387 | r->l1 = (xfs_bmbt_rec_base_t)xfs_mask64hi(11) | |
| 388 | ((xfs_bmbt_rec_base_t)v << 21) | | 388 | ((xfs_bmbt_rec_base_t)v << 21) | |
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index a4555abb6622..0e8df007615e 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h | |||
| @@ -76,26 +76,22 @@ typedef struct xfs_bmbt_rec_host { | |||
| 76 | #define DSTARTBLOCKMASK \ | 76 | #define DSTARTBLOCKMASK \ |
| 77 | (((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS) | 77 | (((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS) |
| 78 | 78 | ||
| 79 | #define ISNULLSTARTBLOCK(x) isnullstartblock(x) | ||
| 80 | static inline int isnullstartblock(xfs_fsblock_t x) | 79 | static inline int isnullstartblock(xfs_fsblock_t x) |
| 81 | { | 80 | { |
| 82 | return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK; | 81 | return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK; |
| 83 | } | 82 | } |
| 84 | 83 | ||
| 85 | #define ISNULLDSTARTBLOCK(x) isnulldstartblock(x) | ||
| 86 | static inline int isnulldstartblock(xfs_dfsbno_t x) | 84 | static inline int isnulldstartblock(xfs_dfsbno_t x) |
| 87 | { | 85 | { |
| 88 | return ((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK; | 86 | return ((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK; |
| 89 | } | 87 | } |
| 90 | 88 | ||
| 91 | #define NULLSTARTBLOCK(k) nullstartblock(k) | ||
| 92 | static inline xfs_fsblock_t nullstartblock(int k) | 89 | static inline xfs_fsblock_t nullstartblock(int k) |
| 93 | { | 90 | { |
| 94 | ASSERT(k < (1 << STARTBLOCKVALBITS)); | 91 | ASSERT(k < (1 << STARTBLOCKVALBITS)); |
| 95 | return STARTBLOCKMASK | (k); | 92 | return STARTBLOCKMASK | (k); |
| 96 | } | 93 | } |
| 97 | 94 | ||
| 98 | #define STARTBLOCKVAL(x) startblockval(x) | ||
| 99 | static inline xfs_filblks_t startblockval(xfs_fsblock_t x) | 95 | static inline xfs_filblks_t startblockval(xfs_fsblock_t x) |
| 100 | { | 96 | { |
| 101 | return (xfs_filblks_t)((x) & ~STARTBLOCKMASK); | 97 | return (xfs_filblks_t)((x) & ~STARTBLOCKMASK); |
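The xfs_bmap_btree.h hunk above keeps only the lowercase helpers for the delayed-allocation encoding: the start block field of such an extent carries a reserved indirect-block count tagged with STARTBLOCKMASK instead of a real block number. A short sketch of how the three inlines round-trip that encoding, matching the nullstartblock()/startblockval() call sites in the xfs_bmap.c hunks (the variables here are illustrative):

	/* Sketch: encode, test and decode a delayed-allocation placeholder. */
	xfs_fsblock_t	startblock;
	xfs_filblks_t	indlen = 4;		/* example worst-case indirect-block count */

	startblock = nullstartblock((int)indlen);
	ASSERT(isnullstartblock(startblock));		/* placeholder, not a real block */
	ASSERT(startblockval(startblock) == indlen);	/* reserved count recovered */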
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 2c3ef20f8842..e73c332eb23f 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c | |||
| @@ -843,7 +843,7 @@ xfs_btree_ptr_is_null( | |||
| 843 | union xfs_btree_ptr *ptr) | 843 | union xfs_btree_ptr *ptr) |
| 844 | { | 844 | { |
| 845 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) | 845 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) |
| 846 | return be64_to_cpu(ptr->l) == NULLFSBLOCK; | 846 | return be64_to_cpu(ptr->l) == NULLDFSBNO; |
| 847 | else | 847 | else |
| 848 | return be32_to_cpu(ptr->s) == NULLAGBLOCK; | 848 | return be32_to_cpu(ptr->s) == NULLAGBLOCK; |
| 849 | } | 849 | } |
| @@ -854,7 +854,7 @@ xfs_btree_set_ptr_null( | |||
| 854 | union xfs_btree_ptr *ptr) | 854 | union xfs_btree_ptr *ptr) |
| 855 | { | 855 | { |
| 856 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) | 856 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) |
| 857 | ptr->l = cpu_to_be64(NULLFSBLOCK); | 857 | ptr->l = cpu_to_be64(NULLDFSBNO); |
| 858 | else | 858 | else |
| 859 | ptr->s = cpu_to_be32(NULLAGBLOCK); | 859 | ptr->s = cpu_to_be32(NULLAGBLOCK); |
| 860 | } | 860 | } |
| @@ -918,8 +918,8 @@ xfs_btree_init_block( | |||
| 918 | new->bb_numrecs = cpu_to_be16(numrecs); | 918 | new->bb_numrecs = cpu_to_be16(numrecs); |
| 919 | 919 | ||
| 920 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { | 920 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { |
| 921 | new->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK); | 921 | new->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO); |
| 922 | new->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK); | 922 | new->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO); |
| 923 | } else { | 923 | } else { |
| 924 | new->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK); | 924 | new->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK); |
| 925 | new->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK); | 925 | new->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK); |
| @@ -960,7 +960,7 @@ xfs_btree_buf_to_ptr( | |||
| 960 | ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp, | 960 | ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp, |
| 961 | XFS_BUF_ADDR(bp))); | 961 | XFS_BUF_ADDR(bp))); |
| 962 | else { | 962 | else { |
| 963 | ptr->s = cpu_to_be32(XFS_DADDR_TO_AGBNO(cur->bc_mp, | 963 | ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp, |
| 964 | XFS_BUF_ADDR(bp))); | 964 | XFS_BUF_ADDR(bp))); |
| 965 | } | 965 | } |
| 966 | } | 966 | } |
| @@ -971,7 +971,7 @@ xfs_btree_ptr_to_daddr( | |||
| 971 | union xfs_btree_ptr *ptr) | 971 | union xfs_btree_ptr *ptr) |
| 972 | { | 972 | { |
| 973 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { | 973 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { |
| 974 | ASSERT(be64_to_cpu(ptr->l) != NULLFSBLOCK); | 974 | ASSERT(be64_to_cpu(ptr->l) != NULLDFSBNO); |
| 975 | 975 | ||
| 976 | return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l)); | 976 | return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l)); |
| 977 | } else { | 977 | } else { |
| @@ -2454,7 +2454,7 @@ xfs_btree_new_iroot( | |||
| 2454 | xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs)); | 2454 | xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs)); |
| 2455 | 2455 | ||
| 2456 | *logflags |= | 2456 | *logflags |= |
| 2457 | XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork); | 2457 | XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork); |
| 2458 | *stat = 1; | 2458 | *stat = 1; |
| 2459 | XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); | 2459 | XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); |
| 2460 | return 0; | 2460 | return 0; |
| @@ -3048,7 +3048,7 @@ xfs_btree_kill_iroot( | |||
| 3048 | cur->bc_bufs[level - 1] = NULL; | 3048 | cur->bc_bufs[level - 1] = NULL; |
| 3049 | be16_add_cpu(&block->bb_level, -1); | 3049 | be16_add_cpu(&block->bb_level, -1); |
| 3050 | xfs_trans_log_inode(cur->bc_tp, ip, | 3050 | xfs_trans_log_inode(cur->bc_tp, ip, |
| 3051 | XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); | 3051 | XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork)); |
| 3052 | cur->bc_nlevels--; | 3052 | cur->bc_nlevels--; |
| 3053 | out0: | 3053 | out0: |
| 3054 | XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); | 3054 | XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); |
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index a11a8390bf6c..c45f74ff1a5b 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c | |||
| @@ -1597,7 +1597,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno) | |||
| 1597 | nmap = 1; | 1597 | nmap = 1; |
| 1598 | ASSERT(args->firstblock != NULL); | 1598 | ASSERT(args->firstblock != NULL); |
| 1599 | if ((error = xfs_bmapi(tp, dp, bno, count, | 1599 | if ((error = xfs_bmapi(tp, dp, bno, count, |
| 1600 | XFS_BMAPI_AFLAG(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA| | 1600 | xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA| |
| 1601 | XFS_BMAPI_CONTIG, | 1601 | XFS_BMAPI_CONTIG, |
| 1602 | args->firstblock, args->total, &map, &nmap, | 1602 | args->firstblock, args->total, &map, &nmap, |
| 1603 | args->flist, NULL))) { | 1603 | args->flist, NULL))) { |
| @@ -1618,7 +1618,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno) | |||
| 1618 | nmap = MIN(XFS_BMAP_MAX_NMAP, count); | 1618 | nmap = MIN(XFS_BMAP_MAX_NMAP, count); |
| 1619 | c = (int)(bno + count - b); | 1619 | c = (int)(bno + count - b); |
| 1620 | if ((error = xfs_bmapi(tp, dp, b, c, | 1620 | if ((error = xfs_bmapi(tp, dp, b, c, |
| 1621 | XFS_BMAPI_AFLAG(w)|XFS_BMAPI_WRITE| | 1621 | xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE| |
| 1622 | XFS_BMAPI_METADATA, | 1622 | XFS_BMAPI_METADATA, |
| 1623 | args->firstblock, args->total, | 1623 | args->firstblock, args->total, |
| 1624 | &mapp[mapi], &nmap, args->flist, | 1624 | &mapp[mapi], &nmap, args->flist, |
| @@ -1882,7 +1882,7 @@ xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno, | |||
| 1882 | * the last block to the place we want to kill. | 1882 | * the last block to the place we want to kill. |
| 1883 | */ | 1883 | */ |
| 1884 | if ((error = xfs_bunmapi(tp, dp, dead_blkno, count, | 1884 | if ((error = xfs_bunmapi(tp, dp, dead_blkno, count, |
| 1885 | XFS_BMAPI_AFLAG(w)|XFS_BMAPI_METADATA, | 1885 | xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA, |
| 1886 | 0, args->firstblock, args->flist, NULL, | 1886 | 0, args->firstblock, args->flist, NULL, |
| 1887 | &done)) == ENOSPC) { | 1887 | &done)) == ENOSPC) { |
| 1888 | if (w != XFS_DATA_FORK) | 1888 | if (w != XFS_DATA_FORK) |
| @@ -1987,7 +1987,7 @@ xfs_da_do_buf( | |||
| 1987 | if ((error = xfs_bmapi(trans, dp, (xfs_fileoff_t)bno, | 1987 | if ((error = xfs_bmapi(trans, dp, (xfs_fileoff_t)bno, |
| 1988 | nfsb, | 1988 | nfsb, |
| 1989 | XFS_BMAPI_METADATA | | 1989 | XFS_BMAPI_METADATA | |
| 1990 | XFS_BMAPI_AFLAG(whichfork), | 1990 | xfs_bmapi_aflag(whichfork), |
| 1991 | NULL, 0, mapp, &nmap, NULL, NULL))) | 1991 | NULL, 0, mapp, &nmap, NULL, NULL))) |
| 1992 | goto exit0; | 1992 | goto exit0; |
| 1993 | } | 1993 | } |
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index e6ebbaeb4dc6..ab016e5ae7be 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c | |||
| @@ -357,7 +357,7 @@ xfs_ialloc_ag_alloc( | |||
| 357 | int ioffset = i << args.mp->m_sb.sb_inodelog; | 357 | int ioffset = i << args.mp->m_sb.sb_inodelog; |
| 358 | uint isize = sizeof(struct xfs_dinode); | 358 | uint isize = sizeof(struct xfs_dinode); |
| 359 | 359 | ||
| 360 | free = XFS_MAKE_IPTR(args.mp, fbuf, i); | 360 | free = xfs_make_iptr(args.mp, fbuf, i); |
| 361 | free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC); | 361 | free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC); |
| 362 | free->di_version = version; | 362 | free->di_version = version; |
| 363 | free->di_gen = cpu_to_be32(gen); | 363 | free->di_gen = cpu_to_be32(gen); |
| @@ -937,7 +937,7 @@ nextag: | |||
| 937 | } | 937 | } |
| 938 | } | 938 | } |
| 939 | } | 939 | } |
| 940 | offset = XFS_IALLOC_FIND_FREE(&rec.ir_free); | 940 | offset = xfs_ialloc_find_free(&rec.ir_free); |
| 941 | ASSERT(offset >= 0); | 941 | ASSERT(offset >= 0); |
| 942 | ASSERT(offset < XFS_INODES_PER_CHUNK); | 942 | ASSERT(offset < XFS_INODES_PER_CHUNK); |
| 943 | ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % | 943 | ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % |
| @@ -1279,7 +1279,7 @@ xfs_imap( | |||
| 1279 | offset = XFS_INO_TO_OFFSET(mp, ino); | 1279 | offset = XFS_INO_TO_OFFSET(mp, ino); |
| 1280 | ASSERT(offset < mp->m_sb.sb_inopblock); | 1280 | ASSERT(offset < mp->m_sb.sb_inopblock); |
| 1281 | 1281 | ||
| 1282 | cluster_agbno = XFS_DADDR_TO_AGBNO(mp, imap->im_blkno); | 1282 | cluster_agbno = xfs_daddr_to_agbno(mp, imap->im_blkno); |
| 1283 | offset += (agbno - cluster_agbno) * mp->m_sb.sb_inopblock; | 1283 | offset += (agbno - cluster_agbno) * mp->m_sb.sb_inopblock; |
| 1284 | 1284 | ||
| 1285 | imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster); | 1285 | imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster); |
diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h index 50f558a4e0a8..aeee8278f92c 100644 --- a/fs/xfs/xfs_ialloc.h +++ b/fs/xfs/xfs_ialloc.h | |||
| @@ -39,7 +39,6 @@ struct xfs_trans; | |||
| 39 | /* | 39 | /* |
| 40 | * Make an inode pointer out of the buffer/offset. | 40 | * Make an inode pointer out of the buffer/offset. |
| 41 | */ | 41 | */ |
| 42 | #define XFS_MAKE_IPTR(mp,b,o) xfs_make_iptr(mp,b,o) | ||
| 43 | static inline struct xfs_dinode * | 42 | static inline struct xfs_dinode * |
| 44 | xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o) | 43 | xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o) |
| 45 | { | 44 | { |
| @@ -50,7 +49,6 @@ xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o) | |||
| 50 | /* | 49 | /* |
| 51 | * Find a free (set) bit in the inode bitmask. | 50 | * Find a free (set) bit in the inode bitmask. |
| 52 | */ | 51 | */ |
| 53 | #define XFS_IALLOC_FIND_FREE(fp) xfs_ialloc_find_free(fp) | ||
| 54 | static inline int xfs_ialloc_find_free(xfs_inofree_t *fp) | 52 | static inline int xfs_ialloc_find_free(xfs_inofree_t *fp) |
| 55 | { | 53 | { |
| 56 | return xfs_lowbit64(*fp); | 54 | return xfs_lowbit64(*fp); |
diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h index 37e5dd01a577..5580e255ff06 100644 --- a/fs/xfs/xfs_ialloc_btree.h +++ b/fs/xfs/xfs_ialloc_btree.h | |||
| @@ -36,7 +36,6 @@ typedef __uint64_t xfs_inofree_t; | |||
| 36 | #define XFS_INODES_PER_CHUNK_LOG (XFS_NBBYLOG + 3) | 36 | #define XFS_INODES_PER_CHUNK_LOG (XFS_NBBYLOG + 3) |
| 37 | #define XFS_INOBT_ALL_FREE ((xfs_inofree_t)-1) | 37 | #define XFS_INOBT_ALL_FREE ((xfs_inofree_t)-1) |
| 38 | 38 | ||
| 39 | #define XFS_INOBT_MASKN(i,n) xfs_inobt_maskn(i,n) | ||
| 40 | static inline xfs_inofree_t xfs_inobt_maskn(int i, int n) | 39 | static inline xfs_inofree_t xfs_inobt_maskn(int i, int n) |
| 41 | { | 40 | { |
| 42 | return (((n) >= XFS_INODES_PER_CHUNK ? \ | 41 | return (((n) >= XFS_INODES_PER_CHUNK ? \ |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 5a5e035e5d38..e7ae08d1df48 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
| @@ -424,6 +424,19 @@ xfs_iformat( | |||
| 424 | case XFS_DINODE_FMT_LOCAL: | 424 | case XFS_DINODE_FMT_LOCAL: |
| 425 | atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip); | 425 | atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip); |
| 426 | size = be16_to_cpu(atp->hdr.totsize); | 426 | size = be16_to_cpu(atp->hdr.totsize); |
| 427 | |||
| 428 | if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) { | ||
| 429 | xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, | ||
| 430 | "corrupt inode %Lu " | ||
| 431 | "(bad attr fork size %Ld).", | ||
| 432 | (unsigned long long) ip->i_ino, | ||
| 433 | (long long) size); | ||
| 434 | XFS_CORRUPTION_ERROR("xfs_iformat(8)", | ||
| 435 | XFS_ERRLEVEL_LOW, | ||
| 436 | ip->i_mount, dip); | ||
| 437 | return XFS_ERROR(EFSCORRUPTED); | ||
| 438 | } | ||
| 439 | |||
| 427 | error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); | 440 | error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); |
| 428 | break; | 441 | break; |
| 429 | case XFS_DINODE_FMT_EXTENTS: | 442 | case XFS_DINODE_FMT_EXTENTS: |
| @@ -1601,10 +1614,10 @@ xfs_itruncate_finish( | |||
| 1601 | * in this file with garbage in them once recovery | 1614 | * in this file with garbage in them once recovery |
| 1602 | * runs. | 1615 | * runs. |
| 1603 | */ | 1616 | */ |
| 1604 | XFS_BMAP_INIT(&free_list, &first_block); | 1617 | xfs_bmap_init(&free_list, &first_block); |
| 1605 | error = xfs_bunmapi(ntp, ip, | 1618 | error = xfs_bunmapi(ntp, ip, |
| 1606 | first_unmap_block, unmap_len, | 1619 | first_unmap_block, unmap_len, |
| 1607 | XFS_BMAPI_AFLAG(fork) | | 1620 | xfs_bmapi_aflag(fork) | |
| 1608 | (sync ? 0 : XFS_BMAPI_ASYNC), | 1621 | (sync ? 0 : XFS_BMAPI_ASYNC), |
| 1609 | XFS_ITRUNC_MAX_EXTENTS, | 1622 | XFS_ITRUNC_MAX_EXTENTS, |
| 1610 | &first_block, &free_list, | 1623 | &first_block, &free_list, |
| @@ -2557,7 +2570,7 @@ xfs_iextents_copy( | |||
| 2557 | for (i = 0; i < nrecs; i++) { | 2570 | for (i = 0; i < nrecs; i++) { |
| 2558 | xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); | 2571 | xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); |
| 2559 | start_block = xfs_bmbt_get_startblock(ep); | 2572 | start_block = xfs_bmbt_get_startblock(ep); |
| 2560 | if (ISNULLSTARTBLOCK(start_block)) { | 2573 | if (isnullstartblock(start_block)) { |
| 2561 | /* | 2574 | /* |
| 2562 | * It's a delayed allocation extent, so skip it. | 2575 | * It's a delayed allocation extent, so skip it. |
| 2563 | */ | 2576 | */ |
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h index 1ff04cc323ad..9957d0602d54 100644 --- a/fs/xfs/xfs_inode_item.h +++ b/fs/xfs/xfs_inode_item.h | |||
| @@ -111,20 +111,16 @@ typedef struct xfs_inode_log_format_64 { | |||
| 111 | 111 | ||
| 112 | #define XFS_ILI_IOLOCKED_ANY (XFS_ILI_IOLOCKED_EXCL | XFS_ILI_IOLOCKED_SHARED) | 112 | #define XFS_ILI_IOLOCKED_ANY (XFS_ILI_IOLOCKED_EXCL | XFS_ILI_IOLOCKED_SHARED) |
| 113 | 113 | ||
| 114 | |||
| 115 | #define XFS_ILOG_FBROOT(w) xfs_ilog_fbroot(w) | ||
| 116 | static inline int xfs_ilog_fbroot(int w) | 114 | static inline int xfs_ilog_fbroot(int w) |
| 117 | { | 115 | { |
| 118 | return (w == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT); | 116 | return (w == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT); |
| 119 | } | 117 | } |
| 120 | 118 | ||
| 121 | #define XFS_ILOG_FEXT(w) xfs_ilog_fext(w) | ||
| 122 | static inline int xfs_ilog_fext(int w) | 119 | static inline int xfs_ilog_fext(int w) |
| 123 | { | 120 | { |
| 124 | return (w == XFS_DATA_FORK ? XFS_ILOG_DEXT : XFS_ILOG_AEXT); | 121 | return (w == XFS_DATA_FORK ? XFS_ILOG_DEXT : XFS_ILOG_AEXT); |
| 125 | } | 122 | } |
| 126 | 123 | ||
| 127 | #define XFS_ILOG_FDATA(w) xfs_ilog_fdata(w) | ||
| 128 | static inline int xfs_ilog_fdata(int w) | 124 | static inline int xfs_ilog_fdata(int w) |
| 129 | { | 125 | { |
| 130 | return (w == XFS_DATA_FORK ? XFS_ILOG_DDATA : XFS_ILOG_ADATA); | 126 | return (w == XFS_DATA_FORK ? XFS_ILOG_DDATA : XFS_ILOG_ADATA); |
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 911062cf73a6..08ce72316bfe 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
| @@ -155,7 +155,7 @@ xfs_imap_to_bmap( | |||
| 155 | iomapp->iomap_bn = IOMAP_DADDR_NULL; | 155 | iomapp->iomap_bn = IOMAP_DADDR_NULL; |
| 156 | iomapp->iomap_flags |= IOMAP_DELAY; | 156 | iomapp->iomap_flags |= IOMAP_DELAY; |
| 157 | } else { | 157 | } else { |
| 158 | iomapp->iomap_bn = XFS_FSB_TO_DB(ip, start_block); | 158 | iomapp->iomap_bn = xfs_fsb_to_db(ip, start_block); |
| 159 | if (ISUNWRITTEN(imap)) | 159 | if (ISUNWRITTEN(imap)) |
| 160 | iomapp->iomap_flags |= IOMAP_UNWRITTEN; | 160 | iomapp->iomap_flags |= IOMAP_UNWRITTEN; |
| 161 | } | 161 | } |
| @@ -261,7 +261,7 @@ xfs_iomap( | |||
| 261 | xfs_iunlock(ip, lockmode); | 261 | xfs_iunlock(ip, lockmode); |
| 262 | lockmode = 0; | 262 | lockmode = 0; |
| 263 | 263 | ||
| 264 | if (nimaps && !ISNULLSTARTBLOCK(imap.br_startblock)) { | 264 | if (nimaps && !isnullstartblock(imap.br_startblock)) { |
| 265 | xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip, | 265 | xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip, |
| 266 | offset, count, iomapp, &imap, flags); | 266 | offset, count, iomapp, &imap, flags); |
| 267 | break; | 267 | break; |
| @@ -491,7 +491,7 @@ xfs_iomap_write_direct( | |||
| 491 | /* | 491 | /* |
| 492 | * Issue the xfs_bmapi() call to allocate the blocks | 492 | * Issue the xfs_bmapi() call to allocate the blocks |
| 493 | */ | 493 | */ |
| 494 | XFS_BMAP_INIT(&free_list, &firstfsb); | 494 | xfs_bmap_init(&free_list, &firstfsb); |
| 495 | nimaps = 1; | 495 | nimaps = 1; |
| 496 | error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag, | 496 | error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag, |
| 497 | &firstfsb, 0, &imap, &nimaps, &free_list, NULL); | 497 | &firstfsb, 0, &imap, &nimaps, &free_list, NULL); |
| @@ -751,7 +751,7 @@ xfs_iomap_write_allocate( | |||
| 751 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 751 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); |
| 752 | xfs_trans_ihold(tp, ip); | 752 | xfs_trans_ihold(tp, ip); |
| 753 | 753 | ||
| 754 | XFS_BMAP_INIT(&free_list, &first_block); | 754 | xfs_bmap_init(&free_list, &first_block); |
| 755 | 755 | ||
| 756 | /* | 756 | /* |
| 757 | * it is possible that the extents have changed since | 757 | * it is possible that the extents have changed since |
| @@ -911,7 +911,7 @@ xfs_iomap_write_unwritten( | |||
| 911 | /* | 911 | /* |
| 912 | * Modify the unwritten extent state of the buffer. | 912 | * Modify the unwritten extent state of the buffer. |
| 913 | */ | 913 | */ |
| 914 | XFS_BMAP_INIT(&free_list, &firstfsb); | 914 | xfs_bmap_init(&free_list, &firstfsb); |
| 915 | nimaps = 1; | 915 | nimaps = 1; |
| 916 | error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, | 916 | error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, |
| 917 | XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb, | 917 | XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb, |
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index e19d0a8d5618..cf98a805ec90 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c | |||
| @@ -453,7 +453,7 @@ xfs_bulkstat( | |||
| 453 | (chunkidx = agino - gino + 1) < | 453 | (chunkidx = agino - gino + 1) < |
| 454 | XFS_INODES_PER_CHUNK && | 454 | XFS_INODES_PER_CHUNK && |
| 455 | /* there are some left allocated */ | 455 | /* there are some left allocated */ |
| 456 | XFS_INOBT_MASKN(chunkidx, | 456 | xfs_inobt_maskn(chunkidx, |
| 457 | XFS_INODES_PER_CHUNK - chunkidx) & ~gfree) { | 457 | XFS_INODES_PER_CHUNK - chunkidx) & ~gfree) { |
| 458 | /* | 458 | /* |
| 459 | * Grab the chunk record. Mark all the | 459 | * Grab the chunk record. Mark all the |
| @@ -464,7 +464,7 @@ xfs_bulkstat( | |||
| 464 | if (XFS_INOBT_MASK(i) & ~gfree) | 464 | if (XFS_INOBT_MASK(i) & ~gfree) |
| 465 | gcnt++; | 465 | gcnt++; |
| 466 | } | 466 | } |
| 467 | gfree |= XFS_INOBT_MASKN(0, chunkidx); | 467 | gfree |= xfs_inobt_maskn(0, chunkidx); |
| 468 | irbp->ir_startino = gino; | 468 | irbp->ir_startino = gino; |
| 469 | irbp->ir_freecount = gcnt; | 469 | irbp->ir_freecount = gcnt; |
| 470 | irbp->ir_free = gfree; | 470 | irbp->ir_free = gfree; |
| @@ -535,7 +535,7 @@ xfs_bulkstat( | |||
| 535 | chunkidx < XFS_INODES_PER_CHUNK; | 535 | chunkidx < XFS_INODES_PER_CHUNK; |
| 536 | chunkidx += nicluster, | 536 | chunkidx += nicluster, |
| 537 | agbno += nbcluster) { | 537 | agbno += nbcluster) { |
| 538 | if (XFS_INOBT_MASKN(chunkidx, | 538 | if (xfs_inobt_maskn(chunkidx, |
| 539 | nicluster) & ~gfree) | 539 | nicluster) & ~gfree) |
| 540 | xfs_btree_reada_bufs(mp, agno, | 540 | xfs_btree_reada_bufs(mp, agno, |
| 541 | agbno, nbcluster); | 541 | agbno, nbcluster); |
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 3c97c6463a4e..35300250e86d 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
| @@ -45,7 +45,6 @@ | |||
| 45 | #include "xfs_fsops.h" | 45 | #include "xfs_fsops.h" |
| 46 | #include "xfs_utils.h" | 46 | #include "xfs_utils.h" |
| 47 | 47 | ||
| 48 | STATIC int xfs_mount_log_sb(xfs_mount_t *, __int64_t); | ||
| 49 | STATIC int xfs_uuid_mount(xfs_mount_t *); | 48 | STATIC int xfs_uuid_mount(xfs_mount_t *); |
| 50 | STATIC void xfs_unmountfs_wait(xfs_mount_t *); | 49 | STATIC void xfs_unmountfs_wait(xfs_mount_t *); |
| 51 | 50 | ||
| @@ -682,7 +681,7 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount) | |||
| 682 | * Update alignment values based on mount options and sb values | 681 | * Update alignment values based on mount options and sb values |
| 683 | */ | 682 | */ |
| 684 | STATIC int | 683 | STATIC int |
| 685 | xfs_update_alignment(xfs_mount_t *mp, __uint64_t *update_flags) | 684 | xfs_update_alignment(xfs_mount_t *mp) |
| 686 | { | 685 | { |
| 687 | xfs_sb_t *sbp = &(mp->m_sb); | 686 | xfs_sb_t *sbp = &(mp->m_sb); |
| 688 | 687 | ||
| @@ -736,11 +735,11 @@ xfs_update_alignment(xfs_mount_t *mp, __uint64_t *update_flags) | |||
| 736 | if (xfs_sb_version_hasdalign(sbp)) { | 735 | if (xfs_sb_version_hasdalign(sbp)) { |
| 737 | if (sbp->sb_unit != mp->m_dalign) { | 736 | if (sbp->sb_unit != mp->m_dalign) { |
| 738 | sbp->sb_unit = mp->m_dalign; | 737 | sbp->sb_unit = mp->m_dalign; |
| 739 | *update_flags |= XFS_SB_UNIT; | 738 | mp->m_update_flags |= XFS_SB_UNIT; |
| 740 | } | 739 | } |
| 741 | if (sbp->sb_width != mp->m_swidth) { | 740 | if (sbp->sb_width != mp->m_swidth) { |
| 742 | sbp->sb_width = mp->m_swidth; | 741 | sbp->sb_width = mp->m_swidth; |
| 743 | *update_flags |= XFS_SB_WIDTH; | 742 | mp->m_update_flags |= XFS_SB_WIDTH; |
| 744 | } | 743 | } |
| 745 | } | 744 | } |
| 746 | } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && | 745 | } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && |
| @@ -905,7 +904,6 @@ xfs_mountfs( | |||
| 905 | xfs_sb_t *sbp = &(mp->m_sb); | 904 | xfs_sb_t *sbp = &(mp->m_sb); |
| 906 | xfs_inode_t *rip; | 905 | xfs_inode_t *rip; |
| 907 | __uint64_t resblks; | 906 | __uint64_t resblks; |
| 908 | __int64_t update_flags = 0LL; | ||
| 909 | uint quotamount, quotaflags; | 907 | uint quotamount, quotaflags; |
| 910 | int uuid_mounted = 0; | 908 | int uuid_mounted = 0; |
| 911 | int error = 0; | 909 | int error = 0; |
| @@ -933,7 +931,7 @@ xfs_mountfs( | |||
| 933 | "XFS: correcting sb_features alignment problem"); | 931 | "XFS: correcting sb_features alignment problem"); |
| 934 | sbp->sb_features2 |= sbp->sb_bad_features2; | 932 | sbp->sb_features2 |= sbp->sb_bad_features2; |
| 935 | sbp->sb_bad_features2 = sbp->sb_features2; | 933 | sbp->sb_bad_features2 = sbp->sb_features2; |
| 936 | update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2; | 934 | mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2; |
| 937 | 935 | ||
| 938 | /* | 936 | /* |
| 939 | * Re-check for ATTR2 in case it was found in bad_features2 | 937 | * Re-check for ATTR2 in case it was found in bad_features2 |
| @@ -947,11 +945,11 @@ xfs_mountfs( | |||
| 947 | if (xfs_sb_version_hasattr2(&mp->m_sb) && | 945 | if (xfs_sb_version_hasattr2(&mp->m_sb) && |
| 948 | (mp->m_flags & XFS_MOUNT_NOATTR2)) { | 946 | (mp->m_flags & XFS_MOUNT_NOATTR2)) { |
| 949 | xfs_sb_version_removeattr2(&mp->m_sb); | 947 | xfs_sb_version_removeattr2(&mp->m_sb); |
| 950 | update_flags |= XFS_SB_FEATURES2; | 948 | mp->m_update_flags |= XFS_SB_FEATURES2; |
| 951 | 949 | ||
| 952 | /* update sb_versionnum for the clearing of the morebits */ | 950 | /* update sb_versionnum for the clearing of the morebits */ |
| 953 | if (!sbp->sb_features2) | 951 | if (!sbp->sb_features2) |
| 954 | update_flags |= XFS_SB_VERSIONNUM; | 952 | mp->m_update_flags |= XFS_SB_VERSIONNUM; |
| 955 | } | 953 | } |
| 956 | 954 | ||
| 957 | /* | 955 | /* |
| @@ -960,7 +958,7 @@ xfs_mountfs( | |||
| 960 | * allocator alignment is within an ag, therefore ag has | 958 | * allocator alignment is within an ag, therefore ag has |
| 961 | * to be aligned at stripe boundary. | 959 | * to be aligned at stripe boundary. |
| 962 | */ | 960 | */ |
| 963 | error = xfs_update_alignment(mp, &update_flags); | 961 | error = xfs_update_alignment(mp); |
| 964 | if (error) | 962 | if (error) |
| 965 | goto error1; | 963 | goto error1; |
| 966 | 964 | ||
| @@ -1137,10 +1135,12 @@ xfs_mountfs( | |||
| 1137 | } | 1135 | } |
| 1138 | 1136 | ||
| 1139 | /* | 1137 | /* |
| 1140 | * If fs is not mounted readonly, then update the superblock changes. | 1138 | * If this is a read-only mount, defer the superblock updates until |
| 1139 | * the next remount into writeable mode. Otherwise we would never | ||
| 1140 | * perform the update, e.g. for the root filesystem. | ||
| 1141 | */ | 1141 | */ |
| 1142 | if (update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) { | 1142 | if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) { |
| 1143 | error = xfs_mount_log_sb(mp, update_flags); | 1143 | error = xfs_mount_log_sb(mp, mp->m_update_flags); |
| 1144 | if (error) { | 1144 | if (error) { |
| 1145 | cmn_err(CE_WARN, "XFS: failed to write sb changes"); | 1145 | cmn_err(CE_WARN, "XFS: failed to write sb changes"); |
| 1146 | goto error4; | 1146 | goto error4; |
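A minimal sketch of the remount,rw side this deferral relies on; that handler is not part of this hunk, so the shape below is an assumption rather than the series' actual code:

	/* hedged sketch: assumed remount-to-writeable path */
	if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		error = xfs_mount_log_sb(mp, mp->m_update_flags); /* now exported, see xfs_mount.h below */
		if (!error)
			mp->m_update_flags = 0;	/* assumption: clear once logged */
	}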
| @@ -1820,7 +1820,7 @@ xfs_uuid_mount( | |||
| 1820 | * be altered by the mount options, as well as any potential sb_features2 | 1820 | * be altered by the mount options, as well as any potential sb_features2 |
| 1821 | * fixup. Only the first superblock is updated. | 1821 | * fixup. Only the first superblock is updated. |
| 1822 | */ | 1822 | */ |
| 1823 | STATIC int | 1823 | int |
| 1824 | xfs_mount_log_sb( | 1824 | xfs_mount_log_sb( |
| 1825 | xfs_mount_t *mp, | 1825 | xfs_mount_t *mp, |
| 1826 | __int64_t fields) | 1826 | __int64_t fields) |
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index c1e028467327..f5e9937f9bdb 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h | |||
| @@ -44,9 +44,9 @@ typedef struct xfs_trans_reservations { | |||
| 44 | 44 | ||
| 45 | #ifndef __KERNEL__ | 45 | #ifndef __KERNEL__ |
| 46 | 46 | ||
| 47 | #define XFS_DADDR_TO_AGNO(mp,d) \ | 47 | #define xfs_daddr_to_agno(mp,d) \ |
| 48 | ((xfs_agnumber_t)(XFS_BB_TO_FSBT(mp, d) / (mp)->m_sb.sb_agblocks)) | 48 | ((xfs_agnumber_t)(XFS_BB_TO_FSBT(mp, d) / (mp)->m_sb.sb_agblocks)) |
| 49 | #define XFS_DADDR_TO_AGBNO(mp,d) \ | 49 | #define xfs_daddr_to_agbno(mp,d) \ |
| 50 | ((xfs_agblock_t)(XFS_BB_TO_FSBT(mp, d) % (mp)->m_sb.sb_agblocks)) | 50 | ((xfs_agblock_t)(XFS_BB_TO_FSBT(mp, d) % (mp)->m_sb.sb_agblocks)) |
| 51 | 51 | ||
| 52 | #else /* __KERNEL__ */ | 52 | #else /* __KERNEL__ */ |
| @@ -327,6 +327,8 @@ typedef struct xfs_mount { | |||
| 327 | spinlock_t m_sync_lock; /* work item list lock */ | 327 | spinlock_t m_sync_lock; /* work item list lock */ |
| 328 | int m_sync_seq; /* sync thread generation no. */ | 328 | int m_sync_seq; /* sync thread generation no. */ |
| 329 | wait_queue_head_t m_wait_single_sync_task; | 329 | wait_queue_head_t m_wait_single_sync_task; |
| 330 | __int64_t m_update_flags; /* sb flags we need to update | ||
| 331 | on the next remount,rw */ | ||
| 330 | } xfs_mount_t; | 332 | } xfs_mount_t; |
| 331 | 333 | ||
| 332 | /* | 334 | /* |
| @@ -439,7 +441,6 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname, | |||
| 439 | */ | 441 | */ |
| 440 | #define XFS_MFSI_QUIET 0x40 /* Be silent if mount errors found */ | 442 | #define XFS_MFSI_QUIET 0x40 /* Be silent if mount errors found */ |
| 441 | 443 | ||
| 442 | #define XFS_DADDR_TO_AGNO(mp,d) xfs_daddr_to_agno(mp,d) | ||
| 443 | static inline xfs_agnumber_t | 444 | static inline xfs_agnumber_t |
| 444 | xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d) | 445 | xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d) |
| 445 | { | 446 | { |
| @@ -448,7 +449,6 @@ xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d) | |||
| 448 | return (xfs_agnumber_t) ld; | 449 | return (xfs_agnumber_t) ld; |
| 449 | } | 450 | } |
| 450 | 451 | ||
| 451 | #define XFS_DADDR_TO_AGBNO(mp,d) xfs_daddr_to_agbno(mp,d) | ||
| 452 | static inline xfs_agblock_t | 452 | static inline xfs_agblock_t |
| 453 | xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d) | 453 | xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d) |
| 454 | { | 454 | { |
| @@ -514,6 +514,7 @@ extern int xfs_mod_incore_sb_unlocked(xfs_mount_t *, xfs_sb_field_t, | |||
| 514 | int64_t, int); | 514 | int64_t, int); |
| 515 | extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *, | 515 | extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *, |
| 516 | uint, int); | 516 | uint, int); |
| 517 | extern int xfs_mount_log_sb(xfs_mount_t *, __int64_t); | ||
| 517 | extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); | 518 | extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); |
| 518 | extern int xfs_readsb(xfs_mount_t *, int); | 519 | extern int xfs_readsb(xfs_mount_t *, int); |
| 519 | extern void xfs_freesb(xfs_mount_t *); | 520 | extern void xfs_freesb(xfs_mount_t *); |
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c index 86471bb40fd4..58f85e9cd11d 100644 --- a/fs/xfs/xfs_rename.c +++ b/fs/xfs/xfs_rename.c | |||
| @@ -147,7 +147,7 @@ xfs_rename( | |||
| 147 | xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, | 147 | xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, |
| 148 | inodes, &num_inodes); | 148 | inodes, &num_inodes); |
| 149 | 149 | ||
| 150 | XFS_BMAP_INIT(&free_list, &first_block); | 150 | xfs_bmap_init(&free_list, &first_block); |
| 151 | tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME); | 151 | tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME); |
| 152 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; | 152 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; |
| 153 | spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len); | 153 | spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len); |
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index edf12c7b834c..c5bb86f3ec05 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c | |||
| @@ -120,7 +120,7 @@ xfs_growfs_rt_alloc( | |||
| 120 | if ((error = xfs_trans_iget(mp, tp, ino, 0, | 120 | if ((error = xfs_trans_iget(mp, tp, ino, 0, |
| 121 | XFS_ILOCK_EXCL, &ip))) | 121 | XFS_ILOCK_EXCL, &ip))) |
| 122 | goto error_cancel; | 122 | goto error_cancel; |
| 123 | XFS_BMAP_INIT(&flist, &firstblock); | 123 | xfs_bmap_init(&flist, &firstblock); |
| 124 | /* | 124 | /* |
| 125 | * Allocate blocks to the bitmap file. | 125 | * Allocate blocks to the bitmap file. |
| 126 | */ | 126 | */ |
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h index f87db5344ce6..f76c003ec55d 100644 --- a/fs/xfs/xfs_rw.h +++ b/fs/xfs/xfs_rw.h | |||
| @@ -28,7 +28,6 @@ struct xfs_mount; | |||
| 28 | * file is a real time file or not, because the bmap code | 28 | * file is a real time file or not, because the bmap code |
| 29 | * does. | 29 | * does. |
| 30 | */ | 30 | */ |
| 31 | #define XFS_FSB_TO_DB(ip,fsb) xfs_fsb_to_db(ip,fsb) | ||
| 32 | static inline xfs_daddr_t | 31 | static inline xfs_daddr_t |
| 33 | xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb) | 32 | xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb) |
| 34 | { | 33 | { |
diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h index 1ed71916e4c9..1b017c657494 100644 --- a/fs/xfs/xfs_sb.h +++ b/fs/xfs/xfs_sb.h | |||
| @@ -505,7 +505,7 @@ static inline void xfs_sb_version_removeattr2(xfs_sb_t *sbp) | |||
| 505 | 505 | ||
| 506 | #define XFS_HDR_BLOCK(mp,d) ((xfs_agblock_t)XFS_BB_TO_FSBT(mp,d)) | 506 | #define XFS_HDR_BLOCK(mp,d) ((xfs_agblock_t)XFS_BB_TO_FSBT(mp,d)) |
| 507 | #define XFS_DADDR_TO_FSB(mp,d) XFS_AGB_TO_FSB(mp, \ | 507 | #define XFS_DADDR_TO_FSB(mp,d) XFS_AGB_TO_FSB(mp, \ |
| 508 | XFS_DADDR_TO_AGNO(mp,d), XFS_DADDR_TO_AGBNO(mp,d)) | 508 | xfs_daddr_to_agno(mp,d), xfs_daddr_to_agbno(mp,d)) |
| 509 | #define XFS_FSB_TO_DADDR(mp,fsbno) XFS_AGB_TO_DADDR(mp, \ | 509 | #define XFS_FSB_TO_DADDR(mp,fsbno) XFS_AGB_TO_DADDR(mp, \ |
| 510 | XFS_FSB_TO_AGNO(mp,fsbno), XFS_FSB_TO_AGBNO(mp,fsbno)) | 510 | XFS_FSB_TO_AGNO(mp,fsbno), XFS_FSB_TO_AGBNO(mp,fsbno)) |
| 511 | 511 | ||
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index f07bf8768c3a..0e55c5d7db5f 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
| @@ -862,7 +862,7 @@ xfs_inactive_symlink_rmt( | |||
| 862 | * Find the block(s) so we can inval and unmap them. | 862 | * Find the block(s) so we can inval and unmap them. |
| 863 | */ | 863 | */ |
| 864 | done = 0; | 864 | done = 0; |
| 865 | XFS_BMAP_INIT(&free_list, &first_block); | 865 | xfs_bmap_init(&free_list, &first_block); |
| 866 | nmaps = ARRAY_SIZE(mval); | 866 | nmaps = ARRAY_SIZE(mval); |
| 867 | if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size), | 867 | if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size), |
| 868 | XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps, | 868 | XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps, |
| @@ -1288,7 +1288,7 @@ xfs_inactive( | |||
| 1288 | /* | 1288 | /* |
| 1289 | * Free the inode. | 1289 | * Free the inode. |
| 1290 | */ | 1290 | */ |
| 1291 | XFS_BMAP_INIT(&free_list, &first_block); | 1291 | xfs_bmap_init(&free_list, &first_block); |
| 1292 | error = xfs_ifree(tp, ip, &free_list); | 1292 | error = xfs_ifree(tp, ip, &free_list); |
| 1293 | if (error) { | 1293 | if (error) { |
| 1294 | /* | 1294 | /* |
| @@ -1461,7 +1461,7 @@ xfs_create( | |||
| 1461 | xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT); | 1461 | xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT); |
| 1462 | unlock_dp_on_error = B_TRUE; | 1462 | unlock_dp_on_error = B_TRUE; |
| 1463 | 1463 | ||
| 1464 | XFS_BMAP_INIT(&free_list, &first_block); | 1464 | xfs_bmap_init(&free_list, &first_block); |
| 1465 | 1465 | ||
| 1466 | ASSERT(ip == NULL); | 1466 | ASSERT(ip == NULL); |
| 1467 | 1467 | ||
| @@ -1879,7 +1879,7 @@ xfs_remove( | |||
| 1879 | } | 1879 | } |
| 1880 | } | 1880 | } |
| 1881 | 1881 | ||
| 1882 | XFS_BMAP_INIT(&free_list, &first_block); | 1882 | xfs_bmap_init(&free_list, &first_block); |
| 1883 | error = xfs_dir_removename(tp, dp, name, ip->i_ino, | 1883 | error = xfs_dir_removename(tp, dp, name, ip->i_ino, |
| 1884 | &first_block, &free_list, resblks); | 1884 | &first_block, &free_list, resblks); |
| 1885 | if (error) { | 1885 | if (error) { |
| @@ -2059,7 +2059,7 @@ xfs_link( | |||
| 2059 | if (error) | 2059 | if (error) |
| 2060 | goto error_return; | 2060 | goto error_return; |
| 2061 | 2061 | ||
| 2062 | XFS_BMAP_INIT(&free_list, &first_block); | 2062 | xfs_bmap_init(&free_list, &first_block); |
| 2063 | 2063 | ||
| 2064 | error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino, | 2064 | error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino, |
| 2065 | &first_block, &free_list, resblks); | 2065 | &first_block, &free_list, resblks); |
| @@ -2231,7 +2231,7 @@ xfs_mkdir( | |||
| 2231 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); | 2231 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); |
| 2232 | unlock_dp_on_error = B_FALSE; | 2232 | unlock_dp_on_error = B_FALSE; |
| 2233 | 2233 | ||
| 2234 | XFS_BMAP_INIT(&free_list, &first_block); | 2234 | xfs_bmap_init(&free_list, &first_block); |
| 2235 | 2235 | ||
| 2236 | error = xfs_dir_createname(tp, dp, dir_name, cdp->i_ino, | 2236 | error = xfs_dir_createname(tp, dp, dir_name, cdp->i_ino, |
| 2237 | &first_block, &free_list, resblks ? | 2237 | &first_block, &free_list, resblks ? |
| @@ -2438,7 +2438,7 @@ xfs_symlink( | |||
| 2438 | * Initialize the bmap freelist prior to calling either | 2438 | * Initialize the bmap freelist prior to calling either |
| 2439 | * bmapi or the directory create code. | 2439 | * bmapi or the directory create code. |
| 2440 | */ | 2440 | */ |
| 2441 | XFS_BMAP_INIT(&free_list, &first_block); | 2441 | xfs_bmap_init(&free_list, &first_block); |
| 2442 | 2442 | ||
| 2443 | /* | 2443 | /* |
| 2444 | * Allocate an inode for the symlink. | 2444 | * Allocate an inode for the symlink. |
| @@ -2860,7 +2860,7 @@ retry: | |||
| 2860 | /* | 2860 | /* |
| 2861 | * Issue the xfs_bmapi() call to allocate the blocks | 2861 | * Issue the xfs_bmapi() call to allocate the blocks |
| 2862 | */ | 2862 | */ |
| 2863 | XFS_BMAP_INIT(&free_list, &firstfsb); | 2863 | xfs_bmap_init(&free_list, &firstfsb); |
| 2864 | error = xfs_bmapi(tp, ip, startoffset_fsb, | 2864 | error = xfs_bmapi(tp, ip, startoffset_fsb, |
| 2865 | allocatesize_fsb, bmapi_flag, | 2865 | allocatesize_fsb, bmapi_flag, |
| 2866 | &firstfsb, 0, imapp, &nimaps, | 2866 | &firstfsb, 0, imapp, &nimaps, |
| @@ -2980,7 +2980,7 @@ xfs_zero_remaining_bytes( | |||
| 2980 | XFS_BUF_UNDONE(bp); | 2980 | XFS_BUF_UNDONE(bp); |
| 2981 | XFS_BUF_UNWRITE(bp); | 2981 | XFS_BUF_UNWRITE(bp); |
| 2982 | XFS_BUF_READ(bp); | 2982 | XFS_BUF_READ(bp); |
| 2983 | XFS_BUF_SET_ADDR(bp, XFS_FSB_TO_DB(ip, imap.br_startblock)); | 2983 | XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); |
| 2984 | xfsbdstrat(mp, bp); | 2984 | xfsbdstrat(mp, bp); |
| 2985 | error = xfs_iowait(bp); | 2985 | error = xfs_iowait(bp); |
| 2986 | if (error) { | 2986 | if (error) { |
| @@ -3186,7 +3186,7 @@ xfs_free_file_space( | |||
| 3186 | /* | 3186 | /* |
| 3187 | * issue the bunmapi() call to free the blocks | 3187 | * issue the bunmapi() call to free the blocks |
| 3188 | */ | 3188 | */ |
| 3189 | XFS_BMAP_INIT(&free_list, &firstfsb); | 3189 | xfs_bmap_init(&free_list, &firstfsb); |
| 3190 | error = xfs_bunmapi(tp, ip, startoffset_fsb, | 3190 | error = xfs_bunmapi(tp, ip, startoffset_fsb, |
| 3191 | endoffset_fsb - startoffset_fsb, | 3191 | endoffset_fsb - startoffset_fsb, |
| 3192 | 0, 2, &firstfsb, &free_list, NULL, &done); | 3192 | 0, 2, &firstfsb, &free_list, NULL, &done); |
diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h index 9a3274aecf83..937d7c435575 100644 --- a/include/asm-generic/bitops/__ffs.h +++ b/include/asm-generic/bitops/__ffs.h | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | * | 9 | * |
| 10 | * Undefined if no bit exists, so code should check against 0 first. | 10 | * Undefined if no bit exists, so code should check against 0 first. |
| 11 | */ | 11 | */ |
| 12 | static inline unsigned long __ffs(unsigned long word) | 12 | static __always_inline unsigned long __ffs(unsigned long word) |
| 13 | { | 13 | { |
| 14 | int num = 0; | 14 | int num = 0; |
| 15 | 15 | ||
diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h index be24465403d6..a60a7ccb6782 100644 --- a/include/asm-generic/bitops/__fls.h +++ b/include/asm-generic/bitops/__fls.h | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | * | 9 | * |
| 10 | * Undefined if no set bit exists, so code should check against 0 first. | 10 | * Undefined if no set bit exists, so code should check against 0 first. |
| 11 | */ | 11 | */ |
| 12 | static inline unsigned long __fls(unsigned long word) | 12 | static __always_inline unsigned long __fls(unsigned long word) |
| 13 | { | 13 | { |
| 14 | int num = BITS_PER_LONG - 1; | 14 | int num = BITS_PER_LONG - 1; |
| 15 | 15 | ||
diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h index 850859bc5069..0576d1f42f43 100644 --- a/include/asm-generic/bitops/fls.h +++ b/include/asm-generic/bitops/fls.h | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | 9 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | static inline int fls(int x) | 12 | static __always_inline int fls(int x) |
| 13 | { | 13 | { |
| 14 | int r = 32; | 14 | int r = 32; |
| 15 | 15 | ||
diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h index 86d403f8b256..b097cf8444e3 100644 --- a/include/asm-generic/bitops/fls64.h +++ b/include/asm-generic/bitops/fls64.h | |||
| @@ -15,7 +15,7 @@ | |||
| 15 | * at position 64. | 15 | * at position 64. |
| 16 | */ | 16 | */ |
| 17 | #if BITS_PER_LONG == 32 | 17 | #if BITS_PER_LONG == 32 |
| 18 | static inline int fls64(__u64 x) | 18 | static __always_inline int fls64(__u64 x) |
| 19 | { | 19 | { |
| 20 | __u32 h = x >> 32; | 20 | __u32 h = x >> 32; |
| 21 | if (h) | 21 | if (h) |
| @@ -23,7 +23,7 @@ static inline int fls64(__u64 x) | |||
| 23 | return fls(x); | 23 | return fls(x); |
| 24 | } | 24 | } |
| 25 | #elif BITS_PER_LONG == 64 | 25 | #elif BITS_PER_LONG == 64 |
| 26 | static inline int fls64(__u64 x) | 26 | static __always_inline int fls64(__u64 x) |
| 27 | { | 27 | { |
| 28 | if (x == 0) | 28 | if (x == 0) |
| 29 | return 0; | 29 | return 0; |
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 2f3c2d4ef73b..ea0ea1a4c36f 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
| @@ -48,6 +48,12 @@ extern struct fs_struct init_fs; | |||
| 48 | .posix_timers = LIST_HEAD_INIT(sig.posix_timers), \ | 48 | .posix_timers = LIST_HEAD_INIT(sig.posix_timers), \ |
| 49 | .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \ | 49 | .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \ |
| 50 | .rlim = INIT_RLIMITS, \ | 50 | .rlim = INIT_RLIMITS, \ |
| 51 | .cputime = { .totals = { \ | ||
| 52 | .utime = cputime_zero, \ | ||
| 53 | .stime = cputime_zero, \ | ||
| 54 | .sum_exec_runtime = 0, \ | ||
| 55 | .lock = __SPIN_LOCK_UNLOCKED(sig.cputime.totals.lock), \ | ||
| 56 | }, }, \ | ||
| 51 | } | 57 | } |
| 52 | 58 | ||
| 53 | extern struct nsproxy init_nsproxy; | 59 | extern struct nsproxy init_nsproxy; |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 4cae9b81a1f8..02e16d207304 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -293,6 +293,9 @@ extern void sched_show_task(struct task_struct *p); | |||
| 293 | extern void softlockup_tick(void); | 293 | extern void softlockup_tick(void); |
| 294 | extern void touch_softlockup_watchdog(void); | 294 | extern void touch_softlockup_watchdog(void); |
| 295 | extern void touch_all_softlockup_watchdogs(void); | 295 | extern void touch_all_softlockup_watchdogs(void); |
| 296 | extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write, | ||
| 297 | struct file *filp, void __user *buffer, | ||
| 298 | size_t *lenp, loff_t *ppos); | ||
| 296 | extern unsigned int softlockup_panic; | 299 | extern unsigned int softlockup_panic; |
| 297 | extern unsigned long sysctl_hung_task_check_count; | 300 | extern unsigned long sysctl_hung_task_check_count; |
| 298 | extern unsigned long sysctl_hung_task_timeout_secs; | 301 | extern unsigned long sysctl_hung_task_timeout_secs; |
| @@ -450,6 +453,7 @@ struct task_cputime { | |||
| 450 | cputime_t utime; | 453 | cputime_t utime; |
| 451 | cputime_t stime; | 454 | cputime_t stime; |
| 452 | unsigned long long sum_exec_runtime; | 455 | unsigned long long sum_exec_runtime; |
| 456 | spinlock_t lock; | ||
| 453 | }; | 457 | }; |
| 454 | /* Alternate field names when used to cache expirations. */ | 458 | /* Alternate field names when used to cache expirations. */ |
| 455 | #define prof_exp stime | 459 | #define prof_exp stime |
| @@ -465,7 +469,7 @@ struct task_cputime { | |||
| 465 | * used for thread group CPU clock calculations. | 469 | * used for thread group CPU clock calculations. |
| 466 | */ | 470 | */ |
| 467 | struct thread_group_cputime { | 471 | struct thread_group_cputime { |
| 468 | struct task_cputime *totals; | 472 | struct task_cputime totals; |
| 469 | }; | 473 | }; |
| 470 | 474 | ||
| 471 | /* | 475 | /* |
| @@ -2180,24 +2184,30 @@ static inline int spin_needbreak(spinlock_t *lock) | |||
| 2180 | * Thread group CPU time accounting. | 2184 | * Thread group CPU time accounting. |
| 2181 | */ | 2185 | */ |
| 2182 | 2186 | ||
| 2183 | extern int thread_group_cputime_alloc(struct task_struct *); | 2187 | static inline |
| 2184 | extern void thread_group_cputime(struct task_struct *, struct task_cputime *); | 2188 | void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) |
| 2185 | |||
| 2186 | static inline void thread_group_cputime_init(struct signal_struct *sig) | ||
| 2187 | { | 2189 | { |
| 2188 | sig->cputime.totals = NULL; | 2190 | struct task_cputime *totals = &tsk->signal->cputime.totals; |
| 2191 | unsigned long flags; | ||
| 2192 | |||
| 2193 | spin_lock_irqsave(&totals->lock, flags); | ||
| 2194 | *times = *totals; | ||
| 2195 | spin_unlock_irqrestore(&totals->lock, flags); | ||
| 2189 | } | 2196 | } |
| 2190 | 2197 | ||
| 2191 | static inline int thread_group_cputime_clone_thread(struct task_struct *curr) | 2198 | static inline void thread_group_cputime_init(struct signal_struct *sig) |
| 2192 | { | 2199 | { |
| 2193 | if (curr->signal->cputime.totals) | 2200 | sig->cputime.totals = (struct task_cputime){ |
| 2194 | return 0; | 2201 | .utime = cputime_zero, |
| 2195 | return thread_group_cputime_alloc(curr); | 2202 | .stime = cputime_zero, |
| 2203 | .sum_exec_runtime = 0, | ||
| 2204 | }; | ||
| 2205 | |||
| 2206 | spin_lock_init(&sig->cputime.totals.lock); | ||
| 2196 | } | 2207 | } |
| 2197 | 2208 | ||
| 2198 | static inline void thread_group_cputime_free(struct signal_struct *sig) | 2209 | static inline void thread_group_cputime_free(struct signal_struct *sig) |
| 2199 | { | 2210 | { |
| 2200 | free_percpu(sig->cputime.totals); | ||
| 2201 | } | 2211 | } |
| 2202 | 2212 | ||
| 2203 | /* | 2213 | /* |
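With the totals now embedded in signal_struct behind a spinlock, a reader gets a consistent snapshot from a single structure copy. A short usage sketch (the calling context is illustrative, not taken from this patch):

	struct task_cputime times;

	thread_group_cputime(current, &times);	/* copies totals under totals->lock */
	/* times.utime, times.stime and times.sum_exec_runtime now form one
	 * coherent snapshot instead of a sum over per-CPU copies */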
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index b36291130f22..3cd51e579ab1 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
| @@ -118,12 +118,24 @@ struct execute_work { | |||
| 118 | init_timer(&(_work)->timer); \ | 118 | init_timer(&(_work)->timer); \ |
| 119 | } while (0) | 119 | } while (0) |
| 120 | 120 | ||
| 121 | #define INIT_DELAYED_WORK_ON_STACK(_work, _func) \ | ||
| 122 | do { \ | ||
| 123 | INIT_WORK(&(_work)->work, (_func)); \ | ||
| 124 | init_timer_on_stack(&(_work)->timer); \ | ||
| 125 | } while (0) | ||
| 126 | |||
| 121 | #define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \ | 127 | #define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \ |
| 122 | do { \ | 128 | do { \ |
| 123 | INIT_WORK(&(_work)->work, (_func)); \ | 129 | INIT_WORK(&(_work)->work, (_func)); \ |
| 124 | init_timer_deferrable(&(_work)->timer); \ | 130 | init_timer_deferrable(&(_work)->timer); \ |
| 125 | } while (0) | 131 | } while (0) |
| 126 | 132 | ||
| 133 | #define INIT_DELAYED_WORK_ON_STACK(_work, _func) \ | ||
| 134 | do { \ | ||
| 135 | INIT_WORK(&(_work)->work, (_func)); \ | ||
| 136 | init_timer_on_stack(&(_work)->timer); \ | ||
| 137 | } while (0) | ||
| 138 | |||
| 127 | /** | 139 | /** |
| 128 | * work_pending - Find out whether a work item is currently pending | 140 | * work_pending - Find out whether a work item is currently pending |
| 129 | * @work: The work item in question | 141 | * @work: The work item in question |
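A usage sketch for the new on-stack initializer; the function and variable names are illustrative, and the teardown call is the usual pairing for init_timer_on_stack() rather than something added by this patch:

	struct delayed_work dwork;			/* lives on the caller's stack */

	INIT_DELAYED_WORK_ON_STACK(&dwork, my_work_fn);	/* my_work_fn is hypothetical */
	schedule_delayed_work(&dwork, HZ);
	/* ... wait for the work to finish ... */
	destroy_timer_on_stack(&dwork.timer);		/* pairs with init_timer_on_stack() */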
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 93a4edb148b5..dfa804958820 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h | |||
| @@ -108,7 +108,7 @@ | |||
| 108 | #define SND_SOC_DAPM_SWITCH_E(wname, wreg, wshift, winvert, wcontrols, \ | 108 | #define SND_SOC_DAPM_SWITCH_E(wname, wreg, wshift, winvert, wcontrols, \ |
| 109 | wevent, wflags) \ | 109 | wevent, wflags) \ |
| 110 | { .id = snd_soc_dapm_switch, .name = wname, .reg = wreg, .shift = wshift, \ | 110 | { .id = snd_soc_dapm_switch, .name = wname, .reg = wreg, .shift = wshift, \ |
| 111 | .invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1 \ | 111 | .invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1, \ |
| 112 | .event = wevent, .event_flags = wflags} | 112 | .event = wevent, .event_flags = wflags} |
| 113 | #define SND_SOC_DAPM_MUX_E(wname, wreg, wshift, winvert, wcontrols, \ | 113 | #define SND_SOC_DAPM_MUX_E(wname, wreg, wshift, winvert, wcontrols, \ |
| 114 | wevent, wflags) \ | 114 | wevent, wflags) \ |
diff --git a/init/Kconfig b/init/Kconfig index 2af83825634e..3be35f3a001b 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
| @@ -238,6 +238,98 @@ config AUDIT_TREE | |||
| 238 | def_bool y | 238 | def_bool y |
| 239 | depends on AUDITSYSCALL && INOTIFY | 239 | depends on AUDITSYSCALL && INOTIFY |
| 240 | 240 | ||
| 241 | menu "RCU Subsystem" | ||
| 242 | |||
| 243 | choice | ||
| 244 | prompt "RCU Implementation" | ||
| 245 | default CLASSIC_RCU | ||
| 246 | |||
| 247 | config CLASSIC_RCU | ||
| 248 | bool "Classic RCU" | ||
| 249 | help | ||
| 250 | This option selects the classic RCU implementation that is | ||
| 251 | designed for best read-side performance on non-realtime | ||
| 252 | systems. | ||
| 253 | |||
| 254 | Select this option if you are unsure. | ||
| 255 | |||
| 256 | config TREE_RCU | ||
| 257 | bool "Tree-based hierarchical RCU" | ||
| 258 | help | ||
| 259 | This option selects the RCU implementation that is | ||
| 260 | designed for very large SMP systems with hundreds or | ||
| 261 | thousands of CPUs. | ||
| 262 | |||
| 263 | config PREEMPT_RCU | ||
| 264 | bool "Preemptible RCU" | ||
| 265 | depends on PREEMPT | ||
| 266 | help | ||
| 267 | This option reduces the latency of the kernel by making certain | ||
| 268 | RCU sections preemptible. Normally RCU code is non-preemptible, if | ||
| 269 | this option is selected then read-only RCU sections become | ||
| 270 | preemptible. This helps latency, but may expose bugs due to | ||
| 271 | now-naive assumptions about each RCU read-side critical section | ||
| 272 | remaining on a given CPU through its execution. | ||
| 273 | |||
| 274 | endchoice | ||
| 275 | |||
| 276 | config RCU_TRACE | ||
| 277 | bool "Enable tracing for RCU" | ||
| 278 | depends on TREE_RCU || PREEMPT_RCU | ||
| 279 | help | ||
| 280 | This option provides tracing in RCU, which presents stats | ||
| 281 | in debugfs for debugging the RCU implementation. | ||
| 282 | |||
| 283 | Say Y here if you want to enable RCU tracing. | ||
| 284 | Say N if you are unsure. | ||
| 285 | |||
| 286 | config RCU_FANOUT | ||
| 287 | int "Tree-based hierarchical RCU fanout value" | ||
| 288 | range 2 64 if 64BIT | ||
| 289 | range 2 32 if !64BIT | ||
| 290 | depends on TREE_RCU | ||
| 291 | default 64 if 64BIT | ||
| 292 | default 32 if !64BIT | ||
| 293 | help | ||
| 294 | This option controls the fanout of hierarchical implementations | ||
| 295 | of RCU, allowing RCU to work efficiently on machines with | ||
| 296 | large numbers of CPUs. This value must be at least the cube | ||
| 297 | root of NR_CPUS, which allows NR_CPUS up to 32,768 for 32-bit | ||
| 298 | systems and up to 262,144 for 64-bit systems. | ||
| 299 | |||
| 300 | Select a specific number if testing RCU itself. | ||
| 301 | Take the default if unsure. | ||
| 302 | |||
| 303 | config RCU_FANOUT_EXACT | ||
| 304 | bool "Disable tree-based hierarchical RCU auto-balancing" | ||
| 305 | depends on TREE_RCU | ||
| 306 | default n | ||
| 307 | help | ||
| 308 | This option forces use of the exact RCU_FANOUT value specified, | ||
| 309 | regardless of imbalances in the hierarchy. This is useful for | ||
| 310 | testing RCU itself, and might one day be useful on systems with | ||
| 311 | strong NUMA behavior. | ||
| 312 | |||
| 313 | Without RCU_FANOUT_EXACT, the code will balance the hierarchy. | ||
| 314 | |||
| 315 | Say N if unsure. | ||
| 316 | |||
| 317 | config TREE_RCU_TRACE | ||
| 318 | def_bool RCU_TRACE && TREE_RCU | ||
| 319 | select DEBUG_FS | ||
| 320 | help | ||
| 321 | This option provides tracing for the TREE_RCU implementation, | ||
| 322 | permitting Makefile to trivially select kernel/rcutree_trace.c. | ||
| 323 | |||
| 324 | config PREEMPT_RCU_TRACE | ||
| 325 | def_bool RCU_TRACE && PREEMPT_RCU | ||
| 326 | select DEBUG_FS | ||
| 327 | help | ||
| 328 | This option provides tracing for the PREEMPT_RCU implementation, | ||
| 329 | permitting Makefile to trivially select kernel/rcupreempt_trace.c. | ||
| 330 | |||
| 331 | endmenu # "RCU Subsystem" | ||
| 332 | |||
| 241 | config IKCONFIG | 333 | config IKCONFIG |
| 242 | tristate "Kernel .config support" | 334 | tristate "Kernel .config support" |
| 243 | ---help--- | 335 | ---help--- |
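A worked check of the RCU_FANOUT limits quoted in the help text above, assuming the three-level tree those limits imply:

	NR_CPUS <= RCU_FANOUT^3, i.e. RCU_FANOUT must be at least cbrt(NR_CPUS)
	32-bit:  32^3 = 32 * 32 * 32 =  32,768
	64-bit:  64^3 = 64 * 64 * 64 = 262,144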
| @@ -972,90 +1064,3 @@ source "block/Kconfig" | |||
| 972 | config PREEMPT_NOTIFIERS | 1064 | config PREEMPT_NOTIFIERS |
| 973 | bool | 1065 | bool |
| 974 | 1066 | ||
| 975 | choice | ||
| 976 | prompt "RCU Implementation" | ||
| 977 | default CLASSIC_RCU | ||
| 978 | |||
| 979 | config CLASSIC_RCU | ||
| 980 | bool "Classic RCU" | ||
| 981 | help | ||
| 982 | This option selects the classic RCU implementation that is | ||
| 983 | designed for best read-side performance on non-realtime | ||
| 984 | systems. | ||
| 985 | |||
| 986 | Select this option if you are unsure. | ||
| 987 | |||
| 988 | config TREE_RCU | ||
| 989 | bool "Tree-based hierarchical RCU" | ||
| 990 | help | ||
| 991 | This option selects the RCU implementation that is | ||
| 991 | designed for very large SMP systems with hundreds or | ||
| 993 | thousands of CPUs. | ||
| 994 | |||
| 995 | config PREEMPT_RCU | ||
| 996 | bool "Preemptible RCU" | ||
| 997 | depends on PREEMPT | ||
| 998 | help | ||
| 999 | This option reduces the latency of the kernel by making certain | ||
| 1000 | RCU sections preemptible. Normally RCU code is non-preemptible; if | ||
| 1001 | this option is selected, then read-only RCU sections become | ||
| 1002 | preemptible. This helps latency, but may expose bugs due to | ||
| 1003 | now-naive assumptions about each RCU read-side critical section | ||
| 1004 | remaining on a given CPU through its execution. | ||
| 1005 | |||
| 1006 | endchoice | ||
| 1007 | |||
| 1008 | config RCU_TRACE | ||
| 1009 | bool "Enable tracing for RCU" | ||
| 1010 | depends on TREE_RCU || PREEMPT_RCU | ||
| 1011 | help | ||
| 1012 | This option provides tracing in RCU, which presents stats | ||
| 1013 | in debugfs for debugging the RCU implementation. | ||
| 1014 | |||
| 1015 | Say Y here if you want to enable RCU tracing. | ||
| 1016 | Say N if you are unsure. | ||
| 1017 | |||
| 1018 | config RCU_FANOUT | ||
| 1019 | int "Tree-based hierarchical RCU fanout value" | ||
| 1020 | range 2 64 if 64BIT | ||
| 1021 | range 2 32 if !64BIT | ||
| 1022 | depends on TREE_RCU | ||
| 1023 | default 64 if 64BIT | ||
| 1024 | default 32 if !64BIT | ||
| 1025 | help | ||
| 1026 | This option controls the fanout of hierarchical implementations | ||
| 1027 | of RCU, allowing RCU to work efficiently on machines with | ||
| 1028 | large numbers of CPUs. This value must be at least the cube | ||
| 1029 | root of NR_CPUS, which allows NR_CPUS up to 32,768 for 32-bit | ||
| 1030 | systems and up to 262,144 for 64-bit systems. | ||
| 1031 | |||
| 1032 | Select a specific number if testing RCU itself. | ||
| 1033 | Take the default if unsure. | ||
| 1034 | |||
| 1035 | config RCU_FANOUT_EXACT | ||
| 1036 | bool "Disable tree-based hierarchical RCU auto-balancing" | ||
| 1037 | depends on TREE_RCU | ||
| 1038 | default n | ||
| 1039 | help | ||
| 1040 | This option forces use of the exact RCU_FANOUT value specified, | ||
| 1041 | regardless of imbalances in the hierarchy. This is useful for | ||
| 1042 | testing RCU itself, and might one day be useful on systems with | ||
| 1043 | strong NUMA behavior. | ||
| 1044 | |||
| 1045 | Without RCU_FANOUT_EXACT, the code will balance the hierarchy. | ||
| 1046 | |||
| 1047 | Say N if unsure. | ||
| 1048 | |||
| 1049 | config TREE_RCU_TRACE | ||
| 1050 | def_bool RCU_TRACE && TREE_RCU | ||
| 1051 | select DEBUG_FS | ||
| 1052 | help | ||
| 1053 | This option provides tracing for the TREE_RCU implementation, | ||
| 1054 | permitting Makefile to trivially select kernel/rcutree_trace.c. | ||
| 1055 | |||
| 1056 | config PREEMPT_RCU_TRACE | ||
| 1057 | def_bool RCU_TRACE && PREEMPT_RCU | ||
| 1058 | select DEBUG_FS | ||
| 1059 | help | ||
| 1060 | This option provides tracing for the PREEMPT_RCU implementation, | ||
| 1061 | permitting Makefile to trivially select kernel/rcupreempt_trace.c. | ||
diff --git a/kernel/fork.c b/kernel/fork.c index bf0cef8bbdf2..242a706e7721 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -817,17 +817,17 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig) | |||
| 817 | static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | 817 | static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) |
| 818 | { | 818 | { |
| 819 | struct signal_struct *sig; | 819 | struct signal_struct *sig; |
| 820 | int ret; | ||
| 821 | 820 | ||
| 822 | if (clone_flags & CLONE_THREAD) { | 821 | if (clone_flags & CLONE_THREAD) { |
| 823 | ret = thread_group_cputime_clone_thread(current); | 822 | atomic_inc(¤t->signal->count); |
| 824 | if (likely(!ret)) { | 823 | atomic_inc(¤t->signal->live); |
| 825 | atomic_inc(¤t->signal->count); | 824 | return 0; |
| 826 | atomic_inc(¤t->signal->live); | ||
| 827 | } | ||
| 828 | return ret; | ||
| 829 | } | 825 | } |
| 830 | sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); | 826 | sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); |
| 827 | |||
| 828 | if (sig) | ||
| 829 | posix_cpu_timers_init_group(sig); | ||
| 830 | |||
| 831 | tsk->signal = sig; | 831 | tsk->signal = sig; |
| 832 | if (!sig) | 832 | if (!sig) |
| 833 | return -ENOMEM; | 833 | return -ENOMEM; |
| @@ -864,8 +864,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
| 864 | memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); | 864 | memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); |
| 865 | task_unlock(current->group_leader); | 865 | task_unlock(current->group_leader); |
| 866 | 866 | ||
| 867 | posix_cpu_timers_init_group(sig); | ||
| 868 | |||
| 869 | acct_init_pacct(&sig->pacct); | 867 | acct_init_pacct(&sig->pacct); |
| 870 | 868 | ||
| 871 | tty_audit_fork(sig); | 869 | tty_audit_fork(sig); |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 2dc30c59c5fd..f33afb0407bc 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
| @@ -614,7 +614,9 @@ void clock_was_set(void) | |||
| 614 | */ | 614 | */ |
| 615 | void hres_timers_resume(void) | 615 | void hres_timers_resume(void) |
| 616 | { | 616 | { |
| 617 | /* Retrigger the CPU local events: */ | 617 | WARN_ONCE(!irqs_disabled(), |
| 618 | KERN_INFO "hres_timers_resume() called with IRQs enabled!"); | ||
| 619 | |||
| 618 | retrigger_next_event(NULL); | 620 | retrigger_next_event(NULL); |
| 619 | } | 621 | } |
| 620 | 622 | ||
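The added WARN_ONCE() only documents the expectation that hres_timers_resume() runs with interrupts disabled; it does not enforce it. A hedged sketch of a hypothetical caller that satisfies the check (the wrapper name is invented):

/* Hypothetical resume-path caller: disable interrupts around
 * hres_timers_resume(), as the new WARN_ONCE() expects. */
static void example_timekeeping_resume(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hres_timers_resume();	/* retriggers the CPU-local events */
	local_irq_restore(flags);
}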
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 157de3a47832..fa07da94d7be 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
| @@ -10,76 +10,6 @@ | |||
| 10 | #include <linux/kernel_stat.h> | 10 | #include <linux/kernel_stat.h> |
| 11 | 11 | ||
| 12 | /* | 12 | /* |
| 13 | * Allocate the thread_group_cputime structure appropriately and fill in the | ||
| 14 | * current values of the fields. Called from copy_signal() via | ||
| 15 | * thread_group_cputime_clone_thread() when adding a second or subsequent | ||
| 16 | * thread to a thread group. Assumes interrupts are enabled when called. | ||
| 17 | */ | ||
| 18 | int thread_group_cputime_alloc(struct task_struct *tsk) | ||
| 19 | { | ||
| 20 | struct signal_struct *sig = tsk->signal; | ||
| 21 | struct task_cputime *cputime; | ||
| 22 | |||
| 23 | /* | ||
| 24 | * If we have multiple threads and we don't already have a | ||
| 25 | * per-CPU task_cputime struct (checked in the caller), allocate | ||
| 26 | * one and fill it in with the times accumulated so far. We may | ||
| 27 | * race with another thread so recheck after we pick up the sighand | ||
| 28 | * lock. | ||
| 29 | */ | ||
| 30 | cputime = alloc_percpu(struct task_cputime); | ||
| 31 | if (cputime == NULL) | ||
| 32 | return -ENOMEM; | ||
| 33 | spin_lock_irq(&tsk->sighand->siglock); | ||
| 34 | if (sig->cputime.totals) { | ||
| 35 | spin_unlock_irq(&tsk->sighand->siglock); | ||
| 36 | free_percpu(cputime); | ||
| 37 | return 0; | ||
| 38 | } | ||
| 39 | sig->cputime.totals = cputime; | ||
| 40 | cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id()); | ||
| 41 | cputime->utime = tsk->utime; | ||
| 42 | cputime->stime = tsk->stime; | ||
| 43 | cputime->sum_exec_runtime = tsk->se.sum_exec_runtime; | ||
| 44 | spin_unlock_irq(&tsk->sighand->siglock); | ||
| 45 | return 0; | ||
| 46 | } | ||
| 47 | |||
| 48 | /** | ||
| 49 | * thread_group_cputime - Sum the thread group time fields across all CPUs. | ||
| 50 | * | ||
| 51 | * @tsk: The task we use to identify the thread group. | ||
| 52 | * @times: task_cputime structure in which we return the summed fields. | ||
| 53 | * | ||
| 54 | * Walk the list of CPUs to sum the per-CPU time fields in the thread group | ||
| 55 | * time structure. | ||
| 56 | */ | ||
| 57 | void thread_group_cputime( | ||
| 58 | struct task_struct *tsk, | ||
| 59 | struct task_cputime *times) | ||
| 60 | { | ||
| 61 | struct task_cputime *totals, *tot; | ||
| 62 | int i; | ||
| 63 | |||
| 64 | totals = tsk->signal->cputime.totals; | ||
| 65 | if (!totals) { | ||
| 66 | times->utime = tsk->utime; | ||
| 67 | times->stime = tsk->stime; | ||
| 68 | times->sum_exec_runtime = tsk->se.sum_exec_runtime; | ||
| 69 | return; | ||
| 70 | } | ||
| 71 | |||
| 72 | times->stime = times->utime = cputime_zero; | ||
| 73 | times->sum_exec_runtime = 0; | ||
| 74 | for_each_possible_cpu(i) { | ||
| 75 | tot = per_cpu_ptr(totals, i); | ||
| 76 | times->utime = cputime_add(times->utime, tot->utime); | ||
| 77 | times->stime = cputime_add(times->stime, tot->stime); | ||
| 78 | times->sum_exec_runtime += tot->sum_exec_runtime; | ||
| 79 | } | ||
| 80 | } | ||
| 81 | |||
| 82 | /* | ||
| 83 | * Called after updating RLIMIT_CPU to set timer expiration if necessary. | 13 | * Called after updating RLIMIT_CPU to set timer expiration if necessary. |
| 84 | */ | 14 | */ |
| 85 | void update_rlimit_cpu(unsigned long rlim_new) | 15 | void update_rlimit_cpu(unsigned long rlim_new) |
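With the per-CPU totals removed, the group-time read that previously walked every possible CPU reduces to copying one structure under its lock. A sketch of the presumed replacement, assuming (as the kernel/sched_stats.h hunk below suggests) that signal_struct now embeds the totals directly and task_cputime carries a spinlock:

/* Sketch only: the shape implied by spin_lock(&times->lock) in
 * kernel/sched_stats.h, not necessarily the literal replacement. */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct task_cputime *totals = &tsk->signal->cputime.totals;
	unsigned long flags;

	spin_lock_irqsave(&totals->lock, flags);
	*times = *totals;	/* one coherent snapshot of utime/stime/runtime */
	spin_unlock_irqrestore(&totals->lock, flags);
}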
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c index 490934fc7ac3..bd5a9003497c 100644 --- a/kernel/rcuclassic.c +++ b/kernel/rcuclassic.c | |||
| @@ -716,7 +716,7 @@ void rcu_check_callbacks(int cpu, int user) | |||
| 716 | raise_rcu_softirq(); | 716 | raise_rcu_softirq(); |
| 717 | } | 717 | } |
| 718 | 718 | ||
| 719 | static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, | 719 | static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, |
| 720 | struct rcu_data *rdp) | 720 | struct rcu_data *rdp) |
| 721 | { | 721 | { |
| 722 | unsigned long flags; | 722 | unsigned long flags; |
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index f2d8638e6c60..b2fd602a6f6f 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
| @@ -1314,7 +1314,7 @@ int rcu_needs_cpu(int cpu) | |||
| 1314 | * access due to the fact that this CPU cannot possibly have any RCU | 1314 | * access due to the fact that this CPU cannot possibly have any RCU |
| 1315 | * callbacks in flight yet. | 1315 | * callbacks in flight yet. |
| 1316 | */ | 1316 | */ |
| 1317 | static void | 1317 | static void __cpuinit |
| 1318 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp) | 1318 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp) |
| 1319 | { | 1319 | { |
| 1320 | unsigned long flags; | 1320 | unsigned long flags; |
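The __cpuinit annotations above keep these init helpers in the same, potentially discardable, section as their CPU-hotplug callers. A simplified and partly assumed sketch of the notifier pattern that reaches them:

/* Simplified example notifier (details assumed): it carries the same
 * __cpuinit marking as rcu_init_percpu_data(), so both can be dropped
 * together when CPU hotplug support is not built in. */
static int __cpuinit example_rcu_cpu_notify(struct notifier_block *self,
					    unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* per-CPU RCU data for (long)hcpu would be set up here */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}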
diff --git a/kernel/relay.c b/kernel/relay.c index 09ac2008f77b..9d79b7854fa6 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
| @@ -663,8 +663,10 @@ int relay_late_setup_files(struct rchan *chan, | |||
| 663 | 663 | ||
| 664 | mutex_lock(&relay_channels_mutex); | 664 | mutex_lock(&relay_channels_mutex); |
| 665 | /* Is chan already set up? */ | 665 | /* Is chan already set up? */ |
| 666 | if (unlikely(chan->has_base_filename)) | 666 | if (unlikely(chan->has_base_filename)) { |
| 667 | mutex_unlock(&relay_channels_mutex); | ||
| 667 | return -EEXIST; | 668 | return -EEXIST; |
| 669 | } | ||
| 668 | chan->has_base_filename = 1; | 670 | chan->has_base_filename = 1; |
| 669 | chan->parent = parent; | 671 | chan->parent = parent; |
| 670 | curr_cpu = get_cpu(); | 672 | curr_cpu = get_cpu(); |
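The relay fix above adds the missing mutex_unlock() on the early -EEXIST return. An equivalent, purely illustrative way to structure the same check so that every exit shares one unlock site (the helper name is hypothetical and the real setup work is elided):

/* Hypothetical restructuring with a single unlock label. */
static int example_late_setup(struct rchan *chan)
{
	int err = 0;

	mutex_lock(&relay_channels_mutex);
	if (unlikely(chan->has_base_filename)) {
		err = -EEXIST;
		goto out_unlock;	/* previously leaked the mutex */
	}
	chan->has_base_filename = 1;
	/* ... create the per-CPU buffer files ... */
out_unlock:
	mutex_unlock(&relay_channels_mutex);
	return err;
}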
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index f2773b5d1226..8ab0cef8ecab 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h | |||
| @@ -296,6 +296,7 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next) | |||
| 296 | static inline void account_group_user_time(struct task_struct *tsk, | 296 | static inline void account_group_user_time(struct task_struct *tsk, |
| 297 | cputime_t cputime) | 297 | cputime_t cputime) |
| 298 | { | 298 | { |
| 299 | struct task_cputime *times; | ||
| 299 | struct signal_struct *sig; | 300 | struct signal_struct *sig; |
| 300 | 301 | ||
| 301 | /* tsk == current, ensure it is safe to use ->signal */ | 302 | /* tsk == current, ensure it is safe to use ->signal */ |
| @@ -303,13 +304,11 @@ static inline void account_group_user_time(struct task_struct *tsk, | |||
| 303 | return; | 304 | return; |
| 304 | 305 | ||
| 305 | sig = tsk->signal; | 306 | sig = tsk->signal; |
| 306 | if (sig->cputime.totals) { | 307 | times = &sig->cputime.totals; |
| 307 | struct task_cputime *times; | ||
| 308 | 308 | ||
| 309 | times = per_cpu_ptr(sig->cputime.totals, get_cpu()); | 309 | spin_lock(×->lock); |
| 310 | times->utime = cputime_add(times->utime, cputime); | 310 | times->utime = cputime_add(times->utime, cputime); |
| 311 | put_cpu_no_resched(); | 311 | spin_unlock(×->lock); |
| 312 | } | ||
| 313 | } | 312 | } |
| 314 | 313 | ||
| 315 | /** | 314 | /** |
| @@ -325,6 +324,7 @@ static inline void account_group_user_time(struct task_struct *tsk, | |||
| 325 | static inline void account_group_system_time(struct task_struct *tsk, | 324 | static inline void account_group_system_time(struct task_struct *tsk, |
| 326 | cputime_t cputime) | 325 | cputime_t cputime) |
| 327 | { | 326 | { |
| 327 | struct task_cputime *times; | ||
| 328 | struct signal_struct *sig; | 328 | struct signal_struct *sig; |
| 329 | 329 | ||
| 330 | /* tsk == current, ensure it is safe to use ->signal */ | 330 | /* tsk == current, ensure it is safe to use ->signal */ |
| @@ -332,13 +332,11 @@ static inline void account_group_system_time(struct task_struct *tsk, | |||
| 332 | return; | 332 | return; |
| 333 | 333 | ||
| 334 | sig = tsk->signal; | 334 | sig = tsk->signal; |
| 335 | if (sig->cputime.totals) { | 335 | times = &sig->cputime.totals; |
| 336 | struct task_cputime *times; | ||
| 337 | 336 | ||
| 338 | times = per_cpu_ptr(sig->cputime.totals, get_cpu()); | 337 | spin_lock(×->lock); |
| 339 | times->stime = cputime_add(times->stime, cputime); | 338 | times->stime = cputime_add(times->stime, cputime); |
| 340 | put_cpu_no_resched(); | 339 | spin_unlock(×->lock); |
| 341 | } | ||
| 342 | } | 340 | } |
| 343 | 341 | ||
| 344 | /** | 342 | /** |
| @@ -354,6 +352,7 @@ static inline void account_group_system_time(struct task_struct *tsk, | |||
| 354 | static inline void account_group_exec_runtime(struct task_struct *tsk, | 352 | static inline void account_group_exec_runtime(struct task_struct *tsk, |
| 355 | unsigned long long ns) | 353 | unsigned long long ns) |
| 356 | { | 354 | { |
| 355 | struct task_cputime *times; | ||
| 357 | struct signal_struct *sig; | 356 | struct signal_struct *sig; |
| 358 | 357 | ||
| 359 | sig = tsk->signal; | 358 | sig = tsk->signal; |
| @@ -362,11 +361,9 @@ static inline void account_group_exec_runtime(struct task_struct *tsk, | |||
| 362 | if (unlikely(!sig)) | 361 | if (unlikely(!sig)) |
| 363 | return; | 362 | return; |
| 364 | 363 | ||
| 365 | if (sig->cputime.totals) { | 364 | times = &sig->cputime.totals; |
| 366 | struct task_cputime *times; | ||
| 367 | 365 | ||
| 368 | times = per_cpu_ptr(sig->cputime.totals, get_cpu()); | 366 | spin_lock(×->lock); |
| 369 | times->sum_exec_runtime += ns; | 367 | times->sum_exec_runtime += ns; |
| 370 | put_cpu_no_resched(); | 368 | spin_unlock(×->lock); |
| 371 | } | ||
| 372 | } | 369 | } |
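All three accounting helpers now serialize on a lock inside the shared totals instead of touching per-CPU counters. The companion header change is not shown in this section, but judging from the accesses above (sig->cputime.totals and times->lock) it presumably looks roughly like this:

/* Assumed companion definitions; field names inferred from the hunk. */
struct task_cputime {
	cputime_t		utime;
	cputime_t		stime;
	unsigned long long	sum_exec_runtime;
	spinlock_t		lock;	/* serializes the three fields above */
};

struct thread_group_cputime {
	struct task_cputime	totals;	/* was: struct task_cputime *totals */
};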
diff --git a/kernel/softlockup.c b/kernel/softlockup.c index d9188c66278a..85d5a2455103 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/lockdep.h> | 16 | #include <linux/lockdep.h> |
| 17 | #include <linux/notifier.h> | 17 | #include <linux/notifier.h> |
| 18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 19 | #include <linux/sysctl.h> | ||
| 19 | 20 | ||
| 20 | #include <asm/irq_regs.h> | 21 | #include <asm/irq_regs.h> |
| 21 | 22 | ||
| @@ -88,6 +89,14 @@ void touch_all_softlockup_watchdogs(void) | |||
| 88 | } | 89 | } |
| 89 | EXPORT_SYMBOL(touch_all_softlockup_watchdogs); | 90 | EXPORT_SYMBOL(touch_all_softlockup_watchdogs); |
| 90 | 91 | ||
| 92 | int proc_dosoftlockup_thresh(struct ctl_table *table, int write, | ||
| 93 | struct file *filp, void __user *buffer, | ||
| 94 | size_t *lenp, loff_t *ppos) | ||
| 95 | { | ||
| 96 | touch_all_softlockup_watchdogs(); | ||
| 97 | return proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos); | ||
| 98 | } | ||
| 99 | |||
| 91 | /* | 100 | /* |
| 92 | * This callback runs from the timer interrupt, and checks | 101 | * This callback runs from the timer interrupt, and checks |
| 93 | * whether the watchdog thread has hung or not: | 102 | * whether the watchdog thread has hung or not: |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 368d1638ee78..790f9d785663 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -809,7 +809,7 @@ static struct ctl_table kern_table[] = { | |||
| 809 | .data = &softlockup_thresh, | 809 | .data = &softlockup_thresh, |
| 810 | .maxlen = sizeof(int), | 810 | .maxlen = sizeof(int), |
| 811 | .mode = 0644, | 811 | .mode = 0644, |
| 812 | .proc_handler = &proc_dointvec_minmax, | 812 | .proc_handler = &proc_dosoftlockup_thresh, |
| 813 | .strategy = &sysctl_intvec, | 813 | .strategy = &sysctl_intvec, |
| 814 | .extra1 = &neg_one, | 814 | .extra1 = &neg_one, |
| 815 | .extra2 = &sixty, | 815 | .extra2 = &sixty, |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 1b6c05bd0d0a..d3f1ef4d5cbe 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
| @@ -134,7 +134,7 @@ __setup("nohz=", setup_tick_nohz); | |||
| 134 | * value. We do this unconditionally on any cpu, as we don't know whether the | 134 | * value. We do this unconditionally on any cpu, as we don't know whether the |
| 135 | * cpu, which has the update task assigned is in a long sleep. | 135 | * cpu, which has the update task assigned is in a long sleep. |
| 136 | */ | 136 | */ |
| 137 | void tick_nohz_update_jiffies(void) | 137 | static void tick_nohz_update_jiffies(void) |
| 138 | { | 138 | { |
| 139 | int cpu = smp_processor_id(); | 139 | int cpu = smp_processor_id(); |
| 140 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | 140 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 2f445833ae37..1f0c509b40d3 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -971,6 +971,8 @@ undo: | |||
| 971 | } | 971 | } |
| 972 | 972 | ||
| 973 | #ifdef CONFIG_SMP | 973 | #ifdef CONFIG_SMP |
| 974 | static struct workqueue_struct *work_on_cpu_wq __read_mostly; | ||
| 975 | |||
| 974 | struct work_for_cpu { | 976 | struct work_for_cpu { |
| 975 | struct work_struct work; | 977 | struct work_struct work; |
| 976 | long (*fn)(void *); | 978 | long (*fn)(void *); |
| @@ -991,8 +993,8 @@ static void do_work_for_cpu(struct work_struct *w) | |||
| 991 | * @fn: the function to run | 993 | * @fn: the function to run |
| 992 | * @arg: the function arg | 994 | * @arg: the function arg |
| 993 | * | 995 | * |
| 994 | * This will return -EINVAL in the cpu is not online, or the return value | 996 | * This will return the value @fn returns. |
| 995 | * of @fn otherwise. | 997 | * It is up to the caller to ensure that the cpu doesn't go offline. |
| 996 | */ | 998 | */ |
| 997 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) | 999 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) |
| 998 | { | 1000 | { |
| @@ -1001,14 +1003,8 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) | |||
| 1001 | INIT_WORK(&wfc.work, do_work_for_cpu); | 1003 | INIT_WORK(&wfc.work, do_work_for_cpu); |
| 1002 | wfc.fn = fn; | 1004 | wfc.fn = fn; |
| 1003 | wfc.arg = arg; | 1005 | wfc.arg = arg; |
| 1004 | get_online_cpus(); | 1006 | queue_work_on(cpu, work_on_cpu_wq, &wfc.work); |
| 1005 | if (unlikely(!cpu_online(cpu))) | 1007 | flush_work(&wfc.work); |
| 1006 | wfc.ret = -EINVAL; | ||
| 1007 | else { | ||
| 1008 | schedule_work_on(cpu, &wfc.work); | ||
| 1009 | flush_work(&wfc.work); | ||
| 1010 | } | ||
| 1011 | put_online_cpus(); | ||
| 1012 | 1008 | ||
| 1013 | return wfc.ret; | 1009 | return wfc.ret; |
| 1014 | } | 1010 | } |
| @@ -1025,4 +1021,8 @@ void __init init_workqueues(void) | |||
| 1025 | hotcpu_notifier(workqueue_cpu_callback, 0); | 1021 | hotcpu_notifier(workqueue_cpu_callback, 0); |
| 1026 | keventd_wq = create_workqueue("events"); | 1022 | keventd_wq = create_workqueue("events"); |
| 1027 | BUG_ON(!keventd_wq); | 1023 | BUG_ON(!keventd_wq); |
| 1024 | #ifdef CONFIG_SMP | ||
| 1025 | work_on_cpu_wq = create_workqueue("work_on_cpu"); | ||
| 1026 | BUG_ON(!work_on_cpu_wq); | ||
| 1027 | #endif | ||
| 1028 | } | 1028 | } |
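Since work_on_cpu() no longer checks cpu_online() or returns -EINVAL itself, pinning the CPU is now the caller's job, as the updated kerneldoc says. A hedged usage sketch (the callback and wrapper names are invented):

/* Hypothetical caller: run fn on a specific CPU and keep that CPU
 * online for the duration, as the new comment requires. */
static long example_fn(void *arg)
{
	return 0;	/* runs in process context on the target CPU */
}

static long example_run_on(unsigned int cpu)
{
	long ret;

	get_online_cpus();
	if (cpu_online(cpu))
		ret = work_on_cpu(cpu, example_fn, NULL);
	else
		ret = -EINVAL;
	put_online_cpus();
	return ret;
}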
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 4c9ae6085c75..29044f500269 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -570,6 +570,15 @@ config DEBUG_NOTIFIERS | |||
| 570 | This is a relatively cheap check but if you care about maximum | 570 | This is a relatively cheap check but if you care about maximum |
| 571 | performance, say N. | 571 | performance, say N. |
| 572 | 572 | ||
| 573 | # | ||
| 574 | # Select this config option from the architecture Kconfig, if it | ||
| 575 | # is preferred to always offer frame pointers as a config | ||
| 576 | # option on the architecture (regardless of DEBUG_KERNEL): | ||
| 577 | # | ||
| 578 | config ARCH_WANT_FRAME_POINTERS | ||
| 579 | bool | ||
| 580 | help | ||
| 581 | |||
| 573 | config FRAME_POINTER | 582 | config FRAME_POINTER |
| 574 | bool "Compile the kernel with frame pointers" | 583 | bool "Compile the kernel with frame pointers" |
| 575 | depends on DEBUG_KERNEL && \ | 584 | depends on DEBUG_KERNEL && \ |
| @@ -633,19 +642,6 @@ config RCU_TORTURE_TEST_RUNNABLE | |||
| 633 | 642 | ||
| 634 | config RCU_CPU_STALL_DETECTOR | 643 | config RCU_CPU_STALL_DETECTOR |
| 635 | bool "Check for stalled CPUs delaying RCU grace periods" | 644 | bool "Check for stalled CPUs delaying RCU grace periods" |
| 636 | depends on CLASSIC_RCU | ||
| 637 | default n | ||
| 638 | help | ||
| 639 | This option causes RCU to printk information on which | ||
| 640 | CPUs are delaying the current grace period, but only when | ||
| 641 | the grace period extends for excessive time periods. | ||
| 642 | |||
| 643 | Say Y if you want RCU to perform such checks. | ||
| 644 | |||
| 645 | Say N if you are unsure. | ||
| 646 | |||
| 647 | config RCU_CPU_STALL_DETECTOR | ||
| 648 | bool "Check for stalled CPUs delaying RCU grace periods" | ||
| 649 | depends on CLASSIC_RCU || TREE_RCU | 645 | depends on CLASSIC_RCU || TREE_RCU |
| 650 | default n | 646 | default n |
| 651 | help | 647 | help |
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig index 6e3a1848447c..82b9bddcdcd6 100644 --- a/sound/pci/Kconfig +++ b/sound/pci/Kconfig | |||
| @@ -744,8 +744,8 @@ config SND_VIRTUOSO | |||
| 744 | select SND_OXYGEN_LIB | 744 | select SND_OXYGEN_LIB |
| 745 | help | 745 | help |
| 746 | Say Y here to include support for sound cards based on the | 746 | Say Y here to include support for sound cards based on the |
| 747 | Asus AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X and | 747 | Asus AV100/AV200 chips, i.e., Xonar D1, DX, D2 and D2X. |
| 748 | HDAV1.3 (Deluxe). | 748 | Support for the HDAV1.3 (Deluxe) is very experimental. |
| 749 | 749 | ||
| 750 | To compile this driver as a module, choose M here: the module | 750 | To compile this driver as a module, choose M here: the module |
| 751 | will be called snd-virtuoso. | 751 | will be called snd-virtuoso. |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 82dd08431970..5d249a547fbf 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -8478,6 +8478,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = { | |||
| 8478 | SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_ASUS_EEE1601), | 8478 | SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_ASUS_EEE1601), |
| 8479 | SND_PCI_QUIRK(0x105b, 0x0ce8, "Foxconn P35AX-S", ALC883_6ST_DIG), | 8479 | SND_PCI_QUIRK(0x105b, 0x0ce8, "Foxconn P35AX-S", ALC883_6ST_DIG), |
| 8480 | SND_PCI_QUIRK(0x105b, 0x6668, "Foxconn", ALC883_6ST_DIG), | 8480 | SND_PCI_QUIRK(0x105b, 0x6668, "Foxconn", ALC883_6ST_DIG), |
| 8481 | SND_PCI_QUIRK(0x1071, 0x8227, "Mitac 82801H", ALC883_MITAC), | ||
| 8481 | SND_PCI_QUIRK(0x1071, 0x8253, "Mitac 8252d", ALC883_MITAC), | 8482 | SND_PCI_QUIRK(0x1071, 0x8253, "Mitac 8252d", ALC883_MITAC), |
| 8482 | SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC883_LAPTOP_EAPD), | 8483 | SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC883_LAPTOP_EAPD), |
| 8483 | SND_PCI_QUIRK(0x10f1, 0x2350, "TYAN-S2350", ALC888_6ST_DELL), | 8484 | SND_PCI_QUIRK(0x10f1, 0x2350, "TYAN-S2350", ALC888_6ST_DELL), |
| @@ -8526,6 +8527,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = { | |||
| 8526 | SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66), | 8527 | SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66), |
| 8527 | SND_PCI_QUIRK(0x8086, 0x0001, "DG33BUC", ALC883_3ST_6ch_INTEL), | 8528 | SND_PCI_QUIRK(0x8086, 0x0001, "DG33BUC", ALC883_3ST_6ch_INTEL), |
| 8528 | SND_PCI_QUIRK(0x8086, 0x0002, "DG33FBC", ALC883_3ST_6ch_INTEL), | 8529 | SND_PCI_QUIRK(0x8086, 0x0002, "DG33FBC", ALC883_3ST_6ch_INTEL), |
| 8530 | SND_PCI_QUIRK(0x8086, 0x2503, "82801H", ALC883_MITAC), | ||
| 8529 | SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC883_3ST_6ch_INTEL), | 8531 | SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC883_3ST_6ch_INTEL), |
| 8530 | SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch), | 8532 | SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch), |
| 8531 | {} | 8533 | {} |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index c39deebb588f..3dd4eee70b7c 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
| @@ -81,6 +81,7 @@ enum { | |||
| 81 | 81 | ||
| 82 | enum { | 82 | enum { |
| 83 | STAC_92HD83XXX_REF, | 83 | STAC_92HD83XXX_REF, |
| 84 | STAC_92HD83XXX_PWR_REF, | ||
| 84 | STAC_92HD83XXX_MODELS | 85 | STAC_92HD83XXX_MODELS |
| 85 | }; | 86 | }; |
| 86 | 87 | ||
| @@ -334,7 +335,7 @@ static hda_nid_t stac92hd83xxx_slave_dig_outs[2] = { | |||
| 334 | }; | 335 | }; |
| 335 | 336 | ||
| 336 | static unsigned int stac92hd83xxx_pwr_mapping[4] = { | 337 | static unsigned int stac92hd83xxx_pwr_mapping[4] = { |
| 337 | 0x03, 0x0c, 0x10, 0x40, | 338 | 0x03, 0x0c, 0x20, 0x40, |
| 338 | }; | 339 | }; |
| 339 | 340 | ||
| 340 | static hda_nid_t stac92hd83xxx_amp_nids[1] = { | 341 | static hda_nid_t stac92hd83xxx_amp_nids[1] = { |
| @@ -841,10 +842,6 @@ static struct hda_verb stac92hd73xx_10ch_core_init[] = { | |||
| 841 | }; | 842 | }; |
| 842 | 843 | ||
| 843 | static struct hda_verb stac92hd83xxx_core_init[] = { | 844 | static struct hda_verb stac92hd83xxx_core_init[] = { |
| 844 | /* start of config #1 */ | ||
| 845 | { 0xe, AC_VERB_SET_CONNECT_SEL, 0x3}, | ||
| 846 | |||
| 847 | /* start of config #2 */ | ||
| 848 | { 0xa, AC_VERB_SET_CONNECT_SEL, 0x0}, | 845 | { 0xa, AC_VERB_SET_CONNECT_SEL, 0x0}, |
| 849 | { 0xb, AC_VERB_SET_CONNECT_SEL, 0x0}, | 846 | { 0xb, AC_VERB_SET_CONNECT_SEL, 0x0}, |
| 850 | { 0xd, AC_VERB_SET_CONNECT_SEL, 0x1}, | 847 | { 0xd, AC_VERB_SET_CONNECT_SEL, 0x1}, |
| @@ -885,8 +882,8 @@ static struct hda_verb stac92hd71bxx_analog_core_init[] = { | |||
| 885 | static struct hda_verb stac925x_core_init[] = { | 882 | static struct hda_verb stac925x_core_init[] = { |
| 886 | /* set dac0mux for dac converter */ | 883 | /* set dac0mux for dac converter */ |
| 887 | { 0x06, AC_VERB_SET_CONNECT_SEL, 0x00}, | 884 | { 0x06, AC_VERB_SET_CONNECT_SEL, 0x00}, |
| 888 | /* unmute and set max the selector */ | 885 | /* mute the master volume */ |
| 889 | { 0x0e, AC_VERB_SET_AMP_GAIN_MUTE, 0xb01f }, | 886 | { 0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE }, |
| 890 | {} | 887 | {} |
| 891 | }; | 888 | }; |
| 892 | 889 | ||
| @@ -1138,6 +1135,8 @@ static struct snd_kcontrol_new stac92hd71bxx_mixer[] = { | |||
| 1138 | }; | 1135 | }; |
| 1139 | 1136 | ||
| 1140 | static struct snd_kcontrol_new stac925x_mixer[] = { | 1137 | static struct snd_kcontrol_new stac925x_mixer[] = { |
| 1138 | HDA_CODEC_VOLUME("Master Playback Volume", 0x0e, 0, HDA_OUTPUT), | ||
| 1139 | HDA_CODEC_MUTE("Master Playback Switch", 0x0e, 0, HDA_OUTPUT), | ||
| 1141 | STAC_INPUT_SOURCE(1), | 1140 | STAC_INPUT_SOURCE(1), |
| 1142 | HDA_CODEC_VOLUME("Capture Volume", 0x09, 0, HDA_OUTPUT), | 1141 | HDA_CODEC_VOLUME("Capture Volume", 0x09, 0, HDA_OUTPUT), |
| 1143 | HDA_CODEC_MUTE("Capture Switch", 0x14, 0, HDA_OUTPUT), | 1142 | HDA_CODEC_MUTE("Capture Switch", 0x14, 0, HDA_OUTPUT), |
| @@ -1736,10 +1735,12 @@ static unsigned int ref92hd83xxx_pin_configs[14] = { | |||
| 1736 | 1735 | ||
| 1737 | static unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = { | 1736 | static unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = { |
| 1738 | [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs, | 1737 | [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs, |
| 1738 | [STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs, | ||
| 1739 | }; | 1739 | }; |
| 1740 | 1740 | ||
| 1741 | static const char *stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = { | 1741 | static const char *stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = { |
| 1742 | [STAC_92HD83XXX_REF] = "ref", | 1742 | [STAC_92HD83XXX_REF] = "ref", |
| 1743 | [STAC_92HD83XXX_PWR_REF] = "mic-ref", | ||
| 1743 | }; | 1744 | }; |
| 1744 | 1745 | ||
| 1745 | static struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = { | 1746 | static struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = { |
| @@ -1799,6 +1800,8 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = { | |||
| 1799 | "HP dv5", STAC_HP_M4), | 1800 | "HP dv5", STAC_HP_M4), |
| 1800 | SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30f4, | 1801 | SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30f4, |
| 1801 | "HP dv7", STAC_HP_M4), | 1802 | "HP dv7", STAC_HP_M4), |
| 1803 | SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30f7, | ||
| 1804 | "HP dv4", STAC_HP_DV5), | ||
| 1802 | SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30fc, | 1805 | SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30fc, |
| 1803 | "HP dv7", STAC_HP_M4), | 1806 | "HP dv7", STAC_HP_M4), |
| 1804 | SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3603, | 1807 | SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3603, |
| @@ -3573,13 +3576,12 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out | |||
| 3573 | err = stac92xx_auto_fill_dac_nids(codec); | 3576 | err = stac92xx_auto_fill_dac_nids(codec); |
| 3574 | if (err < 0) | 3577 | if (err < 0) |
| 3575 | return err; | 3578 | return err; |
| 3579 | err = stac92xx_auto_create_multi_out_ctls(codec, | ||
| 3580 | &spec->autocfg); | ||
| 3581 | if (err < 0) | ||
| 3582 | return err; | ||
| 3576 | } | 3583 | } |
| 3577 | 3584 | ||
| 3578 | err = stac92xx_auto_create_multi_out_ctls(codec, &spec->autocfg); | ||
| 3579 | |||
| 3580 | if (err < 0) | ||
| 3581 | return err; | ||
| 3582 | |||
| 3583 | /* setup analog beep controls */ | 3585 | /* setup analog beep controls */ |
| 3584 | if (spec->anabeep_nid > 0) { | 3586 | if (spec->anabeep_nid > 0) { |
| 3585 | err = stac92xx_auto_create_beep_ctls(codec, | 3587 | err = stac92xx_auto_create_beep_ctls(codec, |
| @@ -4753,7 +4755,9 @@ static struct hda_input_mux stac92hd83xxx_dmux = { | |||
| 4753 | static int patch_stac92hd83xxx(struct hda_codec *codec) | 4755 | static int patch_stac92hd83xxx(struct hda_codec *codec) |
| 4754 | { | 4756 | { |
| 4755 | struct sigmatel_spec *spec; | 4757 | struct sigmatel_spec *spec; |
| 4758 | hda_nid_t conn[STAC92HD83_DAC_COUNT + 1]; | ||
| 4756 | int err; | 4759 | int err; |
| 4760 | int num_dacs; | ||
| 4757 | 4761 | ||
| 4758 | spec = kzalloc(sizeof(*spec), GFP_KERNEL); | 4762 | spec = kzalloc(sizeof(*spec), GFP_KERNEL); |
| 4759 | if (spec == NULL) | 4763 | if (spec == NULL) |
| @@ -4772,15 +4776,16 @@ static int patch_stac92hd83xxx(struct hda_codec *codec) | |||
| 4772 | spec->num_pwrs = ARRAY_SIZE(stac92hd83xxx_pwr_nids); | 4776 | spec->num_pwrs = ARRAY_SIZE(stac92hd83xxx_pwr_nids); |
| 4773 | spec->multiout.dac_nids = spec->dac_nids; | 4777 | spec->multiout.dac_nids = spec->dac_nids; |
| 4774 | 4778 | ||
| 4775 | spec->init = stac92hd83xxx_core_init; | ||
| 4776 | switch (codec->vendor_id) { | ||
| 4777 | case 0x111d7605: | ||
| 4778 | break; | ||
| 4779 | default: | ||
| 4780 | spec->num_pwrs--; | ||
| 4781 | spec->init++; /* switch to config #2 */ | ||
| 4782 | } | ||
| 4783 | 4779 | ||
| 4780 | /* set port 0xe to select the last DAC | ||
| 4781 | */ | ||
| 4782 | num_dacs = snd_hda_get_connections(codec, 0x0e, | ||
| 4783 | conn, STAC92HD83_DAC_COUNT + 1) - 1; | ||
| 4784 | |||
| 4785 | snd_hda_codec_write_cache(codec, 0xe, 0, | ||
| 4786 | AC_VERB_SET_CONNECT_SEL, num_dacs); | ||
| 4787 | |||
| 4788 | spec->init = stac92hd83xxx_core_init; | ||
| 4784 | spec->mixer = stac92hd83xxx_mixer; | 4789 | spec->mixer = stac92hd83xxx_mixer; |
| 4785 | spec->num_pins = ARRAY_SIZE(stac92hd83xxx_pin_nids); | 4790 | spec->num_pins = ARRAY_SIZE(stac92hd83xxx_pin_nids); |
| 4786 | spec->num_dmuxes = ARRAY_SIZE(stac92hd83xxx_dmux_nids); | 4791 | spec->num_dmuxes = ARRAY_SIZE(stac92hd83xxx_dmux_nids); |
| @@ -4806,6 +4811,15 @@ again: | |||
| 4806 | return err; | 4811 | return err; |
| 4807 | } | 4812 | } |
| 4808 | 4813 | ||
| 4814 | switch (codec->vendor_id) { | ||
| 4815 | case 0x111d7604: | ||
| 4816 | case 0x111d7605: | ||
| 4817 | if (spec->board_config == STAC_92HD83XXX_PWR_REF) | ||
| 4818 | break; | ||
| 4819 | spec->num_pwrs = 0; | ||
| 4820 | break; | ||
| 4821 | } | ||
| 4822 | |||
| 4809 | err = stac92xx_parse_auto_config(codec, 0x1d, 0); | 4823 | err = stac92xx_parse_auto_config(codec, 0x1d, 0); |
| 4810 | if (!err) { | 4824 | if (!err) { |
| 4811 | if (spec->board_config < 0) { | 4825 | if (spec->board_config < 0) { |
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c index e9e829e83d7a..18c7c91786bc 100644 --- a/sound/pci/oxygen/virtuoso.c +++ b/sound/pci/oxygen/virtuoso.c | |||
| @@ -683,7 +683,7 @@ static void xonar_hdav_uart_input(struct oxygen *chip) | |||
| 683 | if (chip->uart_input_count >= 2 && | 683 | if (chip->uart_input_count >= 2 && |
| 684 | chip->uart_input[chip->uart_input_count - 2] == 'O' && | 684 | chip->uart_input[chip->uart_input_count - 2] == 'O' && |
| 685 | chip->uart_input[chip->uart_input_count - 1] == 'K') { | 685 | chip->uart_input[chip->uart_input_count - 1] == 'K') { |
| 686 | printk(KERN_DEBUG "message from Xonar HDAV HDMI chip received:"); | 686 | printk(KERN_DEBUG "message from Xonar HDAV HDMI chip received:\n"); |
| 687 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, | 687 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, |
| 688 | chip->uart_input, chip->uart_input_count); | 688 | chip->uart_input, chip->uart_input_count); |
| 689 | chip->uart_input_count = 0; | 689 | chip->uart_input_count = 0; |
| @@ -908,6 +908,7 @@ static const struct oxygen_model model_xonar_hdav = { | |||
| 908 | .dac_channels = 8, | 908 | .dac_channels = 8, |
| 909 | .dac_volume_min = 0x0f, | 909 | .dac_volume_min = 0x0f, |
| 910 | .dac_volume_max = 0xff, | 910 | .dac_volume_max = 0xff, |
| 911 | .misc_flags = OXYGEN_MISC_MIDI, | ||
| 911 | .function_flags = OXYGEN_FUNCTION_2WIRE, | 912 | .function_flags = OXYGEN_FUNCTION_2WIRE, |
| 912 | .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, | 913 | .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, |
| 913 | .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, | 914 | .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, |
diff --git a/sound/soc/atmel/atmel-pcm.c b/sound/soc/atmel/atmel-pcm.c index 1fac5efd285b..3dcdc4e3cfa0 100644 --- a/sound/soc/atmel/atmel-pcm.c +++ b/sound/soc/atmel/atmel-pcm.c | |||
| @@ -44,8 +44,6 @@ | |||
| 44 | #include <sound/pcm_params.h> | 44 | #include <sound/pcm_params.h> |
| 45 | #include <sound/soc.h> | 45 | #include <sound/soc.h> |
| 46 | 46 | ||
| 47 | #include <mach/hardware.h> | ||
| 48 | |||
| 49 | #include "atmel-pcm.h" | 47 | #include "atmel-pcm.h" |
| 50 | 48 | ||
| 51 | 49 | ||
diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c index bcec3f60bad9..acf39a646b2f 100644 --- a/sound/soc/fsl/mpc8610_hpcd.c +++ b/sound/soc/fsl/mpc8610_hpcd.c | |||
| @@ -183,16 +183,6 @@ static struct snd_soc_ops mpc8610_hpcd_ops = { | |||
| 183 | }; | 183 | }; |
| 184 | 184 | ||
| 185 | /** | 185 | /** |
| 186 | * mpc8610_hpcd_machine: ASoC machine data | ||
| 187 | */ | ||
| 188 | static struct snd_soc_card mpc8610_hpcd_machine = { | ||
| 189 | .probe = mpc8610_hpcd_machine_probe, | ||
| 190 | .remove = mpc8610_hpcd_machine_remove, | ||
| 191 | .name = "MPC8610 HPCD", | ||
| 192 | .num_links = 1, | ||
| 193 | }; | ||
| 194 | |||
| 195 | /** | ||
| 196 | * mpc8610_hpcd_probe: OF probe function for the fabric driver | 186 | * mpc8610_hpcd_probe: OF probe function for the fabric driver |
| 197 | * | 187 | * |
| 198 | * This function gets called when an SSI node is found in the device tree. | 188 | * This function gets called when an SSI node is found in the device tree. |
| @@ -455,7 +445,11 @@ static int mpc8610_hpcd_probe(struct of_device *ofdev, | |||
| 455 | machine_data->dai.codec_dai = &cs4270_dai; /* The codec_dai we want */ | 445 | machine_data->dai.codec_dai = &cs4270_dai; /* The codec_dai we want */ |
| 456 | machine_data->dai.ops = &mpc8610_hpcd_ops; | 446 | machine_data->dai.ops = &mpc8610_hpcd_ops; |
| 457 | 447 | ||
| 458 | mpc8610_hpcd_machine.dai_link = &machine_data->dai; | 448 | machine_data->machine.probe = mpc8610_hpcd_machine_probe; |
| 449 | machine_data->machine.remove = mpc8610_hpcd_machine_remove; | ||
| 450 | machine_data->machine.name = "MPC8610 HPCD"; | ||
| 451 | machine_data->machine.num_links = 1; | ||
| 452 | machine_data->machine.dai_link = &machine_data->dai; | ||
| 459 | 453 | ||
| 460 | /* Allocate a new audio platform device structure */ | 454 | /* Allocate a new audio platform device structure */ |
| 461 | sound_device = platform_device_alloc("soc-audio", -1); | 455 | sound_device = platform_device_alloc("soc-audio", -1); |
| @@ -465,7 +459,7 @@ static int mpc8610_hpcd_probe(struct of_device *ofdev, | |||
| 465 | goto error; | 459 | goto error; |
| 466 | } | 460 | } |
| 467 | 461 | ||
| 468 | machine_data->sound_devdata.card = &mpc8610_hpcd_machine; | 462 | machine_data->sound_devdata.card = &machine_data->machine; |
| 469 | machine_data->sound_devdata.codec_dev = &soc_codec_device_cs4270; | 463 | machine_data->sound_devdata.codec_dev = &soc_codec_device_cs4270; |
| 470 | machine_data->machine.platform = &fsl_soc_platform; | 464 | machine_data->machine.platform = &fsl_soc_platform; |
| 471 | 465 | ||
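Folding the card description into the per-probe machine_data means every SSI node found in the device tree gets its own snd_soc_card rather than sharing one static instance. The private structure this presumes looks roughly as follows; only the three members visible in the hunk are confirmed, the rest is guesswork:

/* Assumed per-device private data after this change. */
struct mpc8610_hpcd_data {
	struct snd_soc_device	sound_devdata;
	struct snd_soc_dai_link	dai;
	struct snd_soc_card	machine;	/* no longer a shared static */
	/* ... SSI, DMA and codec clock configuration fields ... */
};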
