48 files changed, 548 insertions, 280 deletions
diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c
index 8d019071190a..381fec0af52e 100644
--- a/arch/alpha/kernel/core_mcpcia.c
+++ b/arch/alpha/kernel/core_mcpcia.c
| @@ -40,8 +40,6 @@ | |||
| 40 | # define DBG_CFG(args) | 40 | # define DBG_CFG(args) |
| 41 | #endif | 41 | #endif |
| 42 | 42 | ||
| 43 | #define MCPCIA_MAX_HOSES 4 | ||
| 44 | |||
| 45 | /* | 43 | /* |
| 46 | * Given a bus, device, and function number, compute resulting | 44 | * Given a bus, device, and function number, compute resulting |
| 47 | * configuration space address and setup the MCPCIA_HAXR2 register | 45 | * configuration space address and setup the MCPCIA_HAXR2 register |
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c
index febe71c6869f..543d96d7fa2b 100644
--- a/arch/alpha/kernel/err_titan.c
+++ b/arch/alpha/kernel/err_titan.c
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <asm/smp.h> | 16 | #include <asm/smp.h> |
| 17 | #include <asm/err_common.h> | 17 | #include <asm/err_common.h> |
| 18 | #include <asm/err_ev6.h> | 18 | #include <asm/err_ev6.h> |
| 19 | #include <asm/irq_regs.h> | ||
| 19 | 20 | ||
| 20 | #include "err_impl.h" | 21 | #include "err_impl.h" |
| 21 | #include "proto.h" | 22 | #include "proto.h" |
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index aac6d4b22f7a..bd03dc94c72b 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
| @@ -285,12 +285,12 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, | |||
| 285 | reloc_overflow: | 285 | reloc_overflow: |
| 286 | if (ELF64_ST_TYPE (sym->st_info) == STT_SECTION) | 286 | if (ELF64_ST_TYPE (sym->st_info) == STT_SECTION) |
| 287 | printk(KERN_ERR | 287 | printk(KERN_ERR |
| 288 | "module %s: Relocation overflow vs section %d\n", | 288 | "module %s: Relocation (type %lu) overflow vs section %d\n", |
| 289 | me->name, sym->st_shndx); | 289 | me->name, r_type, sym->st_shndx); |
| 290 | else | 290 | else |
| 291 | printk(KERN_ERR | 291 | printk(KERN_ERR |
| 292 | "module %s: Relocation overflow vs %s\n", | 292 | "module %s: Relocation (type %lu) overflow vs %s\n", |
| 293 | me->name, strtab + sym->st_name); | 293 | me->name, r_type, strtab + sym->st_name); |
| 294 | return -ENOEXEC; | 294 | return -ENOEXEC; |
| 295 | } | 295 | } |
| 296 | } | 296 | } |
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index e7594a7cf585..920196bcbb61 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
| @@ -70,6 +70,12 @@ nautilus_map_irq(struct pci_dev *dev, u8 slot, u8 pin) | |||
| 70 | /* Preserve the IRQ set up by the console. */ | 70 | /* Preserve the IRQ set up by the console. */ |
| 71 | 71 | ||
| 72 | u8 irq; | 72 | u8 irq; |
| 73 | /* UP1500: AGP INTA is actually routed to IRQ 5, not IRQ 10 as | ||
| 74 | console reports. Check the device id of AGP bridge to distinguish | ||
| 75 | UP1500 from UP1000/1100. Note: 'pin' is 2 due to bridge swizzle. */ | ||
| 76 | if (slot == 1 && pin == 2 && | ||
| 77 | dev->bus->self && dev->bus->self->device == 0x700f) | ||
| 78 | return 5; | ||
| 73 | pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); | 79 | pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); |
| 74 | return irq; | 80 | return irq; |
| 75 | } | 81 | } |
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index de6ba3432e8a..eb2a1d63f484 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
| @@ -66,6 +66,13 @@ noritake_startup_irq(unsigned int irq) | |||
| 66 | return 0; | 66 | return 0; |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | static void | ||
| 70 | noritake_end_irq(unsigned int irq) | ||
| 71 | { | ||
| 72 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
| 73 | noritake_enable_irq(irq); | ||
| 74 | } | ||
| 75 | |||
| 69 | static struct hw_interrupt_type noritake_irq_type = { | 76 | static struct hw_interrupt_type noritake_irq_type = { |
| 70 | .typename = "NORITAKE", | 77 | .typename = "NORITAKE", |
| 71 | .startup = noritake_startup_irq, | 78 | .startup = noritake_startup_irq, |
| @@ -73,7 +80,7 @@ static struct hw_interrupt_type noritake_irq_type = { | |||
| 73 | .enable = noritake_enable_irq, | 80 | .enable = noritake_enable_irq, |
| 74 | .disable = noritake_disable_irq, | 81 | .disable = noritake_disable_irq, |
| 75 | .ack = noritake_disable_irq, | 82 | .ack = noritake_disable_irq, |
| 76 | .end = noritake_enable_irq, | 83 | .end = noritake_end_irq, |
| 77 | }; | 84 | }; |
| 78 | 85 | ||
| 79 | static void | 86 | static void |
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index 581d08c70b92..672cb2df53df 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
| @@ -52,6 +52,9 @@ rawhide_update_irq_hw(int hose, int mask) | |||
| 52 | *(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(hose)); | 52 | *(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(hose)); |
| 53 | } | 53 | } |
| 54 | 54 | ||
| 55 | #define hose_exists(h) \ | ||
| 56 | (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0)) | ||
| 57 | |||
| 55 | static inline void | 58 | static inline void |
| 56 | rawhide_enable_irq(unsigned int irq) | 59 | rawhide_enable_irq(unsigned int irq) |
| 57 | { | 60 | { |
| @@ -59,6 +62,9 @@ rawhide_enable_irq(unsigned int irq) | |||
| 59 | 62 | ||
| 60 | irq -= 16; | 63 | irq -= 16; |
| 61 | hose = irq / 24; | 64 | hose = irq / 24; |
| 65 | if (!hose_exists(hose)) /* if hose non-existent, exit */ | ||
| 66 | return; | ||
| 67 | |||
| 62 | irq -= hose * 24; | 68 | irq -= hose * 24; |
| 63 | mask = 1 << irq; | 69 | mask = 1 << irq; |
| 64 | 70 | ||
| @@ -76,6 +82,9 @@ rawhide_disable_irq(unsigned int irq) | |||
| 76 | 82 | ||
| 77 | irq -= 16; | 83 | irq -= 16; |
| 78 | hose = irq / 24; | 84 | hose = irq / 24; |
| 85 | if (!hose_exists(hose)) /* if hose non-existent, exit */ | ||
| 86 | return; | ||
| 87 | |||
| 79 | irq -= hose * 24; | 88 | irq -= hose * 24; |
| 80 | mask = ~(1 << irq) | hose_irq_masks[hose]; | 89 | mask = ~(1 << irq) | hose_irq_masks[hose]; |
| 81 | 90 | ||
| @@ -93,6 +102,9 @@ rawhide_mask_and_ack_irq(unsigned int irq) | |||
| 93 | 102 | ||
| 94 | irq -= 16; | 103 | irq -= 16; |
| 95 | hose = irq / 24; | 104 | hose = irq / 24; |
| 105 | if (!hose_exists(hose)) /* if hose non-existent, exit */ | ||
| 106 | return; | ||
| 107 | |||
| 96 | irq -= hose * 24; | 108 | irq -= hose * 24; |
| 97 | mask1 = 1 << irq; | 109 | mask1 = 1 << irq; |
| 98 | mask = ~mask1 | hose_irq_masks[hose]; | 110 | mask = ~mask1 | hose_irq_masks[hose]; |
| @@ -169,6 +181,9 @@ rawhide_init_irq(void) | |||
| 169 | 181 | ||
| 170 | mcpcia_init_hoses(); | 182 | mcpcia_init_hoses(); |
| 171 | 183 | ||
| 184 | /* Clear them all; only hoses that exist will be non-zero. */ | ||
| 185 | for (i = 0; i < MCPCIA_MAX_HOSES; i++) cached_irq_masks[i] = 0; | ||
| 186 | |||
| 172 | for (hose = hose_head; hose; hose = hose->next) { | 187 | for (hose = hose_head; hose; hose = hose->next) { |
| 173 | unsigned int h = hose->index; | 188 | unsigned int h = hose->index; |
| 174 | unsigned int mask = hose_irq_masks[h]; | 189 | unsigned int mask = hose_irq_masks[h]; |
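[Annotation, not part of the patch] The new hose_exists() test works because rawhide_init_irq() now zeroes every slot of cached_irq_masks[] first, so only hoses that were actually probed end up with a non-zero mask. A minimal sketch of the IRQ decomposition the handlers above share, assuming 24 interrupt lines per MCPCIA hose starting at Linux IRQ 16:

    /* Sketch only: how a rawhide Linux IRQ maps to a hose and line.
     * Assumes 24 lines per hose starting at IRQ 16, as in the handlers above. */
    static unsigned int rawhide_irq_to_hose(unsigned int irq, unsigned int *line)
    {
            irq -= 16;              /* skip the legacy i8259 range   */
            *line = irq % 24;       /* line within the hose          */
            return irq / 24;        /* e.g. IRQ 70 -> hose 2, line 6 */
    }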
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index a654014d202a..14b5a753aba5 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
| @@ -84,12 +84,16 @@ alphabook1_init_arch(void) | |||
| 84 | static void __init | 84 | static void __init |
| 85 | sio_pci_route(void) | 85 | sio_pci_route(void) |
| 86 | { | 86 | { |
| 87 | #if defined(ALPHA_RESTORE_SRM_SETUP) | 87 | unsigned int orig_route_tab; |
| 88 | /* First, read and save the original setting. */ | 88 | |
| 89 | /* First, ALWAYS read and print the original setting. */ | ||
| 89 | pci_bus_read_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60, | 90 | pci_bus_read_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60, |
| 90 | &saved_config.orig_route_tab); | 91 | &orig_route_tab); |
| 91 | printk("%s: PIRQ original 0x%x new 0x%x\n", __FUNCTION__, | 92 | printk("%s: PIRQ original 0x%x new 0x%x\n", __FUNCTION__, |
| 92 | saved_config.orig_route_tab, alpha_mv.sys.sio.route_tab); | 93 | orig_route_tab, alpha_mv.sys.sio.route_tab); |
| 94 | |||
| 95 | #if defined(ALPHA_RESTORE_SRM_SETUP) | ||
| 96 | saved_config.orig_route_tab = orig_route_tab; | ||
| 93 | #endif | 97 | #endif |
| 94 | 98 | ||
| 95 | /* Now override with desired setting. */ | 99 | /* Now override with desired setting. */ |
| @@ -334,7 +338,7 @@ struct alpha_machine_vector avanti_mv __initmv = { | |||
| 334 | .pci_swizzle = common_swizzle, | 338 | .pci_swizzle = common_swizzle, |
| 335 | 339 | ||
| 336 | .sys = { .sio = { | 340 | .sys = { .sio = { |
| 337 | .route_tab = 0x0b0a0e0f, | 341 | .route_tab = 0x0b0a050f, /* leave 14 for IDE, 9 for SND */ |
| 338 | }} | 342 | }} |
| 339 | }; | 343 | }; |
| 340 | ALIAS_MV(avanti) | 344 | ALIAS_MV(avanti) |
diff --git a/arch/alpha/kernel/sys_sx164.c b/arch/alpha/kernel/sys_sx164.c
index 94ad68b7c0ae..41d4ad4c7c44 100644
--- a/arch/alpha/kernel/sys_sx164.c
+++ b/arch/alpha/kernel/sys_sx164.c
| @@ -132,7 +132,7 @@ sx164_init_arch(void) | |||
| 132 | 132 | ||
| 133 | if (amask(AMASK_MAX) != 0 | 133 | if (amask(AMASK_MAX) != 0 |
| 134 | && alpha_using_srm | 134 | && alpha_using_srm |
| 135 | && (cpu->pal_revision & 0xffff) == 0x117) { | 135 | && (cpu->pal_revision & 0xffff) <= 0x117) { |
| 136 | __asm__ __volatile__( | 136 | __asm__ __volatile__( |
| 137 | "lda $16,8($31)\n" | 137 | "lda $16,8($31)\n" |
| 138 | "call_pal 9\n" /* Allow PALRES insns in kernel mode */ | 138 | "call_pal 9\n" /* Allow PALRES insns in kernel mode */ |
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 29ab7db81c30..f009b7bc0943 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
| @@ -257,8 +257,7 @@ titan_dispatch_irqs(u64 mask) | |||
| 257 | */ | 257 | */ |
| 258 | while (mask) { | 258 | while (mask) { |
| 259 | /* convert to SRM vector... priority is <63> -> <0> */ | 259 | /* convert to SRM vector... priority is <63> -> <0> */ |
| 260 | __asm__("ctlz %1, %0" : "=r"(vector) : "r"(mask)); | 260 | vector = 63 - __kernel_ctlz(mask); |
| 261 | vector = 63 - vector; | ||
| 262 | mask &= ~(1UL << vector); /* clear it out */ | 261 | mask &= ~(1UL << vector); /* clear it out */ |
| 263 | vector = 0x900 + (vector << 4); /* convert to SRM vector */ | 262 | vector = 0x900 + (vector << 4); /* convert to SRM vector */ |
| 264 | 263 | ||
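[Annotation, not part of the patch] __kernel_ctlz() counts leading zeros, so 63 - __kernel_ctlz(mask) is the index of the highest set bit, matching the <63> -> <0> priority order in the comment. A sketch of the same loop using the GCC builtin as a stand-in, with handle_srm_vector() as a hypothetical consumer:

    /* Sketch: highest-bit-first dispatch; __builtin_clzll stands in for
     * Alpha's __kernel_ctlz(); handle_srm_vector() is hypothetical. */
    static void dispatch_srm_vectors(u64 mask)
    {
            while (mask) {
                    int vector = 63 - __builtin_clzll(mask);  /* highest set bit  */
                    mask &= ~(1ULL << vector);                /* clear it out     */
                    handle_srm_vector(0x900 + (vector << 4)); /* SRM vector value */
            }
    }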
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index a98ba88a8c0c..9f1e8c1afab7 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
| @@ -41,16 +41,17 @@ int nmi_watchdog_enabled; | |||
| 41 | * different subsystems this reservation system just tries to coordinate | 41 | * different subsystems this reservation system just tries to coordinate |
| 42 | * things a little | 42 | * things a little |
| 43 | */ | 43 | */ |
| 44 | static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner); | ||
| 45 | static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]); | ||
| 46 | |||
| 47 | static cpumask_t backtrace_mask = CPU_MASK_NONE; | ||
| 48 | 44 | ||
| 49 | /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's | 45 | /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's |
| 50 | * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now) | 46 | * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now) |
| 51 | */ | 47 | */ |
| 52 | #define NMI_MAX_COUNTER_BITS 66 | 48 | #define NMI_MAX_COUNTER_BITS 66 |
| 49 | #define NMI_MAX_COUNTER_LONGS BITS_TO_LONGS(NMI_MAX_COUNTER_BITS) | ||
| 53 | 50 | ||
| 51 | static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner[NMI_MAX_COUNTER_LONGS]); | ||
| 52 | static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[NMI_MAX_COUNTER_LONGS]); | ||
| 53 | |||
| 54 | static cpumask_t backtrace_mask = CPU_MASK_NONE; | ||
| 54 | /* nmi_active: | 55 | /* nmi_active: |
| 55 | * >0: the lapic NMI watchdog is active, but can be disabled | 56 | * >0: the lapic NMI watchdog is active, but can be disabled |
| 56 | * <0: the lapic NMI watchdog has not been set up, and cannot | 57 | * <0: the lapic NMI watchdog has not been set up, and cannot |
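[Annotation, not part of the patch] The per-CPU owner bitmaps were hard-coded at 3 and 2 longs; sizing them with BITS_TO_LONGS(NMI_MAX_COUNTER_BITS) ties the array length to the 66-bit counter space on either word size. A quick sketch of the arithmetic, assuming the usual round-up definition of BITS_TO_LONGS:

    /* Sketch: BITS_TO_LONGS rounds a bit count up to whole longs.
     * 32-bit: (66 + 31) / 32 = 3 longs    64-bit: (66 + 63) / 64 = 2 longs */
    #define SKETCH_BITS_TO_LONGS(nr)  (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)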
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index ca51610955df..6f38f818380b 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
| @@ -26,7 +26,7 @@ OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") | |||
| 26 | OUTPUT_ARCH(i386) | 26 | OUTPUT_ARCH(i386) |
| 27 | ENTRY(phys_startup_32) | 27 | ENTRY(phys_startup_32) |
| 28 | jiffies = jiffies_64; | 28 | jiffies = jiffies_64; |
| 29 | _proxy_pda = 0; | 29 | _proxy_pda = 1; |
| 30 | 30 | ||
| 31 | PHDRS { | 31 | PHDRS { |
| 32 | text PT_LOAD FLAGS(5); /* R_E */ | 32 | text PT_LOAD FLAGS(5); /* R_E */ |
diff --git a/arch/x86_64/kernel/functionlist b/arch/x86_64/kernel/functionlist
index 01fa23580c85..7ae18ec12454 100644
--- a/arch/x86_64/kernel/functionlist
+++ b/arch/x86_64/kernel/functionlist
| @@ -514,7 +514,6 @@ | |||
| 514 | *(.text.dentry_open) | 514 | *(.text.dentry_open) |
| 515 | *(.text.dentry_iput) | 515 | *(.text.dentry_iput) |
| 516 | *(.text.bio_alloc) | 516 | *(.text.bio_alloc) |
| 517 | *(.text.alloc_skb_from_cache) | ||
| 518 | *(.text.wait_on_page_bit) | 517 | *(.text.wait_on_page_bit) |
| 519 | *(.text.vfs_readdir) | 518 | *(.text.vfs_readdir) |
| 520 | *(.text.vfs_lstat) | 519 | *(.text.vfs_lstat) |
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index a90996c27dc8..dfab9f167366 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
| @@ -39,15 +39,17 @@ int panic_on_unrecovered_nmi; | |||
| 39 | * different subsystems this reservation system just tries to coordinate | 39 | * different subsystems this reservation system just tries to coordinate |
| 40 | * things a little | 40 | * things a little |
| 41 | */ | 41 | */ |
| 42 | static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner); | ||
| 43 | static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]); | ||
| 44 | |||
| 45 | static cpumask_t backtrace_mask = CPU_MASK_NONE; | ||
| 46 | 42 | ||
| 47 | /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's | 43 | /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's |
| 48 | * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now) | 44 | * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now) |
| 49 | */ | 45 | */ |
| 50 | #define NMI_MAX_COUNTER_BITS 66 | 46 | #define NMI_MAX_COUNTER_BITS 66 |
| 47 | #define NMI_MAX_COUNTER_LONGS BITS_TO_LONGS(NMI_MAX_COUNTER_BITS) | ||
| 48 | |||
| 49 | static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner[NMI_MAX_COUNTER_LONGS]); | ||
| 50 | static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[NMI_MAX_COUNTER_LONGS]); | ||
| 51 | |||
| 52 | static cpumask_t backtrace_mask = CPU_MASK_NONE; | ||
| 51 | 53 | ||
| 52 | /* nmi_active: | 54 | /* nmi_active: |
| 53 | * >0: the lapic NMI watchdog is active, but can be disabled | 55 | * >0: the lapic NMI watchdog is active, but can be disabled |
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index b73212c0a550..5176ecf006ee 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
| @@ -13,7 +13,7 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64") | |||
| 13 | OUTPUT_ARCH(i386:x86-64) | 13 | OUTPUT_ARCH(i386:x86-64) |
| 14 | ENTRY(phys_startup_64) | 14 | ENTRY(phys_startup_64) |
| 15 | jiffies_64 = jiffies; | 15 | jiffies_64 = jiffies; |
| 16 | _proxy_pda = 0; | 16 | _proxy_pda = 1; |
| 17 | PHDRS { | 17 | PHDRS { |
| 18 | text PT_LOAD FLAGS(5); /* R_E */ | 18 | text PT_LOAD FLAGS(5); /* R_E */ |
| 19 | data PT_LOAD FLAGS(7); /* RWE */ | 19 | data PT_LOAD FLAGS(7); /* RWE */ |
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index f5c160caf9f4..5f066963f171 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
| @@ -248,7 +248,7 @@ static unsigned long get_unmapped_area_mem(struct file *file, | |||
| 248 | { | 248 | { |
| 249 | if (!valid_mmap_phys_addr_range(pgoff, len)) | 249 | if (!valid_mmap_phys_addr_range(pgoff, len)) |
| 250 | return (unsigned long) -EINVAL; | 250 | return (unsigned long) -EINVAL; |
| 251 | return pgoff; | 251 | return pgoff << PAGE_SHIFT; |
| 252 | } | 252 | } |
| 253 | 253 | ||
| 254 | /* can't do an in-place private mapping if there's no MMU */ | 254 | /* can't do an in-place private mapping if there's no MMU */ |
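[Annotation, not part of the patch] On no-MMU kernels get_unmapped_area must return a byte address, while pgoff counts pages, so the old "return pgoff;" was short by a factor of PAGE_SIZE. One-line illustration of the unit conversion (values made up):

    unsigned long pgoff = 0x1000;               /* page index                     */
    unsigned long addr  = pgoff << PAGE_SHIFT;  /* 0x1000 << 12 = 0x1000000 bytes */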
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index da5828f2dfc2..01206ebb1cf2 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
| @@ -407,7 +407,7 @@ static void w83627ehf_write_fan_div(struct i2c_client *client, int nr) | |||
| 407 | break; | 407 | break; |
| 408 | case 4: | 408 | case 4: |
| 409 | reg = (w83627ehf_read_value(client, W83627EHF_REG_DIODE) & 0x73) | 409 | reg = (w83627ehf_read_value(client, W83627EHF_REG_DIODE) & 0x73) |
| 410 | | ((data->fan_div[4] & 0x03) << 3) | 410 | | ((data->fan_div[4] & 0x03) << 2) |
| 411 | | ((data->fan_div[4] & 0x04) << 5); | 411 | | ((data->fan_div[4] & 0x04) << 5); |
| 412 | w83627ehf_write_value(client, W83627EHF_REG_DIODE, reg); | 412 | w83627ehf_write_value(client, W83627EHF_REG_DIODE, reg); |
| 413 | break; | 413 | break; |
| @@ -471,9 +471,9 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) | |||
| 471 | time */ | 471 | time */ |
| 472 | if (data->fan[i] == 0xff | 472 | if (data->fan[i] == 0xff |
| 473 | && data->fan_div[i] < 0x07) { | 473 | && data->fan_div[i] < 0x07) { |
| 474 | dev_dbg(&client->dev, "Increasing fan %d " | 474 | dev_dbg(&client->dev, "Increasing fan%d " |
| 475 | "clock divider from %u to %u\n", | 475 | "clock divider from %u to %u\n", |
| 476 | i, div_from_reg(data->fan_div[i]), | 476 | i + 1, div_from_reg(data->fan_div[i]), |
| 477 | div_from_reg(data->fan_div[i] + 1)); | 477 | div_from_reg(data->fan_div[i] + 1)); |
| 478 | data->fan_div[i]++; | 478 | data->fan_div[i]++; |
| 479 | w83627ehf_write_fan_div(client, i); | 479 | w83627ehf_write_fan_div(client, i); |
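[Annotation, inferred from the masks in the hunk rather than from the datasheet] The preserved mask 0x73 (binary 0111 0011) clears register bits 2, 3 and 7, so the three fan5 divider bits must land exactly there, which is why the low pair is shifted by 2 and not 3:

    /* Sketch of the fan5 divider packing implied by the code above. */
    reg = (old_reg & 0x73)          /* keep bits 0,1,4,5,6           */
        | ((div & 0x03) << 2)       /* divider bits 1:0 -> reg 3:2   */
        | ((div & 0x04) << 5);      /* divider bit 2    -> reg bit 7 */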
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index fb19dbb31e42..ece31d2c6c64 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
| @@ -344,8 +344,7 @@ config I2C_PARPORT_LIGHT | |||
| 344 | 344 | ||
| 345 | config I2C_PASEMI | 345 | config I2C_PASEMI |
| 346 | tristate "PA Semi SMBus interface" | 346 | tristate "PA Semi SMBus interface" |
| 347 | # depends on PPC_PASEMI && I2C && PCI | 347 | depends on PPC_PASEMI && I2C && PCI |
| 348 | depends on I2C && PCI | ||
| 349 | help | 348 | help |
| 350 | Supports the PA Semi PWRficient on-chip SMBus interfaces. | 349 | Supports the PA Semi PWRficient on-chip SMBus interfaces. |
| 351 | 350 | ||
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index f54fb5d65cc4..bf89eeef74e9 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
| @@ -141,7 +141,7 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter, | |||
| 141 | for (i = 0; i < msg->len - 1; i++) | 141 | for (i = 0; i < msg->len - 1; i++) |
| 142 | TXFIFO_WR(smbus, msg->buf[i]); | 142 | TXFIFO_WR(smbus, msg->buf[i]); |
| 143 | 143 | ||
| 144 | TXFIFO_WR(smbus, msg->buf[msg->len] | | 144 | TXFIFO_WR(smbus, msg->buf[msg->len-1] | |
| 145 | (stop ? MTXFIFO_STOP : 0)); | 145 | (stop ? MTXFIFO_STOP : 0)); |
| 146 | } | 146 | } |
| 147 | 147 | ||
| @@ -226,7 +226,7 @@ static int pasemi_smb_xfer(struct i2c_adapter *adapter, | |||
| 226 | rd = RXFIFO_RD(smbus); | 226 | rd = RXFIFO_RD(smbus); |
| 227 | len = min_t(u8, (rd & MRXFIFO_DATA_M), | 227 | len = min_t(u8, (rd & MRXFIFO_DATA_M), |
| 228 | I2C_SMBUS_BLOCK_MAX); | 228 | I2C_SMBUS_BLOCK_MAX); |
| 229 | TXFIFO_WR(smbus, (len + 1) | MTXFIFO_READ | | 229 | TXFIFO_WR(smbus, len | MTXFIFO_READ | |
| 230 | MTXFIFO_STOP); | 230 | MTXFIFO_STOP); |
| 231 | } else { | 231 | } else { |
| 232 | len = min_t(u8, data->block[0], I2C_SMBUS_BLOCK_MAX); | 232 | len = min_t(u8, data->block[0], I2C_SMBUS_BLOCK_MAX); |
| @@ -258,7 +258,7 @@ static int pasemi_smb_xfer(struct i2c_adapter *adapter, | |||
| 258 | rd = RXFIFO_RD(smbus); | 258 | rd = RXFIFO_RD(smbus); |
| 259 | len = min_t(u8, (rd & MRXFIFO_DATA_M), | 259 | len = min_t(u8, (rd & MRXFIFO_DATA_M), |
| 260 | I2C_SMBUS_BLOCK_MAX - len); | 260 | I2C_SMBUS_BLOCK_MAX - len); |
| 261 | TXFIFO_WR(smbus, (len + 1) | MTXFIFO_READ | MTXFIFO_STOP); | 261 | TXFIFO_WR(smbus, len | MTXFIFO_READ | MTXFIFO_STOP); |
| 262 | break; | 262 | break; |
| 263 | 263 | ||
| 264 | default: | 264 | default: |
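[Annotation, not part of the patch] Both hunks fix off-by-one errors: a write must emit msg->buf[msg->len - 1] as its final byte (the old code read one element past the buffer), and a block read should ask the FIFO for len bytes, not len + 1. Shape of the corrected write path, for illustration only:

    /* Sketch: bytes 0..len-2 go out plainly, the last byte carries STOP. */
    for (i = 0; i < msg->len - 1; i++)
            TXFIFO_WR(smbus, msg->buf[i]);
    TXFIFO_WR(smbus, msg->buf[msg->len - 1] | (stop ? MTXFIFO_STOP : 0));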
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index fdb576dcfaa8..ee561c569d5f 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
| @@ -835,6 +835,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) | |||
| 835 | 835 | ||
| 836 | key = arbel_key_to_hw_index(fmr->ibmr.lkey); | 836 | key = arbel_key_to_hw_index(fmr->ibmr.lkey); |
| 837 | key &= dev->limits.num_mpts - 1; | 837 | key &= dev->limits.num_mpts - 1; |
| 838 | key = adjust_key(dev, key); | ||
| 838 | fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key); | 839 | fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key); |
| 839 | 840 | ||
| 840 | fmr->maps = 0; | 841 | fmr->maps = 0; |
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index e85b4c7c36f7..cab26f301eab 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
| @@ -1171,6 +1171,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes) | |||
| 1171 | * and zap two pdes instead of one. | 1171 | * and zap two pdes instead of one. |
| 1172 | */ | 1172 | */ |
| 1173 | if (level == PT32_ROOT_LEVEL) { | 1173 | if (level == PT32_ROOT_LEVEL) { |
| 1174 | page_offset &= ~7; /* kill rounding error */ | ||
| 1174 | page_offset <<= 1; | 1175 | page_offset <<= 1; |
| 1175 | npte = 2; | 1176 | npte = 2; |
| 1176 | } | 1177 | } |
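[Annotation, not part of the patch] With a 32-bit non-PAE guest each guest pde is 4 bytes but the shadow entries are 8, so the write offset is rounded down to an 8-byte pair before being doubled. A worked example of why the masking matters:

    /* Guest write at byte offset 0x404 (4-byte pde index 0x101):
     *   0x404 & ~7 = 0x400     align to the guest pde pair
     *   0x400 << 1 = 0x800     scale to 8-byte shadow entries
     *   npte = 2               covers shadow pdes 0x100 and 0x101
     * Without the mask, 0x404 << 1 = 0x808 would miss pde 0x100 and
     * needlessly zap pde 0x102 instead. */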
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 3096836d8bd3..c9f3dc4fd3ee 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
| @@ -1259,9 +1259,9 @@ static int smu_release(struct inode *inode, struct file *file) | |||
| 1259 | set_current_state(TASK_UNINTERRUPTIBLE); | 1259 | set_current_state(TASK_UNINTERRUPTIBLE); |
| 1260 | if (pp->cmd.status != 1) | 1260 | if (pp->cmd.status != 1) |
| 1261 | break; | 1261 | break; |
| 1262 | spin_lock_irqsave(&pp->lock, flags); | ||
| 1263 | schedule(); | ||
| 1264 | spin_unlock_irqrestore(&pp->lock, flags); | 1262 | spin_unlock_irqrestore(&pp->lock, flags); |
| 1263 | schedule(); | ||
| 1264 | spin_lock_irqsave(&pp->lock, flags); | ||
| 1265 | } | 1265 | } |
| 1266 | set_current_state(TASK_RUNNING); | 1266 | set_current_state(TASK_RUNNING); |
| 1267 | remove_wait_queue(&pp->wait, &wait); | 1267 | remove_wait_queue(&pp->wait, &wait); |
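[Annotation, not part of the patch] The old code called schedule() with pp->lock held; the fix drops the lock around the sleep and retakes it before rechecking. General shape of the corrected wait loop, sketched on the assumption that the lock is already held when the loop is entered:

    /* Sketch only: never sleep with a spinlock held. */
    for (;;) {
            set_current_state(TASK_UNINTERRUPTIBLE);
            if (pp->cmd.status != 1)
                    break;                                  /* command finished */
            spin_unlock_irqrestore(&pp->lock, flags);
            schedule();                                     /* sleep unlocked   */
            spin_lock_irqsave(&pp->lock, flags);
    }
    set_current_state(TASK_RUNNING);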
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c3f9f599f134..a3d46ea37126 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
| @@ -2263,6 +2263,7 @@ config GIANFAR | |||
| 2263 | tristate "Gianfar Ethernet" | 2263 | tristate "Gianfar Ethernet" |
| 2264 | depends on 85xx || 83xx || PPC_86xx | 2264 | depends on 85xx || 83xx || PPC_86xx |
| 2265 | select PHYLIB | 2265 | select PHYLIB |
| 2266 | select CRC32 | ||
| 2266 | help | 2267 | help |
| 2267 | This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, | 2268 | This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, |
| 2268 | and MPC86xx family of chips, and the FEC on the 8540. | 2269 | and MPC86xx family of chips, and the FEC on the 8540. |
diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h
index e14862b43d17..483a594210a7 100644
--- a/drivers/net/cxgb3/cxgb3_defs.h
+++ b/drivers/net/cxgb3/cxgb3_defs.h
| @@ -67,7 +67,10 @@ static inline union listen_entry *stid2entry(const struct tid_info *t, | |||
| 67 | static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t, | 67 | static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t, |
| 68 | unsigned int tid) | 68 | unsigned int tid) |
| 69 | { | 69 | { |
| 70 | return tid < t->ntids ? &(t->tid_tab[tid]) : NULL; | 70 | struct t3c_tid_entry *t3c_tid = tid < t->ntids ? |
| 71 | &(t->tid_tab[tid]) : NULL; | ||
| 72 | |||
| 73 | return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL; | ||
| 71 | } | 74 | } |
| 72 | 75 | ||
| 73 | /* | 76 | /* |
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 48649244673e..199e5066acf3 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
| @@ -508,6 +508,7 @@ void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid) | |||
| 508 | 508 | ||
| 509 | spin_lock_bh(&td->tid_release_lock); | 509 | spin_lock_bh(&td->tid_release_lock); |
| 510 | p->ctx = (void *)td->tid_release_list; | 510 | p->ctx = (void *)td->tid_release_list; |
| 511 | p->client = NULL; | ||
| 511 | td->tid_release_list = p; | 512 | td->tid_release_list = p; |
| 512 | if (!p->ctx) | 513 | if (!p->ctx) |
| 513 | schedule_work(&td->tid_release_task); | 514 | schedule_work(&td->tid_release_task); |
| @@ -623,7 +624,8 @@ static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb) | |||
| 623 | struct t3c_tid_entry *t3c_tid; | 624 | struct t3c_tid_entry *t3c_tid; |
| 624 | 625 | ||
| 625 | t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); | 626 | t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); |
| 626 | if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers && | 627 | if (t3c_tid && t3c_tid->ctx && t3c_tid->client && |
| 628 | t3c_tid->client->handlers && | ||
| 627 | t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) { | 629 | t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) { |
| 628 | return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb, | 630 | return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb, |
| 629 | t3c_tid-> | 631 | t3c_tid-> |
| @@ -642,7 +644,7 @@ static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb) | |||
| 642 | struct t3c_tid_entry *t3c_tid; | 644 | struct t3c_tid_entry *t3c_tid; |
| 643 | 645 | ||
| 644 | t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid); | 646 | t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid); |
| 645 | if (t3c_tid->ctx && t3c_tid->client->handlers && | 647 | if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && |
| 646 | t3c_tid->client->handlers[p->opcode]) { | 648 | t3c_tid->client->handlers[p->opcode]) { |
| 647 | return t3c_tid->client->handlers[p->opcode] (dev, skb, | 649 | return t3c_tid->client->handlers[p->opcode] (dev, skb, |
| 648 | t3c_tid->ctx); | 650 | t3c_tid->ctx); |
| @@ -660,7 +662,7 @@ static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb) | |||
| 660 | struct t3c_tid_entry *t3c_tid; | 662 | struct t3c_tid_entry *t3c_tid; |
| 661 | 663 | ||
| 662 | t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); | 664 | t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); |
| 663 | if (t3c_tid->ctx && t3c_tid->client->handlers && | 665 | if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && |
| 664 | t3c_tid->client->handlers[p->opcode]) { | 666 | t3c_tid->client->handlers[p->opcode]) { |
| 665 | return t3c_tid->client->handlers[p->opcode] | 667 | return t3c_tid->client->handlers[p->opcode] |
| 666 | (dev, skb, t3c_tid->ctx); | 668 | (dev, skb, t3c_tid->ctx); |
| @@ -689,6 +691,28 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb) | |||
| 689 | } | 691 | } |
| 690 | } | 692 | } |
| 691 | 693 | ||
| 694 | /* | ||
| 695 | * Returns an sk_buff for a reply CPL message of size len. If the input | ||
| 696 | * sk_buff has no other users it is trimmed and reused, otherwise a new buffer | ||
| 697 | * is allocated. The input skb must be of size at least len. Note that this | ||
| 698 | * operation does not destroy the original skb data even if it decides to reuse | ||
| 699 | * the buffer. | ||
| 700 | */ | ||
| 701 | static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len, | ||
| 702 | int gfp) | ||
| 703 | { | ||
| 704 | if (likely(!skb_cloned(skb))) { | ||
| 705 | BUG_ON(skb->len < len); | ||
| 706 | __skb_trim(skb, len); | ||
| 707 | skb_get(skb); | ||
| 708 | } else { | ||
| 709 | skb = alloc_skb(len, gfp); | ||
| 710 | if (skb) | ||
| 711 | __skb_put(skb, len); | ||
| 712 | } | ||
| 713 | return skb; | ||
| 714 | } | ||
| 715 | |||
| 692 | static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb) | 716 | static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb) |
| 693 | { | 717 | { |
| 694 | union opcode_tid *p = cplhdr(skb); | 718 | union opcode_tid *p = cplhdr(skb); |
| @@ -696,30 +720,39 @@ static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb) | |||
| 696 | struct t3c_tid_entry *t3c_tid; | 720 | struct t3c_tid_entry *t3c_tid; |
| 697 | 721 | ||
| 698 | t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); | 722 | t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); |
| 699 | if (t3c_tid->ctx && t3c_tid->client->handlers && | 723 | if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && |
| 700 | t3c_tid->client->handlers[p->opcode]) { | 724 | t3c_tid->client->handlers[p->opcode]) { |
| 701 | return t3c_tid->client->handlers[p->opcode] | 725 | return t3c_tid->client->handlers[p->opcode] |
| 702 | (dev, skb, t3c_tid->ctx); | 726 | (dev, skb, t3c_tid->ctx); |
| 703 | } else { | 727 | } else { |
| 704 | struct cpl_abort_req_rss *req = cplhdr(skb); | 728 | struct cpl_abort_req_rss *req = cplhdr(skb); |
| 705 | struct cpl_abort_rpl *rpl; | 729 | struct cpl_abort_rpl *rpl; |
| 730 | struct sk_buff *reply_skb; | ||
| 731 | unsigned int tid = GET_TID(req); | ||
| 732 | u8 cmd = req->status; | ||
| 733 | |||
| 734 | if (req->status == CPL_ERR_RTX_NEG_ADVICE || | ||
| 735 | req->status == CPL_ERR_PERSIST_NEG_ADVICE) | ||
| 736 | goto out; | ||
| 706 | 737 | ||
| 707 | struct sk_buff *skb = | 738 | reply_skb = cxgb3_get_cpl_reply_skb(skb, |
| 708 | alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC); | 739 | sizeof(struct |
| 709 | if (!skb) { | 740 | cpl_abort_rpl), |
| 741 | GFP_ATOMIC); | ||
| 742 | |||
| 743 | if (!reply_skb) { | ||
| 710 | printk("do_abort_req_rss: couldn't get skb!\n"); | 744 | printk("do_abort_req_rss: couldn't get skb!\n"); |
| 711 | goto out; | 745 | goto out; |
| 712 | } | 746 | } |
| 713 | skb->priority = CPL_PRIORITY_DATA; | 747 | reply_skb->priority = CPL_PRIORITY_DATA; |
| 714 | __skb_put(skb, sizeof(struct cpl_abort_rpl)); | 748 | __skb_put(reply_skb, sizeof(struct cpl_abort_rpl)); |
| 715 | rpl = cplhdr(skb); | 749 | rpl = cplhdr(reply_skb); |
| 716 | rpl->wr.wr_hi = | 750 | rpl->wr.wr_hi = |
| 717 | htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); | 751 | htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); |
| 718 | rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req))); | 752 | rpl->wr.wr_lo = htonl(V_WR_TID(tid)); |
| 719 | OPCODE_TID(rpl) = | 753 | OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid)); |
| 720 | htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req))); | 754 | rpl->cmd = cmd; |
| 721 | rpl->cmd = req->status; | 755 | cxgb3_ofld_send(dev, reply_skb); |
| 722 | cxgb3_ofld_send(dev, skb); | ||
| 723 | out: | 756 | out: |
| 724 | return CPL_RET_BUF_DONE; | 757 | return CPL_RET_BUF_DONE; |
| 725 | } | 758 | } |
| @@ -732,7 +765,7 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb) | |||
| 732 | struct t3c_tid_entry *t3c_tid; | 765 | struct t3c_tid_entry *t3c_tid; |
| 733 | 766 | ||
| 734 | t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); | 767 | t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); |
| 735 | if (t3c_tid->ctx && t3c_tid->client->handlers && | 768 | if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && |
| 736 | t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) { | 769 | t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) { |
| 737 | return t3c_tid->client->handlers[CPL_ACT_ESTABLISH] | 770 | return t3c_tid->client->handlers[CPL_ACT_ESTABLISH] |
| 738 | (dev, skb, t3c_tid->ctx); | 771 | (dev, skb, t3c_tid->ctx); |
| @@ -762,7 +795,7 @@ static int do_term(struct t3cdev *dev, struct sk_buff *skb) | |||
| 762 | struct t3c_tid_entry *t3c_tid; | 795 | struct t3c_tid_entry *t3c_tid; |
| 763 | 796 | ||
| 764 | t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); | 797 | t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); |
| 765 | if (t3c_tid->ctx && t3c_tid->client->handlers && | 798 | if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && |
| 766 | t3c_tid->client->handlers[opcode]) { | 799 | t3c_tid->client->handlers[opcode]) { |
| 767 | return t3c_tid->client->handlers[opcode] (dev, skb, | 800 | return t3c_tid->client->handlers[opcode] (dev, skb, |
| 768 | t3c_tid->ctx); | 801 | t3c_tid->ctx); |
| @@ -961,7 +994,7 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) | |||
| 961 | for (tid = 0; tid < ti->ntids; tid++) { | 994 | for (tid = 0; tid < ti->ntids; tid++) { |
| 962 | te = lookup_tid(ti, tid); | 995 | te = lookup_tid(ti, tid); |
| 963 | BUG_ON(!te); | 996 | BUG_ON(!te); |
| 964 | if (te->ctx && te->client && te->client->redirect) { | 997 | if (te && te->ctx && te->client && te->client->redirect) { |
| 965 | update_tcb = te->client->redirect(te->ctx, old, new, e); | 998 | update_tcb = te->client->redirect(te->ctx, old, new, e); |
| 966 | if (update_tcb) { | 999 | if (update_tcb) { |
| 967 | l2t_hold(L2DATA(tdev), e); | 1000 | l2t_hold(L2DATA(tdev), e); |
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index d83f075ef2d7..fb485d0a43d8 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
| @@ -1523,19 +1523,25 @@ static int mac_intr_handler(struct adapter *adap, unsigned int idx) | |||
| 1523 | */ | 1523 | */ |
| 1524 | int t3_phy_intr_handler(struct adapter *adapter) | 1524 | int t3_phy_intr_handler(struct adapter *adapter) |
| 1525 | { | 1525 | { |
| 1526 | static const int intr_gpio_bits[] = { 8, 0x20 }; | 1526 | u32 mask, gpi = adapter_info(adapter)->gpio_intr; |
| 1527 | |||
| 1528 | u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE); | 1527 | u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE); |
| 1529 | 1528 | ||
| 1530 | for_each_port(adapter, i) { | 1529 | for_each_port(adapter, i) { |
| 1531 | if (cause & intr_gpio_bits[i]) { | 1530 | struct port_info *p = adap2pinfo(adapter, i); |
| 1532 | struct cphy *phy = &adap2pinfo(adapter, i)->phy; | 1531 | |
| 1533 | int phy_cause = phy->ops->intr_handler(phy); | 1532 | mask = gpi - (gpi & (gpi - 1)); |
| 1533 | gpi -= mask; | ||
| 1534 | |||
| 1535 | if (!(p->port_type->caps & SUPPORTED_IRQ)) | ||
| 1536 | continue; | ||
| 1537 | |||
| 1538 | if (cause & mask) { | ||
| 1539 | int phy_cause = p->phy.ops->intr_handler(&p->phy); | ||
| 1534 | 1540 | ||
| 1535 | if (phy_cause & cphy_cause_link_change) | 1541 | if (phy_cause & cphy_cause_link_change) |
| 1536 | t3_link_changed(adapter, i); | 1542 | t3_link_changed(adapter, i); |
| 1537 | if (phy_cause & cphy_cause_fifo_error) | 1543 | if (phy_cause & cphy_cause_fifo_error) |
| 1538 | phy->fifo_errors++; | 1544 | p->phy.fifo_errors++; |
| 1539 | } | 1545 | } |
| 1540 | } | 1546 | } |
| 1541 | 1547 | ||
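[Annotation, not part of the patch] gpi & (gpi - 1) clears the lowest set bit, so gpi - (gpi & (gpi - 1)) isolates it; the rewritten loop peels one GPIO interrupt bit per port instead of indexing a fixed two-entry table. The trick in isolation:

    u32 gpi  = 0x28;                      /* bits 3 and 5 set               */
    u32 mask = gpi - (gpi & (gpi - 1));   /* 0x28 - 0x20 = 0x08 (bit 3)     */
    gpi     -= mask;                      /* 0x20 remains for the next port */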
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 4a009b7b1777..ac36152c68bf 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
| @@ -49,7 +49,7 @@ | |||
| 49 | #include "sky2.h" | 49 | #include "sky2.h" |
| 50 | 50 | ||
| 51 | #define DRV_NAME "sky2" | 51 | #define DRV_NAME "sky2" |
| 52 | #define DRV_VERSION "1.13" | 52 | #define DRV_VERSION "1.14" |
| 53 | #define PFX DRV_NAME " " | 53 | #define PFX DRV_NAME " " |
| 54 | 54 | ||
| 55 | /* | 55 | /* |
| @@ -123,7 +123,10 @@ static const struct pci_device_id sky2_id_table[] = { | |||
| 123 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */ | 123 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */ |
| 124 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */ | 124 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */ |
| 125 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */ | 125 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */ |
| 126 | #ifdef broken | ||
| 127 | /* This device causes data corruption problems that are not resolved */ | ||
| 126 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */ | 128 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */ |
| 129 | #endif | ||
| 127 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */ | 130 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */ |
| 128 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */ | 131 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */ |
| 129 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ | 132 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ |
| @@ -740,12 +743,17 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) | |||
| 740 | if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) { | 743 | if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) { |
| 741 | sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); | 744 | sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); |
| 742 | sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); | 745 | sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); |
| 743 | if (hw->dev[port]->mtu > ETH_DATA_LEN) { | 746 | |
| 744 | /* set Tx GMAC FIFO Almost Empty Threshold */ | 747 | /* set Tx GMAC FIFO Almost Empty Threshold */ |
| 745 | sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), 0x180); | 748 | sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), |
| 746 | /* Disable Store & Forward mode for TX */ | 749 | (ECU_JUMBO_WM << 16) | ECU_AE_THR); |
| 747 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS); | 750 | |
| 748 | } | 751 | if (hw->dev[port]->mtu > ETH_DATA_LEN) |
| 752 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | ||
| 753 | TX_JUMBO_ENA | TX_STFW_DIS); | ||
| 754 | else | ||
| 755 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | ||
| 756 | TX_JUMBO_DIS | TX_STFW_ENA); | ||
| 749 | } | 757 | } |
| 750 | 758 | ||
| 751 | } | 759 | } |
| @@ -1278,7 +1286,7 @@ static int sky2_up(struct net_device *dev) | |||
| 1278 | /* Set almost empty threshold */ | 1286 | /* Set almost empty threshold */ |
| 1279 | if (hw->chip_id == CHIP_ID_YUKON_EC_U | 1287 | if (hw->chip_id == CHIP_ID_YUKON_EC_U |
| 1280 | && hw->chip_rev == CHIP_REV_YU_EC_U_A0) | 1288 | && hw->chip_rev == CHIP_REV_YU_EC_U_A0) |
| 1281 | sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0); | 1289 | sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV); |
| 1282 | 1290 | ||
| 1283 | sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, | 1291 | sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, |
| 1284 | TX_RING_SIZE - 1); | 1292 | TX_RING_SIZE - 1); |
| @@ -1584,13 +1592,6 @@ static int sky2_down(struct net_device *dev) | |||
| 1584 | sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), | 1592 | sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), |
| 1585 | RB_RST_SET | RB_DIS_OP_MD); | 1593 | RB_RST_SET | RB_DIS_OP_MD); |
| 1586 | 1594 | ||
| 1587 | /* WA for dev. #4.209 */ | ||
| 1588 | if (hw->chip_id == CHIP_ID_YUKON_EC_U | ||
| 1589 | && (hw->chip_rev == CHIP_REV_YU_EC_U_A1 || hw->chip_rev == CHIP_REV_YU_EC_U_B0)) | ||
| 1590 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | ||
| 1591 | sky2->speed != SPEED_1000 ? | ||
| 1592 | TX_STFW_ENA : TX_STFW_DIS); | ||
| 1593 | |||
| 1594 | ctrl = gma_read16(hw, port, GM_GP_CTRL); | 1595 | ctrl = gma_read16(hw, port, GM_GP_CTRL); |
| 1595 | ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA); | 1596 | ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA); |
| 1596 | gma_write16(hw, port, GM_GP_CTRL, ctrl); | 1597 | gma_write16(hw, port, GM_GP_CTRL, ctrl); |
| @@ -1890,6 +1891,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
| 1890 | { | 1891 | { |
| 1891 | struct sky2_port *sky2 = netdev_priv(dev); | 1892 | struct sky2_port *sky2 = netdev_priv(dev); |
| 1892 | struct sky2_hw *hw = sky2->hw; | 1893 | struct sky2_hw *hw = sky2->hw; |
| 1894 | unsigned port = sky2->port; | ||
| 1893 | int err; | 1895 | int err; |
| 1894 | u16 ctl, mode; | 1896 | u16 ctl, mode; |
| 1895 | u32 imask; | 1897 | u32 imask; |
| @@ -1897,9 +1899,8 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
| 1897 | if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) | 1899 | if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) |
| 1898 | return -EINVAL; | 1900 | return -EINVAL; |
| 1899 | 1901 | ||
| 1900 | /* TSO on Yukon Ultra and MTU > 1500 not supported */ | 1902 | if (new_mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_FE) |
| 1901 | if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN) | 1903 | return -EINVAL; |
| 1902 | dev->features &= ~NETIF_F_TSO; | ||
| 1903 | 1904 | ||
| 1904 | if (!netif_running(dev)) { | 1905 | if (!netif_running(dev)) { |
| 1905 | dev->mtu = new_mtu; | 1906 | dev->mtu = new_mtu; |
| @@ -1915,8 +1916,18 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
| 1915 | 1916 | ||
| 1916 | synchronize_irq(hw->pdev->irq); | 1917 | synchronize_irq(hw->pdev->irq); |
| 1917 | 1918 | ||
| 1918 | ctl = gma_read16(hw, sky2->port, GM_GP_CTRL); | 1919 | if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) { |
| 1919 | gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); | 1920 | if (new_mtu > ETH_DATA_LEN) { |
| 1921 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | ||
| 1922 | TX_JUMBO_ENA | TX_STFW_DIS); | ||
| 1923 | dev->features &= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM; | ||
| 1924 | } else | ||
| 1925 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | ||
| 1926 | TX_JUMBO_DIS | TX_STFW_ENA); | ||
| 1927 | } | ||
| 1928 | |||
| 1929 | ctl = gma_read16(hw, port, GM_GP_CTRL); | ||
| 1930 | gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); | ||
| 1920 | sky2_rx_stop(sky2); | 1931 | sky2_rx_stop(sky2); |
| 1921 | sky2_rx_clean(sky2); | 1932 | sky2_rx_clean(sky2); |
| 1922 | 1933 | ||
| @@ -1928,9 +1939,9 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
| 1928 | if (dev->mtu > ETH_DATA_LEN) | 1939 | if (dev->mtu > ETH_DATA_LEN) |
| 1929 | mode |= GM_SMOD_JUMBO_ENA; | 1940 | mode |= GM_SMOD_JUMBO_ENA; |
| 1930 | 1941 | ||
| 1931 | gma_write16(hw, sky2->port, GM_SERIAL_MODE, mode); | 1942 | gma_write16(hw, port, GM_SERIAL_MODE, mode); |
| 1932 | 1943 | ||
| 1933 | sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD); | 1944 | sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD); |
| 1934 | 1945 | ||
| 1935 | err = sky2_rx_start(sky2); | 1946 | err = sky2_rx_start(sky2); |
| 1936 | sky2_write32(hw, B0_IMSK, imask); | 1947 | sky2_write32(hw, B0_IMSK, imask); |
| @@ -1938,7 +1949,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
| 1938 | if (err) | 1949 | if (err) |
| 1939 | dev_close(dev); | 1950 | dev_close(dev); |
| 1940 | else { | 1951 | else { |
| 1941 | gma_write16(hw, sky2->port, GM_GP_CTRL, ctl); | 1952 | gma_write16(hw, port, GM_GP_CTRL, ctl); |
| 1942 | 1953 | ||
| 1943 | netif_poll_enable(hw->dev[0]); | 1954 | netif_poll_enable(hw->dev[0]); |
| 1944 | netif_wake_queue(dev); | 1955 | netif_wake_queue(dev); |
| @@ -2340,26 +2351,22 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port) | |||
| 2340 | } | 2351 | } |
| 2341 | } | 2352 | } |
| 2342 | 2353 | ||
| 2343 | /* This should never happen it is a fatal situation */ | 2354 | /* This should never happen it is a bug. */ |
| 2344 | static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port, | 2355 | static void sky2_le_error(struct sky2_hw *hw, unsigned port, |
| 2345 | const char *rxtx, u32 mask) | 2356 | u16 q, unsigned ring_size) |
| 2346 | { | 2357 | { |
| 2347 | struct net_device *dev = hw->dev[port]; | 2358 | struct net_device *dev = hw->dev[port]; |
| 2348 | struct sky2_port *sky2 = netdev_priv(dev); | 2359 | struct sky2_port *sky2 = netdev_priv(dev); |
| 2349 | u32 imask; | 2360 | unsigned idx; |
| 2350 | 2361 | const u64 *le = (q == Q_R1 || q == Q_R2) | |
| 2351 | printk(KERN_ERR PFX "%s: %s descriptor error (hardware problem)\n", | 2362 | ? (u64 *) sky2->rx_le : (u64 *) sky2->tx_le; |
| 2352 | dev ? dev->name : "<not registered>", rxtx); | ||
| 2353 | 2363 | ||
| 2354 | imask = sky2_read32(hw, B0_IMSK); | 2364 | idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX)); |
| 2355 | imask &= ~mask; | 2365 | printk(KERN_ERR PFX "%s: descriptor error q=%#x get=%u [%llx] put=%u\n", |
| 2356 | sky2_write32(hw, B0_IMSK, imask); | 2366 | dev->name, (unsigned) q, idx, (unsigned long long) le[idx], |
| 2367 | (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX))); | ||
| 2357 | 2368 | ||
| 2358 | if (dev) { | 2369 | sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK); |
| 2359 | spin_lock(&sky2->phy_lock); | ||
| 2360 | sky2_link_down(sky2); | ||
| 2361 | spin_unlock(&sky2->phy_lock); | ||
| 2362 | } | ||
| 2363 | } | 2370 | } |
| 2364 | 2371 | ||
| 2365 | /* If idle then force a fake soft NAPI poll once a second | 2372 | /* If idle then force a fake soft NAPI poll once a second |
| @@ -2383,23 +2390,15 @@ static void sky2_idle(unsigned long arg) | |||
| 2383 | mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout)); | 2390 | mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout)); |
| 2384 | } | 2391 | } |
| 2385 | 2392 | ||
| 2386 | 2393 | /* Hardware/software error handling */ | |
| 2387 | static int sky2_poll(struct net_device *dev0, int *budget) | 2394 | static void sky2_err_intr(struct sky2_hw *hw, u32 status) |
| 2388 | { | 2395 | { |
| 2389 | struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; | 2396 | if (net_ratelimit()) |
| 2390 | int work_limit = min(dev0->quota, *budget); | 2397 | dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status); |
| 2391 | int work_done = 0; | ||
| 2392 | u32 status = sky2_read32(hw, B0_Y2_SP_EISR); | ||
| 2393 | 2398 | ||
| 2394 | if (status & Y2_IS_HW_ERR) | 2399 | if (status & Y2_IS_HW_ERR) |
| 2395 | sky2_hw_intr(hw); | 2400 | sky2_hw_intr(hw); |
| 2396 | 2401 | ||
| 2397 | if (status & Y2_IS_IRQ_PHY1) | ||
| 2398 | sky2_phy_intr(hw, 0); | ||
| 2399 | |||
| 2400 | if (status & Y2_IS_IRQ_PHY2) | ||
| 2401 | sky2_phy_intr(hw, 1); | ||
| 2402 | |||
| 2403 | if (status & Y2_IS_IRQ_MAC1) | 2402 | if (status & Y2_IS_IRQ_MAC1) |
| 2404 | sky2_mac_intr(hw, 0); | 2403 | sky2_mac_intr(hw, 0); |
| 2405 | 2404 | ||
| @@ -2407,16 +2406,33 @@ static int sky2_poll(struct net_device *dev0, int *budget) | |||
| 2407 | sky2_mac_intr(hw, 1); | 2406 | sky2_mac_intr(hw, 1); |
| 2408 | 2407 | ||
| 2409 | if (status & Y2_IS_CHK_RX1) | 2408 | if (status & Y2_IS_CHK_RX1) |
| 2410 | sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1); | 2409 | sky2_le_error(hw, 0, Q_R1, RX_LE_SIZE); |
| 2411 | 2410 | ||
| 2412 | if (status & Y2_IS_CHK_RX2) | 2411 | if (status & Y2_IS_CHK_RX2) |
| 2413 | sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2); | 2412 | sky2_le_error(hw, 1, Q_R2, RX_LE_SIZE); |
| 2414 | 2413 | ||
| 2415 | if (status & Y2_IS_CHK_TXA1) | 2414 | if (status & Y2_IS_CHK_TXA1) |
| 2416 | sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1); | 2415 | sky2_le_error(hw, 0, Q_XA1, TX_RING_SIZE); |
| 2417 | 2416 | ||
| 2418 | if (status & Y2_IS_CHK_TXA2) | 2417 | if (status & Y2_IS_CHK_TXA2) |
| 2419 | sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); | 2418 | sky2_le_error(hw, 1, Q_XA2, TX_RING_SIZE); |
| 2419 | } | ||
| 2420 | |||
| 2421 | static int sky2_poll(struct net_device *dev0, int *budget) | ||
| 2422 | { | ||
| 2423 | struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; | ||
| 2424 | int work_limit = min(dev0->quota, *budget); | ||
| 2425 | int work_done = 0; | ||
| 2426 | u32 status = sky2_read32(hw, B0_Y2_SP_EISR); | ||
| 2427 | |||
| 2428 | if (unlikely(status & Y2_IS_ERROR)) | ||
| 2429 | sky2_err_intr(hw, status); | ||
| 2430 | |||
| 2431 | if (status & Y2_IS_IRQ_PHY1) | ||
| 2432 | sky2_phy_intr(hw, 0); | ||
| 2433 | |||
| 2434 | if (status & Y2_IS_IRQ_PHY2) | ||
| 2435 | sky2_phy_intr(hw, 1); | ||
| 2420 | 2436 | ||
| 2421 | work_done = sky2_status_intr(hw, work_limit); | 2437 | work_done = sky2_status_intr(hw, work_limit); |
| 2422 | if (work_done < work_limit) { | 2438 | if (work_done < work_limit) { |
| @@ -2534,16 +2550,14 @@ static void sky2_reset(struct sky2_hw *hw) | |||
| 2534 | int i; | 2550 | int i; |
| 2535 | 2551 | ||
| 2536 | /* disable ASF */ | 2552 | /* disable ASF */ |
| 2537 | if (hw->chip_id <= CHIP_ID_YUKON_EC) { | 2553 | if (hw->chip_id == CHIP_ID_YUKON_EX) { |
| 2538 | if (hw->chip_id == CHIP_ID_YUKON_EX) { | 2554 | status = sky2_read16(hw, HCU_CCSR); |
| 2539 | status = sky2_read16(hw, HCU_CCSR); | 2555 | status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE | |
| 2540 | status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE | | 2556 | HCU_CCSR_UC_STATE_MSK); |
| 2541 | HCU_CCSR_UC_STATE_MSK); | 2557 | sky2_write16(hw, HCU_CCSR, status); |
| 2542 | sky2_write16(hw, HCU_CCSR, status); | 2558 | } else |
| 2543 | } else | 2559 | sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); |
| 2544 | sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); | 2560 | sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE); |
| 2545 | sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE); | ||
| 2546 | } | ||
| 2547 | 2561 | ||
| 2548 | /* do a SW reset */ | 2562 | /* do a SW reset */ |
| 2549 | sky2_write8(hw, B0_CTST, CS_RST_SET); | 2563 | sky2_write8(hw, B0_CTST, CS_RST_SET); |
| @@ -3328,6 +3342,36 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
| 3328 | regs->len - B3_RI_WTO_R1); | 3342 | regs->len - B3_RI_WTO_R1); |
| 3329 | } | 3343 | } |
| 3330 | 3344 | ||
| 3345 | /* In order to do Jumbo packets on these chips, need to turn off the | ||
| 3346 | * transmit store/forward. Therefore checksum offload won't work. | ||
| 3347 | */ | ||
| 3348 | static int no_tx_offload(struct net_device *dev) | ||
| 3349 | { | ||
| 3350 | const struct sky2_port *sky2 = netdev_priv(dev); | ||
| 3351 | const struct sky2_hw *hw = sky2->hw; | ||
| 3352 | |||
| 3353 | return dev->mtu > ETH_DATA_LEN && | ||
| 3354 | (hw->chip_id == CHIP_ID_YUKON_EX | ||
| 3355 | || hw->chip_id == CHIP_ID_YUKON_EC_U); | ||
| 3356 | } | ||
| 3357 | |||
| 3358 | static int sky2_set_tx_csum(struct net_device *dev, u32 data) | ||
| 3359 | { | ||
| 3360 | if (data && no_tx_offload(dev)) | ||
| 3361 | return -EINVAL; | ||
| 3362 | |||
| 3363 | return ethtool_op_set_tx_csum(dev, data); | ||
| 3364 | } | ||
| 3365 | |||
| 3366 | |||
| 3367 | static int sky2_set_tso(struct net_device *dev, u32 data) | ||
| 3368 | { | ||
| 3369 | if (data && no_tx_offload(dev)) | ||
| 3370 | return -EINVAL; | ||
| 3371 | |||
| 3372 | return ethtool_op_set_tso(dev, data); | ||
| 3373 | } | ||
| 3374 | |||
| 3331 | static const struct ethtool_ops sky2_ethtool_ops = { | 3375 | static const struct ethtool_ops sky2_ethtool_ops = { |
| 3332 | .get_settings = sky2_get_settings, | 3376 | .get_settings = sky2_get_settings, |
| 3333 | .set_settings = sky2_set_settings, | 3377 | .set_settings = sky2_set_settings, |
| @@ -3343,9 +3387,9 @@ static const struct ethtool_ops sky2_ethtool_ops = { | |||
| 3343 | .get_sg = ethtool_op_get_sg, | 3387 | .get_sg = ethtool_op_get_sg, |
| 3344 | .set_sg = ethtool_op_set_sg, | 3388 | .set_sg = ethtool_op_set_sg, |
| 3345 | .get_tx_csum = ethtool_op_get_tx_csum, | 3389 | .get_tx_csum = ethtool_op_get_tx_csum, |
| 3346 | .set_tx_csum = ethtool_op_set_tx_csum, | 3390 | .set_tx_csum = sky2_set_tx_csum, |
| 3347 | .get_tso = ethtool_op_get_tso, | 3391 | .get_tso = ethtool_op_get_tso, |
| 3348 | .set_tso = ethtool_op_set_tso, | 3392 | .set_tso = sky2_set_tso, |
| 3349 | .get_rx_csum = sky2_get_rx_csum, | 3393 | .get_rx_csum = sky2_get_rx_csum, |
| 3350 | .set_rx_csum = sky2_set_rx_csum, | 3394 | .set_rx_csum = sky2_set_rx_csum, |
| 3351 | .get_strings = sky2_get_strings, | 3395 | .get_strings = sky2_get_strings, |
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index ac24bdc42976..5efb5afc45ba 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
| @@ -288,6 +288,9 @@ enum { | |||
| 288 | | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1, | 288 | | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1, |
| 289 | Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 | 289 | Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 |
| 290 | | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, | 290 | | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, |
| 291 | Y2_IS_ERROR = Y2_IS_HW_ERR | | ||
| 292 | Y2_IS_IRQ_MAC1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1 | | ||
| 293 | Y2_IS_IRQ_MAC2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, | ||
| 291 | }; | 294 | }; |
| 292 | 295 | ||
| 293 | /* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ | 296 | /* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ |
| @@ -738,6 +741,11 @@ enum { | |||
| 738 | TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */ | 741 | TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */ |
| 739 | TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */ | 742 | TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */ |
| 740 | TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */ | 743 | TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */ |
| 744 | |||
| 745 | /* Threshold values for Yukon-EC Ultra and Extreme */ | ||
| 746 | ECU_AE_THR = 0x0070, /* Almost Empty Threshold */ | ||
| 747 | ECU_TXFF_LEV = 0x01a0, /* Tx BMU FIFO Level */ | ||
| 748 | ECU_JUMBO_WM = 0x0080, /* Jumbo Mode Watermark */ | ||
| 741 | }; | 749 | }; |
| 742 | 750 | ||
| 743 | /* Descriptor Poll Timer Registers */ | 751 | /* Descriptor Poll Timer Registers */ |
| @@ -1631,6 +1639,9 @@ enum { | |||
| 1631 | TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */ | 1639 | TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */ |
| 1632 | TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */ | 1640 | TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */ |
| 1633 | 1641 | ||
| 1642 | TX_JUMBO_ENA = 1<<23,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */ | ||
| 1643 | TX_JUMBO_DIS = 1<<22,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */ | ||
| 1644 | |||
| 1634 | GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */ | 1645 | GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */ |
| 1635 | GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */ | 1646 | GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */ |
| 1636 | GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */ | 1647 | GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */ |
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 3b91af89e4c7..e3019d52c30f 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
| @@ -719,7 +719,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, | |||
| 719 | SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; | 719 | SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; |
| 720 | spin_unlock_irqrestore(&chain->lock, flags); | 720 | spin_unlock_irqrestore(&chain->lock, flags); |
| 721 | 721 | ||
| 722 | if (skb->protocol == htons(ETH_P_IP)) | 722 | if (skb->protocol == htons(ETH_P_IP) && skb->ip_summed == CHECKSUM_PARTIAL) |
| 723 | switch (skb->nh.iph->protocol) { | 723 | switch (skb->nh.iph->protocol) { |
| 724 | case IPPROTO_TCP: | 724 | case IPPROTO_TCP: |
| 725 | hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; | 725 | hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; |
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 220abce63e4a..b10211c420ef 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
| @@ -77,7 +77,7 @@ static void s3c24xx_spi_chipsel(struct spi_device *spi, int value) | |||
| 77 | 77 | ||
| 78 | switch (value) { | 78 | switch (value) { |
| 79 | case BITBANG_CS_INACTIVE: | 79 | case BITBANG_CS_INACTIVE: |
| 80 | hw->pdata->set_cs(hw->pdata, spi->chip_select, cspol^1); | 80 | hw->set_cs(hw->pdata, spi->chip_select, cspol^1); |
| 81 | break; | 81 | break; |
| 82 | 82 | ||
| 83 | case BITBANG_CS_ACTIVE: | 83 | case BITBANG_CS_ACTIVE: |
| @@ -98,7 +98,7 @@ static void s3c24xx_spi_chipsel(struct spi_device *spi, int value) | |||
| 98 | /* write new configration */ | 98 | /* write new configration */ |
| 99 | 99 | ||
| 100 | writeb(spcon, hw->regs + S3C2410_SPCON); | 100 | writeb(spcon, hw->regs + S3C2410_SPCON); |
| 101 | hw->pdata->set_cs(hw->pdata, spi->chip_select, cspol); | 101 | hw->set_cs(hw->pdata, spi->chip_select, cspol); |
| 102 | 102 | ||
| 103 | break; | 103 | break; |
| 104 | } | 104 | } |
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
| @@ -1244,13 +1244,17 @@ EXPORT_SYMBOL(set_binfmt); | |||
| 1244 | * name into corename, which must have space for at least | 1244 | * name into corename, which must have space for at least |
| 1245 | * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. | 1245 | * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. |
| 1246 | */ | 1246 | */ |
| 1247 | static void format_corename(char *corename, const char *pattern, long signr) | 1247 | static int format_corename(char *corename, const char *pattern, long signr) |
| 1248 | { | 1248 | { |
| 1249 | const char *pat_ptr = pattern; | 1249 | const char *pat_ptr = pattern; |
| 1250 | char *out_ptr = corename; | 1250 | char *out_ptr = corename; |
| 1251 | char *const out_end = corename + CORENAME_MAX_SIZE; | 1251 | char *const out_end = corename + CORENAME_MAX_SIZE; |
| 1252 | int rc; | 1252 | int rc; |
| 1253 | int pid_in_pattern = 0; | 1253 | int pid_in_pattern = 0; |
| 1254 | int ispipe = 0; | ||
| 1255 | |||
| 1256 | if (*pattern == '|') | ||
| 1257 | ispipe = 1; | ||
| 1254 | 1258 | ||
| 1255 | /* Repeat as long as we have more pattern to process and more output | 1259 | /* Repeat as long as we have more pattern to process and more output |
| 1256 | space */ | 1260 | space */ |
| @@ -1341,8 +1345,8 @@ static void format_corename(char *corename, const char *pattern, long signr) | |||
| 1341 | * | 1345 | * |
| 1342 | * If core_pattern does not include a %p (as is the default) | 1346 | * If core_pattern does not include a %p (as is the default) |
| 1343 | * and core_uses_pid is set, then .%pid will be appended to | 1347 | * and core_uses_pid is set, then .%pid will be appended to |
| 1344 | * the filename */ | 1348 | * the filename. Do not do this for piped commands. */ |
| 1345 | if (!pid_in_pattern | 1349 | if (!ispipe && !pid_in_pattern |
| 1346 | && (core_uses_pid || atomic_read(¤t->mm->mm_users) != 1)) { | 1350 | && (core_uses_pid || atomic_read(¤t->mm->mm_users) != 1)) { |
| 1347 | rc = snprintf(out_ptr, out_end - out_ptr, | 1351 | rc = snprintf(out_ptr, out_end - out_ptr, |
| 1348 | ".%d", current->tgid); | 1352 | ".%d", current->tgid); |
| @@ -1350,8 +1354,9 @@ static void format_corename(char *corename, const char *pattern, long signr) | |||
| 1350 | goto out; | 1354 | goto out; |
| 1351 | out_ptr += rc; | 1355 | out_ptr += rc; |
| 1352 | } | 1356 | } |
| 1353 | out: | 1357 | out: |
| 1354 | *out_ptr = 0; | 1358 | *out_ptr = 0; |
| 1359 | return ispipe; | ||
| 1355 | } | 1360 | } |
| 1356 | 1361 | ||
| 1357 | static void zap_process(struct task_struct *start) | 1362 | static void zap_process(struct task_struct *start) |
| @@ -1502,16 +1507,15 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) | |||
| 1502 | * uses lock_kernel() | 1507 | * uses lock_kernel() |
| 1503 | */ | 1508 | */ |
| 1504 | lock_kernel(); | 1509 | lock_kernel(); |
| 1505 | format_corename(corename, core_pattern, signr); | 1510 | ispipe = format_corename(corename, core_pattern, signr); |
| 1506 | unlock_kernel(); | 1511 | unlock_kernel(); |
| 1507 | if (corename[0] == '|') { | 1512 | if (ispipe) { |
| 1508 | /* SIGPIPE can happen, but it's just never processed */ | 1513 | /* SIGPIPE can happen, but it's just never processed */ |
| 1509 | if(call_usermodehelper_pipe(corename+1, NULL, NULL, &file)) { | 1514 | if(call_usermodehelper_pipe(corename+1, NULL, NULL, &file)) { |
| 1510 | printk(KERN_INFO "Core dump to %s pipe failed\n", | 1515 | printk(KERN_INFO "Core dump to %s pipe failed\n", |
| 1511 | corename); | 1516 | corename); |
| 1512 | goto fail_unlock; | 1517 | goto fail_unlock; |
| 1513 | } | 1518 | } |
| 1514 | ispipe = 1; | ||
| 1515 | } else | 1519 | } else |
| 1516 | file = filp_open(corename, | 1520 | file = filp_open(corename, |
| 1517 | O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, | 1521 | O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, |
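A minimal userspace sketch of the new fs/exec.c control flow may help here: format_corename() now reports whether core_pattern names a pipe helper, so do_coredump() no longer re-inspects corename[0], and the ".pid" suffix is only appended for plain file patterns. The helper below only mirrors the ispipe/suffix logic; the %p handling, buffer size and pid value are simplified assumptions, not the kernel code.

    /* Standalone sketch (not kernel code) of the pipe-aware corename logic. */
    #include <stdio.h>
    #include <string.h>

    #define CORENAME_MAX_SIZE 128

    /* returns 1 if the pattern is a pipe ("|helper ..."), 0 otherwise */
    static int format_corename_sketch(char *corename, const char *pattern,
                                      int pid, int core_uses_pid)
    {
        int ispipe = (pattern[0] == '|');
        int pid_in_pattern = (strstr(pattern, "%p") != NULL);
        size_t len;

        snprintf(corename, CORENAME_MAX_SIZE, "%s", pattern);
        len = strlen(corename);

        /* Do not append ".pid" when the core is handed to a pipe helper. */
        if (!ispipe && !pid_in_pattern && core_uses_pid)
            snprintf(corename + len, CORENAME_MAX_SIZE - len, ".%d", pid);

        return ispipe;
    }

    int main(void)
    {
        char name[CORENAME_MAX_SIZE];

        printf("%d %s\n", format_corename_sketch(name, "core", 1234, 1), name);
        printf("%d %s\n",
               format_corename_sketch(name, "|/usr/bin/helper %p", 1234, 1), name);
        return 0;
    }

The first call prints "0 core.1234"; the second prints the pipe command untouched with ispipe set, which is the behaviour the caller now keys off.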
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 013d7afe7cde..f18b79122fa3 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c | |||
| @@ -601,7 +601,7 @@ static void ufs_set_inode_ops(struct inode *inode) | |||
| 601 | ufs_get_inode_dev(inode->i_sb, UFS_I(inode))); | 601 | ufs_get_inode_dev(inode->i_sb, UFS_I(inode))); |
| 602 | } | 602 | } |
| 603 | 603 | ||
| 604 | static void ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode) | 604 | static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode) |
| 605 | { | 605 | { |
| 606 | struct ufs_inode_info *ufsi = UFS_I(inode); | 606 | struct ufs_inode_info *ufsi = UFS_I(inode); |
| 607 | struct super_block *sb = inode->i_sb; | 607 | struct super_block *sb = inode->i_sb; |
| @@ -613,8 +613,10 @@ static void ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode) | |||
| 613 | */ | 613 | */ |
| 614 | inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode); | 614 | inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode); |
| 615 | inode->i_nlink = fs16_to_cpu(sb, ufs_inode->ui_nlink); | 615 | inode->i_nlink = fs16_to_cpu(sb, ufs_inode->ui_nlink); |
| 616 | if (inode->i_nlink == 0) | 616 | if (inode->i_nlink == 0) { |
| 617 | ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); | 617 | ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); |
| 618 | return -1; | ||
| 619 | } | ||
| 618 | 620 | ||
| 619 | /* | 621 | /* |
| 620 | * Linux now has 32-bit uid and gid, so we can support EFT. | 622 | * Linux now has 32-bit uid and gid, so we can support EFT. |
| @@ -643,9 +645,10 @@ static void ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode) | |||
| 643 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) | 645 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) |
| 644 | ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i]; | 646 | ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i]; |
| 645 | } | 647 | } |
| 648 | return 0; | ||
| 646 | } | 649 | } |
| 647 | 650 | ||
| 648 | static void ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) | 651 | static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) |
| 649 | { | 652 | { |
| 650 | struct ufs_inode_info *ufsi = UFS_I(inode); | 653 | struct ufs_inode_info *ufsi = UFS_I(inode); |
| 651 | struct super_block *sb = inode->i_sb; | 654 | struct super_block *sb = inode->i_sb; |
| @@ -658,8 +661,10 @@ static void ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) | |||
| 658 | */ | 661 | */ |
| 659 | inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode); | 662 | inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode); |
| 660 | inode->i_nlink = fs16_to_cpu(sb, ufs2_inode->ui_nlink); | 663 | inode->i_nlink = fs16_to_cpu(sb, ufs2_inode->ui_nlink); |
| 661 | if (inode->i_nlink == 0) | 664 | if (inode->i_nlink == 0) { |
| 662 | ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); | 665 | ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); |
| 666 | return -1; | ||
| 667 | } | ||
| 663 | 668 | ||
| 664 | /* | 669 | /* |
| 665 | * Linux now has 32-bit uid and gid, so we can support EFT. | 670 | * Linux now has 32-bit uid and gid, so we can support EFT. |
| @@ -690,6 +695,7 @@ static void ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) | |||
| 690 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) | 695 | for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) |
| 691 | ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i]; | 696 | ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i]; |
| 692 | } | 697 | } |
| 698 | return 0; | ||
| 693 | } | 699 | } |
| 694 | 700 | ||
| 695 | void ufs_read_inode(struct inode * inode) | 701 | void ufs_read_inode(struct inode * inode) |
| @@ -698,6 +704,7 @@ void ufs_read_inode(struct inode * inode) | |||
| 698 | struct super_block * sb; | 704 | struct super_block * sb; |
| 699 | struct ufs_sb_private_info * uspi; | 705 | struct ufs_sb_private_info * uspi; |
| 700 | struct buffer_head * bh; | 706 | struct buffer_head * bh; |
| 707 | int err; | ||
| 701 | 708 | ||
| 702 | UFSD("ENTER, ino %lu\n", inode->i_ino); | 709 | UFSD("ENTER, ino %lu\n", inode->i_ino); |
| 703 | 710 | ||
| @@ -720,14 +727,17 @@ void ufs_read_inode(struct inode * inode) | |||
| 720 | if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { | 727 | if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { |
| 721 | struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data; | 728 | struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data; |
| 722 | 729 | ||
| 723 | ufs2_read_inode(inode, | 730 | err = ufs2_read_inode(inode, |
| 724 | ufs2_inode + ufs_inotofsbo(inode->i_ino)); | 731 | ufs2_inode + ufs_inotofsbo(inode->i_ino)); |
| 725 | } else { | 732 | } else { |
| 726 | struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data; | 733 | struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data; |
| 727 | 734 | ||
| 728 | ufs1_read_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino)); | 735 | err = ufs1_read_inode(inode, |
| 736 | ufs_inode + ufs_inotofsbo(inode->i_ino)); | ||
| 729 | } | 737 | } |
| 730 | 738 | ||
| 739 | if (err) | ||
| 740 | goto bad_inode; | ||
| 731 | inode->i_version++; | 741 | inode->i_version++; |
| 732 | ufsi->i_lastfrag = | 742 | ufsi->i_lastfrag = |
| 733 | (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; | 743 | (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; |
| @@ -888,6 +898,8 @@ void ufs_delete_inode (struct inode * inode) | |||
| 888 | loff_t old_i_size; | 898 | loff_t old_i_size; |
| 889 | 899 | ||
| 890 | truncate_inode_pages(&inode->i_data, 0); | 900 | truncate_inode_pages(&inode->i_data, 0); |
| 901 | if (is_bad_inode(inode)) | ||
| 902 | goto no_delete; | ||
| 891 | /*UFS_I(inode)->i_dtime = CURRENT_TIME;*/ | 903 | /*UFS_I(inode)->i_dtime = CURRENT_TIME;*/ |
| 892 | lock_kernel(); | 904 | lock_kernel(); |
| 893 | mark_inode_dirty(inode); | 905 | mark_inode_dirty(inode); |
| @@ -898,4 +910,7 @@ void ufs_delete_inode (struct inode * inode) | |||
| 898 | ufs_warning(inode->i_sb, __FUNCTION__, "ufs_truncate failed\n"); | 910 | ufs_warning(inode->i_sb, __FUNCTION__, "ufs_truncate failed\n"); |
| 899 | ufs_free_inode (inode); | 911 | ufs_free_inode (inode); |
| 900 | unlock_kernel(); | 912 | unlock_kernel(); |
| 913 | return; | ||
| 914 | no_delete: | ||
| 915 | clear_inode(inode); /* We must guarantee clearing of inode... */ | ||
| 901 | } | 916 | } |
diff --git a/include/asm-alpha/compiler.h b/include/asm-alpha/compiler.h index d2768cc3d7a4..da6bb199839c 100644 --- a/include/asm-alpha/compiler.h +++ b/include/asm-alpha/compiler.h | |||
| @@ -17,9 +17,6 @@ | |||
| 17 | # define __kernel_extbl(val, shift) __builtin_alpha_extbl(val, shift) | 17 | # define __kernel_extbl(val, shift) __builtin_alpha_extbl(val, shift) |
| 18 | # define __kernel_extwl(val, shift) __builtin_alpha_extwl(val, shift) | 18 | # define __kernel_extwl(val, shift) __builtin_alpha_extwl(val, shift) |
| 19 | # define __kernel_cmpbge(a, b) __builtin_alpha_cmpbge(a, b) | 19 | # define __kernel_cmpbge(a, b) __builtin_alpha_cmpbge(a, b) |
| 20 | # define __kernel_cttz(x) __builtin_ctzl(x) | ||
| 21 | # define __kernel_ctlz(x) __builtin_clzl(x) | ||
| 22 | # define __kernel_ctpop(x) __builtin_popcountl(x) | ||
| 23 | #else | 20 | #else |
| 24 | # define __kernel_insbl(val, shift) \ | 21 | # define __kernel_insbl(val, shift) \ |
| 25 | ({ unsigned long __kir; \ | 22 | ({ unsigned long __kir; \ |
| @@ -49,17 +46,39 @@ | |||
| 49 | ({ unsigned long __kir; \ | 46 | ({ unsigned long __kir; \ |
| 50 | __asm__("cmpbge %r2,%1,%0" : "=r"(__kir) : "rI"(b), "rJ"(a)); \ | 47 | __asm__("cmpbge %r2,%1,%0" : "=r"(__kir) : "rI"(b), "rJ"(a)); \ |
| 51 | __kir; }) | 48 | __kir; }) |
| 49 | #endif | ||
| 50 | |||
| 51 | #ifdef __alpha_cix__ | ||
| 52 | # if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3 | ||
| 53 | # define __kernel_cttz(x) __builtin_ctzl(x) | ||
| 54 | # define __kernel_ctlz(x) __builtin_clzl(x) | ||
| 55 | # define __kernel_ctpop(x) __builtin_popcountl(x) | ||
| 56 | # else | ||
| 57 | # define __kernel_cttz(x) \ | ||
| 58 | ({ unsigned long __kir; \ | ||
| 59 | __asm__("cttz %1,%0" : "=r"(__kir) : "r"(x)); \ | ||
| 60 | __kir; }) | ||
| 61 | # define __kernel_ctlz(x) \ | ||
| 62 | ({ unsigned long __kir; \ | ||
| 63 | __asm__("ctlz %1,%0" : "=r"(__kir) : "r"(x)); \ | ||
| 64 | __kir; }) | ||
| 65 | # define __kernel_ctpop(x) \ | ||
| 66 | ({ unsigned long __kir; \ | ||
| 67 | __asm__("ctpop %1,%0" : "=r"(__kir) : "r"(x)); \ | ||
| 68 | __kir; }) | ||
| 69 | # endif | ||
| 70 | #else | ||
| 52 | # define __kernel_cttz(x) \ | 71 | # define __kernel_cttz(x) \ |
| 53 | ({ unsigned long __kir; \ | 72 | ({ unsigned long __kir; \ |
| 54 | __asm__("cttz %1,%0" : "=r"(__kir) : "r"(x)); \ | 73 | __asm__(".arch ev67; cttz %1,%0" : "=r"(__kir) : "r"(x)); \ |
| 55 | __kir; }) | 74 | __kir; }) |
| 56 | # define __kernel_ctlz(x) \ | 75 | # define __kernel_ctlz(x) \ |
| 57 | ({ unsigned long __kir; \ | 76 | ({ unsigned long __kir; \ |
| 58 | __asm__("ctlz %1,%0" : "=r"(__kir) : "r"(x)); \ | 77 | __asm__(".arch ev67; ctlz %1,%0" : "=r"(__kir) : "r"(x)); \ |
| 59 | __kir; }) | 78 | __kir; }) |
| 60 | # define __kernel_ctpop(x) \ | 79 | # define __kernel_ctpop(x) \ |
| 61 | ({ unsigned long __kir; \ | 80 | ({ unsigned long __kir; \ |
| 62 | __asm__("ctpop %1,%0" : "=r"(__kir) : "r"(x)); \ | 81 | __asm__(".arch ev67; ctpop %1,%0" : "=r"(__kir) : "r"(x)); \ |
| 63 | __kir; }) | 82 | __kir; }) |
| 64 | #endif | 83 | #endif |
| 65 | 84 | ||
| @@ -78,16 +97,20 @@ | |||
| 78 | #else | 97 | #else |
| 79 | #define __kernel_ldbu(mem) \ | 98 | #define __kernel_ldbu(mem) \ |
| 80 | ({ unsigned char __kir; \ | 99 | ({ unsigned char __kir; \ |
| 81 | __asm__("ldbu %0,%1" : "=r"(__kir) : "m"(mem)); \ | 100 | __asm__(".arch ev56; \ |
| 101 | ldbu %0,%1" : "=r"(__kir) : "m"(mem)); \ | ||
| 82 | __kir; }) | 102 | __kir; }) |
| 83 | #define __kernel_ldwu(mem) \ | 103 | #define __kernel_ldwu(mem) \ |
| 84 | ({ unsigned short __kir; \ | 104 | ({ unsigned short __kir; \ |
| 85 | __asm__("ldwu %0,%1" : "=r"(__kir) : "m"(mem)); \ | 105 | __asm__(".arch ev56; \ |
| 106 | ldwu %0,%1" : "=r"(__kir) : "m"(mem)); \ | ||
| 86 | __kir; }) | 107 | __kir; }) |
| 87 | #define __kernel_stb(val,mem) \ | 108 | #define __kernel_stb(val,mem) \ |
| 88 | __asm__("stb %1,%0" : "=m"(mem) : "r"(val)) | 109 | __asm__(".arch ev56; \ |
| 89 | #define __kernel_stw(val,mem) \ | 110 | stb %1,%0" : "=m"(mem) : "r"(val)) |
| 90 | __asm__("stw %1,%0" : "=m"(mem) : "r"(val)) | 111 | #define __kernel_stw(val,mem) \ |
| 112 | __asm__(".arch ev56; \ | ||
| 113 | stw %1,%0" : "=m"(mem) : "r"(val)) | ||
| 91 | #endif | 114 | #endif |
| 92 | 115 | ||
| 93 | #ifdef __KERNEL__ | 116 | #ifdef __KERNEL__ |
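For a quick off-target sanity check, the three compiler builtins that the new __alpha_cix__ branch selects (__builtin_ctzl, __builtin_clzl, __builtin_popcountl) behave the same on any modern GCC or Clang host; the program below is a standalone illustration with an arbitrary sample value, not Alpha kernel code, and says nothing about the .arch ev67 asm fallback.

    #include <stdio.h>

    int main(void)
    {
        unsigned long x = 0x00f0000000000008UL;

        printf("cttz  = %d\n", __builtin_ctzl(x));      /* trailing zeros: 3 */
        printf("ctlz  = %d\n", __builtin_clzl(x));      /* leading zeros: 8 */
        printf("ctpop = %d\n", __builtin_popcountl(x)); /* set bits: 5 */
        return 0;
    }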
diff --git a/include/asm-alpha/core_mcpcia.h b/include/asm-alpha/core_mcpcia.h index 980a3c51b18e..525b4f6a7ace 100644 --- a/include/asm-alpha/core_mcpcia.h +++ b/include/asm-alpha/core_mcpcia.h | |||
| @@ -72,6 +72,8 @@ | |||
| 72 | * | 72 | * |
| 73 | */ | 73 | */ |
| 74 | 74 | ||
| 75 | #define MCPCIA_MAX_HOSES 4 | ||
| 76 | |||
| 75 | #define MCPCIA_MID(m) ((unsigned long)(m) << 33) | 77 | #define MCPCIA_MID(m) ((unsigned long)(m) << 33) |
| 76 | 78 | ||
| 77 | /* Dodge has PCI0 and PCI1 at MID 4 and 5 respectively. | 79 | /* Dodge has PCI0 and PCI1 at MID 4 and 5 respectively. |
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h index 24bdcc8b63aa..21a86f1a05b3 100644 --- a/include/asm-alpha/io.h +++ b/include/asm-alpha/io.h | |||
| @@ -113,6 +113,7 @@ static inline unsigned long virt_to_bus(void *address) | |||
| 113 | unsigned long bus = phys + __direct_map_base; | 113 | unsigned long bus = phys + __direct_map_base; |
| 114 | return phys <= __direct_map_size ? bus : 0; | 114 | return phys <= __direct_map_size ? bus : 0; |
| 115 | } | 115 | } |
| 116 | #define isa_virt_to_bus virt_to_bus | ||
| 116 | 117 | ||
| 117 | static inline void *bus_to_virt(unsigned long address) | 118 | static inline void *bus_to_virt(unsigned long address) |
| 118 | { | 119 | { |
diff --git a/include/asm-powerpc/systbl.h b/include/asm-powerpc/systbl.h index 8d853c554631..0b00068313f9 100644 --- a/include/asm-powerpc/systbl.h +++ b/include/asm-powerpc/systbl.h | |||
| @@ -288,7 +288,7 @@ COMPAT_SYS(ppoll) | |||
| 288 | SYSCALL_SPU(unshare) | 288 | SYSCALL_SPU(unshare) |
| 289 | SYSCALL_SPU(splice) | 289 | SYSCALL_SPU(splice) |
| 290 | SYSCALL_SPU(tee) | 290 | SYSCALL_SPU(tee) |
| 291 | SYSCALL_SPU(vmsplice) | 291 | COMPAT_SYS_SPU(vmsplice) |
| 292 | COMPAT_SYS_SPU(openat) | 292 | COMPAT_SYS_SPU(openat) |
| 293 | SYSCALL_SPU(mkdirat) | 293 | SYSCALL_SPU(mkdirat) |
| 294 | SYSCALL_SPU(mknodat) | 294 | SYSCALL_SPU(mknodat) |
diff --git a/include/linux/io.h b/include/linux/io.h index c244a0cc9319..09d351236379 100644 --- a/include/linux/io.h +++ b/include/linux/io.h | |||
| @@ -33,9 +33,22 @@ int ioremap_page_range(unsigned long addr, unsigned long end, | |||
| 33 | /* | 33 | /* |
| 34 | * Managed iomap interface | 34 | * Managed iomap interface |
| 35 | */ | 35 | */ |
| 36 | #ifdef CONFIG_HAS_IOPORT | ||
| 36 | void __iomem * devm_ioport_map(struct device *dev, unsigned long port, | 37 | void __iomem * devm_ioport_map(struct device *dev, unsigned long port, |
| 37 | unsigned int nr); | 38 | unsigned int nr); |
| 38 | void devm_ioport_unmap(struct device *dev, void __iomem *addr); | 39 | void devm_ioport_unmap(struct device *dev, void __iomem *addr); |
| 40 | #else | ||
| 41 | static inline void __iomem *devm_ioport_map(struct device *dev, | ||
| 42 | unsigned long port, | ||
| 43 | unsigned int nr) | ||
| 44 | { | ||
| 45 | return NULL; | ||
| 46 | } | ||
| 47 | |||
| 48 | static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr) | ||
| 49 | { | ||
| 50 | } | ||
| 51 | #endif | ||
| 39 | 52 | ||
| 40 | void __iomem * devm_ioremap(struct device *dev, unsigned long offset, | 53 | void __iomem * devm_ioremap(struct device *dev, unsigned long offset, |
| 41 | unsigned long size); | 54 | unsigned long size); |
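The CONFIG_HAS_IOPORT stubs follow the usual pattern of compiling a feature out behind inline no-ops, so callers stay free of #ifdefs and only have to handle a NULL return. A generic standalone sketch of that pattern follows, with made-up names (HAVE_PORT_IO, port_map, port_unmap) rather than the kernel's devm_* API.

    #include <stdio.h>
    #include <stddef.h>

    /* #define HAVE_PORT_IO 1 */   /* flip to use a real implementation */

    #ifdef HAVE_PORT_IO
    void *port_map(unsigned long port, unsigned int nr);
    void port_unmap(void *addr);
    #else
    static inline void *port_map(unsigned long port, unsigned int nr)
    {
        (void)port; (void)nr;
        return NULL;               /* feature absent: always fail */
    }
    static inline void port_unmap(void *addr) { (void)addr; }
    #endif

    int main(void)
    {
        void *io = port_map(0x3f8, 8);

        if (!io)                   /* caller handles failure, no #ifdef needed */
            fprintf(stderr, "port I/O not available\n");
        port_unmap(io);
        return 0;
    }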
diff --git a/include/linux/plist.h b/include/linux/plist.h index b95818a037ad..85de2f055874 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h | |||
| @@ -97,9 +97,9 @@ struct plist_node { | |||
| 97 | #endif | 97 | #endif |
| 98 | 98 | ||
| 99 | /** | 99 | /** |
| 100 | * #PLIST_HEAD_INIT - static struct plist_head initializer | 100 | * PLIST_HEAD_INIT - static struct plist_head initializer |
| 101 | * | ||
| 102 | * @head: struct plist_head variable name | 101 | * @head: struct plist_head variable name |
| 102 | * @_lock: lock to initialize for this list | ||
| 103 | */ | 103 | */ |
| 104 | #define PLIST_HEAD_INIT(head, _lock) \ | 104 | #define PLIST_HEAD_INIT(head, _lock) \ |
| 105 | { \ | 105 | { \ |
| @@ -109,8 +109,7 @@ struct plist_node { | |||
| 109 | } | 109 | } |
| 110 | 110 | ||
| 111 | /** | 111 | /** |
| 112 | * #PLIST_NODE_INIT - static struct plist_node initializer | 112 | * PLIST_NODE_INIT - static struct plist_node initializer |
| 113 | * | ||
| 114 | * @node: struct plist_node variable name | 113 | * @node: struct plist_node variable name |
| 115 | * @__prio: initial node priority | 114 | * @__prio: initial node priority |
| 116 | */ | 115 | */ |
| @@ -122,8 +121,8 @@ struct plist_node { | |||
| 122 | 121 | ||
| 123 | /** | 122 | /** |
| 124 | * plist_head_init - dynamic struct plist_head initializer | 123 | * plist_head_init - dynamic struct plist_head initializer |
| 125 | * | ||
| 126 | * @head: &struct plist_head pointer | 124 | * @head: &struct plist_head pointer |
| 125 | * @lock: list spinlock, remembered for debugging | ||
| 127 | */ | 126 | */ |
| 128 | static inline void | 127 | static inline void |
| 129 | plist_head_init(struct plist_head *head, spinlock_t *lock) | 128 | plist_head_init(struct plist_head *head, spinlock_t *lock) |
| @@ -137,7 +136,6 @@ plist_head_init(struct plist_head *head, spinlock_t *lock) | |||
| 137 | 136 | ||
| 138 | /** | 137 | /** |
| 139 | * plist_node_init - Dynamic struct plist_node initializer | 138 | * plist_node_init - Dynamic struct plist_node initializer |
| 140 | * | ||
| 141 | * @node: &struct plist_node pointer | 139 | * @node: &struct plist_node pointer |
| 142 | * @prio: initial node priority | 140 | * @prio: initial node priority |
| 143 | */ | 141 | */ |
| @@ -152,49 +150,46 @@ extern void plist_del(struct plist_node *node, struct plist_head *head); | |||
| 152 | 150 | ||
| 153 | /** | 151 | /** |
| 154 | * plist_for_each - iterate over the plist | 152 | * plist_for_each - iterate over the plist |
| 155 | * | 153 | * @pos: the type * to use as a loop counter |
| 156 | * @pos1: the type * to use as a loop counter. | 154 | * @head: the head for your list |
| 157 | * @head: the head for your list. | ||
| 158 | */ | 155 | */ |
| 159 | #define plist_for_each(pos, head) \ | 156 | #define plist_for_each(pos, head) \ |
| 160 | list_for_each_entry(pos, &(head)->node_list, plist.node_list) | 157 | list_for_each_entry(pos, &(head)->node_list, plist.node_list) |
| 161 | 158 | ||
| 162 | /** | 159 | /** |
| 163 | * plist_for_each_entry_safe - iterate over a plist of given type safe | 160 | * plist_for_each_safe - iterate safely over a plist of given type |
| 164 | * against removal of list entry | 161 | * @pos: the type * to use as a loop counter |
| 162 | * @n: another type * to use as temporary storage | ||
| 163 | * @head: the head for your list | ||
| 165 | * | 164 | * |
| 166 | * @pos1: the type * to use as a loop counter. | 165 | * Iterate over a plist of given type, safe against removal of list entry. |
| 167 | * @n1: another type * to use as temporary storage | ||
| 168 | * @head: the head for your list. | ||
| 169 | */ | 166 | */ |
| 170 | #define plist_for_each_safe(pos, n, head) \ | 167 | #define plist_for_each_safe(pos, n, head) \ |
| 171 | list_for_each_entry_safe(pos, n, &(head)->node_list, plist.node_list) | 168 | list_for_each_entry_safe(pos, n, &(head)->node_list, plist.node_list) |
| 172 | 169 | ||
| 173 | /** | 170 | /** |
| 174 | * plist_for_each_entry - iterate over list of given type | 171 | * plist_for_each_entry - iterate over list of given type |
| 175 | * | 172 | * @pos: the type * to use as a loop counter |
| 176 | * @pos: the type * to use as a loop counter. | 173 | * @head: the head for your list |
| 177 | * @head: the head for your list. | 174 | * @mem: the name of the list_struct within the struct |
| 178 | * @member: the name of the list_struct within the struct. | ||
| 179 | */ | 175 | */ |
| 180 | #define plist_for_each_entry(pos, head, mem) \ | 176 | #define plist_for_each_entry(pos, head, mem) \ |
| 181 | list_for_each_entry(pos, &(head)->node_list, mem.plist.node_list) | 177 | list_for_each_entry(pos, &(head)->node_list, mem.plist.node_list) |
| 182 | 178 | ||
| 183 | /** | 179 | /** |
| 184 | * plist_for_each_entry_safe - iterate over list of given type safe against | 180 | * plist_for_each_entry_safe - iterate safely over list of given type |
| 185 | * removal of list entry | 181 | * @pos: the type * to use as a loop counter |
| 186 | * | ||
| 187 | * @pos: the type * to use as a loop counter. | ||
| 188 | * @n: another type * to use as temporary storage | 182 | * @n: another type * to use as temporary storage |
| 189 | * @head: the head for your list. | 183 | * @head: the head for your list |
| 190 | * @m: the name of the list_struct within the struct. | 184 | * @m: the name of the list_struct within the struct |
| 185 | * | ||
| 186 | * Iterate over list of given type, safe against removal of list entry. | ||
| 191 | */ | 187 | */ |
| 192 | #define plist_for_each_entry_safe(pos, n, head, m) \ | 188 | #define plist_for_each_entry_safe(pos, n, head, m) \ |
| 193 | list_for_each_entry_safe(pos, n, &(head)->node_list, m.plist.node_list) | 189 | list_for_each_entry_safe(pos, n, &(head)->node_list, m.plist.node_list) |
| 194 | 190 | ||
| 195 | /** | 191 | /** |
| 196 | * plist_head_empty - return !0 if a plist_head is empty | 192 | * plist_head_empty - return !0 if a plist_head is empty |
| 197 | * | ||
| 198 | * @head: &struct plist_head pointer | 193 | * @head: &struct plist_head pointer |
| 199 | */ | 194 | */ |
| 200 | static inline int plist_head_empty(const struct plist_head *head) | 195 | static inline int plist_head_empty(const struct plist_head *head) |
| @@ -204,7 +199,6 @@ static inline int plist_head_empty(const struct plist_head *head) | |||
| 204 | 199 | ||
| 205 | /** | 200 | /** |
| 206 | * plist_node_empty - return !0 if plist_node is not on a list | 201 | * plist_node_empty - return !0 if plist_node is not on a list |
| 207 | * | ||
| 208 | * @node: &struct plist_node pointer | 202 | * @node: &struct plist_node pointer |
| 209 | */ | 203 | */ |
| 210 | static inline int plist_node_empty(const struct plist_node *node) | 204 | static inline int plist_node_empty(const struct plist_node *node) |
| @@ -216,10 +210,9 @@ static inline int plist_node_empty(const struct plist_node *node) | |||
| 216 | 210 | ||
| 217 | /** | 211 | /** |
| 218 | * plist_first_entry - get the struct for the first entry | 212 | * plist_first_entry - get the struct for the first entry |
| 219 | * | 213 | * @head: the &struct plist_head pointer |
| 220 | * @ptr: the &struct plist_head pointer. | 214 | * @type: the type of the struct this is embedded in |
| 221 | * @type: the type of the struct this is embedded in. | 215 | * @member: the name of the list_struct within the struct |
| 222 | * @member: the name of the list_struct within the struct. | ||
| 223 | */ | 216 | */ |
| 224 | #ifdef CONFIG_DEBUG_PI_LIST | 217 | #ifdef CONFIG_DEBUG_PI_LIST |
| 225 | # define plist_first_entry(head, type, member) \ | 218 | # define plist_first_entry(head, type, member) \ |
| @@ -234,7 +227,6 @@ static inline int plist_node_empty(const struct plist_node *node) | |||
| 234 | 227 | ||
| 235 | /** | 228 | /** |
| 236 | * plist_first - return the first node (and thus, highest priority) | 229 | * plist_first - return the first node (and thus, highest priority) |
| 237 | * | ||
| 238 | * @head: the &struct plist_head pointer | 230 | * @head: the &struct plist_head pointer |
| 239 | * | 231 | * |
| 240 | * Assumes the plist is _not_ empty. | 232 | * Assumes the plist is _not_ empty. |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 82f43ad478c7..5992f65b4184 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -346,9 +346,6 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size, | |||
| 346 | return __alloc_skb(size, priority, 1, -1); | 346 | return __alloc_skb(size, priority, 1, -1); |
| 347 | } | 347 | } |
| 348 | 348 | ||
| 349 | extern struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp, | ||
| 350 | unsigned int size, | ||
| 351 | gfp_t priority); | ||
| 352 | extern void kfree_skbmem(struct sk_buff *skb); | 349 | extern void kfree_skbmem(struct sk_buff *skb); |
| 353 | extern struct sk_buff *skb_clone(struct sk_buff *skb, | 350 | extern struct sk_buff *skb_clone(struct sk_buff *skb, |
| 354 | gfp_t priority); | 351 | gfp_t priority); |
| @@ -622,6 +619,13 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) | |||
| 622 | list->qlen = 0; | 619 | list->qlen = 0; |
| 623 | } | 620 | } |
| 624 | 621 | ||
| 622 | static inline void skb_queue_head_init_class(struct sk_buff_head *list, | ||
| 623 | struct lock_class_key *class) | ||
| 624 | { | ||
| 625 | skb_queue_head_init(list); | ||
| 626 | lockdep_set_class(&list->lock, class); | ||
| 627 | } | ||
| 628 | |||
| 625 | /* | 629 | /* |
| 626 | * Insert an sk_buff at the start of a list. | 630 | * Insert an sk_buff at the start of a list. |
| 627 | * | 631 | * |
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index 58d13f2bd121..a285897a2fb4 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c | |||
| @@ -126,7 +126,9 @@ void br_stp_disable_port(struct net_bridge_port *p) | |||
| 126 | /* called under bridge lock */ | 126 | /* called under bridge lock */ |
| 127 | void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr) | 127 | void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr) |
| 128 | { | 128 | { |
| 129 | unsigned char oldaddr[6]; | 129 | /* should be aligned on 2 bytes for compare_ether_addr() */ |
| 130 | unsigned short oldaddr_aligned[ETH_ALEN >> 1]; | ||
| 131 | unsigned char *oldaddr = (unsigned char *)oldaddr_aligned; | ||
| 130 | struct net_bridge_port *p; | 132 | struct net_bridge_port *p; |
| 131 | int wasroot; | 133 | int wasroot; |
| 132 | 134 | ||
| @@ -151,11 +153,14 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr) | |||
| 151 | br_become_root_bridge(br); | 153 | br_become_root_bridge(br); |
| 152 | } | 154 | } |
| 153 | 155 | ||
| 154 | static const unsigned char br_mac_zero[6]; | 156 | /* should be aligned on 2 bytes for compare_ether_addr() */ |
| 157 | static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1]; | ||
| 155 | 158 | ||
| 156 | /* called under bridge lock */ | 159 | /* called under bridge lock */ |
| 157 | void br_stp_recalculate_bridge_id(struct net_bridge *br) | 160 | void br_stp_recalculate_bridge_id(struct net_bridge *br) |
| 158 | { | 161 | { |
| 162 | const unsigned char *br_mac_zero = | ||
| 163 | (const unsigned char *)br_mac_zero_aligned; | ||
| 159 | const unsigned char *addr = br_mac_zero; | 164 | const unsigned char *addr = br_mac_zero; |
| 160 | struct net_bridge_port *p; | 165 | struct net_bridge_port *p; |
| 161 | 166 | ||
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index cfc60019cf92..841e3f32cab1 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
| @@ -1331,6 +1331,8 @@ void neigh_parms_destroy(struct neigh_parms *parms) | |||
| 1331 | kfree(parms); | 1331 | kfree(parms); |
| 1332 | } | 1332 | } |
| 1333 | 1333 | ||
| 1334 | static struct lock_class_key neigh_table_proxy_queue_class; | ||
| 1335 | |||
| 1334 | void neigh_table_init_no_netlink(struct neigh_table *tbl) | 1336 | void neigh_table_init_no_netlink(struct neigh_table *tbl) |
| 1335 | { | 1337 | { |
| 1336 | unsigned long now = jiffies; | 1338 | unsigned long now = jiffies; |
| @@ -1379,7 +1381,8 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl) | |||
| 1379 | init_timer(&tbl->proxy_timer); | 1381 | init_timer(&tbl->proxy_timer); |
| 1380 | tbl->proxy_timer.data = (unsigned long)tbl; | 1382 | tbl->proxy_timer.data = (unsigned long)tbl; |
| 1381 | tbl->proxy_timer.function = neigh_proxy_process; | 1383 | tbl->proxy_timer.function = neigh_proxy_process; |
| 1382 | skb_queue_head_init(&tbl->proxy_queue); | 1384 | skb_queue_head_init_class(&tbl->proxy_queue, |
| 1385 | &neigh_table_proxy_queue_class); | ||
| 1383 | 1386 | ||
| 1384 | tbl->last_flush = now; | 1387 | tbl->last_flush = now; |
| 1385 | tbl->last_rand = now + tbl->parms.reachable_time * 20; | 1388 | tbl->last_rand = now + tbl->parms.reachable_time * 20; |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index da1019451ccb..4581ece48bb2 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
| @@ -471,6 +471,13 @@ int __netpoll_rx(struct sk_buff *skb) | |||
| 471 | if (skb->len < len || len < iph->ihl*4) | 471 | if (skb->len < len || len < iph->ihl*4) |
| 472 | goto out; | 472 | goto out; |
| 473 | 473 | ||
| 474 | /* | ||
| 475 | * Our transport medium may have padded the buffer out. | ||
| 476 | * Now we trim to the true length of the frame. | ||
| 477 | */ | ||
| 478 | if (pskb_trim_rcsum(skb, len)) | ||
| 479 | goto out; | ||
| 480 | |||
| 474 | if (iph->protocol != IPPROTO_UDP) | 481 | if (iph->protocol != IPPROTO_UDP) |
| 475 | goto out; | 482 | goto out; |
| 476 | 483 | ||
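The pskb_trim_rcsum() call deals with link layers that pad short frames (Ethernet pads up to a 60-byte minimum), so the received length can exceed the IP header's tot_len. A standalone sketch of the trim arithmetic, with an illustrative struct and sample numbers rather than real skb/iphdr types:

    #include <stdio.h>

    struct fake_ip_hdr {
        unsigned int  ihl;      /* header length in 32-bit words */
        unsigned int  tot_len;  /* total datagram length in bytes */
    };

    int main(void)
    {
        struct fake_ip_hdr iph = { .ihl = 5, .tot_len = 29 }; /* tiny UDP packet */
        unsigned int rx_len = 60;        /* padded minimum Ethernet payload */
        unsigned int len = iph.tot_len;

        if (rx_len < len || len < iph.ihl * 4) {
            fprintf(stderr, "runt or corrupt packet\n");
            return 1;
        }
        if (rx_len > len)
            rx_len = len;                /* drop the link-layer padding */
        printf("trimmed to %u bytes\n", rx_len);
        return 0;
    }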
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 87573ae35b02..336958fbbcb2 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -197,61 +197,6 @@ nodata: | |||
| 197 | } | 197 | } |
| 198 | 198 | ||
| 199 | /** | 199 | /** |
| 200 | * alloc_skb_from_cache - allocate a network buffer | ||
| 201 | * @cp: kmem_cache from which to allocate the data area | ||
| 202 | * (object size must be big enough for @size bytes + skb overheads) | ||
| 203 | * @size: size to allocate | ||
| 204 | * @gfp_mask: allocation mask | ||
| 205 | * | ||
| 206 | * Allocate a new &sk_buff. The returned buffer has no headroom and | ||
| 207 | * tail room of size bytes. The object has a reference count of one. | ||
| 208 | * The return is the buffer. On a failure the return is %NULL. | ||
| 209 | * | ||
| 210 | * Buffers may only be allocated from interrupts using a @gfp_mask of | ||
| 211 | * %GFP_ATOMIC. | ||
| 212 | */ | ||
| 213 | struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp, | ||
| 214 | unsigned int size, | ||
| 215 | gfp_t gfp_mask) | ||
| 216 | { | ||
| 217 | struct sk_buff *skb; | ||
| 218 | u8 *data; | ||
| 219 | |||
| 220 | /* Get the HEAD */ | ||
| 221 | skb = kmem_cache_alloc(skbuff_head_cache, | ||
| 222 | gfp_mask & ~__GFP_DMA); | ||
| 223 | if (!skb) | ||
| 224 | goto out; | ||
| 225 | |||
| 226 | /* Get the DATA. */ | ||
| 227 | size = SKB_DATA_ALIGN(size); | ||
| 228 | data = kmem_cache_alloc(cp, gfp_mask); | ||
| 229 | if (!data) | ||
| 230 | goto nodata; | ||
| 231 | |||
| 232 | memset(skb, 0, offsetof(struct sk_buff, truesize)); | ||
| 233 | skb->truesize = size + sizeof(struct sk_buff); | ||
| 234 | atomic_set(&skb->users, 1); | ||
| 235 | skb->head = data; | ||
| 236 | skb->data = data; | ||
| 237 | skb->tail = data; | ||
| 238 | skb->end = data + size; | ||
| 239 | |||
| 240 | atomic_set(&(skb_shinfo(skb)->dataref), 1); | ||
| 241 | skb_shinfo(skb)->nr_frags = 0; | ||
| 242 | skb_shinfo(skb)->gso_size = 0; | ||
| 243 | skb_shinfo(skb)->gso_segs = 0; | ||
| 244 | skb_shinfo(skb)->gso_type = 0; | ||
| 245 | skb_shinfo(skb)->frag_list = NULL; | ||
| 246 | out: | ||
| 247 | return skb; | ||
| 248 | nodata: | ||
| 249 | kmem_cache_free(skbuff_head_cache, skb); | ||
| 250 | skb = NULL; | ||
| 251 | goto out; | ||
| 252 | } | ||
| 253 | |||
| 254 | /** | ||
| 255 | * __netdev_alloc_skb - allocate an skbuff for rx on a specific device | 200 | * __netdev_alloc_skb - allocate an skbuff for rx on a specific device |
| 256 | * @dev: network device to receive on | 201 | * @dev: network device to receive on |
| 257 | * @length: length to allocate | 202 | * @length: length to allocate |
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index eabd6838f50a..0eb7d596d470 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
| @@ -138,7 +138,6 @@ static void irda_disconnect_indication(void *instance, void *sap, | |||
| 138 | sk->sk_shutdown |= SEND_SHUTDOWN; | 138 | sk->sk_shutdown |= SEND_SHUTDOWN; |
| 139 | 139 | ||
| 140 | sk->sk_state_change(sk); | 140 | sk->sk_state_change(sk); |
| 141 | sock_orphan(sk); | ||
| 142 | release_sock(sk); | 141 | release_sock(sk); |
| 143 | 142 | ||
| 144 | /* Close our TSAP. | 143 | /* Close our TSAP. |
| @@ -1446,7 +1445,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, | |||
| 1446 | */ | 1445 | */ |
| 1447 | ret = sock_error(sk); | 1446 | ret = sock_error(sk); |
| 1448 | if (ret) | 1447 | if (ret) |
| 1449 | break; | 1448 | ; |
| 1450 | else if (sk->sk_shutdown & RCV_SHUTDOWN) | 1449 | else if (sk->sk_shutdown & RCV_SHUTDOWN) |
| 1451 | ; | 1450 | ; |
| 1452 | else if (noblock) | 1451 | else if (noblock) |
diff --git a/net/key/af_key.c b/net/key/af_key.c index a4e7e2db0ff3..345019345f09 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
| @@ -630,6 +630,35 @@ pfkey_sockaddr_size(sa_family_t family) | |||
| 630 | /* NOTREACHED */ | 630 | /* NOTREACHED */ |
| 631 | } | 631 | } |
| 632 | 632 | ||
| 633 | static inline int pfkey_mode_from_xfrm(int mode) | ||
| 634 | { | ||
| 635 | switch(mode) { | ||
| 636 | case XFRM_MODE_TRANSPORT: | ||
| 637 | return IPSEC_MODE_TRANSPORT; | ||
| 638 | case XFRM_MODE_TUNNEL: | ||
| 639 | return IPSEC_MODE_TUNNEL; | ||
| 640 | case XFRM_MODE_BEET: | ||
| 641 | return IPSEC_MODE_BEET; | ||
| 642 | default: | ||
| 643 | return -1; | ||
| 644 | } | ||
| 645 | } | ||
| 646 | |||
| 647 | static inline int pfkey_mode_to_xfrm(int mode) | ||
| 648 | { | ||
| 649 | switch(mode) { | ||
| 650 | case IPSEC_MODE_ANY: /*XXX*/ | ||
| 651 | case IPSEC_MODE_TRANSPORT: | ||
| 652 | return XFRM_MODE_TRANSPORT; | ||
| 653 | case IPSEC_MODE_TUNNEL: | ||
| 654 | return XFRM_MODE_TUNNEL; | ||
| 655 | case IPSEC_MODE_BEET: | ||
| 656 | return XFRM_MODE_BEET; | ||
| 657 | default: | ||
| 658 | return -1; | ||
| 659 | } | ||
| 660 | } | ||
| 661 | |||
| 633 | static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys, int hsc) | 662 | static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys, int hsc) |
| 634 | { | 663 | { |
| 635 | struct sk_buff *skb; | 664 | struct sk_buff *skb; |
| @@ -651,6 +680,7 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys, | |||
| 651 | int encrypt_key_size = 0; | 680 | int encrypt_key_size = 0; |
| 652 | int sockaddr_size; | 681 | int sockaddr_size; |
| 653 | struct xfrm_encap_tmpl *natt = NULL; | 682 | struct xfrm_encap_tmpl *natt = NULL; |
| 683 | int mode; | ||
| 654 | 684 | ||
| 655 | /* address family check */ | 685 | /* address family check */ |
| 656 | sockaddr_size = pfkey_sockaddr_size(x->props.family); | 686 | sockaddr_size = pfkey_sockaddr_size(x->props.family); |
| @@ -928,7 +958,11 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys, | |||
| 928 | sa2 = (struct sadb_x_sa2 *) skb_put(skb, sizeof(struct sadb_x_sa2)); | 958 | sa2 = (struct sadb_x_sa2 *) skb_put(skb, sizeof(struct sadb_x_sa2)); |
| 929 | sa2->sadb_x_sa2_len = sizeof(struct sadb_x_sa2)/sizeof(uint64_t); | 959 | sa2->sadb_x_sa2_len = sizeof(struct sadb_x_sa2)/sizeof(uint64_t); |
| 930 | sa2->sadb_x_sa2_exttype = SADB_X_EXT_SA2; | 960 | sa2->sadb_x_sa2_exttype = SADB_X_EXT_SA2; |
| 931 | sa2->sadb_x_sa2_mode = x->props.mode + 1; | 961 | if ((mode = pfkey_mode_from_xfrm(x->props.mode)) < 0) { |
| 962 | kfree_skb(skb); | ||
| 963 | return ERR_PTR(-EINVAL); | ||
| 964 | } | ||
| 965 | sa2->sadb_x_sa2_mode = mode; | ||
| 932 | sa2->sadb_x_sa2_reserved1 = 0; | 966 | sa2->sadb_x_sa2_reserved1 = 0; |
| 933 | sa2->sadb_x_sa2_reserved2 = 0; | 967 | sa2->sadb_x_sa2_reserved2 = 0; |
| 934 | sa2->sadb_x_sa2_sequence = 0; | 968 | sa2->sadb_x_sa2_sequence = 0; |
| @@ -1155,9 +1189,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr, | |||
| 1155 | 1189 | ||
| 1156 | if (ext_hdrs[SADB_X_EXT_SA2-1]) { | 1190 | if (ext_hdrs[SADB_X_EXT_SA2-1]) { |
| 1157 | struct sadb_x_sa2 *sa2 = (void*)ext_hdrs[SADB_X_EXT_SA2-1]; | 1191 | struct sadb_x_sa2 *sa2 = (void*)ext_hdrs[SADB_X_EXT_SA2-1]; |
| 1158 | x->props.mode = sa2->sadb_x_sa2_mode; | 1192 | int mode = pfkey_mode_to_xfrm(sa2->sadb_x_sa2_mode); |
| 1159 | if (x->props.mode) | 1193 | if (mode < 0) { |
| 1160 | x->props.mode--; | 1194 | err = -EINVAL; |
| 1195 | goto out; | ||
| 1196 | } | ||
| 1197 | x->props.mode = mode; | ||
| 1161 | x->props.reqid = sa2->sadb_x_sa2_reqid; | 1198 | x->props.reqid = sa2->sadb_x_sa2_reqid; |
| 1162 | } | 1199 | } |
| 1163 | 1200 | ||
| @@ -1218,7 +1255,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h | |||
| 1218 | struct sadb_address *saddr, *daddr; | 1255 | struct sadb_address *saddr, *daddr; |
| 1219 | struct sadb_msg *out_hdr; | 1256 | struct sadb_msg *out_hdr; |
| 1220 | struct xfrm_state *x = NULL; | 1257 | struct xfrm_state *x = NULL; |
| 1221 | u8 mode; | 1258 | int mode; |
| 1222 | u32 reqid; | 1259 | u32 reqid; |
| 1223 | u8 proto; | 1260 | u8 proto; |
| 1224 | unsigned short family; | 1261 | unsigned short family; |
| @@ -1233,7 +1270,9 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h | |||
| 1233 | return -EINVAL; | 1270 | return -EINVAL; |
| 1234 | 1271 | ||
| 1235 | if ((sa2 = ext_hdrs[SADB_X_EXT_SA2-1]) != NULL) { | 1272 | if ((sa2 = ext_hdrs[SADB_X_EXT_SA2-1]) != NULL) { |
| 1236 | mode = sa2->sadb_x_sa2_mode - 1; | 1273 | mode = pfkey_mode_to_xfrm(sa2->sadb_x_sa2_mode); |
| 1274 | if (mode < 0) | ||
| 1275 | return -EINVAL; | ||
| 1237 | reqid = sa2->sadb_x_sa2_reqid; | 1276 | reqid = sa2->sadb_x_sa2_reqid; |
| 1238 | } else { | 1277 | } else { |
| 1239 | mode = 0; | 1278 | mode = 0; |
| @@ -1756,6 +1795,7 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) | |||
| 1756 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 1795 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
| 1757 | struct sockaddr_in6 *sin6; | 1796 | struct sockaddr_in6 *sin6; |
| 1758 | #endif | 1797 | #endif |
| 1798 | int mode; | ||
| 1759 | 1799 | ||
| 1760 | if (xp->xfrm_nr >= XFRM_MAX_DEPTH) | 1800 | if (xp->xfrm_nr >= XFRM_MAX_DEPTH) |
| 1761 | return -ELOOP; | 1801 | return -ELOOP; |
| @@ -1764,7 +1804,9 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) | |||
| 1764 | return -EINVAL; | 1804 | return -EINVAL; |
| 1765 | 1805 | ||
| 1766 | t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */ | 1806 | t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */ |
| 1767 | t->mode = rq->sadb_x_ipsecrequest_mode-1; | 1807 | if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0) |
| 1808 | return -EINVAL; | ||
| 1809 | t->mode = mode; | ||
| 1768 | if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE) | 1810 | if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE) |
| 1769 | t->optional = 1; | 1811 | t->optional = 1; |
| 1770 | else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) { | 1812 | else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) { |
| @@ -1877,7 +1919,7 @@ static struct sk_buff * pfkey_xfrm_policy2msg_prep(struct xfrm_policy *xp) | |||
| 1877 | return skb; | 1919 | return skb; |
| 1878 | } | 1920 | } |
| 1879 | 1921 | ||
| 1880 | static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, int dir) | 1922 | static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, int dir) |
| 1881 | { | 1923 | { |
| 1882 | struct sadb_msg *hdr; | 1924 | struct sadb_msg *hdr; |
| 1883 | struct sadb_address *addr; | 1925 | struct sadb_address *addr; |
| @@ -2014,6 +2056,7 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i | |||
| 2014 | struct sadb_x_ipsecrequest *rq; | 2056 | struct sadb_x_ipsecrequest *rq; |
| 2015 | struct xfrm_tmpl *t = xp->xfrm_vec + i; | 2057 | struct xfrm_tmpl *t = xp->xfrm_vec + i; |
| 2016 | int req_size; | 2058 | int req_size; |
| 2059 | int mode; | ||
| 2017 | 2060 | ||
| 2018 | req_size = sizeof(struct sadb_x_ipsecrequest); | 2061 | req_size = sizeof(struct sadb_x_ipsecrequest); |
| 2019 | if (t->mode == XFRM_MODE_TUNNEL) | 2062 | if (t->mode == XFRM_MODE_TUNNEL) |
| @@ -2027,7 +2070,9 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i | |||
| 2027 | memset(rq, 0, sizeof(*rq)); | 2070 | memset(rq, 0, sizeof(*rq)); |
| 2028 | rq->sadb_x_ipsecrequest_len = req_size; | 2071 | rq->sadb_x_ipsecrequest_len = req_size; |
| 2029 | rq->sadb_x_ipsecrequest_proto = t->id.proto; | 2072 | rq->sadb_x_ipsecrequest_proto = t->id.proto; |
| 2030 | rq->sadb_x_ipsecrequest_mode = t->mode+1; | 2073 | if ((mode = pfkey_mode_from_xfrm(t->mode)) < 0) |
| 2074 | return -EINVAL; | ||
| 2075 | rq->sadb_x_ipsecrequest_mode = mode; | ||
| 2031 | rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_REQUIRE; | 2076 | rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_REQUIRE; |
| 2032 | if (t->reqid) | 2077 | if (t->reqid) |
| 2033 | rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_UNIQUE; | 2078 | rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_UNIQUE; |
| @@ -2089,6 +2134,8 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i | |||
| 2089 | 2134 | ||
| 2090 | hdr->sadb_msg_len = size / sizeof(uint64_t); | 2135 | hdr->sadb_msg_len = size / sizeof(uint64_t); |
| 2091 | hdr->sadb_msg_reserved = atomic_read(&xp->refcnt); | 2136 | hdr->sadb_msg_reserved = atomic_read(&xp->refcnt); |
| 2137 | |||
| 2138 | return 0; | ||
| 2092 | } | 2139 | } |
| 2093 | 2140 | ||
| 2094 | static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c) | 2141 | static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c) |
| @@ -2102,7 +2149,9 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c | |||
| 2102 | err = PTR_ERR(out_skb); | 2149 | err = PTR_ERR(out_skb); |
| 2103 | goto out; | 2150 | goto out; |
| 2104 | } | 2151 | } |
| 2105 | pfkey_xfrm_policy2msg(out_skb, xp, dir); | 2152 | err = pfkey_xfrm_policy2msg(out_skb, xp, dir); |
| 2153 | if (err < 0) | ||
| 2154 | return err; | ||
| 2106 | 2155 | ||
| 2107 | out_hdr = (struct sadb_msg *) out_skb->data; | 2156 | out_hdr = (struct sadb_msg *) out_skb->data; |
| 2108 | out_hdr->sadb_msg_version = PF_KEY_V2; | 2157 | out_hdr->sadb_msg_version = PF_KEY_V2; |
| @@ -2327,7 +2376,9 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, struct sadb | |||
| 2327 | err = PTR_ERR(out_skb); | 2376 | err = PTR_ERR(out_skb); |
| 2328 | goto out; | 2377 | goto out; |
| 2329 | } | 2378 | } |
| 2330 | pfkey_xfrm_policy2msg(out_skb, xp, dir); | 2379 | err = pfkey_xfrm_policy2msg(out_skb, xp, dir); |
| 2380 | if (err < 0) | ||
| 2381 | goto out; | ||
| 2331 | 2382 | ||
| 2332 | out_hdr = (struct sadb_msg *) out_skb->data; | 2383 | out_hdr = (struct sadb_msg *) out_skb->data; |
| 2333 | out_hdr->sadb_msg_version = hdr->sadb_msg_version; | 2384 | out_hdr->sadb_msg_version = hdr->sadb_msg_version; |
| @@ -2409,6 +2460,7 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len, | |||
| 2409 | { | 2460 | { |
| 2410 | int err; | 2461 | int err; |
| 2411 | struct sadb_x_ipsecrequest *rq2; | 2462 | struct sadb_x_ipsecrequest *rq2; |
| 2463 | int mode; | ||
| 2412 | 2464 | ||
| 2413 | if (len <= sizeof(struct sadb_x_ipsecrequest) || | 2465 | if (len <= sizeof(struct sadb_x_ipsecrequest) || |
| 2414 | len < rq1->sadb_x_ipsecrequest_len) | 2466 | len < rq1->sadb_x_ipsecrequest_len) |
| @@ -2439,7 +2491,9 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len, | |||
| 2439 | return -EINVAL; | 2491 | return -EINVAL; |
| 2440 | 2492 | ||
| 2441 | m->proto = rq1->sadb_x_ipsecrequest_proto; | 2493 | m->proto = rq1->sadb_x_ipsecrequest_proto; |
| 2442 | m->mode = rq1->sadb_x_ipsecrequest_mode - 1; | 2494 | if ((mode = pfkey_mode_to_xfrm(rq1->sadb_x_ipsecrequest_mode)) < 0) |
| 2495 | return -EINVAL; | ||
| 2496 | m->mode = mode; | ||
| 2443 | m->reqid = rq1->sadb_x_ipsecrequest_reqid; | 2497 | m->reqid = rq1->sadb_x_ipsecrequest_reqid; |
| 2444 | 2498 | ||
| 2445 | return ((int)(rq1->sadb_x_ipsecrequest_len + | 2499 | return ((int)(rq1->sadb_x_ipsecrequest_len + |
| @@ -2579,12 +2633,15 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr) | |||
| 2579 | struct pfkey_dump_data *data = ptr; | 2633 | struct pfkey_dump_data *data = ptr; |
| 2580 | struct sk_buff *out_skb; | 2634 | struct sk_buff *out_skb; |
| 2581 | struct sadb_msg *out_hdr; | 2635 | struct sadb_msg *out_hdr; |
| 2636 | int err; | ||
| 2582 | 2637 | ||
| 2583 | out_skb = pfkey_xfrm_policy2msg_prep(xp); | 2638 | out_skb = pfkey_xfrm_policy2msg_prep(xp); |
| 2584 | if (IS_ERR(out_skb)) | 2639 | if (IS_ERR(out_skb)) |
| 2585 | return PTR_ERR(out_skb); | 2640 | return PTR_ERR(out_skb); |
| 2586 | 2641 | ||
| 2587 | pfkey_xfrm_policy2msg(out_skb, xp, dir); | 2642 | err = pfkey_xfrm_policy2msg(out_skb, xp, dir); |
| 2643 | if (err < 0) | ||
| 2644 | return err; | ||
| 2588 | 2645 | ||
| 2589 | out_hdr = (struct sadb_msg *) out_skb->data; | 2646 | out_hdr = (struct sadb_msg *) out_skb->data; |
| 2590 | out_hdr->sadb_msg_version = data->hdr->sadb_msg_version; | 2647 | out_hdr->sadb_msg_version = data->hdr->sadb_msg_version; |
| @@ -3513,7 +3570,10 @@ static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type, | |||
| 3513 | 3570 | ||
| 3514 | for (i = 0, mp = m; i < num_bundles; i++, mp++) { | 3571 | for (i = 0, mp = m; i < num_bundles; i++, mp++) { |
| 3515 | /* old ipsecrequest */ | 3572 | /* old ipsecrequest */ |
| 3516 | if (set_ipsecrequest(skb, mp->proto, mp->mode + 1, | 3573 | int mode = pfkey_mode_from_xfrm(mp->mode); |
| 3574 | if (mode < 0) | ||
| 3575 | return -EINVAL; | ||
| 3576 | if (set_ipsecrequest(skb, mp->proto, mode, | ||
| 3517 | (mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE), | 3577 | (mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE), |
| 3518 | mp->reqid, mp->old_family, | 3578 | mp->reqid, mp->old_family, |
| 3519 | &mp->old_saddr, &mp->old_daddr) < 0) { | 3579 | &mp->old_saddr, &mp->old_daddr) < 0) { |
| @@ -3521,7 +3581,7 @@ static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type, | |||
| 3521 | } | 3581 | } |
| 3522 | 3582 | ||
| 3523 | /* new ipsecrequest */ | 3583 | /* new ipsecrequest */ |
| 3524 | if (set_ipsecrequest(skb, mp->proto, mp->mode + 1, | 3584 | if (set_ipsecrequest(skb, mp->proto, mode, |
| 3525 | (mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE), | 3585 | (mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE), |
| 3526 | mp->reqid, mp->new_family, | 3586 | mp->reqid, mp->new_family, |
| 3527 | &mp->new_saddr, &mp->new_daddr) < 0) { | 3587 | &mp->new_saddr, &mp->new_daddr) < 0) { |
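The new pfkey_mode_from_xfrm()/pfkey_mode_to_xfrm() helpers replace the old "mode + 1" / "mode - 1" arithmetic with explicit switch tables and a -1 sentinel, so unknown or non-contiguous mode values become -EINVAL instead of silently mapping to garbage. A standalone sketch of that shape, using placeholder enum values rather than the kernel's constants:

    #include <stdio.h>

    enum xfrm_mode_sketch  { XFRM_TRANSPORT_S, XFRM_TUNNEL_S, XFRM_BEET_S };
    enum pfkey_mode_sketch { PFKEY_ANY_S, PFKEY_TRANSPORT_S, PFKEY_TUNNEL_S, PFKEY_BEET_S };

    static int mode_from_xfrm(int mode)
    {
        switch (mode) {
        case XFRM_TRANSPORT_S: return PFKEY_TRANSPORT_S;
        case XFRM_TUNNEL_S:    return PFKEY_TUNNEL_S;
        case XFRM_BEET_S:      return PFKEY_BEET_S;
        default:               return -1;   /* caller turns this into an error */
        }
    }

    static int mode_to_xfrm(int mode)
    {
        switch (mode) {
        case PFKEY_ANY_S:       /* treated like transport, as in the patch */
        case PFKEY_TRANSPORT_S: return XFRM_TRANSPORT_S;
        case PFKEY_TUNNEL_S:    return XFRM_TUNNEL_S;
        case PFKEY_BEET_S:      return XFRM_BEET_S;
        default:                return -1;
        }
    }

    int main(void)
    {
        for (int m = XFRM_TRANSPORT_S; m <= XFRM_BEET_S; m++)
            printf("xfrm %d <-> pfkey %d -> xfrm %d\n",
                   m, mode_from_xfrm(m), mode_to_xfrm(mode_from_xfrm(m)));
        printf("unknown pfkey mode 42 -> %d\n", mode_to_xfrm(42));
        return 0;
    }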
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index e73d8f546c6b..c48b0f49f003 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -443,6 +443,7 @@ static int netlink_release(struct socket *sock) | |||
| 443 | return 0; | 443 | return 0; |
| 444 | 444 | ||
| 445 | netlink_remove(sk); | 445 | netlink_remove(sk); |
| 446 | sock_orphan(sk); | ||
| 446 | nlk = nlk_sk(sk); | 447 | nlk = nlk_sk(sk); |
| 447 | 448 | ||
| 448 | spin_lock(&nlk->cb_lock); | 449 | spin_lock(&nlk->cb_lock); |
| @@ -457,7 +458,6 @@ static int netlink_release(struct socket *sock) | |||
| 457 | /* OK. Socket is unlinked, and, therefore, | 458 | /* OK. Socket is unlinked, and, therefore, |
| 458 | no new packets will arrive */ | 459 | no new packets will arrive */ |
| 459 | 460 | ||
| 460 | sock_orphan(sk); | ||
| 461 | sock->sk = NULL; | 461 | sock->sk = NULL; |
| 462 | wake_up_interruptible_all(&nlk->wait); | 462 | wake_up_interruptible_all(&nlk->wait); |
| 463 | 463 | ||
| @@ -1412,9 +1412,9 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
| 1412 | return -ECONNREFUSED; | 1412 | return -ECONNREFUSED; |
| 1413 | } | 1413 | } |
| 1414 | nlk = nlk_sk(sk); | 1414 | nlk = nlk_sk(sk); |
| 1415 | /* A dump is in progress... */ | 1415 | /* A dump or destruction is in progress... */ |
| 1416 | spin_lock(&nlk->cb_lock); | 1416 | spin_lock(&nlk->cb_lock); |
| 1417 | if (nlk->cb) { | 1417 | if (nlk->cb || sock_flag(sk, SOCK_DEAD)) { |
| 1418 | spin_unlock(&nlk->cb_lock); | 1418 | spin_unlock(&nlk->cb_lock); |
| 1419 | netlink_destroy_callback(cb); | 1419 | netlink_destroy_callback(cb); |
| 1420 | sock_put(sk); | 1420 | sock_put(sk); |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 536298c2eda2..a1d026f12b0e 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -627,6 +627,12 @@ int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) | |||
| 627 | retval = -EINVAL; | 627 | retval = -EINVAL; |
| 628 | goto err_bindx_rem; | 628 | goto err_bindx_rem; |
| 629 | } | 629 | } |
| 630 | |||
| 631 | if (!af->addr_valid(sa_addr, sp, NULL)) { | ||
| 632 | retval = -EADDRNOTAVAIL; | ||
| 633 | goto err_bindx_rem; | ||
| 634 | } | ||
| 635 | |||
| 630 | if (sa_addr->v4.sin_port != htons(bp->port)) { | 636 | if (sa_addr->v4.sin_port != htons(bp->port)) { |
| 631 | retval = -EINVAL; | 637 | retval = -EINVAL; |
| 632 | goto err_bindx_rem; | 638 | goto err_bindx_rem; |
| @@ -5638,6 +5644,36 @@ void sctp_wait_for_close(struct sock *sk, long timeout) | |||
| 5638 | finish_wait(sk->sk_sleep, &wait); | 5644 | finish_wait(sk->sk_sleep, &wait); |
| 5639 | } | 5645 | } |
| 5640 | 5646 | ||
| 5647 | static void sctp_sock_rfree_frag(struct sk_buff *skb) | ||
| 5648 | { | ||
| 5649 | struct sk_buff *frag; | ||
| 5650 | |||
| 5651 | if (!skb->data_len) | ||
| 5652 | goto done; | ||
| 5653 | |||
| 5654 | /* Don't forget the fragments. */ | ||
| 5655 | for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) | ||
| 5656 | sctp_sock_rfree_frag(frag); | ||
| 5657 | |||
| 5658 | done: | ||
| 5659 | sctp_sock_rfree(skb); | ||
| 5660 | } | ||
| 5661 | |||
| 5662 | static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) | ||
| 5663 | { | ||
| 5664 | struct sk_buff *frag; | ||
| 5665 | |||
| 5666 | if (!skb->data_len) | ||
| 5667 | goto done; | ||
| 5668 | |||
| 5669 | /* Don't forget the fragments. */ | ||
| 5670 | for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) | ||
| 5671 | sctp_skb_set_owner_r_frag(frag, sk); | ||
| 5672 | |||
| 5673 | done: | ||
| 5674 | sctp_skb_set_owner_r(skb, sk); | ||
| 5675 | } | ||
| 5676 | |||
| 5641 | /* Populate the fields of the newsk from the oldsk and migrate the assoc | 5677 | /* Populate the fields of the newsk from the oldsk and migrate the assoc |
| 5642 | * and its messages to the newsk. | 5678 | * and its messages to the newsk. |
| 5643 | */ | 5679 | */ |
| @@ -5692,10 +5728,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, | |||
| 5692 | sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { | 5728 | sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { |
| 5693 | event = sctp_skb2event(skb); | 5729 | event = sctp_skb2event(skb); |
| 5694 | if (event->asoc == assoc) { | 5730 | if (event->asoc == assoc) { |
| 5695 | sctp_sock_rfree(skb); | 5731 | sctp_sock_rfree_frag(skb); |
| 5696 | __skb_unlink(skb, &oldsk->sk_receive_queue); | 5732 | __skb_unlink(skb, &oldsk->sk_receive_queue); |
| 5697 | __skb_queue_tail(&newsk->sk_receive_queue, skb); | 5733 | __skb_queue_tail(&newsk->sk_receive_queue, skb); |
| 5698 | sctp_skb_set_owner_r(skb, newsk); | 5734 | sctp_skb_set_owner_r_frag(skb, newsk); |
| 5699 | } | 5735 | } |
| 5700 | } | 5736 | } |
| 5701 | 5737 | ||
| @@ -5723,10 +5759,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, | |||
| 5723 | sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { | 5759 | sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { |
| 5724 | event = sctp_skb2event(skb); | 5760 | event = sctp_skb2event(skb); |
| 5725 | if (event->asoc == assoc) { | 5761 | if (event->asoc == assoc) { |
| 5726 | sctp_sock_rfree(skb); | 5762 | sctp_sock_rfree_frag(skb); |
| 5727 | __skb_unlink(skb, &oldsp->pd_lobby); | 5763 | __skb_unlink(skb, &oldsp->pd_lobby); |
| 5728 | __skb_queue_tail(queue, skb); | 5764 | __skb_queue_tail(queue, skb); |
| 5729 | sctp_skb_set_owner_r(skb, newsk); | 5765 | sctp_skb_set_owner_r_frag(skb, newsk); |
| 5730 | } | 5766 | } |
| 5731 | } | 5767 | } |
| 5732 | 5768 | ||
| @@ -5738,6 +5774,16 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, | |||
| 5738 | 5774 | ||
| 5739 | } | 5775 | } |
| 5740 | 5776 | ||
| 5777 | sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) { | ||
| 5778 | sctp_sock_rfree_frag(skb); | ||
| 5779 | sctp_skb_set_owner_r_frag(skb, newsk); | ||
| 5780 | } | ||
| 5781 | |||
| 5782 | sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) { | ||
| 5783 | sctp_sock_rfree_frag(skb); | ||
| 5784 | sctp_skb_set_owner_r_frag(skb, newsk); | ||
| 5785 | } | ||
| 5786 | |||
| 5741 | /* Set the type of socket to indicate that it is peeled off from the | 5787 | /* Set the type of socket to indicate that it is peeled off from the |
| 5742 | * original UDP-style socket or created with the accept() call on a | 5788 | * original UDP-style socket or created with the accept() call on a |
| 5743 | * TCP-style socket.. | 5789 | * TCP-style socket.. |
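sctp_sock_rfree_frag() and sctp_skb_set_owner_r_frag() apply the same idea: when migrating buffers between sockets, the accounting operation has to visit every buffer on the fragment list as well as the head. A standalone sketch of that recursive walk, with an illustrative struct and a print in place of the real accounting:

    #include <stdio.h>
    #include <stddef.h>

    struct frag_buf {
        int id;
        struct frag_buf *frag_list;  /* first fragment, or NULL */
        struct frag_buf *next;       /* next fragment in the chain */
    };

    static void account(struct frag_buf *b)
    {
        printf("accounting buffer %d\n", b->id);
    }

    static void account_with_frags(struct frag_buf *b)
    {
        /* Don't forget the fragments. */
        for (struct frag_buf *f = b->frag_list; f; f = f->next)
            account_with_frags(f);
        account(b);
    }

    int main(void)
    {
        struct frag_buf f2 = { 3, NULL, NULL };
        struct frag_buf f1 = { 2, NULL, &f2 };
        struct frag_buf head = { 1, &f1, NULL };

        account_with_frags(&head);   /* visits 2, 3, then 1 */
        return 0;
    }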
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index bfb197e37da3..b29e3e4b72c9 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c | |||
| @@ -190,7 +190,14 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) | |||
| 190 | if (!sctp_sk(sk)->pd_mode) { | 190 | if (!sctp_sk(sk)->pd_mode) { |
| 191 | queue = &sk->sk_receive_queue; | 191 | queue = &sk->sk_receive_queue; |
| 192 | } else if (ulpq->pd_mode) { | 192 | } else if (ulpq->pd_mode) { |
| 193 | if (event->msg_flags & MSG_NOTIFICATION) | 193 | /* If the association is in partial delivery, we |
| 194 | * need to finish delivering the partially processed | ||
| 195 | * packet before passing any other data. This is | ||
| 196 | * because we don't truly support stream interleaving. | ||
| 197 | */ | ||
| 198 | if ((event->msg_flags & MSG_NOTIFICATION) || | ||
| 199 | (SCTP_DATA_NOT_FRAG == | ||
| 200 | (event->msg_flags & SCTP_DATA_FRAG_MASK))) | ||
| 194 | queue = &sctp_sk(sk)->pd_lobby; | 201 | queue = &sctp_sk(sk)->pd_lobby; |
| 195 | else { | 202 | else { |
| 196 | clear_pd = event->msg_flags & MSG_EOR; | 203 | clear_pd = event->msg_flags & MSG_EOR; |
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 9bae4090254c..2bd23ea2aa8b 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
| @@ -383,7 +383,10 @@ void svcauth_unix_purge(void) | |||
| 383 | static inline struct ip_map * | 383 | static inline struct ip_map * |
| 384 | ip_map_cached_get(struct svc_rqst *rqstp) | 384 | ip_map_cached_get(struct svc_rqst *rqstp) |
| 385 | { | 385 | { |
| 386 | struct ip_map *ipm = rqstp->rq_sock->sk_info_authunix; | 386 | struct ip_map *ipm; |
| 387 | struct svc_sock *svsk = rqstp->rq_sock; | ||
| 388 | spin_lock_bh(&svsk->sk_defer_lock); | ||
| 389 | ipm = svsk->sk_info_authunix; | ||
| 387 | if (ipm != NULL) { | 390 | if (ipm != NULL) { |
| 388 | if (!cache_valid(&ipm->h)) { | 391 | if (!cache_valid(&ipm->h)) { |
| 389 | /* | 392 | /* |
| @@ -391,12 +394,14 @@ ip_map_cached_get(struct svc_rqst *rqstp) | |||
| 391 | * remembered, e.g. by a second mount from the | 394 | * remembered, e.g. by a second mount from the |
| 392 | * same IP address. | 395 | * same IP address. |
| 393 | */ | 396 | */ |
| 394 | rqstp->rq_sock->sk_info_authunix = NULL; | 397 | svsk->sk_info_authunix = NULL; |
| 398 | spin_unlock_bh(&svsk->sk_defer_lock); | ||
| 395 | cache_put(&ipm->h, &ip_map_cache); | 399 | cache_put(&ipm->h, &ip_map_cache); |
| 396 | return NULL; | 400 | return NULL; |
| 397 | } | 401 | } |
| 398 | cache_get(&ipm->h); | 402 | cache_get(&ipm->h); |
| 399 | } | 403 | } |
| 404 | spin_unlock_bh(&svsk->sk_defer_lock); | ||
| 400 | return ipm; | 405 | return ipm; |
| 401 | } | 406 | } |
| 402 | 407 | ||
| @@ -405,9 +410,15 @@ ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm) | |||
| 405 | { | 410 | { |
| 406 | struct svc_sock *svsk = rqstp->rq_sock; | 411 | struct svc_sock *svsk = rqstp->rq_sock; |
| 407 | 412 | ||
| 408 | if (svsk->sk_sock->type == SOCK_STREAM && svsk->sk_info_authunix == NULL) | 413 | spin_lock_bh(&svsk->sk_defer_lock); |
| 409 | svsk->sk_info_authunix = ipm; /* newly cached, keep the reference */ | 414 | if (svsk->sk_sock->type == SOCK_STREAM && |
| 410 | else | 415 | svsk->sk_info_authunix == NULL) { |
| 416 | /* newly cached, keep the reference */ | ||
| 417 | svsk->sk_info_authunix = ipm; | ||
| 418 | ipm = NULL; | ||
| 419 | } | ||
| 420 | spin_unlock_bh(&svsk->sk_defer_lock); | ||
| 421 | if (ipm) | ||
| 411 | cache_put(&ipm->h, &ip_map_cache); | 422 | cache_put(&ipm->h, &ip_map_cache); |
| 412 | } | 423 | } |
| 413 | 424 | ||
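The svcauth_unix change adopts a common shape for a cached, refcounted pointer: read or replace the pointer only under sk_defer_lock, and drop any reference that lost the race after the lock is released. A standalone pthread sketch of that pattern, with stand-in cache_* names rather than the sunrpc cache API:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cache_entry { int refcount; };

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct cache_entry *cached;

    static void cache_put_sketch(struct cache_entry *e)
    {
        if (e && --e->refcount == 0)
            free(e);
    }

    static void cache_store(struct cache_entry *e)
    {
        pthread_mutex_lock(&cache_lock);
        if (!cached) {               /* newly cached, keep the reference */
            cached = e;
            e = NULL;
        }
        pthread_mutex_unlock(&cache_lock);
        cache_put_sketch(e);         /* lost the race: drop outside the lock */
    }

    int main(void)
    {
        struct cache_entry *a = calloc(1, sizeof(*a));
        struct cache_entry *b = calloc(1, sizeof(*b));

        a->refcount = b->refcount = 1;
        cache_store(a);              /* installed */
        cache_store(b);              /* raced, reference dropped */
        printf("cached=%p a=%p\n", (void *)cached, (void *)a);
        return 0;
    }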
