109 files changed, 1133 insertions, 865 deletions
diff --git a/Documentation/cpu-freq/user-guide.txt b/Documentation/cpu-freq/user-guide.txt
index 7fedc00c3d30..555c8cf3650a 100644
--- a/Documentation/cpu-freq/user-guide.txt
+++ b/Documentation/cpu-freq/user-guide.txt
| @@ -153,10 +153,13 @@ scaling_governor, and by "echoing" the name of another | |||
| 153 | that some governors won't load - they only | 153 | that some governors won't load - they only |
| 154 | work on some specific architectures or | 154 | work on some specific architectures or |
| 155 | processors. | 155 | processors. |
| 156 | scaling_min_freq and | 156 | scaling_min_freq and |
| 157 | scaling_max_freq show the current "policy limits" (in | 157 | scaling_max_freq show the current "policy limits" (in |
| 158 | kHz). By echoing new values into these | 158 | kHz). By echoing new values into these |
| 159 | files, you can change these limits. | 159 | files, you can change these limits. |
| 160 | NOTE: when setting a policy you need to | ||
| 161 | first set scaling_max_freq, then | ||
| 162 | scaling_min_freq. | ||
| 160 | 163 | ||
| 161 | 164 | ||
| 162 | If you have selected the "userspace" governor which allows you to | 165 | If you have selected the "userspace" governor which allows you to |
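The NOTE added above is an ordering requirement; a minimal user-space sketch of it, assuming the standard cpufreq sysfs layout (the cpu0 paths and the 800 MHz / 2 GHz values are illustrative only):

#include <stdio.h>

/* Write scaling_max_freq before scaling_min_freq so the new minimum is
 * never above the currently allowed maximum.  Values are in kHz. */
static int write_khz(const char *path, unsigned long khz)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%lu\n", khz);
	return fclose(f);
}

int main(void)
{
	const char *dir = "/sys/devices/system/cpu/cpu0/cpufreq/";
	char max_path[128], min_path[128];

	snprintf(max_path, sizeof(max_path), "%sscaling_max_freq", dir);
	snprintf(min_path, sizeof(min_path), "%sscaling_min_freq", dir);

	if (write_khz(max_path, 2000000))	/* 2 GHz: set the ceiling first */
		return 1;
	if (write_khz(min_path, 800000))	/* 800 MHz: then raise the floor */
		return 1;
	return 0;
}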
diff --git a/Documentation/infiniband/ipoib.txt b/Documentation/infiniband/ipoib.txt
index 187035560d7f..864ff3283780 100644
--- a/Documentation/infiniband/ipoib.txt
+++ b/Documentation/infiniband/ipoib.txt
| @@ -51,8 +51,6 @@ Debugging Information | |||
| 51 | 51 | ||
| 52 | References | 52 | References |
| 53 | 53 | ||
| 54 | IETF IP over InfiniBand (ipoib) Working Group | ||
| 55 | http://ietf.org/html.charters/ipoib-charter.html | ||
| 56 | Transmission of IP over InfiniBand (IPoIB) (RFC 4391) | 54 | Transmission of IP over InfiniBand (IPoIB) (RFC 4391) |
| 57 | http://ietf.org/rfc/rfc4391.txt | 55 | http://ietf.org/rfc/rfc4391.txt |
| 58 | IP over InfiniBand (IPoIB) Architecture (RFC 4392) | 56 | IP over InfiniBand (IPoIB) Architecture (RFC 4392) |
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index b0c7ab93dcb9..7345c338080a 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
| @@ -211,9 +211,8 @@ Controls the kernel's behaviour when an oops or BUG is encountered. | |||
| 211 | 211 | ||
| 212 | 0: try to continue operation | 212 | 0: try to continue operation |
| 213 | 213 | ||
| 214 | 1: delay a few seconds (to give klogd time to record the oops output) and | 214 | 1: panic immediately. If the `panic' sysctl is also non-zero then the |
| 215 | then panic. If the `panic' sysctl is also non-zero then the machine will | 215 | machine will be rebooted. |
| 216 | be rebooted. | ||
| 217 | 216 | ||
| 218 | ============================================================== | 217 | ============================================================== |
| 219 | 218 | ||
diff --git a/MAINTAINERS b/MAINTAINERS
index 8c267aa2f6ce..e3e1515ba5a9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
| @@ -214,6 +214,12 @@ W: http://acpi.sourceforge.net/ | |||
| 214 | T: git kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git | 214 | T: git kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git |
| 215 | S: Maintained | 215 | S: Maintained |
| 216 | 216 | ||
| 217 | ACPI PCI HOTPLUG DRIVER | ||
| 218 | P: Kristen Carlson Accardi | ||
| 219 | M: kristen.c.accardi@intel.com | ||
| 220 | L: pcihpd-discuss@lists.sourceforge.net | ||
| 221 | S: Maintained | ||
| 222 | |||
| 217 | AD1816 SOUND DRIVER | 223 | AD1816 SOUND DRIVER |
| 218 | P: Thorsten Knabe | 224 | P: Thorsten Knabe |
| 219 | M: Thorsten Knabe <linux@thorsten-knabe.de> | 225 | M: Thorsten Knabe <linux@thorsten-knabe.de> |
| @@ -2642,6 +2648,14 @@ M: dbrownell@users.sourceforge.net | |||
| 2642 | L: spi-devel-general@lists.sourceforge.net | 2648 | L: spi-devel-general@lists.sourceforge.net |
| 2643 | S: Maintained | 2649 | S: Maintained |
| 2644 | 2650 | ||
| 2651 | STABLE BRANCH: | ||
| 2652 | P: Greg Kroah-Hartman | ||
| 2653 | M: greg@kroah.com | ||
| 2654 | P: Chris Wright | ||
| 2655 | M: chrisw@sous-sol.org | ||
| 2656 | L: stable@kernel.org | ||
| 2657 | S: Maintained | ||
| 2658 | |||
| 2645 | TPM DEVICE DRIVER | 2659 | TPM DEVICE DRIVER |
| 2646 | P: Kylene Hall | 2660 | P: Kylene Hall |
| 2647 | M: kjhall@us.ibm.com | 2661 | M: kjhall@us.ibm.com |
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index e44a4c6a4fe5..ccc1edff5c97 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
| @@ -96,6 +96,7 @@ config X86_POWERNOW_K8_ACPI | |||
| 96 | 96 | ||
| 97 | config X86_GX_SUSPMOD | 97 | config X86_GX_SUSPMOD |
| 98 | tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation" | 98 | tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation" |
| 99 | depends on PCI | ||
| 99 | help | 100 | help |
| 100 | This add the CPUFreq driver for NatSemi Geode processors which | 101 | This add the CPUFreq driver for NatSemi Geode processors which |
| 101 | support suspend modulation. | 102 | support suspend modulation. |
| @@ -202,7 +203,7 @@ config X86_LONGRUN | |||
| 202 | config X86_LONGHAUL | 203 | config X86_LONGHAUL |
| 203 | tristate "VIA Cyrix III Longhaul" | 204 | tristate "VIA Cyrix III Longhaul" |
| 204 | select CPU_FREQ_TABLE | 205 | select CPU_FREQ_TABLE |
| 205 | depends on BROKEN | 206 | depends on ACPI_PROCESSOR |
| 206 | help | 207 | help |
| 207 | This adds the CPUFreq driver for VIA Samuel/CyrixIII, | 208 | This adds the CPUFreq driver for VIA Samuel/CyrixIII, |
| 208 | VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T | 209 | VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T |
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 567b39bea07e..efb41e81351c 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
| @@ -384,8 +384,7 @@ static int acpi_cpufreq_early_init_acpi(void) | |||
| 384 | } | 384 | } |
| 385 | 385 | ||
| 386 | /* Do initialization in ACPI core */ | 386 | /* Do initialization in ACPI core */ |
| 387 | acpi_processor_preregister_performance(acpi_perf_data); | 387 | return acpi_processor_preregister_performance(acpi_perf_data); |
| 388 | return 0; | ||
| 389 | } | 388 | } |
| 390 | 389 | ||
| 391 | static int | 390 | static int |
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index 146f607e9c44..4f2c3aeef724 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
| @@ -29,11 +29,13 @@ | |||
| 29 | #include <linux/cpufreq.h> | 29 | #include <linux/cpufreq.h> |
| 30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
| 31 | #include <linux/string.h> | 31 | #include <linux/string.h> |
| 32 | #include <linux/pci.h> | ||
| 33 | 32 | ||
| 34 | #include <asm/msr.h> | 33 | #include <asm/msr.h> |
| 35 | #include <asm/timex.h> | 34 | #include <asm/timex.h> |
| 36 | #include <asm/io.h> | 35 | #include <asm/io.h> |
| 36 | #include <asm/acpi.h> | ||
| 37 | #include <linux/acpi.h> | ||
| 38 | #include <acpi/processor.h> | ||
| 37 | 39 | ||
| 38 | #include "longhaul.h" | 40 | #include "longhaul.h" |
| 39 | 41 | ||
| @@ -56,6 +58,8 @@ static int minvid, maxvid; | |||
| 56 | static unsigned int minmult, maxmult; | 58 | static unsigned int minmult, maxmult; |
| 57 | static int can_scale_voltage; | 59 | static int can_scale_voltage; |
| 58 | static int vrmrev; | 60 | static int vrmrev; |
| 61 | static struct acpi_processor *pr = NULL; | ||
| 62 | static struct acpi_processor_cx *cx = NULL; | ||
| 59 | 63 | ||
| 60 | /* Module parameters */ | 64 | /* Module parameters */ |
| 61 | static int dont_scale_voltage; | 65 | static int dont_scale_voltage; |
| @@ -118,84 +122,65 @@ static int longhaul_get_cpu_mult(void) | |||
| 118 | return eblcr_table[invalue]; | 122 | return eblcr_table[invalue]; |
| 119 | } | 123 | } |
| 120 | 124 | ||
| 125 | /* For processor with BCR2 MSR */ | ||
| 121 | 126 | ||
| 122 | static void do_powersaver(union msr_longhaul *longhaul, | 127 | static void do_longhaul1(int cx_address, unsigned int clock_ratio_index) |
| 123 | unsigned int clock_ratio_index) | ||
| 124 | { | 128 | { |
| 125 | struct pci_dev *dev; | 129 | union msr_bcr2 bcr2; |
| 126 | unsigned long flags; | 130 | u32 t; |
| 127 | unsigned int tmp_mask; | ||
| 128 | int version; | ||
| 129 | int i; | ||
| 130 | u16 pci_cmd; | ||
| 131 | u16 cmd_state[64]; | ||
| 132 | 131 | ||
| 133 | switch (cpu_model) { | 132 | rdmsrl(MSR_VIA_BCR2, bcr2.val); |
| 134 | case CPU_EZRA_T: | 133 | /* Enable software clock multiplier */ |
| 135 | version = 3; | 134 | bcr2.bits.ESOFTBF = 1; |
| 136 | break; | 135 | bcr2.bits.CLOCKMUL = clock_ratio_index; |
| 137 | case CPU_NEHEMIAH: | ||
| 138 | version = 0xf; | ||
| 139 | break; | ||
| 140 | default: | ||
| 141 | return; | ||
| 142 | } | ||
| 143 | 136 | ||
| 144 | rdmsrl(MSR_VIA_LONGHAUL, longhaul->val); | 137 | /* Sync to timer tick */ |
| 145 | longhaul->bits.SoftBusRatio = clock_ratio_index & 0xf; | 138 | safe_halt(); |
| 146 | longhaul->bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4; | 139 | ACPI_FLUSH_CPU_CACHE(); |
| 147 | longhaul->bits.EnableSoftBusRatio = 1; | 140 | /* Change frequency on next halt or sleep */ |
| 148 | longhaul->bits.RevisionKey = 0; | 141 | wrmsrl(MSR_VIA_BCR2, bcr2.val); |
| 142 | /* Invoke C3 */ | ||
| 143 | inb(cx_address); | ||
| 144 | /* Dummy op - must do something useless after P_LVL3 read */ | ||
| 145 | t = inl(acpi_fadt.xpm_tmr_blk.address); | ||
| 146 | |||
| 147 | /* Disable software clock multiplier */ | ||
| 148 | local_irq_disable(); | ||
| 149 | rdmsrl(MSR_VIA_BCR2, bcr2.val); | ||
| 150 | bcr2.bits.ESOFTBF = 0; | ||
| 151 | wrmsrl(MSR_VIA_BCR2, bcr2.val); | ||
| 152 | } | ||
| 149 | 153 | ||
| 150 | preempt_disable(); | 154 | /* For processor with Longhaul MSR */ |
| 151 | local_irq_save(flags); | ||
| 152 | 155 | ||
| 153 | /* | 156 | static void do_powersaver(int cx_address, unsigned int clock_ratio_index) |
| 154 | * get current pci bus master state for all devices | 157 | { |
| 155 | * and clear bus master bit | 158 | union msr_longhaul longhaul; |
| 156 | */ | 159 | u32 t; |
| 157 | dev = NULL; | ||
| 158 | i = 0; | ||
| 159 | do { | ||
| 160 | dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); | ||
| 161 | if (dev != NULL) { | ||
| 162 | pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); | ||
| 163 | cmd_state[i++] = pci_cmd; | ||
| 164 | pci_cmd &= ~PCI_COMMAND_MASTER; | ||
| 165 | pci_write_config_word(dev, PCI_COMMAND, pci_cmd); | ||
| 166 | } | ||
| 167 | } while (dev != NULL); | ||
| 168 | 160 | ||
| 169 | tmp_mask=inb(0x21); /* works on C3. save mask. */ | 161 | rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); |
| 170 | outb(0xFE,0x21); /* TMR0 only */ | 162 | longhaul.bits.RevisionKey = longhaul.bits.RevisionID; |
| 171 | outb(0xFF,0x80); /* delay */ | 163 | longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf; |
| 164 | longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4; | ||
| 165 | longhaul.bits.EnableSoftBusRatio = 1; | ||
| 172 | 166 | ||
| 167 | /* Sync to timer tick */ | ||
| 173 | safe_halt(); | 168 | safe_halt(); |
| 174 | wrmsrl(MSR_VIA_LONGHAUL, longhaul->val); | 169 | ACPI_FLUSH_CPU_CACHE(); |
| 175 | halt(); | 170 | /* Change frequency on next halt or sleep */ |
| 176 | 171 | wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); | |
| 172 | /* Invoke C3 */ | ||
| 173 | inb(cx_address); | ||
| 174 | /* Dummy op - must do something useless after P_LVL3 read */ | ||
| 175 | t = inl(acpi_fadt.xpm_tmr_blk.address); | ||
| 176 | |||
| 177 | /* Disable bus ratio bit */ | ||
| 177 | local_irq_disable(); | 178 | local_irq_disable(); |
| 178 | 179 | longhaul.bits.RevisionKey = longhaul.bits.RevisionID; | |
| 179 | outb(tmp_mask,0x21); /* restore mask */ | 180 | longhaul.bits.EnableSoftBusRatio = 0; |
| 180 | 181 | longhaul.bits.EnableSoftBSEL = 0; | |
| 181 | /* restore pci bus master state for all devices */ | 182 | longhaul.bits.EnableSoftVID = 0; |
| 182 | dev = NULL; | 183 | wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); |
| 183 | i = 0; | ||
| 184 | do { | ||
| 185 | dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); | ||
| 186 | if (dev != NULL) { | ||
| 187 | pci_cmd = cmd_state[i++]; | ||
| 188 | pci_write_config_byte(dev, PCI_COMMAND, pci_cmd); | ||
| 189 | } | ||
| 190 | } while (dev != NULL); | ||
| 191 | local_irq_restore(flags); | ||
| 192 | preempt_enable(); | ||
| 193 | |||
| 194 | /* disable bus ratio bit */ | ||
| 195 | rdmsrl(MSR_VIA_LONGHAUL, longhaul->val); | ||
| 196 | longhaul->bits.EnableSoftBusRatio = 0; | ||
| 197 | longhaul->bits.RevisionKey = version; | ||
| 198 | wrmsrl(MSR_VIA_LONGHAUL, longhaul->val); | ||
| 199 | } | 184 | } |
| 200 | 185 | ||
| 201 | /** | 186 | /** |
| @@ -209,9 +194,9 @@ static void longhaul_setstate(unsigned int clock_ratio_index) | |||
| 209 | { | 194 | { |
| 210 | int speed, mult; | 195 | int speed, mult; |
| 211 | struct cpufreq_freqs freqs; | 196 | struct cpufreq_freqs freqs; |
| 212 | union msr_longhaul longhaul; | ||
| 213 | union msr_bcr2 bcr2; | ||
| 214 | static unsigned int old_ratio=-1; | 197 | static unsigned int old_ratio=-1; |
| 198 | unsigned long flags; | ||
| 199 | unsigned int pic1_mask, pic2_mask; | ||
| 215 | 200 | ||
| 216 | if (old_ratio == clock_ratio_index) | 201 | if (old_ratio == clock_ratio_index) |
| 217 | return; | 202 | return; |
| @@ -234,6 +219,20 @@ static void longhaul_setstate(unsigned int clock_ratio_index) | |||
| 234 | dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", | 219 | dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", |
| 235 | fsb, mult/10, mult%10, print_speed(speed/1000)); | 220 | fsb, mult/10, mult%10, print_speed(speed/1000)); |
| 236 | 221 | ||
| 222 | preempt_disable(); | ||
| 223 | local_irq_save(flags); | ||
| 224 | |||
| 225 | pic2_mask = inb(0xA1); | ||
| 226 | pic1_mask = inb(0x21); /* works on C3. save mask. */ | ||
| 227 | outb(0xFF,0xA1); /* Overkill */ | ||
| 228 | outb(0xFE,0x21); /* TMR0 only */ | ||
| 229 | |||
| 230 | /* Disable bus master arbitration */ | ||
| 231 | if (pr->flags.bm_check) { | ||
| 232 | acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, | ||
| 233 | ACPI_MTX_DO_NOT_LOCK); | ||
| 234 | } | ||
| 235 | |||
| 237 | switch (longhaul_version) { | 236 | switch (longhaul_version) { |
| 238 | 237 | ||
| 239 | /* | 238 | /* |
| @@ -245,20 +244,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index) | |||
| 245 | */ | 244 | */ |
| 246 | case TYPE_LONGHAUL_V1: | 245 | case TYPE_LONGHAUL_V1: |
| 247 | case TYPE_LONGHAUL_V2: | 246 | case TYPE_LONGHAUL_V2: |
| 248 | rdmsrl (MSR_VIA_BCR2, bcr2.val); | 247 | do_longhaul1(cx->address, clock_ratio_index); |
| 249 | /* Enable software clock multiplier */ | ||
| 250 | bcr2.bits.ESOFTBF = 1; | ||
| 251 | bcr2.bits.CLOCKMUL = clock_ratio_index; | ||
| 252 | local_irq_disable(); | ||
| 253 | wrmsrl (MSR_VIA_BCR2, bcr2.val); | ||
| 254 | safe_halt(); | ||
| 255 | |||
| 256 | /* Disable software clock multiplier */ | ||
| 257 | rdmsrl (MSR_VIA_BCR2, bcr2.val); | ||
| 258 | bcr2.bits.ESOFTBF = 0; | ||
| 259 | local_irq_disable(); | ||
| 260 | wrmsrl (MSR_VIA_BCR2, bcr2.val); | ||
| 261 | local_irq_enable(); | ||
| 262 | break; | 248 | break; |
| 263 | 249 | ||
| 264 | /* | 250 | /* |
| @@ -273,10 +259,22 @@ static void longhaul_setstate(unsigned int clock_ratio_index) | |||
| 273 | * to work in practice. | 259 | * to work in practice. |
| 274 | */ | 260 | */ |
| 275 | case TYPE_POWERSAVER: | 261 | case TYPE_POWERSAVER: |
| 276 | do_powersaver(&longhaul, clock_ratio_index); | 262 | do_powersaver(cx->address, clock_ratio_index); |
| 277 | break; | 263 | break; |
| 278 | } | 264 | } |
| 279 | 265 | ||
| 266 | /* Enable bus master arbitration */ | ||
| 267 | if (pr->flags.bm_check) { | ||
| 268 | acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, | ||
| 269 | ACPI_MTX_DO_NOT_LOCK); | ||
| 270 | } | ||
| 271 | |||
| 272 | outb(pic2_mask,0xA1); /* restore mask */ | ||
| 273 | outb(pic1_mask,0x21); | ||
| 274 | |||
| 275 | local_irq_restore(flags); | ||
| 276 | preempt_enable(); | ||
| 277 | |||
| 280 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 278 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
| 281 | } | 279 | } |
| 282 | 280 | ||
| @@ -324,9 +322,11 @@ static int guess_fsb(void) | |||
| 324 | static int __init longhaul_get_ranges(void) | 322 | static int __init longhaul_get_ranges(void) |
| 325 | { | 323 | { |
| 326 | unsigned long invalue; | 324 | unsigned long invalue; |
| 327 | unsigned int multipliers[32]= { | 325 | unsigned int ezra_t_multipliers[32]= { |
| 328 | 50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65, | 326 | 90, 30, 40, 100, 55, 35, 45, 95, |
| 329 | -1,110,120,-1,135,115,125,105,130,150,160,140,-1,155,-1,145 }; | 327 | 50, 70, 80, 60, 120, 75, 85, 65, |
| 328 | -1, 110, 120, -1, 135, 115, 125, 105, | ||
| 329 | 130, 150, 160, 140, -1, 155, -1, 145 }; | ||
| 330 | unsigned int j, k = 0; | 330 | unsigned int j, k = 0; |
| 331 | union msr_longhaul longhaul; | 331 | union msr_longhaul longhaul; |
| 332 | unsigned long lo, hi; | 332 | unsigned long lo, hi; |
| @@ -355,13 +355,13 @@ static int __init longhaul_get_ranges(void) | |||
| 355 | invalue = longhaul.bits.MaxMHzBR; | 355 | invalue = longhaul.bits.MaxMHzBR; |
| 356 | if (longhaul.bits.MaxMHzBR4) | 356 | if (longhaul.bits.MaxMHzBR4) |
| 357 | invalue += 16; | 357 | invalue += 16; |
| 358 | maxmult=multipliers[invalue]; | 358 | maxmult=ezra_t_multipliers[invalue]; |
| 359 | 359 | ||
| 360 | invalue = longhaul.bits.MinMHzBR; | 360 | invalue = longhaul.bits.MinMHzBR; |
| 361 | if (longhaul.bits.MinMHzBR4 == 1) | 361 | if (longhaul.bits.MinMHzBR4 == 1) |
| 362 | minmult = 30; | 362 | minmult = 30; |
| 363 | else | 363 | else |
| 364 | minmult = multipliers[invalue]; | 364 | minmult = ezra_t_multipliers[invalue]; |
| 365 | fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB]; | 365 | fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB]; |
| 366 | break; | 366 | break; |
| 367 | } | 367 | } |
| @@ -527,6 +527,18 @@ static unsigned int longhaul_get(unsigned int cpu) | |||
| 527 | return calc_speed(longhaul_get_cpu_mult()); | 527 | return calc_speed(longhaul_get_cpu_mult()); |
| 528 | } | 528 | } |
| 529 | 529 | ||
| 530 | static acpi_status longhaul_walk_callback(acpi_handle obj_handle, | ||
| 531 | u32 nesting_level, | ||
| 532 | void *context, void **return_value) | ||
| 533 | { | ||
| 534 | struct acpi_device *d; | ||
| 535 | |||
| 536 | if ( acpi_bus_get_device(obj_handle, &d) ) { | ||
| 537 | return 0; | ||
| 538 | } | ||
| 539 | *return_value = (void *)acpi_driver_data(d); | ||
| 540 | return 1; | ||
| 541 | } | ||
| 530 | 542 | ||
| 531 | static int __init longhaul_cpu_init(struct cpufreq_policy *policy) | 543 | static int __init longhaul_cpu_init(struct cpufreq_policy *policy) |
| 532 | { | 544 | { |
| @@ -534,6 +546,15 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) | |||
| 534 | char *cpuname=NULL; | 546 | char *cpuname=NULL; |
| 535 | int ret; | 547 | int ret; |
| 536 | 548 | ||
| 549 | /* Check ACPI support for C3 state */ | ||
| 550 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, | ||
| 551 | &longhaul_walk_callback, NULL, (void *)&pr); | ||
| 552 | if (pr == NULL) goto err_acpi; | ||
| 553 | |||
| 554 | cx = &pr->power.states[ACPI_STATE_C3]; | ||
| 555 | if (cx->address == 0 || cx->latency > 1000) goto err_acpi; | ||
| 556 | |||
| 557 | /* Now check what we have on this motherboard */ | ||
| 537 | switch (c->x86_model) { | 558 | switch (c->x86_model) { |
| 538 | case 6: | 559 | case 6: |
| 539 | cpu_model = CPU_SAMUEL; | 560 | cpu_model = CPU_SAMUEL; |
| @@ -634,6 +655,10 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) | |||
| 634 | cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu); | 655 | cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu); |
| 635 | 656 | ||
| 636 | return 0; | 657 | return 0; |
| 658 | |||
| 659 | err_acpi: | ||
| 660 | printk(KERN_ERR PFX "No ACPI support for CPU frequency changes.\n"); | ||
| 661 | return -ENODEV; | ||
| 637 | } | 662 | } |
| 638 | 663 | ||
| 639 | static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy) | 664 | static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy) |
| @@ -666,6 +691,18 @@ static int __init longhaul_init(void) | |||
| 666 | if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6) | 691 | if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6) |
| 667 | return -ENODEV; | 692 | return -ENODEV; |
| 668 | 693 | ||
| 694 | #ifdef CONFIG_SMP | ||
| 695 | if (num_online_cpus() > 1) { | ||
| 696 | return -ENODEV; | ||
| 697 | printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n"); | ||
| 698 | } | ||
| 699 | #endif | ||
| 700 | #ifdef CONFIG_X86_IO_APIC | ||
| 701 | if (cpu_has_apic) { | ||
| 702 | printk(KERN_ERR PFX "APIC detected. Longhaul is currently broken in this configuration.\n"); | ||
| 703 | return -ENODEV; | ||
| 704 | } | ||
| 705 | #endif | ||
| 669 | switch (c->x86_model) { | 706 | switch (c->x86_model) { |
| 670 | case 6 ... 9: | 707 | case 6 ... 9: |
| 671 | return cpufreq_register_driver(&longhaul_driver); | 708 | return cpufreq_register_driver(&longhaul_driver); |
| @@ -699,6 +736,6 @@ MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>"); | |||
| 699 | MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); | 736 | MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); |
| 700 | MODULE_LICENSE ("GPL"); | 737 | MODULE_LICENSE ("GPL"); |
| 701 | 738 | ||
| 702 | module_init(longhaul_init); | 739 | late_initcall(longhaul_init); |
| 703 | module_exit(longhaul_exit); | 740 | module_exit(longhaul_exit); |
| 704 | 741 | ||
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index a3fe97531134..8a4f0d0d17a3 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
| @@ -151,7 +151,7 @@ static void | |||
| 151 | simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset) | 151 | simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset) |
| 152 | { | 152 | { |
| 153 | int list_len = sc->use_sg; | 153 | int list_len = sc->use_sg; |
| 154 | struct scatterlist *sl = (struct scatterlist *)sc->buffer; | 154 | struct scatterlist *sl = (struct scatterlist *)sc->request_buffer; |
| 155 | struct disk_stat stat; | 155 | struct disk_stat stat; |
| 156 | struct disk_req req; | 156 | struct disk_req req; |
| 157 | 157 | ||
| @@ -244,7 +244,7 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len) | |||
| 244 | 244 | ||
| 245 | if (scatterlen == 0) | 245 | if (scatterlen == 0) |
| 246 | memcpy(sc->request_buffer, buf, len); | 246 | memcpy(sc->request_buffer, buf, len); |
| 247 | else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) { | 247 | else for (slp = (struct scatterlist *)sc->request_buffer; scatterlen-- > 0 && len > 0; slp++) { |
| 248 | unsigned thislen = min(len, slp->length); | 248 | unsigned thislen = min(len, slp->length); |
| 249 | 249 | ||
| 250 | memcpy(page_address(slp->page) + slp->offset, buf, thislen); | 250 | memcpy(page_address(slp->page) + slp->offset, buf, thislen); |
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index e4bfa9dafbce..bb8770a177b5 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
| @@ -632,7 +632,7 @@ kern_memory_descriptor (unsigned long phys_addr) | |||
| 632 | if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT)) | 632 | if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT)) |
| 633 | return md; | 633 | return md; |
| 634 | } | 634 | } |
| 635 | return 0; | 635 | return NULL; |
| 636 | } | 636 | } |
| 637 | 637 | ||
| 638 | static efi_memory_desc_t * | 638 | static efi_memory_desc_t * |
| @@ -652,7 +652,7 @@ efi_memory_descriptor (unsigned long phys_addr) | |||
| 652 | if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) | 652 | if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) |
| 653 | return md; | 653 | return md; |
| 654 | } | 654 | } |
| 655 | return 0; | 655 | return NULL; |
| 656 | } | 656 | } |
| 657 | 657 | ||
| 658 | u32 | 658 | u32 |
| @@ -923,7 +923,7 @@ find_memmap_space (void) | |||
| 923 | void | 923 | void |
| 924 | efi_memmap_init(unsigned long *s, unsigned long *e) | 924 | efi_memmap_init(unsigned long *s, unsigned long *e) |
| 925 | { | 925 | { |
| 926 | struct kern_memdesc *k, *prev = 0; | 926 | struct kern_memdesc *k, *prev = NULL; |
| 927 | u64 contig_low=0, contig_high=0; | 927 | u64 contig_low=0, contig_high=0; |
| 928 | u64 as, ae, lim; | 928 | u64 as, ae, lim; |
| 929 | void *efi_map_start, *efi_map_end, *p, *q; | 929 | void *efi_map_start, *efi_map_end, *p, *q; |
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 561b8f1d3bc7..29236f0c62b5 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
| @@ -853,7 +853,6 @@ END(__ia64_init_fpu) | |||
| 853 | */ | 853 | */ |
| 854 | GLOBAL_ENTRY(ia64_switch_mode_phys) | 854 | GLOBAL_ENTRY(ia64_switch_mode_phys) |
| 855 | { | 855 | { |
| 856 | alloc r2=ar.pfs,0,0,0,0 | ||
| 857 | rsm psr.i | psr.ic // disable interrupts and interrupt collection | 856 | rsm psr.i | psr.ic // disable interrupts and interrupt collection |
| 858 | mov r15=ip | 857 | mov r15=ip |
| 859 | } | 858 | } |
| @@ -902,7 +901,6 @@ END(ia64_switch_mode_phys) | |||
| 902 | */ | 901 | */ |
| 903 | GLOBAL_ENTRY(ia64_switch_mode_virt) | 902 | GLOBAL_ENTRY(ia64_switch_mode_virt) |
| 904 | { | 903 | { |
| 905 | alloc r2=ar.pfs,0,0,0,0 | ||
| 906 | rsm psr.i | psr.ic // disable interrupts and interrupt collection | 904 | rsm psr.i | psr.ic // disable interrupts and interrupt collection |
| 907 | mov r15=ip | 905 | mov r15=ip |
| 908 | } | 906 | } |
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index b7cf651ceb14..3ead20fb6f4b 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
| @@ -62,7 +62,7 @@ EXPORT_SYMBOL(__udivdi3); | |||
| 62 | EXPORT_SYMBOL(__moddi3); | 62 | EXPORT_SYMBOL(__moddi3); |
| 63 | EXPORT_SYMBOL(__umoddi3); | 63 | EXPORT_SYMBOL(__umoddi3); |
| 64 | 64 | ||
| 65 | #if defined(CONFIG_MD_RAID5) || defined(CONFIG_MD_RAID5_MODULE) | 65 | #if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE) |
| 66 | extern void xor_ia64_2(void); | 66 | extern void xor_ia64_2(void); |
| 67 | extern void xor_ia64_3(void); | 67 | extern void xor_ia64_3(void); |
| 68 | extern void xor_ia64_4(void); | 68 | extern void xor_ia64_4(void); |
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
index 5018c7f2e7a8..ebaf1e685f5e 100644
--- a/arch/ia64/kernel/pal.S
+++ b/arch/ia64/kernel/pal.S
| @@ -217,12 +217,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked) | |||
| 217 | .body | 217 | .body |
| 218 | ;; | 218 | ;; |
| 219 | ld8 loc2 = [loc2] // loc2 <- entry point | 219 | ld8 loc2 = [loc2] // loc2 <- entry point |
| 220 | mov out0 = in0 // first argument | 220 | mov loc3 = psr // save psr |
| 221 | mov out1 = in1 // copy arg2 | ||
| 222 | mov out2 = in2 // copy arg3 | ||
| 223 | mov out3 = in3 // copy arg3 | ||
| 224 | ;; | ||
| 225 | mov loc3 = psr // save psr | ||
| 226 | ;; | 221 | ;; |
| 227 | mov loc4=ar.rsc // save RSE configuration | 222 | mov loc4=ar.rsc // save RSE configuration |
| 228 | dep.z loc2=loc2,0,61 // convert pal entry point to physical | 223 | dep.z loc2=loc2,0,61 // convert pal entry point to physical |
| @@ -236,18 +231,23 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked) | |||
| 236 | ;; | 231 | ;; |
| 237 | andcm r16=loc3,r16 // removes bits to clear from psr | 232 | andcm r16=loc3,r16 // removes bits to clear from psr |
| 238 | br.call.sptk.many rp=ia64_switch_mode_phys | 233 | br.call.sptk.many rp=ia64_switch_mode_phys |
| 239 | .ret6: | 234 | |
| 235 | mov out0 = in0 // first argument | ||
| 236 | mov out1 = in1 // copy arg2 | ||
| 237 | mov out2 = in2 // copy arg3 | ||
| 238 | mov out3 = in3 // copy arg3 | ||
| 240 | mov loc5 = r19 | 239 | mov loc5 = r19 |
| 241 | mov loc6 = r20 | 240 | mov loc6 = r20 |
| 241 | |||
| 242 | br.call.sptk.many rp=b7 // now make the call | 242 | br.call.sptk.many rp=b7 // now make the call |
| 243 | .ret7: | 243 | |
| 244 | mov ar.rsc=0 // put RSE in enforced lazy, LE mode | 244 | mov ar.rsc=0 // put RSE in enforced lazy, LE mode |
| 245 | mov r16=loc3 // r16= original psr | 245 | mov r16=loc3 // r16= original psr |
| 246 | mov r19=loc5 | 246 | mov r19=loc5 |
| 247 | mov r20=loc6 | 247 | mov r20=loc6 |
| 248 | br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode | 248 | br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode |
| 249 | 249 | ||
| 250 | .ret8: mov psr.l = loc3 // restore init PSR | 250 | mov psr.l = loc3 // restore init PSR |
| 251 | mov ar.pfs = loc1 | 251 | mov ar.pfs = loc1 |
| 252 | mov rp = loc0 | 252 | mov rp = loc0 |
| 253 | ;; | 253 | ;; |
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index ab5b52413e91..0b546e2b36ac 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
| @@ -566,29 +566,23 @@ version_info(char *page) | |||
| 566 | pal_version_u_t min_ver, cur_ver; | 566 | pal_version_u_t min_ver, cur_ver; |
| 567 | char *p = page; | 567 | char *p = page; |
| 568 | 568 | ||
| 569 | /* The PAL_VERSION call is advertised as being able to support | 569 | if (ia64_pal_version(&min_ver, &cur_ver) != 0) |
| 570 | * both physical and virtual mode calls. This seems to be a documentation | 570 | return 0; |
| 571 | * bug rather than firmware bug. In fact, it does only support physical mode. | ||
| 572 | * So now the code reflects this fact and the pal_version() has been updated | ||
| 573 | * accordingly. | ||
| 574 | */ | ||
| 575 | if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0; | ||
| 576 | 571 | ||
| 577 | p += sprintf(p, | 572 | p += sprintf(p, |
| 578 | "PAL_vendor : 0x%02x (min=0x%02x)\n" | 573 | "PAL_vendor : 0x%02x (min=0x%02x)\n" |
| 579 | "PAL_A : %x.%x.%x (min=%x.%x.%x)\n" | 574 | "PAL_A : %02x.%02x (min=%02x.%02x)\n" |
| 580 | "PAL_B : %x.%x.%x (min=%x.%x.%x)\n", | 575 | "PAL_B : %02x.%02x (min=%02x.%02x)\n", |
| 581 | cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor, | 576 | cur_ver.pal_version_s.pv_pal_vendor, |
| 582 | 577 | min_ver.pal_version_s.pv_pal_vendor, | |
| 583 | cur_ver.pal_version_s.pv_pal_a_model>>4, | 578 | cur_ver.pal_version_s.pv_pal_a_model, |
| 584 | cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_rev, | 579 | cur_ver.pal_version_s.pv_pal_a_rev, |
| 585 | min_ver.pal_version_s.pv_pal_a_model>>4, | 580 | min_ver.pal_version_s.pv_pal_a_model, |
| 586 | min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_rev, | 581 | min_ver.pal_version_s.pv_pal_a_rev, |
| 587 | 582 | cur_ver.pal_version_s.pv_pal_b_model, | |
| 588 | cur_ver.pal_version_s.pv_pal_b_model>>4, | 583 | cur_ver.pal_version_s.pv_pal_b_rev, |
| 589 | cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_rev, | 584 | min_ver.pal_version_s.pv_pal_b_model, |
| 590 | min_ver.pal_version_s.pv_pal_b_model>>4, | 585 | min_ver.pal_version_s.pv_pal_b_rev); |
| 591 | min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_rev); | ||
| 592 | return p - page; | 586 | return p - page; |
| 593 | } | 587 | } |
| 594 | 588 | ||
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 5f03b9e524dd..4c73a6763669 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
| @@ -32,32 +32,38 @@ | |||
| 32 | 32 | ||
| 33 | extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *); | 33 | extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *); |
| 34 | 34 | ||
| 35 | #define MAX_UNCACHED_GRANULES 5 | 35 | struct uncached_pool { |
| 36 | static int allocated_granules; | 36 | struct gen_pool *pool; |
| 37 | struct mutex add_chunk_mutex; /* serialize adding a converted chunk */ | ||
| 38 | int nchunks_added; /* #of converted chunks added to pool */ | ||
| 39 | atomic_t status; /* smp called function's return status*/ | ||
| 40 | }; | ||
| 41 | |||
| 42 | #define MAX_CONVERTED_CHUNKS_PER_NODE 2 | ||
| 37 | 43 | ||
| 38 | struct gen_pool *uncached_pool[MAX_NUMNODES]; | 44 | struct uncached_pool uncached_pools[MAX_NUMNODES]; |
| 39 | 45 | ||
| 40 | 46 | ||
| 41 | static void uncached_ipi_visibility(void *data) | 47 | static void uncached_ipi_visibility(void *data) |
| 42 | { | 48 | { |
| 43 | int status; | 49 | int status; |
| 50 | struct uncached_pool *uc_pool = (struct uncached_pool *)data; | ||
| 44 | 51 | ||
| 45 | status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); | 52 | status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); |
| 46 | if ((status != PAL_VISIBILITY_OK) && | 53 | if ((status != PAL_VISIBILITY_OK) && |
| 47 | (status != PAL_VISIBILITY_OK_REMOTE_NEEDED)) | 54 | (status != PAL_VISIBILITY_OK_REMOTE_NEEDED)) |
| 48 | printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on " | 55 | atomic_inc(&uc_pool->status); |
| 49 | "CPU %i\n", status, raw_smp_processor_id()); | ||
| 50 | } | 56 | } |
| 51 | 57 | ||
| 52 | 58 | ||
| 53 | static void uncached_ipi_mc_drain(void *data) | 59 | static void uncached_ipi_mc_drain(void *data) |
| 54 | { | 60 | { |
| 55 | int status; | 61 | int status; |
| 62 | struct uncached_pool *uc_pool = (struct uncached_pool *)data; | ||
| 56 | 63 | ||
| 57 | status = ia64_pal_mc_drain(); | 64 | status = ia64_pal_mc_drain(); |
| 58 | if (status) | 65 | if (status != PAL_STATUS_SUCCESS) |
| 59 | printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on " | 66 | atomic_inc(&uc_pool->status); |
| 60 | "CPU %i\n", status, raw_smp_processor_id()); | ||
| 61 | } | 67 | } |
| 62 | 68 | ||
| 63 | 69 | ||
| @@ -70,21 +76,34 @@ static void uncached_ipi_mc_drain(void *data) | |||
| 70 | * This is accomplished by first allocating a granule of cached memory pages | 76 | * This is accomplished by first allocating a granule of cached memory pages |
| 71 | * and then converting them to uncached memory pages. | 77 | * and then converting them to uncached memory pages. |
| 72 | */ | 78 | */ |
| 73 | static int uncached_add_chunk(struct gen_pool *pool, int nid) | 79 | static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) |
| 74 | { | 80 | { |
| 75 | struct page *page; | 81 | struct page *page; |
| 76 | int status, i; | 82 | int status, i, nchunks_added = uc_pool->nchunks_added; |
| 77 | unsigned long c_addr, uc_addr; | 83 | unsigned long c_addr, uc_addr; |
| 78 | 84 | ||
| 79 | if (allocated_granules >= MAX_UNCACHED_GRANULES) | 85 | if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0) |
| 86 | return -1; /* interrupted by a signal */ | ||
| 87 | |||
| 88 | if (uc_pool->nchunks_added > nchunks_added) { | ||
| 89 | /* someone added a new chunk while we were waiting */ | ||
| 90 | mutex_unlock(&uc_pool->add_chunk_mutex); | ||
| 91 | return 0; | ||
| 92 | } | ||
| 93 | |||
| 94 | if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) { | ||
| 95 | mutex_unlock(&uc_pool->add_chunk_mutex); | ||
| 80 | return -1; | 96 | return -1; |
| 97 | } | ||
| 81 | 98 | ||
| 82 | /* attempt to allocate a granule's worth of cached memory pages */ | 99 | /* attempt to allocate a granule's worth of cached memory pages */ |
| 83 | 100 | ||
| 84 | page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO, | 101 | page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO, |
| 85 | IA64_GRANULE_SHIFT-PAGE_SHIFT); | 102 | IA64_GRANULE_SHIFT-PAGE_SHIFT); |
| 86 | if (!page) | 103 | if (!page) { |
| 104 | mutex_unlock(&uc_pool->add_chunk_mutex); | ||
| 87 | return -1; | 105 | return -1; |
| 106 | } | ||
| 88 | 107 | ||
| 89 | /* convert the memory pages from cached to uncached */ | 108 | /* convert the memory pages from cached to uncached */ |
| 90 | 109 | ||
| @@ -102,11 +121,14 @@ static int uncached_add_chunk(struct gen_pool *pool, int nid) | |||
| 102 | flush_tlb_kernel_range(uc_addr, uc_adddr + IA64_GRANULE_SIZE); | 121 | flush_tlb_kernel_range(uc_addr, uc_adddr + IA64_GRANULE_SIZE); |
| 103 | 122 | ||
| 104 | status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); | 123 | status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); |
| 105 | if (!status) { | 124 | if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) { |
| 106 | status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1); | 125 | atomic_set(&uc_pool->status, 0); |
| 107 | if (status) | 126 | status = smp_call_function(uncached_ipi_visibility, uc_pool, |
| 127 | 0, 1); | ||
| 128 | if (status || atomic_read(&uc_pool->status)) | ||
| 108 | goto failed; | 129 | goto failed; |
| 109 | } | 130 | } else if (status != PAL_VISIBILITY_OK) |
| 131 | goto failed; | ||
| 110 | 132 | ||
| 111 | preempt_disable(); | 133 | preempt_disable(); |
| 112 | 134 | ||
| @@ -120,20 +142,24 @@ static int uncached_add_chunk(struct gen_pool *pool, int nid) | |||
| 120 | 142 | ||
| 121 | preempt_enable(); | 143 | preempt_enable(); |
| 122 | 144 | ||
| 123 | ia64_pal_mc_drain(); | 145 | status = ia64_pal_mc_drain(); |
| 124 | status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1); | 146 | if (status != PAL_STATUS_SUCCESS) |
| 125 | if (status) | 147 | goto failed; |
| 148 | atomic_set(&uc_pool->status, 0); | ||
| 149 | status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1); | ||
| 150 | if (status || atomic_read(&uc_pool->status)) | ||
| 126 | goto failed; | 151 | goto failed; |
| 127 | 152 | ||
| 128 | /* | 153 | /* |
| 129 | * The chunk of memory pages has been converted to uncached so now we | 154 | * The chunk of memory pages has been converted to uncached so now we |
| 130 | * can add it to the pool. | 155 | * can add it to the pool. |
| 131 | */ | 156 | */ |
| 132 | status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid); | 157 | status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid); |
| 133 | if (status) | 158 | if (status) |
| 134 | goto failed; | 159 | goto failed; |
| 135 | 160 | ||
| 136 | allocated_granules++; | 161 | uc_pool->nchunks_added++; |
| 162 | mutex_unlock(&uc_pool->add_chunk_mutex); | ||
| 137 | return 0; | 163 | return 0; |
| 138 | 164 | ||
| 139 | /* failed to convert or add the chunk so give it back to the kernel */ | 165 | /* failed to convert or add the chunk so give it back to the kernel */ |
| @@ -142,6 +168,7 @@ failed: | |||
| 142 | ClearPageUncached(&page[i]); | 168 | ClearPageUncached(&page[i]); |
| 143 | 169 | ||
| 144 | free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT); | 170 | free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT); |
| 171 | mutex_unlock(&uc_pool->add_chunk_mutex); | ||
| 145 | return -1; | 172 | return -1; |
| 146 | } | 173 | } |
| 147 | 174 | ||
| @@ -158,7 +185,7 @@ failed: | |||
| 158 | unsigned long uncached_alloc_page(int starting_nid) | 185 | unsigned long uncached_alloc_page(int starting_nid) |
| 159 | { | 186 | { |
| 160 | unsigned long uc_addr; | 187 | unsigned long uc_addr; |
| 161 | struct gen_pool *pool; | 188 | struct uncached_pool *uc_pool; |
| 162 | int nid; | 189 | int nid; |
| 163 | 190 | ||
| 164 | if (unlikely(starting_nid >= MAX_NUMNODES)) | 191 | if (unlikely(starting_nid >= MAX_NUMNODES)) |
| @@ -171,14 +198,14 @@ unsigned long uncached_alloc_page(int starting_nid) | |||
| 171 | do { | 198 | do { |
| 172 | if (!node_online(nid)) | 199 | if (!node_online(nid)) |
| 173 | continue; | 200 | continue; |
| 174 | pool = uncached_pool[nid]; | 201 | uc_pool = &uncached_pools[nid]; |
| 175 | if (pool == NULL) | 202 | if (uc_pool->pool == NULL) |
| 176 | continue; | 203 | continue; |
| 177 | do { | 204 | do { |
| 178 | uc_addr = gen_pool_alloc(pool, PAGE_SIZE); | 205 | uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE); |
| 179 | if (uc_addr != 0) | 206 | if (uc_addr != 0) |
| 180 | return uc_addr; | 207 | return uc_addr; |
| 181 | } while (uncached_add_chunk(pool, nid) == 0); | 208 | } while (uncached_add_chunk(uc_pool, nid) == 0); |
| 182 | 209 | ||
| 183 | } while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid); | 210 | } while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid); |
| 184 | 211 | ||
| @@ -197,7 +224,7 @@ EXPORT_SYMBOL(uncached_alloc_page); | |||
| 197 | void uncached_free_page(unsigned long uc_addr) | 224 | void uncached_free_page(unsigned long uc_addr) |
| 198 | { | 225 | { |
| 199 | int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET); | 226 | int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET); |
| 200 | struct gen_pool *pool = uncached_pool[nid]; | 227 | struct gen_pool *pool = uncached_pools[nid].pool; |
| 201 | 228 | ||
| 202 | if (unlikely(pool == NULL)) | 229 | if (unlikely(pool == NULL)) |
| 203 | return; | 230 | return; |
| @@ -224,7 +251,7 @@ static int __init uncached_build_memmap(unsigned long uc_start, | |||
| 224 | unsigned long uc_end, void *arg) | 251 | unsigned long uc_end, void *arg) |
| 225 | { | 252 | { |
| 226 | int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET); | 253 | int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET); |
| 227 | struct gen_pool *pool = uncached_pool[nid]; | 254 | struct gen_pool *pool = uncached_pools[nid].pool; |
| 228 | size_t size = uc_end - uc_start; | 255 | size_t size = uc_end - uc_start; |
| 229 | 256 | ||
| 230 | touch_softlockup_watchdog(); | 257 | touch_softlockup_watchdog(); |
| @@ -242,7 +269,8 @@ static int __init uncached_init(void) | |||
| 242 | int nid; | 269 | int nid; |
| 243 | 270 | ||
| 244 | for_each_online_node(nid) { | 271 | for_each_online_node(nid) { |
| 245 | uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid); | 272 | uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid); |
| 273 | mutex_init(&uncached_pools[nid].add_chunk_mutex); | ||
| 246 | } | 274 | } |
| 247 | 275 | ||
| 248 | efi_memmap_walk_uc(uncached_build_memmap, NULL); | 276 | efi_memmap_walk_uc(uncached_build_memmap, NULL); |
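The reworked uncached_add_chunk() above serializes pool growth with a mutex plus a recheck of how many chunks existed before the caller blocked; a simplified user-space analogue of that pattern (the structure and names are invented for the sketch, and pthreads stand in for the kernel mutex):

#include <pthread.h>

struct pool_model {
	pthread_mutex_t lock;
	int nchunks_added;	/* converted chunks added so far */
	int max_chunks;		/* per-pool capacity limit */
};

static int pool_add_chunk(struct pool_model *p)
{
	/* Snapshot taken before blocking, as in the patch; strictly portable
	 * code would read this under the lock or atomically. */
	int seen = p->nchunks_added;

	pthread_mutex_lock(&p->lock);
	if (p->nchunks_added > seen) {
		/* Another thread grew the pool while we waited; nothing to do. */
		pthread_mutex_unlock(&p->lock);
		return 0;
	}
	if (p->nchunks_added >= p->max_chunks) {
		pthread_mutex_unlock(&p->lock);
		return -1;
	}
	/* ...allocate and convert a chunk here, bailing out on failure... */
	p->nchunks_added++;
	pthread_mutex_unlock(&p->lock);
	return 0;
}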
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index d8536a2c22a9..38fa6e49e791 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
| @@ -14,7 +14,7 @@ lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \ | |||
| 14 | lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o | 14 | lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o |
| 15 | lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o | 15 | lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o |
| 16 | lib-$(CONFIG_PERFMON) += carta_random.o | 16 | lib-$(CONFIG_PERFMON) += carta_random.o |
| 17 | lib-$(CONFIG_MD_RAID5) += xor.o | 17 | lib-$(CONFIG_MD_RAID456) += xor.o |
| 18 | 18 | ||
| 19 | AFLAGS___divdi3.o = | 19 | AFLAGS___divdi3.o = |
| 20 | AFLAGS___udivdi3.o = -DUNSIGNED | 20 | AFLAGS___udivdi3.o = -DUNSIGNED |
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2a88cdd6d924..e004143ba86b 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
| @@ -27,6 +27,7 @@ | |||
| 27 | 27 | ||
| 28 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 28 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
| 29 | static unsigned long num_dma_physpages; | 29 | static unsigned long num_dma_physpages; |
| 30 | static unsigned long max_gap; | ||
| 30 | #endif | 31 | #endif |
| 31 | 32 | ||
| 32 | /** | 33 | /** |
| @@ -45,9 +46,15 @@ show_mem (void) | |||
| 45 | 46 | ||
| 46 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); | 47 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); |
| 47 | i = max_mapnr; | 48 | i = max_mapnr; |
| 48 | while (i-- > 0) { | 49 | for (i = 0; i < max_mapnr; i++) { |
| 49 | if (!pfn_valid(i)) | 50 | if (!pfn_valid(i)) { |
| 51 | #ifdef CONFIG_VIRTUAL_MEM_MAP | ||
| 52 | if (max_gap < LARGE_GAP) | ||
| 53 | continue; | ||
| 54 | i = vmemmap_find_next_valid_pfn(0, i) - 1; | ||
| 55 | #endif | ||
| 50 | continue; | 56 | continue; |
| 57 | } | ||
| 51 | total++; | 58 | total++; |
| 52 | if (PageReserved(mem_map+i)) | 59 | if (PageReserved(mem_map+i)) |
| 53 | reserved++; | 60 | reserved++; |
| @@ -234,7 +241,6 @@ paging_init (void) | |||
| 234 | unsigned long zones_size[MAX_NR_ZONES]; | 241 | unsigned long zones_size[MAX_NR_ZONES]; |
| 235 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 242 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
| 236 | unsigned long zholes_size[MAX_NR_ZONES]; | 243 | unsigned long zholes_size[MAX_NR_ZONES]; |
| 237 | unsigned long max_gap; | ||
| 238 | #endif | 244 | #endif |
| 239 | 245 | ||
| 240 | /* initialize mem_map[] */ | 246 | /* initialize mem_map[] */ |
| @@ -266,7 +272,6 @@ paging_init (void) | |||
| 266 | } | 272 | } |
| 267 | } | 273 | } |
| 268 | 274 | ||
| 269 | max_gap = 0; | ||
| 270 | efi_memmap_walk(find_largest_hole, (u64 *)&max_gap); | 275 | efi_memmap_walk(find_largest_hole, (u64 *)&max_gap); |
| 271 | if (max_gap < LARGE_GAP) { | 276 | if (max_gap < LARGE_GAP) { |
| 272 | vmem_map = (struct page *) 0; | 277 | vmem_map = (struct page *) 0; |
| @@ -277,7 +282,8 @@ paging_init (void) | |||
| 277 | 282 | ||
| 278 | /* allocate virtual_mem_map */ | 283 | /* allocate virtual_mem_map */ |
| 279 | 284 | ||
| 280 | map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page)); | 285 | map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * |
| 286 | sizeof(struct page)); | ||
| 281 | vmalloc_end -= map_size; | 287 | vmalloc_end -= map_size; |
| 282 | vmem_map = (struct page *) vmalloc_end; | 288 | vmem_map = (struct page *) vmalloc_end; |
| 283 | efi_memmap_walk(create_mem_map_page_table, NULL); | 289 | efi_memmap_walk(create_mem_map_page_table, NULL); |
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 99bd9e30db96..d260bffa01ab 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
| @@ -534,68 +534,6 @@ void __cpuinit *per_cpu_init(void) | |||
| 534 | } | 534 | } |
| 535 | #endif /* CONFIG_SMP */ | 535 | #endif /* CONFIG_SMP */ |
| 536 | 536 | ||
| 537 | #ifdef CONFIG_VIRTUAL_MEM_MAP | ||
| 538 | static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i) | ||
| 539 | { | ||
| 540 | unsigned long end_address, hole_next_pfn; | ||
| 541 | unsigned long stop_address; | ||
| 542 | |||
| 543 | end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i]; | ||
| 544 | end_address = PAGE_ALIGN(end_address); | ||
| 545 | |||
| 546 | stop_address = (unsigned long) &vmem_map[ | ||
| 547 | pgdat->node_start_pfn + pgdat->node_spanned_pages]; | ||
| 548 | |||
| 549 | do { | ||
| 550 | pgd_t *pgd; | ||
| 551 | pud_t *pud; | ||
| 552 | pmd_t *pmd; | ||
| 553 | pte_t *pte; | ||
| 554 | |||
| 555 | pgd = pgd_offset_k(end_address); | ||
| 556 | if (pgd_none(*pgd)) { | ||
| 557 | end_address += PGDIR_SIZE; | ||
| 558 | continue; | ||
| 559 | } | ||
| 560 | |||
| 561 | pud = pud_offset(pgd, end_address); | ||
| 562 | if (pud_none(*pud)) { | ||
| 563 | end_address += PUD_SIZE; | ||
| 564 | continue; | ||
| 565 | } | ||
| 566 | |||
| 567 | pmd = pmd_offset(pud, end_address); | ||
| 568 | if (pmd_none(*pmd)) { | ||
| 569 | end_address += PMD_SIZE; | ||
| 570 | continue; | ||
| 571 | } | ||
| 572 | |||
| 573 | pte = pte_offset_kernel(pmd, end_address); | ||
| 574 | retry_pte: | ||
| 575 | if (pte_none(*pte)) { | ||
| 576 | end_address += PAGE_SIZE; | ||
| 577 | pte++; | ||
| 578 | if ((end_address < stop_address) && | ||
| 579 | (end_address != ALIGN(end_address, 1UL << PMD_SHIFT))) | ||
| 580 | goto retry_pte; | ||
| 581 | continue; | ||
| 582 | } | ||
| 583 | /* Found next valid vmem_map page */ | ||
| 584 | break; | ||
| 585 | } while (end_address < stop_address); | ||
| 586 | |||
| 587 | end_address = min(end_address, stop_address); | ||
| 588 | end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1; | ||
| 589 | hole_next_pfn = end_address / sizeof(struct page); | ||
| 590 | return hole_next_pfn - pgdat->node_start_pfn; | ||
| 591 | } | ||
| 592 | #else | ||
| 593 | static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i) | ||
| 594 | { | ||
| 595 | return i + 1; | ||
| 596 | } | ||
| 597 | #endif | ||
| 598 | |||
| 599 | /** | 537 | /** |
| 600 | * show_mem - give short summary of memory stats | 538 | * show_mem - give short summary of memory stats |
| 601 | * | 539 | * |
| @@ -625,7 +563,8 @@ void show_mem(void) | |||
| 625 | if (pfn_valid(pgdat->node_start_pfn + i)) | 563 | if (pfn_valid(pgdat->node_start_pfn + i)) |
| 626 | page = pfn_to_page(pgdat->node_start_pfn + i); | 564 | page = pfn_to_page(pgdat->node_start_pfn + i); |
| 627 | else { | 565 | else { |
| 628 | i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1; | 566 | i = vmemmap_find_next_valid_pfn(pgdat->node_id, |
| 567 | i) - 1; | ||
| 629 | continue; | 568 | continue; |
| 630 | } | 569 | } |
| 631 | if (PageReserved(page)) | 570 | if (PageReserved(page)) |
| @@ -751,7 +690,8 @@ void __init paging_init(void) | |||
| 751 | efi_memmap_walk(filter_rsvd_memory, count_node_pages); | 690 | efi_memmap_walk(filter_rsvd_memory, count_node_pages); |
| 752 | 691 | ||
| 753 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 692 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
| 754 | vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page)); | 693 | vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * |
| 694 | sizeof(struct page)); | ||
| 755 | vmem_map = (struct page *) vmalloc_end; | 695 | vmem_map = (struct page *) vmalloc_end; |
| 756 | efi_memmap_walk(create_mem_map_page_table, NULL); | 696 | efi_memmap_walk(create_mem_map_page_table, NULL); |
| 757 | printk("Virtual mem_map starts at 0x%p\n", vmem_map); | 697 | printk("Virtual mem_map starts at 0x%p\n", vmem_map); |
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 2f50c064513c..30617ccb4f7e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
| @@ -415,6 +415,61 @@ ia64_mmu_init (void *my_cpu_data) | |||
| 415 | } | 415 | } |
| 416 | 416 | ||
| 417 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 417 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
| 418 | int vmemmap_find_next_valid_pfn(int node, int i) | ||
| 419 | { | ||
| 420 | unsigned long end_address, hole_next_pfn; | ||
| 421 | unsigned long stop_address; | ||
| 422 | pg_data_t *pgdat = NODE_DATA(node); | ||
| 423 | |||
| 424 | end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i]; | ||
| 425 | end_address = PAGE_ALIGN(end_address); | ||
| 426 | |||
| 427 | stop_address = (unsigned long) &vmem_map[ | ||
| 428 | pgdat->node_start_pfn + pgdat->node_spanned_pages]; | ||
| 429 | |||
| 430 | do { | ||
| 431 | pgd_t *pgd; | ||
| 432 | pud_t *pud; | ||
| 433 | pmd_t *pmd; | ||
| 434 | pte_t *pte; | ||
| 435 | |||
| 436 | pgd = pgd_offset_k(end_address); | ||
| 437 | if (pgd_none(*pgd)) { | ||
| 438 | end_address += PGDIR_SIZE; | ||
| 439 | continue; | ||
| 440 | } | ||
| 441 | |||
| 442 | pud = pud_offset(pgd, end_address); | ||
| 443 | if (pud_none(*pud)) { | ||
| 444 | end_address += PUD_SIZE; | ||
| 445 | continue; | ||
| 446 | } | ||
| 447 | |||
| 448 | pmd = pmd_offset(pud, end_address); | ||
| 449 | if (pmd_none(*pmd)) { | ||
| 450 | end_address += PMD_SIZE; | ||
| 451 | continue; | ||
| 452 | } | ||
| 453 | |||
| 454 | pte = pte_offset_kernel(pmd, end_address); | ||
| 455 | retry_pte: | ||
| 456 | if (pte_none(*pte)) { | ||
| 457 | end_address += PAGE_SIZE; | ||
| 458 | pte++; | ||
| 459 | if ((end_address < stop_address) && | ||
| 460 | (end_address != ALIGN(end_address, 1UL << PMD_SHIFT))) | ||
| 461 | goto retry_pte; | ||
| 462 | continue; | ||
| 463 | } | ||
| 464 | /* Found next valid vmem_map page */ | ||
| 465 | break; | ||
| 466 | } while (end_address < stop_address); | ||
| 467 | |||
| 468 | end_address = min(end_address, stop_address); | ||
| 469 | end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1; | ||
| 470 | hole_next_pfn = end_address / sizeof(struct page); | ||
| 471 | return hole_next_pfn - pgdat->node_start_pfn; | ||
| 472 | } | ||
| 418 | 473 | ||
| 419 | int __init | 474 | int __init |
| 420 | create_mem_map_page_table (u64 start, u64 end, void *arg) | 475 | create_mem_map_page_table (u64 start, u64 end, void *arg) |
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 07bd02b6c372..4280c074d64e 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
| @@ -32,7 +32,7 @@ ioremap (unsigned long offset, unsigned long size) | |||
| 32 | */ | 32 | */ |
| 33 | attr = kern_mem_attribute(offset, size); | 33 | attr = kern_mem_attribute(offset, size); |
| 34 | if (attr & EFI_MEMORY_WB) | 34 | if (attr & EFI_MEMORY_WB) |
| 35 | return phys_to_virt(offset); | 35 | return (void __iomem *) phys_to_virt(offset); |
| 36 | else if (attr & EFI_MEMORY_UC) | 36 | else if (attr & EFI_MEMORY_UC) |
| 37 | return __ioremap(offset, size); | 37 | return __ioremap(offset, size); |
| 38 | 38 | ||
| @@ -43,7 +43,7 @@ ioremap (unsigned long offset, unsigned long size) | |||
| 43 | gran_base = GRANULEROUNDDOWN(offset); | 43 | gran_base = GRANULEROUNDDOWN(offset); |
| 44 | gran_size = GRANULEROUNDUP(offset + size) - gran_base; | 44 | gran_size = GRANULEROUNDUP(offset + size) - gran_base; |
| 45 | if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB) | 45 | if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB) |
| 46 | return phys_to_virt(offset); | 46 | return (void __iomem *) phys_to_virt(offset); |
| 47 | 47 | ||
| 48 | return __ioremap(offset, size); | 48 | return __ioremap(offset, size); |
| 49 | } | 49 | } |
| @@ -53,7 +53,7 @@ void __iomem * | |||
| 53 | ioremap_nocache (unsigned long offset, unsigned long size) | 53 | ioremap_nocache (unsigned long offset, unsigned long size) |
| 54 | { | 54 | { |
| 55 | if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB) | 55 | if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB) |
| 56 | return 0; | 56 | return NULL; |
| 57 | 57 | ||
| 58 | return __ioremap(offset, size); | 58 | return __ioremap(offset, size); |
| 59 | } | 59 | } |
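The ioremap() changes above (and the tioce casts further down) keep MMIO pointers in the __iomem address space that sparse checks; a small stand-alone sketch of the annotation (the macro is defined locally only so the example builds outside the kernel tree, and the function body is illustrative):

#ifdef __CHECKER__
#define __iomem __attribute__((noderef, address_space(2)))
#else
#define __iomem
#endif

/* Failure paths return NULL rather than a bare 0, and successful paths cast
 * explicitly, so the pointer keeps its __iomem type end to end. */
void __iomem *example_ioremap_nocache(unsigned long offset, unsigned long size)
{
	(void)offset;
	(void)size;
	return NULL;	/* e.g. refuse to map write-back memory uncached */
}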
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 99b123a6421a..5e8e59efb347 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
| @@ -480,7 +480,7 @@ xpc_activating(void *__partid) | |||
| 480 | partid_t partid = (u64) __partid; | 480 | partid_t partid = (u64) __partid; |
| 481 | struct xpc_partition *part = &xpc_partitions[partid]; | 481 | struct xpc_partition *part = &xpc_partitions[partid]; |
| 482 | unsigned long irq_flags; | 482 | unsigned long irq_flags; |
| 483 | struct sched_param param = { sched_priority: MAX_RT_PRIO - 1 }; | 483 | struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; |
| 484 | int ret; | 484 | int ret; |
| 485 | 485 | ||
| 486 | 486 | ||
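The xpc_main.c change swaps the old GCC-specific "field:" initializer syntax for a standard C99 designated initializer; a stand-alone illustration (the priority value is arbitrary):

#include <sched.h>

/* C99 designated initializer - the portable spelling used by the patch. */
static struct sched_param param = { .sched_priority = 99 };

/* The obsolete GNU extension wrote the same thing as
 *	struct sched_param param = { sched_priority: 99 };
 * which newer compilers warn about or reject. */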
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 17cd34284886..af7171adcd2c 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
| @@ -74,7 +74,7 @@ tioce_mmr_war_pre(struct tioce_kernel *kern, void *mmr_addr) | |||
| 74 | else | 74 | else |
| 75 | mmr_war_offset = 0x158; | 75 | mmr_war_offset = 0x158; |
| 76 | 76 | ||
| 77 | readq_relaxed((void *)(mmr_base + mmr_war_offset)); | 77 | readq_relaxed((void __iomem *)(mmr_base + mmr_war_offset)); |
| 78 | } | 78 | } |
| 79 | } | 79 | } |
| 80 | 80 | ||
| @@ -92,8 +92,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr) | |||
| 92 | 92 | ||
| 93 | if (mmr_offset < 0x45000) { | 93 | if (mmr_offset < 0x45000) { |
| 94 | if (mmr_offset == 0x100) | 94 | if (mmr_offset == 0x100) |
| 95 | readq_relaxed((void *)(mmr_base + 0x38)); | 95 | readq_relaxed((void __iomem *)(mmr_base + 0x38)); |
| 96 | readq_relaxed((void *)(mmr_base + 0xb050)); | 96 | readq_relaxed((void __iomem *)(mmr_base + 0xb050)); |
| 97 | } | 97 | } |
| 98 | } | 98 | } |
| 99 | 99 | ||
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 781dbb11c038..b09805f3ee23 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
| @@ -421,18 +421,22 @@ static struct miscdevice sq_dev = { | |||
| 421 | 421 | ||
| 422 | static int __init sq_api_init(void) | 422 | static int __init sq_api_init(void) |
| 423 | { | 423 | { |
| 424 | int ret; | ||
| 424 | printk(KERN_NOTICE "sq: Registering store queue API.\n"); | 425 | printk(KERN_NOTICE "sq: Registering store queue API.\n"); |
| 425 | 426 | ||
| 426 | #ifdef CONFIG_PROC_FS | ||
| 427 | create_proc_read_entry("sq_mapping", 0, 0, sq_mapping_read_proc, 0); | 427 | create_proc_read_entry("sq_mapping", 0, 0, sq_mapping_read_proc, 0); |
| 428 | #endif | ||
| 429 | 428 | ||
| 430 | return misc_register(&sq_dev); | 429 | ret = misc_register(&sq_dev); |
| 430 | if (ret) | ||
| 431 | remove_proc_entry("sq_mapping", NULL); | ||
| 432 | |||
| 433 | return ret; | ||
| 431 | } | 434 | } |
| 432 | 435 | ||
| 433 | static void __exit sq_api_exit(void) | 436 | static void __exit sq_api_exit(void) |
| 434 | { | 437 | { |
| 435 | misc_deregister(&sq_dev); | 438 | misc_deregister(&sq_dev); |
| 439 | remove_proc_entry("sq_mapping", NULL); | ||
| 436 | } | 440 | } |
| 437 | 441 | ||
| 438 | module_init(sq_api_init); | 442 | module_init(sq_api_init); |
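The sq.c change above makes sq_api_init() tear the proc entry back down when misc_register() fails, and makes the exit path remove it unconditionally, so neither a failed init nor a module unload leaves a stale /proc file behind. A small userspace sketch of that unwind-on-failure shape, with stubs standing in for the real proc and misc calls (the names are illustrative, not kernel APIs):

    #include <stdio.h>

    static void create_proc_stub(void)   { }
    static void remove_proc_stub(void)   { }
    static int  register_misc_stub(void) { return -16; /* pretend -EBUSY */ }

    static int api_init(void)
    {
            int ret;

            create_proc_stub();

            ret = register_misc_stub();
            if (ret)
                    /* Undo the earlier step so failure leaves no state behind. */
                    remove_proc_stub();

            return ret;
    }

    int main(void)
    {
            printf("init returned %d\n", api_init());
            return 0;
    }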
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c index 5a1c0a3bf872..06af6ca60129 100644 --- a/arch/x86_64/kernel/smp.c +++ b/arch/x86_64/kernel/smp.c | |||
| @@ -203,7 +203,7 @@ int __cpuinit init_smp_flush(void) | |||
| 203 | { | 203 | { |
| 204 | int i; | 204 | int i; |
| 205 | for_each_cpu_mask(i, cpu_possible_map) { | 205 | for_each_cpu_mask(i, cpu_possible_map) { |
| 206 | spin_lock_init(&per_cpu(flush_state.tlbstate_lock, i)); | 206 | spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock); |
| 207 | } | 207 | } |
| 208 | return 0; | 208 | return 0; |
| 209 | } | 209 | } |
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index 81e970adeab3..b0d4b147b19e 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c | |||
| @@ -129,11 +129,15 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device) | |||
| 129 | struct acpi_memory_info *info, *n; | 129 | struct acpi_memory_info *info, *n; |
| 130 | 130 | ||
| 131 | 131 | ||
| 132 | if (!list_empty(&mem_device->res_list)) | ||
| 133 | return 0; | ||
| 134 | |||
| 132 | status = acpi_walk_resources(mem_device->device->handle, METHOD_NAME__CRS, | 135 | status = acpi_walk_resources(mem_device->device->handle, METHOD_NAME__CRS, |
| 133 | acpi_memory_get_resource, mem_device); | 136 | acpi_memory_get_resource, mem_device); |
| 134 | if (ACPI_FAILURE(status)) { | 137 | if (ACPI_FAILURE(status)) { |
| 135 | list_for_each_entry_safe(info, n, &mem_device->res_list, list) | 138 | list_for_each_entry_safe(info, n, &mem_device->res_list, list) |
| 136 | kfree(info); | 139 | kfree(info); |
| 140 | INIT_LIST_HEAD(&mem_device->res_list); | ||
| 137 | return -EINVAL; | 141 | return -EINVAL; |
| 138 | } | 142 | } |
| 139 | 143 | ||
| @@ -230,17 +234,10 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) | |||
| 230 | * (i.e. memory-hot-remove function) | 234 | * (i.e. memory-hot-remove function) |
| 231 | */ | 235 | */ |
| 232 | list_for_each_entry(info, &mem_device->res_list, list) { | 236 | list_for_each_entry(info, &mem_device->res_list, list) { |
| 233 | u64 start_pfn, end_pfn; | 237 | if (info->enabled) { /* just sanity check...*/ |
| 234 | |||
| 235 | start_pfn = info->start_addr >> PAGE_SHIFT; | ||
| 236 | end_pfn = (info->start_addr + info->length - 1) >> PAGE_SHIFT; | ||
| 237 | |||
| 238 | if (pfn_valid(start_pfn) || pfn_valid(end_pfn)) { | ||
| 239 | /* already enabled. try next area */ | ||
| 240 | num_enabled++; | 238 | num_enabled++; |
| 241 | continue; | 239 | continue; |
| 242 | } | 240 | } |
| 243 | |||
| 244 | result = add_memory(node, info->start_addr, info->length); | 241 | result = add_memory(node, info->start_addr, info->length); |
| 245 | if (result) | 242 | if (result) |
| 246 | continue; | 243 | continue; |
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 1c0a39d8b04e..578b99b71d9c 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c | |||
| @@ -58,8 +58,8 @@ struct dock_dependent_device { | |||
| 58 | }; | 58 | }; |
| 59 | 59 | ||
| 60 | #define DOCK_DOCKING 0x00000001 | 60 | #define DOCK_DOCKING 0x00000001 |
| 61 | #define DOCK_EVENT KOBJ_DOCK | 61 | #define DOCK_EVENT 3 |
| 62 | #define UNDOCK_EVENT KOBJ_UNDOCK | 62 | #define UNDOCK_EVENT 2 |
| 63 | 63 | ||
| 64 | static struct dock_station *dock_station; | 64 | static struct dock_station *dock_station; |
| 65 | 65 | ||
| @@ -322,11 +322,10 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event) | |||
| 322 | 322 | ||
| 323 | static void dock_event(struct dock_station *ds, u32 event, int num) | 323 | static void dock_event(struct dock_station *ds, u32 event, int num) |
| 324 | { | 324 | { |
| 325 | struct acpi_device *device; | 325 | /* |
| 326 | 326 | * we don't do events until someone tells me that | |
| 327 | device = dock_create_acpi_device(ds->handle); | 327 | * they would like to have them. |
| 328 | if (device) | 328 | */ |
| 329 | kobject_uevent(&device->kobj, num); | ||
| 330 | } | 329 | } |
| 331 | 330 | ||
| 332 | /** | 331 | /** |
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c index 41db8060e8f7..017f755632a3 100644 --- a/drivers/char/hvsi.c +++ b/drivers/char/hvsi.c | |||
| @@ -311,7 +311,8 @@ static void hvsi_recv_control(struct hvsi_struct *hp, uint8_t *packet, | |||
| 311 | /* CD went away; no more connection */ | 311 | /* CD went away; no more connection */ |
| 312 | pr_debug("hvsi%i: CD dropped\n", hp->index); | 312 | pr_debug("hvsi%i: CD dropped\n", hp->index); |
| 313 | hp->mctrl &= TIOCM_CD; | 313 | hp->mctrl &= TIOCM_CD; |
| 314 | if (!(hp->tty->flags & CLOCAL)) | 314 | /* If userland hasn't done an open(2) yet, hp->tty is NULL. */ |
| 315 | if (hp->tty && !(hp->tty->flags & CLOCAL)) | ||
| 315 | *to_hangup = hp->tty; | 316 | *to_hangup = hp->tty; |
| 316 | } | 317 | } |
| 317 | break; | 318 | break; |
| @@ -986,10 +987,7 @@ static void hvsi_write_worker(void *arg) | |||
| 986 | start_j = 0; | 987 | start_j = 0; |
| 987 | #endif /* DEBUG */ | 988 | #endif /* DEBUG */ |
| 988 | wake_up_all(&hp->emptyq); | 989 | wake_up_all(&hp->emptyq); |
| 989 | if (test_bit(TTY_DO_WRITE_WAKEUP, &hp->tty->flags) | 990 | tty_wakeup(hp->tty); |
| 990 | && hp->tty->ldisc.write_wakeup) | ||
| 991 | hp->tty->ldisc.write_wakeup(hp->tty); | ||
| 992 | wake_up_interruptible(&hp->tty->write_wait); | ||
| 993 | } | 991 | } |
| 994 | 992 | ||
| 995 | out: | 993 | out: |
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 819516b35a79..a01d796d1eeb 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c | |||
| @@ -25,12 +25,12 @@ | |||
| 25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
| 26 | #include <linux/init.h> | 26 | #include <linux/init.h> |
| 27 | #include <linux/random.h> | 27 | #include <linux/random.h> |
| 28 | #include <linux/clk.h> | ||
| 28 | #include <linux/err.h> | 29 | #include <linux/err.h> |
| 29 | #include <linux/device.h> | 30 | #include <linux/platform_device.h> |
| 30 | #include <linux/hw_random.h> | 31 | #include <linux/hw_random.h> |
| 31 | 32 | ||
| 32 | #include <asm/io.h> | 33 | #include <asm/io.h> |
| 33 | #include <asm/hardware/clock.h> | ||
| 34 | 34 | ||
| 35 | #define RNG_OUT_REG 0x00 /* Output register */ | 35 | #define RNG_OUT_REG 0x00 /* Output register */ |
| 36 | #define RNG_STAT_REG 0x04 /* Status register | 36 | #define RNG_STAT_REG 0x04 /* Status register |
| @@ -52,7 +52,7 @@ | |||
| 52 | 52 | ||
| 53 | static void __iomem *rng_base; | 53 | static void __iomem *rng_base; |
| 54 | static struct clk *rng_ick; | 54 | static struct clk *rng_ick; |
| 55 | static struct device *rng_dev; | 55 | static struct platform_device *rng_dev; |
| 56 | 56 | ||
| 57 | static u32 omap_rng_read_reg(int reg) | 57 | static u32 omap_rng_read_reg(int reg) |
| 58 | { | 58 | { |
| @@ -83,9 +83,8 @@ static struct hwrng omap_rng_ops = { | |||
| 83 | .data_read = omap_rng_data_read, | 83 | .data_read = omap_rng_data_read, |
| 84 | }; | 84 | }; |
| 85 | 85 | ||
| 86 | static int __init omap_rng_probe(struct device *dev) | 86 | static int __init omap_rng_probe(struct platform_device *pdev) |
| 87 | { | 87 | { |
| 88 | struct platform_device *pdev = to_platform_device(dev); | ||
| 89 | struct resource *res, *mem; | 88 | struct resource *res, *mem; |
| 90 | int ret; | 89 | int ret; |
| 91 | 90 | ||
| @@ -95,16 +94,14 @@ static int __init omap_rng_probe(struct device *dev) | |||
| 95 | */ | 94 | */ |
| 96 | BUG_ON(rng_dev); | 95 | BUG_ON(rng_dev); |
| 97 | 96 | ||
| 98 | if (cpu_is_omap24xx()) { | 97 | if (cpu_is_omap24xx()) { |
| 99 | rng_ick = clk_get(NULL, "rng_ick"); | 98 | rng_ick = clk_get(NULL, "rng_ick"); |
| 100 | if (IS_ERR(rng_ick)) { | 99 | if (IS_ERR(rng_ick)) { |
| 101 | dev_err(dev, "Could not get rng_ick\n"); | 100 | dev_err(&pdev->dev, "Could not get rng_ick\n"); |
| 102 | ret = PTR_ERR(rng_ick); | 101 | ret = PTR_ERR(rng_ick); |
| 103 | return ret; | 102 | return ret; |
| 104 | } | 103 | } else |
| 105 | else { | 104 | clk_enable(rng_ick); |
| 106 | clk_use(rng_ick); | ||
| 107 | } | ||
| 108 | } | 105 | } |
| 109 | 106 | ||
| 110 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 107 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| @@ -117,7 +114,7 @@ static int __init omap_rng_probe(struct device *dev) | |||
| 117 | if (mem == NULL) | 114 | if (mem == NULL) |
| 118 | return -EBUSY; | 115 | return -EBUSY; |
| 119 | 116 | ||
| 120 | dev_set_drvdata(dev, mem); | 117 | dev_set_drvdata(&pdev->dev, mem); |
| 121 | rng_base = (u32 __iomem *)io_p2v(res->start); | 118 | rng_base = (u32 __iomem *)io_p2v(res->start); |
| 122 | 119 | ||
| 123 | ret = hwrng_register(&omap_rng_ops); | 120 | ret = hwrng_register(&omap_rng_ops); |
| @@ -127,25 +124,25 @@ static int __init omap_rng_probe(struct device *dev) | |||
| 127 | return ret; | 124 | return ret; |
| 128 | } | 125 | } |
| 129 | 126 | ||
| 130 | dev_info(dev, "OMAP Random Number Generator ver. %02x\n", | 127 | dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n", |
| 131 | omap_rng_read_reg(RNG_REV_REG)); | 128 | omap_rng_read_reg(RNG_REV_REG)); |
| 132 | omap_rng_write_reg(RNG_MASK_REG, 0x1); | 129 | omap_rng_write_reg(RNG_MASK_REG, 0x1); |
| 133 | 130 | ||
| 134 | rng_dev = dev; | 131 | rng_dev = pdev; |
| 135 | 132 | ||
| 136 | return 0; | 133 | return 0; |
| 137 | } | 134 | } |
| 138 | 135 | ||
| 139 | static int __exit omap_rng_remove(struct device *dev) | 136 | static int __exit omap_rng_remove(struct platform_device *pdev) |
| 140 | { | 137 | { |
| 141 | struct resource *mem = dev_get_drvdata(dev); | 138 | struct resource *mem = dev_get_drvdata(&pdev->dev); |
| 142 | 139 | ||
| 143 | hwrng_unregister(&omap_rng_ops); | 140 | hwrng_unregister(&omap_rng_ops); |
| 144 | 141 | ||
| 145 | omap_rng_write_reg(RNG_MASK_REG, 0x0); | 142 | omap_rng_write_reg(RNG_MASK_REG, 0x0); |
| 146 | 143 | ||
| 147 | if (cpu_is_omap24xx()) { | 144 | if (cpu_is_omap24xx()) { |
| 148 | clk_unuse(rng_ick); | 145 | clk_disable(rng_ick); |
| 149 | clk_put(rng_ick); | 146 | clk_put(rng_ick); |
| 150 | } | 147 | } |
| 151 | 148 | ||
| @@ -157,18 +154,16 @@ static int __exit omap_rng_remove(struct device *dev) | |||
| 157 | 154 | ||
| 158 | #ifdef CONFIG_PM | 155 | #ifdef CONFIG_PM |
| 159 | 156 | ||
| 160 | static int omap_rng_suspend(struct device *dev, pm_message_t message, u32 level) | 157 | static int omap_rng_suspend(struct platform_device *pdev, pm_message_t message) |
| 161 | { | 158 | { |
| 162 | omap_rng_write_reg(RNG_MASK_REG, 0x0); | 159 | omap_rng_write_reg(RNG_MASK_REG, 0x0); |
| 163 | |||
| 164 | return 0; | 160 | return 0; |
| 165 | } | 161 | } |
| 166 | 162 | ||
| 167 | static int omap_rng_resume(struct device *dev, pm_message_t message, u32 level) | 163 | static int omap_rng_resume(struct platform_device *pdev) |
| 168 | { | 164 | { |
| 169 | omap_rng_write_reg(RNG_MASK_REG, 0x1); | 165 | omap_rng_write_reg(RNG_MASK_REG, 0x1); |
| 170 | 166 | return 0; | |
| 171 | return 1; | ||
| 172 | } | 167 | } |
| 173 | 168 | ||
| 174 | #else | 169 | #else |
| @@ -179,9 +174,11 @@ static int omap_rng_resume(struct device *dev, pm_message_t message, u32 level) | |||
| 179 | #endif | 174 | #endif |
| 180 | 175 | ||
| 181 | 176 | ||
| 182 | static struct device_driver omap_rng_driver = { | 177 | static struct platform_driver omap_rng_driver = { |
| 183 | .name = "omap_rng", | 178 | .driver = { |
| 184 | .bus = &platform_bus_type, | 179 | .name = "omap_rng", |
| 180 | .owner = THIS_MODULE, | ||
| 181 | }, | ||
| 185 | .probe = omap_rng_probe, | 182 | .probe = omap_rng_probe, |
| 186 | .remove = __exit_p(omap_rng_remove), | 183 | .remove = __exit_p(omap_rng_remove), |
| 187 | .suspend = omap_rng_suspend, | 184 | .suspend = omap_rng_suspend, |
| @@ -193,12 +190,12 @@ static int __init omap_rng_init(void) | |||
| 193 | if (!cpu_is_omap16xx() && !cpu_is_omap24xx()) | 190 | if (!cpu_is_omap16xx() && !cpu_is_omap24xx()) |
| 194 | return -ENODEV; | 191 | return -ENODEV; |
| 195 | 192 | ||
| 196 | return driver_register(&omap_rng_driver); | 193 | return platform_driver_register(&omap_rng_driver); |
| 197 | } | 194 | } |
| 198 | 195 | ||
| 199 | static void __exit omap_rng_exit(void) | 196 | static void __exit omap_rng_exit(void) |
| 200 | { | 197 | { |
| 201 | driver_unregister(&omap_rng_driver); | 198 | platform_driver_unregister(&omap_rng_driver); |
| 202 | } | 199 | } |
| 203 | 200 | ||
| 204 | module_init(omap_rng_init); | 201 | module_init(omap_rng_init); |
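The omap-rng.c conversion above moves the driver from a bare struct device_driver bound to platform_bus_type over to the platform_driver API, so probe, remove, suspend and resume receive a struct platform_device directly, and the clk_use()/clk_unuse() calls become clk_enable()/clk_disable(). A minimal skeleton of that registration pattern for kernels of this vintage (driver name and callbacks are placeholders, not the omap_rng ones):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            dev_info(&pdev->dev, "probed\n");
            return 0;
    }

    static int example_remove(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver example_driver = {
            .driver = {
                    .name  = "example",
                    .owner = THIS_MODULE,
            },
            .probe  = example_probe,
            .remove = example_remove,
    };

    static int __init example_init(void)
    {
            return platform_driver_register(&example_driver);
    }

    static void __exit example_exit(void)
    {
            platform_driver_unregister(&example_driver);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");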
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c index afc6eda602f7..07e0b75f2338 100644 --- a/drivers/char/snsc.c +++ b/drivers/char/snsc.c | |||
| @@ -374,7 +374,12 @@ scdrv_init(void) | |||
| 374 | struct sysctl_data_s *scd; | 374 | struct sysctl_data_s *scd; |
| 375 | void *salbuf; | 375 | void *salbuf; |
| 376 | dev_t first_dev, dev; | 376 | dev_t first_dev, dev; |
| 377 | nasid_t event_nasid = ia64_sn_get_console_nasid(); | 377 | nasid_t event_nasid; |
| 378 | |||
| 379 | if (!ia64_platform_is("sn2")) | ||
| 380 | return -ENODEV; | ||
| 381 | |||
| 382 | event_nasid = ia64_sn_get_console_nasid(); | ||
| 378 | 383 | ||
| 379 | if (alloc_chrdev_region(&first_dev, 0, num_cnodes, | 384 | if (alloc_chrdev_region(&first_dev, 0, num_cnodes, |
| 380 | SYSCTL_BASENAME) < 0) { | 385 | SYSCTL_BASENAME) < 0) { |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index bc1088d9b379..b3df613ae4ec 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -284,39 +284,69 @@ EXPORT_SYMBOL_GPL(cpufreq_notify_transition); | |||
| 284 | * SYSFS INTERFACE * | 284 | * SYSFS INTERFACE * |
| 285 | *********************************************************************/ | 285 | *********************************************************************/ |
| 286 | 286 | ||
| 287 | static struct cpufreq_governor *__find_governor(const char *str_governor) | ||
| 288 | { | ||
| 289 | struct cpufreq_governor *t; | ||
| 290 | |||
| 291 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) | ||
| 292 | if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) | ||
| 293 | return t; | ||
| 294 | |||
| 295 | return NULL; | ||
| 296 | } | ||
| 297 | |||
| 287 | /** | 298 | /** |
| 288 | * cpufreq_parse_governor - parse a governor string | 299 | * cpufreq_parse_governor - parse a governor string |
| 289 | */ | 300 | */ |
| 290 | static int cpufreq_parse_governor (char *str_governor, unsigned int *policy, | 301 | static int cpufreq_parse_governor (char *str_governor, unsigned int *policy, |
| 291 | struct cpufreq_governor **governor) | 302 | struct cpufreq_governor **governor) |
| 292 | { | 303 | { |
| 304 | int err = -EINVAL; | ||
| 305 | |||
| 293 | if (!cpufreq_driver) | 306 | if (!cpufreq_driver) |
| 294 | return -EINVAL; | 307 | goto out; |
| 308 | |||
| 295 | if (cpufreq_driver->setpolicy) { | 309 | if (cpufreq_driver->setpolicy) { |
| 296 | if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { | 310 | if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { |
| 297 | *policy = CPUFREQ_POLICY_PERFORMANCE; | 311 | *policy = CPUFREQ_POLICY_PERFORMANCE; |
| 298 | return 0; | 312 | err = 0; |
| 299 | } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) { | 313 | } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) { |
| 300 | *policy = CPUFREQ_POLICY_POWERSAVE; | 314 | *policy = CPUFREQ_POLICY_POWERSAVE; |
| 301 | return 0; | 315 | err = 0; |
| 302 | } | 316 | } |
| 303 | return -EINVAL; | 317 | } else if (cpufreq_driver->target) { |
| 304 | } else { | ||
| 305 | struct cpufreq_governor *t; | 318 | struct cpufreq_governor *t; |
| 319 | |||
| 306 | mutex_lock(&cpufreq_governor_mutex); | 320 | mutex_lock(&cpufreq_governor_mutex); |
| 307 | if (!cpufreq_driver || !cpufreq_driver->target) | 321 | |
| 308 | goto out; | 322 | t = __find_governor(str_governor); |
| 309 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) { | 323 | |
| 310 | if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) { | 324 | if (t == NULL) { |
| 311 | *governor = t; | 325 | char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", str_governor); |
| 326 | |||
| 327 | if (name) { | ||
| 328 | int ret; | ||
| 329 | |||
| 312 | mutex_unlock(&cpufreq_governor_mutex); | 330 | mutex_unlock(&cpufreq_governor_mutex); |
| 313 | return 0; | 331 | ret = request_module(name); |
| 332 | mutex_lock(&cpufreq_governor_mutex); | ||
| 333 | |||
| 334 | if (ret == 0) | ||
| 335 | t = __find_governor(str_governor); | ||
| 314 | } | 336 | } |
| 337 | |||
| 338 | kfree(name); | ||
| 315 | } | 339 | } |
| 316 | out: | 340 | |
| 341 | if (t != NULL) { | ||
| 342 | *governor = t; | ||
| 343 | err = 0; | ||
| 344 | } | ||
| 345 | |||
| 317 | mutex_unlock(&cpufreq_governor_mutex); | 346 | mutex_unlock(&cpufreq_governor_mutex); |
| 318 | } | 347 | } |
| 319 | return -EINVAL; | 348 | out: |
| 349 | return err; | ||
| 320 | } | 350 | } |
| 321 | 351 | ||
| 322 | 352 | ||
| @@ -1265,23 +1295,21 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) | |||
| 1265 | 1295 | ||
| 1266 | int cpufreq_register_governor(struct cpufreq_governor *governor) | 1296 | int cpufreq_register_governor(struct cpufreq_governor *governor) |
| 1267 | { | 1297 | { |
| 1268 | struct cpufreq_governor *t; | 1298 | int err; |
| 1269 | 1299 | ||
| 1270 | if (!governor) | 1300 | if (!governor) |
| 1271 | return -EINVAL; | 1301 | return -EINVAL; |
| 1272 | 1302 | ||
| 1273 | mutex_lock(&cpufreq_governor_mutex); | 1303 | mutex_lock(&cpufreq_governor_mutex); |
| 1274 | 1304 | ||
| 1275 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) { | 1305 | err = -EBUSY; |
| 1276 | if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) { | 1306 | if (__find_governor(governor->name) == NULL) { |
| 1277 | mutex_unlock(&cpufreq_governor_mutex); | 1307 | err = 0; |
| 1278 | return -EBUSY; | 1308 | list_add(&governor->governor_list, &cpufreq_governor_list); |
| 1279 | } | ||
| 1280 | } | 1309 | } |
| 1281 | list_add(&governor->governor_list, &cpufreq_governor_list); | ||
| 1282 | 1310 | ||
| 1283 | mutex_unlock(&cpufreq_governor_mutex); | 1311 | mutex_unlock(&cpufreq_governor_mutex); |
| 1284 | return 0; | 1312 | return err; |
| 1285 | } | 1313 | } |
| 1286 | EXPORT_SYMBOL_GPL(cpufreq_register_governor); | 1314 | EXPORT_SYMBOL_GPL(cpufreq_register_governor); |
| 1287 | 1315 | ||
| @@ -1343,6 +1371,11 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli | |||
| 1343 | 1371 | ||
| 1344 | memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo)); | 1372 | memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo)); |
| 1345 | 1373 | ||
| 1374 | if (policy->min > data->min && policy->min > policy->max) { | ||
| 1375 | ret = -EINVAL; | ||
| 1376 | goto error_out; | ||
| 1377 | } | ||
| 1378 | |||
| 1346 | /* verify the cpu speed can be set within this limit */ | 1379 | /* verify the cpu speed can be set within this limit */ |
| 1347 | ret = cpufreq_driver->verify(policy); | 1380 | ret = cpufreq_driver->verify(policy); |
| 1348 | if (ret) | 1381 | if (ret) |
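The cpufreq.c rework above factors the governor lookup into __find_governor() and, when a requested governor is not yet registered, drops cpufreq_governor_mutex, tries request_module("cpufreq_<name>"), retakes the mutex and looks the governor up again; it also makes cpufreq_register_governor() reuse the helper and rejects a policy whose new minimum exceeds its maximum. A condensed kernel-style sketch of the drop-lock/modprobe/re-check shape (names here are illustrative, not the cpufreq symbols):

    #include <linux/mutex.h>
    #include <linux/kmod.h>

    static DEFINE_MUTEX(thing_lock);

    struct thing { const char *name; };

    /* Stub standing in for a list lookup done under thing_lock. */
    static struct thing *find_thing(const char *name) { return NULL; }

    static struct thing *get_thing(const char *name)
    {
            struct thing *t;

            mutex_lock(&thing_lock);
            t = find_thing(name);
            if (!t) {
                    /* The freshly loaded module registers itself under this same
                     * lock, so drop it around the modprobe to avoid deadlock. */
                    mutex_unlock(&thing_lock);
                    request_module("thing_%s", name);
                    mutex_lock(&thing_lock);
                    t = find_thing(name);
            }
            mutex_unlock(&thing_lock);
            return t;
    }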
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h index bf6ab8a8d5ed..a1cfd4e3c97d 100644 --- a/drivers/edac/edac_mc.h +++ b/drivers/edac/edac_mc.h | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #include <linux/rcupdate.h> | 29 | #include <linux/rcupdate.h> |
| 30 | #include <linux/completion.h> | 30 | #include <linux/completion.h> |
| 31 | #include <linux/kobject.h> | 31 | #include <linux/kobject.h> |
| 32 | #include <linux/platform_device.h> | ||
| 32 | 33 | ||
| 33 | #define EDAC_MC_LABEL_LEN 31 | 34 | #define EDAC_MC_LABEL_LEN 31 |
| 34 | #define MC_PROC_NAME_MAX_LEN 7 | 35 | #define MC_PROC_NAME_MAX_LEN 7 |
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c index ced309ff056f..eae9e81be375 100644 --- a/drivers/i2c/busses/scx200_acb.c +++ b/drivers/i2c/busses/scx200_acb.c | |||
| @@ -232,7 +232,7 @@ static void scx200_acb_poll(struct scx200_acb_iface *iface) | |||
| 232 | unsigned long timeout; | 232 | unsigned long timeout; |
| 233 | 233 | ||
| 234 | timeout = jiffies + POLL_TIMEOUT; | 234 | timeout = jiffies + POLL_TIMEOUT; |
| 235 | while (time_before(jiffies, timeout)) { | 235 | while (1) { |
| 236 | status = inb(ACBST); | 236 | status = inb(ACBST); |
| 237 | 237 | ||
| 238 | /* Reset the status register to avoid the hang */ | 238 | /* Reset the status register to avoid the hang */ |
| @@ -242,7 +242,10 @@ static void scx200_acb_poll(struct scx200_acb_iface *iface) | |||
| 242 | scx200_acb_machine(iface, status); | 242 | scx200_acb_machine(iface, status); |
| 243 | return; | 243 | return; |
| 244 | } | 244 | } |
| 245 | yield(); | 245 | if (time_after(jiffies, timeout)) |
| 246 | break; | ||
| 247 | cpu_relax(); | ||
| 248 | cond_resched(); | ||
| 246 | } | 249 | } |
| 247 | 250 | ||
| 248 | dev_err(&iface->adapter.dev, "timeout in state %s\n", | 251 | dev_err(&iface->adapter.dev, "timeout in state %s\n", |
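The scx200_acb.c rework above restructures the poll loop so the status register is always sampled before the deadline is tested, and swaps yield() for cpu_relax() plus cond_resched(); with the old ordering, a task scheduled back in after the timeout had already expired would report a timeout even though the controller had finished. A userspace sketch of the same check-then-deadline ordering (names are illustrative):

    #include <stdio.h>
    #include <time.h>

    /* Poll until done() succeeds; test the condition before the deadline so a
     * completion that lands late is still seen rather than reported as a
     * timeout. In the kernel loop the pause is cpu_relax() + cond_resched(). */
    static int poll_until(int (*done)(void), double timeout_sec)
    {
            time_t start = time(NULL);

            for (;;) {
                    if (done())
                            return 0;
                    if (difftime(time(NULL), start) > timeout_sec)
                            return -1;
            }
    }

    static int always_ready(void) { return 1; }

    int main(void)
    {
            printf("result = %d\n", poll_until(always_ready, 1.0));
            return 0;
    }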
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index aaa74f293aaf..b08755e2e68f 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c | |||
| @@ -2515,6 +2515,9 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev) | |||
| 2515 | sdev->skip_ms_page_8 = 1; | 2515 | sdev->skip_ms_page_8 = 1; |
| 2516 | if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) | 2516 | if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) |
| 2517 | sdev->fix_capacity = 1; | 2517 | sdev->fix_capacity = 1; |
| 2518 | if (scsi_id->ne->guid_vendor_id == 0x0010b9 && /* Maxtor's OUI */ | ||
| 2519 | (sdev->type == TYPE_DISK || sdev->type == TYPE_RBC)) | ||
| 2520 | sdev->allow_restart = 1; | ||
| 2518 | return 0; | 2521 | return 0; |
| 2519 | } | 2522 | } |
| 2520 | 2523 | ||
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index f85c97f7500a..0de335b7bfc2 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
| @@ -975,8 +975,10 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, | |||
| 975 | 975 | ||
| 976 | cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> | 976 | cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> |
| 977 | id.local_id); | 977 | id.local_id); |
| 978 | if (IS_ERR(cm_id_priv->timewait_info)) | 978 | if (IS_ERR(cm_id_priv->timewait_info)) { |
| 979 | ret = PTR_ERR(cm_id_priv->timewait_info); | ||
| 979 | goto out; | 980 | goto out; |
| 981 | } | ||
| 980 | 982 | ||
| 981 | ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); | 983 | ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); |
| 982 | if (ret) | 984 | if (ret) |
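The cm.c fix above adds the missing `ret = PTR_ERR(...)` assignment so the errno encoded in the returned pointer is actually propagated rather than whatever `ret` happened to contain. A self-contained userspace re-creation of the ERR_PTR/IS_ERR/PTR_ERR idiom it relies on (simplified; the real helpers live in include/linux/err.h):

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)    { return (void *)error; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR(const void *p)
    {
            return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    /* A constructor that reports failure through the pointer itself. */
    static void *create_object(int fail)
    {
            static int object;
            return fail ? ERR_PTR(-ENOMEM) : (void *)&object;
    }

    int main(void)
    {
            void *obj = create_object(1);
            long ret = 0;

            if (IS_ERR(obj))
                    ret = PTR_ERR(obj);     /* propagate the encoded errno */

            printf("ret = %ld\n", ret);
            return 0;
    }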
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index bb9bee56a824..102a59c033ff 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h | |||
| @@ -42,6 +42,7 @@ | |||
| 42 | #include <linux/kref.h> | 42 | #include <linux/kref.h> |
| 43 | #include <linux/idr.h> | 43 | #include <linux/idr.h> |
| 44 | #include <linux/mutex.h> | 44 | #include <linux/mutex.h> |
| 45 | #include <linux/completion.h> | ||
| 45 | 46 | ||
| 46 | #include <rdma/ib_verbs.h> | 47 | #include <rdma/ib_verbs.h> |
| 47 | #include <rdma/ib_user_verbs.h> | 48 | #include <rdma/ib_user_verbs.h> |
| @@ -69,6 +70,7 @@ | |||
| 69 | 70 | ||
| 70 | struct ib_uverbs_device { | 71 | struct ib_uverbs_device { |
| 71 | struct kref ref; | 72 | struct kref ref; |
| 73 | struct completion comp; | ||
| 72 | int devnum; | 74 | int devnum; |
| 73 | struct cdev *dev; | 75 | struct cdev *dev; |
| 74 | struct class_device *class_dev; | 76 | struct class_device *class_dev; |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index e725cccc7cde..4e16314e8e6d 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
| @@ -122,7 +122,7 @@ static void ib_uverbs_release_dev(struct kref *ref) | |||
| 122 | struct ib_uverbs_device *dev = | 122 | struct ib_uverbs_device *dev = |
| 123 | container_of(ref, struct ib_uverbs_device, ref); | 123 | container_of(ref, struct ib_uverbs_device, ref); |
| 124 | 124 | ||
| 125 | kfree(dev); | 125 | complete(&dev->comp); |
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | void ib_uverbs_release_ucq(struct ib_uverbs_file *file, | 128 | void ib_uverbs_release_ucq(struct ib_uverbs_file *file, |
| @@ -740,6 +740,7 @@ static void ib_uverbs_add_one(struct ib_device *device) | |||
| 740 | return; | 740 | return; |
| 741 | 741 | ||
| 742 | kref_init(&uverbs_dev->ref); | 742 | kref_init(&uverbs_dev->ref); |
| 743 | init_completion(&uverbs_dev->comp); | ||
| 743 | 744 | ||
| 744 | spin_lock(&map_lock); | 745 | spin_lock(&map_lock); |
| 745 | uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); | 746 | uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); |
| @@ -793,6 +794,8 @@ err_cdev: | |||
| 793 | 794 | ||
| 794 | err: | 795 | err: |
| 795 | kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); | 796 | kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); |
| 797 | wait_for_completion(&uverbs_dev->comp); | ||
| 798 | kfree(uverbs_dev); | ||
| 796 | return; | 799 | return; |
| 797 | } | 800 | } |
| 798 | 801 | ||
| @@ -812,7 +815,10 @@ static void ib_uverbs_remove_one(struct ib_device *device) | |||
| 812 | spin_unlock(&map_lock); | 815 | spin_unlock(&map_lock); |
| 813 | 816 | ||
| 814 | clear_bit(uverbs_dev->devnum, dev_map); | 817 | clear_bit(uverbs_dev->devnum, dev_map); |
| 818 | |||
| 815 | kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); | 819 | kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); |
| 820 | wait_for_completion(&uverbs_dev->comp); | ||
| 821 | kfree(uverbs_dev); | ||
| 816 | } | 822 | } |
| 817 | 823 | ||
| 818 | static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags, | 824 | static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags, |
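The uverbs changes above stop freeing the device structure inside the kref release callback and instead signal a completion, so ib_uverbs_remove_one() (and the error path in add_one) can wait until the last reference is dropped and then free the structure from a context that is allowed to sleep. A condensed kernel-style sketch of that teardown idiom (structure and function names are illustrative):

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/completion.h>
    #include <linux/slab.h>

    struct mydev {
            struct kref ref;
            struct completion comp;
    };

    static struct mydev *mydev_create(void)
    {
            struct mydev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

            if (!dev)
                    return NULL;
            kref_init(&dev->ref);
            init_completion(&dev->comp);
            return dev;
    }

    static void mydev_release(struct kref *ref)
    {
            struct mydev *dev = container_of(ref, struct mydev, ref);

            /* Last reference dropped: wake the remover instead of freeing here. */
            complete(&dev->comp);
    }

    static void mydev_remove(struct mydev *dev)
    {
            kref_put(&dev->ref, mydev_release);
            /* Wait for every outstanding user to drop its reference ... */
            wait_for_completion(&dev->comp);
            /* ... then free from a context that may sleep. */
            kfree(dev);
    }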
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c index 9ba3211cef7c..25157f57a6d0 100644 --- a/drivers/infiniband/hw/mthca/mthca_allocator.c +++ b/drivers/infiniband/hw/mthca/mthca_allocator.c | |||
| @@ -108,14 +108,15 @@ void mthca_alloc_cleanup(struct mthca_alloc *alloc) | |||
| 108 | * serialize access to the array. | 108 | * serialize access to the array. |
| 109 | */ | 109 | */ |
| 110 | 110 | ||
| 111 | #define MTHCA_ARRAY_MASK (PAGE_SIZE / sizeof (void *) - 1) | ||
| 112 | |||
| 111 | void *mthca_array_get(struct mthca_array *array, int index) | 113 | void *mthca_array_get(struct mthca_array *array, int index) |
| 112 | { | 114 | { |
| 113 | int p = (index * sizeof (void *)) >> PAGE_SHIFT; | 115 | int p = (index * sizeof (void *)) >> PAGE_SHIFT; |
| 114 | 116 | ||
| 115 | if (array->page_list[p].page) { | 117 | if (array->page_list[p].page) |
| 116 | int i = index & (PAGE_SIZE / sizeof (void *) - 1); | 118 | return array->page_list[p].page[index & MTHCA_ARRAY_MASK]; |
| 117 | return array->page_list[p].page[i]; | 119 | else |
| 118 | } else | ||
| 119 | return NULL; | 120 | return NULL; |
| 120 | } | 121 | } |
| 121 | 122 | ||
| @@ -130,8 +131,7 @@ int mthca_array_set(struct mthca_array *array, int index, void *value) | |||
| 130 | if (!array->page_list[p].page) | 131 | if (!array->page_list[p].page) |
| 131 | return -ENOMEM; | 132 | return -ENOMEM; |
| 132 | 133 | ||
| 133 | array->page_list[p].page[index & (PAGE_SIZE / sizeof (void *) - 1)] = | 134 | array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value; |
| 134 | value; | ||
| 135 | ++array->page_list[p].used; | 135 | ++array->page_list[p].used; |
| 136 | 136 | ||
| 137 | return 0; | 137 | return 0; |
| @@ -144,7 +144,8 @@ void mthca_array_clear(struct mthca_array *array, int index) | |||
| 144 | if (--array->page_list[p].used == 0) { | 144 | if (--array->page_list[p].used == 0) { |
| 145 | free_page((unsigned long) array->page_list[p].page); | 145 | free_page((unsigned long) array->page_list[p].page); |
| 146 | array->page_list[p].page = NULL; | 146 | array->page_list[p].page = NULL; |
| 147 | } | 147 | } else |
| 148 | array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL; | ||
| 148 | 149 | ||
| 149 | if (array->page_list[p].used < 0) | 150 | if (array->page_list[p].used < 0) |
| 150 | pr_debug("Array %p index %d page %d with ref count %d < 0\n", | 151 | pr_debug("Array %p index %d page %d with ref count %d < 0\n", |
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig index 13d6d01c72c0..d74653d7de1c 100644 --- a/drivers/infiniband/ulp/ipoib/Kconfig +++ b/drivers/infiniband/ulp/ipoib/Kconfig | |||
| @@ -6,8 +6,7 @@ config INFINIBAND_IPOIB | |||
| 6 | transports IP packets over InfiniBand so you can use your IB | 6 | transports IP packets over InfiniBand so you can use your IB |
| 7 | device as a fancy NIC. | 7 | device as a fancy NIC. |
| 8 | 8 | ||
| 9 | The IPoIB protocol is defined by the IETF ipoib working | 9 | See Documentation/infiniband/ipoib.txt for more information |
| 10 | group: <http://www.ietf.org/html.charters/ipoib-charter.html>. | ||
| 11 | 10 | ||
| 12 | config INFINIBAND_IPOIB_DEBUG | 11 | config INFINIBAND_IPOIB_DEBUG |
| 13 | bool "IP-over-InfiniBand debugging" if EMBEDDED | 12 | bool "IP-over-InfiniBand debugging" if EMBEDDED |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 8f472e7113b4..8257d5a2c8f8 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
| @@ -77,6 +77,14 @@ MODULE_PARM_DESC(topspin_workarounds, | |||
| 77 | 77 | ||
| 78 | static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad }; | 78 | static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad }; |
| 79 | 79 | ||
| 80 | static int mellanox_workarounds = 1; | ||
| 81 | |||
| 82 | module_param(mellanox_workarounds, int, 0444); | ||
| 83 | MODULE_PARM_DESC(mellanox_workarounds, | ||
| 84 | "Enable workarounds for Mellanox SRP target bugs if != 0"); | ||
| 85 | |||
| 86 | static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 }; | ||
| 87 | |||
| 80 | static void srp_add_one(struct ib_device *device); | 88 | static void srp_add_one(struct ib_device *device); |
| 81 | static void srp_remove_one(struct ib_device *device); | 89 | static void srp_remove_one(struct ib_device *device); |
| 82 | static void srp_completion(struct ib_cq *cq, void *target_ptr); | 90 | static void srp_completion(struct ib_cq *cq, void *target_ptr); |
| @@ -526,8 +534,10 @@ static int srp_reconnect_target(struct srp_target_port *target) | |||
| 526 | while (ib_poll_cq(target->cq, 1, &wc) > 0) | 534 | while (ib_poll_cq(target->cq, 1, &wc) > 0) |
| 527 | ; /* nothing */ | 535 | ; /* nothing */ |
| 528 | 536 | ||
| 537 | spin_lock_irq(target->scsi_host->host_lock); | ||
| 529 | list_for_each_entry_safe(req, tmp, &target->req_queue, list) | 538 | list_for_each_entry_safe(req, tmp, &target->req_queue, list) |
| 530 | srp_reset_req(target, req); | 539 | srp_reset_req(target, req); |
| 540 | spin_unlock_irq(target->scsi_host->host_lock); | ||
| 531 | 541 | ||
| 532 | target->rx_head = 0; | 542 | target->rx_head = 0; |
| 533 | target->tx_head = 0; | 543 | target->tx_head = 0; |
| @@ -567,7 +577,7 @@ err: | |||
| 567 | return ret; | 577 | return ret; |
| 568 | } | 578 | } |
| 569 | 579 | ||
| 570 | static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat, | 580 | static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat, |
| 571 | int sg_cnt, struct srp_request *req, | 581 | int sg_cnt, struct srp_request *req, |
| 572 | struct srp_direct_buf *buf) | 582 | struct srp_direct_buf *buf) |
| 573 | { | 583 | { |
| @@ -577,10 +587,15 @@ static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat, | |||
| 577 | int page_cnt; | 587 | int page_cnt; |
| 578 | int i, j; | 588 | int i, j; |
| 579 | int ret; | 589 | int ret; |
| 590 | struct srp_device *dev = target->srp_host->dev; | ||
| 580 | 591 | ||
| 581 | if (!dev->fmr_pool) | 592 | if (!dev->fmr_pool) |
| 582 | return -ENODEV; | 593 | return -ENODEV; |
| 583 | 594 | ||
| 595 | if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) && | ||
| 596 | mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3)) | ||
| 597 | return -EINVAL; | ||
| 598 | |||
| 584 | len = page_cnt = 0; | 599 | len = page_cnt = 0; |
| 585 | for (i = 0; i < sg_cnt; ++i) { | 600 | for (i = 0; i < sg_cnt; ++i) { |
| 586 | if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) { | 601 | if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) { |
| @@ -683,7 +698,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, | |||
| 683 | buf->va = cpu_to_be64(sg_dma_address(scat)); | 698 | buf->va = cpu_to_be64(sg_dma_address(scat)); |
| 684 | buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey); | 699 | buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey); |
| 685 | buf->len = cpu_to_be32(sg_dma_len(scat)); | 700 | buf->len = cpu_to_be32(sg_dma_len(scat)); |
| 686 | } else if (srp_map_fmr(target->srp_host->dev, scat, count, req, | 701 | } else if (srp_map_fmr(target, scat, count, req, |
| 687 | (void *) cmd->add_data)) { | 702 | (void *) cmd->add_data)) { |
| 688 | /* | 703 | /* |
| 689 | * FMR mapping failed, and the scatterlist has more | 704 | * FMR mapping failed, and the scatterlist has more |
diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h index 0a5be7f969f2..af3eb9e795b5 100644 --- a/drivers/isdn/hardware/eicon/divasync.h +++ b/drivers/isdn/hardware/eicon/divasync.h | |||
| @@ -256,7 +256,6 @@ typedef struct | |||
| 256 | #define NO_ORDER_CHECK_MASK 0x00000010 | 256 | #define NO_ORDER_CHECK_MASK 0x00000010 |
| 257 | #define LOW_CHANNEL_MASK 0x00000020 | 257 | #define LOW_CHANNEL_MASK 0x00000020 |
| 258 | #define NO_HSCX30_MASK 0x00000040 | 258 | #define NO_HSCX30_MASK 0x00000040 |
| 259 | #define MODE_MASK 0x00000080 | ||
| 260 | #define SET_BOARD 0x00001000 | 259 | #define SET_BOARD 0x00001000 |
| 261 | #define SET_CRC4 0x00030000 | 260 | #define SET_CRC4 0x00030000 |
| 262 | #define SET_L1_TRISTATE 0x00040000 | 261 | #define SET_L1_TRISTATE 0x00040000 |
diff --git a/drivers/md/linear.c b/drivers/md/linear.c index ff83c9b5979e..b99c19c7eb22 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c | |||
| @@ -162,7 +162,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) | |||
| 162 | goto out; | 162 | goto out; |
| 163 | } | 163 | } |
| 164 | 164 | ||
| 165 | min_spacing = mddev->array_size; | 165 | min_spacing = conf->array_size; |
| 166 | sector_div(min_spacing, PAGE_SIZE/sizeof(struct dev_info *)); | 166 | sector_div(min_spacing, PAGE_SIZE/sizeof(struct dev_info *)); |
| 167 | 167 | ||
| 168 | /* min_spacing is the minimum spacing that will fit the hash | 168 | /* min_spacing is the minimum spacing that will fit the hash |
| @@ -171,7 +171,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) | |||
| 171 | * that is larger than min_spacing as use the size of that as | 171 | * that is larger than min_spacing as use the size of that as |
| 172 | * the actual spacing | 172 | * the actual spacing |
| 173 | */ | 173 | */ |
| 174 | conf->hash_spacing = mddev->array_size; | 174 | conf->hash_spacing = conf->array_size; |
| 175 | for (i=0; i < cnt-1 ; i++) { | 175 | for (i=0; i < cnt-1 ; i++) { |
| 176 | sector_t sz = 0; | 176 | sector_t sz = 0; |
| 177 | int j; | 177 | int j; |
| @@ -228,7 +228,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) | |||
| 228 | curr_offset = 0; | 228 | curr_offset = 0; |
| 229 | i = 0; | 229 | i = 0; |
| 230 | for (curr_offset = 0; | 230 | for (curr_offset = 0; |
| 231 | curr_offset < mddev->array_size; | 231 | curr_offset < conf->array_size; |
| 232 | curr_offset += conf->hash_spacing) { | 232 | curr_offset += conf->hash_spacing) { |
| 233 | 233 | ||
| 234 | while (i < mddev->raid_disks-1 && | 234 | while (i < mddev->raid_disks-1 && |
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index c3e52c806b13..06440a86baef 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
| @@ -177,6 +177,7 @@ struct myri10ge_priv { | |||
| 177 | struct work_struct watchdog_work; | 177 | struct work_struct watchdog_work; |
| 178 | struct timer_list watchdog_timer; | 178 | struct timer_list watchdog_timer; |
| 179 | int watchdog_tx_done; | 179 | int watchdog_tx_done; |
| 180 | int watchdog_tx_req; | ||
| 180 | int watchdog_resets; | 181 | int watchdog_resets; |
| 181 | int tx_linearized; | 182 | int tx_linearized; |
| 182 | int pause; | 183 | int pause; |
| @@ -448,6 +449,7 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size) | |||
| 448 | struct mcp_gen_header *hdr; | 449 | struct mcp_gen_header *hdr; |
| 449 | size_t hdr_offset; | 450 | size_t hdr_offset; |
| 450 | int status; | 451 | int status; |
| 452 | unsigned i; | ||
| 451 | 453 | ||
| 452 | if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) { | 454 | if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) { |
| 453 | dev_err(dev, "Unable to load %s firmware image via hotplug\n", | 455 | dev_err(dev, "Unable to load %s firmware image via hotplug\n", |
| @@ -479,18 +481,12 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size) | |||
| 479 | goto abort_with_fw; | 481 | goto abort_with_fw; |
| 480 | 482 | ||
| 481 | crc = crc32(~0, fw->data, fw->size); | 483 | crc = crc32(~0, fw->data, fw->size); |
| 482 | if (mgp->tx.boundary == 2048) { | 484 | for (i = 0; i < fw->size; i += 256) { |
| 483 | /* Avoid PCI burst on chipset with unaligned completions. */ | 485 | myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i, |
| 484 | int i; | 486 | fw->data + i, |
| 485 | __iomem u32 *ptr = (__iomem u32 *) (mgp->sram + | 487 | min(256U, (unsigned)(fw->size - i))); |
| 486 | MYRI10GE_FW_OFFSET); | 488 | mb(); |
| 487 | for (i = 0; i < fw->size / 4; i++) { | 489 | readb(mgp->sram); |
| 488 | __raw_writel(((u32 *) fw->data)[i], ptr + i); | ||
| 489 | wmb(); | ||
| 490 | } | ||
| 491 | } else { | ||
| 492 | myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET, fw->data, | ||
| 493 | fw->size); | ||
| 494 | } | 490 | } |
| 495 | /* corruption checking is good for parity recovery and buggy chipset */ | 491 | /* corruption checking is good for parity recovery and buggy chipset */ |
| 496 | memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size); | 492 | memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size); |
| @@ -2547,7 +2543,8 @@ static void myri10ge_watchdog_timer(unsigned long arg) | |||
| 2547 | 2543 | ||
| 2548 | mgp = (struct myri10ge_priv *)arg; | 2544 | mgp = (struct myri10ge_priv *)arg; |
| 2549 | if (mgp->tx.req != mgp->tx.done && | 2545 | if (mgp->tx.req != mgp->tx.done && |
| 2550 | mgp->tx.done == mgp->watchdog_tx_done) | 2546 | mgp->tx.done == mgp->watchdog_tx_done && |
| 2547 | mgp->watchdog_tx_req != mgp->watchdog_tx_done) | ||
| 2551 | /* nic seems like it might be stuck.. */ | 2548 | /* nic seems like it might be stuck.. */ |
| 2552 | schedule_work(&mgp->watchdog_work); | 2549 | schedule_work(&mgp->watchdog_work); |
| 2553 | else | 2550 | else |
| @@ -2556,6 +2553,7 @@ static void myri10ge_watchdog_timer(unsigned long arg) | |||
| 2556 | jiffies + myri10ge_watchdog_timeout * HZ); | 2553 | jiffies + myri10ge_watchdog_timeout * HZ); |
| 2557 | 2554 | ||
| 2558 | mgp->watchdog_tx_done = mgp->tx.done; | 2555 | mgp->watchdog_tx_done = mgp->tx.done; |
| 2556 | mgp->watchdog_tx_req = mgp->tx.req; | ||
| 2559 | } | 2557 | } |
| 2560 | 2558 | ||
| 2561 | static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 2559 | static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 7d5c2233c252..f5aad77288f9 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
| @@ -419,9 +419,8 @@ void phy_start_machine(struct phy_device *phydev, | |||
| 419 | 419 | ||
| 420 | /* phy_stop_machine | 420 | /* phy_stop_machine |
| 421 | * | 421 | * |
| 422 | * description: Stops the state machine timer, sets the state to | 422 | * description: Stops the state machine timer, sets the state to UP |
| 423 | * UP (unless it wasn't up yet), and then frees the interrupt, | 423 | * (unless it wasn't up yet). This function must be called BEFORE |
| 424 | * if it is in use. This function must be called BEFORE | ||
| 425 | * phy_detach. | 424 | * phy_detach. |
| 426 | */ | 425 | */ |
| 427 | void phy_stop_machine(struct phy_device *phydev) | 426 | void phy_stop_machine(struct phy_device *phydev) |
| @@ -433,9 +432,6 @@ void phy_stop_machine(struct phy_device *phydev) | |||
| 433 | phydev->state = PHY_UP; | 432 | phydev->state = PHY_UP; |
| 434 | spin_unlock(&phydev->lock); | 433 | spin_unlock(&phydev->lock); |
| 435 | 434 | ||
| 436 | if (phydev->irq != PHY_POLL) | ||
| 437 | phy_stop_interrupts(phydev); | ||
| 438 | |||
| 439 | phydev->adjust_state = NULL; | 435 | phydev->adjust_state = NULL; |
| 440 | } | 436 | } |
| 441 | 437 | ||
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index e1fe3a0a7b0b..132ed32bce1a 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
| @@ -76,7 +76,7 @@ | |||
| 76 | #include "s2io.h" | 76 | #include "s2io.h" |
| 77 | #include "s2io-regs.h" | 77 | #include "s2io-regs.h" |
| 78 | 78 | ||
| 79 | #define DRV_VERSION "2.0.14.2" | 79 | #define DRV_VERSION "2.0.15.2" |
| 80 | 80 | ||
| 81 | /* S2io Driver name & version. */ | 81 | /* S2io Driver name & version. */ |
| 82 | static char s2io_driver_name[] = "Neterion"; | 82 | static char s2io_driver_name[] = "Neterion"; |
| @@ -370,38 +370,50 @@ static const u64 fix_mac[] = { | |||
| 370 | END_SIGN | 370 | END_SIGN |
| 371 | }; | 371 | }; |
| 372 | 372 | ||
| 373 | MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>"); | ||
| 374 | MODULE_LICENSE("GPL"); | ||
| 375 | MODULE_VERSION(DRV_VERSION); | ||
| 376 | |||
| 377 | |||
| 373 | /* Module Loadable parameters. */ | 378 | /* Module Loadable parameters. */ |
| 374 | static unsigned int tx_fifo_num = 1; | 379 | S2IO_PARM_INT(tx_fifo_num, 1); |
| 375 | static unsigned int tx_fifo_len[MAX_TX_FIFOS] = | 380 | S2IO_PARM_INT(rx_ring_num, 1); |
| 376 | {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; | 381 | |
| 377 | static unsigned int rx_ring_num = 1; | 382 | |
| 378 | static unsigned int rx_ring_sz[MAX_RX_RINGS] = | 383 | S2IO_PARM_INT(rx_ring_mode, 1); |
| 379 | {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; | 384 | S2IO_PARM_INT(use_continuous_tx_intrs, 1); |
| 380 | static unsigned int rts_frm_len[MAX_RX_RINGS] = | 385 | S2IO_PARM_INT(rmac_pause_time, 0x100); |
| 381 | {[0 ...(MAX_RX_RINGS - 1)] = 0 }; | 386 | S2IO_PARM_INT(mc_pause_threshold_q0q3, 187); |
| 382 | static unsigned int rx_ring_mode = 1; | 387 | S2IO_PARM_INT(mc_pause_threshold_q4q7, 187); |
| 383 | static unsigned int use_continuous_tx_intrs = 1; | 388 | S2IO_PARM_INT(shared_splits, 0); |
| 384 | static unsigned int rmac_pause_time = 0x100; | 389 | S2IO_PARM_INT(tmac_util_period, 5); |
| 385 | static unsigned int mc_pause_threshold_q0q3 = 187; | 390 | S2IO_PARM_INT(rmac_util_period, 5); |
| 386 | static unsigned int mc_pause_threshold_q4q7 = 187; | 391 | S2IO_PARM_INT(bimodal, 0); |
| 387 | static unsigned int shared_splits; | 392 | S2IO_PARM_INT(l3l4hdr_size, 128); |
| 388 | static unsigned int tmac_util_period = 5; | ||
| 389 | static unsigned int rmac_util_period = 5; | ||
| 390 | static unsigned int bimodal = 0; | ||
| 391 | static unsigned int l3l4hdr_size = 128; | ||
| 392 | #ifndef CONFIG_S2IO_NAPI | ||
| 393 | static unsigned int indicate_max_pkts; | ||
| 394 | #endif | ||
| 395 | /* Frequency of Rx desc syncs expressed as power of 2 */ | 393 | /* Frequency of Rx desc syncs expressed as power of 2 */ |
| 396 | static unsigned int rxsync_frequency = 3; | 394 | S2IO_PARM_INT(rxsync_frequency, 3); |
| 397 | /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ | 395 | /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ |
| 398 | static unsigned int intr_type = 0; | 396 | S2IO_PARM_INT(intr_type, 0); |
| 399 | /* Large receive offload feature */ | 397 | /* Large receive offload feature */ |
| 400 | static unsigned int lro = 0; | 398 | S2IO_PARM_INT(lro, 0); |
| 401 | /* Max pkts to be aggregated by LRO at one time. If not specified, | 399 | /* Max pkts to be aggregated by LRO at one time. If not specified, |
| 402 | * aggregation happens until we hit max IP pkt size(64K) | 400 | * aggregation happens until we hit max IP pkt size(64K) |
| 403 | */ | 401 | */ |
| 404 | static unsigned int lro_max_pkts = 0xFFFF; | 402 | S2IO_PARM_INT(lro_max_pkts, 0xFFFF); |
| 403 | #ifndef CONFIG_S2IO_NAPI | ||
| 404 | S2IO_PARM_INT(indicate_max_pkts, 0); | ||
| 405 | #endif | ||
| 406 | |||
| 407 | static unsigned int tx_fifo_len[MAX_TX_FIFOS] = | ||
| 408 | {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; | ||
| 409 | static unsigned int rx_ring_sz[MAX_RX_RINGS] = | ||
| 410 | {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; | ||
| 411 | static unsigned int rts_frm_len[MAX_RX_RINGS] = | ||
| 412 | {[0 ...(MAX_RX_RINGS - 1)] = 0 }; | ||
| 413 | |||
| 414 | module_param_array(tx_fifo_len, uint, NULL, 0); | ||
| 415 | module_param_array(rx_ring_sz, uint, NULL, 0); | ||
| 416 | module_param_array(rts_frm_len, uint, NULL, 0); | ||
| 405 | 417 | ||
| 406 | /* | 418 | /* |
| 407 | * S2IO device table. | 419 | * S2IO device table. |
| @@ -464,10 +476,9 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
| 464 | size += config->tx_cfg[i].fifo_len; | 476 | size += config->tx_cfg[i].fifo_len; |
| 465 | } | 477 | } |
| 466 | if (size > MAX_AVAILABLE_TXDS) { | 478 | if (size > MAX_AVAILABLE_TXDS) { |
| 467 | DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ", | 479 | DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, "); |
| 468 | __FUNCTION__); | ||
| 469 | DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size); | 480 | DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size); |
| 470 | return FAILURE; | 481 | return -EINVAL; |
| 471 | } | 482 | } |
| 472 | 483 | ||
| 473 | lst_size = (sizeof(TxD_t) * config->max_txds); | 484 | lst_size = (sizeof(TxD_t) * config->max_txds); |
| @@ -547,6 +558,7 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
| 547 | nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL); | 558 | nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL); |
| 548 | if (!nic->ufo_in_band_v) | 559 | if (!nic->ufo_in_band_v) |
| 549 | return -ENOMEM; | 560 | return -ENOMEM; |
| 561 | memset(nic->ufo_in_band_v, 0, size); | ||
| 550 | 562 | ||
| 551 | /* Allocation and initialization of RXDs in Rings */ | 563 | /* Allocation and initialization of RXDs in Rings */ |
| 552 | size = 0; | 564 | size = 0; |
| @@ -1213,7 +1225,7 @@ static int init_nic(struct s2io_nic *nic) | |||
| 1213 | break; | 1225 | break; |
| 1214 | } | 1226 | } |
| 1215 | 1227 | ||
| 1216 | /* Enable Tx FIFO partition 0. */ | 1228 | /* Enable all configured Tx FIFO partitions */ |
| 1217 | val64 = readq(&bar0->tx_fifo_partition_0); | 1229 | val64 = readq(&bar0->tx_fifo_partition_0); |
| 1218 | val64 |= (TX_FIFO_PARTITION_EN); | 1230 | val64 |= (TX_FIFO_PARTITION_EN); |
| 1219 | writeq(val64, &bar0->tx_fifo_partition_0); | 1231 | writeq(val64, &bar0->tx_fifo_partition_0); |
| @@ -1650,7 +1662,7 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) | |||
| 1650 | writeq(temp64, &bar0->general_int_mask); | 1662 | writeq(temp64, &bar0->general_int_mask); |
| 1651 | /* | 1663 | /* |
| 1652 | * If Hercules adapter enable GPIO otherwise | 1664 | * If Hercules adapter enable GPIO otherwise |
| 1653 | * disabled all PCIX, Flash, MDIO, IIC and GPIO | 1665 | * disable all PCIX, Flash, MDIO, IIC and GPIO |
| 1654 | * interrupts for now. | 1666 | * interrupts for now. |
| 1655 | * TODO | 1667 | * TODO |
| 1656 | */ | 1668 | */ |
| @@ -2119,7 +2131,7 @@ static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, in | |||
| 2119 | frag->size, PCI_DMA_TODEVICE); | 2131 | frag->size, PCI_DMA_TODEVICE); |
| 2120 | } | 2132 | } |
| 2121 | } | 2133 | } |
| 2122 | txdlp->Host_Control = 0; | 2134 | memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds)); |
| 2123 | return(skb); | 2135 | return(skb); |
| 2124 | } | 2136 | } |
| 2125 | 2137 | ||
| @@ -2371,9 +2383,14 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
| 2371 | skb->data = (void *) (unsigned long)tmp; | 2383 | skb->data = (void *) (unsigned long)tmp; |
| 2372 | skb->tail = (void *) (unsigned long)tmp; | 2384 | skb->tail = (void *) (unsigned long)tmp; |
| 2373 | 2385 | ||
| 2374 | ((RxD3_t*)rxdp)->Buffer0_ptr = | 2386 | if (!(((RxD3_t*)rxdp)->Buffer0_ptr)) |
| 2375 | pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, | 2387 | ((RxD3_t*)rxdp)->Buffer0_ptr = |
| 2388 | pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, | ||
| 2376 | PCI_DMA_FROMDEVICE); | 2389 | PCI_DMA_FROMDEVICE); |
| 2390 | else | ||
| 2391 | pci_dma_sync_single_for_device(nic->pdev, | ||
| 2392 | (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr, | ||
| 2393 | BUF0_LEN, PCI_DMA_FROMDEVICE); | ||
| 2377 | rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); | 2394 | rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); |
| 2378 | if (nic->rxd_mode == RXD_MODE_3B) { | 2395 | if (nic->rxd_mode == RXD_MODE_3B) { |
| 2379 | /* Two buffer mode */ | 2396 | /* Two buffer mode */ |
| @@ -2386,10 +2403,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
| 2386 | (nic->pdev, skb->data, dev->mtu + 4, | 2403 | (nic->pdev, skb->data, dev->mtu + 4, |
| 2387 | PCI_DMA_FROMDEVICE); | 2404 | PCI_DMA_FROMDEVICE); |
| 2388 | 2405 | ||
| 2389 | /* Buffer-1 will be dummy buffer not used */ | 2406 | /* Buffer-1 will be dummy buffer. Not used */ |
| 2390 | ((RxD3_t*)rxdp)->Buffer1_ptr = | 2407 | if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) { |
| 2391 | pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN, | 2408 | ((RxD3_t*)rxdp)->Buffer1_ptr = |
| 2392 | PCI_DMA_FROMDEVICE); | 2409 | pci_map_single(nic->pdev, |
| 2410 | ba->ba_1, BUF1_LEN, | ||
| 2411 | PCI_DMA_FROMDEVICE); | ||
| 2412 | } | ||
| 2393 | rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); | 2413 | rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); |
| 2394 | rxdp->Control_2 |= SET_BUFFER2_SIZE_3 | 2414 | rxdp->Control_2 |= SET_BUFFER2_SIZE_3 |
| 2395 | (dev->mtu + 4); | 2415 | (dev->mtu + 4); |
| @@ -2614,23 +2634,23 @@ no_rx: | |||
| 2614 | } | 2634 | } |
| 2615 | #endif | 2635 | #endif |
| 2616 | 2636 | ||
| 2637 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2617 | /** | 2638 | /** |
| 2618 | * s2io_netpoll - Rx interrupt service handler for netpoll support | 2639 | * s2io_netpoll - netpoll event handler entry point |
| 2619 | * @dev : pointer to the device structure. | 2640 | * @dev : pointer to the device structure. |
| 2620 | * Description: | 2641 | * Description: |
| 2621 | * Polling 'interrupt' - used by things like netconsole to send skbs | 2642 | * This function will be called by upper layer to check for events on the |
| 2622 | * without having to re-enable interrupts. It's not called while | 2643 | * interface in situations where interrupts are disabled. It is used for |
| 2623 | * the interrupt routine is executing. | 2644 | * specific in-kernel networking tasks, such as remote consoles and kernel |
| 2645 | * debugging over the network (example netdump in RedHat). | ||
| 2624 | */ | 2646 | */ |
| 2625 | |||
| 2626 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2627 | static void s2io_netpoll(struct net_device *dev) | 2647 | static void s2io_netpoll(struct net_device *dev) |
| 2628 | { | 2648 | { |
| 2629 | nic_t *nic = dev->priv; | 2649 | nic_t *nic = dev->priv; |
| 2630 | mac_info_t *mac_control; | 2650 | mac_info_t *mac_control; |
| 2631 | struct config_param *config; | 2651 | struct config_param *config; |
| 2632 | XENA_dev_config_t __iomem *bar0 = nic->bar0; | 2652 | XENA_dev_config_t __iomem *bar0 = nic->bar0; |
| 2633 | u64 val64; | 2653 | u64 val64 = 0xFFFFFFFFFFFFFFFFULL; |
| 2634 | int i; | 2654 | int i; |
| 2635 | 2655 | ||
| 2636 | disable_irq(dev->irq); | 2656 | disable_irq(dev->irq); |
| @@ -2639,9 +2659,17 @@ static void s2io_netpoll(struct net_device *dev) | |||
| 2639 | mac_control = &nic->mac_control; | 2659 | mac_control = &nic->mac_control; |
| 2640 | config = &nic->config; | 2660 | config = &nic->config; |
| 2641 | 2661 | ||
| 2642 | val64 = readq(&bar0->rx_traffic_int); | ||
| 2643 | writeq(val64, &bar0->rx_traffic_int); | 2662 | writeq(val64, &bar0->rx_traffic_int); |
| 2663 | writeq(val64, &bar0->tx_traffic_int); | ||
| 2644 | 2664 | ||
| 2665 | /* we need to free up the transmitted skbufs or else netpoll will | ||
| 2666 | * run out of skbs and will fail and eventually netpoll application such | ||
| 2667 | * as netdump will fail. | ||
| 2668 | */ | ||
| 2669 | for (i = 0; i < config->tx_fifo_num; i++) | ||
| 2670 | tx_intr_handler(&mac_control->fifos[i]); | ||
| 2671 | |||
| 2672 | /* check for received packet and indicate up to network */ | ||
| 2645 | for (i = 0; i < config->rx_ring_num; i++) | 2673 | for (i = 0; i < config->rx_ring_num; i++) |
| 2646 | rx_intr_handler(&mac_control->rings[i]); | 2674 | rx_intr_handler(&mac_control->rings[i]); |
| 2647 | 2675 | ||
| @@ -2708,7 +2736,7 @@ static void rx_intr_handler(ring_info_t *ring_data) | |||
| 2708 | /* If your are next to put index then it's FIFO full condition */ | 2736 | /* If your are next to put index then it's FIFO full condition */ |
| 2709 | if ((get_block == put_block) && | 2737 | if ((get_block == put_block) && |
| 2710 | (get_info.offset + 1) == put_info.offset) { | 2738 | (get_info.offset + 1) == put_info.offset) { |
| 2711 | DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name); | 2739 | DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name); |
| 2712 | break; | 2740 | break; |
| 2713 | } | 2741 | } |
| 2714 | skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); | 2742 | skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); |
| @@ -2728,18 +2756,15 @@ static void rx_intr_handler(ring_info_t *ring_data) | |||
| 2728 | HEADER_SNAP_SIZE, | 2756 | HEADER_SNAP_SIZE, |
| 2729 | PCI_DMA_FROMDEVICE); | 2757 | PCI_DMA_FROMDEVICE); |
| 2730 | } else if (nic->rxd_mode == RXD_MODE_3B) { | 2758 | } else if (nic->rxd_mode == RXD_MODE_3B) { |
| 2731 | pci_unmap_single(nic->pdev, (dma_addr_t) | 2759 | pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) |
| 2732 | ((RxD3_t*)rxdp)->Buffer0_ptr, | 2760 | ((RxD3_t*)rxdp)->Buffer0_ptr, |
| 2733 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 2761 | BUF0_LEN, PCI_DMA_FROMDEVICE); |
| 2734 | pci_unmap_single(nic->pdev, (dma_addr_t) | 2762 | pci_unmap_single(nic->pdev, (dma_addr_t) |
| 2735 | ((RxD3_t*)rxdp)->Buffer1_ptr, | ||
| 2736 | BUF1_LEN, PCI_DMA_FROMDEVICE); | ||
| 2737 | pci_unmap_single(nic->pdev, (dma_addr_t) | ||
| 2738 | ((RxD3_t*)rxdp)->Buffer2_ptr, | 2763 | ((RxD3_t*)rxdp)->Buffer2_ptr, |
| 2739 | dev->mtu + 4, | 2764 | dev->mtu + 4, |
| 2740 | PCI_DMA_FROMDEVICE); | 2765 | PCI_DMA_FROMDEVICE); |
| 2741 | } else { | 2766 | } else { |
| 2742 | pci_unmap_single(nic->pdev, (dma_addr_t) | 2767 | pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) |
| 2743 | ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, | 2768 | ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, |
| 2744 | PCI_DMA_FROMDEVICE); | 2769 | PCI_DMA_FROMDEVICE); |
| 2745 | pci_unmap_single(nic->pdev, (dma_addr_t) | 2770 | pci_unmap_single(nic->pdev, (dma_addr_t) |
| @@ -3327,7 +3352,7 @@ static void s2io_reset(nic_t * sp) | |||
| 3327 | 3352 | ||
| 3328 | /* Clear certain PCI/PCI-X fields after reset */ | 3353 | /* Clear certain PCI/PCI-X fields after reset */ |
| 3329 | if (sp->device_type == XFRAME_II_DEVICE) { | 3354 | if (sp->device_type == XFRAME_II_DEVICE) { |
| 3330 | /* Clear parity err detect bit */ | 3355 | /* Clear "detected parity error" bit */ |
| 3331 | pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000); | 3356 | pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000); |
| 3332 | 3357 | ||
| 3333 | /* Clearing PCIX Ecc status register */ | 3358 | /* Clearing PCIX Ecc status register */ |
| @@ -3528,7 +3553,7 @@ static void restore_xmsi_data(nic_t *nic) | |||
| 3528 | u64 val64; | 3553 | u64 val64; |
| 3529 | int i; | 3554 | int i; |
| 3530 | 3555 | ||
| 3531 | for (i=0; i< nic->avail_msix_vectors; i++) { | 3556 | for (i=0; i < MAX_REQUESTED_MSI_X; i++) { |
| 3532 | writeq(nic->msix_info[i].addr, &bar0->xmsi_address); | 3557 | writeq(nic->msix_info[i].addr, &bar0->xmsi_address); |
| 3533 | writeq(nic->msix_info[i].data, &bar0->xmsi_data); | 3558 | writeq(nic->msix_info[i].data, &bar0->xmsi_data); |
| 3534 | val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6)); | 3559 | val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6)); |
| @@ -3547,7 +3572,7 @@ static void store_xmsi_data(nic_t *nic) | |||
| 3547 | int i; | 3572 | int i; |
| 3548 | 3573 | ||
| 3549 | /* Store and display */ | 3574 | /* Store and display */ |
| 3550 | for (i=0; i< nic->avail_msix_vectors; i++) { | 3575 | for (i=0; i < MAX_REQUESTED_MSI_X; i++) { |
| 3551 | val64 = (BIT(15) | vBIT(i, 26, 6)); | 3576 | val64 = (BIT(15) | vBIT(i, 26, 6)); |
| 3552 | writeq(val64, &bar0->xmsi_access); | 3577 | writeq(val64, &bar0->xmsi_access); |
| 3553 | if (wait_for_msix_trans(nic, i)) { | 3578 | if (wait_for_msix_trans(nic, i)) { |
| @@ -3808,13 +3833,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3808 | TxD_t *txdp; | 3833 | TxD_t *txdp; |
| 3809 | TxFIFO_element_t __iomem *tx_fifo; | 3834 | TxFIFO_element_t __iomem *tx_fifo; |
| 3810 | unsigned long flags; | 3835 | unsigned long flags; |
| 3811 | #ifdef NETIF_F_TSO | ||
| 3812 | int mss; | ||
| 3813 | #endif | ||
| 3814 | u16 vlan_tag = 0; | 3836 | u16 vlan_tag = 0; |
| 3815 | int vlan_priority = 0; | 3837 | int vlan_priority = 0; |
| 3816 | mac_info_t *mac_control; | 3838 | mac_info_t *mac_control; |
| 3817 | struct config_param *config; | 3839 | struct config_param *config; |
| 3840 | int offload_type; | ||
| 3818 | 3841 | ||
| 3819 | mac_control = &sp->mac_control; | 3842 | mac_control = &sp->mac_control; |
| 3820 | config = &sp->config; | 3843 | config = &sp->config; |
| @@ -3862,13 +3885,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3862 | return 0; | 3885 | return 0; |
| 3863 | } | 3886 | } |
| 3864 | 3887 | ||
| 3865 | txdp->Control_1 = 0; | 3888 | offload_type = s2io_offload_type(skb); |
| 3866 | txdp->Control_2 = 0; | ||
| 3867 | #ifdef NETIF_F_TSO | 3889 | #ifdef NETIF_F_TSO |
| 3868 | mss = skb_shinfo(skb)->gso_size; | 3890 | if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { |
| 3869 | if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { | ||
| 3870 | txdp->Control_1 |= TXD_TCP_LSO_EN; | 3891 | txdp->Control_1 |= TXD_TCP_LSO_EN; |
| 3871 | txdp->Control_1 |= TXD_TCP_LSO_MSS(mss); | 3892 | txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb)); |
| 3872 | } | 3893 | } |
| 3873 | #endif | 3894 | #endif |
| 3874 | if (skb->ip_summed == CHECKSUM_HW) { | 3895 | if (skb->ip_summed == CHECKSUM_HW) { |
| @@ -3886,10 +3907,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3886 | } | 3907 | } |
| 3887 | 3908 | ||
| 3888 | frg_len = skb->len - skb->data_len; | 3909 | frg_len = skb->len - skb->data_len; |
| 3889 | if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) { | 3910 | if (offload_type == SKB_GSO_UDP) { |
| 3890 | int ufo_size; | 3911 | int ufo_size; |
| 3891 | 3912 | ||
| 3892 | ufo_size = skb_shinfo(skb)->gso_size; | 3913 | ufo_size = s2io_udp_mss(skb); |
| 3893 | ufo_size &= ~7; | 3914 | ufo_size &= ~7; |
| 3894 | txdp->Control_1 |= TXD_UFO_EN; | 3915 | txdp->Control_1 |= TXD_UFO_EN; |
| 3895 | txdp->Control_1 |= TXD_UFO_MSS(ufo_size); | 3916 | txdp->Control_1 |= TXD_UFO_MSS(ufo_size); |
| @@ -3906,16 +3927,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3906 | sp->ufo_in_band_v, | 3927 | sp->ufo_in_band_v, |
| 3907 | sizeof(u64), PCI_DMA_TODEVICE); | 3928 | sizeof(u64), PCI_DMA_TODEVICE); |
| 3908 | txdp++; | 3929 | txdp++; |
| 3909 | txdp->Control_1 = 0; | ||
| 3910 | txdp->Control_2 = 0; | ||
| 3911 | } | 3930 | } |
| 3912 | 3931 | ||
| 3913 | txdp->Buffer_Pointer = pci_map_single | 3932 | txdp->Buffer_Pointer = pci_map_single |
| 3914 | (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); | 3933 | (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); |
| 3915 | txdp->Host_Control = (unsigned long) skb; | 3934 | txdp->Host_Control = (unsigned long) skb; |
| 3916 | txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); | 3935 | txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); |
| 3917 | 3936 | if (offload_type == SKB_GSO_UDP) | |
| 3918 | if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) | ||
| 3919 | txdp->Control_1 |= TXD_UFO_EN; | 3937 | txdp->Control_1 |= TXD_UFO_EN; |
| 3920 | 3938 | ||
| 3921 | frg_cnt = skb_shinfo(skb)->nr_frags; | 3939 | frg_cnt = skb_shinfo(skb)->nr_frags; |
| @@ -3930,12 +3948,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3930 | (sp->pdev, frag->page, frag->page_offset, | 3948 | (sp->pdev, frag->page, frag->page_offset, |
| 3931 | frag->size, PCI_DMA_TODEVICE); | 3949 | frag->size, PCI_DMA_TODEVICE); |
| 3932 | txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size); | 3950 | txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size); |
| 3933 | if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) | 3951 | if (offload_type == SKB_GSO_UDP) |
| 3934 | txdp->Control_1 |= TXD_UFO_EN; | 3952 | txdp->Control_1 |= TXD_UFO_EN; |
| 3935 | } | 3953 | } |
| 3936 | txdp->Control_1 |= TXD_GATHER_CODE_LAST; | 3954 | txdp->Control_1 |= TXD_GATHER_CODE_LAST; |
| 3937 | 3955 | ||
| 3938 | if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) | 3956 | if (offload_type == SKB_GSO_UDP) |
| 3939 | frg_cnt++; /* as Txd0 was used for inband header */ | 3957 | frg_cnt++; /* as Txd0 was used for inband header */ |
| 3940 | 3958 | ||
| 3941 | tx_fifo = mac_control->tx_FIFO_start[queue]; | 3959 | tx_fifo = mac_control->tx_FIFO_start[queue]; |
| @@ -3944,13 +3962,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3944 | 3962 | ||
| 3945 | val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | | 3963 | val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | |
| 3946 | TX_FIFO_LAST_LIST); | 3964 | TX_FIFO_LAST_LIST); |
| 3947 | 3965 | if (offload_type) | |
| 3948 | #ifdef NETIF_F_TSO | ||
| 3949 | if (mss) | ||
| 3950 | val64 |= TX_FIFO_SPECIAL_FUNC; | ||
| 3951 | #endif | ||
| 3952 | if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) | ||
| 3953 | val64 |= TX_FIFO_SPECIAL_FUNC; | 3966 | val64 |= TX_FIFO_SPECIAL_FUNC; |
| 3967 | |||
| 3954 | writeq(val64, &tx_fifo->List_Control); | 3968 | writeq(val64, &tx_fifo->List_Control); |
| 3955 | 3969 | ||
| 3956 | mmiowb(); | 3970 | mmiowb(); |
| @@ -3984,13 +3998,41 @@ s2io_alarm_handle(unsigned long data) | |||
| 3984 | mod_timer(&sp->alarm_timer, jiffies + HZ / 2); | 3998 | mod_timer(&sp->alarm_timer, jiffies + HZ / 2); |
| 3985 | } | 3999 | } |
| 3986 | 4000 | ||
| 4001 | static int s2io_chk_rx_buffers(nic_t *sp, int rng_n) | ||
| 4002 | { | ||
| 4003 | int rxb_size, level; | ||
| 4004 | |||
| 4005 | if (!sp->lro) { | ||
| 4006 | rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); | ||
| 4007 | level = rx_buffer_level(sp, rxb_size, rng_n); | ||
| 4008 | |||
| 4009 | if ((level == PANIC) && (!TASKLET_IN_USE)) { | ||
| 4010 | int ret; | ||
| 4011 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); | ||
| 4012 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); | ||
| 4013 | if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { | ||
| 4014 | DBG_PRINT(ERR_DBG, "Out of memory in %s", | ||
| 4015 | __FUNCTION__); | ||
| 4016 | clear_bit(0, (&sp->tasklet_status)); | ||
| 4017 | return -1; | ||
| 4018 | } | ||
| 4019 | clear_bit(0, (&sp->tasklet_status)); | ||
| 4020 | } else if (level == LOW) | ||
| 4021 | tasklet_schedule(&sp->task); | ||
| 4022 | |||
| 4023 | } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { | ||
| 4024 | DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name); | ||
| 4025 | DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); | ||
| 4026 | } | ||
| 4027 | return 0; | ||
| 4028 | } | ||
| 4029 | |||
| 3987 | static irqreturn_t | 4030 | static irqreturn_t |
| 3988 | s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) | 4031 | s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) |
| 3989 | { | 4032 | { |
| 3990 | struct net_device *dev = (struct net_device *) dev_id; | 4033 | struct net_device *dev = (struct net_device *) dev_id; |
| 3991 | nic_t *sp = dev->priv; | 4034 | nic_t *sp = dev->priv; |
| 3992 | int i; | 4035 | int i; |
| 3993 | int ret; | ||
| 3994 | mac_info_t *mac_control; | 4036 | mac_info_t *mac_control; |
| 3995 | struct config_param *config; | 4037 | struct config_param *config; |
| 3996 | 4038 | ||
| @@ -4012,35 +4054,8 @@ s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) | |||
| 4012 | * reallocate the buffers from the interrupt handler itself, | 4054 | * reallocate the buffers from the interrupt handler itself, |
| 4013 | * else schedule a tasklet to reallocate the buffers. | 4055 | * else schedule a tasklet to reallocate the buffers. |
| 4014 | */ | 4056 | */ |
| 4015 | for (i = 0; i < config->rx_ring_num; i++) { | 4057 | for (i = 0; i < config->rx_ring_num; i++) |
| 4016 | if (!sp->lro) { | 4058 | s2io_chk_rx_buffers(sp, i); |
| 4017 | int rxb_size = atomic_read(&sp->rx_bufs_left[i]); | ||
| 4018 | int level = rx_buffer_level(sp, rxb_size, i); | ||
| 4019 | |||
| 4020 | if ((level == PANIC) && (!TASKLET_IN_USE)) { | ||
| 4021 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", | ||
| 4022 | dev->name); | ||
| 4023 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); | ||
| 4024 | if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { | ||
| 4025 | DBG_PRINT(ERR_DBG, "%s:Out of memory", | ||
| 4026 | dev->name); | ||
| 4027 | DBG_PRINT(ERR_DBG, " in ISR!!\n"); | ||
| 4028 | clear_bit(0, (&sp->tasklet_status)); | ||
| 4029 | atomic_dec(&sp->isr_cnt); | ||
| 4030 | return IRQ_HANDLED; | ||
| 4031 | } | ||
| 4032 | clear_bit(0, (&sp->tasklet_status)); | ||
| 4033 | } else if (level == LOW) { | ||
| 4034 | tasklet_schedule(&sp->task); | ||
| 4035 | } | ||
| 4036 | } | ||
| 4037 | else if (fill_rx_buffers(sp, i) == -ENOMEM) { | ||
| 4038 | DBG_PRINT(ERR_DBG, "%s:Out of memory", | ||
| 4039 | dev->name); | ||
| 4040 | DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); | ||
| 4041 | break; | ||
| 4042 | } | ||
| 4043 | } | ||
| 4044 | 4059 | ||
| 4045 | atomic_dec(&sp->isr_cnt); | 4060 | atomic_dec(&sp->isr_cnt); |
| 4046 | return IRQ_HANDLED; | 4061 | return IRQ_HANDLED; |
| @@ -4051,39 +4066,13 @@ s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs) | |||
| 4051 | { | 4066 | { |
| 4052 | ring_info_t *ring = (ring_info_t *)dev_id; | 4067 | ring_info_t *ring = (ring_info_t *)dev_id; |
| 4053 | nic_t *sp = ring->nic; | 4068 | nic_t *sp = ring->nic; |
| 4054 | struct net_device *dev = (struct net_device *) dev_id; | ||
| 4055 | int rxb_size, level, rng_n; | ||
| 4056 | 4069 | ||
| 4057 | atomic_inc(&sp->isr_cnt); | 4070 | atomic_inc(&sp->isr_cnt); |
| 4058 | rx_intr_handler(ring); | ||
| 4059 | |||
| 4060 | rng_n = ring->ring_no; | ||
| 4061 | if (!sp->lro) { | ||
| 4062 | rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); | ||
| 4063 | level = rx_buffer_level(sp, rxb_size, rng_n); | ||
| 4064 | 4071 | ||
| 4065 | if ((level == PANIC) && (!TASKLET_IN_USE)) { | 4072 | rx_intr_handler(ring); |
| 4066 | int ret; | 4073 | s2io_chk_rx_buffers(sp, ring->ring_no); |
| 4067 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); | ||
| 4068 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); | ||
| 4069 | if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { | ||
| 4070 | DBG_PRINT(ERR_DBG, "Out of memory in %s", | ||
| 4071 | __FUNCTION__); | ||
| 4072 | clear_bit(0, (&sp->tasklet_status)); | ||
| 4073 | return IRQ_HANDLED; | ||
| 4074 | } | ||
| 4075 | clear_bit(0, (&sp->tasklet_status)); | ||
| 4076 | } else if (level == LOW) { | ||
| 4077 | tasklet_schedule(&sp->task); | ||
| 4078 | } | ||
| 4079 | } | ||
| 4080 | else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { | ||
| 4081 | DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name); | ||
| 4082 | DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); | ||
| 4083 | } | ||
| 4084 | 4074 | ||
| 4085 | atomic_dec(&sp->isr_cnt); | 4075 | atomic_dec(&sp->isr_cnt); |
| 4086 | |||
| 4087 | return IRQ_HANDLED; | 4076 | return IRQ_HANDLED; |
| 4088 | } | 4077 | } |
| 4089 | 4078 | ||
| @@ -4248,37 +4237,8 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs) | |||
| 4248 | * else schedule a tasklet to reallocate the buffers. | 4237 | * else schedule a tasklet to reallocate the buffers. |
| 4249 | */ | 4238 | */ |
| 4250 | #ifndef CONFIG_S2IO_NAPI | 4239 | #ifndef CONFIG_S2IO_NAPI |
| 4251 | for (i = 0; i < config->rx_ring_num; i++) { | 4240 | for (i = 0; i < config->rx_ring_num; i++) |
| 4252 | if (!sp->lro) { | 4241 | s2io_chk_rx_buffers(sp, i); |
| 4253 | int ret; | ||
| 4254 | int rxb_size = atomic_read(&sp->rx_bufs_left[i]); | ||
| 4255 | int level = rx_buffer_level(sp, rxb_size, i); | ||
| 4256 | |||
| 4257 | if ((level == PANIC) && (!TASKLET_IN_USE)) { | ||
| 4258 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", | ||
| 4259 | dev->name); | ||
| 4260 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); | ||
| 4261 | if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { | ||
| 4262 | DBG_PRINT(ERR_DBG, "%s:Out of memory", | ||
| 4263 | dev->name); | ||
| 4264 | DBG_PRINT(ERR_DBG, " in ISR!!\n"); | ||
| 4265 | clear_bit(0, (&sp->tasklet_status)); | ||
| 4266 | atomic_dec(&sp->isr_cnt); | ||
| 4267 | writeq(org_mask, &bar0->general_int_mask); | ||
| 4268 | return IRQ_HANDLED; | ||
| 4269 | } | ||
| 4270 | clear_bit(0, (&sp->tasklet_status)); | ||
| 4271 | } else if (level == LOW) { | ||
| 4272 | tasklet_schedule(&sp->task); | ||
| 4273 | } | ||
| 4274 | } | ||
| 4275 | else if (fill_rx_buffers(sp, i) == -ENOMEM) { | ||
| 4276 | DBG_PRINT(ERR_DBG, "%s:Out of memory", | ||
| 4277 | dev->name); | ||
| 4278 | DBG_PRINT(ERR_DBG, " in Rx intr!!\n"); | ||
| 4279 | break; | ||
| 4280 | } | ||
| 4281 | } | ||
| 4282 | #endif | 4242 | #endif |
| 4283 | writeq(org_mask, &bar0->general_int_mask); | 4243 | writeq(org_mask, &bar0->general_int_mask); |
| 4284 | atomic_dec(&sp->isr_cnt); | 4244 | atomic_dec(&sp->isr_cnt); |
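Editor's note: the hunks above remove the same PANIC/LOW replenish logic from the MSI, MSI-X ring and INTA interrupt handlers and route all three through the new s2io_chk_rx_buffers() helper. A rough sketch of the resulting call pattern, using the driver's own names (sp, config, mac_control are the handler locals) but omitting isr_cnt accounting and interrupt-mask handling, so it is illustrative rather than a drop-in replacement:

        /* each Rx interrupt path now does, per ring: */
        for (i = 0; i < config->rx_ring_num; i++) {
                rx_intr_handler(&mac_control->rings[i]); /* pass packets up        */
                s2io_chk_rx_buffers(sp, i);              /* refill if LOW or PANIC */
        }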
| @@ -4308,6 +4268,8 @@ static void s2io_updt_stats(nic_t *sp) | |||
| 4308 | if (cnt == 5) | 4268 | if (cnt == 5) |
| 4309 | break; /* Updt failed */ | 4269 | break; /* Updt failed */ |
| 4310 | } while(1); | 4270 | } while(1); |
| 4271 | } else { | ||
| 4272 | memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t)); | ||
| 4311 | } | 4273 | } |
| 4312 | } | 4274 | } |
| 4313 | 4275 | ||
| @@ -4942,7 +4904,8 @@ static int write_eeprom(nic_t * sp, int off, u64 data, int cnt) | |||
| 4942 | } | 4904 | } |
| 4943 | static void s2io_vpd_read(nic_t *nic) | 4905 | static void s2io_vpd_read(nic_t *nic) |
| 4944 | { | 4906 | { |
| 4945 | u8 vpd_data[256],data; | 4907 | u8 *vpd_data; |
| 4908 | u8 data; | ||
| 4946 | int i=0, cnt, fail = 0; | 4909 | int i=0, cnt, fail = 0; |
| 4947 | int vpd_addr = 0x80; | 4910 | int vpd_addr = 0x80; |
| 4948 | 4911 | ||
| @@ -4955,6 +4918,10 @@ static void s2io_vpd_read(nic_t *nic) | |||
| 4955 | vpd_addr = 0x50; | 4918 | vpd_addr = 0x50; |
| 4956 | } | 4919 | } |
| 4957 | 4920 | ||
| 4921 | vpd_data = kmalloc(256, GFP_KERNEL); | ||
| 4922 | if (!vpd_data) | ||
| 4923 | return; | ||
| 4924 | |||
| 4958 | for (i = 0; i < 256; i +=4 ) { | 4925 | for (i = 0; i < 256; i +=4 ) { |
| 4959 | pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); | 4926 | pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); |
| 4960 | pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data); | 4927 | pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data); |
| @@ -4977,6 +4944,7 @@ static void s2io_vpd_read(nic_t *nic) | |||
| 4977 | memset(nic->product_name, 0, vpd_data[1]); | 4944 | memset(nic->product_name, 0, vpd_data[1]); |
| 4978 | memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); | 4945 | memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); |
| 4979 | } | 4946 | } |
| 4947 | kfree(vpd_data); | ||
| 4980 | } | 4948 | } |
| 4981 | 4949 | ||
| 4982 | /** | 4950 | /** |
| @@ -5295,7 +5263,7 @@ static int s2io_link_test(nic_t * sp, uint64_t * data) | |||
| 5295 | else | 5263 | else |
| 5296 | *data = 0; | 5264 | *data = 0; |
| 5297 | 5265 | ||
| 5298 | return 0; | 5266 | return *data; |
| 5299 | } | 5267 | } |
| 5300 | 5268 | ||
| 5301 | /** | 5269 | /** |
| @@ -5753,6 +5721,19 @@ static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) | |||
| 5753 | return 0; | 5721 | return 0; |
| 5754 | } | 5722 | } |
| 5755 | 5723 | ||
| 5724 | static u32 s2io_ethtool_op_get_tso(struct net_device *dev) | ||
| 5725 | { | ||
| 5726 | return (dev->features & NETIF_F_TSO) != 0; | ||
| 5727 | } | ||
| 5728 | static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data) | ||
| 5729 | { | ||
| 5730 | if (data) | ||
| 5731 | dev->features |= (NETIF_F_TSO | NETIF_F_TSO6); | ||
| 5732 | else | ||
| 5733 | dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); | ||
| 5734 | |||
| 5735 | return 0; | ||
| 5736 | } | ||
| 5756 | 5737 | ||
| 5757 | static struct ethtool_ops netdev_ethtool_ops = { | 5738 | static struct ethtool_ops netdev_ethtool_ops = { |
| 5758 | .get_settings = s2io_ethtool_gset, | 5739 | .get_settings = s2io_ethtool_gset, |
| @@ -5773,8 +5754,8 @@ static struct ethtool_ops netdev_ethtool_ops = { | |||
| 5773 | .get_sg = ethtool_op_get_sg, | 5754 | .get_sg = ethtool_op_get_sg, |
| 5774 | .set_sg = ethtool_op_set_sg, | 5755 | .set_sg = ethtool_op_set_sg, |
| 5775 | #ifdef NETIF_F_TSO | 5756 | #ifdef NETIF_F_TSO |
| 5776 | .get_tso = ethtool_op_get_tso, | 5757 | .get_tso = s2io_ethtool_op_get_tso, |
| 5777 | .set_tso = ethtool_op_set_tso, | 5758 | .set_tso = s2io_ethtool_op_set_tso, |
| 5778 | #endif | 5759 | #endif |
| 5779 | .get_ufo = ethtool_op_get_ufo, | 5760 | .get_ufo = ethtool_op_get_ufo, |
| 5780 | .set_ufo = ethtool_op_set_ufo, | 5761 | .set_ufo = ethtool_op_set_ufo, |
| @@ -6337,7 +6318,7 @@ static int s2io_card_up(nic_t * sp) | |||
| 6337 | s2io_set_multicast(dev); | 6318 | s2io_set_multicast(dev); |
| 6338 | 6319 | ||
| 6339 | if (sp->lro) { | 6320 | if (sp->lro) { |
| 6340 | /* Initialize max aggregatable pkts based on MTU */ | 6321 | /* Initialize max aggregatable pkts per session based on MTU */ |
| 6341 | sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; | 6322 | sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; |
| 6342 | /* Check if we can use(if specified) user provided value */ | 6323 | /* Check if we can use(if specified) user provided value */ |
| 6343 | if (lro_max_pkts < sp->lro_max_aggr_per_sess) | 6324 | if (lro_max_pkts < sp->lro_max_aggr_per_sess) |
| @@ -6438,7 +6419,7 @@ static void s2io_tx_watchdog(struct net_device *dev) | |||
| 6438 | * @cksum : FCS checksum of the frame. | 6419 | * @cksum : FCS checksum of the frame. |
| 6439 | * @ring_no : the ring from which this RxD was extracted. | 6420 | * @ring_no : the ring from which this RxD was extracted. |
| 6440 | * Description: | 6421 | * Description: |
| 6441 | * This function is called by the Tx interrupt serivce routine to perform | 6422 | * This function is called by the Rx interrupt serivce routine to perform |
| 6442 | * some OS related operations on the SKB before passing it to the upper | 6423 | * some OS related operations on the SKB before passing it to the upper |
| 6443 | * layers. It mainly checks if the checksum is OK, if so adds it to the | 6424 | * layers. It mainly checks if the checksum is OK, if so adds it to the |
| 6444 | * SKBs cksum variable, increments the Rx packet count and passes the SKB | 6425 | * SKBs cksum variable, increments the Rx packet count and passes the SKB |
| @@ -6698,33 +6679,6 @@ static void s2io_init_pci(nic_t * sp) | |||
| 6698 | pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); | 6679 | pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); |
| 6699 | } | 6680 | } |
| 6700 | 6681 | ||
| 6701 | MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>"); | ||
| 6702 | MODULE_LICENSE("GPL"); | ||
| 6703 | MODULE_VERSION(DRV_VERSION); | ||
| 6704 | |||
| 6705 | module_param(tx_fifo_num, int, 0); | ||
| 6706 | module_param(rx_ring_num, int, 0); | ||
| 6707 | module_param(rx_ring_mode, int, 0); | ||
| 6708 | module_param_array(tx_fifo_len, uint, NULL, 0); | ||
| 6709 | module_param_array(rx_ring_sz, uint, NULL, 0); | ||
| 6710 | module_param_array(rts_frm_len, uint, NULL, 0); | ||
| 6711 | module_param(use_continuous_tx_intrs, int, 1); | ||
| 6712 | module_param(rmac_pause_time, int, 0); | ||
| 6713 | module_param(mc_pause_threshold_q0q3, int, 0); | ||
| 6714 | module_param(mc_pause_threshold_q4q7, int, 0); | ||
| 6715 | module_param(shared_splits, int, 0); | ||
| 6716 | module_param(tmac_util_period, int, 0); | ||
| 6717 | module_param(rmac_util_period, int, 0); | ||
| 6718 | module_param(bimodal, bool, 0); | ||
| 6719 | module_param(l3l4hdr_size, int , 0); | ||
| 6720 | #ifndef CONFIG_S2IO_NAPI | ||
| 6721 | module_param(indicate_max_pkts, int, 0); | ||
| 6722 | #endif | ||
| 6723 | module_param(rxsync_frequency, int, 0); | ||
| 6724 | module_param(intr_type, int, 0); | ||
| 6725 | module_param(lro, int, 0); | ||
| 6726 | module_param(lro_max_pkts, int, 0); | ||
| 6727 | |||
| 6728 | static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) | 6682 | static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) |
| 6729 | { | 6683 | { |
| 6730 | if ( tx_fifo_num > 8) { | 6684 | if ( tx_fifo_num > 8) { |
| @@ -6832,8 +6786,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
| 6832 | } | 6786 | } |
| 6833 | if (dev_intr_type != MSI_X) { | 6787 | if (dev_intr_type != MSI_X) { |
| 6834 | if (pci_request_regions(pdev, s2io_driver_name)) { | 6788 | if (pci_request_regions(pdev, s2io_driver_name)) { |
| 6835 | DBG_PRINT(ERR_DBG, "Request Regions failed\n"), | 6789 | DBG_PRINT(ERR_DBG, "Request Regions failed\n"); |
| 6836 | pci_disable_device(pdev); | 6790 | pci_disable_device(pdev); |
| 6837 | return -ENODEV; | 6791 | return -ENODEV; |
| 6838 | } | 6792 | } |
| 6839 | } | 6793 | } |
| @@ -6957,7 +6911,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
| 6957 | /* initialize the shared memory used by the NIC and the host */ | 6911 | /* initialize the shared memory used by the NIC and the host */ |
| 6958 | if (init_shared_mem(sp)) { | 6912 | if (init_shared_mem(sp)) { |
| 6959 | DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", | 6913 | DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", |
| 6960 | __FUNCTION__); | 6914 | dev->name); |
| 6961 | ret = -ENOMEM; | 6915 | ret = -ENOMEM; |
| 6962 | goto mem_alloc_failed; | 6916 | goto mem_alloc_failed; |
| 6963 | } | 6917 | } |
| @@ -7094,6 +7048,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
| 7094 | dev->addr_len = ETH_ALEN; | 7048 | dev->addr_len = ETH_ALEN; |
| 7095 | memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); | 7049 | memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); |
| 7096 | 7050 | ||
| 7051 | /* reset Nic and bring it to known state */ | ||
| 7052 | s2io_reset(sp); | ||
| 7053 | |||
| 7097 | /* | 7054 | /* |
| 7098 | * Initialize the tasklet status and link state flags | 7055 | * Initialize the tasklet status and link state flags |
| 7099 | * and the card state parameter | 7056 | * and the card state parameter |
| @@ -7131,11 +7088,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
| 7131 | goto register_failed; | 7088 | goto register_failed; |
| 7132 | } | 7089 | } |
| 7133 | s2io_vpd_read(sp); | 7090 | s2io_vpd_read(sp); |
| 7134 | DBG_PRINT(ERR_DBG, "%s: Neterion %s",dev->name, sp->product_name); | ||
| 7135 | DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n", | ||
| 7136 | get_xena_rev_id(sp->pdev), | ||
| 7137 | s2io_driver_version); | ||
| 7138 | DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n"); | 7091 | DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n"); |
| 7092 | DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name, | ||
| 7093 | sp->product_name, get_xena_rev_id(sp->pdev)); | ||
| 7094 | DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, | ||
| 7095 | s2io_driver_version); | ||
| 7139 | DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " | 7096 | DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " |
| 7140 | "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, | 7097 | "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, |
| 7141 | sp->def_mac_addr[0].mac_addr[0], | 7098 | sp->def_mac_addr[0].mac_addr[0], |
| @@ -7436,8 +7393,13 @@ static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip, | |||
| 7436 | if (ip->ihl != 5) /* IP has options */ | 7393 | if (ip->ihl != 5) /* IP has options */ |
| 7437 | return -1; | 7394 | return -1; |
| 7438 | 7395 | ||
| 7396 | /* If we see CE codepoint in IP header, packet is not mergeable */ | ||
| 7397 | if (INET_ECN_is_ce(ipv4_get_dsfield(ip))) | ||
| 7398 | return -1; | ||
| 7399 | |||
| 7400 | /* If we see ECE or CWR flags in TCP header, packet is not mergeable */ | ||
| 7439 | if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin || | 7401 | if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin || |
| 7440 | !tcp->ack) { | 7402 | tcp->ece || tcp->cwr || !tcp->ack) { |
| 7441 | /* | 7403 | /* |
| 7442 | * Currently recognize only the ack control word and | 7404 | * Currently recognize only the ack control word and |
| 7443 | * any other control field being set would result in | 7405 | * any other control field being set would result in |
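Editor's note: the new checks above keep congestion-marked traffic out of LRO aggregation, since a CE codepoint in the IP header or ECE/CWR in the TCP header must reach the stack unmerged or the congestion signal is lost. A self-contained user-space restatement of the same test (the field layout is simplified; the real code reads struct iphdr and struct tcphdr):

        #include <stdbool.h>
        #include <stdint.h>

        struct tcp_flags { unsigned ece:1, cwr:1, urg:1, psh:1,
                           rst:1, syn:1, fin:1, ack:1; };

        static bool lro_mergeable(uint8_t ip_tos, const struct tcp_flags *f)
        {
                if ((ip_tos & 0x03) == 0x03)    /* ECN CE codepoint set */
                        return false;
                if (f->ece || f->cwr)           /* TCP echoing congestion state */
                        return false;
                /* only plain ACK segments are candidates for merging */
                if (f->urg || f->psh || f->rst || f->syn || f->fin || !f->ack)
                        return false;
                return true;
        }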
| @@ -7591,18 +7553,16 @@ static void queue_rx_frame(struct sk_buff *skb) | |||
| 7591 | static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, | 7553 | static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, |
| 7592 | u32 tcp_len) | 7554 | u32 tcp_len) |
| 7593 | { | 7555 | { |
| 7594 | struct sk_buff *tmp, *first = lro->parent; | 7556 | struct sk_buff *first = lro->parent; |
| 7595 | 7557 | ||
| 7596 | first->len += tcp_len; | 7558 | first->len += tcp_len; |
| 7597 | first->data_len = lro->frags_len; | 7559 | first->data_len = lro->frags_len; |
| 7598 | skb_pull(skb, (skb->len - tcp_len)); | 7560 | skb_pull(skb, (skb->len - tcp_len)); |
| 7599 | if ((tmp = skb_shinfo(first)->frag_list)) { | 7561 | if (skb_shinfo(first)->frag_list) |
| 7600 | while (tmp->next) | 7562 | lro->last_frag->next = skb; |
| 7601 | tmp = tmp->next; | ||
| 7602 | tmp->next = skb; | ||
| 7603 | } | ||
| 7604 | else | 7563 | else |
| 7605 | skb_shinfo(first)->frag_list = skb; | 7564 | skb_shinfo(first)->frag_list = skb; |
| 7565 | lro->last_frag = skb; | ||
| 7606 | sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; | 7566 | sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; |
| 7607 | return; | 7567 | return; |
| 7608 | } | 7568 | } |
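Editor's note: lro_append_pkt() previously walked the whole frag_list to find the last fragment before chaining the new skb; with the last_frag pointer added to struct lro (see the s2io.h change below) the append becomes constant-time. A generic, compilable illustration of the same idea, caching the tail of a singly linked list (plain C, not driver code):

        #include <stddef.h>

        struct frag    { struct frag *next; };
        struct session { struct frag *head, *tail; };

        static void append_frag(struct session *s, struct frag *f)
        {
                f->next = NULL;
                if (s->head)
                        s->tail->next = f;  /* O(1): no walk over earlier frags */
                else
                        s->head = f;
                s->tail = f;                /* remember the new tail */
        }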
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 217097bc22f1..5ed49c3be1e9 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
| @@ -719,6 +719,7 @@ struct msix_info_st { | |||
| 719 | /* Data structure to represent a LRO session */ | 719 | /* Data structure to represent a LRO session */ |
| 720 | typedef struct lro { | 720 | typedef struct lro { |
| 721 | struct sk_buff *parent; | 721 | struct sk_buff *parent; |
| 722 | struct sk_buff *last_frag; | ||
| 722 | u8 *l2h; | 723 | u8 *l2h; |
| 723 | struct iphdr *iph; | 724 | struct iphdr *iph; |
| 724 | struct tcphdr *tcph; | 725 | struct tcphdr *tcph; |
| @@ -1011,4 +1012,13 @@ static void clear_lro_session(lro_t *lro); | |||
| 1011 | static void queue_rx_frame(struct sk_buff *skb); | 1012 | static void queue_rx_frame(struct sk_buff *skb); |
| 1012 | static void update_L3L4_header(nic_t *sp, lro_t *lro); | 1013 | static void update_L3L4_header(nic_t *sp, lro_t *lro); |
| 1013 | static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len); | 1014 | static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len); |
| 1015 | |||
| 1016 | #define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size | ||
| 1017 | #define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size | ||
| 1018 | #define s2io_offload_type(skb) skb_shinfo(skb)->gso_type | ||
| 1019 | |||
| 1020 | #define S2IO_PARM_INT(X, def_val) \ | ||
| 1021 | static unsigned int X = def_val;\ | ||
| 1022 | module_param(X , uint, 0); | ||
| 1023 | |||
| 1014 | #endif /* _S2IO_H */ | 1024 | #endif /* _S2IO_H */ |
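Editor's note: the module_param() block deleted from s2io.c earlier in this patch is re-created through per-parameter uses of the S2IO_PARM_INT() macro defined here. A hedged illustration of how one declaration collapses; the parameter name is taken from the removed list, but the default value shown is only an example:

        /* before: variable definition plus a separate module_param() line */
        static unsigned int rx_ring_num = 1;
        module_param(rx_ring_num, uint, 0);

        /* after: a single macro invocation expands to both statements */
        S2IO_PARM_INT(rx_ring_num, 1);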
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c index efc9c4bd826f..da9d06bdb818 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.c +++ b/drivers/net/wireless/zd1211rw/zd_chip.c | |||
| @@ -797,7 +797,7 @@ static int zd1211_hw_init_hmac(struct zd_chip *chip) | |||
| 797 | { CR_ADDA_MBIAS_WARMTIME, 0x30000808 }, | 797 | { CR_ADDA_MBIAS_WARMTIME, 0x30000808 }, |
| 798 | { CR_ZD1211_RETRY_MAX, 0x2 }, | 798 | { CR_ZD1211_RETRY_MAX, 0x2 }, |
| 799 | { CR_SNIFFER_ON, 0 }, | 799 | { CR_SNIFFER_ON, 0 }, |
| 800 | { CR_RX_FILTER, AP_RX_FILTER }, | 800 | { CR_RX_FILTER, STA_RX_FILTER }, |
| 801 | { CR_GROUP_HASH_P1, 0x00 }, | 801 | { CR_GROUP_HASH_P1, 0x00 }, |
| 802 | { CR_GROUP_HASH_P2, 0x80000000 }, | 802 | { CR_GROUP_HASH_P2, 0x80000000 }, |
| 803 | { CR_REG1, 0xa4 }, | 803 | { CR_REG1, 0xa4 }, |
| @@ -844,7 +844,7 @@ static int zd1211b_hw_init_hmac(struct zd_chip *chip) | |||
| 844 | { CR_ZD1211B_AIFS_CTL2, 0x008C003C }, | 844 | { CR_ZD1211B_AIFS_CTL2, 0x008C003C }, |
| 845 | { CR_ZD1211B_TXOP, 0x01800824 }, | 845 | { CR_ZD1211B_TXOP, 0x01800824 }, |
| 846 | { CR_SNIFFER_ON, 0 }, | 846 | { CR_SNIFFER_ON, 0 }, |
| 847 | { CR_RX_FILTER, AP_RX_FILTER }, | 847 | { CR_RX_FILTER, STA_RX_FILTER }, |
| 848 | { CR_GROUP_HASH_P1, 0x00 }, | 848 | { CR_GROUP_HASH_P1, 0x00 }, |
| 849 | { CR_GROUP_HASH_P2, 0x80000000 }, | 849 | { CR_GROUP_HASH_P2, 0x80000000 }, |
| 850 | { CR_REG1, 0xa4 }, | 850 | { CR_REG1, 0xa4 }, |
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h index 805121093ab5..069d2b467339 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.h +++ b/drivers/net/wireless/zd1211rw/zd_chip.h | |||
| @@ -461,10 +461,15 @@ | |||
| 461 | 461 | ||
| 462 | #define CR_RX_FILTER CTL_REG(0x068c) | 462 | #define CR_RX_FILTER CTL_REG(0x068c) |
| 463 | #define RX_FILTER_ASSOC_RESPONSE 0x0002 | 463 | #define RX_FILTER_ASSOC_RESPONSE 0x0002 |
| 464 | #define RX_FILTER_REASSOC_RESPONSE 0x0008 | ||
| 464 | #define RX_FILTER_PROBE_RESPONSE 0x0020 | 465 | #define RX_FILTER_PROBE_RESPONSE 0x0020 |
| 465 | #define RX_FILTER_BEACON 0x0100 | 466 | #define RX_FILTER_BEACON 0x0100 |
| 467 | #define RX_FILTER_DISASSOC 0x0400 | ||
| 466 | #define RX_FILTER_AUTH 0x0800 | 468 | #define RX_FILTER_AUTH 0x0800 |
| 467 | /* Sniff modus sets filter to 0xfffff */ | 469 | #define AP_RX_FILTER 0x0400feff |
| 470 | #define STA_RX_FILTER 0x0000ffff | ||
| 471 | |||
| 472 | /* Monitor mode sets filter to 0xfffff */ | ||
| 468 | 473 | ||
| 469 | #define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690) | 474 | #define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690) |
| 470 | #define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694) | 475 | #define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694) |
| @@ -546,9 +551,6 @@ | |||
| 546 | #define CR_ZD1211B_TXOP CTL_REG(0x0b20) | 551 | #define CR_ZD1211B_TXOP CTL_REG(0x0b20) |
| 547 | #define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28) | 552 | #define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28) |
| 548 | 553 | ||
| 549 | #define AP_RX_FILTER 0x0400feff | ||
| 550 | #define STA_RX_FILTER 0x0000ffff | ||
| 551 | |||
| 552 | #define CWIN_SIZE 0x007f043f | 554 | #define CWIN_SIZE 0x007f043f |
| 553 | 555 | ||
| 554 | 556 | ||
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 3bdc54d128d0..d6f3e02a0b54 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c | |||
| @@ -108,7 +108,9 @@ int zd_mac_init_hw(struct zd_mac *mac, u8 device_type) | |||
| 108 | if (r) | 108 | if (r) |
| 109 | goto disable_int; | 109 | goto disable_int; |
| 110 | 110 | ||
| 111 | r = zd_set_encryption_type(chip, NO_WEP); | 111 | /* We must inform the device that we are doing encryption/decryption in |
| 112 | * software at the moment. */ | ||
| 113 | r = zd_set_encryption_type(chip, ENC_SNIFFER); | ||
| 112 | if (r) | 114 | if (r) |
| 113 | goto disable_int; | 115 | goto disable_int; |
| 114 | 116 | ||
| @@ -136,10 +138,8 @@ static int reset_mode(struct zd_mac *mac) | |||
| 136 | { | 138 | { |
| 137 | struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); | 139 | struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); |
| 138 | struct zd_ioreq32 ioreqs[3] = { | 140 | struct zd_ioreq32 ioreqs[3] = { |
| 139 | { CR_RX_FILTER, RX_FILTER_BEACON|RX_FILTER_PROBE_RESPONSE| | 141 | { CR_RX_FILTER, STA_RX_FILTER }, |
| 140 | RX_FILTER_AUTH|RX_FILTER_ASSOC_RESPONSE }, | ||
| 141 | { CR_SNIFFER_ON, 0U }, | 142 | { CR_SNIFFER_ON, 0U }, |
| 142 | { CR_ENCRYPTION_TYPE, NO_WEP }, | ||
| 143 | }; | 143 | }; |
| 144 | 144 | ||
| 145 | if (ieee->iw_mode == IW_MODE_MONITOR) { | 145 | if (ieee->iw_mode == IW_MODE_MONITOR) { |
| @@ -713,10 +713,10 @@ static int zd_mac_tx(struct zd_mac *mac, struct ieee80211_txb *txb, int pri) | |||
| 713 | struct zd_rt_hdr { | 713 | struct zd_rt_hdr { |
| 714 | struct ieee80211_radiotap_header rt_hdr; | 714 | struct ieee80211_radiotap_header rt_hdr; |
| 715 | u8 rt_flags; | 715 | u8 rt_flags; |
| 716 | u8 rt_rate; | ||
| 716 | u16 rt_channel; | 717 | u16 rt_channel; |
| 717 | u16 rt_chbitmask; | 718 | u16 rt_chbitmask; |
| 718 | u16 rt_rate; | 719 | } __attribute__((packed)); |
| 719 | }; | ||
| 720 | 720 | ||
| 721 | static void fill_rt_header(void *buffer, struct zd_mac *mac, | 721 | static void fill_rt_header(void *buffer, struct zd_mac *mac, |
| 722 | const struct ieee80211_rx_stats *stats, | 722 | const struct ieee80211_rx_stats *stats, |
| @@ -735,14 +735,14 @@ static void fill_rt_header(void *buffer, struct zd_mac *mac, | |||
| 735 | if (status->decryption_type & (ZD_RX_WEP64|ZD_RX_WEP128|ZD_RX_WEP256)) | 735 | if (status->decryption_type & (ZD_RX_WEP64|ZD_RX_WEP128|ZD_RX_WEP256)) |
| 736 | hdr->rt_flags |= IEEE80211_RADIOTAP_F_WEP; | 736 | hdr->rt_flags |= IEEE80211_RADIOTAP_F_WEP; |
| 737 | 737 | ||
| 738 | hdr->rt_rate = stats->rate / 5; | ||
| 739 | |||
| 738 | /* FIXME: 802.11a */ | 740 | /* FIXME: 802.11a */ |
| 739 | hdr->rt_channel = cpu_to_le16(ieee80211chan2mhz( | 741 | hdr->rt_channel = cpu_to_le16(ieee80211chan2mhz( |
| 740 | _zd_chip_get_channel(&mac->chip))); | 742 | _zd_chip_get_channel(&mac->chip))); |
| 741 | hdr->rt_chbitmask = cpu_to_le16(IEEE80211_CHAN_2GHZ | | 743 | hdr->rt_chbitmask = cpu_to_le16(IEEE80211_CHAN_2GHZ | |
| 742 | ((status->frame_status & ZD_RX_FRAME_MODULATION_MASK) == | 744 | ((status->frame_status & ZD_RX_FRAME_MODULATION_MASK) == |
| 743 | ZD_RX_OFDM ? IEEE80211_CHAN_OFDM : IEEE80211_CHAN_CCK)); | 745 | ZD_RX_OFDM ? IEEE80211_CHAN_OFDM : IEEE80211_CHAN_CCK)); |
| 744 | |||
| 745 | hdr->rt_rate = stats->rate / 5; | ||
| 746 | } | 746 | } |
| 747 | 747 | ||
| 748 | /* Returns 1 if the data packet is for us and 0 otherwise. */ | 748 | /* Returns 1 if the data packet is for us and 0 otherwise. */ |
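Editor's note: the radiotap fix above moves rt_rate directly after rt_flags, shrinks it to one byte, and packs the structure, because radiotap consumers expect per-field data in ascending bit order (FLAGS, RATE, CHANNEL, ...) with no compiler-inserted padding. A hedged sketch of the resulting layout; the generic preamble fields and names here are illustrative, not copied from the driver:

        #include <stdint.h>

        struct rt_hdr_sketch {
                uint8_t  it_version;      /* generic radiotap preamble */
                uint8_t  it_pad;
                uint16_t it_len;
                uint32_t it_present;
                uint8_t  rt_flags;        /* RADIOTAP_FLAGS   (bit 1) */
                uint8_t  rt_rate;         /* RADIOTAP_RATE    (bit 2) */
                uint16_t rt_channel;      /* RADIOTAP_CHANNEL (bit 3) */
                uint16_t rt_chbitmask;
        } __attribute__((packed));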
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index 72f90525bf68..6320984126c7 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c | |||
| @@ -323,7 +323,6 @@ static void disable_read_regs_int(struct zd_usb *usb) | |||
| 323 | { | 323 | { |
| 324 | struct zd_usb_interrupt *intr = &usb->intr; | 324 | struct zd_usb_interrupt *intr = &usb->intr; |
| 325 | 325 | ||
| 326 | ZD_ASSERT(in_interrupt()); | ||
| 327 | spin_lock(&intr->lock); | 326 | spin_lock(&intr->lock); |
| 328 | intr->read_regs_enabled = 0; | 327 | intr->read_regs_enabled = 0; |
| 329 | spin_unlock(&intr->lock); | 328 | spin_unlock(&intr->lock); |
| @@ -545,11 +544,11 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer, | |||
| 545 | * be padded. Unaligned access might also happen if the length_info | 544 | * be padded. Unaligned access might also happen if the length_info |
| 546 | * structure is not present. | 545 | * structure is not present. |
| 547 | */ | 546 | */ |
| 548 | if (get_unaligned(&length_info->tag) == RX_LENGTH_INFO_TAG) { | 547 | if (get_unaligned(&length_info->tag) == cpu_to_le16(RX_LENGTH_INFO_TAG)) |
| 548 | { | ||
| 549 | unsigned int l, k, n; | 549 | unsigned int l, k, n; |
| 550 | for (i = 0, l = 0;; i++) { | 550 | for (i = 0, l = 0;; i++) { |
| 551 | k = le16_to_cpu(get_unaligned( | 551 | k = le16_to_cpu(get_unaligned(&length_info->length[i])); |
| 552 | &length_info->length[i])); | ||
| 553 | n = l+k; | 552 | n = l+k; |
| 554 | if (n > length) | 553 | if (n > length) |
| 555 | return; | 554 | return; |
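Editor's note: the comparison in handle_rx_packet() now converts the constant to wire order with cpu_to_le16() instead of byte-swapping the value loaded from the little-endian buffer; the result is the same, but the conversion happens at compile time and the loaded data keeps its little-endian type. A hedged sketch of the pattern, using the driver's names but showing only the shape of the test:

        /* tag holds raw little-endian data read from the USB buffer */
        __le16 tag = get_unaligned(&length_info->tag);

        if (tag == cpu_to_le16(RX_LENGTH_INFO_TAG)) {
                /* a trailing length_info block is present; parse it */
        }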
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c index 34de5697983d..e2fef60c2d06 100644 --- a/drivers/pci/hotplug/acpiphp_core.c +++ b/drivers/pci/hotplug/acpiphp_core.c | |||
| @@ -27,8 +27,7 @@ | |||
| 27 | * along with this program; if not, write to the Free Software | 27 | * along with this program; if not, write to the Free Software |
| 28 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 28 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
| 29 | * | 29 | * |
| 30 | * Send feedback to <gregkh@us.ibm.com>, | 30 | * Send feedback to <kristen.c.accardi@intel.com> |
| 31 | * <t-kochi@bq.jp.nec.com> | ||
| 32 | * | 31 | * |
| 33 | */ | 32 | */ |
| 34 | 33 | ||
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index ef95d12fb32c..ae67a8f55ba1 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | * along with this program; if not, write to the Free Software | 26 | * along with this program; if not, write to the Free Software |
| 27 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 27 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
| 28 | * | 28 | * |
| 29 | * Send feedback to <t-kochi@bq.jp.nec.com> | 29 | * Send feedback to <kristen.c.accardi@intel.com> |
| 30 | * | 30 | * |
| 31 | */ | 31 | */ |
| 32 | 32 | ||
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 50bfc1b2f3bf..478d0d28f7ad 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
| @@ -30,23 +30,6 @@ MODULE_LICENSE("GPL"); | |||
| 30 | /* global data */ | 30 | /* global data */ |
| 31 | static const char device_name[] = "pcieport-driver"; | 31 | static const char device_name[] = "pcieport-driver"; |
| 32 | 32 | ||
| 33 | static int pcie_portdrv_save_config(struct pci_dev *dev) | ||
| 34 | { | ||
| 35 | return pci_save_state(dev); | ||
| 36 | } | ||
| 37 | |||
| 38 | static int pcie_portdrv_restore_config(struct pci_dev *dev) | ||
| 39 | { | ||
| 40 | int retval; | ||
| 41 | |||
| 42 | pci_restore_state(dev); | ||
| 43 | retval = pci_enable_device(dev); | ||
| 44 | if (retval) | ||
| 45 | return retval; | ||
| 46 | pci_set_master(dev); | ||
| 47 | return 0; | ||
| 48 | } | ||
| 49 | |||
| 50 | /* | 33 | /* |
| 51 | * pcie_portdrv_probe - Probe PCI-Express port devices | 34 | * pcie_portdrv_probe - Probe PCI-Express port devices |
| 52 | * @dev: PCI-Express port device being probed | 35 | * @dev: PCI-Express port device being probed |
| @@ -73,8 +56,10 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev, | |||
| 73 | "%s->Dev[%04x:%04x] has invalid IRQ. Check vendor BIOS\n", | 56 | "%s->Dev[%04x:%04x] has invalid IRQ. Check vendor BIOS\n", |
| 74 | __FUNCTION__, dev->device, dev->vendor); | 57 | __FUNCTION__, dev->device, dev->vendor); |
| 75 | } | 58 | } |
| 76 | if (pcie_port_device_register(dev)) | 59 | if (pcie_port_device_register(dev)) { |
| 60 | pci_disable_device(dev); | ||
| 77 | return -ENOMEM; | 61 | return -ENOMEM; |
| 62 | } | ||
| 78 | 63 | ||
| 79 | return 0; | 64 | return 0; |
| 80 | } | 65 | } |
| @@ -86,6 +71,23 @@ static void pcie_portdrv_remove (struct pci_dev *dev) | |||
| 86 | } | 71 | } |
| 87 | 72 | ||
| 88 | #ifdef CONFIG_PM | 73 | #ifdef CONFIG_PM |
| 74 | static int pcie_portdrv_save_config(struct pci_dev *dev) | ||
| 75 | { | ||
| 76 | return pci_save_state(dev); | ||
| 77 | } | ||
| 78 | |||
| 79 | static int pcie_portdrv_restore_config(struct pci_dev *dev) | ||
| 80 | { | ||
| 81 | int retval; | ||
| 82 | |||
| 83 | pci_restore_state(dev); | ||
| 84 | retval = pci_enable_device(dev); | ||
| 85 | if (retval) | ||
| 86 | return retval; | ||
| 87 | pci_set_master(dev); | ||
| 88 | return 0; | ||
| 89 | } | ||
| 90 | |||
| 89 | static int pcie_portdrv_suspend (struct pci_dev *dev, pm_message_t state) | 91 | static int pcie_portdrv_suspend (struct pci_dev *dev, pm_message_t state) |
| 90 | { | 92 | { |
| 91 | int ret = pcie_port_device_suspend(dev, state); | 93 | int ret = pcie_port_device_suspend(dev, state); |
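Editor's note: besides moving the save/restore helpers under CONFIG_PM, the probe path above now disables the PCI device again when pcie_port_device_register() fails, so the error path undoes what the function enabled. A minimal sketch of that cleanup pattern; the probe is simplified and the error code is only illustrative:

        static int example_probe(struct pci_dev *dev)
        {
                int rc;

                rc = pci_enable_device(dev);
                if (rc)
                        return rc;

                if (pcie_port_device_register(dev)) {
                        pci_disable_device(dev);   /* undo the enable */
                        return -ENOMEM;
                }
                return 0;
        }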
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index e3c78c39b7e4..fb08bc951ac0 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -990,6 +990,11 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) | |||
| 990 | case 0x8070: /* P4G8X Deluxe */ | 990 | case 0x8070: /* P4G8X Deluxe */ |
| 991 | asus_hides_smbus = 1; | 991 | asus_hides_smbus = 1; |
| 992 | } | 992 | } |
| 993 | if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH) | ||
| 994 | switch (dev->subsystem_device) { | ||
| 995 | case 0x80c9: /* PU-DLS */ | ||
| 996 | asus_hides_smbus = 1; | ||
| 997 | } | ||
| 993 | if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) | 998 | if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) |
| 994 | switch (dev->subsystem_device) { | 999 | switch (dev->subsystem_device) { |
| 995 | case 0x1751: /* M2N notebook */ | 1000 | case 0x1751: /* M2N notebook */ |
| @@ -1058,6 +1063,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asu | |||
| 1058 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge ); | 1063 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge ); |
| 1059 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge ); | 1064 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge ); |
| 1060 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge ); | 1065 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge ); |
| 1066 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge ); | ||
| 1061 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge ); | 1067 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge ); |
| 1062 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge ); | 1068 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge ); |
| 1063 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge ); | 1069 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge ); |
| @@ -1081,6 +1087,7 @@ static void __init asus_hides_smbus_lpc(struct pci_dev *dev) | |||
| 1081 | } | 1087 | } |
| 1082 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc ); | 1088 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc ); |
| 1083 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc ); | 1089 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc ); |
| 1090 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc ); | ||
| 1084 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc ); | 1091 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc ); |
| 1085 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc ); | 1092 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc ); |
| 1086 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc ); | 1093 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc ); |
diff --git a/drivers/pci/search.c b/drivers/pci/search.c index f8ae2b7db0a7..d529462d1b53 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c | |||
| @@ -41,7 +41,7 @@ pci_do_find_bus(struct pci_bus* bus, unsigned char busnr) | |||
| 41 | * in the global list of PCI buses. If the bus is found, a pointer to its | 41 | * in the global list of PCI buses. If the bus is found, a pointer to its |
| 42 | * data structure is returned. If no bus is found, %NULL is returned. | 42 | * data structure is returned. If no bus is found, %NULL is returned. |
| 43 | */ | 43 | */ |
| 44 | struct pci_bus * __devinit pci_find_bus(int domain, int busnr) | 44 | struct pci_bus * pci_find_bus(int domain, int busnr) |
| 45 | { | 45 | { |
| 46 | struct pci_bus *bus = NULL; | 46 | struct pci_bus *bus = NULL; |
| 47 | struct pci_bus *tmp_bus; | 47 | struct pci_bus *tmp_bus; |
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c index 3163e3d73da1..9d8b415eca79 100644 --- a/drivers/pnp/interface.c +++ b/drivers/pnp/interface.c | |||
| @@ -265,8 +265,8 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at | |||
| 265 | pnp_printf(buffer," disabled\n"); | 265 | pnp_printf(buffer," disabled\n"); |
| 266 | else | 266 | else |
| 267 | pnp_printf(buffer," 0x%llx-0x%llx\n", | 267 | pnp_printf(buffer," 0x%llx-0x%llx\n", |
| 268 | pnp_port_start(dev, i), | 268 | (unsigned long long)pnp_port_start(dev, i), |
| 269 | pnp_port_end(dev, i)); | 269 | (unsigned long long)pnp_port_end(dev, i)); |
| 270 | } | 270 | } |
| 271 | } | 271 | } |
| 272 | for (i = 0; i < PNP_MAX_MEM; i++) { | 272 | for (i = 0; i < PNP_MAX_MEM; i++) { |
| @@ -276,8 +276,8 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at | |||
| 276 | pnp_printf(buffer," disabled\n"); | 276 | pnp_printf(buffer," disabled\n"); |
| 277 | else | 277 | else |
| 278 | pnp_printf(buffer," 0x%llx-0x%llx\n", | 278 | pnp_printf(buffer," 0x%llx-0x%llx\n", |
| 279 | pnp_mem_start(dev, i), | 279 | (unsigned long long)pnp_mem_start(dev, i), |
| 280 | pnp_mem_end(dev, i)); | 280 | (unsigned long long)pnp_mem_end(dev, i)); |
| 281 | } | 281 | } |
| 282 | } | 282 | } |
| 283 | for (i = 0; i < PNP_MAX_IRQ; i++) { | 283 | for (i = 0; i < PNP_MAX_IRQ; i++) { |
| @@ -287,7 +287,7 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at | |||
| 287 | pnp_printf(buffer," disabled\n"); | 287 | pnp_printf(buffer," disabled\n"); |
| 288 | else | 288 | else |
| 289 | pnp_printf(buffer," %lld\n", | 289 | pnp_printf(buffer," %lld\n", |
| 290 | pnp_irq(dev, i)); | 290 | (unsigned long long)pnp_irq(dev, i)); |
| 291 | } | 291 | } |
| 292 | } | 292 | } |
| 293 | for (i = 0; i < PNP_MAX_DMA; i++) { | 293 | for (i = 0; i < PNP_MAX_DMA; i++) { |
| @@ -297,7 +297,7 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at | |||
| 297 | pnp_printf(buffer," disabled\n"); | 297 | pnp_printf(buffer," disabled\n"); |
| 298 | else | 298 | else |
| 299 | pnp_printf(buffer," %lld\n", | 299 | pnp_printf(buffer," %lld\n", |
| 300 | pnp_dma(dev, i)); | 300 | (unsigned long long)pnp_dma(dev, i)); |
| 301 | } | 301 | } |
| 302 | } | 302 | } |
| 303 | ret = (buffer->curr - buf); | 303 | ret = (buffer->curr - buf); |
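Editor's note: the casts added above are needed because pnp_port_start() and the other accessors return resource-sized types whose width differs between architectures, while "%llx"/"%lld" always expect a (unsigned) long long argument; without the cast the varargs call is undefined on 32-bit builds. A small user-space illustration of the rule (the address values are made up):

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t start = 0xfed40000u, end = 0xfed44fffu;

                /* cast each argument to match the %llx length modifier */
                printf("0x%llx-0x%llx\n",
                       (unsigned long long)start, (unsigned long long)end);
                return 0;
        }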
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 212268881857..dc79b0a0059f 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c | |||
| @@ -173,6 +173,9 @@ pnpacpi_parse_allocated_address_space(struct pnp_resource_table *res_table, | |||
| 173 | return; | 173 | return; |
| 174 | } | 174 | } |
| 175 | 175 | ||
| 176 | if (p->producer_consumer == ACPI_PRODUCER) | ||
| 177 | return; | ||
| 178 | |||
| 176 | if (p->resource_type == ACPI_MEMORY_RANGE) | 179 | if (p->resource_type == ACPI_MEMORY_RANGE) |
| 177 | pnpacpi_parse_allocated_memresource(res_table, | 180 | pnpacpi_parse_allocated_memresource(res_table, |
| 178 | p->minimum, p->address_length); | 181 | p->minimum, p->address_length); |
| @@ -252,9 +255,14 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | |||
| 252 | break; | 255 | break; |
| 253 | 256 | ||
| 254 | case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: | 257 | case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: |
| 258 | if (res->data.ext_address64.producer_consumer == ACPI_PRODUCER) | ||
| 259 | return AE_OK; | ||
| 255 | break; | 260 | break; |
| 256 | 261 | ||
| 257 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: | 262 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: |
| 263 | if (res->data.extended_irq.producer_consumer == ACPI_PRODUCER) | ||
| 264 | return AE_OK; | ||
| 265 | |||
| 258 | for (i = 0; i < res->data.extended_irq.interrupt_count; i++) { | 266 | for (i = 0; i < res->data.extended_irq.interrupt_count; i++) { |
| 259 | pnpacpi_parse_allocated_irqresource(res_table, | 267 | pnpacpi_parse_allocated_irqresource(res_table, |
| 260 | res->data.extended_irq.interrupts[i], | 268 | res->data.extended_irq.interrupts[i], |
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c index 77e7202a0eba..904c25fb4ba4 100644 --- a/drivers/scsi/ahci.c +++ b/drivers/scsi/ahci.c | |||
| @@ -940,14 +940,8 @@ static void ahci_host_intr(struct ata_port *ap) | |||
| 940 | return; | 940 | return; |
| 941 | 941 | ||
| 942 | /* ignore interim PIO setup fis interrupts */ | 942 | /* ignore interim PIO setup fis interrupts */ |
| 943 | if (ata_tag_valid(ap->active_tag)) { | 943 | if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS)) |
| 944 | struct ata_queued_cmd *qc = | 944 | return; |
| 945 | ata_qc_from_tag(ap, ap->active_tag); | ||
| 946 | |||
| 947 | if (qc && qc->tf.protocol == ATA_PROT_PIO && | ||
| 948 | (status & PORT_IRQ_PIOS_FIS)) | ||
| 949 | return; | ||
| 950 | } | ||
| 951 | 945 | ||
| 952 | if (ata_ratelimit()) | 946 | if (ata_ratelimit()) |
| 953 | ata_port_printk(ap, KERN_INFO, "spurious interrupt " | 947 | ata_port_printk(ap, KERN_INFO, "spurious interrupt " |
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index 8b08121b390b..3e827e04a2aa 100644 --- a/drivers/video/aty/aty128fb.c +++ b/drivers/video/aty/aty128fb.c | |||
| @@ -1913,9 +1913,6 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i | |||
| 1913 | u8 chip_rev; | 1913 | u8 chip_rev; |
| 1914 | u32 dac; | 1914 | u32 dac; |
| 1915 | 1915 | ||
| 1916 | if (!par->vram_size) /* may have already been probed */ | ||
| 1917 | par->vram_size = aty_ld_le32(CONFIG_MEMSIZE) & 0x03FFFFFF; | ||
| 1918 | |||
| 1919 | /* Get the chip revision */ | 1916 | /* Get the chip revision */ |
| 1920 | chip_rev = (aty_ld_le32(CONFIG_CNTL) >> 16) & 0x1F; | 1917 | chip_rev = (aty_ld_le32(CONFIG_CNTL) >> 16) & 0x1F; |
| 1921 | 1918 | ||
| @@ -2028,9 +2025,6 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i | |||
| 2028 | 2025 | ||
| 2029 | aty128_init_engine(par); | 2026 | aty128_init_engine(par); |
| 2030 | 2027 | ||
| 2031 | if (register_framebuffer(info) < 0) | ||
| 2032 | return 0; | ||
| 2033 | |||
| 2034 | par->pm_reg = pci_find_capability(pdev, PCI_CAP_ID_PM); | 2028 | par->pm_reg = pci_find_capability(pdev, PCI_CAP_ID_PM); |
| 2035 | par->pdev = pdev; | 2029 | par->pdev = pdev; |
| 2036 | par->asleep = 0; | 2030 | par->asleep = 0; |
| @@ -2040,6 +2034,9 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i | |||
| 2040 | aty128_bl_init(par); | 2034 | aty128_bl_init(par); |
| 2041 | #endif | 2035 | #endif |
| 2042 | 2036 | ||
| 2037 | if (register_framebuffer(info) < 0) | ||
| 2038 | return 0; | ||
| 2039 | |||
| 2043 | printk(KERN_INFO "fb%d: %s frame buffer device on %s\n", | 2040 | printk(KERN_INFO "fb%d: %s frame buffer device on %s\n", |
| 2044 | info->node, info->fix.id, video_card); | 2041 | info->node, info->fix.id, video_card); |
| 2045 | 2042 | ||
| @@ -2089,7 +2086,6 @@ static int __devinit aty128_probe(struct pci_dev *pdev, const struct pci_device_ | |||
| 2089 | par = info->par; | 2086 | par = info->par; |
| 2090 | 2087 | ||
| 2091 | info->pseudo_palette = par->pseudo_palette; | 2088 | info->pseudo_palette = par->pseudo_palette; |
| 2092 | info->fix = aty128fb_fix; | ||
| 2093 | 2089 | ||
| 2094 | /* Virtualize mmio region */ | 2090 | /* Virtualize mmio region */ |
| 2095 | info->fix.mmio_start = reg_addr; | 2091 | info->fix.mmio_start = reg_addr; |
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c index a92a91fef16f..f25d5d648333 100644 --- a/drivers/video/au1100fb.c +++ b/drivers/video/au1100fb.c | |||
| @@ -156,7 +156,7 @@ int au1100fb_setmode(struct au1100fb_device *fbdev) | |||
| 156 | 156 | ||
| 157 | info->fix.visual = FB_VISUAL_TRUECOLOR; | 157 | info->fix.visual = FB_VISUAL_TRUECOLOR; |
| 158 | info->fix.line_length = info->var.xres_virtual << 1; /* depth=16 */ | 158 | info->fix.line_length = info->var.xres_virtual << 1; /* depth=16 */ |
| 159 | } | 159 | } |
| 160 | } else { | 160 | } else { |
| 161 | /* mono */ | 161 | /* mono */ |
| 162 | info->fix.visual = FB_VISUAL_MONO10; | 162 | info->fix.visual = FB_VISUAL_MONO10; |
| @@ -164,20 +164,16 @@ int au1100fb_setmode(struct au1100fb_device *fbdev) | |||
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | info->screen_size = info->fix.line_length * info->var.yres_virtual; | 166 | info->screen_size = info->fix.line_length * info->var.yres_virtual; |
| 167 | info->var.rotate = ((fbdev->panel->control_base&LCD_CONTROL_SM_MASK) \ | ||
| 168 | >> LCD_CONTROL_SM_BIT) * 90; | ||
| 167 | 169 | ||
| 168 | /* Determine BPP mode and format */ | 170 | /* Determine BPP mode and format */ |
| 169 | fbdev->regs->lcd_control = fbdev->panel->control_base | | 171 | fbdev->regs->lcd_control = fbdev->panel->control_base; |
| 170 | ((info->var.rotate/90) << LCD_CONTROL_SM_BIT); | ||
| 171 | |||
| 172 | fbdev->regs->lcd_intenable = 0; | ||
| 173 | fbdev->regs->lcd_intstatus = 0; | ||
| 174 | |||
| 175 | fbdev->regs->lcd_horztiming = fbdev->panel->horztiming; | 172 | fbdev->regs->lcd_horztiming = fbdev->panel->horztiming; |
| 176 | |||
| 177 | fbdev->regs->lcd_verttiming = fbdev->panel->verttiming; | 173 | fbdev->regs->lcd_verttiming = fbdev->panel->verttiming; |
| 178 | |||
| 179 | fbdev->regs->lcd_clkcontrol = fbdev->panel->clkcontrol_base; | 174 | fbdev->regs->lcd_clkcontrol = fbdev->panel->clkcontrol_base; |
| 180 | 175 | fbdev->regs->lcd_intenable = 0; | |
| 176 | fbdev->regs->lcd_intstatus = 0; | ||
| 181 | fbdev->regs->lcd_dmaaddr0 = LCD_DMA_SA_N(fbdev->fb_phys); | 177 | fbdev->regs->lcd_dmaaddr0 = LCD_DMA_SA_N(fbdev->fb_phys); |
| 182 | 178 | ||
| 183 | if (panel_is_dual(fbdev->panel)) { | 179 | if (panel_is_dual(fbdev->panel)) { |
| @@ -206,6 +202,8 @@ int au1100fb_setmode(struct au1100fb_device *fbdev) | |||
| 206 | 202 | ||
| 207 | /* Resume controller */ | 203 | /* Resume controller */ |
| 208 | fbdev->regs->lcd_control |= LCD_CONTROL_GO; | 204 | fbdev->regs->lcd_control |= LCD_CONTROL_GO; |
| 205 | mdelay(10); | ||
| 206 | au1100fb_fb_blank(VESA_NO_BLANKING, info); | ||
| 209 | 207 | ||
| 210 | return 0; | 208 | return 0; |
| 211 | } | 209 | } |
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index fcaeead9696b..50cfca5c7efd 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c | |||
| @@ -512,7 +512,11 @@ befs_utf2nls(struct super_block *sb, const char *in, | |||
| 512 | wchar_t uni; | 512 | wchar_t uni; |
| 513 | int unilen, utflen; | 513 | int unilen, utflen; |
| 514 | char *result; | 514 | char *result; |
| 515 | int maxlen = in_len; /* The utf8->nls conversion can't make more chars */ | 515 | /* The utf8->nls conversion won't make the final nls string bigger |
| 516 | * than the utf one, but if the string is pure ascii they'll have the | ||
| 517 | * same width and an extra char is needed to save the additional \0 | ||
| 518 | */ | ||
| 519 | int maxlen = in_len + 1; | ||
| 516 | 520 | ||
| 517 | befs_debug(sb, "---> utf2nls()"); | 521 | befs_debug(sb, "---> utf2nls()"); |
| 518 | 522 | ||
| @@ -588,7 +592,10 @@ befs_nls2utf(struct super_block *sb, const char *in, | |||
| 588 | wchar_t uni; | 592 | wchar_t uni; |
| 589 | int unilen, utflen; | 593 | int unilen, utflen; |
| 590 | char *result; | 594 | char *result; |
| 591 | int maxlen = 3 * in_len; | 595 | /* There're nls characters that will translate to 3-chars-wide UTF-8 |
| 596 | * characters, a additional byte is needed to save the final \0 | ||
| 597 | * in special cases */ | ||
| 598 | int maxlen = (3 * in_len) + 1; | ||
| 592 | 599 | ||
| 593 | befs_debug(sb, "---> nls2utf()\n"); | 600 | befs_debug(sb, "---> nls2utf()\n"); |
| 594 | 601 | ||
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index baf5ae513481..c9d419703cf3 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c | |||
| @@ -638,9 +638,6 @@ static void nlmsvc_grant_callback(struct rpc_task *task, void *data) | |||
| 638 | if (task->tk_status < 0) { | 638 | if (task->tk_status < 0) { |
| 639 | /* RPC error: Re-insert for retransmission */ | 639 | /* RPC error: Re-insert for retransmission */ |
| 640 | timeout = 10 * HZ; | 640 | timeout = 10 * HZ; |
| 641 | } else if (block->b_done) { | ||
| 642 | /* Block already removed, kill it for real */ | ||
| 643 | timeout = 0; | ||
| 644 | } else { | 641 | } else { |
| 645 | /* Call was successful, now wait for client callback */ | 642 | /* Call was successful, now wait for client callback */ |
| 646 | timeout = 60 * HZ; | 643 | timeout = 60 * HZ; |
| @@ -709,13 +706,10 @@ nlmsvc_retry_blocked(void) | |||
| 709 | break; | 706 | break; |
| 710 | if (time_after(block->b_when,jiffies)) | 707 | if (time_after(block->b_when,jiffies)) |
| 711 | break; | 708 | break; |
| 712 | dprintk("nlmsvc_retry_blocked(%p, when=%ld, done=%d)\n", | 709 | dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n", |
| 713 | block, block->b_when, block->b_done); | 710 | block, block->b_when); |
| 714 | kref_get(&block->b_count); | 711 | kref_get(&block->b_count); |
| 715 | if (block->b_done) | 712 | nlmsvc_grant_blocked(block); |
| 716 | nlmsvc_unlink_block(block); | ||
| 717 | else | ||
| 718 | nlmsvc_grant_blocked(block); | ||
| 719 | nlmsvc_release_block(block); | 713 | nlmsvc_release_block(block); |
| 720 | } | 714 | } |
| 721 | 715 | ||
diff --git a/fs/namei.c b/fs/namei.c index e01070d7bf58..55a131230f94 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
| @@ -159,7 +159,7 @@ char * getname(const char __user * filename) | |||
| 159 | #ifdef CONFIG_AUDITSYSCALL | 159 | #ifdef CONFIG_AUDITSYSCALL |
| 160 | void putname(const char *name) | 160 | void putname(const char *name) |
| 161 | { | 161 | { |
| 162 | if (unlikely(current->audit_context)) | 162 | if (unlikely(!audit_dummy_context())) |
| 163 | audit_putname(name); | 163 | audit_putname(name); |
| 164 | else | 164 | else |
| 165 | __putname(name); | 165 | __putname(name); |
| @@ -1125,7 +1125,7 @@ static int fastcall do_path_lookup(int dfd, const char *name, | |||
| 1125 | retval = link_path_walk(name, nd); | 1125 | retval = link_path_walk(name, nd); |
| 1126 | out: | 1126 | out: |
| 1127 | if (likely(retval == 0)) { | 1127 | if (likely(retval == 0)) { |
| 1128 | if (unlikely(current->audit_context && nd && nd->dentry && | 1128 | if (unlikely(!audit_dummy_context() && nd && nd->dentry && |
| 1129 | nd->dentry->d_inode)) | 1129 | nd->dentry->d_inode)) |
| 1130 | audit_inode(name, nd->dentry->d_inode); | 1130 | audit_inode(name, nd->dentry->d_inode); |
| 1131 | } | 1131 | } |
| @@ -1357,7 +1357,7 @@ static int may_delete(struct inode *dir,struct dentry *victim,int isdir) | |||
| 1357 | return -ENOENT; | 1357 | return -ENOENT; |
| 1358 | 1358 | ||
| 1359 | BUG_ON(victim->d_parent->d_inode != dir); | 1359 | BUG_ON(victim->d_parent->d_inode != dir); |
| 1360 | audit_inode_child(victim->d_name.name, victim->d_inode, dir->i_ino); | 1360 | audit_inode_child(victim->d_name.name, victim->d_inode, dir); |
| 1361 | 1361 | ||
| 1362 | error = permission(dir,MAY_WRITE | MAY_EXEC, NULL); | 1362 | error = permission(dir,MAY_WRITE | MAY_EXEC, NULL); |
| 1363 | if (error) | 1363 | if (error) |
| @@ -1659,6 +1659,7 @@ do_last: | |||
| 1659 | * It already exists. | 1659 | * It already exists. |
| 1660 | */ | 1660 | */ |
| 1661 | mutex_unlock(&dir->d_inode->i_mutex); | 1661 | mutex_unlock(&dir->d_inode->i_mutex); |
| 1662 | audit_inode_update(path.dentry->d_inode); | ||
| 1662 | 1663 | ||
| 1663 | error = -EEXIST; | 1664 | error = -EEXIST; |
| 1664 | if (flag & O_EXCL) | 1665 | if (flag & O_EXCL) |
| @@ -1669,6 +1670,7 @@ do_last: | |||
| 1669 | if (flag & O_NOFOLLOW) | 1670 | if (flag & O_NOFOLLOW) |
| 1670 | goto exit_dput; | 1671 | goto exit_dput; |
| 1671 | } | 1672 | } |
| 1673 | |||
| 1672 | error = -ENOENT; | 1674 | error = -ENOENT; |
| 1673 | if (!path.dentry->d_inode) | 1675 | if (!path.dentry->d_inode) |
| 1674 | goto exit_dput; | 1676 | goto exit_dput; |
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index 19b98ca468eb..86b3169c8cac 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c | |||
| @@ -51,7 +51,7 @@ char *nfs_path(const char *base, const struct dentry *dentry, | |||
| 51 | namelen = dentry->d_name.len; | 51 | namelen = dentry->d_name.len; |
| 52 | buflen -= namelen + 1; | 52 | buflen -= namelen + 1; |
| 53 | if (buflen < 0) | 53 | if (buflen < 0) |
| 54 | goto Elong; | 54 | goto Elong_unlock; |
| 55 | end -= namelen; | 55 | end -= namelen; |
| 56 | memcpy(end, dentry->d_name.name, namelen); | 56 | memcpy(end, dentry->d_name.name, namelen); |
| 57 | *--end = '/'; | 57 | *--end = '/'; |
| @@ -68,6 +68,8 @@ char *nfs_path(const char *base, const struct dentry *dentry, | |||
| 68 | end -= namelen; | 68 | end -= namelen; |
| 69 | memcpy(end, base, namelen); | 69 | memcpy(end, base, namelen); |
| 70 | return end; | 70 | return end; |
| 71 | Elong_unlock: | ||
| 72 | spin_unlock(&dcache_lock); | ||
| 71 | Elong: | 73 | Elong: |
| 72 | return ERR_PTR(-ENAMETOOLONG); | 74 | return ERR_PTR(-ENAMETOOLONG); |
| 73 | } | 75 | } |
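The new Elong_unlock label drops dcache_lock before falling into the common error return, so the "name too long" path no longer leaves with the lock held. Below is a user-space sketch of that pattern, with a pthread mutex standing in for dcache_lock; all names are stand-ins, not kernel API.

/* Sketch of the error-path pattern added above: a failure detected while a
 * lock is held jumps to a label that releases the lock before returning. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static const char *build_path(int buflen, int namelen)
{
    pthread_mutex_lock(&lock);
    buflen -= namelen + 1;
    if (buflen < 0)
        goto Elong_unlock;    /* must not return with the lock held */
    /* ... copy the path component while still holding the lock ... */
    pthread_mutex_unlock(&lock);
    return "ok";

Elong_unlock:
    pthread_mutex_unlock(&lock);
    return NULL;              /* stands in for ERR_PTR(-ENAMETOOLONG) */
}

int main(void)
{
    printf("%s\n", build_path(4, 16) ? "ok" : "name too long");
    return 0;
}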
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 52bf634260a1..65c0c5b32351 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
| @@ -63,7 +63,7 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount) | |||
| 63 | return p; | 63 | return p; |
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | void nfs_readdata_free(struct nfs_read_data *p) | 66 | static void nfs_readdata_free(struct nfs_read_data *p) |
| 67 | { | 67 | { |
| 68 | if (p && (p->pagevec != &p->page_array[0])) | 68 | if (p && (p->pagevec != &p->page_array[0])) |
| 69 | kfree(p->pagevec); | 69 | kfree(p->pagevec); |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 86bac6a5008e..50774991f8d5 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
| @@ -137,7 +137,7 @@ struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount) | |||
| 137 | return p; | 137 | return p; |
| 138 | } | 138 | } |
| 139 | 139 | ||
| 140 | void nfs_writedata_free(struct nfs_write_data *p) | 140 | static void nfs_writedata_free(struct nfs_write_data *p) |
| 141 | { | 141 | { |
| 142 | if (p && (p->pagevec != &p->page_array[0])) | 142 | if (p && (p->pagevec != &p->page_array[0])) |
| 143 | kfree(p->pagevec); | 143 | kfree(p->pagevec); |
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index f318b58510fd..1627edd50810 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c | |||
| @@ -48,8 +48,8 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp) | |||
| 48 | return 0; | 48 | return 0; |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | reiserfs_write_lock(inode->i_sb); | ||
| 52 | mutex_lock(&inode->i_mutex); | 51 | mutex_lock(&inode->i_mutex); |
| 52 | reiserfs_write_lock(inode->i_sb); | ||
| 53 | /* freeing preallocation only involves relogging blocks that | 53 | /* freeing preallocation only involves relogging blocks that |
| 54 | * are already in the current transaction. preallocation gets | 54 | * are already in the current transaction. preallocation gets |
| 55 | * freed at the end of each transaction, so it is impossible for | 55 | * freed at the end of each transaction, so it is impossible for |
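The swap above makes the release path take i_mutex before the reiserfs write lock, matching the order used elsewhere (see the ioctl.c hunk further down); taking both locks in the same order in every path is what rules out an ABBA deadlock. The sketch below illustrates the ordering rule with two pthread mutexes as stand-ins, not kernel objects.

/* Illustrative sketch of consistent lock ordering: every path takes A, then B. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t i_mutex = PTHREAD_MUTEX_INITIALIZER;      /* "A" */
static pthread_mutex_t write_lock = PTHREAD_MUTEX_INITIALIZER;   /* "B" */

static void release_path(void)
{
    pthread_mutex_lock(&i_mutex);     /* A first ... */
    pthread_mutex_lock(&write_lock);  /* ... then B, same order as the unpack path */
    /* ... free preallocation ... */
    pthread_mutex_unlock(&write_lock);
    pthread_mutex_unlock(&i_mutex);
}

int main(void)
{
    release_path();
    printf("no ABBA deadlock is possible when every path takes A before B\n");
    return 0;
}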
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 12dfdcfbee3d..52f1e2136546 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c | |||
| @@ -39,14 +39,10 @@ void reiserfs_delete_inode(struct inode *inode) | |||
| 39 | 39 | ||
| 40 | /* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */ | 40 | /* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */ |
| 41 | if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */ | 41 | if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */ |
| 42 | mutex_lock(&inode->i_mutex); | ||
| 43 | |||
| 44 | reiserfs_delete_xattrs(inode); | 42 | reiserfs_delete_xattrs(inode); |
| 45 | 43 | ||
| 46 | if (journal_begin(&th, inode->i_sb, jbegin_count)) { | 44 | if (journal_begin(&th, inode->i_sb, jbegin_count)) |
| 47 | mutex_unlock(&inode->i_mutex); | ||
| 48 | goto out; | 45 | goto out; |
| 49 | } | ||
| 50 | reiserfs_update_inode_transaction(inode); | 46 | reiserfs_update_inode_transaction(inode); |
| 51 | 47 | ||
| 52 | err = reiserfs_delete_object(&th, inode); | 48 | err = reiserfs_delete_object(&th, inode); |
| @@ -57,12 +53,8 @@ void reiserfs_delete_inode(struct inode *inode) | |||
| 57 | if (!err) | 53 | if (!err) |
| 58 | DQUOT_FREE_INODE(inode); | 54 | DQUOT_FREE_INODE(inode); |
| 59 | 55 | ||
| 60 | if (journal_end(&th, inode->i_sb, jbegin_count)) { | 56 | if (journal_end(&th, inode->i_sb, jbegin_count)) |
| 61 | mutex_unlock(&inode->i_mutex); | ||
| 62 | goto out; | 57 | goto out; |
| 63 | } | ||
| 64 | |||
| 65 | mutex_unlock(&inode->i_mutex); | ||
| 66 | 58 | ||
| 67 | /* check return value from reiserfs_delete_object after | 59 | /* check return value from reiserfs_delete_object after |
| 68 | * ending the transaction | 60 | * ending the transaction |
| @@ -2348,6 +2340,7 @@ static int reiserfs_write_full_page(struct page *page, | |||
| 2348 | unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT; | 2340 | unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT; |
| 2349 | int error = 0; | 2341 | int error = 0; |
| 2350 | unsigned long block; | 2342 | unsigned long block; |
| 2343 | sector_t last_block; | ||
| 2351 | struct buffer_head *head, *bh; | 2344 | struct buffer_head *head, *bh; |
| 2352 | int partial = 0; | 2345 | int partial = 0; |
| 2353 | int nr = 0; | 2346 | int nr = 0; |
| @@ -2395,10 +2388,19 @@ static int reiserfs_write_full_page(struct page *page, | |||
| 2395 | } | 2388 | } |
| 2396 | bh = head; | 2389 | bh = head; |
| 2397 | block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits); | 2390 | block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits); |
| 2391 | last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; | ||
| 2398 | /* first map all the buffers, logging any direct items we find */ | 2392 | /* first map all the buffers, logging any direct items we find */ |
| 2399 | do { | 2393 | do { |
| 2400 | if ((checked || buffer_dirty(bh)) && (!buffer_mapped(bh) || | 2394 | if (block > last_block) { |
| 2401 | (buffer_mapped(bh) | 2395 | /* |
| 2396 | * This can happen when the block size is less than | ||
| 2397 | * the page size. The corresponding bytes in the page | ||
| 2398 | * were zero filled above | ||
| 2399 | */ | ||
| 2400 | clear_buffer_dirty(bh); | ||
| 2401 | set_buffer_uptodate(bh); | ||
| 2402 | } else if ((checked || buffer_dirty(bh)) && | ||
| 2403 | (!buffer_mapped(bh) || (buffer_mapped(bh) | ||
| 2402 | && bh->b_blocknr == | 2404 | && bh->b_blocknr == |
| 2403 | 0))) { | 2405 | 0))) { |
| 2404 | /* not mapped yet, or it points to a direct item, search | 2406 | /* not mapped yet, or it points to a direct item, search |
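The new last_block test skips buffers that lie entirely beyond end-of-file when the block size is smaller than the page size; the arithmetic is simply the index of the block containing the last byte of an i_size-byte file. A small stand-alone sketch of that computation, with example values and nothing reiserfs-specific:

/* Sketch of the last_block arithmetic used above. */
#include <stdio.h>

int main(void)
{
    unsigned long long i_size = 5000;    /* file size in bytes */
    unsigned int blkbits = 10;           /* 1 KiB blocks, 4 KiB pages */
    unsigned long long last_block = (i_size - 1) >> blkbits;

    /* blocks 0..4 hold data; blocks 5..7 of the last page lie past EOF and
     * would be skipped (cleared and marked uptodate) by the new check */
    printf("last_block = %llu\n", last_block);    /* prints 4 */
    return 0;
}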
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c index 745c88100895..a986b5e1e288 100644 --- a/fs/reiserfs/ioctl.c +++ b/fs/reiserfs/ioctl.c | |||
| @@ -116,12 +116,12 @@ static int reiserfs_unpack(struct inode *inode, struct file *filp) | |||
| 116 | if (REISERFS_I(inode)->i_flags & i_nopack_mask) { | 116 | if (REISERFS_I(inode)->i_flags & i_nopack_mask) { |
| 117 | return 0; | 117 | return 0; |
| 118 | } | 118 | } |
| 119 | reiserfs_write_lock(inode->i_sb); | ||
| 120 | 119 | ||
| 121 | /* we need to make sure nobody is changing the file size beneath | 120 | /* we need to make sure nobody is changing the file size beneath |
| 122 | ** us | 121 | ** us |
| 123 | */ | 122 | */ |
| 124 | mutex_lock(&inode->i_mutex); | 123 | mutex_lock(&inode->i_mutex); |
| 124 | reiserfs_write_lock(inode->i_sb); | ||
| 125 | 125 | ||
| 126 | write_from = inode->i_size & (blocksize - 1); | 126 | write_from = inode->i_size & (blocksize - 1); |
| 127 | /* if we are on a block boundary, we are already unpacked. */ | 127 | /* if we are on a block boundary, we are already unpacked. */ |
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index 3873c672cb4c..33323473e3c4 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c | |||
| @@ -75,6 +75,12 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err) | |||
| 75 | } | 75 | } |
| 76 | *err = -ENOSPC; | 76 | *err = -ENOSPC; |
| 77 | 77 | ||
| 78 | UDF_I_UNIQUE(inode) = 0; | ||
| 79 | UDF_I_LENEXTENTS(inode) = 0; | ||
| 80 | UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; | ||
| 81 | UDF_I_NEXT_ALLOC_GOAL(inode) = 0; | ||
| 82 | UDF_I_STRAT4096(inode) = 0; | ||
| 83 | |||
| 78 | block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum, | 84 | block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum, |
| 79 | start, err); | 85 | start, err); |
| 80 | if (*err) | 86 | if (*err) |
| @@ -84,11 +90,6 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err) | |||
| 84 | } | 90 | } |
| 85 | 91 | ||
| 86 | mutex_lock(&sbi->s_alloc_mutex); | 92 | mutex_lock(&sbi->s_alloc_mutex); |
| 87 | UDF_I_UNIQUE(inode) = 0; | ||
| 88 | UDF_I_LENEXTENTS(inode) = 0; | ||
| 89 | UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; | ||
| 90 | UDF_I_NEXT_ALLOC_GOAL(inode) = 0; | ||
| 91 | UDF_I_STRAT4096(inode) = 0; | ||
| 92 | if (UDF_SB_LVIDBH(sb)) | 93 | if (UDF_SB_LVIDBH(sb)) |
| 93 | { | 94 | { |
| 94 | struct logicalVolHeaderDesc *lvhd; | 95 | struct logicalVolHeaderDesc *lvhd; |
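Moving the UDF_I_*() initialisation ahead of udf_new_block() means the failure path, which drops the half-built inode, no longer sees uninitialised fields. The sketch below is a generic illustration of that ordering rule with made-up names, not the UDF code.

/* Generic sketch: initialise every field an error path may look at before
 * the first call that can fail. */
#include <stdio.h>
#include <string.h>

struct toy_inode {
    unsigned long unique;
    unsigned long next_alloc_block;
};

static int alloc_block(int fail)
{
    return fail ? -1 : 42;
}

static int new_inode(struct toy_inode *inode, int fail)
{
    /* zero the bookkeeping first, so cleanup is safe on any exit path */
    memset(inode, 0, sizeof(*inode));

    int block = alloc_block(fail);
    if (block < 0)
        return -1;    /* caller tears down a fully-initialised inode */

    inode->next_alloc_block = block;
    return 0;
}

int main(void)
{
    struct toy_inode ino;

    printf("failure path: %d, unique=%lu (well defined)\n",
           new_inode(&ino, 1), ino.unique);
    return 0;
}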
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index b01804baa120..b82381475779 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c | |||
| @@ -248,7 +248,7 @@ static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk, | |||
| 248 | 248 | ||
| 249 | if (likely(cur_index != index)) { | 249 | if (likely(cur_index != index)) { |
| 250 | page = ufs_get_locked_page(mapping, index); | 250 | page = ufs_get_locked_page(mapping, index); |
| 251 | if (IS_ERR(page)) | 251 | if (!page || IS_ERR(page)) /* it was truncated or EIO */ |
| 252 | continue; | 252 | continue; |
| 253 | } else | 253 | } else |
| 254 | page = locked_page; | 254 | page = locked_page; |
diff --git a/fs/ufs/util.c b/fs/ufs/util.c index 337cf2c46d10..22f820a9b15c 100644 --- a/fs/ufs/util.c +++ b/fs/ufs/util.c | |||
| @@ -251,12 +251,12 @@ struct page *ufs_get_locked_page(struct address_space *mapping, | |||
| 251 | { | 251 | { |
| 252 | struct page *page; | 252 | struct page *page; |
| 253 | 253 | ||
| 254 | try_again: | ||
| 255 | page = find_lock_page(mapping, index); | 254 | page = find_lock_page(mapping, index); |
| 256 | if (!page) { | 255 | if (!page) { |
| 257 | page = read_cache_page(mapping, index, | 256 | page = read_cache_page(mapping, index, |
| 258 | (filler_t*)mapping->a_ops->readpage, | 257 | (filler_t*)mapping->a_ops->readpage, |
| 259 | NULL); | 258 | NULL); |
| 259 | |||
| 260 | if (IS_ERR(page)) { | 260 | if (IS_ERR(page)) { |
| 261 | printk(KERN_ERR "ufs_change_blocknr: " | 261 | printk(KERN_ERR "ufs_change_blocknr: " |
| 262 | "read_cache_page error: ino %lu, index: %lu\n", | 262 | "read_cache_page error: ino %lu, index: %lu\n", |
| @@ -266,6 +266,14 @@ try_again: | |||
| 266 | 266 | ||
| 267 | lock_page(page); | 267 | lock_page(page); |
| 268 | 268 | ||
| 269 | if (unlikely(page->mapping == NULL)) { | ||
| 270 | /* Truncate got there first */ | ||
| 271 | unlock_page(page); | ||
| 272 | page_cache_release(page); | ||
| 273 | page = NULL; | ||
| 274 | goto out; | ||
| 275 | } | ||
| 276 | |||
| 269 | if (!PageUptodate(page) || PageError(page)) { | 277 | if (!PageUptodate(page) || PageError(page)) { |
| 270 | unlock_page(page); | 278 | unlock_page(page); |
| 271 | page_cache_release(page); | 279 | page_cache_release(page); |
| @@ -275,15 +283,8 @@ try_again: | |||
| 275 | mapping->host->i_ino, index); | 283 | mapping->host->i_ino, index); |
| 276 | 284 | ||
| 277 | page = ERR_PTR(-EIO); | 285 | page = ERR_PTR(-EIO); |
| 278 | goto out; | ||
| 279 | } | 286 | } |
| 280 | } | 287 | } |
| 281 | |||
| 282 | if (unlikely(!page->mapping || !page_has_buffers(page))) { | ||
| 283 | unlock_page(page); | ||
| 284 | page_cache_release(page); | ||
| 285 | goto try_again;/*we really need these buffers*/ | ||
| 286 | } | ||
| 287 | out: | 288 | out: |
| 288 | return page; | 289 | return page; |
| 289 | } | 290 | } |
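With this change ufs_get_locked_page() can return NULL (the page was truncated away) as well as ERR_PTR(-EIO), and callers such as ufs_change_blocknr() treat both as "skip this page", as the balloc.c hunk above shows. A sketch of that caller-side check follows; ERR_PTR/IS_ERR are re-implemented here only to keep the sketch self-contained, and the state values are invented.

/* Caller-side check: NULL (truncated) and an error pointer (EIO) both mean skip. */
#include <stdio.h>

#define MAX_ERRNO   4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define IS_ERR(ptr) ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *get_locked_page(int state)
{
    if (state == 1)
        return NULL;          /* truncate raced with us */
    if (state == 2)
        return ERR_PTR(-5);   /* -EIO while reading the page */
    static int page;          /* stand-in for a real struct page */
    return &page;
}

int main(void)
{
    for (int state = 0; state < 3; state++) {
        void *page = get_locked_page(state);

        if (!page || IS_ERR(page)) {    /* it was truncated or EIO */
            printf("state %d: skipped\n", state);
            continue;
        }
        printf("state %d: got page %p\n", state, page);
    }
    return 0;
}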
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h index 894bc4d89dc0..6a33a07b3f1d 100644 --- a/include/asm-ia64/meminit.h +++ b/include/asm-ia64/meminit.h | |||
| @@ -56,6 +56,11 @@ extern void efi_memmap_init(unsigned long *, unsigned long *); | |||
| 56 | extern struct page *vmem_map; | 56 | extern struct page *vmem_map; |
| 57 | extern int find_largest_hole (u64 start, u64 end, void *arg); | 57 | extern int find_largest_hole (u64 start, u64 end, void *arg); |
| 58 | extern int create_mem_map_page_table (u64 start, u64 end, void *arg); | 58 | extern int create_mem_map_page_table (u64 start, u64 end, void *arg); |
| 59 | extern int vmemmap_find_next_valid_pfn(int, int); | ||
| 60 | #else | ||
| 61 | static inline int vmemmap_find_next_valid_pfn(int node, int i) | ||
| 62 | { | ||
| 63 | return i + 1; | ||
| 64 | } | ||
| 59 | #endif | 65 | #endif |
| 60 | |||
| 61 | #endif /* meminit_h */ | 66 | #endif /* meminit_h */ |
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h index 37e52a2836b0..20a8d618c845 100644 --- a/include/asm-ia64/pal.h +++ b/include/asm-ia64/pal.h | |||
| @@ -1433,7 +1433,12 @@ typedef union pal_version_u { | |||
| 1433 | } pal_version_u_t; | 1433 | } pal_version_u_t; |
| 1434 | 1434 | ||
| 1435 | 1435 | ||
| 1436 | /* Return PAL version information */ | 1436 | /* |
| 1437 | * Return PAL version information. While the documentation states that | ||
| 1438 | * PAL_VERSION can be called in either physical or virtual mode, some | ||
| 1439 | * implementations only allow physical calls. We don't call it very often, | ||
| 1440 | * so the overhead isn't worth eliminating. | ||
| 1441 | */ | ||
| 1437 | static inline s64 | 1442 | static inline s64 |
| 1438 | ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version) | 1443 | ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version) |
| 1439 | { | 1444 | { |
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h index 8406f1ef4caf..b72af597878d 100644 --- a/include/asm-ia64/sn/xpc.h +++ b/include/asm-ia64/sn/xpc.h | |||
| @@ -1124,8 +1124,8 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, | |||
| 1124 | #define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) | 1124 | #define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) |
| 1125 | #define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8)) | 1125 | #define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8)) |
| 1126 | 1126 | ||
| 1127 | #define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f) | 1127 | #define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f)) |
| 1128 | #define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010) | 1128 | #define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x1010101010101010)) |
| 1129 | 1129 | ||
| 1130 | 1130 | ||
| 1131 | static inline void | 1131 | static inline void |
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index fc9677bc87ee..384fbf7f2a0f 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h | |||
| @@ -24,7 +24,7 @@ | |||
| 24 | * 0xa000000000000000+2*PERCPU_PAGE_SIZE | 24 | * 0xa000000000000000+2*PERCPU_PAGE_SIZE |
| 25 | * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page) | 25 | * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page) |
| 26 | */ | 26 | */ |
| 27 | #define KERNEL_START (GATE_ADDR+0x100000000) | 27 | #define KERNEL_START (GATE_ADDR+__IA64_UL_CONST(0x100000000)) |
| 28 | #define PERCPU_ADDR (-PERCPU_PAGE_SIZE) | 28 | #define PERCPU_ADDR (-PERCPU_PAGE_SIZE) |
| 29 | 29 | ||
| 30 | #ifndef __ASSEMBLY__ | 30 | #ifndef __ASSEMBLY__ |
diff --git a/include/linux/audit.h b/include/linux/audit.h index b27d7debc5a1..64f9f9e56ac5 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h | |||
| @@ -327,21 +327,31 @@ extern void __audit_getname(const char *name); | |||
| 327 | extern void audit_putname(const char *name); | 327 | extern void audit_putname(const char *name); |
| 328 | extern void __audit_inode(const char *name, const struct inode *inode); | 328 | extern void __audit_inode(const char *name, const struct inode *inode); |
| 329 | extern void __audit_inode_child(const char *dname, const struct inode *inode, | 329 | extern void __audit_inode_child(const char *dname, const struct inode *inode, |
| 330 | unsigned long pino); | 330 | const struct inode *parent); |
| 331 | extern void __audit_inode_update(const struct inode *inode); | ||
| 332 | static inline int audit_dummy_context(void) | ||
| 333 | { | ||
| 334 | void *p = current->audit_context; | ||
| 335 | return !p || *(int *)p; | ||
| 336 | } | ||
| 331 | static inline void audit_getname(const char *name) | 337 | static inline void audit_getname(const char *name) |
| 332 | { | 338 | { |
| 333 | if (unlikely(current->audit_context)) | 339 | if (unlikely(!audit_dummy_context())) |
| 334 | __audit_getname(name); | 340 | __audit_getname(name); |
| 335 | } | 341 | } |
| 336 | static inline void audit_inode(const char *name, const struct inode *inode) { | 342 | static inline void audit_inode(const char *name, const struct inode *inode) { |
| 337 | if (unlikely(current->audit_context)) | 343 | if (unlikely(!audit_dummy_context())) |
| 338 | __audit_inode(name, inode); | 344 | __audit_inode(name, inode); |
| 339 | } | 345 | } |
| 340 | static inline void audit_inode_child(const char *dname, | 346 | static inline void audit_inode_child(const char *dname, |
| 341 | const struct inode *inode, | 347 | const struct inode *inode, |
| 342 | unsigned long pino) { | 348 | const struct inode *parent) { |
| 343 | if (unlikely(current->audit_context)) | 349 | if (unlikely(!audit_dummy_context())) |
| 344 | __audit_inode_child(dname, inode, pino); | 350 | __audit_inode_child(dname, inode, parent); |
| 351 | } | ||
| 352 | static inline void audit_inode_update(const struct inode *inode) { | ||
| 353 | if (unlikely(!audit_dummy_context())) | ||
| 354 | __audit_inode_update(inode); | ||
| 345 | } | 355 | } |
| 346 | 356 | ||
| 347 | /* Private API (for audit.c only) */ | 357 | /* Private API (for audit.c only) */ |
| @@ -365,57 +375,61 @@ extern int __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); | |||
| 365 | 375 | ||
| 366 | static inline int audit_ipc_obj(struct kern_ipc_perm *ipcp) | 376 | static inline int audit_ipc_obj(struct kern_ipc_perm *ipcp) |
| 367 | { | 377 | { |
| 368 | if (unlikely(current->audit_context)) | 378 | if (unlikely(!audit_dummy_context())) |
| 369 | return __audit_ipc_obj(ipcp); | 379 | return __audit_ipc_obj(ipcp); |
| 370 | return 0; | 380 | return 0; |
| 371 | } | 381 | } |
| 372 | static inline int audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode) | 382 | static inline int audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode) |
| 373 | { | 383 | { |
| 374 | if (unlikely(current->audit_context)) | 384 | if (unlikely(!audit_dummy_context())) |
| 375 | return __audit_ipc_set_perm(qbytes, uid, gid, mode); | 385 | return __audit_ipc_set_perm(qbytes, uid, gid, mode); |
| 376 | return 0; | 386 | return 0; |
| 377 | } | 387 | } |
| 378 | static inline int audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr) | 388 | static inline int audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr) |
| 379 | { | 389 | { |
| 380 | if (unlikely(current->audit_context)) | 390 | if (unlikely(!audit_dummy_context())) |
| 381 | return __audit_mq_open(oflag, mode, u_attr); | 391 | return __audit_mq_open(oflag, mode, u_attr); |
| 382 | return 0; | 392 | return 0; |
| 383 | } | 393 | } |
| 384 | static inline int audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec __user *u_abs_timeout) | 394 | static inline int audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec __user *u_abs_timeout) |
| 385 | { | 395 | { |
| 386 | if (unlikely(current->audit_context)) | 396 | if (unlikely(!audit_dummy_context())) |
| 387 | return __audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout); | 397 | return __audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout); |
| 388 | return 0; | 398 | return 0; |
| 389 | } | 399 | } |
| 390 | static inline int audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout) | 400 | static inline int audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout) |
| 391 | { | 401 | { |
| 392 | if (unlikely(current->audit_context)) | 402 | if (unlikely(!audit_dummy_context())) |
| 393 | return __audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout); | 403 | return __audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout); |
| 394 | return 0; | 404 | return 0; |
| 395 | } | 405 | } |
| 396 | static inline int audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification) | 406 | static inline int audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification) |
| 397 | { | 407 | { |
| 398 | if (unlikely(current->audit_context)) | 408 | if (unlikely(!audit_dummy_context())) |
| 399 | return __audit_mq_notify(mqdes, u_notification); | 409 | return __audit_mq_notify(mqdes, u_notification); |
| 400 | return 0; | 410 | return 0; |
| 401 | } | 411 | } |
| 402 | static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) | 412 | static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) |
| 403 | { | 413 | { |
| 404 | if (unlikely(current->audit_context)) | 414 | if (unlikely(!audit_dummy_context())) |
| 405 | return __audit_mq_getsetattr(mqdes, mqstat); | 415 | return __audit_mq_getsetattr(mqdes, mqstat); |
| 406 | return 0; | 416 | return 0; |
| 407 | } | 417 | } |
| 418 | extern int audit_n_rules; | ||
| 408 | #else | 419 | #else |
| 409 | #define audit_alloc(t) ({ 0; }) | 420 | #define audit_alloc(t) ({ 0; }) |
| 410 | #define audit_free(t) do { ; } while (0) | 421 | #define audit_free(t) do { ; } while (0) |
| 411 | #define audit_syscall_entry(ta,a,b,c,d,e) do { ; } while (0) | 422 | #define audit_syscall_entry(ta,a,b,c,d,e) do { ; } while (0) |
| 412 | #define audit_syscall_exit(f,r) do { ; } while (0) | 423 | #define audit_syscall_exit(f,r) do { ; } while (0) |
| 424 | #define audit_dummy_context() 1 | ||
| 413 | #define audit_getname(n) do { ; } while (0) | 425 | #define audit_getname(n) do { ; } while (0) |
| 414 | #define audit_putname(n) do { ; } while (0) | 426 | #define audit_putname(n) do { ; } while (0) |
| 415 | #define __audit_inode(n,i) do { ; } while (0) | 427 | #define __audit_inode(n,i) do { ; } while (0) |
| 416 | #define __audit_inode_child(d,i,p) do { ; } while (0) | 428 | #define __audit_inode_child(d,i,p) do { ; } while (0) |
| 429 | #define __audit_inode_update(i) do { ; } while (0) | ||
| 417 | #define audit_inode(n,i) do { ; } while (0) | 430 | #define audit_inode(n,i) do { ; } while (0) |
| 418 | #define audit_inode_child(d,i,p) do { ; } while (0) | 431 | #define audit_inode_child(d,i,p) do { ; } while (0) |
| 432 | #define audit_inode_update(i) do { ; } while (0) | ||
| 419 | #define auditsc_get_stamp(c,t,s) do { BUG(); } while (0) | 433 | #define auditsc_get_stamp(c,t,s) do { BUG(); } while (0) |
| 420 | #define audit_get_loginuid(c) ({ -1; }) | 434 | #define audit_get_loginuid(c) ({ -1; }) |
| 421 | #define audit_ipc_obj(i) ({ 0; }) | 435 | #define audit_ipc_obj(i) ({ 0; }) |
| @@ -430,6 +444,7 @@ static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) | |||
| 430 | #define audit_mq_timedreceive(d,l,p,t) ({ 0; }) | 444 | #define audit_mq_timedreceive(d,l,p,t) ({ 0; }) |
| 431 | #define audit_mq_notify(d,n) ({ 0; }) | 445 | #define audit_mq_notify(d,n) ({ 0; }) |
| 432 | #define audit_mq_getsetattr(d,s) ({ 0; }) | 446 | #define audit_mq_getsetattr(d,s) ({ 0; }) |
| 447 | #define audit_n_rules 0 | ||
| 433 | #endif | 448 | #endif |
| 434 | 449 | ||
| 435 | #ifdef CONFIG_AUDIT | 450 | #ifdef CONFIG_AUDIT |
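audit_dummy_context() reads the first int of the opaque audit_context, which is why the auditsc.c hunk further down insists that the new dummy field stay the first element of the struct. The sketch below shows that layout trick with a toy struct, not the real audit_context.

/* Sketch of the layout assumption behind audit_dummy_context(): the flag it
 * reads must be the first member, so the struct pointer can be read through
 * an int pointer. */
#include <stdio.h>

struct toy_context {
    int dummy;        /* must be the first element */
    int in_syscall;
};

static struct toy_context *audit_context;    /* per-task pointer, may be NULL */

static int dummy_context(void)
{
    void *p = audit_context;

    return !p || *(int *)p;    /* no context, or a context with no rules */
}

int main(void)
{
    struct toy_context ctx = { .dummy = 1, .in_syscall = 0 };

    printf("%d\n", dummy_context());    /* 1: no context at all */
    audit_context = &ctx;
    printf("%d\n", dummy_context());    /* 1: context marked dummy */
    ctx.dummy = 0;
    printf("%d\n", dummy_context());    /* 0: real auditing context */
    return 0;
}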
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 6a7047851e48..88dafa246d87 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h | |||
| @@ -1,6 +1,8 @@ | |||
| 1 | #ifndef __LINUX_DEBUG_LOCKING_H | 1 | #ifndef __LINUX_DEBUG_LOCKING_H |
| 2 | #define __LINUX_DEBUG_LOCKING_H | 2 | #define __LINUX_DEBUG_LOCKING_H |
| 3 | 3 | ||
| 4 | struct task_struct; | ||
| 5 | |||
| 4 | extern int debug_locks; | 6 | extern int debug_locks; |
| 5 | extern int debug_locks_silent; | 7 | extern int debug_locks_silent; |
| 6 | 8 | ||
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index cc5dec70c32c..d4f219ffaa5d 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h | |||
| @@ -67,7 +67,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, | |||
| 67 | if (source) { | 67 | if (source) { |
| 68 | inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL); | 68 | inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL); |
| 69 | } | 69 | } |
| 70 | audit_inode_child(new_name, source, new_dir->i_ino); | 70 | audit_inode_child(new_name, source, new_dir); |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | /* | 73 | /* |
| @@ -98,7 +98,7 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) | |||
| 98 | inode_dir_notify(inode, DN_CREATE); | 98 | inode_dir_notify(inode, DN_CREATE); |
| 99 | inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name, | 99 | inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name, |
| 100 | dentry->d_inode); | 100 | dentry->d_inode); |
| 101 | audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino); | 101 | audit_inode_child(dentry->d_name.name, dentry->d_inode, inode); |
| 102 | } | 102 | } |
| 103 | 103 | ||
| 104 | /* | 104 | /* |
| @@ -109,7 +109,7 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) | |||
| 109 | inode_dir_notify(inode, DN_CREATE); | 109 | inode_dir_notify(inode, DN_CREATE); |
| 110 | inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0, | 110 | inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0, |
| 111 | dentry->d_name.name, dentry->d_inode); | 111 | dentry->d_name.name, dentry->d_inode); |
| 112 | audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino); | 112 | audit_inode_child(dentry->d_name.name, dentry->d_inode, inode); |
| 113 | } | 113 | } |
| 114 | 114 | ||
| 115 | /* | 115 | /* |
diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 0503b2ed8bae..2d229327959e 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h | |||
| @@ -46,8 +46,6 @@ enum kobject_action { | |||
| 46 | KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */ | 46 | KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */ |
| 47 | KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */ | 47 | KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */ |
| 48 | KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */ | 48 | KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */ |
| 49 | KOBJ_UNDOCK = (__force kobject_action_t) 0x08, /* undocking */ | ||
| 50 | KOBJ_DOCK = (__force kobject_action_t) 0x09, /* dock */ | ||
| 51 | }; | 49 | }; |
| 52 | 50 | ||
| 53 | struct kobject { | 51 | struct kobject { |
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index aa4fe905bb4d..0d92c468d55a 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h | |||
| @@ -123,7 +123,6 @@ struct nlm_block { | |||
| 123 | unsigned int b_id; /* block id */ | 123 | unsigned int b_id; /* block id */ |
| 124 | unsigned char b_queued; /* re-queued */ | 124 | unsigned char b_queued; /* re-queued */ |
| 125 | unsigned char b_granted; /* VFS granted lock */ | 125 | unsigned char b_granted; /* VFS granted lock */ |
| 126 | unsigned char b_done; /* callback complete */ | ||
| 127 | struct nlm_file * b_file; /* file in question */ | 126 | struct nlm_file * b_file; /* file in question */ |
| 128 | }; | 127 | }; |
| 129 | 128 | ||
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 55ea853d57bc..247434553ae8 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
| @@ -476,10 +476,9 @@ static inline int nfs_wb_page(struct inode *inode, struct page* page) | |||
| 476 | } | 476 | } |
| 477 | 477 | ||
| 478 | /* | 478 | /* |
| 479 | * Allocate and free nfs_write_data structures | 479 | * Allocate nfs_write_data structures |
| 480 | */ | 480 | */ |
| 481 | extern struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount); | 481 | extern struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount); |
| 482 | extern void nfs_writedata_free(struct nfs_write_data *p); | ||
| 483 | 482 | ||
| 484 | /* | 483 | /* |
| 485 | * linux/fs/nfs/read.c | 484 | * linux/fs/nfs/read.c |
| @@ -491,10 +490,9 @@ extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); | |||
| 491 | extern void nfs_readdata_release(void *data); | 490 | extern void nfs_readdata_release(void *data); |
| 492 | 491 | ||
| 493 | /* | 492 | /* |
| 494 | * Allocate and free nfs_read_data structures | 493 | * Allocate nfs_read_data structures |
| 495 | */ | 494 | */ |
| 496 | extern struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount); | 495 | extern struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount); |
| 497 | extern void nfs_readdata_free(struct nfs_read_data *p); | ||
| 498 | 496 | ||
| 499 | /* | 497 | /* |
| 500 | * linux/fs/nfs3proc.c | 498 | * linux/fs/nfs3proc.c |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c09396d2c77b..4eae06b08cf2 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
| @@ -2142,6 +2142,7 @@ | |||
| 2142 | #define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501 | 2142 | #define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501 |
| 2143 | #define PCI_DEVICE_ID_INTEL_82850_HB 0x2530 | 2143 | #define PCI_DEVICE_ID_INTEL_82850_HB 0x2530 |
| 2144 | #define PCI_DEVICE_ID_INTEL_82860_HB 0x2531 | 2144 | #define PCI_DEVICE_ID_INTEL_82860_HB 0x2531 |
| 2145 | #define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c | ||
| 2145 | #define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560 | 2146 | #define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560 |
| 2146 | #define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562 | 2147 | #define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562 |
| 2147 | #define PCI_DEVICE_ID_INTEL_82865_HB 0x2570 | 2148 | #define PCI_DEVICE_ID_INTEL_82865_HB 0x2570 |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 6afa72e080cb..6674fc1e51bf 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -1558,6 +1558,14 @@ static inline void freeze(struct task_struct *p) | |||
| 1558 | } | 1558 | } |
| 1559 | 1559 | ||
| 1560 | /* | 1560 | /* |
| 1561 | * Sometimes we may need to cancel the previous 'freeze' request | ||
| 1562 | */ | ||
| 1563 | static inline void do_not_freeze(struct task_struct *p) | ||
| 1564 | { | ||
| 1565 | p->flags &= ~PF_FREEZE; | ||
| 1566 | } | ||
| 1567 | |||
| 1568 | /* | ||
| 1561 | * Wake up a frozen process | 1569 | * Wake up a frozen process |
| 1562 | */ | 1570 | */ |
| 1563 | static inline int thaw_process(struct task_struct *p) | 1571 | static inline int thaw_process(struct task_struct *p) |
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index e8bbe8118de8..840e47a4ccc5 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
| @@ -229,7 +229,7 @@ int xprt_reserve_xprt(struct rpc_task *task); | |||
| 229 | int xprt_reserve_xprt_cong(struct rpc_task *task); | 229 | int xprt_reserve_xprt_cong(struct rpc_task *task); |
| 230 | int xprt_prepare_transmit(struct rpc_task *task); | 230 | int xprt_prepare_transmit(struct rpc_task *task); |
| 231 | void xprt_transmit(struct rpc_task *task); | 231 | void xprt_transmit(struct rpc_task *task); |
| 232 | void xprt_abort_transmit(struct rpc_task *task); | 232 | void xprt_end_transmit(struct rpc_task *task); |
| 233 | int xprt_adjust_timeout(struct rpc_rqst *req); | 233 | int xprt_adjust_timeout(struct rpc_rqst *req); |
| 234 | void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task); | 234 | void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task); |
| 235 | void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); | 235 | void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); |
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 1ab806c47514..2d9b1b60798a 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
| @@ -41,23 +41,23 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states); | |||
| 41 | 41 | ||
| 42 | static inline void __count_vm_event(enum vm_event_item item) | 42 | static inline void __count_vm_event(enum vm_event_item item) |
| 43 | { | 43 | { |
| 44 | __get_cpu_var(vm_event_states.event[item])++; | 44 | __get_cpu_var(vm_event_states).event[item]++; |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | static inline void count_vm_event(enum vm_event_item item) | 47 | static inline void count_vm_event(enum vm_event_item item) |
| 48 | { | 48 | { |
| 49 | get_cpu_var(vm_event_states.event[item])++; | 49 | get_cpu_var(vm_event_states).event[item]++; |
| 50 | put_cpu(); | 50 | put_cpu(); |
| 51 | } | 51 | } |
| 52 | 52 | ||
| 53 | static inline void __count_vm_events(enum vm_event_item item, long delta) | 53 | static inline void __count_vm_events(enum vm_event_item item, long delta) |
| 54 | { | 54 | { |
| 55 | __get_cpu_var(vm_event_states.event[item]) += delta; | 55 | __get_cpu_var(vm_event_states).event[item] += delta; |
| 56 | } | 56 | } |
| 57 | 57 | ||
| 58 | static inline void count_vm_events(enum vm_event_item item, long delta) | 58 | static inline void count_vm_events(enum vm_event_item item, long delta) |
| 59 | { | 59 | { |
| 60 | get_cpu_var(vm_event_states.event[item]) += delta; | 60 | get_cpu_var(vm_event_states).event[item] += delta; |
| 61 | put_cpu(); | 61 | put_cpu(); |
| 62 | } | 62 | } |
| 63 | 63 | ||
diff --git a/include/net/red.h b/include/net/red.h index 5ccdbb3d4722..a4eb37946f2c 100644 --- a/include/net/red.h +++ b/include/net/red.h | |||
| @@ -212,7 +212,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p) | |||
| 212 | * Seems, it is the best solution to | 212 | * Seems, it is the best solution to |
| 213 | * problem of too coarse exponent tabulation. | 213 | * problem of too coarse exponent tabulation. |
| 214 | */ | 214 | */ |
| 215 | us_idle = (p->qavg * us_idle) >> p->Scell_log; | 215 | us_idle = (p->qavg * (u64)us_idle) >> p->Scell_log; |
| 216 | 216 | ||
| 217 | if (us_idle < (p->qavg >> 1)) | 217 | if (us_idle < (p->qavg >> 1)) |
| 218 | return p->qavg - us_idle; | 218 | return p->qavg - us_idle; |
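The (u64) cast matters on 32-bit hosts, where p->qavg * us_idle is a 32-bit multiply that can wrap before the right shift; promoting one operand to 64 bits keeps the intermediate product exact. A minimal demonstration with arbitrary example values (plain variables standing in for the struct red_parms fields):

/* Demonstration of the 32-bit overflow the (u64) cast avoids. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t qavg = 200000;     /* average queue length, fixed point */
    uint32_t us_idle = 50000;   /* idle time */
    unsigned int scell_log = 6;

    uint32_t wrapped = (qavg * us_idle) >> scell_log;               /* 32-bit product wraps */
    uint64_t exact = ((uint64_t)qavg * us_idle) >> scell_log;       /* as in the patched code */

    printf("without cast: %u\nwith cast:    %llu\n",
           (unsigned)wrapped, (unsigned long long)exact);
    return 0;
}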
diff --git a/kernel/audit.c b/kernel/audit.c index d417ca1db79b..0a36091ed712 100644 --- a/kernel/audit.c +++ b/kernel/audit.c | |||
| @@ -690,9 +690,7 @@ static const struct inotify_operations audit_inotify_ops = { | |||
| 690 | /* Initialize audit support at boot time. */ | 690 | /* Initialize audit support at boot time. */ |
| 691 | static int __init audit_init(void) | 691 | static int __init audit_init(void) |
| 692 | { | 692 | { |
| 693 | #ifdef CONFIG_AUDITSYSCALL | ||
| 694 | int i; | 693 | int i; |
| 695 | #endif | ||
| 696 | 694 | ||
| 697 | printk(KERN_INFO "audit: initializing netlink socket (%s)\n", | 695 | printk(KERN_INFO "audit: initializing netlink socket (%s)\n", |
| 698 | audit_default ? "enabled" : "disabled"); | 696 | audit_default ? "enabled" : "disabled"); |
| @@ -717,10 +715,10 @@ static int __init audit_init(void) | |||
| 717 | audit_ih = inotify_init(&audit_inotify_ops); | 715 | audit_ih = inotify_init(&audit_inotify_ops); |
| 718 | if (IS_ERR(audit_ih)) | 716 | if (IS_ERR(audit_ih)) |
| 719 | audit_panic("cannot initialize inotify handle"); | 717 | audit_panic("cannot initialize inotify handle"); |
| 718 | #endif | ||
| 720 | 719 | ||
| 721 | for (i = 0; i < AUDIT_INODE_BUCKETS; i++) | 720 | for (i = 0; i < AUDIT_INODE_BUCKETS; i++) |
| 722 | INIT_LIST_HEAD(&audit_inode_hash[i]); | 721 | INIT_LIST_HEAD(&audit_inode_hash[i]); |
| 723 | #endif | ||
| 724 | 722 | ||
| 725 | return 0; | 723 | return 0; |
| 726 | } | 724 | } |
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 5b4e16276ca0..6a9a5c5a4e7d 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c | |||
| @@ -442,6 +442,7 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule) | |||
| 442 | case AUDIT_EQUAL: | 442 | case AUDIT_EQUAL: |
| 443 | break; | 443 | break; |
| 444 | default: | 444 | default: |
| 445 | err = -EINVAL; | ||
| 445 | goto exit_free; | 446 | goto exit_free; |
| 446 | } | 447 | } |
| 447 | } | 448 | } |
| @@ -579,6 +580,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, | |||
| 579 | case AUDIT_EQUAL: | 580 | case AUDIT_EQUAL: |
| 580 | break; | 581 | break; |
| 581 | default: | 582 | default: |
| 583 | err = -EINVAL; | ||
| 582 | goto exit_free; | 584 | goto exit_free; |
| 583 | } | 585 | } |
| 584 | } | 586 | } |
| @@ -1134,6 +1136,14 @@ static inline int audit_add_rule(struct audit_entry *entry, | |||
| 1134 | struct audit_watch *watch = entry->rule.watch; | 1136 | struct audit_watch *watch = entry->rule.watch; |
| 1135 | struct nameidata *ndp, *ndw; | 1137 | struct nameidata *ndp, *ndw; |
| 1136 | int h, err, putnd_needed = 0; | 1138 | int h, err, putnd_needed = 0; |
| 1139 | #ifdef CONFIG_AUDITSYSCALL | ||
| 1140 | int dont_count = 0; | ||
| 1141 | |||
| 1142 | /* If either of these, don't count towards total */ | ||
| 1143 | if (entry->rule.listnr == AUDIT_FILTER_USER || | ||
| 1144 | entry->rule.listnr == AUDIT_FILTER_TYPE) | ||
| 1145 | dont_count = 1; | ||
| 1146 | #endif | ||
| 1137 | 1147 | ||
| 1138 | if (inode_f) { | 1148 | if (inode_f) { |
| 1139 | h = audit_hash_ino(inode_f->val); | 1149 | h = audit_hash_ino(inode_f->val); |
| @@ -1174,6 +1184,10 @@ static inline int audit_add_rule(struct audit_entry *entry, | |||
| 1174 | } else { | 1184 | } else { |
| 1175 | list_add_tail_rcu(&entry->list, list); | 1185 | list_add_tail_rcu(&entry->list, list); |
| 1176 | } | 1186 | } |
| 1187 | #ifdef CONFIG_AUDITSYSCALL | ||
| 1188 | if (!dont_count) | ||
| 1189 | audit_n_rules++; | ||
| 1190 | #endif | ||
| 1177 | mutex_unlock(&audit_filter_mutex); | 1191 | mutex_unlock(&audit_filter_mutex); |
| 1178 | 1192 | ||
| 1179 | if (putnd_needed) | 1193 | if (putnd_needed) |
| @@ -1198,6 +1212,14 @@ static inline int audit_del_rule(struct audit_entry *entry, | |||
| 1198 | struct audit_watch *watch, *tmp_watch = entry->rule.watch; | 1212 | struct audit_watch *watch, *tmp_watch = entry->rule.watch; |
| 1199 | LIST_HEAD(inotify_list); | 1213 | LIST_HEAD(inotify_list); |
| 1200 | int h, ret = 0; | 1214 | int h, ret = 0; |
| 1215 | #ifdef CONFIG_AUDITSYSCALL | ||
| 1216 | int dont_count = 0; | ||
| 1217 | |||
| 1218 | /* If either of these, don't count towards total */ | ||
| 1219 | if (entry->rule.listnr == AUDIT_FILTER_USER || | ||
| 1220 | entry->rule.listnr == AUDIT_FILTER_TYPE) | ||
| 1221 | dont_count = 1; | ||
| 1222 | #endif | ||
| 1201 | 1223 | ||
| 1202 | if (inode_f) { | 1224 | if (inode_f) { |
| 1203 | h = audit_hash_ino(inode_f->val); | 1225 | h = audit_hash_ino(inode_f->val); |
| @@ -1235,6 +1257,10 @@ static inline int audit_del_rule(struct audit_entry *entry, | |||
| 1235 | list_del_rcu(&e->list); | 1257 | list_del_rcu(&e->list); |
| 1236 | call_rcu(&e->rcu, audit_free_rule_rcu); | 1258 | call_rcu(&e->rcu, audit_free_rule_rcu); |
| 1237 | 1259 | ||
| 1260 | #ifdef CONFIG_AUDITSYSCALL | ||
| 1261 | if (!dont_count) | ||
| 1262 | audit_n_rules--; | ||
| 1263 | #endif | ||
| 1238 | mutex_unlock(&audit_filter_mutex); | 1264 | mutex_unlock(&audit_filter_mutex); |
| 1239 | 1265 | ||
| 1240 | if (!list_empty(&inotify_list)) | 1266 | if (!list_empty(&inotify_list)) |
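The two one-line additions set err before jumping to the shared cleanup label; without them a rule with an unsupported operator would be freed yet the function could still report success. Below is a generic sketch of the "set the error code, then goto the common exit" pattern, with made-up names and a locally defined EINVAL.

/* Generic sketch of the fix: every early exit through the shared cleanup
 * label must set the error code first. */
#include <stdio.h>
#include <stdlib.h>

#define EINVAL 22

static int parse_rule(int op)
{
    int err = 0;
    char *entry = malloc(32);

    if (!entry)
        return -1;

    switch (op) {
    case 0:               /* supported operators */
    case 1:
        break;
    default:
        err = -EINVAL;    /* the line the patch adds */
        goto exit_free;
    }

    /* ... build the rule ... */

exit_free:
    free(entry);
    return err;           /* without the assignment this would be 0 */
}

int main(void)
{
    printf("valid op:   %d\n", parse_rule(1));
    printf("invalid op: %d\n", parse_rule(7));
    return 0;
}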
diff --git a/kernel/auditsc.c b/kernel/auditsc.c index ae40ac8c39e7..efc1b74bebf3 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c | |||
| @@ -85,6 +85,9 @@ extern int audit_enabled; | |||
| 85 | /* Indicates that audit should log the full pathname. */ | 85 | /* Indicates that audit should log the full pathname. */ |
| 86 | #define AUDIT_NAME_FULL -1 | 86 | #define AUDIT_NAME_FULL -1 |
| 87 | 87 | ||
| 88 | /* number of audit rules */ | ||
| 89 | int audit_n_rules; | ||
| 90 | |||
| 88 | /* When fs/namei.c:getname() is called, we store the pointer in name and | 91 | /* When fs/namei.c:getname() is called, we store the pointer in name and |
| 89 | * we don't let putname() free it (instead we free all of the saved | 92 | * we don't let putname() free it (instead we free all of the saved |
| 90 | * pointers at syscall exit time). | 93 | * pointers at syscall exit time). |
| @@ -174,6 +177,7 @@ struct audit_aux_data_path { | |||
| 174 | 177 | ||
| 175 | /* The per-task audit context. */ | 178 | /* The per-task audit context. */ |
| 176 | struct audit_context { | 179 | struct audit_context { |
| 180 | int dummy; /* must be the first element */ | ||
| 177 | int in_syscall; /* 1 if task is in a syscall */ | 181 | int in_syscall; /* 1 if task is in a syscall */ |
| 178 | enum audit_state state; | 182 | enum audit_state state; |
| 179 | unsigned int serial; /* serial number for record */ | 183 | unsigned int serial; /* serial number for record */ |
| @@ -514,7 +518,7 @@ static inline struct audit_context *audit_get_context(struct task_struct *tsk, | |||
| 514 | context->return_valid = return_valid; | 518 | context->return_valid = return_valid; |
| 515 | context->return_code = return_code; | 519 | context->return_code = return_code; |
| 516 | 520 | ||
| 517 | if (context->in_syscall && !context->auditable) { | 521 | if (context->in_syscall && !context->dummy && !context->auditable) { |
| 518 | enum audit_state state; | 522 | enum audit_state state; |
| 519 | 523 | ||
| 520 | state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]); | 524 | state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]); |
| @@ -530,17 +534,7 @@ static inline struct audit_context *audit_get_context(struct task_struct *tsk, | |||
| 530 | } | 534 | } |
| 531 | 535 | ||
| 532 | get_context: | 536 | get_context: |
| 533 | context->pid = tsk->pid; | 537 | |
| 534 | context->ppid = sys_getppid(); /* sic. tsk == current in all cases */ | ||
| 535 | context->uid = tsk->uid; | ||
| 536 | context->gid = tsk->gid; | ||
| 537 | context->euid = tsk->euid; | ||
| 538 | context->suid = tsk->suid; | ||
| 539 | context->fsuid = tsk->fsuid; | ||
| 540 | context->egid = tsk->egid; | ||
| 541 | context->sgid = tsk->sgid; | ||
| 542 | context->fsgid = tsk->fsgid; | ||
| 543 | context->personality = tsk->personality; | ||
| 544 | tsk->audit_context = NULL; | 538 | tsk->audit_context = NULL; |
| 545 | return context; | 539 | return context; |
| 546 | } | 540 | } |
| @@ -749,6 +743,17 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts | |||
| 749 | const char *tty; | 743 | const char *tty; |
| 750 | 744 | ||
| 751 | /* tsk == current */ | 745 | /* tsk == current */ |
| 746 | context->pid = tsk->pid; | ||
| 747 | context->ppid = sys_getppid(); /* sic. tsk == current in all cases */ | ||
| 748 | context->uid = tsk->uid; | ||
| 749 | context->gid = tsk->gid; | ||
| 750 | context->euid = tsk->euid; | ||
| 751 | context->suid = tsk->suid; | ||
| 752 | context->fsuid = tsk->fsuid; | ||
| 753 | context->egid = tsk->egid; | ||
| 754 | context->sgid = tsk->sgid; | ||
| 755 | context->fsgid = tsk->fsgid; | ||
| 756 | context->personality = tsk->personality; | ||
| 752 | 757 | ||
| 753 | ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); | 758 | ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); |
| 754 | if (!ab) | 759 | if (!ab) |
| @@ -1066,7 +1071,8 @@ void audit_syscall_entry(int arch, int major, | |||
| 1066 | context->argv[3] = a4; | 1071 | context->argv[3] = a4; |
| 1067 | 1072 | ||
| 1068 | state = context->state; | 1073 | state = context->state; |
| 1069 | if (state == AUDIT_SETUP_CONTEXT || state == AUDIT_BUILD_CONTEXT) | 1074 | context->dummy = !audit_n_rules; |
| 1075 | if (!context->dummy && (state == AUDIT_SETUP_CONTEXT || state == AUDIT_BUILD_CONTEXT)) | ||
| 1070 | state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_ENTRY]); | 1076 | state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_ENTRY]); |
| 1071 | if (likely(state == AUDIT_DISABLED)) | 1077 | if (likely(state == AUDIT_DISABLED)) |
| 1072 | return; | 1078 | return; |
| @@ -1199,14 +1205,18 @@ void audit_putname(const char *name) | |||
| 1199 | #endif | 1205 | #endif |
| 1200 | } | 1206 | } |
| 1201 | 1207 | ||
| 1202 | static void audit_inode_context(int idx, const struct inode *inode) | 1208 | /* Copy inode data into an audit_names. */ |
| 1209 | static void audit_copy_inode(struct audit_names *name, const struct inode *inode) | ||
| 1203 | { | 1210 | { |
| 1204 | struct audit_context *context = current->audit_context; | 1211 | name->ino = inode->i_ino; |
| 1205 | 1212 | name->dev = inode->i_sb->s_dev; | |
| 1206 | selinux_get_inode_sid(inode, &context->names[idx].osid); | 1213 | name->mode = inode->i_mode; |
| 1214 | name->uid = inode->i_uid; | ||
| 1215 | name->gid = inode->i_gid; | ||
| 1216 | name->rdev = inode->i_rdev; | ||
| 1217 | selinux_get_inode_sid(inode, &name->osid); | ||
| 1207 | } | 1218 | } |
| 1208 | 1219 | ||
| 1209 | |||
| 1210 | /** | 1220 | /** |
| 1211 | * audit_inode - store the inode and device from a lookup | 1221 | * audit_inode - store the inode and device from a lookup |
| 1212 | * @name: name being audited | 1222 | * @name: name being audited |
| @@ -1240,20 +1250,14 @@ void __audit_inode(const char *name, const struct inode *inode) | |||
| 1240 | ++context->ino_count; | 1250 | ++context->ino_count; |
| 1241 | #endif | 1251 | #endif |
| 1242 | } | 1252 | } |
| 1243 | context->names[idx].ino = inode->i_ino; | 1253 | audit_copy_inode(&context->names[idx], inode); |
| 1244 | context->names[idx].dev = inode->i_sb->s_dev; | ||
| 1245 | context->names[idx].mode = inode->i_mode; | ||
| 1246 | context->names[idx].uid = inode->i_uid; | ||
| 1247 | context->names[idx].gid = inode->i_gid; | ||
| 1248 | context->names[idx].rdev = inode->i_rdev; | ||
| 1249 | audit_inode_context(idx, inode); | ||
| 1250 | } | 1254 | } |
| 1251 | 1255 | ||
| 1252 | /** | 1256 | /** |
| 1253 | * audit_inode_child - collect inode info for created/removed objects | 1257 | * audit_inode_child - collect inode info for created/removed objects |
| 1254 | * @dname: inode's dentry name | 1258 | * @dname: inode's dentry name |
| 1255 | * @inode: inode being audited | 1259 | * @inode: inode being audited |
| 1256 | * @pino: inode number of dentry parent | 1260 | * @parent: inode of dentry parent |
| 1257 | * | 1261 | * |
| 1258 | * For syscalls that create or remove filesystem objects, audit_inode | 1262 | * For syscalls that create or remove filesystem objects, audit_inode |
| 1259 | * can only collect information for the filesystem object's parent. | 1263 | * can only collect information for the filesystem object's parent. |
| @@ -1264,7 +1268,7 @@ void __audit_inode(const char *name, const struct inode *inode) | |||
| 1264 | * unsuccessful attempts. | 1268 | * unsuccessful attempts. |
| 1265 | */ | 1269 | */ |
| 1266 | void __audit_inode_child(const char *dname, const struct inode *inode, | 1270 | void __audit_inode_child(const char *dname, const struct inode *inode, |
| 1267 | unsigned long pino) | 1271 | const struct inode *parent) |
| 1268 | { | 1272 | { |
| 1269 | int idx; | 1273 | int idx; |
| 1270 | struct audit_context *context = current->audit_context; | 1274 | struct audit_context *context = current->audit_context; |
| @@ -1278,7 +1282,7 @@ void __audit_inode_child(const char *dname, const struct inode *inode, | |||
| 1278 | if (!dname) | 1282 | if (!dname) |
| 1279 | goto update_context; | 1283 | goto update_context; |
| 1280 | for (idx = 0; idx < context->name_count; idx++) | 1284 | for (idx = 0; idx < context->name_count; idx++) |
| 1281 | if (context->names[idx].ino == pino) { | 1285 | if (context->names[idx].ino == parent->i_ino) { |
| 1282 | const char *name = context->names[idx].name; | 1286 | const char *name = context->names[idx].name; |
| 1283 | 1287 | ||
| 1284 | if (!name) | 1288 | if (!name) |
| @@ -1302,16 +1306,47 @@ update_context: | |||
| 1302 | context->names[idx].name_len = AUDIT_NAME_FULL; | 1306 | context->names[idx].name_len = AUDIT_NAME_FULL; |
| 1303 | context->names[idx].name_put = 0; /* don't call __putname() */ | 1307 | context->names[idx].name_put = 0; /* don't call __putname() */ |
| 1304 | 1308 | ||
| 1305 | if (inode) { | 1309 | if (!inode) |
| 1306 | context->names[idx].ino = inode->i_ino; | 1310 | context->names[idx].ino = (unsigned long)-1; |
| 1307 | context->names[idx].dev = inode->i_sb->s_dev; | 1311 | else |
| 1308 | context->names[idx].mode = inode->i_mode; | 1312 | audit_copy_inode(&context->names[idx], inode); |
| 1309 | context->names[idx].uid = inode->i_uid; | 1313 | |
| 1310 | context->names[idx].gid = inode->i_gid; | 1314 | /* A parent was not found in audit_names, so copy the inode data for the |
| 1311 | context->names[idx].rdev = inode->i_rdev; | 1315 | * provided parent. */ |
| 1312 | audit_inode_context(idx, inode); | 1316 | if (!found_name) { |
| 1313 | } else | 1317 | idx = context->name_count++; |
| 1314 | context->names[idx].ino = (unsigned long)-1; | 1318 | #if AUDIT_DEBUG |
| 1319 | context->ino_count++; | ||
| 1320 | #endif | ||
| 1321 | audit_copy_inode(&context->names[idx], parent); | ||
| 1322 | } | ||
| 1323 | } | ||
| 1324 | |||
| 1325 | /** | ||
| 1326 | * audit_inode_update - update inode info for last collected name | ||
| 1327 | * @inode: inode being audited | ||
| 1328 | * | ||
| 1329 | * When open() is called on an existing object with the O_CREAT flag, the inode | ||
| 1330 | * data audit initially collects is incorrect. This additional hook ensures | ||
| 1331 | * audit has the inode data for the actual object to be opened. | ||
| 1332 | */ | ||
| 1333 | void __audit_inode_update(const struct inode *inode) | ||
| 1334 | { | ||
| 1335 | struct audit_context *context = current->audit_context; | ||
| 1336 | int idx; | ||
| 1337 | |||
| 1338 | if (!context->in_syscall || !inode) | ||
| 1339 | return; | ||
| 1340 | |||
| 1341 | if (context->name_count == 0) { | ||
| 1342 | context->name_count++; | ||
| 1343 | #if AUDIT_DEBUG | ||
| 1344 | context->ino_count++; | ||
| 1345 | #endif | ||
| 1346 | } | ||
| 1347 | idx = context->name_count - 1; | ||
| 1348 | |||
| 1349 | audit_copy_inode(&context->names[idx], inode); | ||
| 1315 | } | 1350 | } |
| 1316 | 1351 | ||
| 1317 | /** | 1352 | /** |
| @@ -1642,7 +1677,7 @@ int audit_bprm(struct linux_binprm *bprm) | |||
| 1642 | unsigned long p, next; | 1677 | unsigned long p, next; |
| 1643 | void *to; | 1678 | void *to; |
| 1644 | 1679 | ||
| 1645 | if (likely(!audit_enabled || !context)) | 1680 | if (likely(!audit_enabled || !context || context->dummy)) |
| 1646 | return 0; | 1681 | return 0; |
| 1647 | 1682 | ||
| 1648 | ax = kmalloc(sizeof(*ax) + PAGE_SIZE * MAX_ARG_PAGES - bprm->p, | 1683 | ax = kmalloc(sizeof(*ax) + PAGE_SIZE * MAX_ARG_PAGES - bprm->p, |
| @@ -1680,7 +1715,7 @@ int audit_socketcall(int nargs, unsigned long *args) | |||
| 1680 | struct audit_aux_data_socketcall *ax; | 1715 | struct audit_aux_data_socketcall *ax; |
| 1681 | struct audit_context *context = current->audit_context; | 1716 | struct audit_context *context = current->audit_context; |
| 1682 | 1717 | ||
| 1683 | if (likely(!context)) | 1718 | if (likely(!context || context->dummy)) |
| 1684 | return 0; | 1719 | return 0; |
| 1685 | 1720 | ||
| 1686 | ax = kmalloc(sizeof(*ax) + nargs * sizeof(unsigned long), GFP_KERNEL); | 1721 | ax = kmalloc(sizeof(*ax) + nargs * sizeof(unsigned long), GFP_KERNEL); |
| @@ -1708,7 +1743,7 @@ int audit_sockaddr(int len, void *a) | |||
| 1708 | struct audit_aux_data_sockaddr *ax; | 1743 | struct audit_aux_data_sockaddr *ax; |
| 1709 | struct audit_context *context = current->audit_context; | 1744 | struct audit_context *context = current->audit_context; |
| 1710 | 1745 | ||
| 1711 | if (likely(!context)) | 1746 | if (likely(!context || context->dummy)) |
| 1712 | return 0; | 1747 | return 0; |
| 1713 | 1748 | ||
| 1714 | ax = kmalloc(sizeof(*ax) + len, GFP_KERNEL); | 1749 | ax = kmalloc(sizeof(*ax) + len, GFP_KERNEL); |
diff --git a/kernel/fork.c b/kernel/fork.c index 1b0f7b1e0881..aa36c43783cc 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -1387,8 +1387,10 @@ long do_fork(unsigned long clone_flags, | |||
| 1387 | 1387 | ||
| 1388 | if (clone_flags & CLONE_VFORK) { | 1388 | if (clone_flags & CLONE_VFORK) { |
| 1389 | wait_for_completion(&vfork); | 1389 | wait_for_completion(&vfork); |
| 1390 | if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) | 1390 | if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) { |
| 1391 | current->ptrace_message = nr; | ||
| 1391 | ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP); | 1392 | ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP); |
| 1393 | } | ||
| 1392 | } | 1394 | } |
| 1393 | } else { | 1395 | } else { |
| 1394 | free_pid(pid); | 1396 | free_pid(pid); |
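
The fork.c change stores the new child's pid in current->ptrace_message before raising PTRACE_EVENT_VFORK_DONE, so a tracer can pick it up with PTRACE_GETEVENTMSG at the vfork-done stop. A minimal user-space sketch of the tracer side, assuming PTRACE_O_TRACEVFORKDONE was already set on the tracee and that <sys/ptrace.h> provides the event constants (error handling omitted):

#include <stdio.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Handle one waitpid() status from the traced process; not kernel code. */
void handle_stop(pid_t tracee, int status)
{
        /* A vfork-done stop is SIGTRAP with the event code in the high bits. */
        if (WIFSTOPPED(status) &&
            (status >> 8) == (SIGTRAP | (PTRACE_EVENT_VFORK_DONE << 8))) {
                unsigned long child_pid;

                /* With this change, the event message is the vforked child's pid. */
                if (ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &child_pid) == 0)
                        printf("vfork done, new child pid %lu\n", child_pid);
        }
        ptrace(PTRACE_CONT, tracee, NULL, NULL);
}
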
diff --git a/kernel/futex.c b/kernel/futex.c index dda2049692a2..c2b2e0b83abf 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
| @@ -948,6 +948,7 @@ static int unqueue_me(struct futex_q *q) | |||
| 948 | /* In the common case we don't take the spinlock, which is nice. */ | 948 | /* In the common case we don't take the spinlock, which is nice. */ |
| 949 | retry: | 949 | retry: |
| 950 | lock_ptr = q->lock_ptr; | 950 | lock_ptr = q->lock_ptr; |
| 951 | barrier(); | ||
| 951 | if (lock_ptr != 0) { | 952 | if (lock_ptr != 0) { |
| 952 | spin_lock(lock_ptr); | 953 | spin_lock(lock_ptr); |
| 953 | /* | 954 | /* |
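
The barrier() added to unqueue_me() is a plain compiler barrier: it keeps the compiler from re-reading q->lock_ptr after the NULL test, where a waker clearing the pointer in between would otherwise lead to a NULL dereference. The snapshot-then-barrier pattern in isolation, as a user-space sketch with invented names:

#include <pthread.h>
#include <stddef.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

struct waiter {
        pthread_spinlock_t *lock_ptr;   /* may be set to NULL by a waker */
};

void unqueue(struct waiter *q)
{
        pthread_spinlock_t *lock_ptr = q->lock_ptr;     /* take a snapshot */

        barrier();      /* forbid the compiler from re-loading q->lock_ptr below */
        if (lock_ptr != NULL) {
                pthread_spin_lock(lock_ptr);
                /* ... re-check q->lock_ptr under the lock, then unlink ... */
                pthread_spin_unlock(lock_ptr);
        }
}

Only the snapshot is ever dereferenced, and it is re-validated under the lock, matching what unqueue_me() does.
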
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index d1aab1a452cc..c5cca3f65cb7 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c | |||
| @@ -39,7 +39,7 @@ void compat_exit_robust_list(struct task_struct *curr) | |||
| 39 | { | 39 | { |
| 40 | struct compat_robust_list_head __user *head = curr->compat_robust_list; | 40 | struct compat_robust_list_head __user *head = curr->compat_robust_list; |
| 41 | struct robust_list __user *entry, *pending; | 41 | struct robust_list __user *entry, *pending; |
| 42 | unsigned int limit = ROBUST_LIST_LIMIT, pi; | 42 | unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; |
| 43 | compat_uptr_t uentry, upending; | 43 | compat_uptr_t uentry, upending; |
| 44 | compat_long_t futex_offset; | 44 | compat_long_t futex_offset; |
| 45 | 45 | ||
| @@ -59,10 +59,10 @@ void compat_exit_robust_list(struct task_struct *curr) | |||
| 59 | * if it exists: | 59 | * if it exists: |
| 60 | */ | 60 | */ |
| 61 | if (fetch_robust_entry(&upending, &pending, | 61 | if (fetch_robust_entry(&upending, &pending, |
| 62 | &head->list_op_pending, &pi)) | 62 | &head->list_op_pending, &pip)) |
| 63 | return; | 63 | return; |
| 64 | if (upending) | 64 | if (upending) |
| 65 | handle_futex_death((void *)pending + futex_offset, curr, pi); | 65 | handle_futex_death((void *)pending + futex_offset, curr, pip); |
| 66 | 66 | ||
| 67 | while (compat_ptr(uentry) != &head->list) { | 67 | while (compat_ptr(uentry) != &head->list) { |
| 68 | /* | 68 | /* |
diff --git a/kernel/power/process.c b/kernel/power/process.c index b2a5f671d6cd..72e72d2c61e6 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
| @@ -66,13 +66,25 @@ static inline void freeze_process(struct task_struct *p) | |||
| 66 | } | 66 | } |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | static void cancel_freezing(struct task_struct *p) | ||
| 70 | { | ||
| 71 | unsigned long flags; | ||
| 72 | |||
| 73 | if (freezing(p)) { | ||
| 74 | pr_debug(" clean up: %s\n", p->comm); | ||
| 75 | do_not_freeze(p); | ||
| 76 | spin_lock_irqsave(&p->sighand->siglock, flags); | ||
| 77 | recalc_sigpending_tsk(p); | ||
| 78 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | ||
| 79 | } | ||
| 80 | } | ||
| 81 | |||
| 69 | /* 0 = success, else # of processes that we failed to stop */ | 82 | /* 0 = success, else # of processes that we failed to stop */ |
| 70 | int freeze_processes(void) | 83 | int freeze_processes(void) |
| 71 | { | 84 | { |
| 72 | int todo, nr_user, user_frozen; | 85 | int todo, nr_user, user_frozen; |
| 73 | unsigned long start_time; | 86 | unsigned long start_time; |
| 74 | struct task_struct *g, *p; | 87 | struct task_struct *g, *p; |
| 75 | unsigned long flags; | ||
| 76 | 88 | ||
| 77 | printk( "Stopping tasks: " ); | 89 | printk( "Stopping tasks: " ); |
| 78 | start_time = jiffies; | 90 | start_time = jiffies; |
| @@ -85,6 +97,10 @@ int freeze_processes(void) | |||
| 85 | continue; | 97 | continue; |
| 86 | if (frozen(p)) | 98 | if (frozen(p)) |
| 87 | continue; | 99 | continue; |
| 100 | if (p->state == TASK_TRACED && frozen(p->parent)) { | ||
| 101 | cancel_freezing(p); | ||
| 102 | continue; | ||
| 103 | } | ||
| 88 | if (p->mm && !(p->flags & PF_BORROWED_MM)) { | 104 | if (p->mm && !(p->flags & PF_BORROWED_MM)) { |
| 89 | /* The task is a user-space one. | 105 | /* The task is a user-space one. |
| 90 | * Freeze it unless there's a vfork completion | 106 | * Freeze it unless there's a vfork completion |
| @@ -126,13 +142,7 @@ int freeze_processes(void) | |||
| 126 | do_each_thread(g, p) { | 142 | do_each_thread(g, p) { |
| 127 | if (freezeable(p) && !frozen(p)) | 143 | if (freezeable(p) && !frozen(p)) |
| 128 | printk(KERN_ERR " %s\n", p->comm); | 144 | printk(KERN_ERR " %s\n", p->comm); |
| 129 | if (freezing(p)) { | 145 | cancel_freezing(p); |
| 130 | pr_debug(" clean up: %s\n", p->comm); | ||
| 131 | p->flags &= ~PF_FREEZE; | ||
| 132 | spin_lock_irqsave(&p->sighand->siglock, flags); | ||
| 133 | recalc_sigpending_tsk(p); | ||
| 134 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | ||
| 135 | } | ||
| 136 | } while_each_thread(g, p); | 146 | } while_each_thread(g, p); |
| 137 | read_unlock(&tasklist_lock); | 147 | read_unlock(&tasklist_lock); |
| 138 | return todo; | 148 | return todo; |
diff --git a/kernel/printk.c b/kernel/printk.c index 65ca0688f86f..1149365e989e 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
| @@ -799,6 +799,9 @@ void release_console_sem(void) | |||
| 799 | up(&secondary_console_sem); | 799 | up(&secondary_console_sem); |
| 800 | return; | 800 | return; |
| 801 | } | 801 | } |
| 802 | |||
| 803 | console_may_schedule = 0; | ||
| 804 | |||
| 802 | for ( ; ; ) { | 805 | for ( ; ; ) { |
| 803 | spin_lock_irqsave(&logbuf_lock, flags); | 806 | spin_lock_irqsave(&logbuf_lock, flags); |
| 804 | wake_klogd |= log_start - log_end; | 807 | wake_klogd |= log_start - log_end; |
| @@ -812,7 +815,6 @@ void release_console_sem(void) | |||
| 812 | local_irq_restore(flags); | 815 | local_irq_restore(flags); |
| 813 | } | 816 | } |
| 814 | console_locked = 0; | 817 | console_locked = 0; |
| 815 | console_may_schedule = 0; | ||
| 816 | up(&console_sem); | 818 | up(&console_sem); |
| 817 | spin_unlock_irqrestore(&logbuf_lock, flags); | 819 | spin_unlock_irqrestore(&logbuf_lock, flags); |
| 818 | if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait)) { | 820 | if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait)) { |
diff --git a/kernel/resource.c b/kernel/resource.c index 0dd3a857579e..46286434af80 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
| @@ -244,6 +244,7 @@ int find_next_system_ram(struct resource *res) | |||
| 244 | 244 | ||
| 245 | start = res->start; | 245 | start = res->start; |
| 246 | end = res->end; | 246 | end = res->end; |
| 247 | BUG_ON(start >= end); | ||
| 247 | 248 | ||
| 248 | read_lock(&resource_lock); | 249 | read_lock(&resource_lock); |
| 249 | for (p = iomem_resource.child; p ; p = p->sibling) { | 250 | for (p = iomem_resource.child; p ; p = p->sibling) { |
| @@ -254,15 +255,17 @@ int find_next_system_ram(struct resource *res) | |||
| 254 | p = NULL; | 255 | p = NULL; |
| 255 | break; | 256 | break; |
| 256 | } | 257 | } |
| 257 | if (p->start >= start) | 258 | if ((p->end >= start) && (p->start < end)) |
| 258 | break; | 259 | break; |
| 259 | } | 260 | } |
| 260 | read_unlock(&resource_lock); | 261 | read_unlock(&resource_lock); |
| 261 | if (!p) | 262 | if (!p) |
| 262 | return -1; | 263 | return -1; |
| 263 | /* copy data */ | 264 | /* copy data */ |
| 264 | res->start = p->start; | 265 | if (res->start < p->start) |
| 265 | res->end = p->end; | 266 | res->start = p->start; |
| 267 | if (res->end > p->end) | ||
| 268 | res->end = p->end; | ||
| 266 | return 0; | 269 | return 0; |
| 267 | } | 270 | } |
| 268 | #endif | 271 | #endif |
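
The reworked find_next_system_ram() accepts any RAM resource that overlaps the requested range and then clips the request to that overlap, instead of only matching resources that start at or after res->start. The same overlap-and-clamp logic in stand-alone form (user-space sketch, types simplified):

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; };     /* inclusive bounds, like struct resource */

/* Return true and clip *req to the overlap if p intersects the request. */
bool clamp_to_overlap(struct range *req, const struct range *p)
{
        if (p->end < req->start || p->start >= req->end)
                return false;                   /* no overlap: keep scanning siblings */
        if (req->start < p->start)
                req->start = p->start;          /* clip the front of the request */
        if (req->end > p->end)
                req->end = p->end;              /* clip the tail of the request */
        return true;
}

int main(void)
{
        struct range req = { 0x1000, 0x9000 };
        struct range ram = { 0x0000, 0x4fff };

        if (clamp_to_overlap(&req, &ram))
                printf("clipped to %#lx-%#lx\n", req.start, req.end);  /* 0x1000-0x4fff */
        return 0;
}
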
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 2b1530fc573b..7f20e7b857cb 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
| @@ -50,10 +50,6 @@ static char *action_to_string(enum kobject_action action) | |||
| 50 | return "offline"; | 50 | return "offline"; |
| 51 | case KOBJ_ONLINE: | 51 | case KOBJ_ONLINE: |
| 52 | return "online"; | 52 | return "online"; |
| 53 | case KOBJ_DOCK: | ||
| 54 | return "dock"; | ||
| 55 | case KOBJ_UNDOCK: | ||
| 56 | return "undock"; | ||
| 57 | default: | 53 | default: |
| 58 | return NULL; | 54 | return NULL; |
| 59 | } | 55 | } |
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 3d9c4dc965ed..58c577dd82e5 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c | |||
| @@ -162,6 +162,7 @@ static void rwlock_bug(rwlock_t *lock, const char *msg) | |||
| 162 | 162 | ||
| 163 | #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg) | 163 | #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg) |
| 164 | 164 | ||
| 165 | #if 0 /* __write_lock_debug() can lock up - maybe this can too? */ | ||
| 165 | static void __read_lock_debug(rwlock_t *lock) | 166 | static void __read_lock_debug(rwlock_t *lock) |
| 166 | { | 167 | { |
| 167 | int print_once = 1; | 168 | int print_once = 1; |
| @@ -184,12 +185,12 @@ static void __read_lock_debug(rwlock_t *lock) | |||
| 184 | } | 185 | } |
| 185 | } | 186 | } |
| 186 | } | 187 | } |
| 188 | #endif | ||
| 187 | 189 | ||
| 188 | void _raw_read_lock(rwlock_t *lock) | 190 | void _raw_read_lock(rwlock_t *lock) |
| 189 | { | 191 | { |
| 190 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); | 192 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); |
| 191 | if (unlikely(!__raw_read_trylock(&lock->raw_lock))) | 193 | __raw_read_lock(&lock->raw_lock); |
| 192 | __read_lock_debug(lock); | ||
| 193 | } | 194 | } |
| 194 | 195 | ||
| 195 | int _raw_read_trylock(rwlock_t *lock) | 196 | int _raw_read_trylock(rwlock_t *lock) |
| @@ -235,6 +236,7 @@ static inline void debug_write_unlock(rwlock_t *lock) | |||
| 235 | lock->owner_cpu = -1; | 236 | lock->owner_cpu = -1; |
| 236 | } | 237 | } |
| 237 | 238 | ||
| 239 | #if 0 /* This can cause lockups */ | ||
| 238 | static void __write_lock_debug(rwlock_t *lock) | 240 | static void __write_lock_debug(rwlock_t *lock) |
| 239 | { | 241 | { |
| 240 | int print_once = 1; | 242 | int print_once = 1; |
| @@ -257,12 +259,12 @@ static void __write_lock_debug(rwlock_t *lock) | |||
| 257 | } | 259 | } |
| 258 | } | 260 | } |
| 259 | } | 261 | } |
| 262 | #endif | ||
| 260 | 263 | ||
| 261 | void _raw_write_lock(rwlock_t *lock) | 264 | void _raw_write_lock(rwlock_t *lock) |
| 262 | { | 265 | { |
| 263 | debug_write_lock_before(lock); | 266 | debug_write_lock_before(lock); |
| 264 | if (unlikely(!__raw_write_trylock(&lock->raw_lock))) | 267 | __raw_write_lock(&lock->raw_lock); |
| 265 | __write_lock_debug(lock); | ||
| 266 | debug_write_lock_after(lock); | 268 | debug_write_lock_after(lock); |
| 267 | } | 269 | } |
| 268 | 270 | ||
diff --git a/mm/fadvise.c b/mm/fadvise.c index 60a5d55e51d9..168c78a121bb 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c | |||
| @@ -73,7 +73,6 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) | |||
| 73 | file->f_ra.ra_pages = bdi->ra_pages * 2; | 73 | file->f_ra.ra_pages = bdi->ra_pages * 2; |
| 74 | break; | 74 | break; |
| 75 | case POSIX_FADV_WILLNEED: | 75 | case POSIX_FADV_WILLNEED: |
| 76 | case POSIX_FADV_NOREUSE: | ||
| 77 | if (!mapping->a_ops->readpage) { | 76 | if (!mapping->a_ops->readpage) { |
| 78 | ret = -EINVAL; | 77 | ret = -EINVAL; |
| 79 | break; | 78 | break; |
| @@ -94,6 +93,8 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) | |||
| 94 | if (ret > 0) | 93 | if (ret > 0) |
| 95 | ret = 0; | 94 | ret = 0; |
| 96 | break; | 95 | break; |
| 96 | case POSIX_FADV_NOREUSE: | ||
| 97 | break; | ||
| 97 | case POSIX_FADV_DONTNEED: | 98 | case POSIX_FADV_DONTNEED: |
| 98 | if (!bdi_write_congested(mapping->backing_dev_info)) | 99 | if (!bdi_write_congested(mapping->backing_dev_info)) |
| 99 | filemap_flush(mapping); | 100 | filemap_flush(mapping); |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 01c9fb97c619..c37319542b70 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
| @@ -52,6 +52,9 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn) | |||
| 52 | int nr_pages = PAGES_PER_SECTION; | 52 | int nr_pages = PAGES_PER_SECTION; |
| 53 | int ret; | 53 | int ret; |
| 54 | 54 | ||
| 55 | if (pfn_valid(phys_start_pfn)) | ||
| 56 | return -EEXIST; | ||
| 57 | |||
| 55 | ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages); | 58 | ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages); |
| 56 | 59 | ||
| 57 | if (ret < 0) | 60 | if (ret < 0) |
| @@ -76,15 +79,22 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn, | |||
| 76 | { | 79 | { |
| 77 | unsigned long i; | 80 | unsigned long i; |
| 78 | int err = 0; | 81 | int err = 0; |
| 82 | int start_sec, end_sec; | ||
| 83 | /* when initializing mem_map, align the hot-added range to sections */ | ||
| 84 | start_sec = pfn_to_section_nr(phys_start_pfn); | ||
| 85 | end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1); | ||
| 79 | 86 | ||
| 80 | for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) { | 87 | for (i = start_sec; i <= end_sec; i++) { |
| 81 | err = __add_section(zone, phys_start_pfn + i); | 88 | err = __add_section(zone, i << PFN_SECTION_SHIFT); |
| 82 | 89 | ||
| 83 | /* We want to keep adding the rest of the | 90 | /* |
| 84 | * sections if the first ones already exist | 91 | * EEXIST is finally dealt with by the ioresource collision |
| 92 | * check. See add_memory() => register_memory_resource(). | ||
| 93 | * A warning will be printed if there is a collision. | ||
| 85 | */ | 94 | */ |
| 86 | if (err && (err != -EEXIST)) | 95 | if (err && (err != -EEXIST)) |
| 87 | break; | 96 | break; |
| 97 | err = 0; | ||
| 88 | } | 98 | } |
| 89 | 99 | ||
| 90 | return err; | 100 | return err; |
| @@ -156,7 +166,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) | |||
| 156 | res.flags = IORESOURCE_MEM; /* we just need system ram */ | 166 | res.flags = IORESOURCE_MEM; /* we just need system ram */ |
| 157 | section_end = res.end; | 167 | section_end = res.end; |
| 158 | 168 | ||
| 159 | while (find_next_system_ram(&res) >= 0) { | 169 | while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) { |
| 160 | start_pfn = (unsigned long)(res.start >> PAGE_SHIFT); | 170 | start_pfn = (unsigned long)(res.start >> PAGE_SHIFT); |
| 161 | nr_pages = (unsigned long) | 171 | nr_pages = (unsigned long) |
| 162 | ((res.end + 1 - res.start) >> PAGE_SHIFT); | 172 | ((res.end + 1 - res.start) >> PAGE_SHIFT); |
| @@ -213,10 +223,9 @@ static void rollback_node_hotadd(int nid, pg_data_t *pgdat) | |||
| 213 | } | 223 | } |
| 214 | 224 | ||
| 215 | /* add this memory to iomem resource */ | 225 | /* add this memory to iomem resource */ |
| 216 | static void register_memory_resource(u64 start, u64 size) | 226 | static struct resource *register_memory_resource(u64 start, u64 size) |
| 217 | { | 227 | { |
| 218 | struct resource *res; | 228 | struct resource *res; |
| 219 | |||
| 220 | res = kzalloc(sizeof(struct resource), GFP_KERNEL); | 229 | res = kzalloc(sizeof(struct resource), GFP_KERNEL); |
| 221 | BUG_ON(!res); | 230 | BUG_ON(!res); |
| 222 | 231 | ||
| @@ -228,7 +237,18 @@ static void register_memory_resource(u64 start, u64 size) | |||
| 228 | printk("System RAM resource %llx - %llx cannot be added\n", | 237 | printk("System RAM resource %llx - %llx cannot be added\n", |
| 229 | (unsigned long long)res->start, (unsigned long long)res->end); | 238 | (unsigned long long)res->start, (unsigned long long)res->end); |
| 230 | kfree(res); | 239 | kfree(res); |
| 240 | res = NULL; | ||
| 231 | } | 241 | } |
| 242 | return res; | ||
| 243 | } | ||
| 244 | |||
| 245 | static void release_memory_resource(struct resource *res) | ||
| 246 | { | ||
| 247 | if (!res) | ||
| 248 | return; | ||
| 249 | release_resource(res); | ||
| 250 | kfree(res); | ||
| 251 | return; | ||
| 232 | } | 252 | } |
| 233 | 253 | ||
| 234 | 254 | ||
| @@ -237,8 +257,13 @@ int add_memory(int nid, u64 start, u64 size) | |||
| 237 | { | 257 | { |
| 238 | pg_data_t *pgdat = NULL; | 258 | pg_data_t *pgdat = NULL; |
| 239 | int new_pgdat = 0; | 259 | int new_pgdat = 0; |
| 260 | struct resource *res; | ||
| 240 | int ret; | 261 | int ret; |
| 241 | 262 | ||
| 263 | res = register_memory_resource(start, size); | ||
| 264 | if (!res) | ||
| 265 | return -EEXIST; | ||
| 266 | |||
| 242 | if (!node_online(nid)) { | 267 | if (!node_online(nid)) { |
| 243 | pgdat = hotadd_new_pgdat(nid, start); | 268 | pgdat = hotadd_new_pgdat(nid, start); |
| 244 | if (!pgdat) | 269 | if (!pgdat) |
| @@ -268,14 +293,13 @@ int add_memory(int nid, u64 start, u64 size) | |||
| 268 | BUG_ON(ret); | 293 | BUG_ON(ret); |
| 269 | } | 294 | } |
| 270 | 295 | ||
| 271 | /* register this memory as resource */ | ||
| 272 | register_memory_resource(start, size); | ||
| 273 | |||
| 274 | return ret; | 296 | return ret; |
| 275 | error: | 297 | error: |
| 276 | /* rollback pgdat allocation and others */ | 298 | /* rollback pgdat allocation and others */ |
| 277 | if (new_pgdat) | 299 | if (new_pgdat) |
| 278 | rollback_node_hotadd(nid, pgdat); | 300 | rollback_node_hotadd(nid, pgdat); |
| 301 | if (res) | ||
| 302 | release_memory_resource(res); | ||
| 279 | 303 | ||
| 280 | return ret; | 304 | return ret; |
| 281 | } | 305 | } |
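
With the new loop, __add_pages() walks the hot-added range one memory section at a time, so a range that is not section-aligned is rounded out to whole sections before __add_section() is called. A small user-space sketch of that arithmetic; the shift constants below are illustrative assumptions, since the real values are architecture-specific:

#include <stdio.h>

#define PAGE_SHIFT          12                  /* assumed: 4 KiB pages */
#define SECTION_SIZE_BITS   27                  /* assumed: 128 MiB sections */
#define PFN_SECTION_SHIFT   (SECTION_SIZE_BITS - PAGE_SHIFT)

unsigned long pfn_to_section_nr(unsigned long pfn)
{
        return pfn >> PFN_SECTION_SHIFT;
}

int main(void)
{
        unsigned long phys_start_pfn = 0x40000;  /* example hot-added range */
        unsigned long nr_pages = 0x30000;
        unsigned long start_sec = pfn_to_section_nr(phys_start_pfn);
        unsigned long end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

        /* The loop walks whole sections, so an unaligned range is rounded
         * out to full section boundaries. */
        for (unsigned long i = start_sec; i <= end_sec; i++)
                printf("add section %lu starting at pfn %#lx\n",
                       i, i << PFN_SECTION_SHIFT);
        return 0;
}
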
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 06abb6634f5b..53086fb75089 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
| @@ -85,7 +85,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port) | |||
| 85 | goto err_out; | 85 | goto err_out; |
| 86 | 86 | ||
| 87 | err = br_fill_ifinfo(skb, port, current->pid, 0, event, 0); | 87 | err = br_fill_ifinfo(skb, port, current->pid, 0, event, 0); |
| 88 | if (err) | 88 | if (err < 0) |
| 89 | goto err_kfree; | 89 | goto err_kfree; |
| 90 | 90 | ||
| 91 | NETLINK_CB(skb).dst_group = RTNLGRP_LINK; | 91 | NETLINK_CB(skb).dst_group = RTNLGRP_LINK; |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 738dad9f7d49..104af5d5bcbc 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -3541,7 +3541,8 @@ void tcp_cwnd_application_limited(struct sock *sk) | |||
| 3541 | if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && | 3541 | if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && |
| 3542 | sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { | 3542 | sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { |
| 3543 | /* Limited by application or receiver window. */ | 3543 | /* Limited by application or receiver window. */ |
| 3544 | u32 win_used = max(tp->snd_cwnd_used, 2U); | 3544 | u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); |
| 3545 | u32 win_used = max(tp->snd_cwnd_used, init_win); | ||
| 3545 | if (win_used < tp->snd_cwnd) { | 3546 | if (win_used < tp->snd_cwnd) { |
| 3546 | tp->snd_ssthresh = tcp_current_ssthresh(sk); | 3547 | tp->snd_ssthresh = tcp_current_ssthresh(sk); |
| 3547 | tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; | 3548 | tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; |
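
The tcp_input.c hunk raises the floor used when shrinking an application-limited congestion window: instead of never counting less than 2 segments as "used", the window in use is never taken to be smaller than the connection's initial congestion window. A rough stand-alone rendering of the arithmetic, assuming tcp_init_cwnd() would return 3 segments here (the real value depends on the route metrics and MSS):

#include <stdio.h>

unsigned int max_u32(unsigned int a, unsigned int b)
{
        return a > b ? a : b;
}

int main(void)
{
        unsigned int snd_cwnd = 40;          /* current congestion window */
        unsigned int snd_cwnd_used = 1;      /* application only used 1 segment */
        unsigned int init_cwnd = 3;          /* assumed tcp_init_cwnd() result */

        unsigned int win_used = max_u32(snd_cwnd_used, init_cwnd);

        if (win_used < snd_cwnd) {
                /* cwnd decays toward what was actually used, but the floor
                 * is now the initial window rather than 2 segments. */
                snd_cwnd = (snd_cwnd + win_used) >> 1;
        }
        printf("new cwnd = %u\n", snd_cwnd);  /* (40 + 3) / 2 = 21 */
        return 0;
}
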
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c index d504eed416f6..7e6bc41eeb21 100644 --- a/net/lapb/lapb_iface.c +++ b/net/lapb/lapb_iface.c | |||
| @@ -238,11 +238,13 @@ int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms) | |||
| 238 | goto out_put; | 238 | goto out_put; |
| 239 | 239 | ||
| 240 | if (lapb->state == LAPB_STATE_0) { | 240 | if (lapb->state == LAPB_STATE_0) { |
| 241 | if (((parms->mode & LAPB_EXTENDED) && | 241 | if (parms->mode & LAPB_EXTENDED) { |
| 242 | (parms->window < 1 || parms->window > 127)) || | 242 | if (parms->window < 1 || parms->window > 127) |
| 243 | (parms->window < 1 || parms->window > 7)) | 243 | goto out_put; |
| 244 | goto out_put; | 244 | } else { |
| 245 | 245 | if (parms->window < 1 || parms->window > 7) | |
| 246 | goto out_put; | ||
| 247 | } | ||
| 246 | lapb->mode = parms->mode; | 248 | lapb->mode = parms->mode; |
| 247 | lapb->window = parms->window; | 249 | lapb->window = parms->window; |
| 248 | } | 250 | } |
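
The rewritten check in lapb_setparms() fixes the original compound condition, which applied the standard (modulo-8) limit of 7 unconditionally and therefore rejected every extended-mode window in the 8..127 range. The corrected validation, reduced to a stand-alone helper; the LAPB_EXTENDED value is a placeholder for the example:

#include <stdbool.h>

#define LAPB_EXTENDED 0x01      /* placeholder flag value for the example */

/* Valid window sizes: 1..127 in extended (modulo-128) mode, 1..7 otherwise. */
bool lapb_window_valid(unsigned int mode, unsigned int window)
{
        if (mode & LAPB_EXTENDED)
                return window >= 1 && window <= 127;
        return window >= 1 && window <= 7;
}
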
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index d6cfe84d521b..2652ead96c64 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
| @@ -784,24 +784,20 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 784 | copied += used; | 784 | copied += used; |
| 785 | len -= used; | 785 | len -= used; |
| 786 | 786 | ||
| 787 | if (used + offset < skb->len) | ||
| 788 | continue; | ||
| 789 | |||
| 790 | if (!(flags & MSG_PEEK)) { | 787 | if (!(flags & MSG_PEEK)) { |
| 791 | sk_eat_skb(sk, skb, 0); | 788 | sk_eat_skb(sk, skb, 0); |
| 792 | *seq = 0; | 789 | *seq = 0; |
| 793 | } | 790 | } |
| 791 | |||
| 792 | /* For non-stream protocols we get one packet per recvmsg call */ | ||
| 793 | if (sk->sk_type != SOCK_STREAM) | ||
| 794 | goto copy_uaddr; | ||
| 795 | |||
| 796 | /* Partial read */ | ||
| 797 | if (used + offset < skb->len) | ||
| 798 | continue; | ||
| 794 | } while (len > 0); | 799 | } while (len > 0); |
| 795 | 800 | ||
| 796 | /* | ||
| 797 | * According to UNIX98, msg_name/msg_namelen are ignored | ||
| 798 | * on connected socket. -ANK | ||
| 799 | * But... af_llc still doesn't have separate sets of methods for | ||
| 800 | * SOCK_DGRAM and SOCK_STREAM :-( So we have to do this test, will | ||
| 801 | * eventually fix this tho :-) -acme | ||
| 802 | */ | ||
| 803 | if (sk->sk_type == SOCK_DGRAM) | ||
| 804 | goto copy_uaddr; | ||
| 805 | out: | 801 | out: |
| 806 | release_sock(sk); | 802 | release_sock(sk); |
| 807 | return copied; | 803 | return copied; |
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index 20c4eb5c1ac6..42eb0c3a9780 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c | |||
| @@ -51,10 +51,10 @@ void llc_save_primitive(struct sock *sk, struct sk_buff* skb, u8 prim) | |||
| 51 | { | 51 | { |
| 52 | struct sockaddr_llc *addr; | 52 | struct sockaddr_llc *addr; |
| 53 | 53 | ||
| 54 | if (skb->sk->sk_type == SOCK_STREAM) /* See UNIX98 */ | ||
| 55 | return; | ||
| 56 | /* save primitive for use by the user. */ | 54 | /* save primitive for use by the user. */ |
| 57 | addr = llc_ui_skb_cb(skb); | 55 | addr = llc_ui_skb_cb(skb); |
| 56 | |||
| 57 | memset(addr, 0, sizeof(*addr)); | ||
| 58 | addr->sllc_family = sk->sk_family; | 58 | addr->sllc_family = sk->sk_family; |
| 59 | addr->sllc_arphrd = skb->dev->type; | 59 | addr->sllc_arphrd = skb->dev->type; |
| 60 | addr->sllc_test = prim == LLC_TEST_PRIM; | 60 | addr->sllc_test = prim == LLC_TEST_PRIM; |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index c7844bacbbcb..a19eff12cf78 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
| @@ -430,7 +430,7 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp) | |||
| 430 | } | 430 | } |
| 431 | #endif | 431 | #endif |
| 432 | 432 | ||
| 433 | err = -EINVAL; | 433 | err = -ENOENT; |
| 434 | if (ops == NULL) | 434 | if (ops == NULL) |
| 435 | goto err_out; | 435 | goto err_out; |
| 436 | 436 | ||
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 7026b0866b7b..00cb388ece03 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
| @@ -71,7 +71,12 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | |||
| 71 | new = detail->alloc(); | 71 | new = detail->alloc(); |
| 72 | if (!new) | 72 | if (!new) |
| 73 | return NULL; | 73 | return NULL; |
| 74 | /* must fully initialise 'new', else | ||
| 75 | * we might get into trouble if we need to | ||
| 76 | * cache_put it soon. | ||
| 77 | */ | ||
| 74 | cache_init(new); | 78 | cache_init(new); |
| 79 | detail->init(new, key); | ||
| 75 | 80 | ||
| 76 | write_lock(&detail->hash_lock); | 81 | write_lock(&detail->hash_lock); |
| 77 | 82 | ||
| @@ -85,7 +90,6 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | |||
| 85 | return tmp; | 90 | return tmp; |
| 86 | } | 91 | } |
| 87 | } | 92 | } |
| 88 | detail->init(new, key); | ||
| 89 | new->next = *head; | 93 | new->next = *head; |
| 90 | *head = new; | 94 | *head = new; |
| 91 | detail->entries++; | 95 | detail->entries++; |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 4ba271f892c8..d6409e757219 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -921,26 +921,43 @@ call_transmit(struct rpc_task *task) | |||
| 921 | task->tk_status = xprt_prepare_transmit(task); | 921 | task->tk_status = xprt_prepare_transmit(task); |
| 922 | if (task->tk_status != 0) | 922 | if (task->tk_status != 0) |
| 923 | return; | 923 | return; |
| 924 | task->tk_action = call_transmit_status; | ||
| 924 | /* Encode here so that rpcsec_gss can use correct sequence number. */ | 925 | /* Encode here so that rpcsec_gss can use correct sequence number. */ |
| 925 | if (rpc_task_need_encode(task)) { | 926 | if (rpc_task_need_encode(task)) { |
| 926 | task->tk_rqstp->rq_bytes_sent = 0; | 927 | BUG_ON(task->tk_rqstp->rq_bytes_sent != 0); |
| 927 | call_encode(task); | 928 | call_encode(task); |
| 928 | /* Did the encode result in an error condition? */ | 929 | /* Did the encode result in an error condition? */ |
| 929 | if (task->tk_status != 0) | 930 | if (task->tk_status != 0) |
| 930 | goto out_nosend; | 931 | return; |
| 931 | } | 932 | } |
| 932 | task->tk_action = call_transmit_status; | ||
| 933 | xprt_transmit(task); | 933 | xprt_transmit(task); |
| 934 | if (task->tk_status < 0) | 934 | if (task->tk_status < 0) |
| 935 | return; | 935 | return; |
| 936 | if (!task->tk_msg.rpc_proc->p_decode) { | 936 | /* |
| 937 | task->tk_action = rpc_exit_task; | 937 | * On success, ensure that we call xprt_end_transmit() before sleeping |
| 938 | rpc_wake_up_task(task); | 938 | * in order to allow other RPC requests access to the socket. |
| 939 | } | 939 | */ |
| 940 | return; | 940 | call_transmit_status(task); |
| 941 | out_nosend: | 941 | if (task->tk_msg.rpc_proc->p_decode != NULL) |
| 942 | /* release socket write lock before attempting to handle error */ | 942 | return; |
| 943 | xprt_abort_transmit(task); | 943 | task->tk_action = rpc_exit_task; |
| 944 | rpc_wake_up_task(task); | ||
| 945 | } | ||
| 946 | |||
| 947 | /* | ||
| 948 | * 5a. Handle cleanup after a transmission | ||
| 949 | */ | ||
| 950 | static void | ||
| 951 | call_transmit_status(struct rpc_task *task) | ||
| 952 | { | ||
| 953 | task->tk_action = call_status; | ||
| 954 | /* | ||
| 955 | * Special case: if we've been waiting on the socket's write_space() | ||
| 956 | * callback, then don't call xprt_end_transmit(). | ||
| 957 | */ | ||
| 958 | if (task->tk_status == -EAGAIN) | ||
| 959 | return; | ||
| 960 | xprt_end_transmit(task); | ||
| 944 | rpc_task_force_reencode(task); | 961 | rpc_task_force_reencode(task); |
| 945 | } | 962 | } |
| 946 | 963 | ||
| @@ -992,18 +1009,7 @@ call_status(struct rpc_task *task) | |||
| 992 | } | 1009 | } |
| 993 | 1010 | ||
| 994 | /* | 1011 | /* |
| 995 | * 6a. Handle transmission errors. | 1012 | * 6a. Handle RPC timeout |
| 996 | */ | ||
| 997 | static void | ||
| 998 | call_transmit_status(struct rpc_task *task) | ||
| 999 | { | ||
| 1000 | if (task->tk_status != -EAGAIN) | ||
| 1001 | rpc_task_force_reencode(task); | ||
| 1002 | call_status(task); | ||
| 1003 | } | ||
| 1004 | |||
| 1005 | /* | ||
| 1006 | * 6b. Handle RPC timeout | ||
| 1007 | * We do not release the request slot, so we keep using the | 1013 | * We do not release the request slot, so we keep using the |
| 1008 | * same XID for all retransmits. | 1014 | * same XID for all retransmits. |
| 1009 | */ | 1015 | */ |
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index dc6cb93c8830..a3bd2db2e024 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
| @@ -667,10 +667,11 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client) | |||
| 667 | RPCAUTH_info, RPCAUTH_EOF); | 667 | RPCAUTH_info, RPCAUTH_EOF); |
| 668 | if (error) | 668 | if (error) |
| 669 | goto err_depopulate; | 669 | goto err_depopulate; |
| 670 | dget(dentry); | ||
| 670 | out: | 671 | out: |
| 671 | mutex_unlock(&dir->i_mutex); | 672 | mutex_unlock(&dir->i_mutex); |
| 672 | rpc_release_path(&nd); | 673 | rpc_release_path(&nd); |
| 673 | return dget(dentry); | 674 | return dentry; |
| 674 | err_depopulate: | 675 | err_depopulate: |
| 675 | rpc_depopulate(dentry); | 676 | rpc_depopulate(dentry); |
| 676 | __rpc_rmdir(dir, dentry); | 677 | __rpc_rmdir(dir, dentry); |
| @@ -731,10 +732,11 @@ rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags) | |||
| 731 | rpci->flags = flags; | 732 | rpci->flags = flags; |
| 732 | rpci->ops = ops; | 733 | rpci->ops = ops; |
| 733 | inode_dir_notify(dir, DN_CREATE); | 734 | inode_dir_notify(dir, DN_CREATE); |
| 735 | dget(dentry); | ||
| 734 | out: | 736 | out: |
| 735 | mutex_unlock(&dir->i_mutex); | 737 | mutex_unlock(&dir->i_mutex); |
| 736 | rpc_release_path(&nd); | 738 | rpc_release_path(&nd); |
| 737 | return dget(dentry); | 739 | return dentry; |
| 738 | err_dput: | 740 | err_dput: |
| 739 | dput(dentry); | 741 | dput(dentry); |
| 740 | dentry = ERR_PTR(-ENOMEM); | 742 | dentry = ERR_PTR(-ENOMEM); |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 313b68d892c6..e8c2bc4977f3 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
| @@ -707,12 +707,9 @@ out_unlock: | |||
| 707 | return err; | 707 | return err; |
| 708 | } | 708 | } |
| 709 | 709 | ||
| 710 | void | 710 | void xprt_end_transmit(struct rpc_task *task) |
| 711 | xprt_abort_transmit(struct rpc_task *task) | ||
| 712 | { | 711 | { |
| 713 | struct rpc_xprt *xprt = task->tk_xprt; | 712 | xprt_release_write(task->tk_xprt, task); |
| 714 | |||
| 715 | xprt_release_write(xprt, task); | ||
| 716 | } | 713 | } |
| 717 | 714 | ||
| 718 | /** | 715 | /** |
| @@ -761,8 +758,6 @@ void xprt_transmit(struct rpc_task *task) | |||
| 761 | task->tk_status = -ENOTCONN; | 758 | task->tk_status = -ENOTCONN; |
| 762 | else if (!req->rq_received) | 759 | else if (!req->rq_received) |
| 763 | rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); | 760 | rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); |
| 764 | |||
| 765 | xprt->ops->release_xprt(xprt, task); | ||
| 766 | spin_unlock_bh(&xprt->transport_lock); | 761 | spin_unlock_bh(&xprt->transport_lock); |
| 767 | return; | 762 | return; |
| 768 | } | 763 | } |
| @@ -772,18 +767,8 @@ void xprt_transmit(struct rpc_task *task) | |||
| 772 | * schedq, and being picked up by a parallel run of rpciod(). | 767 | * schedq, and being picked up by a parallel run of rpciod(). |
| 773 | */ | 768 | */ |
| 774 | task->tk_status = status; | 769 | task->tk_status = status; |
| 775 | 770 | if (status == -ECONNREFUSED) | |
| 776 | switch (status) { | ||
| 777 | case -ECONNREFUSED: | ||
| 778 | rpc_sleep_on(&xprt->sending, task, NULL, NULL); | 771 | rpc_sleep_on(&xprt->sending, task, NULL, NULL); |
| 779 | case -EAGAIN: | ||
| 780 | case -ENOTCONN: | ||
| 781 | return; | ||
| 782 | default: | ||
| 783 | break; | ||
| 784 | } | ||
| 785 | xprt_release_write(xprt, task); | ||
| 786 | return; | ||
| 787 | } | 772 | } |
| 788 | 773 | ||
| 789 | static inline void do_xprt_reserve(struct rpc_task *task) | 774 | static inline void do_xprt_reserve(struct rpc_task *task) |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index ee678ed13b6f..441bd53f5eca 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -414,6 +414,33 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
| 414 | } | 414 | } |
| 415 | 415 | ||
| 416 | /** | 416 | /** |
| 417 | * xs_tcp_release_xprt - clean up after a tcp transmission | ||
| 418 | * @xprt: transport | ||
| 419 | * @task: rpc task | ||
| 420 | * | ||
| 421 | * This cleans up if an error causes us to abort the transmission of a request. | ||
| 422 | * In this case, the socket may need to be reset in order to avoid confusing | ||
| 423 | * the server. | ||
| 424 | */ | ||
| 425 | static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) | ||
| 426 | { | ||
| 427 | struct rpc_rqst *req; | ||
| 428 | |||
| 429 | if (task != xprt->snd_task) | ||
| 430 | return; | ||
| 431 | if (task == NULL) | ||
| 432 | goto out_release; | ||
| 433 | req = task->tk_rqstp; | ||
| 434 | if (req->rq_bytes_sent == 0) | ||
| 435 | goto out_release; | ||
| 436 | if (req->rq_bytes_sent == req->rq_snd_buf.len) | ||
| 437 | goto out_release; | ||
| 438 | set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state); | ||
| 439 | out_release: | ||
| 440 | xprt_release_xprt(xprt, task); | ||
| 441 | } | ||
| 442 | |||
| 443 | /** | ||
| 417 | * xs_close - close a socket | 444 | * xs_close - close a socket |
| 418 | * @xprt: transport | 445 | * @xprt: transport |
| 419 | * | 446 | * |
| @@ -1250,7 +1277,7 @@ static struct rpc_xprt_ops xs_udp_ops = { | |||
| 1250 | 1277 | ||
| 1251 | static struct rpc_xprt_ops xs_tcp_ops = { | 1278 | static struct rpc_xprt_ops xs_tcp_ops = { |
| 1252 | .reserve_xprt = xprt_reserve_xprt, | 1279 | .reserve_xprt = xprt_reserve_xprt, |
| 1253 | .release_xprt = xprt_release_xprt, | 1280 | .release_xprt = xs_tcp_release_xprt, |
| 1254 | .set_port = xs_set_port, | 1281 | .set_port = xs_set_port, |
| 1255 | .connect = xs_connect, | 1282 | .connect = xs_connect, |
| 1256 | .buf_alloc = rpc_malloc, | 1283 | .buf_alloc = rpc_malloc, |
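
xs_tcp_release_xprt() only requests a socket reset when a send was aborted part-way through a request: a partially written record leaves the server's view of the TCP stream mis-framed, whereas an untouched or fully sent request can reuse the connection. The decision restated in isolation (user-space sketch with invented names):

#include <stdbool.h>
#include <stddef.h>

struct pending_request {
        size_t bytes_sent;      /* how much of the marshalled record went out */
        size_t record_len;      /* total length of the marshalled record */
};

/* True only for a mid-record abort, mirroring the checks in xs_tcp_release_xprt(). */
bool needs_socket_reset(const struct pending_request *req)
{
        if (req == NULL)
                return false;                   /* nothing was being transmitted */
        if (req->bytes_sent == 0)
                return false;                   /* nothing reached the wire yet */
        if (req->bytes_sent == req->record_len)
                return false;                   /* the record went out in full */
        return true;                            /* partial record: reset needed */
}
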
