diff options
70 files changed, 1194 insertions, 589 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 03eb5ed503f7..6e92ba61f7c0 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -1685,6 +1685,22 @@ and is between 256 and 4096 characters. It is defined in the file | |||
1685 | stifb= [HW] | 1685 | stifb= [HW] |
1686 | Format: bpp:<bpp1>[:<bpp2>[:<bpp3>...]] | 1686 | Format: bpp:<bpp1>[:<bpp2>[:<bpp3>...]] |
1687 | 1687 | ||
1688 | sunrpc.pool_mode= | ||
1689 | [NFS] | ||
1690 | Control how the NFS server code allocates CPUs to | ||
1691 | service thread pools. Depending on how many NICs | ||
1692 | you have and where their interrupts are bound, this | ||
1693 | option will affect which CPUs will do NFS serving. | ||
1694 | Note: this parameter cannot be changed while the | ||
1695 | NFS server is running. | ||
1696 | |||
1697 | auto the server chooses an appropriate mode | ||
1698 | automatically using heuristics | ||
1699 | global a single global pool contains all CPUs | ||
1700 | percpu one pool for each CPU | ||
1701 | pernode one pool for each NUMA node (equivalent | ||
1702 | to global on non-NUMA machines) | ||
1703 | |||
1688 | swiotlb= [IA-64] Number of I/O TLB slabs | 1704 | swiotlb= [IA-64] Number of I/O TLB slabs |
1689 | 1705 | ||
1690 | switches= [HW,M68k] | 1706 | switches= [HW,M68k] |
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c index 875d8a6ecc02..602660df455c 100644 --- a/arch/i386/kernel/tsc.c +++ b/arch/i386/kernel/tsc.c | |||
@@ -24,7 +24,6 @@ | |||
24 | * an extra value to store the TSC freq | 24 | * an extra value to store the TSC freq |
25 | */ | 25 | */ |
26 | unsigned int tsc_khz; | 26 | unsigned int tsc_khz; |
27 | unsigned long long (*custom_sched_clock)(void); | ||
28 | 27 | ||
29 | int tsc_disable; | 28 | int tsc_disable; |
30 | 29 | ||
diff --git a/arch/i386/kernel/vmitime.c b/arch/i386/kernel/vmitime.c index 8dc72d575666..9dfb17739b67 100644 --- a/arch/i386/kernel/vmitime.c +++ b/arch/i386/kernel/vmitime.c | |||
@@ -123,12 +123,10 @@ static struct clocksource clocksource_vmi = { | |||
123 | static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id); | 123 | static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id); |
124 | 124 | ||
125 | static struct irqaction vmi_timer_irq = { | 125 | static struct irqaction vmi_timer_irq = { |
126 | vmi_timer_interrupt, | 126 | .handler = vmi_timer_interrupt, |
127 | SA_INTERRUPT, | 127 | .flags = IRQF_DISABLED, |
128 | CPU_MASK_NONE, | 128 | .mask = CPU_MASK_NONE, |
129 | "VMI-alarm", | 129 | .name = "VMI-alarm", |
130 | NULL, | ||
131 | NULL | ||
132 | }; | 130 | }; |
133 | 131 | ||
134 | /* Alarm rate */ | 132 | /* Alarm rate */ |
diff --git a/arch/mips/momentum/jaguar_atx/platform.c b/arch/mips/momentum/jaguar_atx/platform.c index 771e55f39875..561844878a90 100644 --- a/arch/mips/momentum/jaguar_atx/platform.c +++ b/arch/mips/momentum/jaguar_atx/platform.c | |||
@@ -48,6 +48,8 @@ static struct resource mv64x60_eth0_resources[] = { | |||
48 | }; | 48 | }; |
49 | 49 | ||
50 | static struct mv643xx_eth_platform_data eth0_pd = { | 50 | static struct mv643xx_eth_platform_data eth0_pd = { |
51 | .port_number = 0, | ||
52 | |||
51 | .tx_sram_addr = MV_SRAM_BASE_ETH0, | 53 | .tx_sram_addr = MV_SRAM_BASE_ETH0, |
52 | .tx_sram_size = MV_SRAM_TXRING_SIZE, | 54 | .tx_sram_size = MV_SRAM_TXRING_SIZE, |
53 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, | 55 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, |
@@ -77,6 +79,8 @@ static struct resource mv64x60_eth1_resources[] = { | |||
77 | }; | 79 | }; |
78 | 80 | ||
79 | static struct mv643xx_eth_platform_data eth1_pd = { | 81 | static struct mv643xx_eth_platform_data eth1_pd = { |
82 | .port_number = 1, | ||
83 | |||
80 | .tx_sram_addr = MV_SRAM_BASE_ETH1, | 84 | .tx_sram_addr = MV_SRAM_BASE_ETH1, |
81 | .tx_sram_size = MV_SRAM_TXRING_SIZE, | 85 | .tx_sram_size = MV_SRAM_TXRING_SIZE, |
82 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, | 86 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, |
@@ -105,7 +109,9 @@ static struct resource mv64x60_eth2_resources[] = { | |||
105 | }, | 109 | }, |
106 | }; | 110 | }; |
107 | 111 | ||
108 | static struct mv643xx_eth_platform_data eth2_pd; | 112 | static struct mv643xx_eth_platform_data eth2_pd = { |
113 | .port_number = 2, | ||
114 | }; | ||
109 | 115 | ||
110 | static struct platform_device eth2_device = { | 116 | static struct platform_device eth2_device = { |
111 | .name = MV643XX_ETH_NAME, | 117 | .name = MV643XX_ETH_NAME, |
diff --git a/arch/mips/momentum/ocelot_3/platform.c b/arch/mips/momentum/ocelot_3/platform.c index b80733f0c66d..44e4c3fc7403 100644 --- a/arch/mips/momentum/ocelot_3/platform.c +++ b/arch/mips/momentum/ocelot_3/platform.c | |||
@@ -48,6 +48,8 @@ static struct resource mv64x60_eth0_resources[] = { | |||
48 | }; | 48 | }; |
49 | 49 | ||
50 | static struct mv643xx_eth_platform_data eth0_pd = { | 50 | static struct mv643xx_eth_platform_data eth0_pd = { |
51 | .port_number = 0, | ||
52 | |||
51 | .tx_sram_addr = MV_SRAM_BASE_ETH0, | 53 | .tx_sram_addr = MV_SRAM_BASE_ETH0, |
52 | .tx_sram_size = MV_SRAM_TXRING_SIZE, | 54 | .tx_sram_size = MV_SRAM_TXRING_SIZE, |
53 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, | 55 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, |
@@ -77,6 +79,8 @@ static struct resource mv64x60_eth1_resources[] = { | |||
77 | }; | 79 | }; |
78 | 80 | ||
79 | static struct mv643xx_eth_platform_data eth1_pd = { | 81 | static struct mv643xx_eth_platform_data eth1_pd = { |
82 | .port_number = 1, | ||
83 | |||
80 | .tx_sram_addr = MV_SRAM_BASE_ETH1, | 84 | .tx_sram_addr = MV_SRAM_BASE_ETH1, |
81 | .tx_sram_size = MV_SRAM_TXRING_SIZE, | 85 | .tx_sram_size = MV_SRAM_TXRING_SIZE, |
82 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, | 86 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, |
@@ -105,7 +109,9 @@ static struct resource mv64x60_eth2_resources[] = { | |||
105 | }, | 109 | }, |
106 | }; | 110 | }; |
107 | 111 | ||
108 | static struct mv643xx_eth_platform_data eth2_pd; | 112 | static struct mv643xx_eth_platform_data eth2_pd = { |
113 | .port_number = 2, | ||
114 | }; | ||
109 | 115 | ||
110 | static struct platform_device eth2_device = { | 116 | static struct platform_device eth2_device = { |
111 | .name = MV643XX_ETH_NAME, | 117 | .name = MV643XX_ETH_NAME, |
diff --git a/arch/mips/momentum/ocelot_c/platform.c b/arch/mips/momentum/ocelot_c/platform.c index f7cd303f3eba..7780aa0c6555 100644 --- a/arch/mips/momentum/ocelot_c/platform.c +++ b/arch/mips/momentum/ocelot_c/platform.c | |||
@@ -47,6 +47,8 @@ static struct resource mv64x60_eth0_resources[] = { | |||
47 | }; | 47 | }; |
48 | 48 | ||
49 | static struct mv643xx_eth_platform_data eth0_pd = { | 49 | static struct mv643xx_eth_platform_data eth0_pd = { |
50 | .port_number = 0, | ||
51 | |||
50 | .tx_sram_addr = MV_SRAM_BASE_ETH0, | 52 | .tx_sram_addr = MV_SRAM_BASE_ETH0, |
51 | .tx_sram_size = MV_SRAM_TXRING_SIZE, | 53 | .tx_sram_size = MV_SRAM_TXRING_SIZE, |
52 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, | 54 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, |
@@ -76,6 +78,8 @@ static struct resource mv64x60_eth1_resources[] = { | |||
76 | }; | 78 | }; |
77 | 79 | ||
78 | static struct mv643xx_eth_platform_data eth1_pd = { | 80 | static struct mv643xx_eth_platform_data eth1_pd = { |
81 | .port_number = 1, | ||
82 | |||
79 | .tx_sram_addr = MV_SRAM_BASE_ETH1, | 83 | .tx_sram_addr = MV_SRAM_BASE_ETH1, |
80 | .tx_sram_size = MV_SRAM_TXRING_SIZE, | 84 | .tx_sram_size = MV_SRAM_TXRING_SIZE, |
81 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, | 85 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, |
diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c index 6ad4b1a72c96..71045677559a 100644 --- a/arch/powerpc/platforms/chrp/pegasos_eth.c +++ b/arch/powerpc/platforms/chrp/pegasos_eth.c | |||
@@ -58,6 +58,7 @@ static struct resource mv643xx_eth0_resources[] = { | |||
58 | 58 | ||
59 | 59 | ||
60 | static struct mv643xx_eth_platform_data eth0_pd = { | 60 | static struct mv643xx_eth_platform_data eth0_pd = { |
61 | .port_number = 0, | ||
61 | .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH0, | 62 | .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH0, |
62 | .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE, | 63 | .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE, |
63 | .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16, | 64 | .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16, |
@@ -87,6 +88,7 @@ static struct resource mv643xx_eth1_resources[] = { | |||
87 | }; | 88 | }; |
88 | 89 | ||
89 | static struct mv643xx_eth_platform_data eth1_pd = { | 90 | static struct mv643xx_eth_platform_data eth1_pd = { |
91 | .port_number = 1, | ||
90 | .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH1, | 92 | .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH1, |
91 | .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE, | 93 | .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE, |
92 | .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16, | 94 | .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16, |
diff --git a/arch/ppc/syslib/mv64x60.c b/arch/ppc/syslib/mv64x60.c index 3b039c30a439..a6f8b686ea83 100644 --- a/arch/ppc/syslib/mv64x60.c +++ b/arch/ppc/syslib/mv64x60.c | |||
@@ -339,7 +339,9 @@ static struct resource mv64x60_eth0_resources[] = { | |||
339 | }, | 339 | }, |
340 | }; | 340 | }; |
341 | 341 | ||
342 | static struct mv643xx_eth_platform_data eth0_pd; | 342 | static struct mv643xx_eth_platform_data eth0_pd = { |
343 | .port_number = 0, | ||
344 | }; | ||
343 | 345 | ||
344 | static struct platform_device eth0_device = { | 346 | static struct platform_device eth0_device = { |
345 | .name = MV643XX_ETH_NAME, | 347 | .name = MV643XX_ETH_NAME, |
@@ -362,7 +364,9 @@ static struct resource mv64x60_eth1_resources[] = { | |||
362 | }, | 364 | }, |
363 | }; | 365 | }; |
364 | 366 | ||
365 | static struct mv643xx_eth_platform_data eth1_pd; | 367 | static struct mv643xx_eth_platform_data eth1_pd = { |
368 | .port_number = 1, | ||
369 | }; | ||
366 | 370 | ||
367 | static struct platform_device eth1_device = { | 371 | static struct platform_device eth1_device = { |
368 | .name = MV643XX_ETH_NAME, | 372 | .name = MV643XX_ETH_NAME, |
@@ -385,7 +389,9 @@ static struct resource mv64x60_eth2_resources[] = { | |||
385 | }, | 389 | }, |
386 | }; | 390 | }; |
387 | 391 | ||
388 | static struct mv643xx_eth_platform_data eth2_pd; | 392 | static struct mv643xx_eth_platform_data eth2_pd = { |
393 | .port_number = 2, | ||
394 | }; | ||
389 | 395 | ||
390 | static struct platform_device eth2_device = { | 396 | static struct platform_device eth2_device = { |
391 | .name = MV643XX_ETH_NAME, | 397 | .name = MV643XX_ETH_NAME, |
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c index 2a32e5e8e9c9..3c798cdde550 100644 --- a/arch/um/kernel/signal.c +++ b/arch/um/kernel/signal.c | |||
@@ -158,12 +158,12 @@ static int kern_do_signal(struct pt_regs *regs) | |||
158 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 158 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
159 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | 159 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); |
160 | } | 160 | } |
161 | return(handled_sig); | 161 | return handled_sig; |
162 | } | 162 | } |
163 | 163 | ||
164 | int do_signal(void) | 164 | int do_signal(void) |
165 | { | 165 | { |
166 | return(kern_do_signal(¤t->thread.regs)); | 166 | return kern_do_signal(¤t->thread.regs); |
167 | } | 167 | } |
168 | 168 | ||
169 | /* | 169 | /* |
@@ -186,5 +186,5 @@ long sys_sigsuspend(int history0, int history1, old_sigset_t mask) | |||
186 | 186 | ||
187 | long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) | 187 | long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) |
188 | { | 188 | { |
189 | return(do_sigaltstack(uss, uoss, PT_REGS_SP(¤t->thread.regs))); | 189 | return do_sigaltstack(uss, uoss, PT_REGS_SP(¤t->thread.regs)); |
190 | } | 190 | } |
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index 9b34fe65949a..dda06789bcb0 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c | |||
@@ -419,9 +419,12 @@ void map_stub_pages(int fd, unsigned long code, | |||
419 | .offset = code_offset | 419 | .offset = code_offset |
420 | } } }); | 420 | } } }); |
421 | n = os_write_file(fd, &mmop, sizeof(mmop)); | 421 | n = os_write_file(fd, &mmop, sizeof(mmop)); |
422 | if(n != sizeof(mmop)) | 422 | if(n != sizeof(mmop)){ |
423 | printk("mmap args - addr = 0x%lx, fd = %d, offset = %llx\n", | ||
424 | code, code_fd, (unsigned long long) code_offset); | ||
423 | panic("map_stub_pages : /proc/mm map for code failed, " | 425 | panic("map_stub_pages : /proc/mm map for code failed, " |
424 | "err = %d\n", -n); | 426 | "err = %d\n", -n); |
427 | } | ||
425 | 428 | ||
426 | if ( stack ) { | 429 | if ( stack ) { |
427 | __u64 map_offset; | 430 | __u64 map_offset; |
diff --git a/arch/um/os-Linux/trap.c b/arch/um/os-Linux/trap.c index 1df231a26244..d221214d2ed5 100644 --- a/arch/um/os-Linux/trap.c +++ b/arch/um/os-Linux/trap.c | |||
@@ -16,6 +16,7 @@ void usr2_handler(int sig, union uml_pt_regs *regs) | |||
16 | CHOOSE_MODE(syscall_handler_tt(sig, regs), (void) 0); | 16 | CHOOSE_MODE(syscall_handler_tt(sig, regs), (void) 0); |
17 | } | 17 | } |
18 | 18 | ||
19 | /* Initialized from linux_main() */ | ||
19 | void (*sig_info[NSIG])(int, union uml_pt_regs *); | 20 | void (*sig_info[NSIG])(int, union uml_pt_regs *); |
20 | 21 | ||
21 | void os_fill_handlinfo(struct kern_handlers h) | 22 | void os_fill_handlinfo(struct kern_handlers h) |
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index cacb1c816e35..17ee97f3a99b 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig | |||
@@ -406,22 +406,6 @@ config BLK_DEV_RAM_BLOCKSIZE | |||
406 | setups function - apparently needed by the rd_load_image routine | 406 | setups function - apparently needed by the rd_load_image routine |
407 | that supposes the filesystem in the image uses a 1024 blocksize. | 407 | that supposes the filesystem in the image uses a 1024 blocksize. |
408 | 408 | ||
409 | config BLK_DEV_INITRD | ||
410 | bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support" | ||
411 | depends on BROKEN || !FRV | ||
412 | help | ||
413 | The initial RAM filesystem is a ramfs which is loaded by the | ||
414 | boot loader (loadlin or lilo) and that is mounted as root | ||
415 | before the normal boot procedure. It is typically used to | ||
416 | load modules needed to mount the "real" root file system, | ||
417 | etc. See <file:Documentation/initrd.txt> for details. | ||
418 | |||
419 | If RAM disk support (BLK_DEV_RAM) is also included, this | ||
420 | also enables initial RAM disk (initrd) support and adds | ||
421 | 15 Kbytes (more on some other architectures) to the kernel size. | ||
422 | |||
423 | If unsure say Y. | ||
424 | |||
425 | config CDROM_PKTCDVD | 409 | config CDROM_PKTCDVD |
426 | tristate "Packet writing on CD/DVD media" | 410 | tristate "Packet writing on CD/DVD media" |
427 | depends on !UML | 411 | depends on !UML |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 05dfe357527c..0c716ee905d7 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -1291,13 +1291,19 @@ static void cciss_update_drive_info(int ctlr, int drv_index) | |||
1291 | if (inq_buff == NULL) | 1291 | if (inq_buff == NULL) |
1292 | goto mem_msg; | 1292 | goto mem_msg; |
1293 | 1293 | ||
1294 | /* testing to see if 16-byte CDBs are already being used */ | ||
1295 | if (h->cciss_read == CCISS_READ_16) { | ||
1296 | cciss_read_capacity_16(h->ctlr, drv_index, 1, | ||
1297 | &total_size, &block_size); | ||
1298 | goto geo_inq; | ||
1299 | } | ||
1300 | |||
1294 | cciss_read_capacity(ctlr, drv_index, 1, | 1301 | cciss_read_capacity(ctlr, drv_index, 1, |
1295 | &total_size, &block_size); | 1302 | &total_size, &block_size); |
1296 | 1303 | ||
1297 | /* total size = last LBA + 1 */ | 1304 | /* if read_capacity returns all F's this volume is >2TB in size */ |
1298 | /* FFFFFFFF + 1 = 0, cannot have a logical volume of size 0 */ | 1305 | /* so we switch to 16-byte CDB's for all read/write ops */ |
1299 | /* so we assume this volume this must be >2TB in size */ | 1306 | if (total_size == 0xFFFFFFFFULL) { |
1300 | if (total_size == (__u32) 0) { | ||
1301 | cciss_read_capacity_16(ctlr, drv_index, 1, | 1307 | cciss_read_capacity_16(ctlr, drv_index, 1, |
1302 | &total_size, &block_size); | 1308 | &total_size, &block_size); |
1303 | h->cciss_read = CCISS_READ_16; | 1309 | h->cciss_read = CCISS_READ_16; |
@@ -1306,6 +1312,7 @@ static void cciss_update_drive_info(int ctlr, int drv_index) | |||
1306 | h->cciss_read = CCISS_READ_10; | 1312 | h->cciss_read = CCISS_READ_10; |
1307 | h->cciss_write = CCISS_WRITE_10; | 1313 | h->cciss_write = CCISS_WRITE_10; |
1308 | } | 1314 | } |
1315 | geo_inq: | ||
1309 | cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size, | 1316 | cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size, |
1310 | inq_buff, &h->drv[drv_index]); | 1317 | inq_buff, &h->drv[drv_index]); |
1311 | 1318 | ||
@@ -1917,13 +1924,14 @@ static void cciss_geometry_inquiry(int ctlr, int logvol, | |||
1917 | drv->raid_level = inq_buff->data_byte[8]; | 1924 | drv->raid_level = inq_buff->data_byte[8]; |
1918 | } | 1925 | } |
1919 | drv->block_size = block_size; | 1926 | drv->block_size = block_size; |
1920 | drv->nr_blocks = total_size; | 1927 | drv->nr_blocks = total_size + 1; |
1921 | t = drv->heads * drv->sectors; | 1928 | t = drv->heads * drv->sectors; |
1922 | if (t > 1) { | 1929 | if (t > 1) { |
1923 | unsigned rem = sector_div(total_size, t); | 1930 | sector_t real_size = total_size + 1; |
1931 | unsigned long rem = sector_div(real_size, t); | ||
1924 | if (rem) | 1932 | if (rem) |
1925 | total_size++; | 1933 | real_size++; |
1926 | drv->cylinders = total_size; | 1934 | drv->cylinders = real_size; |
1927 | } | 1935 | } |
1928 | } else { /* Get geometry failed */ | 1936 | } else { /* Get geometry failed */ |
1929 | printk(KERN_WARNING "cciss: reading geometry failed\n"); | 1937 | printk(KERN_WARNING "cciss: reading geometry failed\n"); |
@@ -1953,16 +1961,16 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size, | |||
1953 | ctlr, buf, sizeof(ReadCapdata_struct), | 1961 | ctlr, buf, sizeof(ReadCapdata_struct), |
1954 | 1, logvol, 0, NULL, TYPE_CMD); | 1962 | 1, logvol, 0, NULL, TYPE_CMD); |
1955 | if (return_code == IO_OK) { | 1963 | if (return_code == IO_OK) { |
1956 | *total_size = be32_to_cpu(*(__u32 *) buf->total_size)+1; | 1964 | *total_size = be32_to_cpu(*(__u32 *) buf->total_size); |
1957 | *block_size = be32_to_cpu(*(__u32 *) buf->block_size); | 1965 | *block_size = be32_to_cpu(*(__u32 *) buf->block_size); |
1958 | } else { /* read capacity command failed */ | 1966 | } else { /* read capacity command failed */ |
1959 | printk(KERN_WARNING "cciss: read capacity failed\n"); | 1967 | printk(KERN_WARNING "cciss: read capacity failed\n"); |
1960 | *total_size = 0; | 1968 | *total_size = 0; |
1961 | *block_size = BLOCK_SIZE; | 1969 | *block_size = BLOCK_SIZE; |
1962 | } | 1970 | } |
1963 | if (*total_size != (__u32) 0) | 1971 | if (*total_size != 0) |
1964 | printk(KERN_INFO " blocks= %llu block_size= %d\n", | 1972 | printk(KERN_INFO " blocks= %llu block_size= %d\n", |
1965 | (unsigned long long)*total_size, *block_size); | 1973 | (unsigned long long)*total_size+1, *block_size); |
1966 | kfree(buf); | 1974 | kfree(buf); |
1967 | return; | 1975 | return; |
1968 | } | 1976 | } |
@@ -1989,7 +1997,7 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, | |||
1989 | 1, logvol, 0, NULL, TYPE_CMD); | 1997 | 1, logvol, 0, NULL, TYPE_CMD); |
1990 | } | 1998 | } |
1991 | if (return_code == IO_OK) { | 1999 | if (return_code == IO_OK) { |
1992 | *total_size = be64_to_cpu(*(__u64 *) buf->total_size)+1; | 2000 | *total_size = be64_to_cpu(*(__u64 *) buf->total_size); |
1993 | *block_size = be32_to_cpu(*(__u32 *) buf->block_size); | 2001 | *block_size = be32_to_cpu(*(__u32 *) buf->block_size); |
1994 | } else { /* read capacity command failed */ | 2002 | } else { /* read capacity command failed */ |
1995 | printk(KERN_WARNING "cciss: read capacity failed\n"); | 2003 | printk(KERN_WARNING "cciss: read capacity failed\n"); |
@@ -1997,7 +2005,7 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, | |||
1997 | *block_size = BLOCK_SIZE; | 2005 | *block_size = BLOCK_SIZE; |
1998 | } | 2006 | } |
1999 | printk(KERN_INFO " blocks= %llu block_size= %d\n", | 2007 | printk(KERN_INFO " blocks= %llu block_size= %d\n", |
2000 | (unsigned long long)*total_size, *block_size); | 2008 | (unsigned long long)*total_size+1, *block_size); |
2001 | kfree(buf); | 2009 | kfree(buf); |
2002 | return; | 2010 | return; |
2003 | } | 2011 | } |
@@ -3119,8 +3127,9 @@ static void cciss_getgeometry(int cntl_num) | |||
3119 | } | 3127 | } |
3120 | cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size); | 3128 | cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size); |
3121 | 3129 | ||
3122 | /* total_size = last LBA + 1 */ | 3130 | /* If read_capacity returns all F's the logical is >2TB */ |
3123 | if(total_size == (__u32) 0) { | 3131 | /* so we switch to 16-byte CDBs for all read/write ops */ |
3132 | if(total_size == 0xFFFFFFFFULL) { | ||
3124 | cciss_read_capacity_16(cntl_num, i, 0, | 3133 | cciss_read_capacity_16(cntl_num, i, 0, |
3125 | &total_size, &block_size); | 3134 | &total_size, &block_size); |
3126 | hba[cntl_num]->cciss_read = CCISS_READ_16; | 3135 | hba[cntl_num]->cciss_read = CCISS_READ_16; |
@@ -3395,7 +3404,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
3395 | return -1; | 3404 | return -1; |
3396 | } | 3405 | } |
3397 | 3406 | ||
3398 | static void __devexit cciss_remove_one(struct pci_dev *pdev) | 3407 | static void cciss_remove_one(struct pci_dev *pdev) |
3399 | { | 3408 | { |
3400 | ctlr_info_t *tmp_ptr; | 3409 | ctlr_info_t *tmp_ptr; |
3401 | int i, j; | 3410 | int i, j; |
@@ -3419,9 +3428,10 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) | |||
3419 | memset(flush_buf, 0, 4); | 3428 | memset(flush_buf, 0, 4); |
3420 | return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL, | 3429 | return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL, |
3421 | TYPE_CMD); | 3430 | TYPE_CMD); |
3422 | if (return_code != IO_OK) { | 3431 | if (return_code == IO_OK) { |
3423 | printk(KERN_WARNING "Error Flushing cache on controller %d\n", | 3432 | printk(KERN_INFO "Completed flushing cache on controller %d\n", i); |
3424 | i); | 3433 | } else { |
3434 | printk(KERN_WARNING "Error flushing cache on controller %d\n", i); | ||
3425 | } | 3435 | } |
3426 | free_irq(hba[i]->intr[2], hba[i]); | 3436 | free_irq(hba[i]->intr[2], hba[i]); |
3427 | 3437 | ||
@@ -3472,6 +3482,7 @@ static struct pci_driver cciss_pci_driver = { | |||
3472 | .probe = cciss_init_one, | 3482 | .probe = cciss_init_one, |
3473 | .remove = __devexit_p(cciss_remove_one), | 3483 | .remove = __devexit_p(cciss_remove_one), |
3474 | .id_table = cciss_pci_device_id, /* id_table */ | 3484 | .id_table = cciss_pci_device_id, /* id_table */ |
3485 | .shutdown = cciss_remove_one, | ||
3475 | }; | 3486 | }; |
3476 | 3487 | ||
3477 | /* | 3488 | /* |
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index 0eb62841e9b0..6d3840e629de 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c | |||
@@ -99,9 +99,8 @@ do_crypt(void *src, void *dst, int len, u32 flags) | |||
99 | static unsigned int | 99 | static unsigned int |
100 | geode_aes_crypt(struct geode_aes_op *op) | 100 | geode_aes_crypt(struct geode_aes_op *op) |
101 | { | 101 | { |
102 | |||
103 | u32 flags = 0; | 102 | u32 flags = 0; |
104 | int iflags; | 103 | unsigned long iflags; |
105 | 104 | ||
106 | if (op->len == 0 || op->src == op->dst) | 105 | if (op->len == 0 || op->src == op->dst) |
107 | return 0; | 106 | return 0; |
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c index 5046a1661342..4a73e8b2428d 100644 --- a/drivers/mmc/mmc.c +++ b/drivers/mmc/mmc.c | |||
@@ -376,10 +376,11 @@ static inline void mmc_set_ios(struct mmc_host *host) | |||
376 | { | 376 | { |
377 | struct mmc_ios *ios = &host->ios; | 377 | struct mmc_ios *ios = &host->ios; |
378 | 378 | ||
379 | pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n", | 379 | pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u " |
380 | "width %u timing %u\n", | ||
380 | mmc_hostname(host), ios->clock, ios->bus_mode, | 381 | mmc_hostname(host), ios->clock, ios->bus_mode, |
381 | ios->power_mode, ios->chip_select, ios->vdd, | 382 | ios->power_mode, ios->chip_select, ios->vdd, |
382 | ios->bus_width); | 383 | ios->bus_width, ios->timing); |
383 | 384 | ||
384 | host->ops->set_ios(host, ios); | 385 | host->ops->set_ios(host, ios); |
385 | } | 386 | } |
@@ -809,6 +810,7 @@ static void mmc_power_up(struct mmc_host *host) | |||
809 | host->ios.chip_select = MMC_CS_DONTCARE; | 810 | host->ios.chip_select = MMC_CS_DONTCARE; |
810 | host->ios.power_mode = MMC_POWER_UP; | 811 | host->ios.power_mode = MMC_POWER_UP; |
811 | host->ios.bus_width = MMC_BUS_WIDTH_1; | 812 | host->ios.bus_width = MMC_BUS_WIDTH_1; |
813 | host->ios.timing = MMC_TIMING_LEGACY; | ||
812 | mmc_set_ios(host); | 814 | mmc_set_ios(host); |
813 | 815 | ||
814 | mmc_delay(1); | 816 | mmc_delay(1); |
@@ -828,6 +830,7 @@ static void mmc_power_off(struct mmc_host *host) | |||
828 | host->ios.chip_select = MMC_CS_DONTCARE; | 830 | host->ios.chip_select = MMC_CS_DONTCARE; |
829 | host->ios.power_mode = MMC_POWER_OFF; | 831 | host->ios.power_mode = MMC_POWER_OFF; |
830 | host->ios.bus_width = MMC_BUS_WIDTH_1; | 832 | host->ios.bus_width = MMC_BUS_WIDTH_1; |
833 | host->ios.timing = MMC_TIMING_LEGACY; | ||
831 | mmc_set_ios(host); | 834 | mmc_set_ios(host); |
832 | } | 835 | } |
833 | 836 | ||
@@ -1112,46 +1115,50 @@ static void mmc_process_ext_csds(struct mmc_host *host) | |||
1112 | continue; | 1115 | continue; |
1113 | } | 1116 | } |
1114 | 1117 | ||
1115 | /* Activate highspeed support. */ | 1118 | if (host->caps & MMC_CAP_MMC_HIGHSPEED) { |
1116 | cmd.opcode = MMC_SWITCH; | 1119 | /* Activate highspeed support. */ |
1117 | cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | | 1120 | cmd.opcode = MMC_SWITCH; |
1118 | (EXT_CSD_HS_TIMING << 16) | | 1121 | cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | |
1119 | (1 << 8) | | 1122 | (EXT_CSD_HS_TIMING << 16) | |
1120 | EXT_CSD_CMD_SET_NORMAL; | 1123 | (1 << 8) | |
1121 | cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; | 1124 | EXT_CSD_CMD_SET_NORMAL; |
1125 | cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; | ||
1122 | 1126 | ||
1123 | err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); | 1127 | err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); |
1124 | if (err != MMC_ERR_NONE) { | 1128 | if (err != MMC_ERR_NONE) { |
1125 | printk("%s: failed to switch card to mmc v4 " | 1129 | printk("%s: failed to switch card to mmc v4 " |
1126 | "high-speed mode.\n", | 1130 | "high-speed mode.\n", |
1127 | mmc_hostname(card->host)); | 1131 | mmc_hostname(card->host)); |
1128 | continue; | 1132 | continue; |
1129 | } | 1133 | } |
1130 | 1134 | ||
1131 | mmc_card_set_highspeed(card); | 1135 | mmc_card_set_highspeed(card); |
1132 | 1136 | ||
1133 | /* Check for host support for wide-bus modes. */ | 1137 | host->ios.timing = MMC_TIMING_SD_HS; |
1134 | if (!(host->caps & MMC_CAP_4_BIT_DATA)) { | 1138 | mmc_set_ios(host); |
1135 | continue; | ||
1136 | } | 1139 | } |
1137 | 1140 | ||
1138 | /* Activate 4-bit support. */ | 1141 | /* Check for host support for wide-bus modes. */ |
1139 | cmd.opcode = MMC_SWITCH; | 1142 | if (host->caps & MMC_CAP_4_BIT_DATA) { |
1140 | cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | | 1143 | /* Activate 4-bit support. */ |
1141 | (EXT_CSD_BUS_WIDTH << 16) | | 1144 | cmd.opcode = MMC_SWITCH; |
1142 | (EXT_CSD_BUS_WIDTH_4 << 8) | | 1145 | cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | |
1143 | EXT_CSD_CMD_SET_NORMAL; | 1146 | (EXT_CSD_BUS_WIDTH << 16) | |
1144 | cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; | 1147 | (EXT_CSD_BUS_WIDTH_4 << 8) | |
1148 | EXT_CSD_CMD_SET_NORMAL; | ||
1149 | cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; | ||
1145 | 1150 | ||
1146 | err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); | 1151 | err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); |
1147 | if (err != MMC_ERR_NONE) { | 1152 | if (err != MMC_ERR_NONE) { |
1148 | printk("%s: failed to switch card to " | 1153 | printk("%s: failed to switch card to " |
1149 | "mmc v4 4-bit bus mode.\n", | 1154 | "mmc v4 4-bit bus mode.\n", |
1150 | mmc_hostname(card->host)); | 1155 | mmc_hostname(card->host)); |
1151 | continue; | 1156 | continue; |
1152 | } | 1157 | } |
1153 | 1158 | ||
1154 | host->ios.bus_width = MMC_BUS_WIDTH_4; | 1159 | host->ios.bus_width = MMC_BUS_WIDTH_4; |
1160 | mmc_set_ios(host); | ||
1161 | } | ||
1155 | } | 1162 | } |
1156 | 1163 | ||
1157 | kfree(ext_csd); | 1164 | kfree(ext_csd); |
@@ -1241,6 +1248,9 @@ static void mmc_read_switch_caps(struct mmc_host *host) | |||
1241 | unsigned char *status; | 1248 | unsigned char *status; |
1242 | struct scatterlist sg; | 1249 | struct scatterlist sg; |
1243 | 1250 | ||
1251 | if (!(host->caps & MMC_CAP_SD_HIGHSPEED)) | ||
1252 | return; | ||
1253 | |||
1244 | status = kmalloc(64, GFP_KERNEL); | 1254 | status = kmalloc(64, GFP_KERNEL); |
1245 | if (!status) { | 1255 | if (!status) { |
1246 | printk(KERN_WARNING "%s: Unable to allocate buffer for " | 1256 | printk(KERN_WARNING "%s: Unable to allocate buffer for " |
@@ -1332,6 +1342,9 @@ static void mmc_read_switch_caps(struct mmc_host *host) | |||
1332 | } | 1342 | } |
1333 | 1343 | ||
1334 | mmc_card_set_highspeed(card); | 1344 | mmc_card_set_highspeed(card); |
1345 | |||
1346 | host->ios.timing = MMC_TIMING_SD_HS; | ||
1347 | mmc_set_ios(host); | ||
1335 | } | 1348 | } |
1336 | 1349 | ||
1337 | kfree(status); | 1350 | kfree(status); |
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c index 7522f76b15ec..d749f08601b8 100644 --- a/drivers/mmc/sdhci.c +++ b/drivers/mmc/sdhci.c | |||
@@ -606,7 +606,6 @@ static void sdhci_finish_command(struct sdhci_host *host) | |||
606 | static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) | 606 | static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) |
607 | { | 607 | { |
608 | int div; | 608 | int div; |
609 | u8 ctrl; | ||
610 | u16 clk; | 609 | u16 clk; |
611 | unsigned long timeout; | 610 | unsigned long timeout; |
612 | 611 | ||
@@ -615,13 +614,6 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) | |||
615 | 614 | ||
616 | writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); | 615 | writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); |
617 | 616 | ||
618 | ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); | ||
619 | if (clock > 25000000) | ||
620 | ctrl |= SDHCI_CTRL_HISPD; | ||
621 | else | ||
622 | ctrl &= ~SDHCI_CTRL_HISPD; | ||
623 | writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); | ||
624 | |||
625 | if (clock == 0) | 617 | if (clock == 0) |
626 | goto out; | 618 | goto out; |
627 | 619 | ||
@@ -761,10 +753,17 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
761 | sdhci_set_power(host, ios->vdd); | 753 | sdhci_set_power(host, ios->vdd); |
762 | 754 | ||
763 | ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); | 755 | ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); |
756 | |||
764 | if (ios->bus_width == MMC_BUS_WIDTH_4) | 757 | if (ios->bus_width == MMC_BUS_WIDTH_4) |
765 | ctrl |= SDHCI_CTRL_4BITBUS; | 758 | ctrl |= SDHCI_CTRL_4BITBUS; |
766 | else | 759 | else |
767 | ctrl &= ~SDHCI_CTRL_4BITBUS; | 760 | ctrl &= ~SDHCI_CTRL_4BITBUS; |
761 | |||
762 | if (ios->timing == MMC_TIMING_SD_HS) | ||
763 | ctrl |= SDHCI_CTRL_HISPD; | ||
764 | else | ||
765 | ctrl &= ~SDHCI_CTRL_HISPD; | ||
766 | |||
768 | writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); | 767 | writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); |
769 | 768 | ||
770 | mmiowb(); | 769 | mmiowb(); |
@@ -994,7 +993,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id) | |||
994 | 993 | ||
995 | intmask = readl(host->ioaddr + SDHCI_INT_STATUS); | 994 | intmask = readl(host->ioaddr + SDHCI_INT_STATUS); |
996 | 995 | ||
997 | if (!intmask) { | 996 | if (!intmask || intmask == 0xffffffff) { |
998 | result = IRQ_NONE; | 997 | result = IRQ_NONE; |
999 | goto out; | 998 | goto out; |
1000 | } | 999 | } |
@@ -1080,6 +1079,13 @@ static int sdhci_suspend (struct pci_dev *pdev, pm_message_t state) | |||
1080 | 1079 | ||
1081 | pci_save_state(pdev); | 1080 | pci_save_state(pdev); |
1082 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); | 1081 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); |
1082 | |||
1083 | for (i = 0;i < chip->num_slots;i++) { | ||
1084 | if (!chip->hosts[i]) | ||
1085 | continue; | ||
1086 | free_irq(chip->hosts[i]->irq, chip->hosts[i]); | ||
1087 | } | ||
1088 | |||
1083 | pci_disable_device(pdev); | 1089 | pci_disable_device(pdev); |
1084 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | 1090 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); |
1085 | 1091 | ||
@@ -1108,6 +1114,11 @@ static int sdhci_resume (struct pci_dev *pdev) | |||
1108 | continue; | 1114 | continue; |
1109 | if (chip->hosts[i]->flags & SDHCI_USE_DMA) | 1115 | if (chip->hosts[i]->flags & SDHCI_USE_DMA) |
1110 | pci_set_master(pdev); | 1116 | pci_set_master(pdev); |
1117 | ret = request_irq(chip->hosts[i]->irq, sdhci_irq, | ||
1118 | IRQF_SHARED, chip->hosts[i]->slot_descr, | ||
1119 | chip->hosts[i]); | ||
1120 | if (ret) | ||
1121 | return ret; | ||
1111 | sdhci_init(chip->hosts[i]); | 1122 | sdhci_init(chip->hosts[i]); |
1112 | mmiowb(); | 1123 | mmiowb(); |
1113 | ret = mmc_resume_host(chip->hosts[i]->mmc); | 1124 | ret = mmc_resume_host(chip->hosts[i]->mmc); |
@@ -1274,6 +1285,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
1274 | mmc->f_max = host->max_clk; | 1285 | mmc->f_max = host->max_clk; |
1275 | mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK; | 1286 | mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK; |
1276 | 1287 | ||
1288 | if (caps & SDHCI_CAN_DO_HISPD) | ||
1289 | mmc->caps |= MMC_CAP_SD_HIGHSPEED; | ||
1290 | |||
1277 | mmc->ocr_avail = 0; | 1291 | mmc->ocr_avail = 0; |
1278 | if (caps & SDHCI_CAN_VDD_330) | 1292 | if (caps & SDHCI_CAN_VDD_330) |
1279 | mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; | 1293 | mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; |
@@ -1282,13 +1296,6 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
1282 | if (caps & SDHCI_CAN_VDD_180) | 1296 | if (caps & SDHCI_CAN_VDD_180) |
1283 | mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19; | 1297 | mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19; |
1284 | 1298 | ||
1285 | if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) { | ||
1286 | printk(KERN_ERR "%s: Controller reports > 25 MHz base clock," | ||
1287 | " but no high speed support.\n", | ||
1288 | host->slot_descr); | ||
1289 | mmc->f_max = 25000000; | ||
1290 | } | ||
1291 | |||
1292 | if (mmc->ocr_avail == 0) { | 1299 | if (mmc->ocr_avail == 0) { |
1293 | printk(KERN_ERR "%s: Hardware doesn't report any " | 1300 | printk(KERN_ERR "%s: Hardware doesn't report any " |
1294 | "support voltages.\n", host->slot_descr); | 1301 | "support voltages.\n", host->slot_descr); |
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 716a47210aa3..72995777f809 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c | |||
@@ -822,11 +822,17 @@ static int vortex_resume(struct pci_dev *pdev) | |||
822 | { | 822 | { |
823 | struct net_device *dev = pci_get_drvdata(pdev); | 823 | struct net_device *dev = pci_get_drvdata(pdev); |
824 | struct vortex_private *vp = netdev_priv(dev); | 824 | struct vortex_private *vp = netdev_priv(dev); |
825 | int err; | ||
825 | 826 | ||
826 | if (dev && vp) { | 827 | if (dev && vp) { |
827 | pci_set_power_state(pdev, PCI_D0); | 828 | pci_set_power_state(pdev, PCI_D0); |
828 | pci_restore_state(pdev); | 829 | pci_restore_state(pdev); |
829 | pci_enable_device(pdev); | 830 | err = pci_enable_device(pdev); |
831 | if (err) { | ||
832 | printk(KERN_WARNING "%s: Could not enable device \n", | ||
833 | dev->name); | ||
834 | return err; | ||
835 | } | ||
830 | pci_set_master(pdev); | 836 | pci_set_master(pdev); |
831 | if (request_irq(dev->irq, vp->full_bus_master_rx ? | 837 | if (request_irq(dev->irq, vp->full_bus_master_rx ? |
832 | &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev)) { | 838 | &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev)) { |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index ea73ebff4387..e4724d874e7c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -60,6 +60,7 @@ | |||
60 | #include <linux/errno.h> | 60 | #include <linux/errno.h> |
61 | #include <linux/netdevice.h> | 61 | #include <linux/netdevice.h> |
62 | #include <linux/inetdevice.h> | 62 | #include <linux/inetdevice.h> |
63 | #include <linux/igmp.h> | ||
63 | #include <linux/etherdevice.h> | 64 | #include <linux/etherdevice.h> |
64 | #include <linux/skbuff.h> | 65 | #include <linux/skbuff.h> |
65 | #include <net/sock.h> | 66 | #include <net/sock.h> |
@@ -861,6 +862,28 @@ static void bond_mc_delete(struct bonding *bond, void *addr, int alen) | |||
861 | } | 862 | } |
862 | } | 863 | } |
863 | 864 | ||
865 | |||
866 | /* | ||
867 | * Retrieve the list of registered multicast addresses for the bonding | ||
868 | * device and retransmit an IGMP JOIN request to the current active | ||
869 | * slave. | ||
870 | */ | ||
871 | static void bond_resend_igmp_join_requests(struct bonding *bond) | ||
872 | { | ||
873 | struct in_device *in_dev; | ||
874 | struct ip_mc_list *im; | ||
875 | |||
876 | rcu_read_lock(); | ||
877 | in_dev = __in_dev_get_rcu(bond->dev); | ||
878 | if (in_dev) { | ||
879 | for (im = in_dev->mc_list; im; im = im->next) { | ||
880 | ip_mc_rejoin_group(im); | ||
881 | } | ||
882 | } | ||
883 | |||
884 | rcu_read_unlock(); | ||
885 | } | ||
886 | |||
864 | /* | 887 | /* |
865 | * Totally destroys the mc_list in bond | 888 | * Totally destroys the mc_list in bond |
866 | */ | 889 | */ |
@@ -874,6 +897,7 @@ static void bond_mc_list_destroy(struct bonding *bond) | |||
874 | kfree(dmi); | 897 | kfree(dmi); |
875 | dmi = bond->mc_list; | 898 | dmi = bond->mc_list; |
876 | } | 899 | } |
900 | bond->mc_list = NULL; | ||
877 | } | 901 | } |
878 | 902 | ||
879 | /* | 903 | /* |
@@ -967,6 +991,7 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, struct | |||
967 | for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) { | 991 | for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) { |
968 | dev_mc_add(new_active->dev, dmi->dmi_addr, dmi->dmi_addrlen, 0); | 992 | dev_mc_add(new_active->dev, dmi->dmi_addr, dmi->dmi_addrlen, 0); |
969 | } | 993 | } |
994 | bond_resend_igmp_join_requests(bond); | ||
970 | } | 995 | } |
971 | } | 996 | } |
972 | 997 | ||
@@ -3423,15 +3448,21 @@ void bond_register_arp(struct bonding *bond) | |||
3423 | { | 3448 | { |
3424 | struct packet_type *pt = &bond->arp_mon_pt; | 3449 | struct packet_type *pt = &bond->arp_mon_pt; |
3425 | 3450 | ||
3451 | if (pt->type) | ||
3452 | return; | ||
3453 | |||
3426 | pt->type = htons(ETH_P_ARP); | 3454 | pt->type = htons(ETH_P_ARP); |
3427 | pt->dev = NULL; /*bond->dev;XXX*/ | 3455 | pt->dev = bond->dev; |
3428 | pt->func = bond_arp_rcv; | 3456 | pt->func = bond_arp_rcv; |
3429 | dev_add_pack(pt); | 3457 | dev_add_pack(pt); |
3430 | } | 3458 | } |
3431 | 3459 | ||
3432 | void bond_unregister_arp(struct bonding *bond) | 3460 | void bond_unregister_arp(struct bonding *bond) |
3433 | { | 3461 | { |
3434 | dev_remove_pack(&bond->arp_mon_pt); | 3462 | struct packet_type *pt = &bond->arp_mon_pt; |
3463 | |||
3464 | dev_remove_pack(pt); | ||
3465 | pt->type = 0; | ||
3435 | } | 3466 | } |
3436 | 3467 | ||
3437 | /*---------------------------- Hashing Policies -----------------------------*/ | 3468 | /*---------------------------- Hashing Policies -----------------------------*/ |
@@ -4011,42 +4042,6 @@ out: | |||
4011 | return 0; | 4042 | return 0; |
4012 | } | 4043 | } |
4013 | 4044 | ||
4014 | static void bond_activebackup_xmit_copy(struct sk_buff *skb, | ||
4015 | struct bonding *bond, | ||
4016 | struct slave *slave) | ||
4017 | { | ||
4018 | struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC); | ||
4019 | struct ethhdr *eth_data; | ||
4020 | u8 *hwaddr; | ||
4021 | int res; | ||
4022 | |||
4023 | if (!skb2) { | ||
4024 | printk(KERN_ERR DRV_NAME ": Error: " | ||
4025 | "bond_activebackup_xmit_copy(): skb_copy() failed\n"); | ||
4026 | return; | ||
4027 | } | ||
4028 | |||
4029 | skb2->mac.raw = (unsigned char *)skb2->data; | ||
4030 | eth_data = eth_hdr(skb2); | ||
4031 | |||
4032 | /* Pick an appropriate source MAC address | ||
4033 | * -- use slave's perm MAC addr, unless used by bond | ||
4034 | * -- otherwise, borrow active slave's perm MAC addr | ||
4035 | * since that will not be used | ||
4036 | */ | ||
4037 | hwaddr = slave->perm_hwaddr; | ||
4038 | if (!memcmp(eth_data->h_source, hwaddr, ETH_ALEN)) | ||
4039 | hwaddr = bond->curr_active_slave->perm_hwaddr; | ||
4040 | |||
4041 | /* Set source MAC address appropriately */ | ||
4042 | memcpy(eth_data->h_source, hwaddr, ETH_ALEN); | ||
4043 | |||
4044 | res = bond_dev_queue_xmit(bond, skb2, slave->dev); | ||
4045 | if (res) | ||
4046 | dev_kfree_skb(skb2); | ||
4047 | |||
4048 | return; | ||
4049 | } | ||
4050 | 4045 | ||
4051 | /* | 4046 | /* |
4052 | * in active-backup mode, we know that bond->curr_active_slave is always valid if | 4047 | * in active-backup mode, we know that bond->curr_active_slave is always valid if |
@@ -4067,21 +4062,6 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d | |||
4067 | if (!bond->curr_active_slave) | 4062 | if (!bond->curr_active_slave) |
4068 | goto out; | 4063 | goto out; |
4069 | 4064 | ||
4070 | /* Xmit IGMP frames on all slaves to ensure rapid fail-over | ||
4071 | for multicast traffic on snooping switches */ | ||
4072 | if (skb->protocol == __constant_htons(ETH_P_IP) && | ||
4073 | skb->nh.iph->protocol == IPPROTO_IGMP) { | ||
4074 | struct slave *slave, *active_slave; | ||
4075 | int i; | ||
4076 | |||
4077 | active_slave = bond->curr_active_slave; | ||
4078 | bond_for_each_slave_from_to(bond, slave, i, active_slave->next, | ||
4079 | active_slave->prev) | ||
4080 | if (IS_UP(slave->dev) && | ||
4081 | (slave->link == BOND_LINK_UP)) | ||
4082 | bond_activebackup_xmit_copy(skb, bond, slave); | ||
4083 | } | ||
4084 | |||
4085 | res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev); | 4065 | res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev); |
4086 | 4066 | ||
4087 | out: | 4067 | out: |
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index be2ddbb6ef56..9ba21e0f27c5 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -1309,7 +1309,7 @@ static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address, | |||
1309 | static int mv643xx_eth_probe(struct platform_device *pdev) | 1309 | static int mv643xx_eth_probe(struct platform_device *pdev) |
1310 | { | 1310 | { |
1311 | struct mv643xx_eth_platform_data *pd; | 1311 | struct mv643xx_eth_platform_data *pd; |
1312 | int port_num = pdev->id; | 1312 | int port_num; |
1313 | struct mv643xx_private *mp; | 1313 | struct mv643xx_private *mp; |
1314 | struct net_device *dev; | 1314 | struct net_device *dev; |
1315 | u8 *p; | 1315 | u8 *p; |
@@ -1319,6 +1319,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1319 | int duplex = DUPLEX_HALF; | 1319 | int duplex = DUPLEX_HALF; |
1320 | int speed = 0; /* default to auto-negotiation */ | 1320 | int speed = 0; /* default to auto-negotiation */ |
1321 | 1321 | ||
1322 | pd = pdev->dev.platform_data; | ||
1323 | if (pd == NULL) { | ||
1324 | printk(KERN_ERR "No mv643xx_eth_platform_data\n"); | ||
1325 | return -ENODEV; | ||
1326 | } | ||
1327 | |||
1322 | dev = alloc_etherdev(sizeof(struct mv643xx_private)); | 1328 | dev = alloc_etherdev(sizeof(struct mv643xx_private)); |
1323 | if (!dev) | 1329 | if (!dev) |
1324 | return -ENOMEM; | 1330 | return -ENOMEM; |
@@ -1331,8 +1337,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1331 | BUG_ON(!res); | 1337 | BUG_ON(!res); |
1332 | dev->irq = res->start; | 1338 | dev->irq = res->start; |
1333 | 1339 | ||
1334 | mp->port_num = port_num; | ||
1335 | |||
1336 | dev->open = mv643xx_eth_open; | 1340 | dev->open = mv643xx_eth_open; |
1337 | dev->stop = mv643xx_eth_stop; | 1341 | dev->stop = mv643xx_eth_stop; |
1338 | dev->hard_start_xmit = mv643xx_eth_start_xmit; | 1342 | dev->hard_start_xmit = mv643xx_eth_start_xmit; |
@@ -1373,39 +1377,40 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1373 | 1377 | ||
1374 | spin_lock_init(&mp->lock); | 1378 | spin_lock_init(&mp->lock); |
1375 | 1379 | ||
1380 | port_num = pd->port_number; | ||
1381 | |||
1376 | /* set default config values */ | 1382 | /* set default config values */ |
1377 | eth_port_uc_addr_get(dev, dev->dev_addr); | 1383 | eth_port_uc_addr_get(dev, dev->dev_addr); |
1378 | mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; | 1384 | mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; |
1379 | mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; | 1385 | mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; |
1380 | 1386 | ||
1381 | pd = pdev->dev.platform_data; | 1387 | if (is_valid_ether_addr(pd->mac_addr)) |
1382 | if (pd) { | 1388 | memcpy(dev->dev_addr, pd->mac_addr, 6); |
1383 | if (is_valid_ether_addr(pd->mac_addr)) | ||
1384 | memcpy(dev->dev_addr, pd->mac_addr, 6); | ||
1385 | 1389 | ||
1386 | if (pd->phy_addr || pd->force_phy_addr) | 1390 | if (pd->phy_addr || pd->force_phy_addr) |
1387 | ethernet_phy_set(port_num, pd->phy_addr); | 1391 | ethernet_phy_set(port_num, pd->phy_addr); |
1388 | 1392 | ||
1389 | if (pd->rx_queue_size) | 1393 | if (pd->rx_queue_size) |
1390 | mp->rx_ring_size = pd->rx_queue_size; | 1394 | mp->rx_ring_size = pd->rx_queue_size; |
1391 | 1395 | ||
1392 | if (pd->tx_queue_size) | 1396 | if (pd->tx_queue_size) |
1393 | mp->tx_ring_size = pd->tx_queue_size; | 1397 | mp->tx_ring_size = pd->tx_queue_size; |
1394 | 1398 | ||
1395 | if (pd->tx_sram_size) { | 1399 | if (pd->tx_sram_size) { |
1396 | mp->tx_sram_size = pd->tx_sram_size; | 1400 | mp->tx_sram_size = pd->tx_sram_size; |
1397 | mp->tx_sram_addr = pd->tx_sram_addr; | 1401 | mp->tx_sram_addr = pd->tx_sram_addr; |
1398 | } | 1402 | } |
1399 | |||
1400 | if (pd->rx_sram_size) { | ||
1401 | mp->rx_sram_size = pd->rx_sram_size; | ||
1402 | mp->rx_sram_addr = pd->rx_sram_addr; | ||
1403 | } | ||
1404 | 1403 | ||
1405 | duplex = pd->duplex; | 1404 | if (pd->rx_sram_size) { |
1406 | speed = pd->speed; | 1405 | mp->rx_sram_size = pd->rx_sram_size; |
1406 | mp->rx_sram_addr = pd->rx_sram_addr; | ||
1407 | } | 1407 | } |
1408 | 1408 | ||
1409 | duplex = pd->duplex; | ||
1410 | speed = pd->speed; | ||
1411 | |||
1412 | mp->port_num = port_num; | ||
1413 | |||
1409 | /* Hook up MII support for ethtool */ | 1414 | /* Hook up MII support for ethtool */ |
1410 | mp->mii.dev = dev; | 1415 | mp->mii.dev = dev; |
1411 | mp->mii.mdio_read = mv643xx_mdio_read; | 1416 | mp->mii.mdio_read = mv643xx_mdio_read; |
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 5c57433cb306..c6172a77a6d7 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c | |||
@@ -2024,6 +2024,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev) | |||
2024 | struct netdev_private *np = netdev_priv(dev); | 2024 | struct netdev_private *np = netdev_priv(dev); |
2025 | void __iomem * ioaddr = ns_ioaddr(dev); | 2025 | void __iomem * ioaddr = ns_ioaddr(dev); |
2026 | unsigned entry; | 2026 | unsigned entry; |
2027 | unsigned long flags; | ||
2027 | 2028 | ||
2028 | /* Note: Ordering is important here, set the field with the | 2029 | /* Note: Ordering is important here, set the field with the |
2029 | "ownership" bit last, and only then increment cur_tx. */ | 2030 | "ownership" bit last, and only then increment cur_tx. */ |
@@ -2037,7 +2038,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev) | |||
2037 | 2038 | ||
2038 | np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]); | 2039 | np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]); |
2039 | 2040 | ||
2040 | spin_lock_irq(&np->lock); | 2041 | spin_lock_irqsave(&np->lock, flags); |
2041 | 2042 | ||
2042 | if (!np->hands_off) { | 2043 | if (!np->hands_off) { |
2043 | np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len); | 2044 | np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len); |
@@ -2056,7 +2057,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev) | |||
2056 | dev_kfree_skb_irq(skb); | 2057 | dev_kfree_skb_irq(skb); |
2057 | np->stats.tx_dropped++; | 2058 | np->stats.tx_dropped++; |
2058 | } | 2059 | } |
2059 | spin_unlock_irq(&np->lock); | 2060 | spin_unlock_irqrestore(&np->lock, flags); |
2060 | 2061 | ||
2061 | dev->trans_start = jiffies; | 2062 | dev->trans_start = jiffies; |
2062 | 2063 | ||
@@ -2222,6 +2223,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do) | |||
2222 | pkt_len = (desc_status & DescSizeMask) - 4; | 2223 | pkt_len = (desc_status & DescSizeMask) - 4; |
2223 | if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){ | 2224 | if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){ |
2224 | if (desc_status & DescMore) { | 2225 | if (desc_status & DescMore) { |
2226 | unsigned long flags; | ||
2227 | |||
2225 | if (netif_msg_rx_err(np)) | 2228 | if (netif_msg_rx_err(np)) |
2226 | printk(KERN_WARNING | 2229 | printk(KERN_WARNING |
2227 | "%s: Oversized(?) Ethernet " | 2230 | "%s: Oversized(?) Ethernet " |
@@ -2236,12 +2239,12 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do) | |||
2236 | * reset procedure documented in | 2239 | * reset procedure documented in |
2237 | * AN-1287. */ | 2240 | * AN-1287. */ |
2238 | 2241 | ||
2239 | spin_lock_irq(&np->lock); | 2242 | spin_lock_irqsave(&np->lock, flags); |
2240 | reset_rx(dev); | 2243 | reset_rx(dev); |
2241 | reinit_rx(dev); | 2244 | reinit_rx(dev); |
2242 | writel(np->ring_dma, ioaddr + RxRingPtr); | 2245 | writel(np->ring_dma, ioaddr + RxRingPtr); |
2243 | check_link(dev); | 2246 | check_link(dev); |
2244 | spin_unlock_irq(&np->lock); | 2247 | spin_unlock_irqrestore(&np->lock, flags); |
2245 | 2248 | ||
2246 | /* We'll enable RX on exit from this | 2249 | /* We'll enable RX on exit from this |
2247 | * function. */ | 2250 | * function. */ |
@@ -2396,8 +2399,19 @@ static struct net_device_stats *get_stats(struct net_device *dev) | |||
2396 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2399 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2397 | static void natsemi_poll_controller(struct net_device *dev) | 2400 | static void natsemi_poll_controller(struct net_device *dev) |
2398 | { | 2401 | { |
2402 | struct netdev_private *np = netdev_priv(dev); | ||
2403 | |||
2399 | disable_irq(dev->irq); | 2404 | disable_irq(dev->irq); |
2400 | intr_handler(dev->irq, dev); | 2405 | |
2406 | /* | ||
2407 | * A real interrupt might have already reached us at this point | ||
2408 | * but NAPI might still haven't called us back. As the interrupt | ||
2409 | * status register is cleared by reading, we should prevent an | ||
2410 | * interrupt loss in this case... | ||
2411 | */ | ||
2412 | if (!np->intr_status) | ||
2413 | intr_handler(dev->irq, dev); | ||
2414 | |||
2401 | enable_irq(dev->irq); | 2415 | enable_irq(dev->irq); |
2402 | } | 2416 | } |
2403 | #endif | 2417 | #endif |
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 36f9d988278f..4d94ba7899bf 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c | |||
@@ -1234,14 +1234,14 @@ static void pcnet32_rx_entry(struct net_device *dev, | |||
1234 | skb_put(skb, pkt_len); /* Make room */ | 1234 | skb_put(skb, pkt_len); /* Make room */ |
1235 | pci_dma_sync_single_for_cpu(lp->pci_dev, | 1235 | pci_dma_sync_single_for_cpu(lp->pci_dev, |
1236 | lp->rx_dma_addr[entry], | 1236 | lp->rx_dma_addr[entry], |
1237 | PKT_BUF_SZ - 2, | 1237 | pkt_len, |
1238 | PCI_DMA_FROMDEVICE); | 1238 | PCI_DMA_FROMDEVICE); |
1239 | eth_copy_and_sum(skb, | 1239 | eth_copy_and_sum(skb, |
1240 | (unsigned char *)(lp->rx_skbuff[entry]->data), | 1240 | (unsigned char *)(lp->rx_skbuff[entry]->data), |
1241 | pkt_len, 0); | 1241 | pkt_len, 0); |
1242 | pci_dma_sync_single_for_device(lp->pci_dev, | 1242 | pci_dma_sync_single_for_device(lp->pci_dev, |
1243 | lp->rx_dma_addr[entry], | 1243 | lp->rx_dma_addr[entry], |
1244 | PKT_BUF_SZ - 2, | 1244 | pkt_len, |
1245 | PCI_DMA_FROMDEVICE); | 1245 | PCI_DMA_FROMDEVICE); |
1246 | } | 1246 | } |
1247 | lp->stats.rx_bytes += skb->len; | 1247 | lp->stats.rx_bytes += skb->len; |
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index fb2b53051635..b3750f284279 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c | |||
@@ -968,10 +968,10 @@ static void mdio_write(struct net_device *net_dev, int phy_id, int location, | |||
968 | 968 | ||
969 | static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr) | 969 | static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr) |
970 | { | 970 | { |
971 | int i = 0; | 971 | int i; |
972 | u16 status; | 972 | u16 status; |
973 | 973 | ||
974 | while (i++ < 2) | 974 | for (i = 0; i < 2; i++) |
975 | status = mdio_read(net_dev, phy_addr, MII_STATUS); | 975 | status = mdio_read(net_dev, phy_addr, MII_STATUS); |
976 | 976 | ||
977 | mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET ); | 977 | mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET ); |
@@ -1430,7 +1430,7 @@ static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr) | |||
1430 | int i = 0; | 1430 | int i = 0; |
1431 | u32 status; | 1431 | u32 status; |
1432 | 1432 | ||
1433 | while (i++ < 2) | 1433 | for (i = 0; i < 2; i++) |
1434 | status = mdio_read(net_dev, phy_addr, MII_STATUS); | 1434 | status = mdio_read(net_dev, phy_addr, MII_STATUS); |
1435 | 1435 | ||
1436 | if (!(status & MII_STAT_LINK)){ | 1436 | if (!(status & MII_STAT_LINK)){ |
@@ -1466,9 +1466,9 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex | |||
1466 | int phy_addr = sis_priv->cur_phy; | 1466 | int phy_addr = sis_priv->cur_phy; |
1467 | u32 status; | 1467 | u32 status; |
1468 | u16 autoadv, autorec; | 1468 | u16 autoadv, autorec; |
1469 | int i = 0; | 1469 | int i; |
1470 | 1470 | ||
1471 | while (i++ < 2) | 1471 | for (i = 0; i < 2; i++) |
1472 | status = mdio_read(net_dev, phy_addr, MII_STATUS); | 1472 | status = mdio_read(net_dev, phy_addr, MII_STATUS); |
1473 | 1473 | ||
1474 | if (!(status & MII_STAT_LINK)) | 1474 | if (!(status & MII_STAT_LINK)) |
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index dacea4fd3337..c82befa209a2 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c | |||
@@ -1685,7 +1685,7 @@ static const struct ethtool_ops de_ethtool_ops = { | |||
1685 | .get_regs = de_get_regs, | 1685 | .get_regs = de_get_regs, |
1686 | }; | 1686 | }; |
1687 | 1687 | ||
1688 | static void __init de21040_get_mac_address (struct de_private *de) | 1688 | static void __devinit de21040_get_mac_address (struct de_private *de) |
1689 | { | 1689 | { |
1690 | unsigned i; | 1690 | unsigned i; |
1691 | 1691 | ||
@@ -1703,7 +1703,7 @@ static void __init de21040_get_mac_address (struct de_private *de) | |||
1703 | } | 1703 | } |
1704 | } | 1704 | } |
1705 | 1705 | ||
1706 | static void __init de21040_get_media_info(struct de_private *de) | 1706 | static void __devinit de21040_get_media_info(struct de_private *de) |
1707 | { | 1707 | { |
1708 | unsigned int i; | 1708 | unsigned int i; |
1709 | 1709 | ||
@@ -1765,7 +1765,7 @@ static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, in | |||
1765 | return retval; | 1765 | return retval; |
1766 | } | 1766 | } |
1767 | 1767 | ||
1768 | static void __init de21041_get_srom_info (struct de_private *de) | 1768 | static void __devinit de21041_get_srom_info (struct de_private *de) |
1769 | { | 1769 | { |
1770 | unsigned i, sa_offset = 0, ofs; | 1770 | unsigned i, sa_offset = 0, ofs; |
1771 | u8 ee_data[DE_EEPROM_SIZE + 6] = {}; | 1771 | u8 ee_data[DE_EEPROM_SIZE + 6] = {}; |
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c index 7f59a3d4fda2..24a29c99ba94 100644 --- a/drivers/net/tulip/dmfe.c +++ b/drivers/net/tulip/dmfe.c | |||
@@ -143,9 +143,16 @@ | |||
143 | #define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */ | 143 | #define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */ |
144 | #define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */ | 144 | #define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */ |
145 | 145 | ||
146 | #define DMFE_DBUG(dbug_now, msg, value) if (dmfe_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value)) | 146 | #define DMFE_DBUG(dbug_now, msg, value) \ |
147 | do { \ | ||
148 | if (dmfe_debug || (dbug_now)) \ | ||
149 | printk(KERN_ERR DRV_NAME ": %s %lx\n",\ | ||
150 | (msg), (long) (value)); \ | ||
151 | } while (0) | ||
147 | 152 | ||
148 | #define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half"); | 153 | #define SHOW_MEDIA_TYPE(mode) \ |
154 | printk (KERN_INFO DRV_NAME ": Change Speed to %sMhz %s duplex\n" , \ | ||
155 | (mode & 1) ? "100":"10", (mode & 4) ? "full":"half"); | ||
149 | 156 | ||
150 | 157 | ||
151 | /* CR9 definition: SROM/MII */ | 158 | /* CR9 definition: SROM/MII */ |
@@ -163,10 +170,20 @@ | |||
163 | 170 | ||
164 | #define SROM_V41_CODE 0x14 | 171 | #define SROM_V41_CODE 0x14 |
165 | 172 | ||
166 | #define SROM_CLK_WRITE(data, ioaddr) outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5); | 173 | #define SROM_CLK_WRITE(data, ioaddr) \ |
174 | outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \ | ||
175 | udelay(5); \ | ||
176 | outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \ | ||
177 | udelay(5); \ | ||
178 | outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \ | ||
179 | udelay(5); | ||
180 | |||
181 | #define __CHK_IO_SIZE(pci_id, dev_rev) \ | ||
182 | (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \ | ||
183 | DM9102A_IO_SIZE: DM9102_IO_SIZE) | ||
167 | 184 | ||
168 | #define __CHK_IO_SIZE(pci_id, dev_rev) ( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? DM9102A_IO_SIZE: DM9102_IO_SIZE | 185 | #define CHK_IO_SIZE(pci_dev, dev_rev) \ |
169 | #define CHK_IO_SIZE(pci_dev, dev_rev) __CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev) | 186 | (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev)) |
170 | 187 | ||
171 | /* Sten Check */ | 188 | /* Sten Check */ |
172 | #define DEVICE net_device | 189 | #define DEVICE net_device |
@@ -187,7 +204,7 @@ struct rx_desc { | |||
187 | struct dmfe_board_info { | 204 | struct dmfe_board_info { |
188 | u32 chip_id; /* Chip vendor/Device ID */ | 205 | u32 chip_id; /* Chip vendor/Device ID */ |
189 | u32 chip_revision; /* Chip revision */ | 206 | u32 chip_revision; /* Chip revision */ |
190 | struct DEVICE *dev; /* net device */ | 207 | struct DEVICE *next_dev; /* next device */ |
191 | struct pci_dev *pdev; /* PCI device */ | 208 | struct pci_dev *pdev; /* PCI device */ |
192 | spinlock_t lock; | 209 | spinlock_t lock; |
193 | 210 | ||
@@ -231,7 +248,6 @@ struct dmfe_board_info { | |||
231 | u8 media_mode; /* user specify media mode */ | 248 | u8 media_mode; /* user specify media mode */ |
232 | u8 op_mode; /* real work media mode */ | 249 | u8 op_mode; /* real work media mode */ |
233 | u8 phy_addr; | 250 | u8 phy_addr; |
234 | u8 link_failed; /* Ever link failed */ | ||
235 | u8 wait_reset; /* Hardware failed, need to reset */ | 251 | u8 wait_reset; /* Hardware failed, need to reset */ |
236 | u8 dm910x_chk_mode; /* Operating mode check */ | 252 | u8 dm910x_chk_mode; /* Operating mode check */ |
237 | u8 first_in_callback; /* Flag to record state */ | 253 | u8 first_in_callback; /* Flag to record state */ |
@@ -329,7 +345,7 @@ static void dmfe_program_DM9802(struct dmfe_board_info *); | |||
329 | static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * ); | 345 | static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * ); |
330 | static void dmfe_set_phyxcer(struct dmfe_board_info *); | 346 | static void dmfe_set_phyxcer(struct dmfe_board_info *); |
331 | 347 | ||
332 | /* DM910X network baord routine ---------------------------- */ | 348 | /* DM910X network board routine ---------------------------- */ |
333 | 349 | ||
334 | /* | 350 | /* |
335 | * Search DM910X board ,allocate space and register it | 351 | * Search DM910X board ,allocate space and register it |
@@ -356,7 +372,8 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, | |||
356 | SET_NETDEV_DEV(dev, &pdev->dev); | 372 | SET_NETDEV_DEV(dev, &pdev->dev); |
357 | 373 | ||
358 | if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { | 374 | if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { |
359 | printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n"); | 375 | printk(KERN_WARNING DRV_NAME |
376 | ": 32-bit PCI DMA not available.\n"); | ||
360 | err = -ENODEV; | 377 | err = -ENODEV; |
361 | goto err_out_free; | 378 | goto err_out_free; |
362 | } | 379 | } |
@@ -399,11 +416,12 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, | |||
399 | /* Init system & device */ | 416 | /* Init system & device */ |
400 | db = netdev_priv(dev); | 417 | db = netdev_priv(dev); |
401 | 418 | ||
402 | db->dev = dev; | ||
403 | |||
404 | /* Allocate Tx/Rx descriptor memory */ | 419 | /* Allocate Tx/Rx descriptor memory */ |
405 | db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); | 420 | db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * |
406 | db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); | 421 | DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); |
422 | |||
423 | db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * | ||
424 | TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); | ||
407 | 425 | ||
408 | db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; | 426 | db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; |
409 | db->first_tx_desc_dma = db->desc_pool_dma_ptr; | 427 | db->first_tx_desc_dma = db->desc_pool_dma_ptr; |
@@ -428,7 +446,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, | |||
428 | dev->poll_controller = &poll_dmfe; | 446 | dev->poll_controller = &poll_dmfe; |
429 | #endif | 447 | #endif |
430 | dev->ethtool_ops = &netdev_ethtool_ops; | 448 | dev->ethtool_ops = &netdev_ethtool_ops; |
431 | netif_carrier_off(db->dev); | 449 | netif_carrier_off(dev); |
432 | spin_lock_init(&db->lock); | 450 | spin_lock_init(&db->lock); |
433 | 451 | ||
434 | pci_read_config_dword(pdev, 0x50, &pci_pmr); | 452 | pci_read_config_dword(pdev, 0x50, &pci_pmr); |
@@ -440,7 +458,8 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, | |||
440 | 458 | ||
441 | /* read 64 word srom data */ | 459 | /* read 64 word srom data */ |
442 | for (i = 0; i < 64; i++) | 460 | for (i = 0; i < 64; i++) |
443 | ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); | 461 | ((u16 *) db->srom)[i] = |
462 | cpu_to_le16(read_srom_word(db->ioaddr, i)); | ||
444 | 463 | ||
445 | /* Set Node address */ | 464 | /* Set Node address */ |
446 | for (i = 0; i < 6; i++) | 465 | for (i = 0; i < 6; i++) |
@@ -482,14 +501,17 @@ static void __devexit dmfe_remove_one (struct pci_dev *pdev) | |||
482 | DMFE_DBUG(0, "dmfe_remove_one()", 0); | 501 | DMFE_DBUG(0, "dmfe_remove_one()", 0); |
483 | 502 | ||
484 | if (dev) { | 503 | if (dev) { |
504 | |||
505 | unregister_netdev(dev); | ||
506 | |||
485 | pci_free_consistent(db->pdev, sizeof(struct tx_desc) * | 507 | pci_free_consistent(db->pdev, sizeof(struct tx_desc) * |
486 | DESC_ALL_CNT + 0x20, db->desc_pool_ptr, | 508 | DESC_ALL_CNT + 0x20, db->desc_pool_ptr, |
487 | db->desc_pool_dma_ptr); | 509 | db->desc_pool_dma_ptr); |
488 | pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, | 510 | pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, |
489 | db->buf_pool_ptr, db->buf_pool_dma_ptr); | 511 | db->buf_pool_ptr, db->buf_pool_dma_ptr); |
490 | unregister_netdev(dev); | ||
491 | pci_release_regions(pdev); | 512 | pci_release_regions(pdev); |
492 | free_netdev(dev); /* free board information */ | 513 | free_netdev(dev); /* free board information */ |
514 | |||
493 | pci_set_drvdata(pdev, NULL); | 515 | pci_set_drvdata(pdev, NULL); |
494 | } | 516 | } |
495 | 517 | ||
@@ -509,7 +531,8 @@ static int dmfe_open(struct DEVICE *dev) | |||
509 | 531 | ||
510 | DMFE_DBUG(0, "dmfe_open", 0); | 532 | DMFE_DBUG(0, "dmfe_open", 0); |
511 | 533 | ||
512 | ret = request_irq(dev->irq, &dmfe_interrupt, IRQF_SHARED, dev->name, dev); | 534 | ret = request_irq(dev->irq, &dmfe_interrupt, |
535 | IRQF_SHARED, dev->name, dev); | ||
513 | if (ret) | 536 | if (ret) |
514 | return ret; | 537 | return ret; |
515 | 538 | ||
@@ -518,7 +541,6 @@ static int dmfe_open(struct DEVICE *dev) | |||
518 | db->tx_packet_cnt = 0; | 541 | db->tx_packet_cnt = 0; |
519 | db->tx_queue_cnt = 0; | 542 | db->tx_queue_cnt = 0; |
520 | db->rx_avail_cnt = 0; | 543 | db->rx_avail_cnt = 0; |
521 | db->link_failed = 1; | ||
522 | db->wait_reset = 0; | 544 | db->wait_reset = 0; |
523 | 545 | ||
524 | db->first_in_callback = 0; | 546 | db->first_in_callback = 0; |
@@ -650,7 +672,8 @@ static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev) | |||
650 | /* No Tx resource check, it never happen nromally */ | 672 | /* No Tx resource check, it never happen nromally */ |
651 | if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) { | 673 | if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) { |
652 | spin_unlock_irqrestore(&db->lock, flags); | 674 | spin_unlock_irqrestore(&db->lock, flags); |
653 | printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_queue_cnt); | 675 | printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", |
676 | db->tx_queue_cnt); | ||
654 | return 1; | 677 | return 1; |
655 | } | 678 | } |
656 | 679 | ||
@@ -722,7 +745,8 @@ static int dmfe_stop(struct DEVICE *dev) | |||
722 | 745 | ||
723 | #if 0 | 746 | #if 0 |
724 | /* show statistic counter */ | 747 | /* show statistic counter */ |
725 | printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n", | 748 | printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx" |
749 | " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n", | ||
726 | db->tx_fifo_underrun, db->tx_excessive_collision, | 750 | db->tx_fifo_underrun, db->tx_excessive_collision, |
727 | db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier, | 751 | db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier, |
728 | db->tx_jabber_timeout, db->reset_count, db->reset_cr8, | 752 | db->tx_jabber_timeout, db->reset_count, db->reset_cr8, |
@@ -905,7 +929,7 @@ static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag) | |||
905 | static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) | 929 | static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) |
906 | { | 930 | { |
907 | struct rx_desc *rxptr; | 931 | struct rx_desc *rxptr; |
908 | struct sk_buff *skb; | 932 | struct sk_buff *skb, *newskb; |
909 | int rxlen; | 933 | int rxlen; |
910 | u32 rdes0; | 934 | u32 rdes0; |
911 | 935 | ||
@@ -919,7 +943,9 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) | |||
919 | db->rx_avail_cnt--; | 943 | db->rx_avail_cnt--; |
920 | db->interval_rx_cnt++; | 944 | db->interval_rx_cnt++; |
921 | 945 | ||
922 | pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE); | 946 | pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), |
947 | RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE); | ||
948 | |||
923 | if ( (rdes0 & 0x300) != 0x300) { | 949 | if ( (rdes0 & 0x300) != 0x300) { |
924 | /* A packet without First/Last flag */ | 950 | /* A packet without First/Last flag */ |
925 | /* reuse this SKB */ | 951 | /* reuse this SKB */ |
@@ -956,9 +982,11 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) | |||
956 | } else { | 982 | } else { |
957 | /* Good packet, send to upper layer */ | 983 | /* Good packet, send to upper layer */ |
958 | /* Shorst packet used new SKB */ | 984 | /* Shorst packet used new SKB */ |
959 | if ( (rxlen < RX_COPY_SIZE) && | 985 | if ((rxlen < RX_COPY_SIZE) && |
960 | ( (skb = dev_alloc_skb(rxlen + 2) ) | 986 | ((newskb = dev_alloc_skb(rxlen + 2)) |
961 | != NULL) ) { | 987 | != NULL)) { |
988 | |||
989 | skb = newskb; | ||
962 | /* size less than COPY_SIZE, allocate a rxlen SKB */ | 990 | /* size less than COPY_SIZE, allocate a rxlen SKB */ |
963 | skb->dev = dev; | 991 | skb->dev = dev; |
964 | skb_reserve(skb, 2); /* 16byte align */ | 992 | skb_reserve(skb, 2); /* 16byte align */ |
@@ -1069,6 +1097,8 @@ static void dmfe_timer(unsigned long data) | |||
1069 | struct dmfe_board_info *db = netdev_priv(dev); | 1097 | struct dmfe_board_info *db = netdev_priv(dev); |
1070 | unsigned long flags; | 1098 | unsigned long flags; |
1071 | 1099 | ||
1100 | int link_ok, link_ok_phy; | ||
1101 | |||
1072 | DMFE_DBUG(0, "dmfe_timer()", 0); | 1102 | DMFE_DBUG(0, "dmfe_timer()", 0); |
1073 | spin_lock_irqsave(&db->lock, flags); | 1103 | spin_lock_irqsave(&db->lock, flags); |
1074 | 1104 | ||
@@ -1078,7 +1108,8 @@ static void dmfe_timer(unsigned long data) | |||
1078 | if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) { | 1108 | if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) { |
1079 | db->cr6_data &= ~0x40000; | 1109 | db->cr6_data &= ~0x40000; |
1080 | update_cr6(db->cr6_data, db->ioaddr); | 1110 | update_cr6(db->cr6_data, db->ioaddr); |
1081 | phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); | 1111 | phy_write(db->ioaddr, |
1112 | db->phy_addr, 0, 0x1000, db->chip_id); | ||
1082 | db->cr6_data |= 0x40000; | 1113 | db->cr6_data |= 0x40000; |
1083 | update_cr6(db->cr6_data, db->ioaddr); | 1114 | update_cr6(db->cr6_data, db->ioaddr); |
1084 | db->timer.expires = DMFE_TIMER_WUT + HZ * 2; | 1115 | db->timer.expires = DMFE_TIMER_WUT + HZ * 2; |
@@ -1139,21 +1170,41 @@ static void dmfe_timer(unsigned long data) | |||
1139 | (db->chip_revision == 0x02000010)) ) { | 1170 | (db->chip_revision == 0x02000010)) ) { |
1140 | /* DM9102A Chip */ | 1171 | /* DM9102A Chip */ |
1141 | if (tmp_cr12 & 2) | 1172 | if (tmp_cr12 & 2) |
1142 | tmp_cr12 = 0x0; /* Link failed */ | 1173 | link_ok = 0; |
1143 | else | 1174 | else |
1144 | tmp_cr12 = 0x3; /* Link OK */ | 1175 | link_ok = 1; |
1145 | } | 1176 | } |
1177 | else | ||
1178 | /*0x43 is used instead of 0x3 because bit 6 should represent | ||
1179 | link status of external PHY */ | ||
1180 | link_ok = (tmp_cr12 & 0x43) ? 1 : 0; | ||
1181 | |||
1182 | |||
1183 | /* If chip reports that link is failed it could be because external | ||
1184 | PHY link status pin is not conected correctly to chip | ||
1185 | To be sure ask PHY too. | ||
1186 | */ | ||
1187 | |||
1188 | /* need a dummy read because of PHY's register latch*/ | ||
1189 | phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id); | ||
1190 | link_ok_phy = (phy_read (db->ioaddr, | ||
1191 | db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0; | ||
1146 | 1192 | ||
1147 | if ( !(tmp_cr12 & 0x3) && !db->link_failed ) { | 1193 | if (link_ok_phy != link_ok) { |
1194 | DMFE_DBUG (0, "PHY and chip report different link status", 0); | ||
1195 | link_ok = link_ok | link_ok_phy; | ||
1196 | } | ||
1197 | |||
1198 | if ( !link_ok && netif_carrier_ok(dev)) { | ||
1148 | /* Link Failed */ | 1199 | /* Link Failed */ |
1149 | DMFE_DBUG(0, "Link Failed", tmp_cr12); | 1200 | DMFE_DBUG(0, "Link Failed", tmp_cr12); |
1150 | db->link_failed = 1; | 1201 | netif_carrier_off(dev); |
1151 | netif_carrier_off(db->dev); | ||
1152 | 1202 | ||
1153 | /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ | 1203 | /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ |
1154 | /* AUTO or force 1M Homerun/Longrun don't need */ | 1204 | /* AUTO or force 1M Homerun/Longrun don't need */ |
1155 | if ( !(db->media_mode & 0x38) ) | 1205 | if ( !(db->media_mode & 0x38) ) |
1156 | phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); | 1206 | phy_write(db->ioaddr, db->phy_addr, |
1207 | 0, 0x1000, db->chip_id); | ||
1157 | 1208 | ||
1158 | /* AUTO mode, if INT phyxcer link failed, select EXT device */ | 1209 | /* AUTO mode, if INT phyxcer link failed, select EXT device */ |
1159 | if (db->media_mode & DMFE_AUTO) { | 1210 | if (db->media_mode & DMFE_AUTO) { |
@@ -1162,21 +1213,19 @@ static void dmfe_timer(unsigned long data) | |||
1162 | db->cr6_data&=~0x00000200; /* bit9=0, HD mode */ | 1213 | db->cr6_data&=~0x00000200; /* bit9=0, HD mode */ |
1163 | update_cr6(db->cr6_data, db->ioaddr); | 1214 | update_cr6(db->cr6_data, db->ioaddr); |
1164 | } | 1215 | } |
1165 | } else | 1216 | } else if (!netif_carrier_ok(dev)) { |
1166 | if ((tmp_cr12 & 0x3) && db->link_failed) { | 1217 | |
1167 | DMFE_DBUG(0, "Link link OK", tmp_cr12); | 1218 | DMFE_DBUG(0, "Link link OK", tmp_cr12); |
1168 | db->link_failed = 0; | 1219 | |
1169 | 1220 | /* Auto Sense Speed */ | |
1170 | /* Auto Sense Speed */ | 1221 | if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) { |
1171 | if ( (db->media_mode & DMFE_AUTO) && | 1222 | netif_carrier_on(dev); |
1172 | dmfe_sense_speed(db) ) | 1223 | SHOW_MEDIA_TYPE(db->op_mode); |
1173 | db->link_failed = 1; | ||
1174 | else | ||
1175 | netif_carrier_on(db->dev); | ||
1176 | dmfe_process_mode(db); | ||
1177 | /* SHOW_MEDIA_TYPE(db->op_mode); */ | ||
1178 | } | 1224 | } |
1179 | 1225 | ||
1226 | dmfe_process_mode(db); | ||
1227 | } | ||
1228 | |||
1180 | /* HPNA remote command check */ | 1229 | /* HPNA remote command check */ |
1181 | if (db->HPNA_command & 0xf00) { | 1230 | if (db->HPNA_command & 0xf00) { |
1182 | db->HPNA_timer--; | 1231 | db->HPNA_timer--; |
@@ -1221,7 +1270,7 @@ static void dmfe_dynamic_reset(struct DEVICE *dev) | |||
1221 | db->tx_packet_cnt = 0; | 1270 | db->tx_packet_cnt = 0; |
1222 | db->tx_queue_cnt = 0; | 1271 | db->tx_queue_cnt = 0; |
1223 | db->rx_avail_cnt = 0; | 1272 | db->rx_avail_cnt = 0; |
1224 | db->link_failed = 1; | 1273 | netif_carrier_off(dev); |
1225 | db->wait_reset = 0; | 1274 | db->wait_reset = 0; |
1226 | 1275 | ||
1227 | /* Re-initilize DM910X board */ | 1276 | /* Re-initilize DM910X board */ |
@@ -1259,7 +1308,8 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb) | |||
1259 | 1308 | ||
1260 | if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { | 1309 | if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { |
1261 | rxptr->rx_skb_ptr = skb; | 1310 | rxptr->rx_skb_ptr = skb; |
1262 | rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); | 1311 | rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, |
1312 | skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); | ||
1263 | wmb(); | 1313 | wmb(); |
1264 | rxptr->rdes0 = cpu_to_le32(0x80000000); | 1314 | rxptr->rdes0 = cpu_to_le32(0x80000000); |
1265 | db->rx_avail_cnt++; | 1315 | db->rx_avail_cnt++; |
@@ -1291,8 +1341,11 @@ static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioadd | |||
1291 | outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */ | 1341 | outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */ |
1292 | 1342 | ||
1293 | /* rx descriptor start pointer */ | 1343 | /* rx descriptor start pointer */ |
1294 | db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT; | 1344 | db->first_rx_desc = (void *)db->first_tx_desc + |
1295 | db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT; | 1345 | sizeof(struct tx_desc) * TX_DESC_CNT; |
1346 | |||
1347 | db->first_rx_desc_dma = db->first_tx_desc_dma + | ||
1348 | sizeof(struct tx_desc) * TX_DESC_CNT; | ||
1296 | db->rx_insert_ptr = db->first_rx_desc; | 1349 | db->rx_insert_ptr = db->first_rx_desc; |
1297 | db->rx_ready_ptr = db->first_rx_desc; | 1350 | db->rx_ready_ptr = db->first_rx_desc; |
1298 | outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */ | 1351 | outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */ |
@@ -1470,7 +1523,8 @@ static void allocate_rx_buffer(struct dmfe_board_info *db) | |||
1470 | if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL ) | 1523 | if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL ) |
1471 | break; | 1524 | break; |
1472 | rxptr->rx_skb_ptr = skb; /* FIXME (?) */ | 1525 | rxptr->rx_skb_ptr = skb; /* FIXME (?) */ |
1473 | rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); | 1526 | rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, |
1527 | RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); | ||
1474 | wmb(); | 1528 | wmb(); |
1475 | rxptr->rdes0 = cpu_to_le32(0x80000000); | 1529 | rxptr->rdes0 = cpu_to_le32(0x80000000); |
1476 | rxptr = rxptr->next_rx_desc; | 1530 | rxptr = rxptr->next_rx_desc; |
@@ -1510,7 +1564,8 @@ static u16 read_srom_word(long ioaddr, int offset) | |||
1510 | for (i = 16; i > 0; i--) { | 1564 | for (i = 16; i > 0; i--) { |
1511 | outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr); | 1565 | outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr); |
1512 | udelay(5); | 1566 | udelay(5); |
1513 | srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0); | 1567 | srom_data = (srom_data << 1) | |
1568 | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0); | ||
1514 | outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); | 1569 | outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); |
1515 | udelay(5); | 1570 | udelay(5); |
1516 | } | 1571 | } |
@@ -1537,9 +1592,11 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db) | |||
1537 | 1592 | ||
1538 | if ( (phy_mode & 0x24) == 0x24 ) { | 1593 | if ( (phy_mode & 0x24) == 0x24 ) { |
1539 | if (db->chip_id == PCI_DM9132_ID) /* DM9132 */ | 1594 | if (db->chip_id == PCI_DM9132_ID) /* DM9132 */ |
1540 | phy_mode = phy_read(db->ioaddr, db->phy_addr, 7, db->chip_id) & 0xf000; | 1595 | phy_mode = phy_read(db->ioaddr, |
1596 | db->phy_addr, 7, db->chip_id) & 0xf000; | ||
1541 | else /* DM9102/DM9102A */ | 1597 | else /* DM9102/DM9102A */ |
1542 | phy_mode = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0xf000; | 1598 | phy_mode = phy_read(db->ioaddr, |
1599 | db->phy_addr, 17, db->chip_id) & 0xf000; | ||
1543 | /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */ | 1600 | /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */ |
1544 | switch (phy_mode) { | 1601 | switch (phy_mode) { |
1545 | case 0x1000: db->op_mode = DMFE_10MHF; break; | 1602 | case 0x1000: db->op_mode = DMFE_10MHF; break; |
@@ -1576,8 +1633,11 @@ static void dmfe_set_phyxcer(struct dmfe_board_info *db) | |||
1576 | 1633 | ||
1577 | /* DM9009 Chip: Phyxcer reg18 bit12=0 */ | 1634 | /* DM9009 Chip: Phyxcer reg18 bit12=0 */ |
1578 | if (db->chip_id == PCI_DM9009_ID) { | 1635 | if (db->chip_id == PCI_DM9009_ID) { |
1579 | phy_reg = phy_read(db->ioaddr, db->phy_addr, 18, db->chip_id) & ~0x1000; | 1636 | phy_reg = phy_read(db->ioaddr, |
1580 | phy_write(db->ioaddr, db->phy_addr, 18, phy_reg, db->chip_id); | 1637 | db->phy_addr, 18, db->chip_id) & ~0x1000; |
1638 | |||
1639 | phy_write(db->ioaddr, | ||
1640 | db->phy_addr, 18, phy_reg, db->chip_id); | ||
1581 | } | 1641 | } |
1582 | 1642 | ||
1583 | /* Phyxcer capability setting */ | 1643 | /* Phyxcer capability setting */ |
@@ -1650,10 +1710,12 @@ static void dmfe_process_mode(struct dmfe_board_info *db) | |||
1650 | case DMFE_100MHF: phy_reg = 0x2000; break; | 1710 | case DMFE_100MHF: phy_reg = 0x2000; break; |
1651 | case DMFE_100MFD: phy_reg = 0x2100; break; | 1711 | case DMFE_100MFD: phy_reg = 0x2100; break; |
1652 | } | 1712 | } |
1653 | phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); | 1713 | phy_write(db->ioaddr, |
1714 | db->phy_addr, 0, phy_reg, db->chip_id); | ||
1654 | if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) ) | 1715 | if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) ) |
1655 | mdelay(20); | 1716 | mdelay(20); |
1656 | phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); | 1717 | phy_write(db->ioaddr, |
1718 | db->phy_addr, 0, phy_reg, db->chip_id); | ||
1657 | } | 1719 | } |
1658 | } | 1720 | } |
1659 | } | 1721 | } |
@@ -1663,7 +1725,8 @@ static void dmfe_process_mode(struct dmfe_board_info *db) | |||
1663 | * Write a word to Phy register | 1725 | * Write a word to Phy register |
1664 | */ | 1726 | */ |
1665 | 1727 | ||
1666 | static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id) | 1728 | static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, |
1729 | u16 phy_data, u32 chip_id) | ||
1667 | { | 1730 | { |
1668 | u16 i; | 1731 | u16 i; |
1669 | unsigned long ioaddr; | 1732 | unsigned long ioaddr; |
@@ -1689,11 +1752,13 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data | |||
1689 | 1752 | ||
1690 | /* Send Phy address */ | 1753 | /* Send Phy address */ |
1691 | for (i = 0x10; i > 0; i = i >> 1) | 1754 | for (i = 0x10; i > 0; i = i >> 1) |
1692 | phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); | 1755 | phy_write_1bit(ioaddr, |
1756 | phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); | ||
1693 | 1757 | ||
1694 | /* Send register address */ | 1758 | /* Send register address */ |
1695 | for (i = 0x10; i > 0; i = i >> 1) | 1759 | for (i = 0x10; i > 0; i = i >> 1) |
1696 | phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0); | 1760 | phy_write_1bit(ioaddr, |
1761 | offset & i ? PHY_DATA_1 : PHY_DATA_0); | ||
1697 | 1762 | ||
1698 | /* written trasnition */ | 1763 | /* written trasnition */ |
1699 | phy_write_1bit(ioaddr, PHY_DATA_1); | 1764 | phy_write_1bit(ioaddr, PHY_DATA_1); |
@@ -1701,7 +1766,8 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data | |||
1701 | 1766 | ||
1702 | /* Write a word data to PHY controller */ | 1767 | /* Write a word data to PHY controller */ |
1703 | for ( i = 0x8000; i > 0; i >>= 1) | 1768 | for ( i = 0x8000; i > 0; i >>= 1) |
1704 | phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0); | 1769 | phy_write_1bit(ioaddr, |
1770 | phy_data & i ? PHY_DATA_1 : PHY_DATA_0); | ||
1705 | } | 1771 | } |
1706 | } | 1772 | } |
1707 | 1773 | ||
@@ -1738,11 +1804,13 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id) | |||
1738 | 1804 | ||
1739 | /* Send Phy address */ | 1805 | /* Send Phy address */ |
1740 | for (i = 0x10; i > 0; i = i >> 1) | 1806 | for (i = 0x10; i > 0; i = i >> 1) |
1741 | phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); | 1807 | phy_write_1bit(ioaddr, |
1808 | phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); | ||
1742 | 1809 | ||
1743 | /* Send register address */ | 1810 | /* Send register address */ |
1744 | for (i = 0x10; i > 0; i = i >> 1) | 1811 | for (i = 0x10; i > 0; i = i >> 1) |
1745 | phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0); | 1812 | phy_write_1bit(ioaddr, |
1813 | offset & i ? PHY_DATA_1 : PHY_DATA_0); | ||
1746 | 1814 | ||
1747 | /* Skip transition state */ | 1815 | /* Skip transition state */ |
1748 | phy_read_1bit(ioaddr); | 1816 | phy_read_1bit(ioaddr); |
@@ -1963,7 +2031,8 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db) | |||
1963 | 2031 | ||
1964 | /* Check remote device status match our setting ot not */ | 2032 | /* Check remote device status match our setting ot not */ |
1965 | if ( phy_reg != (db->HPNA_command & 0x0f00) ) { | 2033 | if ( phy_reg != (db->HPNA_command & 0x0f00) ) { |
1966 | phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); | 2034 | phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, |
2035 | db->chip_id); | ||
1967 | db->HPNA_timer=8; | 2036 | db->HPNA_timer=8; |
1968 | } else | 2037 | } else |
1969 | db->HPNA_timer=600; /* Match, every 10 minutes, check */ | 2038 | db->HPNA_timer=600; /* Match, every 10 minutes, check */ |
@@ -2003,8 +2072,11 @@ module_param(HPNA_tx_cmd, byte, 0); | |||
2003 | module_param(HPNA_NoiseFloor, byte, 0); | 2072 | module_param(HPNA_NoiseFloor, byte, 0); |
2004 | module_param(SF_mode, byte, 0); | 2073 | module_param(SF_mode, byte, 0); |
2005 | MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)"); | 2074 | MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)"); |
2006 | MODULE_PARM_DESC(mode, "Davicom DM9xxx: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA"); | 2075 | MODULE_PARM_DESC(mode, "Davicom DM9xxx: " |
2007 | MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function (bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)"); | 2076 | "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA"); |
2077 | |||
2078 | MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function " | ||
2079 | "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)"); | ||
2008 | 2080 | ||
2009 | /* Description: | 2081 | /* Description: |
2010 | * when user used insmod to add module, system invoked init_module() | 2082 | * when user used insmod to add module, system invoked init_module() |
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 885e73d731c2..dab88b958d6e 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -3598,17 +3598,20 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3598 | 3598 | ||
3599 | /* Move to next BD in the ring */ | 3599 | /* Move to next BD in the ring */ |
3600 | if (!(bd_status & T_W)) | 3600 | if (!(bd_status & T_W)) |
3601 | ugeth->txBd[txQ] = bd + sizeof(struct qe_bd); | 3601 | bd += sizeof(struct qe_bd); |
3602 | else | 3602 | else |
3603 | ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ]; | 3603 | bd = ugeth->p_tx_bd_ring[txQ]; |
3604 | 3604 | ||
3605 | /* If the next BD still needs to be cleaned up, then the bds | 3605 | /* If the next BD still needs to be cleaned up, then the bds |
3606 | are full. We need to tell the kernel to stop sending us stuff. */ | 3606 | are full. We need to tell the kernel to stop sending us stuff. */ |
3607 | if (bd == ugeth->confBd[txQ]) { | 3607 | if (bd == ugeth->confBd[txQ]) { |
3608 | if (!netif_queue_stopped(dev)) | 3608 | if (!netif_queue_stopped(dev)) |
3609 | netif_stop_queue(dev); | 3609 | netif_stop_queue(dev); |
3610 | return NETDEV_TX_BUSY; | ||
3610 | } | 3611 | } |
3611 | 3612 | ||
3613 | ugeth->txBd[txQ] = bd; | ||
3614 | |||
3612 | if (ugeth->p_scheduler) { | 3615 | if (ugeth->p_scheduler) { |
3613 | ugeth->cpucount[txQ]++; | 3616 | ugeth->cpucount[txQ]++; |
3614 | /* Indicate to QE that there are more Tx bds ready for | 3617 | /* Indicate to QE that there are more Tx bds ready for |
@@ -3620,7 +3623,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3620 | 3623 | ||
3621 | spin_unlock_irq(&ugeth->lock); | 3624 | spin_unlock_irq(&ugeth->lock); |
3622 | 3625 | ||
3623 | return 0; | 3626 | return NETDEV_TX_OK; |
3624 | } | 3627 | } |
3625 | 3628 | ||
3626 | static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) | 3629 | static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) |
@@ -3722,7 +3725,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) | |||
3722 | /* Handle the transmitted buffer and release */ | 3725 | /* Handle the transmitted buffer and release */ |
3723 | /* the BD to be used with the current frame */ | 3726 | /* the BD to be used with the current frame */ |
3724 | 3727 | ||
3725 | if ((bd = ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) | 3728 | if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) |
3726 | break; | 3729 | break; |
3727 | 3730 | ||
3728 | ugeth->stats.tx_packets++; | 3731 | ugeth->stats.tx_packets++; |
@@ -3741,10 +3744,12 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) | |||
3741 | 3744 | ||
3742 | /* Advance the confirmation BD pointer */ | 3745 | /* Advance the confirmation BD pointer */ |
3743 | if (!(bd_status & T_W)) | 3746 | if (!(bd_status & T_W)) |
3744 | ugeth->confBd[txQ] += sizeof(struct qe_bd); | 3747 | bd += sizeof(struct qe_bd); |
3745 | else | 3748 | else |
3746 | ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ]; | 3749 | bd = ugeth->p_tx_bd_ring[txQ]; |
3750 | bd_status = in_be32((u32 *)bd); | ||
3747 | } | 3751 | } |
3752 | ugeth->confBd[txQ] = bd; | ||
3748 | return 0; | 3753 | return 0; |
3749 | } | 3754 | } |
3750 | 3755 | ||
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index b8f0a11e8f31..7f5a59836818 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig | |||
@@ -677,8 +677,6 @@ config FB_S1D13XXX | |||
677 | config FB_NVIDIA | 677 | config FB_NVIDIA |
678 | tristate "nVidia Framebuffer Support" | 678 | tristate "nVidia Framebuffer Support" |
679 | depends on FB && PCI | 679 | depends on FB && PCI |
680 | select I2C_ALGOBIT if FB_NVIDIA_I2C | ||
681 | select I2C if FB_NVIDIA_I2C | ||
682 | select FB_BACKLIGHT if FB_NVIDIA_BACKLIGHT | 680 | select FB_BACKLIGHT if FB_NVIDIA_BACKLIGHT |
683 | select FB_MODE_HELPERS | 681 | select FB_MODE_HELPERS |
684 | select FB_CFB_FILLRECT | 682 | select FB_CFB_FILLRECT |
@@ -697,6 +695,7 @@ config FB_NVIDIA | |||
697 | config FB_NVIDIA_I2C | 695 | config FB_NVIDIA_I2C |
698 | bool "Enable DDC Support" | 696 | bool "Enable DDC Support" |
699 | depends on FB_NVIDIA | 697 | depends on FB_NVIDIA |
698 | select FB_DDC | ||
700 | help | 699 | help |
701 | This enables I2C support for nVidia Chipsets. This is used | 700 | This enables I2C support for nVidia Chipsets. This is used |
702 | only for getting EDID information from the attached display | 701 | only for getting EDID information from the attached display |
@@ -716,7 +715,6 @@ config FB_NVIDIA_BACKLIGHT | |||
716 | config FB_RIVA | 715 | config FB_RIVA |
717 | tristate "nVidia Riva support" | 716 | tristate "nVidia Riva support" |
718 | depends on FB && PCI | 717 | depends on FB && PCI |
719 | select FB_DDC if FB_RIVA_I2C | ||
720 | select FB_BACKLIGHT if FB_RIVA_BACKLIGHT | 718 | select FB_BACKLIGHT if FB_RIVA_BACKLIGHT |
721 | select FB_MODE_HELPERS | 719 | select FB_MODE_HELPERS |
722 | select FB_CFB_FILLRECT | 720 | select FB_CFB_FILLRECT |
@@ -734,6 +732,7 @@ config FB_RIVA | |||
734 | config FB_RIVA_I2C | 732 | config FB_RIVA_I2C |
735 | bool "Enable DDC Support" | 733 | bool "Enable DDC Support" |
736 | depends on FB_RIVA | 734 | depends on FB_RIVA |
735 | select FB_DDC | ||
737 | help | 736 | help |
738 | This enables I2C support for nVidia Chipsets. This is used | 737 | This enables I2C support for nVidia Chipsets. This is used |
739 | only for getting EDID information from the attached display | 738 | only for getting EDID information from the attached display |
@@ -812,8 +811,6 @@ config FB_INTEL | |||
812 | depends on FB && EXPERIMENTAL && PCI && X86 | 811 | depends on FB && EXPERIMENTAL && PCI && X86 |
813 | select AGP | 812 | select AGP |
814 | select AGP_INTEL | 813 | select AGP_INTEL |
815 | select I2C_ALGOBIT if FB_INTEL_I2C | ||
816 | select I2C if FB_INTEL_I2C | ||
817 | select FB_MODE_HELPERS | 814 | select FB_MODE_HELPERS |
818 | select FB_CFB_FILLRECT | 815 | select FB_CFB_FILLRECT |
819 | select FB_CFB_COPYAREA | 816 | select FB_CFB_COPYAREA |
@@ -846,6 +843,7 @@ config FB_INTEL_DEBUG | |||
846 | config FB_INTEL_I2C | 843 | config FB_INTEL_I2C |
847 | bool "DDC/I2C for Intel framebuffer support" | 844 | bool "DDC/I2C for Intel framebuffer support" |
848 | depends on FB_INTEL | 845 | depends on FB_INTEL |
846 | select FB_DDC | ||
849 | default y | 847 | default y |
850 | help | 848 | help |
851 | Say Y here if you want DDC/I2C support for your on-board Intel graphics. | 849 | Say Y here if you want DDC/I2C support for your on-board Intel graphics. |
@@ -924,8 +922,8 @@ config FB_MATROX_G | |||
924 | 922 | ||
925 | config FB_MATROX_I2C | 923 | config FB_MATROX_I2C |
926 | tristate "Matrox I2C support" | 924 | tristate "Matrox I2C support" |
927 | depends on FB_MATROX && I2C | 925 | depends on FB_MATROX |
928 | select I2C_ALGOBIT | 926 | select FB_DDC |
929 | ---help--- | 927 | ---help--- |
930 | This drivers creates I2C buses which are needed for accessing the | 928 | This drivers creates I2C buses which are needed for accessing the |
931 | DDC (I2C) bus present on all Matroxes, an I2C bus which | 929 | DDC (I2C) bus present on all Matroxes, an I2C bus which |
@@ -993,7 +991,6 @@ config FB_MATROX_MULTIHEAD | |||
993 | config FB_RADEON | 991 | config FB_RADEON |
994 | tristate "ATI Radeon display support" | 992 | tristate "ATI Radeon display support" |
995 | depends on FB && PCI | 993 | depends on FB && PCI |
996 | select FB_DDC if FB_RADEON_I2C | ||
997 | select FB_BACKLIGHT if FB_RADEON_BACKLIGHT | 994 | select FB_BACKLIGHT if FB_RADEON_BACKLIGHT |
998 | select FB_MODE_HELPERS | 995 | select FB_MODE_HELPERS |
999 | select FB_CFB_FILLRECT | 996 | select FB_CFB_FILLRECT |
@@ -1018,6 +1015,7 @@ config FB_RADEON | |||
1018 | config FB_RADEON_I2C | 1015 | config FB_RADEON_I2C |
1019 | bool "DDC/I2C for ATI Radeon support" | 1016 | bool "DDC/I2C for ATI Radeon support" |
1020 | depends on FB_RADEON | 1017 | depends on FB_RADEON |
1018 | select FB_DDC | ||
1021 | default y | 1019 | default y |
1022 | help | 1020 | help |
1023 | Say Y here if you want DDC/I2C support for your Radeon board. | 1021 | Say Y here if you want DDC/I2C support for your Radeon board. |
@@ -1125,7 +1123,6 @@ config FB_S3 | |||
1125 | config FB_SAVAGE | 1123 | config FB_SAVAGE |
1126 | tristate "S3 Savage support" | 1124 | tristate "S3 Savage support" |
1127 | depends on FB && PCI && EXPERIMENTAL | 1125 | depends on FB && PCI && EXPERIMENTAL |
1128 | select FB_DDC if FB_SAVAGE_I2C | ||
1129 | select FB_MODE_HELPERS | 1126 | select FB_MODE_HELPERS |
1130 | select FB_CFB_FILLRECT | 1127 | select FB_CFB_FILLRECT |
1131 | select FB_CFB_COPYAREA | 1128 | select FB_CFB_COPYAREA |
@@ -1142,6 +1139,7 @@ config FB_SAVAGE | |||
1142 | config FB_SAVAGE_I2C | 1139 | config FB_SAVAGE_I2C |
1143 | bool "Enable DDC2 Support" | 1140 | bool "Enable DDC2 Support" |
1144 | depends on FB_SAVAGE | 1141 | depends on FB_SAVAGE |
1142 | select FB_DDC | ||
1145 | help | 1143 | help |
1146 | This enables I2C support for S3 Savage Chipsets. This is used | 1144 | This enables I2C support for S3 Savage Chipsets. This is used |
1147 | only for getting EDID information from the attached display | 1145 | only for getting EDID information from the attached display |
diff --git a/drivers/video/aty/atyfb.h b/drivers/video/aty/atyfb.h index f72faff33c0c..dc62f8e282b4 100644 --- a/drivers/video/aty/atyfb.h +++ b/drivers/video/aty/atyfb.h | |||
@@ -284,7 +284,8 @@ static inline void aty_st_8(int regindex, u8 val, const struct atyfb_par *par) | |||
284 | #endif | 284 | #endif |
285 | } | 285 | } |
286 | 286 | ||
287 | #if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) | 287 | #if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \ |
288 | defined (CONFIG_FB_ATY_GENERIC_LCD) || defined (CONFIG_FB_ATY_BACKLIGHT) | ||
288 | extern void aty_st_lcd(int index, u32 val, const struct atyfb_par *par); | 289 | extern void aty_st_lcd(int index, u32 val, const struct atyfb_par *par); |
289 | extern u32 aty_ld_lcd(int index, const struct atyfb_par *par); | 290 | extern u32 aty_ld_lcd(int index, const struct atyfb_par *par); |
290 | #endif | 291 | #endif |
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c index a50b303093a7..43f62d8ee41d 100644 --- a/drivers/video/nvidia/nv_backlight.c +++ b/drivers/video/nvidia/nv_backlight.c | |||
@@ -12,6 +12,11 @@ | |||
12 | #include <linux/backlight.h> | 12 | #include <linux/backlight.h> |
13 | #include <linux/fb.h> | 13 | #include <linux/fb.h> |
14 | #include <linux/pci.h> | 14 | #include <linux/pci.h> |
15 | |||
16 | #ifdef CONFIG_PMAC_BACKLIGHT | ||
17 | #include <asm/backlight.h> | ||
18 | #endif | ||
19 | |||
15 | #include "nv_local.h" | 20 | #include "nv_local.h" |
16 | #include "nv_type.h" | 21 | #include "nv_type.h" |
17 | #include "nv_proto.h" | 22 | #include "nv_proto.h" |
diff --git a/fs/buffer.c b/fs/buffer.c index e8504b65176c..1d0852fa728b 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -2365,6 +2365,10 @@ failed: | |||
2365 | } | 2365 | } |
2366 | EXPORT_SYMBOL(nobh_prepare_write); | 2366 | EXPORT_SYMBOL(nobh_prepare_write); |
2367 | 2367 | ||
2368 | /* | ||
2369 | * Make sure any changes to nobh_commit_write() are reflected in | ||
2370 | * nobh_truncate_page(), since it doesn't call commit_write(). | ||
2371 | */ | ||
2368 | int nobh_commit_write(struct file *file, struct page *page, | 2372 | int nobh_commit_write(struct file *file, struct page *page, |
2369 | unsigned from, unsigned to) | 2373 | unsigned from, unsigned to) |
2370 | { | 2374 | { |
@@ -2466,6 +2470,11 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from) | |||
2466 | memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); | 2470 | memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); |
2467 | flush_dcache_page(page); | 2471 | flush_dcache_page(page); |
2468 | kunmap_atomic(kaddr, KM_USER0); | 2472 | kunmap_atomic(kaddr, KM_USER0); |
2473 | /* | ||
2474 | * It would be more correct to call aops->commit_write() | ||
2475 | * here, but this is more efficient. | ||
2476 | */ | ||
2477 | SetPageUptodate(page); | ||
2469 | set_page_dirty(page); | 2478 | set_page_dirty(page); |
2470 | } | 2479 | } |
2471 | unlock_page(page); | 2480 | unlock_page(page); |
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index 5fe13593b57f..6247628bdaed 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES | |||
@@ -1,3 +1,10 @@ | |||
1 | Verison 1.48 | ||
2 | ------------ | ||
3 | Fix mtime bouncing around from local idea of last write times to remote time. | ||
4 | Fix hang (in i_size_read) when simultaneous size update of same remote file | ||
5 | on smp system corrupts sequence number. Do not reread unnecessarily partial page | ||
6 | (which we are about to overwrite anyway) when writing out file opened rw. | ||
7 | |||
1 | Version 1.47 | 8 | Version 1.47 |
2 | ------------ | 9 | ------------ |
3 | Fix oops in list_del during mount caused by unaligned string. | 10 | Fix oops in list_del during mount caused by unaligned string. |
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile index a26f26ed5a17..6ecd9d6ba3f3 100644 --- a/fs/cifs/Makefile +++ b/fs/cifs/Makefile | |||
@@ -3,4 +3,4 @@ | |||
3 | # | 3 | # |
4 | obj-$(CONFIG_CIFS) += cifs.o | 4 | obj-$(CONFIG_CIFS) += cifs.o |
5 | 5 | ||
6 | cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o sess.o | 6 | cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o sess.o export.o |
diff --git a/fs/cifs/TODO b/fs/cifs/TODO index 68372946dc92..d7b9c27c942d 100644 --- a/fs/cifs/TODO +++ b/fs/cifs/TODO | |||
@@ -18,7 +18,9 @@ better) | |||
18 | 18 | ||
19 | d) Kerberos/SPNEGO session setup support - (started) | 19 | d) Kerberos/SPNEGO session setup support - (started) |
20 | 20 | ||
21 | e) NTLMv2 authentication (mostly implemented) | 21 | e) NTLMv2 authentication (mostly implemented - double check |
22 | that NTLMv2 signing works, also need to cleanup now unneeded SessSetup code in | ||
23 | fs/cifs/connect.c) | ||
22 | 24 | ||
23 | f) MD5-HMAC signing SMB PDUs when SPNEGO style SessionSetup | 25 | f) MD5-HMAC signing SMB PDUs when SPNEGO style SessionSetup |
24 | used (Kerberos or NTLMSSP). Signing alreadyimplemented for NTLM | 26 | used (Kerberos or NTLMSSP). Signing alreadyimplemented for NTLM |
@@ -88,11 +90,12 @@ w) Finish up the dos time conversion routines needed to return old server | |||
88 | time to the client (default time, of now or time 0 is used now for these | 90 | time to the client (default time, of now or time 0 is used now for these |
89 | very old servers) | 91 | very old servers) |
90 | 92 | ||
91 | x) Add support for OS/2 (LANMAN 1.2 and LANMAN2.1 based SMB servers) | 93 | x) In support for OS/2 (LANMAN 1.2 and LANMAN2.1 based SMB servers) |
94 | need to add ability to set time to server (utimes command) | ||
92 | 95 | ||
93 | y) Finish testing of Windows 9x/Windows ME server support (started). | 96 | y) Finish testing of Windows 9x/Windows ME server support (started). |
94 | 97 | ||
95 | KNOWN BUGS (updated April 29, 2005) | 98 | KNOWN BUGS (updated February 26, 2007) |
96 | ==================================== | 99 | ==================================== |
97 | See http://bugzilla.samba.org - search on product "CifsVFS" for | 100 | See http://bugzilla.samba.org - search on product "CifsVFS" for |
98 | current bug list. | 101 | current bug list. |
@@ -107,11 +110,6 @@ but recognizes them | |||
107 | succeed but still return access denied (appears to be Windows | 110 | succeed but still return access denied (appears to be Windows |
108 | server not cifs client problem) and has not been reproduced recently. | 111 | server not cifs client problem) and has not been reproduced recently. |
109 | NTFS partitions do not have this problem. | 112 | NTFS partitions do not have this problem. |
110 | 4) debug connectathon lock test case 10 which fails against | ||
111 | Samba (may be unmappable due to POSIX to Windows lock model | ||
112 | differences but worth investigating). Also debug Samba to | ||
113 | see why lock test case 7 takes longer to complete to Samba | ||
114 | than to Windows. | ||
115 | 113 | ||
116 | Misc testing to do | 114 | Misc testing to do |
117 | ================== | 115 | ================== |
@@ -119,7 +117,7 @@ Misc testing to do | |||
119 | types. Try nested symlinks (8 deep). Return max path name in stat -f information | 117 | types. Try nested symlinks (8 deep). Return max path name in stat -f information |
120 | 118 | ||
121 | 2) Modify file portion of ltp so it can run against a mounted network | 119 | 2) Modify file portion of ltp so it can run against a mounted network |
122 | share and run it against cifs vfs. | 120 | share and run it against cifs vfs in automated fashion. |
123 | 121 | ||
124 | 3) Additional performance testing and optimization using iozone and similar - | 122 | 3) Additional performance testing and optimization using iozone and similar - |
125 | there are some easy changes that can be done to parallelize sequential writes, | 123 | there are some easy changes that can be done to parallelize sequential writes, |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index bc2c0ac27169..faba4d69fe91 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * fs/cifs/cifsfs.c | 2 | * fs/cifs/cifsfs.c |
3 | * | 3 | * |
4 | * Copyright (C) International Business Machines Corp., 2002,2004 | 4 | * Copyright (C) International Business Machines Corp., 2002,2007 |
5 | * Author(s): Steve French (sfrench@us.ibm.com) | 5 | * Author(s): Steve French (sfrench@us.ibm.com) |
6 | * | 6 | * |
7 | * Common Internet FileSystem (CIFS) client | 7 | * Common Internet FileSystem (CIFS) client |
@@ -47,7 +47,11 @@ | |||
47 | 47 | ||
48 | #ifdef CONFIG_CIFS_QUOTA | 48 | #ifdef CONFIG_CIFS_QUOTA |
49 | static struct quotactl_ops cifs_quotactl_ops; | 49 | static struct quotactl_ops cifs_quotactl_ops; |
50 | #endif | 50 | #endif /* QUOTA */ |
51 | |||
52 | #ifdef CONFIG_CIFS_EXPERIMENTAL | ||
53 | extern struct export_operations cifs_export_ops; | ||
54 | #endif /* EXPERIMENTAL */ | ||
51 | 55 | ||
52 | int cifsFYI = 0; | 56 | int cifsFYI = 0; |
53 | int cifsERROR = 1; | 57 | int cifsERROR = 1; |
@@ -62,8 +66,8 @@ unsigned int extended_security = CIFSSEC_DEF; | |||
62 | unsigned int sign_CIFS_PDUs = 1; | 66 | unsigned int sign_CIFS_PDUs = 1; |
63 | extern struct task_struct * oplockThread; /* remove sparse warning */ | 67 | extern struct task_struct * oplockThread; /* remove sparse warning */ |
64 | struct task_struct * oplockThread = NULL; | 68 | struct task_struct * oplockThread = NULL; |
65 | extern struct task_struct * dnotifyThread; /* remove sparse warning */ | 69 | /* extern struct task_struct * dnotifyThread; remove sparse warning */ |
66 | struct task_struct * dnotifyThread = NULL; | 70 | static struct task_struct * dnotifyThread = NULL; |
67 | static const struct super_operations cifs_super_ops; | 71 | static const struct super_operations cifs_super_ops; |
68 | unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE; | 72 | unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE; |
69 | module_param(CIFSMaxBufSize, int, 0); | 73 | module_param(CIFSMaxBufSize, int, 0); |
@@ -110,6 +114,10 @@ cifs_read_super(struct super_block *sb, void *data, | |||
110 | 114 | ||
111 | sb->s_magic = CIFS_MAGIC_NUMBER; | 115 | sb->s_magic = CIFS_MAGIC_NUMBER; |
112 | sb->s_op = &cifs_super_ops; | 116 | sb->s_op = &cifs_super_ops; |
117 | #ifdef CONFIG_CIFS_EXPERIMENTAL | ||
118 | if(experimEnabled != 0) | ||
119 | sb->s_export_op = &cifs_export_ops; | ||
120 | #endif /* EXPERIMENTAL */ | ||
113 | /* if(cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512) | 121 | /* if(cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512) |
114 | sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */ | 122 | sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */ |
115 | #ifdef CONFIG_CIFS_QUOTA | 123 | #ifdef CONFIG_CIFS_QUOTA |
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index c97c08eb481a..2c2c384894d8 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -38,8 +38,8 @@ extern const struct address_space_operations cifs_addr_ops_smallbuf; | |||
38 | /* Functions related to super block operations */ | 38 | /* Functions related to super block operations */ |
39 | /* extern const struct super_operations cifs_super_ops;*/ | 39 | /* extern const struct super_operations cifs_super_ops;*/ |
40 | extern void cifs_read_inode(struct inode *); | 40 | extern void cifs_read_inode(struct inode *); |
41 | extern void cifs_delete_inode(struct inode *); | 41 | /*extern void cifs_delete_inode(struct inode *);*/ /* BB not needed yet */ |
42 | /* extern void cifs_write_inode(struct inode *); *//* BB not needed yet */ | 42 | /* extern void cifs_write_inode(struct inode *); */ /* BB not needed yet */ |
43 | 43 | ||
44 | /* Functions related to inodes */ | 44 | /* Functions related to inodes */ |
45 | extern const struct inode_operations cifs_dir_inode_ops; | 45 | extern const struct inode_operations cifs_dir_inode_ops; |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 74d3ccbb103b..e4de8eba4780 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -525,15 +525,17 @@ require use of the stronger protocol */ | |||
525 | */ | 525 | */ |
526 | GLOBAL_EXTERN struct smbUidInfo *GlobalUidList[UID_HASH]; | 526 | GLOBAL_EXTERN struct smbUidInfo *GlobalUidList[UID_HASH]; |
527 | 527 | ||
528 | GLOBAL_EXTERN struct list_head GlobalServerList; /* BB not implemented yet */ | 528 | /* GLOBAL_EXTERN struct list_head GlobalServerList; BB not implemented yet */ |
529 | GLOBAL_EXTERN struct list_head GlobalSMBSessionList; | 529 | GLOBAL_EXTERN struct list_head GlobalSMBSessionList; |
530 | GLOBAL_EXTERN struct list_head GlobalTreeConnectionList; | 530 | GLOBAL_EXTERN struct list_head GlobalTreeConnectionList; |
531 | GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; /* protects list inserts on 3 above */ | 531 | GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; /* protects list inserts on 3 above */ |
532 | 532 | ||
533 | GLOBAL_EXTERN struct list_head GlobalOplock_Q; | 533 | GLOBAL_EXTERN struct list_head GlobalOplock_Q; |
534 | 534 | ||
535 | GLOBAL_EXTERN struct list_head GlobalDnotifyReqList; /* Outstanding dir notify requests */ | 535 | /* Outstanding dir notify requests */ |
536 | GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q;/* DirNotify response queue */ | 536 | GLOBAL_EXTERN struct list_head GlobalDnotifyReqList; |
537 | /* DirNotify response queue */ | ||
538 | GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q; | ||
537 | 539 | ||
538 | /* | 540 | /* |
539 | * Global transaction id (XID) information | 541 | * Global transaction id (XID) information |
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index 2498d644827c..0efdf35aab2c 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h | |||
@@ -220,6 +220,9 @@ | |||
220 | */ | 220 | */ |
221 | #define CIFS_NO_HANDLE 0xFFFF | 221 | #define CIFS_NO_HANDLE 0xFFFF |
222 | 222 | ||
223 | #define NO_CHANGE_64 0xFFFFFFFFFFFFFFFFULL | ||
224 | #define NO_CHANGE_32 0xFFFFFFFFUL | ||
225 | |||
223 | /* IPC$ in ASCII */ | 226 | /* IPC$ in ASCII */ |
224 | #define CIFS_IPC_RESOURCE "\x49\x50\x43\x24" | 227 | #define CIFS_IPC_RESOURCE "\x49\x50\x43\x24" |
225 | 228 | ||
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 6148b82170c4..32eb1acab630 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -43,7 +43,7 @@ extern void _FreeXid(unsigned int); | |||
43 | #define FreeXid(curr_xid) {_FreeXid(curr_xid); cFYI(1,("CIFS VFS: leaving %s (xid = %d) rc = %d",__FUNCTION__,curr_xid,(int)rc));} | 43 | #define FreeXid(curr_xid) {_FreeXid(curr_xid); cFYI(1,("CIFS VFS: leaving %s (xid = %d) rc = %d",__FUNCTION__,curr_xid,(int)rc));} |
44 | extern char *build_path_from_dentry(struct dentry *); | 44 | extern char *build_path_from_dentry(struct dentry *); |
45 | extern char *build_wildcard_path_from_dentry(struct dentry *direntry); | 45 | extern char *build_wildcard_path_from_dentry(struct dentry *direntry); |
46 | extern void renew_parental_timestamps(struct dentry *direntry); | 46 | /* extern void renew_parental_timestamps(struct dentry *direntry);*/ |
47 | extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *, | 47 | extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *, |
48 | struct smb_hdr * /* input */ , | 48 | struct smb_hdr * /* input */ , |
49 | struct smb_hdr * /* out */ , | 49 | struct smb_hdr * /* out */ , |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 24364106b8f9..48fc0c2ab0e5 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -4803,6 +4803,16 @@ setPermsRetry: | |||
4803 | pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC); | 4803 | pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC); |
4804 | pSMB->Reserved4 = 0; | 4804 | pSMB->Reserved4 = 0; |
4805 | pSMB->hdr.smb_buf_length += byte_count; | 4805 | pSMB->hdr.smb_buf_length += byte_count; |
4806 | /* Samba server ignores set of file size to zero due to bugs in some | ||
4807 | older clients, but we should be precise - we use SetFileSize to | ||
4808 | set file size and do not want to truncate file size to zero | ||
4809 | accidently as happened on one Samba server beta by putting | ||
4810 | zero instead of -1 here */ | ||
4811 | data_offset->EndOfFile = NO_CHANGE_64; | ||
4812 | data_offset->NumOfBytes = NO_CHANGE_64; | ||
4813 | data_offset->LastStatusChange = NO_CHANGE_64; | ||
4814 | data_offset->LastAccessTime = NO_CHANGE_64; | ||
4815 | data_offset->LastModificationTime = NO_CHANGE_64; | ||
4806 | data_offset->Uid = cpu_to_le64(uid); | 4816 | data_offset->Uid = cpu_to_le64(uid); |
4807 | data_offset->Gid = cpu_to_le64(gid); | 4817 | data_offset->Gid = cpu_to_le64(gid); |
4808 | /* better to leave device as zero when it is */ | 4818 | /* better to leave device as zero when it is */ |
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 66b825ade3e1..3fad638d26d3 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #include "cifs_debug.h" | 31 | #include "cifs_debug.h" |
32 | #include "cifs_fs_sb.h" | 32 | #include "cifs_fs_sb.h" |
33 | 33 | ||
34 | void | 34 | static void |
35 | renew_parental_timestamps(struct dentry *direntry) | 35 | renew_parental_timestamps(struct dentry *direntry) |
36 | { | 36 | { |
37 | /* BB check if there is a way to get the kernel to do this or if we really need this */ | 37 | /* BB check if there is a way to get the kernel to do this or if we really need this */ |
diff --git a/fs/cifs/export.c b/fs/cifs/export.c new file mode 100644 index 000000000000..1d716392c3aa --- /dev/null +++ b/fs/cifs/export.c | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * fs/cifs/export.c | ||
3 | * | ||
4 | * Copyright (C) International Business Machines Corp., 2007 | ||
5 | * Author(s): Steve French (sfrench@us.ibm.com) | ||
6 | * | ||
7 | * Common Internet FileSystem (CIFS) client | ||
8 | * | ||
9 | * Operations related to support for exporting files via NFSD | ||
10 | * | ||
11 | * This library is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU Lesser General Public License as published | ||
13 | * by the Free Software Foundation; either version 2.1 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This library is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
19 | * the GNU Lesser General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU Lesser General Public License | ||
22 | * along with this library; if not, write to the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
24 | */ | ||
25 | |||
26 | /* | ||
27 | * See Documentation/filesystems/Exporting | ||
28 | * and examples in fs/exportfs | ||
29 | */ | ||
30 | |||
31 | #include <linux/fs.h> | ||
32 | |||
33 | #ifdef CONFIG_CIFS_EXPERIMENTAL | ||
34 | |||
35 | static struct dentry *cifs_get_parent(struct dentry *dentry) | ||
36 | { | ||
37 | /* BB need to add code here eventually to enable export via NFSD */ | ||
38 | return ERR_PTR(-EACCES); | ||
39 | } | ||
40 | |||
41 | struct export_operations cifs_export_ops = { | ||
42 | .get_parent = cifs_get_parent, | ||
43 | /* Following five export operations are unneeded so far and can default */ | ||
44 | /* .get_dentry = | ||
45 | .get_name = | ||
46 | .find_exported_dentry = | ||
47 | .decode_fh = | ||
48 | .encode_fs = */ | ||
49 | }; | ||
50 | |||
51 | #endif /* EXPERIMENTAL */ | ||
52 | |||
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index a1265c9bfec0..2d3275bedb55 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -879,18 +879,19 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data, | |||
879 | cifs_stats_bytes_written(pTcon, total_written); | 879 | cifs_stats_bytes_written(pTcon, total_written); |
880 | 880 | ||
881 | /* since the write may have blocked check these pointers again */ | 881 | /* since the write may have blocked check these pointers again */ |
882 | if (file->f_path.dentry) { | 882 | if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) { |
883 | if (file->f_path.dentry->d_inode) { | 883 | struct inode *inode = file->f_path.dentry->d_inode; |
884 | struct inode *inode = file->f_path.dentry->d_inode; | 884 | /* Do not update local mtime - server will set its actual value on write |
885 | inode->i_ctime = inode->i_mtime = | 885 | * inode->i_ctime = inode->i_mtime = |
886 | current_fs_time(inode->i_sb); | 886 | * current_fs_time(inode->i_sb);*/ |
887 | if (total_written > 0) { | 887 | if (total_written > 0) { |
888 | if (*poffset > file->f_path.dentry->d_inode->i_size) | 888 | spin_lock(&inode->i_lock); |
889 | i_size_write(file->f_path.dentry->d_inode, | 889 | if (*poffset > file->f_path.dentry->d_inode->i_size) |
890 | i_size_write(file->f_path.dentry->d_inode, | ||
890 | *poffset); | 891 | *poffset); |
891 | } | 892 | spin_unlock(&inode->i_lock); |
892 | mark_inode_dirty_sync(file->f_path.dentry->d_inode); | ||
893 | } | 893 | } |
894 | mark_inode_dirty_sync(file->f_path.dentry->d_inode); | ||
894 | } | 895 | } |
895 | FreeXid(xid); | 896 | FreeXid(xid); |
896 | return total_written; | 897 | return total_written; |
@@ -1012,18 +1013,18 @@ static ssize_t cifs_write(struct file *file, const char *write_data, | |||
1012 | cifs_stats_bytes_written(pTcon, total_written); | 1013 | cifs_stats_bytes_written(pTcon, total_written); |
1013 | 1014 | ||
1014 | /* since the write may have blocked check these pointers again */ | 1015 | /* since the write may have blocked check these pointers again */ |
1015 | if (file->f_path.dentry) { | 1016 | if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) { |
1016 | if (file->f_path.dentry->d_inode) { | ||
1017 | /*BB We could make this contingent on superblock ATIME flag too */ | 1017 | /*BB We could make this contingent on superblock ATIME flag too */ |
1018 | /* file->f_path.dentry->d_inode->i_ctime = | 1018 | /* file->f_path.dentry->d_inode->i_ctime = |
1019 | file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/ | 1019 | file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/ |
1020 | if (total_written > 0) { | 1020 | if (total_written > 0) { |
1021 | if (*poffset > file->f_path.dentry->d_inode->i_size) | 1021 | spin_lock(&file->f_path.dentry->d_inode->i_lock); |
1022 | i_size_write(file->f_path.dentry->d_inode, | 1022 | if (*poffset > file->f_path.dentry->d_inode->i_size) |
1023 | *poffset); | 1023 | i_size_write(file->f_path.dentry->d_inode, |
1024 | } | 1024 | *poffset); |
1025 | mark_inode_dirty_sync(file->f_path.dentry->d_inode); | 1025 | spin_unlock(&file->f_path.dentry->d_inode->i_lock); |
1026 | } | 1026 | } |
1027 | mark_inode_dirty_sync(file->f_path.dentry->d_inode); | ||
1027 | } | 1028 | } |
1028 | FreeXid(xid); | 1029 | FreeXid(xid); |
1029 | return total_written; | 1030 | return total_written; |
@@ -1400,6 +1401,7 @@ static int cifs_commit_write(struct file *file, struct page *page, | |||
1400 | xid = GetXid(); | 1401 | xid = GetXid(); |
1401 | cFYI(1, ("commit write for page %p up to position %lld for %d", | 1402 | cFYI(1, ("commit write for page %p up to position %lld for %d", |
1402 | page, position, to)); | 1403 | page, position, to)); |
1404 | spin_lock(&inode->i_lock); | ||
1403 | if (position > inode->i_size) { | 1405 | if (position > inode->i_size) { |
1404 | i_size_write(inode, position); | 1406 | i_size_write(inode, position); |
1405 | /* if (file->private_data == NULL) { | 1407 | /* if (file->private_data == NULL) { |
@@ -1429,6 +1431,7 @@ static int cifs_commit_write(struct file *file, struct page *page, | |||
1429 | cFYI(1, (" SetEOF (commit write) rc = %d", rc)); | 1431 | cFYI(1, (" SetEOF (commit write) rc = %d", rc)); |
1430 | } */ | 1432 | } */ |
1431 | } | 1433 | } |
1434 | spin_unlock(&inode->i_lock); | ||
1432 | if (!PageUptodate(page)) { | 1435 | if (!PageUptodate(page)) { |
1433 | position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset; | 1436 | position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset; |
1434 | /* can not rely on (or let) writepage write this data */ | 1437 | /* can not rely on (or let) writepage write this data */ |
@@ -1989,34 +1992,52 @@ static int cifs_prepare_write(struct file *file, struct page *page, | |||
1989 | unsigned from, unsigned to) | 1992 | unsigned from, unsigned to) |
1990 | { | 1993 | { |
1991 | int rc = 0; | 1994 | int rc = 0; |
1992 | loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; | 1995 | loff_t i_size; |
1996 | loff_t offset; | ||
1997 | |||
1993 | cFYI(1, ("prepare write for page %p from %d to %d",page,from,to)); | 1998 | cFYI(1, ("prepare write for page %p from %d to %d",page,from,to)); |
1994 | if (!PageUptodate(page)) { | 1999 | if (PageUptodate(page)) |
1995 | /* if (to - from != PAGE_CACHE_SIZE) { | 2000 | return 0; |
1996 | void *kaddr = kmap_atomic(page, KM_USER0); | 2001 | |
2002 | /* If we are writing a full page it will be up to date, | ||
2003 | no need to read from the server */ | ||
2004 | if ((to == PAGE_CACHE_SIZE) && (from == 0)) { | ||
2005 | SetPageUptodate(page); | ||
2006 | return 0; | ||
2007 | } | ||
2008 | |||
2009 | offset = (loff_t)page->index << PAGE_CACHE_SHIFT; | ||
2010 | i_size = i_size_read(page->mapping->host); | ||
2011 | |||
2012 | if ((offset >= i_size) || | ||
2013 | ((from == 0) && (offset + to) >= i_size)) { | ||
2014 | /* | ||
2015 | * We don't need to read data beyond the end of the file. | ||
2016 | * zero it, and set the page uptodate | ||
2017 | */ | ||
2018 | void *kaddr = kmap_atomic(page, KM_USER0); | ||
2019 | |||
2020 | if (from) | ||
1997 | memset(kaddr, 0, from); | 2021 | memset(kaddr, 0, from); |
2022 | if (to < PAGE_CACHE_SIZE) | ||
1998 | memset(kaddr + to, 0, PAGE_CACHE_SIZE - to); | 2023 | memset(kaddr + to, 0, PAGE_CACHE_SIZE - to); |
1999 | flush_dcache_page(page); | 2024 | flush_dcache_page(page); |
2000 | kunmap_atomic(kaddr, KM_USER0); | 2025 | kunmap_atomic(kaddr, KM_USER0); |
2001 | } */ | 2026 | SetPageUptodate(page); |
2002 | /* If we are writing a full page it will be up to date, | 2027 | } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) { |
2003 | no need to read from the server */ | ||
2004 | if ((to == PAGE_CACHE_SIZE) && (from == 0)) | ||
2005 | SetPageUptodate(page); | ||
2006 | |||
2007 | /* might as well read a page, it is fast enough */ | 2028 | /* might as well read a page, it is fast enough */ |
2008 | if ((file->f_flags & O_ACCMODE) != O_WRONLY) { | 2029 | rc = cifs_readpage_worker(file, page, &offset); |
2009 | rc = cifs_readpage_worker(file, page, &offset); | 2030 | } else { |
2010 | } else { | 2031 | /* we could try using another file handle if there is one - |
2011 | /* should we try using another file handle if there is one - | 2032 | but how would we lock it to prevent close of that handle |
2012 | how would we lock it to prevent close of that handle | 2033 | racing with this read? In any case |
2013 | racing with this read? | 2034 | this will be written out by commit_write so is fine */ |
2014 | In any case this will be written out by commit_write */ | ||
2015 | } | ||
2016 | } | 2035 | } |
2017 | 2036 | ||
2018 | /* BB should we pass any errors back? | 2037 | /* we do not need to pass errors back |
2019 | e.g. if we do not have read access to the file */ | 2038 | e.g. if we do not have read access to the file |
2039 | because cifs_commit_write will do the right thing. -- shaggy */ | ||
2040 | |||
2020 | return 0; | 2041 | return 0; |
2021 | } | 2042 | } |
2022 | 2043 | ||
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 37c6ce87416b..86b9dbbd8441 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -143,10 +143,10 @@ int cifs_get_inode_info_unix(struct inode **pinode, | |||
143 | inode->i_gid = le64_to_cpu(findData.Gid); | 143 | inode->i_gid = le64_to_cpu(findData.Gid); |
144 | inode->i_nlink = le64_to_cpu(findData.Nlinks); | 144 | inode->i_nlink = le64_to_cpu(findData.Nlinks); |
145 | 145 | ||
146 | spin_lock(&inode->i_lock); | ||
146 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { | 147 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { |
147 | /* can not safely change the file size here if the | 148 | /* can not safely change the file size here if the |
148 | client is writing to it due to potential races */ | 149 | client is writing to it due to potential races */ |
149 | |||
150 | i_size_write(inode, end_of_file); | 150 | i_size_write(inode, end_of_file); |
151 | 151 | ||
152 | /* blksize needs to be multiple of two. So safer to default to | 152 | /* blksize needs to be multiple of two. So safer to default to |
@@ -162,6 +162,7 @@ int cifs_get_inode_info_unix(struct inode **pinode, | |||
162 | /* for this calculation */ | 162 | /* for this calculation */ |
163 | inode->i_blocks = (512 - 1 + num_of_bytes) >> 9; | 163 | inode->i_blocks = (512 - 1 + num_of_bytes) >> 9; |
164 | } | 164 | } |
165 | spin_unlock(&inode->i_lock); | ||
165 | 166 | ||
166 | if (num_of_bytes < end_of_file) | 167 | if (num_of_bytes < end_of_file) |
167 | cFYI(1, ("allocation size less than end of file")); | 168 | cFYI(1, ("allocation size less than end of file")); |
@@ -496,6 +497,8 @@ int cifs_get_inode_info(struct inode **pinode, | |||
496 | /* BB add code here - | 497 | /* BB add code here - |
497 | validate if device or weird share or device type? */ | 498 | validate if device or weird share or device type? */ |
498 | } | 499 | } |
500 | |||
501 | spin_lock(&inode->i_lock); | ||
499 | if (is_size_safe_to_change(cifsInfo, le64_to_cpu(pfindData->EndOfFile))) { | 502 | if (is_size_safe_to_change(cifsInfo, le64_to_cpu(pfindData->EndOfFile))) { |
500 | /* can not safely shrink the file size here if the | 503 | /* can not safely shrink the file size here if the |
501 | client is writing to it due to potential races */ | 504 | client is writing to it due to potential races */ |
@@ -506,6 +509,7 @@ int cifs_get_inode_info(struct inode **pinode, | |||
506 | inode->i_blocks = (512 - 1 + le64_to_cpu( | 509 | inode->i_blocks = (512 - 1 + le64_to_cpu( |
507 | pfindData->AllocationSize)) >> 9; | 510 | pfindData->AllocationSize)) >> 9; |
508 | } | 511 | } |
512 | spin_unlock(&inode->i_lock); | ||
509 | 513 | ||
510 | inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks); | 514 | inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks); |
511 | 515 | ||
@@ -834,8 +838,10 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) | |||
834 | 838 | ||
835 | if (!rc) { | 839 | if (!rc) { |
836 | drop_nlink(inode); | 840 | drop_nlink(inode); |
841 | spin_lock(&direntry->d_inode->i_lock); | ||
837 | i_size_write(direntry->d_inode,0); | 842 | i_size_write(direntry->d_inode,0); |
838 | clear_nlink(direntry->d_inode); | 843 | clear_nlink(direntry->d_inode); |
844 | spin_unlock(&direntry->d_inode->i_lock); | ||
839 | } | 845 | } |
840 | 846 | ||
841 | cifsInode = CIFS_I(direntry->d_inode); | 847 | cifsInode = CIFS_I(direntry->d_inode); |
@@ -1128,6 +1134,52 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from) | |||
1128 | return rc; | 1134 | return rc; |
1129 | } | 1135 | } |
1130 | 1136 | ||
1137 | static int cifs_vmtruncate(struct inode * inode, loff_t offset) | ||
1138 | { | ||
1139 | struct address_space *mapping = inode->i_mapping; | ||
1140 | unsigned long limit; | ||
1141 | |||
1142 | spin_lock(&inode->i_lock); | ||
1143 | if (inode->i_size < offset) | ||
1144 | goto do_expand; | ||
1145 | /* | ||
1146 | * truncation of in-use swapfiles is disallowed - it would cause | ||
1147 | * subsequent swapout to scribble on the now-freed blocks. | ||
1148 | */ | ||
1149 | if (IS_SWAPFILE(inode)) { | ||
1150 | spin_unlock(&inode->i_lock); | ||
1151 | goto out_busy; | ||
1152 | } | ||
1153 | i_size_write(inode, offset); | ||
1154 | spin_unlock(&inode->i_lock); | ||
1155 | unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1); | ||
1156 | truncate_inode_pages(mapping, offset); | ||
1157 | goto out_truncate; | ||
1158 | |||
1159 | do_expand: | ||
1160 | limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; | ||
1161 | if (limit != RLIM_INFINITY && offset > limit) { | ||
1162 | spin_unlock(&inode->i_lock); | ||
1163 | goto out_sig; | ||
1164 | } | ||
1165 | if (offset > inode->i_sb->s_maxbytes) { | ||
1166 | spin_unlock(&inode->i_lock); | ||
1167 | goto out_big; | ||
1168 | } | ||
1169 | i_size_write(inode, offset); | ||
1170 | spin_unlock(&inode->i_lock); | ||
1171 | out_truncate: | ||
1172 | if (inode->i_op && inode->i_op->truncate) | ||
1173 | inode->i_op->truncate(inode); | ||
1174 | return 0; | ||
1175 | out_sig: | ||
1176 | send_sig(SIGXFSZ, current, 0); | ||
1177 | out_big: | ||
1178 | return -EFBIG; | ||
1179 | out_busy: | ||
1180 | return -ETXTBSY; | ||
1181 | } | ||
1182 | |||
1131 | int cifs_setattr(struct dentry *direntry, struct iattr *attrs) | 1183 | int cifs_setattr(struct dentry *direntry, struct iattr *attrs) |
1132 | { | 1184 | { |
1133 | int xid; | 1185 | int xid; |
@@ -1244,7 +1296,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) | |||
1244 | */ | 1296 | */ |
1245 | 1297 | ||
1246 | if (rc == 0) { | 1298 | if (rc == 0) { |
1247 | rc = vmtruncate(direntry->d_inode, attrs->ia_size); | 1299 | rc = cifs_vmtruncate(direntry->d_inode, attrs->ia_size); |
1248 | cifs_truncate_page(direntry->d_inode->i_mapping, | 1300 | cifs_truncate_page(direntry->d_inode->i_mapping, |
1249 | direntry->d_inode->i_size); | 1301 | direntry->d_inode->i_size); |
1250 | } else | 1302 | } else |
@@ -1379,9 +1431,11 @@ cifs_setattr_exit: | |||
1379 | return rc; | 1431 | return rc; |
1380 | } | 1432 | } |
1381 | 1433 | ||
1434 | #if 0 | ||
1382 | void cifs_delete_inode(struct inode *inode) | 1435 | void cifs_delete_inode(struct inode *inode) |
1383 | { | 1436 | { |
1384 | cFYI(1, ("In cifs_delete_inode, inode = 0x%p", inode)); | 1437 | cFYI(1, ("In cifs_delete_inode, inode = 0x%p", inode)); |
1385 | /* may have to add back in if and when safe distributed caching of | 1438 | /* may have to add back in if and when safe distributed caching of |
1386 | directories added e.g. via FindNotify */ | 1439 | directories added e.g. via FindNotify */ |
1387 | } | 1440 | } |
1441 | #endif | ||
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index c444798f0740..44cfb528797d 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Directory search handling | 4 | * Directory search handling |
5 | * | 5 | * |
6 | * Copyright (C) International Business Machines Corp., 2004, 2005 | 6 | * Copyright (C) International Business Machines Corp., 2004, 2007 |
7 | * Author(s): Steve French (sfrench@us.ibm.com) | 7 | * Author(s): Steve French (sfrench@us.ibm.com) |
8 | * | 8 | * |
9 | * This library is free software; you can redistribute it and/or modify | 9 | * This library is free software; you can redistribute it and/or modify |
@@ -226,6 +226,7 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type, | |||
226 | atomic_set(&cifsInfo->inUse, 1); | 226 | atomic_set(&cifsInfo->inUse, 1); |
227 | } | 227 | } |
228 | 228 | ||
229 | spin_lock(&tmp_inode->i_lock); | ||
229 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { | 230 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { |
230 | /* can not safely change the file size here if the | 231 | /* can not safely change the file size here if the |
231 | client is writing to it due to potential races */ | 232 | client is writing to it due to potential races */ |
@@ -235,6 +236,7 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type, | |||
235 | /* for this calculation, even though the reported blocksize is larger */ | 236 | /* for this calculation, even though the reported blocksize is larger */ |
236 | tmp_inode->i_blocks = (512 - 1 + allocation_size) >> 9; | 237 | tmp_inode->i_blocks = (512 - 1 + allocation_size) >> 9; |
237 | } | 238 | } |
239 | spin_unlock(&tmp_inode->i_lock); | ||
238 | 240 | ||
239 | if (allocation_size < end_of_file) | 241 | if (allocation_size < end_of_file) |
240 | cFYI(1, ("May be sparse file, allocation less than file size")); | 242 | cFYI(1, ("May be sparse file, allocation less than file size")); |
@@ -355,6 +357,7 @@ static void unix_fill_in_inode(struct inode *tmp_inode, | |||
355 | tmp_inode->i_gid = le64_to_cpu(pfindData->Gid); | 357 | tmp_inode->i_gid = le64_to_cpu(pfindData->Gid); |
356 | tmp_inode->i_nlink = le64_to_cpu(pfindData->Nlinks); | 358 | tmp_inode->i_nlink = le64_to_cpu(pfindData->Nlinks); |
357 | 359 | ||
360 | spin_lock(&tmp_inode->i_lock); | ||
358 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { | 361 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { |
359 | /* can not safely change the file size here if the | 362 | /* can not safely change the file size here if the |
360 | client is writing to it due to potential races */ | 363 | client is writing to it due to potential races */ |
@@ -364,6 +367,7 @@ static void unix_fill_in_inode(struct inode *tmp_inode, | |||
364 | /* for this calculation, not the real blocksize */ | 367 | /* for this calculation, not the real blocksize */ |
365 | tmp_inode->i_blocks = (512 - 1 + num_of_bytes) >> 9; | 368 | tmp_inode->i_blocks = (512 - 1 + num_of_bytes) >> 9; |
366 | } | 369 | } |
370 | spin_unlock(&tmp_inode->i_lock); | ||
367 | 371 | ||
368 | if (S_ISREG(tmp_inode->i_mode)) { | 372 | if (S_ISREG(tmp_inode->i_mode)) { |
369 | cFYI(1, ("File inode")); | 373 | cFYI(1, ("File inode")); |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index f80007eaebf4..5f468459a1e2 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -499,7 +499,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
499 | due to last connection to this server being unmounted */ | 499 | due to last connection to this server being unmounted */ |
500 | if (signal_pending(current)) { | 500 | if (signal_pending(current)) { |
501 | /* if signal pending do not hold up user for full smb timeout | 501 | /* if signal pending do not hold up user for full smb timeout |
502 | but we still give response a change to complete */ | 502 | but we still give response a chance to complete */ |
503 | timeout = 2 * HZ; | 503 | timeout = 2 * HZ; |
504 | } | 504 | } |
505 | 505 | ||
@@ -587,7 +587,6 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
587 | } | 587 | } |
588 | 588 | ||
589 | out: | 589 | out: |
590 | |||
591 | DeleteMidQEntry(midQ); | 590 | DeleteMidQEntry(midQ); |
592 | atomic_dec(&ses->server->inFlight); | 591 | atomic_dec(&ses->server->inFlight); |
593 | wake_up(&ses->server->request_q); | 592 | wake_up(&ses->server->request_q); |
@@ -681,7 +680,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
681 | due to last connection to this server being unmounted */ | 680 | due to last connection to this server being unmounted */ |
682 | if (signal_pending(current)) { | 681 | if (signal_pending(current)) { |
683 | /* if signal pending do not hold up user for full smb timeout | 682 | /* if signal pending do not hold up user for full smb timeout |
684 | but we still give response a change to complete */ | 683 | but we still give response a chance to complete */ |
685 | timeout = 2 * HZ; | 684 | timeout = 2 * HZ; |
686 | } | 685 | } |
687 | 686 | ||
@@ -765,7 +764,6 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
765 | } | 764 | } |
766 | 765 | ||
767 | out: | 766 | out: |
768 | |||
769 | DeleteMidQEntry(midQ); | 767 | DeleteMidQEntry(midQ); |
770 | atomic_dec(&ses->server->inFlight); | 768 | atomic_dec(&ses->server->inFlight); |
771 | wake_up(&ses->server->request_q); | 769 | wake_up(&ses->server->request_q); |
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index 14939ddf74f1..7285c94956c4 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c | |||
@@ -576,6 +576,12 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) | |||
576 | server->packet = vmalloc(NCP_PACKET_SIZE); | 576 | server->packet = vmalloc(NCP_PACKET_SIZE); |
577 | if (server->packet == NULL) | 577 | if (server->packet == NULL) |
578 | goto out_nls; | 578 | goto out_nls; |
579 | server->txbuf = vmalloc(NCP_PACKET_SIZE); | ||
580 | if (server->txbuf == NULL) | ||
581 | goto out_packet; | ||
582 | server->rxbuf = vmalloc(NCP_PACKET_SIZE); | ||
583 | if (server->rxbuf == NULL) | ||
584 | goto out_txbuf; | ||
579 | 585 | ||
580 | sock->sk->sk_data_ready = ncp_tcp_data_ready; | 586 | sock->sk->sk_data_ready = ncp_tcp_data_ready; |
581 | sock->sk->sk_error_report = ncp_tcp_error_report; | 587 | sock->sk->sk_error_report = ncp_tcp_error_report; |
@@ -597,7 +603,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) | |||
597 | error = ncp_connect(server); | 603 | error = ncp_connect(server); |
598 | ncp_unlock_server(server); | 604 | ncp_unlock_server(server); |
599 | if (error < 0) | 605 | if (error < 0) |
600 | goto out_packet; | 606 | goto out_rxbuf; |
601 | DPRINTK("ncp_fill_super: NCP_SBP(sb) = %x\n", (int) NCP_SBP(sb)); | 607 | DPRINTK("ncp_fill_super: NCP_SBP(sb) = %x\n", (int) NCP_SBP(sb)); |
602 | 608 | ||
603 | error = -EMSGSIZE; /* -EREMOTESIDEINCOMPATIBLE */ | 609 | error = -EMSGSIZE; /* -EREMOTESIDEINCOMPATIBLE */ |
@@ -666,8 +672,12 @@ out_disconnect: | |||
666 | ncp_lock_server(server); | 672 | ncp_lock_server(server); |
667 | ncp_disconnect(server); | 673 | ncp_disconnect(server); |
668 | ncp_unlock_server(server); | 674 | ncp_unlock_server(server); |
669 | out_packet: | 675 | out_rxbuf: |
670 | ncp_stop_tasks(server); | 676 | ncp_stop_tasks(server); |
677 | vfree(server->rxbuf); | ||
678 | out_txbuf: | ||
679 | vfree(server->txbuf); | ||
680 | out_packet: | ||
671 | vfree(server->packet); | 681 | vfree(server->packet); |
672 | out_nls: | 682 | out_nls: |
673 | #ifdef CONFIG_NCPFS_NLS | 683 | #ifdef CONFIG_NCPFS_NLS |
@@ -723,6 +733,8 @@ static void ncp_put_super(struct super_block *sb) | |||
723 | 733 | ||
724 | kfree(server->priv.data); | 734 | kfree(server->priv.data); |
725 | kfree(server->auth.object_name); | 735 | kfree(server->auth.object_name); |
736 | vfree(server->rxbuf); | ||
737 | vfree(server->txbuf); | ||
726 | vfree(server->packet); | 738 | vfree(server->packet); |
727 | sb->s_fs_info = NULL; | 739 | sb->s_fs_info = NULL; |
728 | kfree(server); | 740 | kfree(server); |
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c index e496d8b65e92..e37df8d5fe70 100644 --- a/fs/ncpfs/sock.c +++ b/fs/ncpfs/sock.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/socket.h> | 14 | #include <linux/socket.h> |
15 | #include <linux/fcntl.h> | 15 | #include <linux/fcntl.h> |
16 | #include <linux/stat.h> | 16 | #include <linux/stat.h> |
17 | #include <linux/string.h> | ||
17 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
18 | #include <linux/in.h> | 19 | #include <linux/in.h> |
19 | #include <linux/net.h> | 20 | #include <linux/net.h> |
@@ -55,10 +56,11 @@ static int _send(struct socket *sock, const void *buff, int len) | |||
55 | struct ncp_request_reply { | 56 | struct ncp_request_reply { |
56 | struct list_head req; | 57 | struct list_head req; |
57 | wait_queue_head_t wq; | 58 | wait_queue_head_t wq; |
58 | struct ncp_reply_header* reply_buf; | 59 | atomic_t refs; |
60 | unsigned char* reply_buf; | ||
59 | size_t datalen; | 61 | size_t datalen; |
60 | int result; | 62 | int result; |
61 | enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE } status; | 63 | enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status; |
62 | struct kvec* tx_ciov; | 64 | struct kvec* tx_ciov; |
63 | size_t tx_totallen; | 65 | size_t tx_totallen; |
64 | size_t tx_iovlen; | 66 | size_t tx_iovlen; |
@@ -67,6 +69,32 @@ struct ncp_request_reply { | |||
67 | u_int32_t sign[6]; | 69 | u_int32_t sign[6]; |
68 | }; | 70 | }; |
69 | 71 | ||
72 | static inline struct ncp_request_reply* ncp_alloc_req(void) | ||
73 | { | ||
74 | struct ncp_request_reply *req; | ||
75 | |||
76 | req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL); | ||
77 | if (!req) | ||
78 | return NULL; | ||
79 | |||
80 | init_waitqueue_head(&req->wq); | ||
81 | atomic_set(&req->refs, (1)); | ||
82 | req->status = RQ_IDLE; | ||
83 | |||
84 | return req; | ||
85 | } | ||
86 | |||
/* Take an additional reference on a request; paired with ncp_req_put(). */
static void ncp_req_get(struct ncp_request_reply *req)
{
	atomic_inc(&req->refs);
}
91 | |||
92 | static void ncp_req_put(struct ncp_request_reply *req) | ||
93 | { | ||
94 | if (atomic_dec_and_test(&req->refs)) | ||
95 | kfree(req); | ||
96 | } | ||
97 | |||
70 | void ncp_tcp_data_ready(struct sock *sk, int len) | 98 | void ncp_tcp_data_ready(struct sock *sk, int len) |
71 | { | 99 | { |
72 | struct ncp_server *server = sk->sk_user_data; | 100 | struct ncp_server *server = sk->sk_user_data; |
@@ -101,14 +129,17 @@ void ncpdgram_timeout_call(unsigned long v) | |||
101 | schedule_work(&server->timeout_tq); | 129 | schedule_work(&server->timeout_tq); |
102 | } | 130 | } |
103 | 131 | ||
104 | static inline void ncp_finish_request(struct ncp_request_reply *req, int result) | 132 | static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result) |
105 | { | 133 | { |
106 | req->result = result; | 134 | req->result = result; |
135 | if (req->status != RQ_ABANDONED) | ||
136 | memcpy(req->reply_buf, server->rxbuf, req->datalen); | ||
107 | req->status = RQ_DONE; | 137 | req->status = RQ_DONE; |
108 | wake_up_all(&req->wq); | 138 | wake_up_all(&req->wq); |
139 | ncp_req_put(req); | ||
109 | } | 140 | } |
110 | 141 | ||
111 | static void __abort_ncp_connection(struct ncp_server *server, struct ncp_request_reply *aborted, int err) | 142 | static void __abort_ncp_connection(struct ncp_server *server) |
112 | { | 143 | { |
113 | struct ncp_request_reply *req; | 144 | struct ncp_request_reply *req; |
114 | 145 | ||
@@ -118,31 +149,19 @@ static void __abort_ncp_connection(struct ncp_server *server, struct ncp_request | |||
118 | req = list_entry(server->tx.requests.next, struct ncp_request_reply, req); | 149 | req = list_entry(server->tx.requests.next, struct ncp_request_reply, req); |
119 | 150 | ||
120 | list_del_init(&req->req); | 151 | list_del_init(&req->req); |
121 | if (req == aborted) { | 152 | ncp_finish_request(server, req, -EIO); |
122 | ncp_finish_request(req, err); | ||
123 | } else { | ||
124 | ncp_finish_request(req, -EIO); | ||
125 | } | ||
126 | } | 153 | } |
127 | req = server->rcv.creq; | 154 | req = server->rcv.creq; |
128 | if (req) { | 155 | if (req) { |
129 | server->rcv.creq = NULL; | 156 | server->rcv.creq = NULL; |
130 | if (req == aborted) { | 157 | ncp_finish_request(server, req, -EIO); |
131 | ncp_finish_request(req, err); | ||
132 | } else { | ||
133 | ncp_finish_request(req, -EIO); | ||
134 | } | ||
135 | server->rcv.ptr = NULL; | 158 | server->rcv.ptr = NULL; |
136 | server->rcv.state = 0; | 159 | server->rcv.state = 0; |
137 | } | 160 | } |
138 | req = server->tx.creq; | 161 | req = server->tx.creq; |
139 | if (req) { | 162 | if (req) { |
140 | server->tx.creq = NULL; | 163 | server->tx.creq = NULL; |
141 | if (req == aborted) { | 164 | ncp_finish_request(server, req, -EIO); |
142 | ncp_finish_request(req, err); | ||
143 | } else { | ||
144 | ncp_finish_request(req, -EIO); | ||
145 | } | ||
146 | } | 165 | } |
147 | } | 166 | } |
148 | 167 | ||
@@ -160,10 +179,12 @@ static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_req | |||
160 | break; | 179 | break; |
161 | case RQ_QUEUED: | 180 | case RQ_QUEUED: |
162 | list_del_init(&req->req); | 181 | list_del_init(&req->req); |
163 | ncp_finish_request(req, err); | 182 | ncp_finish_request(server, req, err); |
164 | break; | 183 | break; |
165 | case RQ_INPROGRESS: | 184 | case RQ_INPROGRESS: |
166 | __abort_ncp_connection(server, req, err); | 185 | req->status = RQ_ABANDONED; |
186 | break; | ||
187 | case RQ_ABANDONED: | ||
167 | break; | 188 | break; |
168 | } | 189 | } |
169 | } | 190 | } |
@@ -177,7 +198,7 @@ static inline void ncp_abort_request(struct ncp_server *server, struct ncp_reque | |||
177 | 198 | ||
178 | static inline void __ncptcp_abort(struct ncp_server *server) | 199 | static inline void __ncptcp_abort(struct ncp_server *server) |
179 | { | 200 | { |
180 | __abort_ncp_connection(server, NULL, 0); | 201 | __abort_ncp_connection(server); |
181 | } | 202 | } |
182 | 203 | ||
183 | static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req) | 204 | static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req) |
@@ -294,6 +315,11 @@ static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_r | |||
294 | 315 | ||
295 | static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req) | 316 | static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req) |
296 | { | 317 | { |
318 | /* we copy the data so that we do not depend on the caller | ||
319 | staying alive */ | ||
320 | memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len); | ||
321 | req->tx_iov[1].iov_base = server->txbuf; | ||
322 | |||
297 | if (server->ncp_sock->type == SOCK_STREAM) | 323 | if (server->ncp_sock->type == SOCK_STREAM) |
298 | ncptcp_start_request(server, req); | 324 | ncptcp_start_request(server, req); |
299 | else | 325 | else |
@@ -308,6 +334,7 @@ static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply * | |||
308 | printk(KERN_ERR "ncpfs: tcp: Server died\n"); | 334 | printk(KERN_ERR "ncpfs: tcp: Server died\n"); |
309 | return -EIO; | 335 | return -EIO; |
310 | } | 336 | } |
337 | ncp_req_get(req); | ||
311 | if (server->tx.creq || server->rcv.creq) { | 338 | if (server->tx.creq || server->rcv.creq) { |
312 | req->status = RQ_QUEUED; | 339 | req->status = RQ_QUEUED; |
313 | list_add_tail(&req->req, &server->tx.requests); | 340 | list_add_tail(&req->req, &server->tx.requests); |
@@ -409,7 +436,7 @@ void ncpdgram_rcv_proc(struct work_struct *work) | |||
409 | server->timeout_last = NCP_MAX_RPC_TIMEOUT; | 436 | server->timeout_last = NCP_MAX_RPC_TIMEOUT; |
410 | mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT); | 437 | mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT); |
411 | } else if (reply.type == NCP_REPLY) { | 438 | } else if (reply.type == NCP_REPLY) { |
412 | result = _recv(sock, (void*)req->reply_buf, req->datalen, MSG_DONTWAIT); | 439 | result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT); |
413 | #ifdef CONFIG_NCPFS_PACKET_SIGNING | 440 | #ifdef CONFIG_NCPFS_PACKET_SIGNING |
414 | if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) { | 441 | if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) { |
415 | if (result < 8 + 8) { | 442 | if (result < 8 + 8) { |
@@ -419,7 +446,7 @@ void ncpdgram_rcv_proc(struct work_struct *work) | |||
419 | 446 | ||
420 | result -= 8; | 447 | result -= 8; |
421 | hdrl = sock->sk->sk_family == AF_INET ? 8 : 6; | 448 | hdrl = sock->sk->sk_family == AF_INET ? 8 : 6; |
422 | if (sign_verify_reply(server, ((char*)req->reply_buf) + hdrl, result - hdrl, cpu_to_le32(result), ((char*)req->reply_buf) + result)) { | 449 | if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) { |
423 | printk(KERN_INFO "ncpfs: Signature violation\n"); | 450 | printk(KERN_INFO "ncpfs: Signature violation\n"); |
424 | result = -EIO; | 451 | result = -EIO; |
425 | } | 452 | } |
@@ -428,7 +455,7 @@ void ncpdgram_rcv_proc(struct work_struct *work) | |||
428 | #endif | 455 | #endif |
429 | del_timer(&server->timeout_tm); | 456 | del_timer(&server->timeout_tm); |
430 | server->rcv.creq = NULL; | 457 | server->rcv.creq = NULL; |
431 | ncp_finish_request(req, result); | 458 | ncp_finish_request(server, req, result); |
432 | __ncp_next_request(server); | 459 | __ncp_next_request(server); |
433 | mutex_unlock(&server->rcv.creq_mutex); | 460 | mutex_unlock(&server->rcv.creq_mutex); |
434 | continue; | 461 | continue; |
@@ -478,12 +505,6 @@ void ncpdgram_timeout_proc(struct work_struct *work) | |||
478 | mutex_unlock(&server->rcv.creq_mutex); | 505 | mutex_unlock(&server->rcv.creq_mutex); |
479 | } | 506 | } |
480 | 507 | ||
481 | static inline void ncp_init_req(struct ncp_request_reply* req) | ||
482 | { | ||
483 | init_waitqueue_head(&req->wq); | ||
484 | req->status = RQ_IDLE; | ||
485 | } | ||
486 | |||
487 | static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len) | 508 | static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len) |
488 | { | 509 | { |
489 | int result; | 510 | int result; |
@@ -601,8 +622,8 @@ skipdata:; | |||
601 | goto skipdata; | 622 | goto skipdata; |
602 | } | 623 | } |
603 | req->datalen = datalen - 8; | 624 | req->datalen = datalen - 8; |
604 | req->reply_buf->type = NCP_REPLY; | 625 | ((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY; |
605 | server->rcv.ptr = (unsigned char*)(req->reply_buf) + 2; | 626 | server->rcv.ptr = server->rxbuf + 2; |
606 | server->rcv.len = datalen - 10; | 627 | server->rcv.len = datalen - 10; |
607 | server->rcv.state = 1; | 628 | server->rcv.state = 1; |
608 | break; | 629 | break; |
@@ -615,12 +636,12 @@ skipdata:; | |||
615 | case 1: | 636 | case 1: |
616 | req = server->rcv.creq; | 637 | req = server->rcv.creq; |
617 | if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) { | 638 | if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) { |
618 | if (req->reply_buf->sequence != server->sequence) { | 639 | if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) { |
619 | printk(KERN_ERR "ncpfs: tcp: Bad sequence number\n"); | 640 | printk(KERN_ERR "ncpfs: tcp: Bad sequence number\n"); |
620 | __ncp_abort_request(server, req, -EIO); | 641 | __ncp_abort_request(server, req, -EIO); |
621 | return -EIO; | 642 | return -EIO; |
622 | } | 643 | } |
623 | if ((req->reply_buf->conn_low | (req->reply_buf->conn_high << 8)) != server->connection) { | 644 | if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) { |
624 | printk(KERN_ERR "ncpfs: tcp: Connection number mismatch\n"); | 645 | printk(KERN_ERR "ncpfs: tcp: Connection number mismatch\n"); |
625 | __ncp_abort_request(server, req, -EIO); | 646 | __ncp_abort_request(server, req, -EIO); |
626 | return -EIO; | 647 | return -EIO; |
@@ -628,14 +649,14 @@ skipdata:; | |||
628 | } | 649 | } |
629 | #ifdef CONFIG_NCPFS_PACKET_SIGNING | 650 | #ifdef CONFIG_NCPFS_PACKET_SIGNING |
630 | if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) { | 651 | if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) { |
631 | if (sign_verify_reply(server, (unsigned char*)(req->reply_buf) + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) { | 652 | if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) { |
632 | printk(KERN_ERR "ncpfs: tcp: Signature violation\n"); | 653 | printk(KERN_ERR "ncpfs: tcp: Signature violation\n"); |
633 | __ncp_abort_request(server, req, -EIO); | 654 | __ncp_abort_request(server, req, -EIO); |
634 | return -EIO; | 655 | return -EIO; |
635 | } | 656 | } |
636 | } | 657 | } |
637 | #endif | 658 | #endif |
638 | ncp_finish_request(req, req->datalen); | 659 | ncp_finish_request(server, req, req->datalen); |
639 | nextreq:; | 660 | nextreq:; |
640 | __ncp_next_request(server); | 661 | __ncp_next_request(server); |
641 | case 2: | 662 | case 2: |
@@ -645,7 +666,7 @@ skipdata:; | |||
645 | server->rcv.state = 0; | 666 | server->rcv.state = 0; |
646 | break; | 667 | break; |
647 | case 3: | 668 | case 3: |
648 | ncp_finish_request(server->rcv.creq, -EIO); | 669 | ncp_finish_request(server, server->rcv.creq, -EIO); |
649 | goto nextreq; | 670 | goto nextreq; |
650 | case 5: | 671 | case 5: |
651 | info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len); | 672 | info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len); |
@@ -675,28 +696,39 @@ void ncp_tcp_tx_proc(struct work_struct *work) | |||
675 | } | 696 | } |
676 | 697 | ||
677 | static int do_ncp_rpc_call(struct ncp_server *server, int size, | 698 | static int do_ncp_rpc_call(struct ncp_server *server, int size, |
678 | struct ncp_reply_header* reply_buf, int max_reply_size) | 699 | unsigned char* reply_buf, int max_reply_size) |
679 | { | 700 | { |
680 | int result; | 701 | int result; |
681 | struct ncp_request_reply req; | 702 | struct ncp_request_reply *req; |
682 | 703 | ||
683 | ncp_init_req(&req); | 704 | req = ncp_alloc_req(); |
684 | req.reply_buf = reply_buf; | 705 | if (!req) |
685 | req.datalen = max_reply_size; | 706 | return -ENOMEM; |
686 | req.tx_iov[1].iov_base = server->packet; | 707 | |
687 | req.tx_iov[1].iov_len = size; | 708 | req->reply_buf = reply_buf; |
688 | req.tx_iovlen = 1; | 709 | req->datalen = max_reply_size; |
689 | req.tx_totallen = size; | 710 | req->tx_iov[1].iov_base = server->packet; |
690 | req.tx_type = *(u_int16_t*)server->packet; | 711 | req->tx_iov[1].iov_len = size; |
691 | 712 | req->tx_iovlen = 1; | |
692 | result = ncp_add_request(server, &req); | 713 | req->tx_totallen = size; |
693 | if (result < 0) { | 714 | req->tx_type = *(u_int16_t*)server->packet; |
694 | return result; | 715 | |
695 | } | 716 | result = ncp_add_request(server, req); |
696 | if (wait_event_interruptible(req.wq, req.status == RQ_DONE)) { | 717 | if (result < 0) |
697 | ncp_abort_request(server, &req, -EIO); | 718 | goto out; |
719 | |||
720 | if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) { | ||
721 | ncp_abort_request(server, req, -EINTR); | ||
722 | result = -EINTR; | ||
723 | goto out; | ||
698 | } | 724 | } |
699 | return req.result; | 725 | |
726 | result = req->result; | ||
727 | |||
728 | out: | ||
729 | ncp_req_put(req); | ||
730 | |||
731 | return result; | ||
700 | } | 732 | } |
701 | 733 | ||
702 | /* | 734 | /* |
@@ -751,11 +783,6 @@ static int ncp_do_request(struct ncp_server *server, int size, | |||
751 | 783 | ||
752 | DDPRINTK("do_ncp_rpc_call returned %d\n", result); | 784 | DDPRINTK("do_ncp_rpc_call returned %d\n", result); |
753 | 785 | ||
754 | if (result < 0) { | ||
755 | /* There was a problem with I/O, so the connections is | ||
756 | * no longer usable. */ | ||
757 | ncp_invalidate_conn(server); | ||
758 | } | ||
759 | return result; | 786 | return result; |
760 | } | 787 | } |
761 | 788 | ||
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index 8813990304fe..85a668680f82 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c | |||
@@ -431,6 +431,8 @@ int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent) | |||
431 | new_parent_dentry = new_parent ? | 431 | new_parent_dentry = new_parent ? |
432 | new_parent->dentry : sysfs_mount->mnt_sb->s_root; | 432 | new_parent->dentry : sysfs_mount->mnt_sb->s_root; |
433 | 433 | ||
434 | if (old_parent_dentry->d_inode == new_parent_dentry->d_inode) | ||
435 | return 0; /* nothing to move */ | ||
434 | again: | 436 | again: |
435 | mutex_lock(&old_parent_dentry->d_inode->i_mutex); | 437 | mutex_lock(&old_parent_dentry->d_inode->i_mutex); |
436 | if (!mutex_trylock(&new_parent_dentry->d_inode->i_mutex)) { | 438 | if (!mutex_trylock(&new_parent_dentry->d_inode->i_mutex)) { |
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h index e997891cc7cc..84016ff481b9 100644 --- a/include/asm-i386/tsc.h +++ b/include/asm-i386/tsc.h | |||
@@ -1 +1,67 @@ | |||
1 | #include <asm-x86_64/tsc.h> | 1 | /* |
2 | * linux/include/asm-i386/tsc.h | ||
3 | * | ||
4 | * i386 TSC related functions | ||
5 | */ | ||
6 | #ifndef _ASM_i386_TSC_H | ||
7 | #define _ASM_i386_TSC_H | ||
8 | |||
9 | #include <asm/processor.h> | ||
10 | |||
11 | /* | ||
12 | * Standard way to access the cycle counter. | ||
13 | */ | ||
14 | typedef unsigned long long cycles_t; | ||
15 | |||
16 | extern unsigned int cpu_khz; | ||
17 | extern unsigned int tsc_khz; | ||
18 | |||
/*
 * Read the CPU cycle counter (TSC).  Returns 0 on configurations or
 * CPUs without a usable TSC.
 */
static inline cycles_t get_cycles(void)
{
	unsigned long long ret = 0;

	/* runtime check only needed when TSC is not guaranteed by config */
#ifndef CONFIG_X86_TSC
	if (!cpu_has_tsc)
		return 0;
#endif

#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
	rdtscll(ret);
#endif
	return ret;
}
33 | |||
/* Like get_cycles, but make sure the CPU is synchronized. */
static __always_inline cycles_t get_cycles_sync(void)
{
	unsigned long long ret;
#ifdef X86_FEATURE_SYNC_RDTSC
	/*
	 * NOTE(review): this is a compile-time #ifdef on the feature
	 * *define*, not a runtime cpu-feature test; the runtime choice
	 * is made by alternative_io() below - confirm the define is
	 * always available where intended.
	 */
	unsigned eax;

	/*
	 * Don't do an additional sync on CPUs where we know
	 * RDTSC is already synchronous:
	 */
	alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
			  "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
#else
	sync_core();
#endif
	rdtscll(ret);

	return ret;
}
54 | |||
55 | extern void tsc_init(void); | ||
56 | extern void mark_tsc_unstable(void); | ||
57 | extern int unsynchronized_tsc(void); | ||
58 | extern void init_tsc_clocksource(void); | ||
59 | |||
60 | /* | ||
61 | * Boot-time check whether the TSCs are synchronized across | ||
62 | * all CPUs/cores: | ||
63 | */ | ||
64 | extern void check_tsc_sync_source(int cpu); | ||
65 | extern void check_tsc_sync_target(void); | ||
66 | |||
67 | #endif | ||
diff --git a/include/asm-i386/vmi_time.h b/include/asm-i386/vmi_time.h index 1f971eb7f71e..94d0a12a4114 100644 --- a/include/asm-i386/vmi_time.h +++ b/include/asm-i386/vmi_time.h | |||
@@ -61,6 +61,14 @@ extern void apic_vmi_timer_interrupt(void); | |||
61 | #ifdef CONFIG_NO_IDLE_HZ | 61 | #ifdef CONFIG_NO_IDLE_HZ |
62 | extern int vmi_stop_hz_timer(void); | 62 | extern int vmi_stop_hz_timer(void); |
63 | extern void vmi_account_time_restart_hz_timer(void); | 63 | extern void vmi_account_time_restart_hz_timer(void); |
64 | #else | ||
/* !CONFIG_NO_IDLE_HZ: no-op stubs so callers need no #ifdef guards */
static inline int vmi_stop_hz_timer(void)
{
	return 0;
}
static inline void vmi_account_time_restart_hz_timer(void)
{
}
64 | #endif | 72 | #endif |
65 | 73 | ||
66 | /* | 74 | /* |
diff --git a/include/asm-x86_64/tsc.h b/include/asm-x86_64/tsc.h index 26c3e9828288..d66ba6ef25f6 100644 --- a/include/asm-x86_64/tsc.h +++ b/include/asm-x86_64/tsc.h | |||
@@ -1,67 +1 @@ | |||
1 | /* | #include <asm-i386/tsc.h> | |
2 | * linux/include/asm-x86_64/tsc.h | ||
3 | * | ||
4 | * x86_64 TSC related functions | ||
5 | */ | ||
6 | #ifndef _ASM_x86_64_TSC_H | ||
7 | #define _ASM_x86_64_TSC_H | ||
8 | |||
9 | #include <asm/processor.h> | ||
10 | |||
11 | /* | ||
12 | * Standard way to access the cycle counter. | ||
13 | */ | ||
14 | typedef unsigned long long cycles_t; | ||
15 | |||
16 | extern unsigned int cpu_khz; | ||
17 | extern unsigned int tsc_khz; | ||
18 | |||
19 | static inline cycles_t get_cycles(void) | ||
20 | { | ||
21 | unsigned long long ret = 0; | ||
22 | |||
23 | #ifndef CONFIG_X86_TSC | ||
24 | if (!cpu_has_tsc) | ||
25 | return 0; | ||
26 | #endif | ||
27 | |||
28 | #if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC) | ||
29 | rdtscll(ret); | ||
30 | #endif | ||
31 | return ret; | ||
32 | } | ||
33 | |||
34 | /* Like get_cycles, but make sure the CPU is synchronized. */ | ||
35 | static __always_inline cycles_t get_cycles_sync(void) | ||
36 | { | ||
37 | unsigned long long ret; | ||
38 | #ifdef X86_FEATURE_SYNC_RDTSC | ||
39 | unsigned eax; | ||
40 | |||
41 | /* | ||
42 | * Don't do an additional sync on CPUs where we know | ||
43 | * RDTSC is already synchronous: | ||
44 | */ | ||
45 | alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC, | ||
46 | "=a" (eax), "0" (1) : "ebx","ecx","edx","memory"); | ||
47 | #else | ||
48 | sync_core(); | ||
49 | #endif | ||
50 | rdtscll(ret); | ||
51 | |||
52 | return ret; | ||
53 | } | ||
54 | |||
55 | extern void tsc_init(void); | ||
56 | extern void mark_tsc_unstable(void); | ||
57 | extern int unsynchronized_tsc(void); | ||
58 | extern void init_tsc_clocksource(void); | ||
59 | |||
60 | /* | ||
61 | * Boot-time check whether the TSCs are synchronized across | ||
62 | * all CPUs/cores: | ||
63 | */ | ||
64 | extern void check_tsc_sync_source(int cpu); | ||
65 | extern void check_tsc_sync_target(void); | ||
66 | |||
67 | #endif | ||
diff --git a/include/linux/audit.h b/include/linux/audit.h index 229fa012c893..773e30df11ee 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #ifndef _LINUX_AUDIT_H_ | 24 | #ifndef _LINUX_AUDIT_H_ |
25 | #define _LINUX_AUDIT_H_ | 25 | #define _LINUX_AUDIT_H_ |
26 | 26 | ||
27 | #include <linux/types.h> | ||
27 | #include <linux/elf-em.h> | 28 | #include <linux/elf-em.h> |
28 | 29 | ||
29 | /* The netlink messages for the audit system is divided into blocks: | 30 | /* The netlink messages for the audit system is divided into blocks: |
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 3bef961b58b1..5bdbc744e773 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
@@ -47,7 +47,7 @@ enum hrtimer_restart { | |||
47 | * HRTIMER_CB_IRQSAFE: Callback may run in hardirq context | 47 | * HRTIMER_CB_IRQSAFE: Callback may run in hardirq context |
48 | * HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and | 48 | * HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and |
49 | * does not restart the timer | 49 | * does not restart the timer |
50 | * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in softirq context | 50 | * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in hardirq context |
51 | * Special mode for tick emultation | 51 | * Special mode for tick emultation |
52 | */ | 52 | */ |
53 | enum hrtimer_cb_mode { | 53 | enum hrtimer_cb_mode { |
@@ -139,7 +139,7 @@ struct hrtimer_sleeper { | |||
139 | }; | 139 | }; |
140 | 140 | ||
141 | /** | 141 | /** |
142 | * struct hrtimer_base - the timer base for a specific clock | 142 | * struct hrtimer_clock_base - the timer base for a specific clock |
143 | * @cpu_base: per cpu clock base | 143 | * @cpu_base: per cpu clock base |
144 | * @index: clock type index for per_cpu support when moving a | 144 | * @index: clock type index for per_cpu support when moving a |
145 | * timer to a base on another cpu. | 145 | * timer to a base on another cpu. |
diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 9dbb525c5178..a113fe68d8a1 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h | |||
@@ -218,5 +218,7 @@ extern void ip_mc_up(struct in_device *); | |||
218 | extern void ip_mc_down(struct in_device *); | 218 | extern void ip_mc_down(struct in_device *); |
219 | extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); | 219 | extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); |
220 | extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); | 220 | extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); |
221 | extern void ip_mc_rejoin_group(struct ip_mc_list *im); | ||
222 | |||
221 | #endif | 223 | #endif |
222 | #endif | 224 | #endif |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 913e5752569f..bfcef8a1ad8b 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
@@ -62,6 +62,12 @@ struct mmc_ios { | |||
62 | 62 | ||
63 | #define MMC_BUS_WIDTH_1 0 | 63 | #define MMC_BUS_WIDTH_1 0 |
64 | #define MMC_BUS_WIDTH_4 2 | 64 | #define MMC_BUS_WIDTH_4 2 |
65 | |||
66 | unsigned char timing; /* timing specification used */ | ||
67 | |||
68 | #define MMC_TIMING_LEGACY 0 | ||
69 | #define MMC_TIMING_MMC_HS 1 | ||
70 | #define MMC_TIMING_SD_HS 2 | ||
65 | }; | 71 | }; |
66 | 72 | ||
67 | struct mmc_host_ops { | 73 | struct mmc_host_ops { |
@@ -87,6 +93,8 @@ struct mmc_host { | |||
87 | #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */ | 93 | #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */ |
88 | #define MMC_CAP_MULTIWRITE (1 << 1) /* Can accurately report bytes sent to card on error */ | 94 | #define MMC_CAP_MULTIWRITE (1 << 1) /* Can accurately report bytes sent to card on error */ |
89 | #define MMC_CAP_BYTEBLOCK (1 << 2) /* Can do non-log2 block sizes */ | 95 | #define MMC_CAP_BYTEBLOCK (1 << 2) /* Can do non-log2 block sizes */ |
96 | #define MMC_CAP_MMC_HIGHSPEED (1 << 3) /* Can do MMC high-speed timing */ | ||
97 | #define MMC_CAP_SD_HIGHSPEED (1 << 4) /* Can do SD high-speed timing */ | ||
90 | 98 | ||
91 | /* host specific block data */ | 99 | /* host specific block data */ |
92 | unsigned int max_seg_size; /* see blk_queue_max_segment_size */ | 100 | unsigned int max_seg_size; /* see blk_queue_max_segment_size */ |
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h index e7d4da1cc9fa..c6d4ab86b83c 100644 --- a/include/linux/mv643xx.h +++ b/include/linux/mv643xx.h | |||
@@ -1288,6 +1288,7 @@ struct mv64xxx_i2c_pdata { | |||
1288 | #define MV643XX_ETH_NAME "mv643xx_eth" | 1288 | #define MV643XX_ETH_NAME "mv643xx_eth" |
1289 | 1289 | ||
1290 | struct mv643xx_eth_platform_data { | 1290 | struct mv643xx_eth_platform_data { |
1291 | int port_number; | ||
1291 | u16 force_phy_addr; /* force override if phy_addr == 0 */ | 1292 | u16 force_phy_addr; /* force override if phy_addr == 0 */ |
1292 | u16 phy_addr; | 1293 | u16 phy_addr; |
1293 | 1294 | ||
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h index a503052138bd..6330fc76b00f 100644 --- a/include/linux/ncp_fs_sb.h +++ b/include/linux/ncp_fs_sb.h | |||
@@ -50,6 +50,8 @@ struct ncp_server { | |||
50 | int packet_size; | 50 | int packet_size; |
51 | unsigned char *packet; /* Here we prepare requests and | 51 | unsigned char *packet; /* Here we prepare requests and |
52 | receive replies */ | 52 | receive replies */ |
53 | unsigned char *txbuf; /* Storage for current request */ | ||
54 | unsigned char *rxbuf; /* Storage for reply to current request */ | ||
53 | 55 | ||
54 | int lock; /* To prevent mismatch in protocols. */ | 56 | int lock; /* To prevent mismatch in protocols. */ |
55 | struct mutex mutex; | 57 | struct mutex mutex; |
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 83b3c7b433aa..35fa4d5aadd0 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h | |||
@@ -194,9 +194,7 @@ static inline void svc_putu32(struct kvec *iov, __be32 val) | |||
194 | 194 | ||
195 | union svc_addr_u { | 195 | union svc_addr_u { |
196 | struct in_addr addr; | 196 | struct in_addr addr; |
197 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
198 | struct in6_addr addr6; | 197 | struct in6_addr addr6; |
199 | #endif | ||
200 | }; | 198 | }; |
201 | 199 | ||
202 | /* | 200 | /* |
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index cccea0a0feb4..7909687557bf 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h | |||
@@ -66,7 +66,7 @@ struct svc_sock { | |||
66 | * Function prototypes. | 66 | * Function prototypes. |
67 | */ | 67 | */ |
68 | int svc_makesock(struct svc_serv *, int, unsigned short, int flags); | 68 | int svc_makesock(struct svc_serv *, int, unsigned short, int flags); |
69 | void svc_close_socket(struct svc_sock *); | 69 | void svc_force_close_socket(struct svc_sock *); |
70 | int svc_recv(struct svc_rqst *, long); | 70 | int svc_recv(struct svc_rqst *, long); |
71 | int svc_send(struct svc_rqst *); | 71 | int svc_send(struct svc_rqst *); |
72 | void svc_drop(struct svc_rqst *); | 72 | void svc_drop(struct svc_rqst *); |
diff --git a/init/Kconfig b/init/Kconfig index f977086e118a..b170aa1d43bd 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -304,6 +304,22 @@ config RELAY | |||
304 | 304 | ||
305 | If unsure, say N. | 305 | If unsure, say N. |
306 | 306 | ||
307 | config BLK_DEV_INITRD | ||
308 | bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support" | ||
309 | depends on BROKEN || !FRV | ||
310 | help | ||
311 | The initial RAM filesystem is a ramfs which is loaded by the | ||
312 | boot loader (loadlin or lilo) and that is mounted as root | ||
313 | before the normal boot procedure. It is typically used to | ||
314 | load modules needed to mount the "real" root file system, | ||
315 | etc. See <file:Documentation/initrd.txt> for details. | ||
316 | |||
317 | If RAM disk support (BLK_DEV_RAM) is also included, this | ||
318 | also enables initial RAM disk (initrd) support and adds | ||
319 | 15 Kbytes (more on some other architectures) to the kernel size. | ||
320 | |||
321 | If unsure say Y. | ||
322 | |||
307 | if BLK_DEV_INITRD | 323 | if BLK_DEV_INITRD |
308 | 324 | ||
309 | source "usr/Kconfig" | 325 | source "usr/Kconfig" |
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 0b5ecbe5f045..554ac368be79 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c | |||
@@ -731,7 +731,8 @@ asmlinkage long sys_mq_unlink(const char __user *u_name) | |||
731 | if (IS_ERR(name)) | 731 | if (IS_ERR(name)) |
732 | return PTR_ERR(name); | 732 | return PTR_ERR(name); |
733 | 733 | ||
734 | mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex); | 734 | mutex_lock_nested(&mqueue_mnt->mnt_root->d_inode->i_mutex, |
735 | I_MUTEX_PARENT); | ||
735 | dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name)); | 736 | dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name)); |
736 | if (IS_ERR(dentry)) { | 737 | if (IS_ERR(dentry)) { |
737 | err = PTR_ERR(dentry); | 738 | err = PTR_ERR(dentry); |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index de93a8176ca6..ec4cb9f3e3b7 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -540,19 +540,19 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, | |||
540 | /* | 540 | /* |
541 | * Switch to high resolution mode | 541 | * Switch to high resolution mode |
542 | */ | 542 | */ |
543 | static void hrtimer_switch_to_hres(void) | 543 | static int hrtimer_switch_to_hres(void) |
544 | { | 544 | { |
545 | struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases); | 545 | struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases); |
546 | unsigned long flags; | 546 | unsigned long flags; |
547 | 547 | ||
548 | if (base->hres_active) | 548 | if (base->hres_active) |
549 | return; | 549 | return 1; |
550 | 550 | ||
551 | local_irq_save(flags); | 551 | local_irq_save(flags); |
552 | 552 | ||
553 | if (tick_init_highres()) { | 553 | if (tick_init_highres()) { |
554 | local_irq_restore(flags); | 554 | local_irq_restore(flags); |
555 | return; | 555 | return 0; |
556 | } | 556 | } |
557 | base->hres_active = 1; | 557 | base->hres_active = 1; |
558 | base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES; | 558 | base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES; |
@@ -565,13 +565,14 @@ static void hrtimer_switch_to_hres(void) | |||
565 | local_irq_restore(flags); | 565 | local_irq_restore(flags); |
566 | printk(KERN_INFO "Switched to high resolution mode on CPU %d\n", | 566 | printk(KERN_INFO "Switched to high resolution mode on CPU %d\n", |
567 | smp_processor_id()); | 567 | smp_processor_id()); |
568 | return 1; | ||
568 | } | 569 | } |
569 | 570 | ||
570 | #else | 571 | #else |
571 | 572 | ||
572 | static inline int hrtimer_hres_active(void) { return 0; } | 573 | static inline int hrtimer_hres_active(void) { return 0; } |
573 | static inline int hrtimer_is_hres_enabled(void) { return 0; } | 574 | static inline int hrtimer_is_hres_enabled(void) { return 0; } |
574 | static inline void hrtimer_switch_to_hres(void) { } | 575 | static inline int hrtimer_switch_to_hres(void) { return 0; } |
575 | static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { } | 576 | static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { } |
576 | static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, | 577 | static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, |
577 | struct hrtimer_clock_base *base) | 578 | struct hrtimer_clock_base *base) |
@@ -1130,6 +1131,9 @@ static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base, | |||
1130 | if (base->softirq_time.tv64 <= timer->expires.tv64) | 1131 | if (base->softirq_time.tv64 <= timer->expires.tv64) |
1131 | break; | 1132 | break; |
1132 | 1133 | ||
1134 | #ifdef CONFIG_HIGH_RES_TIMERS | ||
1135 | WARN_ON_ONCE(timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ); | ||
1136 | #endif | ||
1133 | timer_stats_account_hrtimer(timer); | 1137 | timer_stats_account_hrtimer(timer); |
1134 | 1138 | ||
1135 | fn = timer->function; | 1139 | fn = timer->function; |
@@ -1173,7 +1177,8 @@ void hrtimer_run_queues(void) | |||
1173 | * deadlock vs. xtime_lock. | 1177 | * deadlock vs. xtime_lock. |
1174 | */ | 1178 | */ |
1175 | if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) | 1179 | if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) |
1176 | hrtimer_switch_to_hres(); | 1180 | if (hrtimer_switch_to_hres()) |
1181 | return; | ||
1177 | 1182 | ||
1178 | hrtimer_get_softirq_time(cpu_base); | 1183 | hrtimer_get_softirq_time(cpu_base); |
1179 | 1184 | ||
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 95f6657fff73..51a4dd0f1b74 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig | |||
@@ -81,29 +81,34 @@ config SOFTWARE_SUSPEND | |||
81 | bool "Software Suspend" | 81 | bool "Software Suspend" |
82 | depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)) | 82 | depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)) |
83 | ---help--- | 83 | ---help--- |
84 | Enable the possibility of suspending the machine. | 84 | Enable the suspend to disk (STD) functionality. |
85 | It doesn't need ACPI or APM. | ||
86 | You may suspend your machine by 'swsusp' or 'shutdown -z <time>' | ||
87 | (patch for sysvinit needed). | ||
88 | 85 | ||
89 | It creates an image which is saved in your active swap. Upon next | 86 | You can suspend your machine with 'echo disk > /sys/power/state'. |
87 | Alternatively, you can use the additional userland tools available | ||
88 | from <http://suspend.sf.net>. | ||
89 | |||
90 | In principle it does not require ACPI or APM, although for example | ||
91 | ACPI will be used if available. | ||
92 | |||
93 | It creates an image which is saved in your active swap. Upon the next | ||
90 | boot, pass the 'resume=/dev/swappartition' argument to the kernel to | 94 | boot, pass the 'resume=/dev/swappartition' argument to the kernel to |
91 | have it detect the saved image, restore memory state from it, and | 95 | have it detect the saved image, restore memory state from it, and |
92 | continue to run as before. If you do not want the previous state to | 96 | continue to run as before. If you do not want the previous state to |
93 | be reloaded, then use the 'noresume' kernel argument. However, note | 97 | be reloaded, then use the 'noresume' kernel command line argument. |
94 | that your partitions will be fsck'd and you must re-mkswap your swap | 98 | Note, however, that fsck will be run on your filesystems and you will |
95 | partitions. It does not work with swap files. | 99 | need to run mkswap against the swap partition used for the suspend. |
96 | 100 | ||
97 | Right now you may boot without resuming and then later resume but | 101 | It also works with swap files to a limited extent (for details see |
98 | in meantime you cannot use those swap partitions/files which were | 102 | <file:Documentation/power/swsusp-and-swap-files.txt>). |
99 | involved in suspending. Also in this case there is a risk that buffers | ||
100 | on disk won't match with saved ones. | ||
101 | 103 | ||
102 | For more information take a look at <file:Documentation/power/swsusp.txt>. | 104 | Right now you may boot without resuming and resume later but in the |
105 | meantime you cannot use the swap partition(s)/file(s) involved in | ||
106 | suspending. Also in this case you must not use the filesystems | ||
107 | that were mounted before the suspend. In particular, you MUST NOT | ||
108 | MOUNT any journaled filesystems mounted before the suspend or they | ||
109 | will get corrupted in a nasty way. | ||
103 | 110 | ||
104 | (For now, swsusp is incompatible with PAE aka HIGHMEM_64G on i386. | 111 | For more information take a look at <file:Documentation/power/swsusp.txt>. |
105 | we need identity mapping for resume to work, and that is trivial | ||
106 | to get with 4MB pages, but less than trivial on PAE). | ||
107 | 112 | ||
108 | config PM_STD_PARTITION | 113 | config PM_STD_PARTITION |
109 | string "Default resume partition" | 114 | string "Default resume partition" |
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 482b11ff65cb..bcd14e83ef39 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
@@ -60,19 +60,19 @@ static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */ | |||
60 | static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/ | 60 | static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/ |
61 | static char *torture_type = "rcu"; /* What RCU implementation to torture. */ | 61 | static char *torture_type = "rcu"; /* What RCU implementation to torture. */ |
62 | 62 | ||
63 | module_param(nreaders, int, 0); | 63 | module_param(nreaders, int, 0444); |
64 | MODULE_PARM_DESC(nreaders, "Number of RCU reader threads"); | 64 | MODULE_PARM_DESC(nreaders, "Number of RCU reader threads"); |
65 | module_param(nfakewriters, int, 0); | 65 | module_param(nfakewriters, int, 0444); |
66 | MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads"); | 66 | MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads"); |
67 | module_param(stat_interval, int, 0); | 67 | module_param(stat_interval, int, 0444); |
68 | MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s"); | 68 | MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s"); |
69 | module_param(verbose, bool, 0); | 69 | module_param(verbose, bool, 0444); |
70 | MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s"); | 70 | MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s"); |
71 | module_param(test_no_idle_hz, bool, 0); | 71 | module_param(test_no_idle_hz, bool, 0444); |
72 | MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs"); | 72 | MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs"); |
73 | module_param(shuffle_interval, int, 0); | 73 | module_param(shuffle_interval, int, 0444); |
74 | MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles"); | 74 | MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles"); |
75 | module_param(torture_type, charp, 0); | 75 | module_param(torture_type, charp, 0444); |
76 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)"); | 76 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)"); |
77 | 77 | ||
78 | #define TORTURE_FLAG "-torture:" | 78 | #define TORTURE_FLAG "-torture:" |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 12b3efeb9f6f..5567745470f7 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -284,6 +284,42 @@ void tick_shutdown_broadcast(unsigned int *cpup) | |||
284 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 284 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
285 | } | 285 | } |
286 | 286 | ||
287 | void tick_suspend_broadcast(void) | ||
288 | { | ||
289 | struct clock_event_device *bc; | ||
290 | unsigned long flags; | ||
291 | |||
292 | spin_lock_irqsave(&tick_broadcast_lock, flags); | ||
293 | |||
294 | bc = tick_broadcast_device.evtdev; | ||
295 | if (bc && tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) | ||
296 | clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); | ||
297 | |||
298 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||
299 | } | ||
300 | |||
301 | int tick_resume_broadcast(void) | ||
302 | { | ||
303 | struct clock_event_device *bc; | ||
304 | unsigned long flags; | ||
305 | int broadcast = 0; | ||
306 | |||
307 | spin_lock_irqsave(&tick_broadcast_lock, flags); | ||
308 | |||
309 | bc = tick_broadcast_device.evtdev; | ||
310 | if (bc) { | ||
311 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC && | ||
312 | !cpus_empty(tick_broadcast_mask)) | ||
313 | tick_broadcast_start_periodic(bc); | ||
314 | |||
315 | broadcast = cpu_isset(smp_processor_id(), tick_broadcast_mask); | ||
316 | } | ||
317 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||
318 | |||
319 | return broadcast; | ||
320 | } | ||
321 | |||
322 | |||
287 | #ifdef CONFIG_TICK_ONESHOT | 323 | #ifdef CONFIG_TICK_ONESHOT |
288 | 324 | ||
289 | static cpumask_t tick_broadcast_oneshot_mask; | 325 | static cpumask_t tick_broadcast_oneshot_mask; |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 0986a2bfab49..43ba1bdec14c 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -298,6 +298,28 @@ static void tick_shutdown(unsigned int *cpup) | |||
298 | spin_unlock_irqrestore(&tick_device_lock, flags); | 298 | spin_unlock_irqrestore(&tick_device_lock, flags); |
299 | } | 299 | } |
300 | 300 | ||
301 | static void tick_suspend_periodic(void) | ||
302 | { | ||
303 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); | ||
304 | unsigned long flags; | ||
305 | |||
306 | spin_lock_irqsave(&tick_device_lock, flags); | ||
307 | if (td->mode == TICKDEV_MODE_PERIODIC) | ||
308 | clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN); | ||
309 | spin_unlock_irqrestore(&tick_device_lock, flags); | ||
310 | } | ||
311 | |||
312 | static void tick_resume_periodic(void) | ||
313 | { | ||
314 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); | ||
315 | unsigned long flags; | ||
316 | |||
317 | spin_lock_irqsave(&tick_device_lock, flags); | ||
318 | if (td->mode == TICKDEV_MODE_PERIODIC) | ||
319 | tick_setup_periodic(td->evtdev, 0); | ||
320 | spin_unlock_irqrestore(&tick_device_lock, flags); | ||
321 | } | ||
322 | |||
301 | /* | 323 | /* |
302 | * Notification about clock event devices | 324 | * Notification about clock event devices |
303 | */ | 325 | */ |
@@ -325,6 +347,16 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason, | |||
325 | tick_shutdown(dev); | 347 | tick_shutdown(dev); |
326 | break; | 348 | break; |
327 | 349 | ||
350 | case CLOCK_EVT_NOTIFY_SUSPEND: | ||
351 | tick_suspend_periodic(); | ||
352 | tick_suspend_broadcast(); | ||
353 | break; | ||
354 | |||
355 | case CLOCK_EVT_NOTIFY_RESUME: | ||
356 | if (!tick_resume_broadcast()) | ||
357 | tick_resume_periodic(); | ||
358 | break; | ||
359 | |||
328 | default: | 360 | default: |
329 | break; | 361 | break; |
330 | } | 362 | } |
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 54861a0f29ff..75890efd24ff 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
@@ -67,6 +67,8 @@ extern int tick_check_broadcast_device(struct clock_event_device *dev); | |||
67 | extern int tick_is_broadcast_device(struct clock_event_device *dev); | 67 | extern int tick_is_broadcast_device(struct clock_event_device *dev); |
68 | extern void tick_broadcast_on_off(unsigned long reason, int *oncpu); | 68 | extern void tick_broadcast_on_off(unsigned long reason, int *oncpu); |
69 | extern void tick_shutdown_broadcast(unsigned int *cpup); | 69 | extern void tick_shutdown_broadcast(unsigned int *cpup); |
70 | extern void tick_suspend_broadcast(void); | ||
71 | extern int tick_resume_broadcast(void); | ||
70 | 72 | ||
71 | extern void | 73 | extern void |
72 | tick_set_periodic_handler(struct clock_event_device *dev, int broadcast); | 74 | tick_set_periodic_handler(struct clock_event_device *dev, int broadcast); |
@@ -90,6 +92,8 @@ static inline int tick_device_uses_broadcast(struct clock_event_device *dev, | |||
90 | static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { } | 92 | static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { } |
91 | static inline void tick_broadcast_on_off(unsigned long reason, int *oncpu) { } | 93 | static inline void tick_broadcast_on_off(unsigned long reason, int *oncpu) { } |
92 | static inline void tick_shutdown_broadcast(unsigned int *cpup) { } | 94 | static inline void tick_shutdown_broadcast(unsigned int *cpup) { } |
95 | static inline void tick_suspend_broadcast(void) { } | ||
96 | static inline int tick_resume_broadcast(void) { return 0; } | ||
93 | 97 | ||
94 | /* | 98 | /* |
95 | * Set the periodic handler in non broadcast mode | 99 | * Set the periodic handler in non broadcast mode |
diff --git a/kernel/timer.c b/kernel/timer.c index 8ad384253ef2..797cccb86431 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -862,6 +862,8 @@ int do_settimeofday(struct timespec *tv) | |||
862 | clock->error = 0; | 862 | clock->error = 0; |
863 | ntp_clear(); | 863 | ntp_clear(); |
864 | 864 | ||
865 | update_vsyscall(&xtime, clock); | ||
866 | |||
865 | write_sequnlock_irqrestore(&xtime_lock, flags); | 867 | write_sequnlock_irqrestore(&xtime_lock, flags); |
866 | 868 | ||
867 | /* signal hrtimers about time change */ | 869 | /* signal hrtimers about time change */ |
@@ -997,6 +999,9 @@ static int timekeeping_resume(struct sys_device *dev) | |||
997 | write_sequnlock_irqrestore(&xtime_lock, flags); | 999 | write_sequnlock_irqrestore(&xtime_lock, flags); |
998 | 1000 | ||
999 | touch_softlockup_watchdog(); | 1001 | touch_softlockup_watchdog(); |
1002 | |||
1003 | clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL); | ||
1004 | |||
1000 | /* Resume hrtimers */ | 1005 | /* Resume hrtimers */ |
1001 | clock_was_set(); | 1006 | clock_was_set(); |
1002 | 1007 | ||
@@ -1011,6 +1016,9 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state) | |||
1011 | timekeeping_suspended = 1; | 1016 | timekeeping_suspended = 1; |
1012 | timekeeping_suspend_time = read_persistent_clock(); | 1017 | timekeeping_suspend_time = read_persistent_clock(); |
1013 | write_sequnlock_irqrestore(&xtime_lock, flags); | 1018 | write_sequnlock_irqrestore(&xtime_lock, flags); |
1019 | |||
1020 | clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); | ||
1021 | |||
1014 | return 0; | 1022 | return 0; |
1015 | } | 1023 | } |
1016 | 1024 | ||
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 063721302ebf..1c6a084b5fb7 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -1251,6 +1251,28 @@ out: | |||
1251 | } | 1251 | } |
1252 | 1252 | ||
1253 | /* | 1253 | /* |
1254 | * Resend IGMP JOIN report; used for bonding. | ||
1255 | */ | ||
1256 | void ip_mc_rejoin_group(struct ip_mc_list *im) | ||
1257 | { | ||
1258 | struct in_device *in_dev = im->interface; | ||
1259 | |||
1260 | #ifdef CONFIG_IP_MULTICAST | ||
1261 | if (im->multiaddr == IGMP_ALL_HOSTS) | ||
1262 | return; | ||
1263 | |||
1264 | if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) { | ||
1265 | igmp_mod_timer(im, IGMP_Initial_Report_Delay); | ||
1266 | return; | ||
1267 | } | ||
1268 | /* else, v3 */ | ||
1269 | im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : | ||
1270 | IGMP_Unsolicited_Report_Count; | ||
1271 | igmp_ifc_event(in_dev); | ||
1272 | #endif | ||
1273 | } | ||
1274 | |||
1275 | /* | ||
1254 | * A socket has left a multicast group on device dev | 1276 | * A socket has left a multicast group on device dev |
1255 | */ | 1277 | */ |
1256 | 1278 | ||
@@ -2596,3 +2618,4 @@ int __init igmp_mc_proc_init(void) | |||
2596 | EXPORT_SYMBOL(ip_mc_dec_group); | 2618 | EXPORT_SYMBOL(ip_mc_dec_group); |
2597 | EXPORT_SYMBOL(ip_mc_inc_group); | 2619 | EXPORT_SYMBOL(ip_mc_inc_group); |
2598 | EXPORT_SYMBOL(ip_mc_join_group); | 2620 | EXPORT_SYMBOL(ip_mc_join_group); |
2621 | EXPORT_SYMBOL(ip_mc_rejoin_group); | ||
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 8353829bc5c6..b4db53ff1435 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -27,22 +27,26 @@ | |||
27 | 27 | ||
28 | #define RPCDBG_FACILITY RPCDBG_SVCDSP | 28 | #define RPCDBG_FACILITY RPCDBG_SVCDSP |
29 | 29 | ||
30 | #define svc_serv_is_pooled(serv) ((serv)->sv_function) | ||
31 | |||
30 | /* | 32 | /* |
31 | * Mode for mapping cpus to pools. | 33 | * Mode for mapping cpus to pools. |
32 | */ | 34 | */ |
33 | enum { | 35 | enum { |
34 | SVC_POOL_NONE = -1, /* uninitialised, choose one of the others */ | 36 | SVC_POOL_AUTO = -1, /* choose one of the others */ |
35 | SVC_POOL_GLOBAL, /* no mapping, just a single global pool | 37 | SVC_POOL_GLOBAL, /* no mapping, just a single global pool |
36 | * (legacy & UP mode) */ | 38 | * (legacy & UP mode) */ |
37 | SVC_POOL_PERCPU, /* one pool per cpu */ | 39 | SVC_POOL_PERCPU, /* one pool per cpu */ |
38 | SVC_POOL_PERNODE /* one pool per numa node */ | 40 | SVC_POOL_PERNODE /* one pool per numa node */ |
39 | }; | 41 | }; |
42 | #define SVC_POOL_DEFAULT SVC_POOL_GLOBAL | ||
40 | 43 | ||
41 | /* | 44 | /* |
42 | * Structure for mapping cpus to pools and vice versa. | 45 | * Structure for mapping cpus to pools and vice versa. |
43 | * Setup once during sunrpc initialisation. | 46 | * Setup once during sunrpc initialisation. |
44 | */ | 47 | */ |
45 | static struct svc_pool_map { | 48 | static struct svc_pool_map { |
49 | int count; /* How many svc_servs use us */ | ||
46 | int mode; /* Note: int not enum to avoid | 50 | int mode; /* Note: int not enum to avoid |
47 | * warnings about "enumeration value | 51 | * warnings about "enumeration value |
48 | * not handled in switch" */ | 52 | * not handled in switch" */ |
@@ -50,9 +54,63 @@ static struct svc_pool_map { | |||
50 | unsigned int *pool_to; /* maps pool id to cpu or node */ | 54 | unsigned int *pool_to; /* maps pool id to cpu or node */ |
51 | unsigned int *to_pool; /* maps cpu or node to pool id */ | 55 | unsigned int *to_pool; /* maps cpu or node to pool id */ |
52 | } svc_pool_map = { | 56 | } svc_pool_map = { |
53 | .mode = SVC_POOL_NONE | 57 | .count = 0, |
58 | .mode = SVC_POOL_DEFAULT | ||
54 | }; | 59 | }; |
60 | static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */ | ||
61 | |||
62 | static int | ||
63 | param_set_pool_mode(const char *val, struct kernel_param *kp) | ||
64 | { | ||
65 | int *ip = (int *)kp->arg; | ||
66 | struct svc_pool_map *m = &svc_pool_map; | ||
67 | int err; | ||
68 | |||
69 | mutex_lock(&svc_pool_map_mutex); | ||
70 | |||
71 | err = -EBUSY; | ||
72 | if (m->count) | ||
73 | goto out; | ||
74 | |||
75 | err = 0; | ||
76 | if (!strncmp(val, "auto", 4)) | ||
77 | *ip = SVC_POOL_AUTO; | ||
78 | else if (!strncmp(val, "global", 6)) | ||
79 | *ip = SVC_POOL_GLOBAL; | ||
80 | else if (!strncmp(val, "percpu", 6)) | ||
81 | *ip = SVC_POOL_PERCPU; | ||
82 | else if (!strncmp(val, "pernode", 7)) | ||
83 | *ip = SVC_POOL_PERNODE; | ||
84 | else | ||
85 | err = -EINVAL; | ||
86 | |||
87 | out: | ||
88 | mutex_unlock(&svc_pool_map_mutex); | ||
89 | return err; | ||
90 | } | ||
55 | 91 | ||
92 | static int | ||
93 | param_get_pool_mode(char *buf, struct kernel_param *kp) | ||
94 | { | ||
95 | int *ip = (int *)kp->arg; | ||
96 | |||
97 | switch (*ip) | ||
98 | { | ||
99 | case SVC_POOL_AUTO: | ||
100 | return strlcpy(buf, "auto", 20); | ||
101 | case SVC_POOL_GLOBAL: | ||
102 | return strlcpy(buf, "global", 20); | ||
103 | case SVC_POOL_PERCPU: | ||
104 | return strlcpy(buf, "percpu", 20); | ||
105 | case SVC_POOL_PERNODE: | ||
106 | return strlcpy(buf, "pernode", 20); | ||
107 | default: | ||
108 | return sprintf(buf, "%d", *ip); | ||
109 | } | ||
110 | } | ||
111 | |||
112 | module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode, | ||
113 | &svc_pool_map.mode, 0644); | ||
56 | 114 | ||
57 | /* | 115 | /* |
58 | * Detect best pool mapping mode heuristically, | 116 | * Detect best pool mapping mode heuristically, |
@@ -166,18 +224,25 @@ svc_pool_map_init_pernode(struct svc_pool_map *m) | |||
166 | 224 | ||
167 | 225 | ||
168 | /* | 226 | /* |
169 | * Build the global map of cpus to pools and vice versa. | 227 | * Add a reference to the global map of cpus to pools (and |
228 | * vice versa). Initialise the map if we're the first user. | ||
229 | * Returns the number of pools. | ||
170 | */ | 230 | */ |
171 | static unsigned int | 231 | static unsigned int |
172 | svc_pool_map_init(void) | 232 | svc_pool_map_get(void) |
173 | { | 233 | { |
174 | struct svc_pool_map *m = &svc_pool_map; | 234 | struct svc_pool_map *m = &svc_pool_map; |
175 | int npools = -1; | 235 | int npools = -1; |
176 | 236 | ||
177 | if (m->mode != SVC_POOL_NONE) | 237 | mutex_lock(&svc_pool_map_mutex); |
238 | |||
239 | if (m->count++) { | ||
240 | mutex_unlock(&svc_pool_map_mutex); | ||
178 | return m->npools; | 241 | return m->npools; |
242 | } | ||
179 | 243 | ||
180 | m->mode = svc_pool_map_choose_mode(); | 244 | if (m->mode == SVC_POOL_AUTO) |
245 | m->mode = svc_pool_map_choose_mode(); | ||
181 | 246 | ||
182 | switch (m->mode) { | 247 | switch (m->mode) { |
183 | case SVC_POOL_PERCPU: | 248 | case SVC_POOL_PERCPU: |
@@ -195,9 +260,36 @@ svc_pool_map_init(void) | |||
195 | } | 260 | } |
196 | m->npools = npools; | 261 | m->npools = npools; |
197 | 262 | ||
263 | mutex_unlock(&svc_pool_map_mutex); | ||
198 | return m->npools; | 264 | return m->npools; |
199 | } | 265 | } |
200 | 266 | ||
267 | |||
268 | /* | ||
269 | * Drop a reference to the global map of cpus to pools. | ||
270 | * When the last reference is dropped, the map data is | ||
271 | * freed; this allows the sysadmin to change the pool | ||
272 | * mode using the pool_mode module option without | ||
273 | * rebooting or re-loading sunrpc.ko. | ||
274 | */ | ||
275 | static void | ||
276 | svc_pool_map_put(void) | ||
277 | { | ||
278 | struct svc_pool_map *m = &svc_pool_map; | ||
279 | |||
280 | mutex_lock(&svc_pool_map_mutex); | ||
281 | |||
282 | if (!--m->count) { | ||
283 | m->mode = SVC_POOL_DEFAULT; | ||
284 | kfree(m->to_pool); | ||
285 | kfree(m->pool_to); | ||
286 | m->npools = 0; | ||
287 | } | ||
288 | |||
289 | mutex_unlock(&svc_pool_map_mutex); | ||
290 | } | ||
291 | |||
292 | |||
201 | /* | 293 | /* |
202 | * Set the current thread's cpus_allowed mask so that it | 294 | * Set the current thread's cpus_allowed mask so that it |
203 | * will only run on cpus in the given pool. | 295 | * will only run on cpus in the given pool. |
@@ -212,10 +304,9 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask) | |||
212 | 304 | ||
213 | /* | 305 | /* |
214 | * The caller checks for sv_nrpools > 1, which | 306 | * The caller checks for sv_nrpools > 1, which |
215 | * implies that we've been initialized and the | 307 | * implies that we've been initialized. |
216 | * map mode is not NONE. | ||
217 | */ | 308 | */ |
218 | BUG_ON(m->mode == SVC_POOL_NONE); | 309 | BUG_ON(m->count == 0); |
219 | 310 | ||
220 | switch (m->mode) | 311 | switch (m->mode) |
221 | { | 312 | { |
@@ -246,18 +337,19 @@ svc_pool_for_cpu(struct svc_serv *serv, int cpu) | |||
246 | unsigned int pidx = 0; | 337 | unsigned int pidx = 0; |
247 | 338 | ||
248 | /* | 339 | /* |
249 | * SVC_POOL_NONE happens in a pure client when | 340 | * An uninitialised map happens in a pure client when |
250 | * lockd is brought up, so silently treat it the | 341 | * lockd is brought up, so silently treat it the |
251 | * same as SVC_POOL_GLOBAL. | 342 | * same as SVC_POOL_GLOBAL. |
252 | */ | 343 | */ |
253 | 344 | if (svc_serv_is_pooled(serv)) { | |
254 | switch (m->mode) { | 345 | switch (m->mode) { |
255 | case SVC_POOL_PERCPU: | 346 | case SVC_POOL_PERCPU: |
256 | pidx = m->to_pool[cpu]; | 347 | pidx = m->to_pool[cpu]; |
257 | break; | 348 | break; |
258 | case SVC_POOL_PERNODE: | 349 | case SVC_POOL_PERNODE: |
259 | pidx = m->to_pool[cpu_to_node(cpu)]; | 350 | pidx = m->to_pool[cpu_to_node(cpu)]; |
260 | break; | 351 | break; |
352 | } | ||
261 | } | 353 | } |
262 | return &serv->sv_pools[pidx % serv->sv_nrpools]; | 354 | return &serv->sv_pools[pidx % serv->sv_nrpools]; |
263 | } | 355 | } |
@@ -347,7 +439,7 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize, | |||
347 | svc_thread_fn func, int sig, struct module *mod) | 439 | svc_thread_fn func, int sig, struct module *mod) |
348 | { | 440 | { |
349 | struct svc_serv *serv; | 441 | struct svc_serv *serv; |
350 | unsigned int npools = svc_pool_map_init(); | 442 | unsigned int npools = svc_pool_map_get(); |
351 | 443 | ||
352 | serv = __svc_create(prog, bufsize, npools, shutdown); | 444 | serv = __svc_create(prog, bufsize, npools, shutdown); |
353 | 445 | ||
@@ -367,6 +459,7 @@ void | |||
367 | svc_destroy(struct svc_serv *serv) | 459 | svc_destroy(struct svc_serv *serv) |
368 | { | 460 | { |
369 | struct svc_sock *svsk; | 461 | struct svc_sock *svsk; |
462 | struct svc_sock *tmp; | ||
370 | 463 | ||
371 | dprintk("svc: svc_destroy(%s, %d)\n", | 464 | dprintk("svc: svc_destroy(%s, %d)\n", |
372 | serv->sv_program->pg_name, | 465 | serv->sv_program->pg_name, |
@@ -382,24 +475,23 @@ svc_destroy(struct svc_serv *serv) | |||
382 | 475 | ||
383 | del_timer_sync(&serv->sv_temptimer); | 476 | del_timer_sync(&serv->sv_temptimer); |
384 | 477 | ||
385 | while (!list_empty(&serv->sv_tempsocks)) { | 478 | list_for_each_entry_safe(svsk, tmp, &serv->sv_tempsocks, sk_list) |
386 | svsk = list_entry(serv->sv_tempsocks.next, | 479 | svc_force_close_socket(svsk); |
387 | struct svc_sock, | 480 | |
388 | sk_list); | ||
389 | svc_close_socket(svsk); | ||
390 | } | ||
391 | if (serv->sv_shutdown) | 481 | if (serv->sv_shutdown) |
392 | serv->sv_shutdown(serv); | 482 | serv->sv_shutdown(serv); |
393 | 483 | ||
394 | while (!list_empty(&serv->sv_permsocks)) { | 484 | list_for_each_entry_safe(svsk, tmp, &serv->sv_permsocks, sk_list) |
395 | svsk = list_entry(serv->sv_permsocks.next, | 485 | svc_force_close_socket(svsk); |
396 | struct svc_sock, | 486 | |
397 | sk_list); | 487 | BUG_ON(!list_empty(&serv->sv_permsocks)); |
398 | svc_close_socket(svsk); | 488 | BUG_ON(!list_empty(&serv->sv_tempsocks)); |
399 | } | ||
400 | 489 | ||
401 | cache_clean_deferred(serv); | 490 | cache_clean_deferred(serv); |
402 | 491 | ||
492 | if (svc_serv_is_pooled(serv)) | ||
493 | svc_pool_map_put(); | ||
494 | |||
403 | /* Unregister service with the portmapper */ | 495 | /* Unregister service with the portmapper */ |
404 | svc_register(serv, 0, 0); | 496 | svc_register(serv, 0, 0); |
405 | kfree(serv->sv_pools); | 497 | kfree(serv->sv_pools); |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 63ae94771b8e..f6e1eb1ea720 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -82,6 +82,7 @@ static void svc_delete_socket(struct svc_sock *svsk); | |||
82 | static void svc_udp_data_ready(struct sock *, int); | 82 | static void svc_udp_data_ready(struct sock *, int); |
83 | static int svc_udp_recvfrom(struct svc_rqst *); | 83 | static int svc_udp_recvfrom(struct svc_rqst *); |
84 | static int svc_udp_sendto(struct svc_rqst *); | 84 | static int svc_udp_sendto(struct svc_rqst *); |
85 | static void svc_close_socket(struct svc_sock *svsk); | ||
85 | 86 | ||
86 | static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk); | 87 | static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk); |
87 | static int svc_deferred_recv(struct svc_rqst *rqstp); | 88 | static int svc_deferred_recv(struct svc_rqst *rqstp); |
@@ -131,13 +132,13 @@ static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len) | |||
131 | NIPQUAD(((struct sockaddr_in *) addr)->sin_addr), | 132 | NIPQUAD(((struct sockaddr_in *) addr)->sin_addr), |
132 | htons(((struct sockaddr_in *) addr)->sin_port)); | 133 | htons(((struct sockaddr_in *) addr)->sin_port)); |
133 | break; | 134 | break; |
134 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 135 | |
135 | case AF_INET6: | 136 | case AF_INET6: |
136 | snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u", | 137 | snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u", |
137 | NIP6(((struct sockaddr_in6 *) addr)->sin6_addr), | 138 | NIP6(((struct sockaddr_in6 *) addr)->sin6_addr), |
138 | htons(((struct sockaddr_in6 *) addr)->sin6_port)); | 139 | htons(((struct sockaddr_in6 *) addr)->sin6_port)); |
139 | break; | 140 | break; |
140 | #endif | 141 | |
141 | default: | 142 | default: |
142 | snprintf(buf, len, "unknown address type: %d", addr->sa_family); | 143 | snprintf(buf, len, "unknown address type: %d", addr->sa_family); |
143 | break; | 144 | break; |
@@ -449,9 +450,7 @@ svc_wake_up(struct svc_serv *serv) | |||
449 | 450 | ||
450 | union svc_pktinfo_u { | 451 | union svc_pktinfo_u { |
451 | struct in_pktinfo pkti; | 452 | struct in_pktinfo pkti; |
452 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
453 | struct in6_pktinfo pkti6; | 453 | struct in6_pktinfo pkti6; |
454 | #endif | ||
455 | }; | 454 | }; |
456 | 455 | ||
457 | static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) | 456 | static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) |
@@ -467,7 +466,7 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) | |||
467 | cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); | 466 | cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); |
468 | } | 467 | } |
469 | break; | 468 | break; |
470 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 469 | |
471 | case AF_INET6: { | 470 | case AF_INET6: { |
472 | struct in6_pktinfo *pki = CMSG_DATA(cmh); | 471 | struct in6_pktinfo *pki = CMSG_DATA(cmh); |
473 | 472 | ||
@@ -479,7 +478,6 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) | |||
479 | cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); | 478 | cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); |
480 | } | 479 | } |
481 | break; | 480 | break; |
482 | #endif | ||
483 | } | 481 | } |
484 | return; | 482 | return; |
485 | } | 483 | } |
@@ -721,45 +719,21 @@ svc_write_space(struct sock *sk) | |||
721 | } | 719 | } |
722 | } | 720 | } |
723 | 721 | ||
724 | static void svc_udp_get_sender_address(struct svc_rqst *rqstp, | 722 | static inline void svc_udp_get_dest_address(struct svc_rqst *rqstp, |
725 | struct sk_buff *skb) | 723 | struct cmsghdr *cmh) |
726 | { | 724 | { |
727 | switch (rqstp->rq_sock->sk_sk->sk_family) { | 725 | switch (rqstp->rq_sock->sk_sk->sk_family) { |
728 | case AF_INET: { | 726 | case AF_INET: { |
729 | /* this seems to come from net/ipv4/udp.c:udp_recvmsg */ | 727 | struct in_pktinfo *pki = CMSG_DATA(cmh); |
730 | struct sockaddr_in *sin = svc_addr_in(rqstp); | 728 | rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr; |
731 | |||
732 | sin->sin_family = AF_INET; | ||
733 | sin->sin_port = skb->h.uh->source; | ||
734 | sin->sin_addr.s_addr = skb->nh.iph->saddr; | ||
735 | rqstp->rq_addrlen = sizeof(struct sockaddr_in); | ||
736 | /* Remember which interface received this request */ | ||
737 | rqstp->rq_daddr.addr.s_addr = skb->nh.iph->daddr; | ||
738 | } | ||
739 | break; | 729 | break; |
740 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
741 | case AF_INET6: { | ||
742 | /* this is derived from net/ipv6/udp.c:udpv6_recvmesg */ | ||
743 | struct sockaddr_in6 *sin6 = svc_addr_in6(rqstp); | ||
744 | |||
745 | sin6->sin6_family = AF_INET6; | ||
746 | sin6->sin6_port = skb->h.uh->source; | ||
747 | sin6->sin6_flowinfo = 0; | ||
748 | sin6->sin6_scope_id = 0; | ||
749 | if (ipv6_addr_type(&sin6->sin6_addr) & | ||
750 | IPV6_ADDR_LINKLOCAL) | ||
751 | sin6->sin6_scope_id = IP6CB(skb)->iif; | ||
752 | ipv6_addr_copy(&sin6->sin6_addr, | ||
753 | &skb->nh.ipv6h->saddr); | ||
754 | rqstp->rq_addrlen = sizeof(struct sockaddr_in); | ||
755 | /* Remember which interface received this request */ | ||
756 | ipv6_addr_copy(&rqstp->rq_daddr.addr6, | ||
757 | &skb->nh.ipv6h->saddr); | ||
758 | } | 730 | } |
731 | case AF_INET6: { | ||
732 | struct in6_pktinfo *pki = CMSG_DATA(cmh); | ||
733 | ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr); | ||
759 | break; | 734 | break; |
760 | #endif | 735 | } |
761 | } | 736 | } |
762 | return; | ||
763 | } | 737 | } |
764 | 738 | ||
765 | /* | 739 | /* |
@@ -771,7 +745,15 @@ svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
771 | struct svc_sock *svsk = rqstp->rq_sock; | 745 | struct svc_sock *svsk = rqstp->rq_sock; |
772 | struct svc_serv *serv = svsk->sk_server; | 746 | struct svc_serv *serv = svsk->sk_server; |
773 | struct sk_buff *skb; | 747 | struct sk_buff *skb; |
748 | char buffer[CMSG_SPACE(sizeof(union svc_pktinfo_u))]; | ||
749 | struct cmsghdr *cmh = (struct cmsghdr *)buffer; | ||
774 | int err, len; | 750 | int err, len; |
751 | struct msghdr msg = { | ||
752 | .msg_name = svc_addr(rqstp), | ||
753 | .msg_control = cmh, | ||
754 | .msg_controllen = sizeof(buffer), | ||
755 | .msg_flags = MSG_DONTWAIT, | ||
756 | }; | ||
775 | 757 | ||
776 | if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags)) | 758 | if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags)) |
777 | /* udp sockets need large rcvbuf as all pending | 759 | /* udp sockets need large rcvbuf as all pending |
@@ -797,7 +779,9 @@ svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
797 | } | 779 | } |
798 | 780 | ||
799 | clear_bit(SK_DATA, &svsk->sk_flags); | 781 | clear_bit(SK_DATA, &svsk->sk_flags); |
800 | while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) { | 782 | while ((err == kernel_recvmsg(svsk->sk_sock, &msg, NULL, |
783 | 0, 0, MSG_PEEK | MSG_DONTWAIT)) < 0 || | ||
784 | (skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) { | ||
801 | if (err == -EAGAIN) { | 785 | if (err == -EAGAIN) { |
802 | svc_sock_received(svsk); | 786 | svc_sock_received(svsk); |
803 | return err; | 787 | return err; |
@@ -805,6 +789,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
805 | /* possibly an icmp error */ | 789 | /* possibly an icmp error */ |
806 | dprintk("svc: recvfrom returned error %d\n", -err); | 790 | dprintk("svc: recvfrom returned error %d\n", -err); |
807 | } | 791 | } |
792 | rqstp->rq_addrlen = sizeof(rqstp->rq_addr); | ||
808 | if (skb->tstamp.off_sec == 0) { | 793 | if (skb->tstamp.off_sec == 0) { |
809 | struct timeval tv; | 794 | struct timeval tv; |
810 | 795 | ||
@@ -827,7 +812,16 @@ svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
827 | 812 | ||
828 | rqstp->rq_prot = IPPROTO_UDP; | 813 | rqstp->rq_prot = IPPROTO_UDP; |
829 | 814 | ||
830 | svc_udp_get_sender_address(rqstp, skb); | 815 | if (cmh->cmsg_level != IPPROTO_IP || |
816 | cmh->cmsg_type != IP_PKTINFO) { | ||
817 | if (net_ratelimit()) | ||
818 | printk("rpcsvc: received unknown control message:" | ||
819 | "%d/%d\n", | ||
820 | cmh->cmsg_level, cmh->cmsg_type); | ||
821 | skb_free_datagram(svsk->sk_sk, skb); | ||
822 | return 0; | ||
823 | } | ||
824 | svc_udp_get_dest_address(rqstp, cmh); | ||
831 | 825 | ||
832 | if (skb_is_nonlinear(skb)) { | 826 | if (skb_is_nonlinear(skb)) { |
833 | /* we have to copy */ | 827 | /* we have to copy */ |
@@ -884,6 +878,9 @@ svc_udp_sendto(struct svc_rqst *rqstp) | |||
884 | static void | 878 | static void |
885 | svc_udp_init(struct svc_sock *svsk) | 879 | svc_udp_init(struct svc_sock *svsk) |
886 | { | 880 | { |
881 | int one = 1; | ||
882 | mm_segment_t oldfs; | ||
883 | |||
887 | svsk->sk_sk->sk_data_ready = svc_udp_data_ready; | 884 | svsk->sk_sk->sk_data_ready = svc_udp_data_ready; |
888 | svsk->sk_sk->sk_write_space = svc_write_space; | 885 | svsk->sk_sk->sk_write_space = svc_write_space; |
889 | svsk->sk_recvfrom = svc_udp_recvfrom; | 886 | svsk->sk_recvfrom = svc_udp_recvfrom; |
@@ -899,6 +896,13 @@ svc_udp_init(struct svc_sock *svsk) | |||
899 | 896 | ||
900 | set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */ | 897 | set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */ |
901 | set_bit(SK_CHNGBUF, &svsk->sk_flags); | 898 | set_bit(SK_CHNGBUF, &svsk->sk_flags); |
899 | |||
900 | oldfs = get_fs(); | ||
901 | set_fs(KERNEL_DS); | ||
902 | /* make sure we get destination address info */ | ||
903 | svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO, | ||
904 | (char __user *)&one, sizeof(one)); | ||
905 | set_fs(oldfs); | ||
902 | } | 906 | } |
903 | 907 | ||
904 | /* | 908 | /* |
@@ -977,11 +981,9 @@ static inline int svc_port_is_privileged(struct sockaddr *sin) | |||
977 | case AF_INET: | 981 | case AF_INET: |
978 | return ntohs(((struct sockaddr_in *)sin)->sin_port) | 982 | return ntohs(((struct sockaddr_in *)sin)->sin_port) |
979 | < PROT_SOCK; | 983 | < PROT_SOCK; |
980 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
981 | case AF_INET6: | 984 | case AF_INET6: |
982 | return ntohs(((struct sockaddr_in6 *)sin)->sin6_port) | 985 | return ntohs(((struct sockaddr_in6 *)sin)->sin6_port) |
983 | < PROT_SOCK; | 986 | < PROT_SOCK; |
984 | #endif | ||
985 | default: | 987 | default: |
986 | return 0; | 988 | return 0; |
987 | } | 989 | } |
@@ -1786,7 +1788,7 @@ svc_delete_socket(struct svc_sock *svsk) | |||
1786 | spin_unlock_bh(&serv->sv_lock); | 1788 | spin_unlock_bh(&serv->sv_lock); |
1787 | } | 1789 | } |
1788 | 1790 | ||
1789 | void svc_close_socket(struct svc_sock *svsk) | 1791 | static void svc_close_socket(struct svc_sock *svsk) |
1790 | { | 1792 | { |
1791 | set_bit(SK_CLOSE, &svsk->sk_flags); | 1793 | set_bit(SK_CLOSE, &svsk->sk_flags); |
1792 | if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) | 1794 | if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) |
@@ -1799,6 +1801,19 @@ void svc_close_socket(struct svc_sock *svsk) | |||
1799 | svc_sock_put(svsk); | 1801 | svc_sock_put(svsk); |
1800 | } | 1802 | } |
1801 | 1803 | ||
1804 | void svc_force_close_socket(struct svc_sock *svsk) | ||
1805 | { | ||
1806 | set_bit(SK_CLOSE, &svsk->sk_flags); | ||
1807 | if (test_bit(SK_BUSY, &svsk->sk_flags)) { | ||
1808 | /* Waiting to be processed, but no threads left, | ||
1809 | * So just remove it from the waiting list | ||
1810 | */ | ||
1811 | list_del_init(&svsk->sk_ready); | ||
1812 | clear_bit(SK_BUSY, &svsk->sk_flags); | ||
1813 | } | ||
1814 | svc_close_socket(svsk); | ||
1815 | } | ||
1816 | |||
1802 | /** | 1817 | /** |
1803 | * svc_makesock - Make a socket for nfsd and lockd | 1818 | * svc_makesock - Make a socket for nfsd and lockd |
1804 | * @serv: RPC server structure | 1819 | * @serv: RPC server structure |