-rw-r--r--  Documentation/feature-removal-schedule.txt |  25
-rw-r--r--  Documentation/networking/netdevices.txt    |   2
-rw-r--r--  Documentation/vm/slabinfo.c                |   9
-rw-r--r--  Makefile                                   |   2
-rw-r--r--  arch/i386/Makefile                         |   2
-rw-r--r--  drivers/ata/libata-core.c                  |   6
-rw-r--r--  drivers/ata/libata-scsi.c                  |   9
-rw-r--r--  drivers/ata/libata.h                       |   1
-rw-r--r--  drivers/ata/sata_nv.c                      |  18
-rw-r--r--  drivers/ata/sata_via.c                     |   2
-rw-r--r--  drivers/net/e1000/e1000.h                  |   4
-rw-r--r--  drivers/net/e1000/e1000_main.c             |  39
-rw-r--r--  drivers/net/gianfar.c                      |  11
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_core.c       |   2
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_mal.c        |   3
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_mal.h        |   3
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_phy.c        |  60
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_rgmii.c      |   2
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_rgmii.h      |   2
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_tah.c        |   2
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_tah.h        |   2
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_zmii.c       |   2
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_zmii.h       |   2
-rw-r--r--  drivers/net/ixgb/ixgb.h                    |   2
-rw-r--r--  drivers/net/ixgb/ixgb_main.c               |  36
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c       |   2
-rw-r--r--  drivers/net/sky2.c                         |  69
-rw-r--r--  drivers/net/spider_net.c                   |   3
-rw-r--r--  fs/lockd/clntlock.c                        |   2
-rw-r--r--  fs/lockd/host.c                            |   2
-rw-r--r--  fs/lockd/xdr.c                             |   4
-rw-r--r--  fs/lockd/xdr4.c                            |   6
-rw-r--r--  fs/nfs/callback.h                          |   4
-rw-r--r--  fs/nfs/delegation.c                        |   2
-rw-r--r--  fs/nfs/dir.c                               |   4
-rw-r--r--  fs/nfs/nfs4proc.c                          |   4
-rw-r--r--  fs/nfs/nfs4state.c                         |   2
-rw-r--r--  fs/nfs/nfs4xdr.c                           |  96
-rw-r--r--  fs/nfs/read.c                              |  10
-rw-r--r--  fs/nfs/write.c                             |   6
-rw-r--r--  include/linux/lockd/xdr4.h                 |   1
-rw-r--r--  include/linux/mii.h                        |   4
-rw-r--r--  include/linux/nfs4.h                       |   3
-rw-r--r--  include/linux/sunrpc/rpc_pipe_fs.h         |   2
-rw-r--r--  include/linux/sunrpc/xprt.h                |   2
-rw-r--r--  include/linux/workqueue.h                  |   2
-rw-r--r--  net/bluetooth/hci_sock.c                   |   6
-rw-r--r--  net/core/dev.c                             |  69
-rw-r--r--  net/ipv4/Kconfig                           |   7
-rw-r--r--  net/ipv4/route.c                           |   4
-rw-r--r--  net/ipv4/tcp_cong.c                        |  40
-rw-r--r--  net/sunrpc/sched.c                         |   2
-rw-r--r--  net/sunrpc/sunrpc_syms.c                   |   4
53 files changed, 335 insertions(+), 275 deletions(-)
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 498ff31f3aa1..5c8695a3d139 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -328,21 +328,20 @@ Who: Adrian Bunk <bunk@stusta.de>
 
 ---------------------------
 
-What:	libata.spindown_compat module parameter
+What:	libata spindown skipping and warning
 When:	Dec 2008
-Why:	halt(8) synchronizes caches for and spins down libata disks
-	because libata didn't use to spin down disk on system halt
-	(only synchronized caches).
-	Spin down on system halt is now implemented and can be tested
-	using sysfs node /sys/class/scsi_disk/h:c:i:l/manage_start_stop.
+Why:	Some halt(8) implementations synchronize caches for and spin
+	down libata disks because libata didn't use to spin down disk on
+	system halt (only synchronized caches).
+	Spin down on system halt is now implemented.  sysfs node
+	/sys/class/scsi_disk/h:c:i:l/manage_start_stop is present if
+	spin down support is available.
 	Because issuing spin down command to an already spun down disk
-	makes some disks spin up just to spin down again, the old
-	behavior needs to be maintained till userspace tool is updated
-	to check the sysfs node and not to spin down disks with the
-	node set to one.
-	This module parameter is to give userspace tool the time to
-	get updated and should be removed after userspace is
-	reasonably updated.
+	makes some disks spin up just to spin down again, libata tracks
+	device spindown status to skip the extra spindown command and
+	warn about it.
+	This is to give userspace tools the time to get updated and will
+	be removed after userspace is reasonably updated.
Who:	Tejun Heo <htejun@gmail.com>
 
 ---------------------------
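The schedule entry above asks userspace halt(8)-style tools to check the
manage_start_stop sysfs node before issuing their own spindown.  As an
illustration only (not part of this patch), a minimal userspace sketch of
that check; the "0:0:0:0" address is a hypothetical stand-in for a real
host:channel:id:lun.

/* hypothetical sketch of the userspace-side check described above */
#include <stdio.h>

static int kernel_manages_spindown(const char *hcil)
{
	char path[256];
	FILE *f;
	int c;

	snprintf(path, sizeof(path),
		 "/sys/class/scsi_disk/%s/manage_start_stop", hcil);
	f = fopen(path, "r");
	if (!f)
		return 0;	/* node absent: old kernel, tool spins down itself */
	c = fgetc(f);
	fclose(f);
	return c == '1';	/* node set to one: kernel already spins the disk down */
}

int main(void)
{
	if (!kernel_manages_spindown("0:0:0:0"))
		printf("tool would issue its own STOP UNIT here\n");
	return 0;
}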
diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt
index 847cedb238f6..ce1361f95243 100644
--- a/Documentation/networking/netdevices.txt
+++ b/Documentation/networking/netdevices.txt
@@ -49,7 +49,7 @@ dev->hard_start_xmit:
 	for this and return -1 when the spin lock fails.
 	The locking there should also properly protect against
 	set_multicast_list
-  Context: BHs disabled
+  Context: Process with BHs disabled or BH (timer).
   Notes: netif_queue_stopped() is guaranteed false
          Interrupts must be enabled when calling hard_start_xmit.
          (Interrupts must also be enabled when enabling the BH handler.)
diff --git a/Documentation/vm/slabinfo.c b/Documentation/vm/slabinfo.c
index 434af27a32ac..d4f21ffd1404 100644
--- a/Documentation/vm/slabinfo.c
+++ b/Documentation/vm/slabinfo.c
@@ -262,11 +262,17 @@ void decode_numa_list(int *numa, char *t)
 
 void slab_validate(struct slabinfo *s)
 {
+	if (strcmp(s->name, "*") == 0)
+		return;
+
 	set_obj(s, "validate", 1);
 }
 
 void slab_shrink(struct slabinfo *s)
 {
+	if (strcmp(s->name, "*") == 0)
+		return;
+
 	set_obj(s, "shrink", 1);
 }
 
@@ -550,6 +556,9 @@ int slab_empty(struct slabinfo *s)
 
 void slab_debug(struct slabinfo *s)
 {
+	if (strcmp(s->name, "*") == 0)
+		return;
+
 	if (sanity && !s->sanity_checks) {
 		set_obj(s, "sanity", 1);
 	}
diff --git a/Makefile b/Makefile
index e6990e2cdafc..948fa09442f1 100644
--- a/Makefile
+++ b/Makefile
@@ -491,7 +491,7 @@ endif
 include $(srctree)/arch/$(ARCH)/Makefile
 
 ifdef CONFIG_FRAME_POINTER
-CFLAGS		+= -fno-omit-frame-pointer -fno-optimize-sibling-calls
+CFLAGS		+= -fno-omit-frame-pointer $(call cc-option,-fno-optimize-sibling-calls,)
 else
 CFLAGS		+= -fomit-frame-pointer
 endif
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 6dc5e5d90fec..bd28f9f9b4b7 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -34,7 +34,7 @@ CHECKFLAGS += -D__i386__
 CFLAGS += -pipe -msoft-float -mregparm=3 -freg-struct-return
 
 # prevent gcc from keeping the stack 16 byte aligned
-CFLAGS += -mpreferred-stack-boundary=4
+CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
 
 # CPU-specific tuning. Anything which can be shared with UML should go here.
 include $(srctree)/arch/i386/Makefile.cpu
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index d5939e659cbb..d3ea7f55283c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -101,12 +101,6 @@ int libata_noacpi = 1;
 module_param_named(noacpi, libata_noacpi, int, 0444);
 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
 
-int ata_spindown_compat = 1;
-module_param_named(spindown_compat, ata_spindown_compat, int, 0644);
-MODULE_PARM_DESC(spindown_compat, "Enable backward compatible spindown "
-	"behavior.  Will be removed.  More info can be found in "
-	"Documentation/feature-removal-schedule.txt\n");
-
 MODULE_AUTHOR("Jeff Garzik");
 MODULE_DESCRIPTION("Library module for ATA devices");
 MODULE_LICENSE("GPL");
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index b6a1de8fad5b..242c43eef807 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -893,7 +893,7 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
 	return queue_depth;
 }
 
-/* XXX: for ata_spindown_compat */
+/* XXX: for spindown warning */
 static void ata_delayed_done_timerfn(unsigned long arg)
 {
 	struct scsi_cmnd *scmd = (void *)arg;
@@ -901,7 +901,7 @@ static void ata_delayed_done_timerfn(unsigned long arg)
 	scmd->scsi_done(scmd);
 }
 
-/* XXX: for ata_spindown_compat */
+/* XXX: for spindown warning */
 static void ata_delayed_done(struct scsi_cmnd *scmd)
 {
 	static struct timer_list timer;
@@ -966,8 +966,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
 	 * removed.  Read Documentation/feature-removal-schedule.txt
 	 * for more info.
 	 */
-	if (ata_spindown_compat &&
-	    (qc->dev->flags & ATA_DFLAG_SPUNDOWN) &&
+	if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) &&
 	    (system_state == SYSTEM_HALT ||
 	     system_state == SYSTEM_POWER_OFF)) {
 		static unsigned long warned = 0;
@@ -1395,7 +1394,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
 		}
 	}
 
-	/* XXX: track spindown state for spindown_compat */
+	/* XXX: track spindown state for spindown skipping and warning */
 	if (unlikely(qc->tf.command == ATA_CMD_STANDBY ||
 		     qc->tf.command == ATA_CMD_STANDBYNOW1))
 		qc->dev->flags |= ATA_DFLAG_SPUNDOWN;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 13cb0c9af68d..5e2466658420 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -58,7 +58,6 @@ extern int atapi_enabled;
 extern int atapi_dmadir;
 extern int libata_fua;
 extern int libata_noacpi;
-extern int ata_spindown_compat;
 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
 extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
 			   u64 block, u32 n_block, unsigned int tf_flags,
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 4cea3ef75226..1a49c777fa6a 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -229,7 +229,6 @@ struct nv_host_priv {
 #define NV_ADMA_CHECK_INTR(GCTL, PORT)	((GCTL) & ( 1 << (19 + (12 * (PORT)))))
 
 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static void nv_remove_one (struct pci_dev *pdev);
 #ifdef CONFIG_PM
 static int nv_pci_device_resume(struct pci_dev *pdev);
 #endif
@@ -288,12 +287,6 @@ static const struct pci_device_id nv_pci_tbl[] = {
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
-		PCI_ANY_ID, PCI_ANY_ID,
-		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
-		PCI_ANY_ID, PCI_ANY_ID,
-		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
 
 	{ } /* terminate list */
 };
@@ -306,7 +299,7 @@ static struct pci_driver nv_pci_driver = {
 	.suspend	= ata_pci_device_suspend,
 	.resume		= nv_pci_device_resume,
 #endif
-	.remove		= nv_remove_one,
+	.remove		= ata_pci_remove_one,
 };
 
 static struct scsi_host_template nv_sht = {
@@ -1613,15 +1606,6 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 				  IRQF_SHARED, ppi[0]->sht);
 }
 
-static void nv_remove_one (struct pci_dev *pdev)
-{
-	struct ata_host *host = dev_get_drvdata(&pdev->dev);
-	struct nv_host_priv *hpriv = host->private_data;
-
-	ata_pci_remove_one(pdev);
-	kfree(hpriv);
-}
-
 #ifdef CONFIG_PM
 static int nv_pci_device_resume(struct pci_dev *pdev)
 {
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index d105d2c189d2..ac4f43c4993f 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -441,7 +441,7 @@ static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
 		return -ENOMEM;
 	}
 
-	rc = pcim_iomap_regions(pdev, 0x1f, DRV_NAME);
+	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
 	if (rc) {
 		dev_printk(KERN_ERR, &pdev->dev, "failed to request/iomap "
 			   "PCI BARs (errno=%d)\n", rc);
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index a9ea67e75c1b..16a6edfeba41 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -333,11 +333,9 @@ struct e1000_adapter {
 	struct e1000_tx_ring test_tx_ring;
 	struct e1000_rx_ring test_rx_ring;
 
-
 	int msg_enable;
-#ifdef CONFIG_PCI_MSI
 	boolean_t have_msi;
-#endif
+
 	/* to not mess up cache alignment, always add to the bottom */
 	boolean_t tso_force;
 	boolean_t smart_power_down;	/* phy smart power down */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 637ae8f68791..49be393e1c1d 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -158,9 +158,7 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 static int e1000_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t e1000_intr(int irq, void *data);
-#ifdef CONFIG_PCI_MSI
 static irqreturn_t e1000_intr_msi(int irq, void *data);
-#endif
 static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
                                     struct e1000_tx_ring *tx_ring);
 #ifdef CONFIG_E1000_NAPI
@@ -300,31 +298,26 @@ module_exit(e1000_exit_module);
 static int e1000_request_irq(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	int flags, err = 0;
+	void (*handler) = &e1000_intr;
+	int irq_flags = IRQF_SHARED;
+	int err;
 
-	flags = IRQF_SHARED;
-#ifdef CONFIG_PCI_MSI
 	if (adapter->hw.mac_type >= e1000_82571) {
-		adapter->have_msi = TRUE;
-		if ((err = pci_enable_msi(adapter->pdev))) {
-			DPRINTK(PROBE, ERR,
-			 "Unable to allocate MSI interrupt Error: %d\n", err);
-			adapter->have_msi = FALSE;
+		adapter->have_msi = !pci_enable_msi(adapter->pdev);
+		if (adapter->have_msi) {
+			handler = &e1000_intr_msi;
+			irq_flags = 0;
 		}
 	}
-	if (adapter->have_msi) {
-		flags &= ~IRQF_SHARED;
-		err = request_irq(adapter->pdev->irq, &e1000_intr_msi, flags,
-		                  netdev->name, netdev);
-		if (err)
-			DPRINTK(PROBE, ERR,
-			"Unable to allocate interrupt Error: %d\n", err);
-	} else
-#endif
-	if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
-	                       netdev->name, netdev)))
+
+	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
+	                  netdev);
+	if (err) {
+		if (adapter->have_msi)
+			pci_disable_msi(adapter->pdev);
 		DPRINTK(PROBE, ERR,
 		        "Unable to allocate interrupt Error: %d\n", err);
+	}
 
 	return err;
 }
@@ -335,10 +328,8 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
 
 	free_irq(adapter->pdev->irq, netdev);
 
-#ifdef CONFIG_PCI_MSI
 	if (adapter->have_msi)
 		pci_disable_msi(adapter->pdev);
-#endif
 }
 
 /**
@@ -3744,7 +3735,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
 
 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
-#ifdef CONFIG_PCI_MSI
 
 /**
  * e1000_intr_msi - Interrupt Handler
@@ -3810,7 +3800,6 @@ e1000_intr_msi(int irq, void *data)
 
 	return IRQ_HANDLED;
 }
-#endif
 
 /**
  * e1000_intr - Interrupt Handler
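The e1000_request_irq() rework above (and the ixgb change further down)
follows the usual MSI-with-legacy-fallback IRQ setup.  A generic, hedged
sketch of that pattern for an arbitrary driver; foo_intr, foo_intr_msi and
struct foo_adapter are hypothetical names, not e1000 code.

/* illustrative sketch of the MSI-or-shared-INTx request pattern, kernel context */
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/types.h>

struct foo_adapter {
	struct pci_dev *pdev;
	struct net_device *netdev;
	bool have_msi;
};

static irqreturn_t foo_intr(int irq, void *data)     { return IRQ_HANDLED; }
static irqreturn_t foo_intr_msi(int irq, void *data) { return IRQ_HANDLED; }

static int foo_request_irq(struct foo_adapter *adapter)
{
	irq_handler_t handler = foo_intr;
	unsigned long irq_flags = IRQF_SHARED;	/* legacy INTx may be shared */
	int err;

	adapter->have_msi = !pci_enable_msi(adapter->pdev);
	if (adapter->have_msi) {
		handler = foo_intr_msi;		/* MSI vector is exclusively ours */
		irq_flags = 0;
	}

	err = request_irq(adapter->pdev->irq, handler, irq_flags,
			  adapter->netdev->name, adapter->netdev);
	if (err && adapter->have_msi)
		pci_disable_msi(adapter->pdev);	/* undo MSI if the request failed */
	return err;
}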
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b666a0cc0642..f5b3cba23fc5 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1025,6 +1025,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	dev->trans_start = jiffies;
 
+	/* The powerpc-specific eieio() is used, as wmb() has too strong
+	 * semantics (it requires synchronization between cacheable and
+	 * uncacheable mappings, which eieio doesn't provide and which we
+	 * don't need), thus requiring a more expensive sync instruction.  At
+	 * some point, the set of architecture-independent barrier functions
+	 * should be expanded to include weaker barriers.
+	 */
+
+	eieio();
 	txbdp->status = status;
 
 	/* If this was the last BD in the ring, the next one */
@@ -1301,6 +1310,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
 	bdp->length = 0;
 
 	/* Mark the buffer empty */
+	eieio();
 	bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);
 
 	return skb;
@@ -1484,6 +1494,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 	bdp = priv->cur_rx;
 
 	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
+		rmb();
 		skb = priv->rx_skbuff[priv->skb_currx];
 
 		if (!(bdp->status &
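The comment added in the first gianfar hunk reasons about ordering the
descriptor payload writes before the status word that hands the descriptor to
hardware, and the rmb() added in the last hunk is the matching read-side
barrier.  A self-contained C11 model of that hand-off (illustration only, not
gianfar code; release/acquire fences stand in for the kernel's eieio()/rmb(),
and struct bd is a hypothetical descriptor):

#include <stdatomic.h>
#include <stdio.h>

struct bd {
	unsigned length;		/* payload fields, written first */
	atomic_uint status;		/* nonzero once owned by the consumer */
};

static void producer_publish(struct bd *bdp, unsigned len)
{
	bdp->length = len;				/* fill the descriptor */
	atomic_store_explicit(&bdp->status, 1,
			      memory_order_release);	/* then publish it */
}

static int consumer_poll(struct bd *bdp)
{
	if (!atomic_load_explicit(&bdp->status, memory_order_acquire))
		return -1;		/* not handed over yet */
	return bdp->length;		/* payload now safe to read */
}

int main(void)
{
	struct bd d = { 0, 0 };

	producer_publish(&d, 64);
	printf("consumed length: %d\n", consumer_poll(&d));
	return 0;
}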
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index 50035ebd4f52..f752e5fc65ba 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -926,7 +926,7 @@ static int emac_link_differs(struct ocp_enet_private *dev)
 	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
 	int speed, pause, asym_pause;
 
-	if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
+	if (r & EMAC_MR1_MF_1000)
 		speed = SPEED_1000;
 	else if (r & EMAC_MR1_MF_100)
 		speed = SPEED_100;
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.c b/drivers/net/ibm_emac/ibm_emac_mal.c
index 6c0f071e4052..cabd9846a5ee 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.c
+++ b/drivers/net/ibm_emac/ibm_emac_mal.c
@@ -59,8 +59,7 @@ int __init mal_register_commac(struct ibm_ocp_mal *mal,
 	return 0;
 }
 
-void __exit mal_unregister_commac(struct ibm_ocp_mal *mal,
-				  struct mal_commac *commac)
+void mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
 {
 	unsigned long flags;
 	local_irq_save(flags);
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h
index 407d2acbf7c7..64bc338acc6c 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.h
+++ b/drivers/net/ibm_emac/ibm_emac_mal.h
@@ -223,8 +223,7 @@ void mal_exit(void) __exit;
 
 int mal_register_commac(struct ibm_ocp_mal *mal,
 			struct mal_commac *commac) __init;
-void mal_unregister_commac(struct ibm_ocp_mal *mal,
-			   struct mal_commac *commac) __exit;
+void mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac);
 int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size);
 
 /* Returns BD ring offset for a particular channel
diff --git a/drivers/net/ibm_emac/ibm_emac_phy.c b/drivers/net/ibm_emac/ibm_emac_phy.c
index 9074f76ee2bf..e57862b34cae 100644
--- a/drivers/net/ibm_emac/ibm_emac_phy.c
+++ b/drivers/net/ibm_emac/ibm_emac_phy.c
@@ -22,6 +22,7 @@
 
 #include <asm/ocp.h>
 
+#include "ibm_emac_core.h"
 #include "ibm_emac_phy.h"
 
 static inline int phy_read(struct mii_phy *phy, int reg)
@@ -34,11 +35,39 @@ static inline void phy_write(struct mii_phy *phy, int reg, int val)
 	phy->mdio_write(phy->dev, phy->address, reg, val);
 }
 
-int mii_reset_phy(struct mii_phy *phy)
+/*
+ * polls MII_BMCR until BMCR_RESET bit clears or operation times out.
+ *
+ * returns:
+ *	>= 0 => success, value in BMCR returned to caller
+ *	-EBUSY => failure, RESET bit never cleared
+ *	otherwise => failure, lower level PHY read failed
+ */
+static int mii_spin_reset_complete(struct mii_phy *phy)
 {
 	int val;
 	int limit = 10000;
 
+	while (limit--) {
+		val = phy_read(phy, MII_BMCR);
+		if (val >= 0 && !(val & BMCR_RESET))
+			return val;	/* success */
+		udelay(10);
+	}
+	if (val & BMCR_RESET)
+		val = -EBUSY;
+
+	if (net_ratelimit())
+		printk(KERN_ERR "emac%d: PHY reset timeout (%d)\n",
+		       ((struct ocp_enet_private *)phy->dev->priv)->def->index,
+		       val);
+	return val;
+}
+
+int mii_reset_phy(struct mii_phy *phy)
+{
+	int val;
+
 	val = phy_read(phy, MII_BMCR);
 	val &= ~BMCR_ISOLATE;
 	val |= BMCR_RESET;
@@ -46,16 +75,11 @@ int mii_reset_phy(struct mii_phy *phy)
 
 	udelay(300);
 
-	while (limit--) {
-		val = phy_read(phy, MII_BMCR);
-		if (val >= 0 && (val & BMCR_RESET) == 0)
-			break;
-		udelay(10);
-	}
-	if ((val & BMCR_ISOLATE) && limit > 0)
+	val = mii_spin_reset_complete(phy);
+	if (val >= 0 && (val & BMCR_ISOLATE))
 		phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
 
-	return limit <= 0;
+	return val < 0;
 }
 
 static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
@@ -102,8 +126,14 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
 	}
 
 	/* Start/Restart aneg */
-	ctl = phy_read(phy, MII_BMCR);
-	ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+	/* on some PHYs (e.g. National DP83843) a write to MII_ADVERTISE
+	 * causes BMCR_RESET to be set on the next read of MII_BMCR, which
+	 * if not checked for causes the PHY to be reset below */
+	ctl = mii_spin_reset_complete(phy);
+	if (ctl < 0)
+		return ctl;
+
+	ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
 	phy_write(phy, MII_BMCR, ctl);
 
 	return 0;
@@ -118,13 +148,13 @@ static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
 	phy->duplex = fd;
 	phy->pause = phy->asym_pause = 0;
 
+	/* First reset the PHY */
+	mii_reset_phy(phy);
+
 	ctl = phy_read(phy, MII_BMCR);
 	if (ctl < 0)
 		return ctl;
-	ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
-
-	/* First reset the PHY */
-	phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
+	ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE | BMCR_SPEED1000);
 
 	/* Select speed & duplex */
 	switch (speed) {
diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.c b/drivers/net/ibm_emac/ibm_emac_rgmii.c
index 53d281cb9a16..9dbb5e5936c3 100644
--- a/drivers/net/ibm_emac/ibm_emac_rgmii.c
+++ b/drivers/net/ibm_emac/ibm_emac_rgmii.c
@@ -162,7 +162,7 @@ void rgmii_set_speed(struct ocp_device *ocpdev, int input, int speed)
 	out_be32(&dev->base->ssr, ssr);
 }
 
-void __exit __rgmii_fini(struct ocp_device *ocpdev, int input)
+void __rgmii_fini(struct ocp_device *ocpdev, int input)
 {
 	struct ibm_ocp_rgmii *dev = ocp_get_drvdata(ocpdev);
 	BUG_ON(!dev || dev->users == 0);
diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.h b/drivers/net/ibm_emac/ibm_emac_rgmii.h
index 117ea486c2ca..971e45815c6c 100644
--- a/drivers/net/ibm_emac/ibm_emac_rgmii.h
+++ b/drivers/net/ibm_emac/ibm_emac_rgmii.h
@@ -37,7 +37,7 @@ struct ibm_ocp_rgmii {
 #ifdef CONFIG_IBM_EMAC_RGMII
 int rgmii_attach(void *emac) __init;
 
-void __rgmii_fini(struct ocp_device *ocpdev, int input) __exit;
+void __rgmii_fini(struct ocp_device *ocpdev, int input);
 static inline void rgmii_fini(struct ocp_device *ocpdev, int input)
 {
 	if (ocpdev)
diff --git a/drivers/net/ibm_emac/ibm_emac_tah.c b/drivers/net/ibm_emac/ibm_emac_tah.c
index e287b451bb44..3c2d5ba522a1 100644
--- a/drivers/net/ibm_emac/ibm_emac_tah.c
+++ b/drivers/net/ibm_emac/ibm_emac_tah.c
@@ -63,7 +63,7 @@ int __init tah_attach(void *emac)
 	return 0;
 }
 
-void __exit __tah_fini(struct ocp_device *ocpdev)
+void __tah_fini(struct ocp_device *ocpdev)
 {
 	struct tah_regs *p = ocp_get_drvdata(ocpdev);
 	BUG_ON(!p);
diff --git a/drivers/net/ibm_emac/ibm_emac_tah.h b/drivers/net/ibm_emac/ibm_emac_tah.h
index 38153945a240..ccf64915e1e4 100644
--- a/drivers/net/ibm_emac/ibm_emac_tah.h
+++ b/drivers/net/ibm_emac/ibm_emac_tah.h
@@ -55,7 +55,7 @@ struct tah_regs {
 #ifdef CONFIG_IBM_EMAC_TAH
 int tah_attach(void *emac) __init;
 
-void __tah_fini(struct ocp_device *ocpdev) __exit;
+void __tah_fini(struct ocp_device *ocpdev);
 static inline void tah_fini(struct ocp_device *ocpdev)
 {
 	if (ocpdev)
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.c b/drivers/net/ibm_emac/ibm_emac_zmii.c
index 37dc8f342868..2c0fdb0cabff 100644
--- a/drivers/net/ibm_emac/ibm_emac_zmii.c
+++ b/drivers/net/ibm_emac/ibm_emac_zmii.c
@@ -215,7 +215,7 @@ void __zmii_set_speed(struct ocp_device *ocpdev, int input, int speed)
 	out_be32(&dev->base->ssr, ssr);
 }
 
-void __exit __zmii_fini(struct ocp_device *ocpdev, int input)
+void __zmii_fini(struct ocp_device *ocpdev, int input)
 {
 	struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
 	BUG_ON(!dev || dev->users == 0);
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.h b/drivers/net/ibm_emac/ibm_emac_zmii.h
index 972e3a44a09f..fad6d8bf983a 100644
--- a/drivers/net/ibm_emac/ibm_emac_zmii.h
+++ b/drivers/net/ibm_emac/ibm_emac_zmii.h
@@ -40,7 +40,7 @@ struct ibm_ocp_zmii {
 #ifdef CONFIG_IBM_EMAC_ZMII
 int zmii_attach(void *emac) __init;
 
-void __zmii_fini(struct ocp_device *ocpdev, int input) __exit;
+void __zmii_fini(struct ocp_device *ocpdev, int input);
 static inline void zmii_fini(struct ocp_device *ocpdev, int input)
 {
 	if (ocpdev)
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index c8e90861f869..3569d5b03388 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -193,8 +193,6 @@ struct ixgb_adapter {
 	u16 msg_enable;
 	struct ixgb_hw_stats stats;
 	uint32_t alloc_rx_buff_failed;
-#ifdef CONFIG_PCI_MSI
 	boolean_t have_msi;
-#endif
 };
 #endif /* _IXGB_H_ */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 6d2b059371f1..991c8833e23c 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -227,7 +227,7 @@ int
 ixgb_up(struct ixgb_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	int err;
+	int err, irq_flags = IRQF_SHARED;
 	int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
 	struct ixgb_hw *hw = &adapter->hw;
 
@@ -246,26 +246,21 @@ ixgb_up(struct ixgb_adapter *adapter)
 	/* disable interrupts and get the hardware into a known state */
 	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
 
-#ifdef CONFIG_PCI_MSI
-	{
-	boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) &
-			  IXGB_STATUS_PCIX_MODE) ? TRUE : FALSE;
-	adapter->have_msi = TRUE;
-
-	if (!pcix)
-	   adapter->have_msi = FALSE;
-	else if((err = pci_enable_msi(adapter->pdev))) {
-		DPRINTK(PROBE, ERR,
-		 "Unable to allocate MSI interrupt Error: %d\n", err);
-		adapter->have_msi = FALSE;
+	/* only enable MSI if bus is in PCI-X mode */
+	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
+		err = pci_enable_msi(adapter->pdev);
+		if (!err) {
+			adapter->have_msi = 1;
+			irq_flags = 0;
+		}
 		/* proceed to try to request regular interrupt */
 	}
-	}
 
-#endif
-	if((err = request_irq(adapter->pdev->irq, &ixgb_intr,
-	                  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
-	                  netdev->name, netdev))) {
+	err = request_irq(adapter->pdev->irq, &ixgb_intr, irq_flags,
+	                  netdev->name, netdev);
+	if (err) {
+		if (adapter->have_msi)
+			pci_disable_msi(adapter->pdev);
 		DPRINTK(PROBE, ERR,
 		 "Unable to allocate interrupt Error: %d\n", err);
 		return err;
@@ -307,11 +302,10 @@ ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
 
 	ixgb_irq_disable(adapter);
 	free_irq(adapter->pdev->irq, netdev);
-#ifdef CONFIG_PCI_MSI
-	if(adapter->have_msi == TRUE)
+
+	if (adapter->have_msi)
 		pci_disable_msi(adapter->pdev);
 
-#endif
 	if(kill_watchdog)
 		del_timer_sync(&adapter->watchdog_timer);
 #ifdef CONFIG_IXGB_NAPI
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index cf0e96adfe44..a36892457761 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1216,7 +1216,7 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
 		/* Window = 1 */
 		writel(consumer,
 		       NETXEN_CRB_NORMALIZE(adapter,
-				    recv_crb_registers[ctxid].
+				    recv_crb_registers[adapter->portnum].
 				    crb_rcv_status_consumer));
 	}
 
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 104e20456e6f..832fd69a0e59 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -40,7 +40,6 @@
 #include <linux/if_vlan.h>
 #include <linux/prefetch.h>
 #include <linux/mii.h>
-#include <linux/dmi.h>
 
 #include <asm/irq.h>
 
@@ -151,8 +150,6 @@ static const char *yukon2_name[] = {
 	"FE",		/* 0xb7 */
 };
 
-static int dmi_blacklisted;
-
 /* Access to external PHY */
 static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
 {
@@ -307,10 +304,13 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 			   PHY_M_EC_MAC_S_MSK);
 		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
 
+		/* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */
 		if (hw->chip_id == CHIP_ID_YUKON_EC)
+			/* set downshift counter to 3x and enable downshift */
 			ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
 		else
-			ectrl |= PHY_M_EC_M_DSC(2) | PHY_M_EC_S_DSC(3);
+			/* set master & slave downshift counter to 1x */
+			ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
 
 		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
 	}
@@ -327,10 +327,12 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 		/* enable automatic crossover */
 		ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
 
+		/* downshift on PHY 88E1112 and 88E1149 is changed */
 		if (sky2->autoneg == AUTONEG_ENABLE
 		    && (hw->chip_id == CHIP_ID_YUKON_XL
 			|| hw->chip_id == CHIP_ID_YUKON_EC_U
 			|| hw->chip_id == CHIP_ID_YUKON_EX)) {
+			/* set downshift counter to 3x and enable downshift */
 			ctrl &= ~PHY_M_PC_DSC_MSK;
 			ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
 		}
@@ -842,10 +844,12 @@ static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
 /* Update chip's next pointer */
 static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
 {
-	q = Y2_QADDR(q, PREF_UNIT_PUT_IDX);
+	/* Make sure write' to descriptors are complete before we tell hardware */
 	wmb();
-	sky2_write16(hw, q, idx);
-	sky2_read16(hw, q);
+	sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
+
+	/* Synchronize I/O on since next processor may write to tail */
+	mmiowb();
 }
 
 
@@ -977,6 +981,7 @@ stopped:
 
 	/* reset the Rx prefetch unit */
 	sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
+	mmiowb();
 }
 
 /* Clean out receive buffer area, assumes receiver hardware stopped */
@@ -1196,7 +1201,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
 	}
 
 	/* Tell chip about available buffers */
-	sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
+	sky2_put_idx(hw, rxq, sky2->rx_put);
 	return 0;
 nomem:
 	sky2_rx_clean(sky2);
@@ -1538,6 +1543,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
 	}
 
 	sky2->tx_cons = idx;
+	smp_mb();
+
 	if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
 		netif_wake_queue(dev);
 }
@@ -1577,13 +1584,6 @@ static int sky2_down(struct net_device *dev)
 	imask &= ~portirq_msk[port];
 	sky2_write32(hw, B0_IMSK, imask);
 
-	/*
-	 * Both ports share the NAPI poll on port 0, so if necessary undo the
-	 * the disable that is done in dev_close.
-	 */
-	if (sky2->port == 0 && hw->ports > 1)
-		netif_poll_enable(dev);
-
 	sky2_gmac_reset(hw, port);
 
 	/* Stop transmitter */
@@ -2139,8 +2139,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
 		switch (le->opcode & ~HW_OWNER) {
 		case OP_RXSTAT:
 			skb = sky2_receive(dev, length, status);
-			if (!skb)
+			if (unlikely(!skb)) {
+				sky2->net_stats.rx_dropped++;
 				goto force_update;
+			}
 
 			skb->protocol = eth_type_trans(skb, dev);
 			sky2->net_stats.rx_packets++;
@@ -2221,6 +2223,7 @@ force_update:
 
 	/* Fully processed status ring so clear irq */
 	sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+	mmiowb();
 
 exit_loop:
 	if (buf_write[0]) {
@@ -2341,6 +2344,12 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
 	printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
 	       dev->name, status);
 
+	if (status & GM_IS_RX_CO_OV)
+		gma_read16(hw, port, GM_RX_IRQ_SRC);
+
+	if (status & GM_IS_TX_CO_OV)
+		gma_read16(hw, port, GM_TX_IRQ_SRC);
+
 	if (status & GM_IS_RX_FF_OR) {
 		++sky2->net_stats.rx_fifo_errors;
 		sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
@@ -2439,6 +2448,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 	if (work_done < work_limit) {
 		netif_rx_complete(dev0);
 
+		/* end of interrupt, re-enables also acts as I/O synchronization */
 		sky2_read32(hw, B0_Y2_SP_LISR);
 		return 0;
 	} else {
@@ -2534,17 +2544,6 @@ static int __devinit sky2_init(struct sky2_hw *hw)
 		return -EOPNOTSUPP;
 	}
 
-
-	/* Some Gigabyte motherboards have 88e8056 but cause problems
-	 * There is some unresolved hardware related problem that causes
-	 * descriptor errors and receive data corruption.
-	 */
-	if (hw->chip_id == CHIP_ID_YUKON_EC_U && dmi_blacklisted) {
-		dev_err(&hw->pdev->dev,
-			"88E8056 on this motherboard not supported\n");
-		return -EOPNOTSUPP;
-	}
-
 	hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
 	hw->ports = 1;
 	t8 = sky2_read8(hw, B2_Y2_HW_RES);
@@ -3910,24 +3909,8 @@ static struct pci_driver sky2_driver = {
 	.shutdown = sky2_shutdown,
 };
 
-static struct dmi_system_id __initdata broken_dmi_table[] = {
-	{
-		.ident = "Gigabyte 965P-S3",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Gigabyte Technology Co., Ltd."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "965P-S3"),
-
-		},
-	},
-	{ }
-};
-
 static int __init sky2_init_module(void)
 {
-	/* Look for sick motherboards */
-	if (dmi_check_system(broken_dmi_table))
-		dmi_blacklisted = 1;
-
 	return pci_register_driver(&sky2_driver);
 }
 
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 108adbf5b5eb..c3964c3d89d9 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -430,7 +430,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
 	/* and we need to have it 128 byte aligned, therefore we allocate a
 	 * bit more */
 	/* allocate an skb */
-	descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
+	descr->skb = netdev_alloc_skb(card->netdev,
+				      bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
 	if (!descr->skb) {
 		if (netif_msg_rx_err(card) && net_ratelimit())
 			pr_err("Not enough memory to allocate rx buffer\n");
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index f4d45d4d835b..d070b18e539d 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -153,7 +153,7 @@ nlmclnt_recovery(struct nlm_host *host)
 	if (!host->h_reclaiming++) {
 		nlm_get_host(host);
 		__module_get(THIS_MODULE);
-		if (kernel_thread(reclaimer, host, CLONE_KERNEL) < 0)
+		if (kernel_thread(reclaimer, host, CLONE_FS | CLONE_FILES) < 0)
 			module_put(THIS_MODULE);
 	}
 }
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index ad21c0713efa..96070bff93fc 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -221,7 +221,7 @@ nlm_bind_host(struct nlm_host *host)
 					host->h_nextrebind - jiffies);
 		}
 	} else {
-		unsigned long increment = nlmsvc_timeout * HZ;
+		unsigned long increment = nlmsvc_timeout;
 		struct rpc_timeout timeparms = {
 			.to_initval	= increment,
 			.to_increment	= increment,
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index 9702956d206c..5316e307a49d 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -586,10 +586,6 @@ static struct rpc_version nlm_version3 = {
 		.procs		= nlm_procedures,
 };
 
-#ifdef CONFIG_LOCKD_V4
-extern struct rpc_version nlm_version4;
-#endif
-
 static struct rpc_version *	nlm_versions[] = {
 	[1] = &nlm_version1,
 	[3] = &nlm_version3,
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index ce1efdbe1b3a..846fc1d639dd 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -123,7 +123,8 @@ static __be32 *
 nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
 {
 	struct file_lock	*fl = &lock->fl;
-	__s64			len, start, end;
+	__u64			len, start;
+	__s64			end;
 
 	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
 					    &lock->len, NLM_MAXSTRLEN))
@@ -417,7 +418,8 @@ nlm4clt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
 	if (resp->status == nlm_lck_denied) {
 		struct file_lock	*fl = &resp->lock.fl;
 		u32 excl;
-		s64 start, end, len;
+		__u64 start, len;
+		__s64 end;
 
 		memset(&resp->lock, 0, sizeof(resp->lock));
 		locks_init_lock(fl);
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index db3d7919c601..c2bb14e053e1 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -24,7 +24,7 @@ enum nfs4_callback_opnum {
 };
 
 struct cb_compound_hdr_arg {
-	int taglen;
+	unsigned int taglen;
 	const char *tag;
 	unsigned int callback_ident;
 	unsigned nops;
@@ -32,7 +32,7 @@ struct cb_compound_hdr_arg {
 
 struct cb_compound_hdr_res {
 	__be32 *status;
-	int taglen;
+	unsigned int taglen;
 	const char *tag;
 	__be32 *nops;
 };
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 841c99a9b11c..7f37d1bea83f 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -226,7 +226,7 @@ restart:
 		spin_unlock(&clp->cl_lock);
 }
 
-int nfs_do_expire_all_delegations(void *ptr)
+static int nfs_do_expire_all_delegations(void *ptr)
 {
 	struct nfs_client *clp = ptr;
 	struct nfs_delegation *delegation;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 3df428816559..ac92e45432a3 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -607,7 +607,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	return res;
 }
 
-loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
+static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
 {
 	mutex_lock(&filp->f_path.dentry->d_inode->i_mutex);
 	switch (origin) {
@@ -633,7 +633,7 @@ out:
  * All directory operations under NFS are synchronous, so fsync()
  * is a dummy operation.
  */
-int nfs_fsync_dir(struct file *filp, struct dentry *dentry, int datasync)
+static int nfs_fsync_dir(struct file *filp, struct dentry *dentry, int datasync)
 {
 	dfprintk(VFS, "NFS: fsync_dir(%s/%s) datasync %d\n",
 			dentry->d_parent->d_name.name, dentry->d_name.name,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d6a30e965787..648e0ac0f90e 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -790,7 +790,7 @@ out:
 	return -EACCES;
 }
 
-int nfs4_recover_expired_lease(struct nfs_server *server)
+static int nfs4_recover_expired_lease(struct nfs_server *server)
 {
 	struct nfs_client *clp = server->nfs_client;
 	int ret;
@@ -2748,7 +2748,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
 /* This is the error handling routine for processes that are allowed
  * to sleep.
  */
-int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
+static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
 {
 	struct nfs_client *clp = server->nfs_client;
 	int ret = errorcode;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5fffbdfa971f..8ed79d5c54f9 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -104,7 +104,7 @@ struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
 	return cred;
 }
 
-struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
 {
 	struct nfs4_state_owner *sp;
 
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 938f37166788..8003c91ccb9a 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -646,10 +646,10 @@ static int encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg)
 {
 	__be32 *p;
 
-	RESERVE_SPACE(8+sizeof(arg->stateid->data));
+	RESERVE_SPACE(8+NFS4_STATEID_SIZE);
 	WRITE32(OP_CLOSE);
 	WRITE32(arg->seqid->sequence->counter);
-	WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data));
+	WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
 
 	return 0;
 }
@@ -793,17 +793,17 @@ static int encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args)
 	WRITE64(nfs4_lock_length(args->fl));
 	WRITE32(args->new_lock_owner);
 	if (args->new_lock_owner){
-		RESERVE_SPACE(40);
+		RESERVE_SPACE(4+NFS4_STATEID_SIZE+20);
 		WRITE32(args->open_seqid->sequence->counter);
-		WRITEMEM(args->open_stateid->data, sizeof(args->open_stateid->data));
+		WRITEMEM(args->open_stateid->data, NFS4_STATEID_SIZE);
 		WRITE32(args->lock_seqid->sequence->counter);
 		WRITE64(args->lock_owner.clientid);
 		WRITE32(4);
 		WRITE32(args->lock_owner.id);
 	}
 	else {
-		RESERVE_SPACE(20);
-		WRITEMEM(args->lock_stateid->data, sizeof(args->lock_stateid->data));
+		RESERVE_SPACE(NFS4_STATEID_SIZE+4);
+		WRITEMEM(args->lock_stateid->data, NFS4_STATEID_SIZE);
 		WRITE32(args->lock_seqid->sequence->counter);
 	}
 
@@ -830,11 +830,11 @@ static int encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *arg
 {
 	__be32 *p;
 
-	RESERVE_SPACE(44);
+	RESERVE_SPACE(12+NFS4_STATEID_SIZE+16);
 	WRITE32(OP_LOCKU);
 	WRITE32(nfs4_lock_type(args->fl, 0));
 	WRITE32(args->seqid->sequence->counter);
-	WRITEMEM(args->stateid->data, sizeof(args->stateid->data));
+	WRITEMEM(args->stateid->data, NFS4_STATEID_SIZE);
 	WRITE64(args->fl->fl_start);
 	WRITE64(nfs4_lock_length(args->fl));
 
@@ -966,9 +966,9 @@ static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struc
 {
 	__be32 *p;
 
-	RESERVE_SPACE(4+sizeof(stateid->data));
+	RESERVE_SPACE(4+NFS4_STATEID_SIZE);
 	WRITE32(NFS4_OPEN_CLAIM_DELEGATE_CUR);
-	WRITEMEM(stateid->data, sizeof(stateid->data));
+	WRITEMEM(stateid->data, NFS4_STATEID_SIZE);
 	encode_string(xdr, name->len, name->name);
 }
 
@@ -996,9 +996,9 @@ static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_con
 {
 	__be32 *p;
 
-	RESERVE_SPACE(8+sizeof(arg->stateid->data));
+	RESERVE_SPACE(4+NFS4_STATEID_SIZE+4);
 	WRITE32(OP_OPEN_CONFIRM);
-	WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data));
+	WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
 	WRITE32(arg->seqid->sequence->counter);
 
 	return 0;
@@ -1008,9 +1008,9 @@ static int encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closea
 {
 	__be32 *p;
 
-	RESERVE_SPACE(8+sizeof(arg->stateid->data));
+	RESERVE_SPACE(4+NFS4_STATEID_SIZE+4);
 	WRITE32(OP_OPEN_DOWNGRADE);
-	WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data));
+	WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
 	WRITE32(arg->seqid->sequence->counter);
 	encode_share_access(xdr, arg->open_flags);
 	return 0;
@@ -1045,12 +1045,12 @@ static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context
 	nfs4_stateid stateid;
 	__be32 *p;
 
-	RESERVE_SPACE(16);
+	RESERVE_SPACE(NFS4_STATEID_SIZE);
 	if (ctx->state != NULL) {
 		nfs4_copy_stateid(&stateid, ctx->state, ctx->lockowner);
-		WRITEMEM(stateid.data, sizeof(stateid.data));
+		WRITEMEM(stateid.data, NFS4_STATEID_SIZE);
 	} else
-		WRITEMEM(zero_stateid.data, sizeof(zero_stateid.data));
+		WRITEMEM(zero_stateid.data, NFS4_STATEID_SIZE);
 }
 
 static int encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args)
@@ -1079,10 +1079,10 @@ static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg
 	int replen;
 	__be32 *p;
 
-	RESERVE_SPACE(32+sizeof(nfs4_verifier));
+	RESERVE_SPACE(12+NFS4_VERIFIER_SIZE+20);
 	WRITE32(OP_READDIR);
 	WRITE64(readdir->cookie);
-	WRITEMEM(readdir->verifier.data, sizeof(readdir->verifier.data));
+	WRITEMEM(readdir->verifier.data, NFS4_VERIFIER_SIZE);
 	WRITE32(readdir->count >> 1);  /* We're not doing readdirplus */
 	WRITE32(readdir->count);
 	WRITE32(2);
@@ -1190,9 +1190,9 @@ encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg)
 {
 	__be32 *p;
 
-	RESERVE_SPACE(4+sizeof(zero_stateid.data));
+	RESERVE_SPACE(4+NFS4_STATEID_SIZE);
 	WRITE32(OP_SETATTR);
-	WRITEMEM(zero_stateid.data, sizeof(zero_stateid.data));
+	WRITEMEM(zero_stateid.data, NFS4_STATEID_SIZE);
 	RESERVE_SPACE(2*4);
 	WRITE32(1);
 	WRITE32(FATTR4_WORD0_ACL);
@@ -1220,9 +1220,9 @@ static int encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs *
 	int status;
 	__be32 *p;
 
-	RESERVE_SPACE(4+sizeof(arg->stateid.data));
+	RESERVE_SPACE(4+NFS4_STATEID_SIZE);
 	WRITE32(OP_SETATTR);
-	WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
+	WRITEMEM(arg->stateid.data, NFS4_STATEID_SIZE);
 
 	if ((status = encode_attrs(xdr, arg->iap, server)))
 		return status;
@@ -1234,9 +1234,9 @@ static int encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclien
 {
 	__be32 *p;
 
-	RESERVE_SPACE(4 + sizeof(setclientid->sc_verifier->data));
+	RESERVE_SPACE(4 + NFS4_VERIFIER_SIZE);
 	WRITE32(OP_SETCLIENTID);
-	WRITEMEM(setclientid->sc_verifier->data, sizeof(setclientid->sc_verifier->data));
+	WRITEMEM(setclientid->sc_verifier->data, NFS4_VERIFIER_SIZE);
 
 	encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name);
 	RESERVE_SPACE(4);
@@ -1253,10 +1253,10 @@ static int encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_c
 {
 	__be32 *p;
 
-	RESERVE_SPACE(12 + sizeof(client_state->cl_confirm.data));
+	RESERVE_SPACE(12 + NFS4_VERIFIER_SIZE);
 	WRITE32(OP_SETCLIENTID_CONFIRM);
 	WRITE64(client_state->cl_clientid);
-	WRITEMEM(client_state->cl_confirm.data, sizeof(client_state->cl_confirm.data));
+	WRITEMEM(client_state->cl_confirm.data, NFS4_VERIFIER_SIZE);
 
 	return 0;
 }
@@ -1284,10 +1284,10 @@ static int encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *statei
 {
 	__be32 *p;
 
-	RESERVE_SPACE(20);
+	RESERVE_SPACE(4+NFS4_STATEID_SIZE);
 
 	WRITE32(OP_DELEGRETURN);
-	WRITEMEM(stateid->data, sizeof(stateid->data));
+	WRITEMEM(stateid->data, NFS4_STATEID_SIZE);
 	return 0;
 
 }
@@ -2494,7 +2494,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
2494 int i; 2494 int i;
2495 dprintk("%s: using first %d of %d servers returned for location %d\n", __FUNCTION__, NFS4_FS_LOCATION_MAXSERVERS, m, res->nlocations); 2495 dprintk("%s: using first %d of %d servers returned for location %d\n", __FUNCTION__, NFS4_FS_LOCATION_MAXSERVERS, m, res->nlocations);
2496 for (i = loc->nservers; i < m; i++) { 2496 for (i = loc->nservers; i < m; i++) {
2497 int len; 2497 unsigned int len;
2498 char *data; 2498 char *data;
2499 status = decode_opaque_inline(xdr, &len, &data); 2499 status = decode_opaque_inline(xdr, &len, &data);
2500 if (unlikely(status != 0)) 2500 if (unlikely(status != 0))
@@ -2642,7 +2642,7 @@ static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t
2642 return 0; 2642 return 0;
2643} 2643}
2644 2644
2645static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, int32_t *uid) 2645static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, uint32_t *uid)
2646{ 2646{
2647 uint32_t len; 2647 uint32_t len;
2648 __be32 *p; 2648 __be32 *p;
@@ -2667,7 +2667,7 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nf
2667 return 0; 2667 return 0;
2668} 2668}
2669 2669
2670static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, int32_t *gid) 2670static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, uint32_t *gid)
2671{ 2671{
2672 uint32_t len; 2672 uint32_t len;
2673 __be32 *p; 2673 __be32 *p;
@@ -2897,8 +2897,8 @@ static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
2897 status = decode_op_hdr(xdr, OP_CLOSE); 2897 status = decode_op_hdr(xdr, OP_CLOSE);
2898 if (status) 2898 if (status)
2899 return status; 2899 return status;
2900 READ_BUF(sizeof(res->stateid.data)); 2900 READ_BUF(NFS4_STATEID_SIZE);
2901 COPYMEM(res->stateid.data, sizeof(res->stateid.data)); 2901 COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
2902 return 0; 2902 return 0;
2903} 2903}
2904 2904
@@ -3186,8 +3186,8 @@ static int decode_lock(struct xdr_stream *xdr, struct nfs_lock_res *res)
3186 3186
3187 status = decode_op_hdr(xdr, OP_LOCK); 3187 status = decode_op_hdr(xdr, OP_LOCK);
3188 if (status == 0) { 3188 if (status == 0) {
3189 READ_BUF(sizeof(res->stateid.data)); 3189 READ_BUF(NFS4_STATEID_SIZE);
3190 COPYMEM(res->stateid.data, sizeof(res->stateid.data)); 3190 COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
3191 } else if (status == -NFS4ERR_DENIED) 3191 } else if (status == -NFS4ERR_DENIED)
3192 return decode_lock_denied(xdr, NULL); 3192 return decode_lock_denied(xdr, NULL);
3193 return status; 3193 return status;
@@ -3209,8 +3209,8 @@ static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res)
3209 3209
3210 status = decode_op_hdr(xdr, OP_LOCKU); 3210 status = decode_op_hdr(xdr, OP_LOCKU);
3211 if (status == 0) { 3211 if (status == 0) {
3212 READ_BUF(sizeof(res->stateid.data)); 3212 READ_BUF(NFS4_STATEID_SIZE);
3213 COPYMEM(res->stateid.data, sizeof(res->stateid.data)); 3213 COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
3214 } 3214 }
3215 return status; 3215 return status;
3216} 3216}
@@ -3251,8 +3251,8 @@ static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
3251 res->delegation_type = 0; 3251 res->delegation_type = 0;
3252 return 0; 3252 return 0;
3253 } 3253 }
3254 READ_BUF(20); 3254 READ_BUF(NFS4_STATEID_SIZE+4);
3255 COPYMEM(res->delegation.data, sizeof(res->delegation.data)); 3255 COPYMEM(res->delegation.data, NFS4_STATEID_SIZE);
3256 READ32(res->do_recall); 3256 READ32(res->do_recall);
3257 switch (delegation_type) { 3257 switch (delegation_type) {
3258 case NFS4_OPEN_DELEGATE_READ: 3258 case NFS4_OPEN_DELEGATE_READ:
@@ -3275,8 +3275,8 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
3275 status = decode_op_hdr(xdr, OP_OPEN); 3275 status = decode_op_hdr(xdr, OP_OPEN);
3276 if (status) 3276 if (status)
3277 return status; 3277 return status;
3278 READ_BUF(sizeof(res->stateid.data)); 3278 READ_BUF(NFS4_STATEID_SIZE);
3279 COPYMEM(res->stateid.data, sizeof(res->stateid.data)); 3279 COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
3280 3280
3281 decode_change_info(xdr, &res->cinfo); 3281 decode_change_info(xdr, &res->cinfo);
3282 3282
@@ -3302,8 +3302,8 @@ static int decode_open_confirm(struct xdr_stream *xdr, struct nfs_open_confirmre
3302 status = decode_op_hdr(xdr, OP_OPEN_CONFIRM); 3302 status = decode_op_hdr(xdr, OP_OPEN_CONFIRM);
3303 if (status) 3303 if (status)
3304 return status; 3304 return status;
3305 READ_BUF(sizeof(res->stateid.data)); 3305 READ_BUF(NFS4_STATEID_SIZE);
3306 COPYMEM(res->stateid.data, sizeof(res->stateid.data)); 3306 COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
3307 return 0; 3307 return 0;
3308} 3308}
3309 3309
@@ -3315,8 +3315,8 @@ static int decode_open_downgrade(struct xdr_stream *xdr, struct nfs_closeres *re
3315 status = decode_op_hdr(xdr, OP_OPEN_DOWNGRADE); 3315 status = decode_op_hdr(xdr, OP_OPEN_DOWNGRADE);
3316 if (status) 3316 if (status)
3317 return status; 3317 return status;
3318 READ_BUF(sizeof(res->stateid.data)); 3318 READ_BUF(NFS4_STATEID_SIZE);
3319 COPYMEM(res->stateid.data, sizeof(res->stateid.data)); 3319 COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
3320 return 0; 3320 return 0;
3321} 3321}
3322 3322
@@ -3590,9 +3590,9 @@ static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp)
3590 } 3590 }
3591 READ32(nfserr); 3591 READ32(nfserr);
3592 if (nfserr == NFS_OK) { 3592 if (nfserr == NFS_OK) {
3593 READ_BUF(8 + sizeof(clp->cl_confirm.data)); 3593 READ_BUF(8 + NFS4_VERIFIER_SIZE);
3594 READ64(clp->cl_clientid); 3594 READ64(clp->cl_clientid);
3595 COPYMEM(clp->cl_confirm.data, sizeof(clp->cl_confirm.data)); 3595 COPYMEM(clp->cl_confirm.data, NFS4_VERIFIER_SIZE);
3596 } else if (nfserr == NFSERR_CLID_INUSE) { 3596 } else if (nfserr == NFSERR_CLID_INUSE) {
3597 uint32_t len; 3597 uint32_t len;
3598 3598
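
The nfs4xdr.c hunks above swap literal byte counts and sizeof() expressions for NFS4_STATEID_SIZE and NFS4_VERIFIER_SIZE (defined in the include/linux/nfs4.h hunk further down), so the space reserved in the XDR stream and the length actually written or read come from the same named protocol constant. A minimal userspace sketch of that pattern, with reserve_space() as a hypothetical stand-in for the kernel's RESERVE_SPACE() macro and memcpy() standing in for WRITEMEM():

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define NFS4_STATEID_SIZE 16			/* mirrors the new define in nfs4.h */

struct xdr_sketch { uint8_t data[128]; size_t used; };

/* stand-in for RESERVE_SPACE(): fail loudly if the buffer cannot hold n bytes */
static uint8_t *reserve_space(struct xdr_sketch *b, size_t n)
{
	assert(b->used + n <= sizeof(b->data));
	uint8_t *p = b->data + b->used;
	b->used += n;
	return p;
}

int main(void)
{
	struct xdr_sketch buf = { .used = 0 };
	uint8_t stateid[NFS4_STATEID_SIZE] = { 0 };

	/* reservation and copy share one constant, so they cannot silently
	 * drift apart the way a literal 44 and a sizeof() on some other
	 * object eventually can */
	uint8_t *p = reserve_space(&buf, 4 + NFS4_STATEID_SIZE);
	memcpy(p + 4, stateid, NFS4_STATEID_SIZE);
	return 0;
}
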
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 9a55807b2a70..7bd7cb95c034 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -79,7 +79,7 @@ void nfs_readdata_release(void *data)
79static 79static
80int nfs_return_empty_page(struct page *page) 80int nfs_return_empty_page(struct page *page)
81{ 81{
82 memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE); 82 zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
83 SetPageUptodate(page); 83 SetPageUptodate(page);
84 unlock_page(page); 84 unlock_page(page);
85 return 0; 85 return 0;
@@ -103,10 +103,10 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
103 pglen = PAGE_CACHE_SIZE - base; 103 pglen = PAGE_CACHE_SIZE - base;
104 for (;;) { 104 for (;;) {
105 if (remainder <= pglen) { 105 if (remainder <= pglen) {
106 memclear_highpage_flush(*pages, base, remainder); 106 zero_user_page(*pages, base, remainder, KM_USER0);
107 break; 107 break;
108 } 108 }
109 memclear_highpage_flush(*pages, base, pglen); 109 zero_user_page(*pages, base, pglen, KM_USER0);
110 pages++; 110 pages++;
111 remainder -= pglen; 111 remainder -= pglen;
112 pglen = PAGE_CACHE_SIZE; 112 pglen = PAGE_CACHE_SIZE;
@@ -130,7 +130,7 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
130 return PTR_ERR(new); 130 return PTR_ERR(new);
131 } 131 }
132 if (len < PAGE_CACHE_SIZE) 132 if (len < PAGE_CACHE_SIZE)
133 memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len); 133 zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);
134 134
135 nfs_list_add_request(new, &one_request); 135 nfs_list_add_request(new, &one_request);
136 if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE) 136 if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
@@ -532,7 +532,7 @@ readpage_async_filler(void *data, struct page *page)
532 return PTR_ERR(new); 532 return PTR_ERR(new);
533 } 533 }
534 if (len < PAGE_CACHE_SIZE) 534 if (len < PAGE_CACHE_SIZE)
535 memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len); 535 zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);
536 nfs_pageio_add_request(desc->pgio, new); 536 nfs_pageio_add_request(desc->pgio, new);
537 return 0; 537 return 0;
538} 538}
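
The read.c hunks replace memclear_highpage_flush() with zero_user_page(), which takes an explicit kmap slot (KM_USER0 at these call sites) in addition to the page, offset and length. As a hedged sketch inferred from the call sites rather than a copy of the mm helper, it is expected to do roughly:

#include <linux/highmem.h>
#include <linux/string.h>

/* sketch of what zero_user_page(page, offset, size, km) is expected to do */
static inline void zero_user_page_sketch(struct page *page, unsigned int offset,
					 unsigned int size, enum km_type km)
{
	void *addr = kmap_atomic(page, km);	/* temporary mapping, works for highmem */
	memset(addr + offset, 0, size);		/* clear just the requested range */
	flush_dcache_page(page);		/* keep user-space mappings coherent */
	kunmap_atomic(addr, km);
}

The NFS callers use it to zero the tail of a short read so the page can still be marked up to date.
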
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index de92b9509d94..b084c03ce493 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -58,7 +58,7 @@ struct nfs_write_data *nfs_commit_alloc(void)
58 return p; 58 return p;
59} 59}
60 60
61void nfs_commit_rcu_free(struct rcu_head *head) 61static void nfs_commit_rcu_free(struct rcu_head *head)
62{ 62{
63 struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu); 63 struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
64 if (p && (p->pagevec != &p->page_array[0])) 64 if (p && (p->pagevec != &p->page_array[0]))
@@ -168,7 +168,7 @@ static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int
168 if (count != nfs_page_length(page)) 168 if (count != nfs_page_length(page))
169 return; 169 return;
170 if (count != PAGE_CACHE_SIZE) 170 if (count != PAGE_CACHE_SIZE)
171 memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count); 171 zero_user_page(page, count, PAGE_CACHE_SIZE - count, KM_USER0);
172 SetPageUptodate(page); 172 SetPageUptodate(page);
173} 173}
174 174
@@ -922,7 +922,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned i
922 return 0; 922 return 0;
923 out_bad: 923 out_bad:
924 while (!list_empty(head)) { 924 while (!list_empty(head)) {
925 struct nfs_page *req = nfs_list_entry(head->next); 925 req = nfs_list_entry(head->next);
926 nfs_list_remove_request(req); 926 nfs_list_remove_request(req);
927 nfs_redirty_request(req); 927 nfs_redirty_request(req);
928 nfs_end_page_writeback(req->wb_page); 928 nfs_end_page_writeback(req->wb_page);
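
Besides making nfs_commit_rcu_free() static, the write.c change in the out_bad: path of nfs_flush_one() drops a second "struct nfs_page *req" declaration and reuses the req already declared at function scope; behaviour is unchanged, but the inner declaration shadowed the outer one. A tiny, hypothetical illustration of the shadowing pattern (not the NFS code), which gcc flags with -Wshadow:

#include <stdio.h>

int main(void)
{
	int req = 1;			/* function-scope variable */
	for (int i = 0; i < 1; i++) {
		int req = 2;		/* shadows the outer req; outer stays 1 */
		(void)req;
	}
	printf("req = %d\n", req);	/* prints 1 */
	return 0;
}
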
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
index dd12b4c9e613..12bfe09de2b1 100644
--- a/include/linux/lockd/xdr4.h
+++ b/include/linux/lockd/xdr4.h
@@ -42,5 +42,6 @@ int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
42int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *); 42int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *);
43int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *); 43int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
44 */ 44 */
45extern struct rpc_version nlm_version4;
45 46
46#endif /* LOCKD_XDR4_H */ 47#endif /* LOCKD_XDR4_H */
diff --git a/include/linux/mii.h b/include/linux/mii.h
index beddc6d3b0f6..151b7e0182c7 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -56,8 +56,8 @@
56#define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */ 56#define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */
57#define BMSR_RESV 0x00c0 /* Unused... */ 57#define BMSR_RESV 0x00c0 /* Unused... */
58#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ 58#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
59#define BMSR_100FULL2 0x0200 /* Can do 100BASE-T2 HDX */ 59#define BMSR_100HALF2 0x0200 /* Can do 100BASE-T2 HDX */
60#define BMSR_100HALF2 0x0400 /* Can do 100BASE-T2 FDX */ 60#define BMSR_100FULL2 0x0400 /* Can do 100BASE-T2 FDX */
61#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ 61#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
62#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ 62#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
63#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ 63#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
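
The mii.h hunk swaps the two definitions so that BMSR_100HALF2 is bit 9 (0x0200) and BMSR_100FULL2 is bit 10 (0x0400), matching the 100BASE-T2 half/full duplex ability bits of the clause-22 status register. A small self-contained check using the corrected definitions (the bmsr value below is a made-up register read, not from any driver):

#include <stdint.h>
#include <stdio.h>

#define BMSR_100HALF2	0x0200		/* can do 100BASE-T2 half duplex (bit 9) */
#define BMSR_100FULL2	0x0400		/* can do 100BASE-T2 full duplex (bit 10) */

int main(void)
{
	uint16_t bmsr = 0x0600;		/* pretend value read from MII register 1 */

	if (bmsr & BMSR_100HALF2)
		puts("PHY can do 100BASE-T2 HDX");
	if (bmsr & BMSR_100FULL2)
		puts("PHY can do 100BASE-T2 FDX");
	return 0;
}
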
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 1be5be88debe..7e7f33a38fc0 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -16,6 +16,7 @@
16#include <linux/types.h> 16#include <linux/types.h>
17 17
18#define NFS4_VERIFIER_SIZE 8 18#define NFS4_VERIFIER_SIZE 8
19#define NFS4_STATEID_SIZE 16
19#define NFS4_FHSIZE 128 20#define NFS4_FHSIZE 128
20#define NFS4_MAXPATHLEN PATH_MAX 21#define NFS4_MAXPATHLEN PATH_MAX
21#define NFS4_MAXNAMLEN NAME_MAX 22#define NFS4_MAXNAMLEN NAME_MAX
@@ -113,7 +114,7 @@ struct nfs4_acl {
113}; 114};
114 115
115typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier; 116typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier;
116typedef struct { char data[16]; } nfs4_stateid; 117typedef struct { char data[NFS4_STATEID_SIZE]; } nfs4_stateid;
117 118
118enum nfs_opnum4 { 119enum nfs_opnum4 {
119 OP_ACCESS = 3, 120 OP_ACCESS = 3,
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index 4a68125b6de6..ad293760f6eb 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -47,6 +47,8 @@ extern struct dentry *rpc_mkpipe(struct dentry *, const char *, void *, struct r
47extern int rpc_unlink(struct dentry *); 47extern int rpc_unlink(struct dentry *);
48extern struct vfsmount *rpc_get_mount(void); 48extern struct vfsmount *rpc_get_mount(void);
49extern void rpc_put_mount(void); 49extern void rpc_put_mount(void);
50extern int register_rpc_pipefs(void);
51extern void unregister_rpc_pipefs(void);
50 52
51#endif 53#endif
52#endif 54#endif
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index fa89ce6ce076..34f7590506fa 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -244,6 +244,8 @@ void xprt_disconnect(struct rpc_xprt *xprt);
244 */ 244 */
245struct rpc_xprt * xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to); 245struct rpc_xprt * xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to);
246struct rpc_xprt * xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to); 246struct rpc_xprt * xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to);
247int init_socket_xprt(void);
248void cleanup_socket_xprt(void);
247 249
248/* 250/*
249 * Reserved bit positions in xprt->state 251 * Reserved bit positions in xprt->state
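
The rpc_pipe_fs.h and xprt.h hunks promote prototypes that net/sunrpc/sunrpc_syms.c used to declare locally (see the sunrpc_syms.c hunk at the end of this diff) into the shared headers. Declaring them where the definition site also includes the header lets the compiler catch a signature mismatch; a hypothetical two-file illustration, not the sunrpc sources:

/* foo.h */
int init_socket_xprt(void);

/* foo.c: includes foo.h, so changing the definition's signature without
 * updating the header is a compile error rather than a silent mismatch */
#include "foo.h"
int init_socket_xprt(void) { return 0; }

/* caller.c: no file-local "extern int init_socket_xprt(void);" needed */
#include "foo.h"
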
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 7eae8665ff59..ce0719a2cfeb 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -160,7 +160,7 @@ static inline int cancel_delayed_work(struct delayed_work *work)
160{ 160{
161 int ret; 161 int ret;
162 162
163 ret = del_timer(&work->timer); 163 ret = del_timer_sync(&work->timer);
164 if (ret) 164 if (ret)
165 work_clear_pending(&work->work); 165 work_clear_pending(&work->work);
166 return ret; 166 return ret;
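
The workqueue.h change has cancel_delayed_work() use del_timer_sync() instead of del_timer(), so on SMP it also waits for a timer callback already running on another CPU before returning; previously that callback could still queue the work just after the "cancel". A hedged usage sketch with hypothetical names (my_dev, my_dw, wq):

static void my_driver_stop(struct my_dev *dev)
{
	/* nonzero only if the delayed work was still pending and its timer
	 * was cancelled before firing */
	if (!cancel_delayed_work(&dev->my_dw))
		flush_workqueue(dev->wq);	/* timer already fired: drain the queued work */
}
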
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index bfc9a35bad33..1dae3dfc66a9 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -665,7 +665,8 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event,
665 /* Detach sockets from device */ 665 /* Detach sockets from device */
666 read_lock(&hci_sk_list.lock); 666 read_lock(&hci_sk_list.lock);
667 sk_for_each(sk, node, &hci_sk_list.head) { 667 sk_for_each(sk, node, &hci_sk_list.head) {
668 lock_sock(sk); 668 local_bh_disable();
669 bh_lock_sock_nested(sk);
669 if (hci_pi(sk)->hdev == hdev) { 670 if (hci_pi(sk)->hdev == hdev) {
670 hci_pi(sk)->hdev = NULL; 671 hci_pi(sk)->hdev = NULL;
671 sk->sk_err = EPIPE; 672 sk->sk_err = EPIPE;
@@ -674,7 +675,8 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event,
674 675
675 hci_dev_put(hdev); 676 hci_dev_put(hdev);
676 } 677 }
677 release_sock(sk); 678 bh_unlock_sock(sk);
679 local_bh_enable();
678 } 680 }
679 read_unlock(&hci_sk_list.lock); 681 read_unlock(&hci_sk_list.lock);
680 } 682 }
diff --git a/net/core/dev.c b/net/core/dev.c
index 8301e2ac747f..f2b61111e26d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -116,6 +116,7 @@
116#include <linux/dmaengine.h> 116#include <linux/dmaengine.h>
117#include <linux/err.h> 117#include <linux/err.h>
118#include <linux/ctype.h> 118#include <linux/ctype.h>
119#include <linux/if_arp.h>
119 120
120/* 121/*
121 * The list of packet types we will receive (as opposed to discard) 122 * The list of packet types we will receive (as opposed to discard)
@@ -217,6 +218,73 @@ extern void netdev_unregister_sysfs(struct net_device *);
217#define netdev_unregister_sysfs(dev) do { } while(0) 218#define netdev_unregister_sysfs(dev) do { } while(0)
218#endif 219#endif
219 220
221#ifdef CONFIG_DEBUG_LOCK_ALLOC
222/*
223 * register_netdevice() inits dev->_xmit_lock and sets lockdep class
224 * according to dev->type
225 */
226static const unsigned short netdev_lock_type[] =
227 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
228 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
229 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
230 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
231 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
232 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
233 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
234 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
235 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
236 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
237 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
238 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
239 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
240 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
241 ARPHRD_NONE};
242
243static const char *netdev_lock_name[] =
244 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
245 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
246 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
247 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
248 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
249 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
250 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
251 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
252 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
253 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
254 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
255 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
256 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
257 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
258 "_xmit_NONE"};
259
260static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
261
262static inline unsigned short netdev_lock_pos(unsigned short dev_type)
263{
264 int i;
265
266 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
267 if (netdev_lock_type[i] == dev_type)
268 return i;
269 /* the last key is used by default */
270 return ARRAY_SIZE(netdev_lock_type) - 1;
271}
272
273static inline void netdev_set_lockdep_class(spinlock_t *lock,
274 unsigned short dev_type)
275{
276 int i;
277
278 i = netdev_lock_pos(dev_type);
279 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
280 netdev_lock_name[i]);
281}
282#else
283static inline void netdev_set_lockdep_class(spinlock_t *lock,
284 unsigned short dev_type)
285{
286}
287#endif
220 288
221/******************************************************************************* 289/*******************************************************************************
222 290
@@ -3001,6 +3069,7 @@ int register_netdevice(struct net_device *dev)
3001 3069
3002 spin_lock_init(&dev->queue_lock); 3070 spin_lock_init(&dev->queue_lock);
3003 spin_lock_init(&dev->_xmit_lock); 3071 spin_lock_init(&dev->_xmit_lock);
3072 netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
3004 dev->xmit_lock_owner = -1; 3073 dev->xmit_lock_owner = -1;
3005 spin_lock_init(&dev->ingress_lock); 3074 spin_lock_init(&dev->ingress_lock);
3006 3075
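
The net/core/dev.c hunks key the lockdep class of dev->_xmit_lock on dev->type, so that, for example, a stacked transmit path through devices of different types does not look to lockdep like the same lock class taken recursively. The pattern rests on lockdep_set_class_and_name(); a generic, hedged illustration with hypothetical lock names:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

static struct lock_class_key outer_key, inner_key;

static void init_lock_pair(spinlock_t *outer, spinlock_t *inner)
{
	spin_lock_init(outer);
	spin_lock_init(inner);
	/* two spinlocks of the same C type but distinct lockdep classes, so
	 * nesting inner under outer is not reported as recursive locking */
	lockdep_set_class_and_name(outer, &outer_key, "my_outer_lock");
	lockdep_set_class_and_name(inner, &inner_key, "my_inner_lock");
}
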
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index c68196cc56ab..010fbb2d45e9 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -43,11 +43,11 @@ config IP_ADVANCED_ROUTER
43 asymmetric routing (packets from you to a host take a different path 43 asymmetric routing (packets from you to a host take a different path
44 than packets from that host to you) or if you operate a non-routing 44 than packets from that host to you) or if you operate a non-routing
45 host which has several IP addresses on different interfaces. To turn 45 host which has several IP addresses on different interfaces. To turn
46 rp_filter off use: 46 rp_filter on use:
47 47
48 echo 0 > /proc/sys/net/ipv4/conf/<device>/rp_filter 48 echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter
49 or 49 or
50 echo 0 > /proc/sys/net/ipv4/conf/all/rp_filter 50 echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter
51 51
52 If unsure, say N here. 52 If unsure, say N here.
53 53
@@ -577,6 +577,7 @@ config TCP_CONG_VENO
577config TCP_CONG_YEAH 577config TCP_CONG_YEAH
578 tristate "YeAH TCP" 578 tristate "YeAH TCP"
579 depends on EXPERIMENTAL 579 depends on EXPERIMENTAL
580 select TCP_CONG_VEGAS
580 default n 581 default n
581 ---help--- 582 ---help---
582 YeAH-TCP is a sender-side high-speed enabled TCP congestion control 583 YeAH-TCP is a sender-side high-speed enabled TCP congestion control
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cb76e3c725a0..df9fe4f2e8cc 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2396,7 +2396,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
2396 2396
2397 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ 2397 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2398 dev_out = ip_dev_find(oldflp->fl4_src); 2398 dev_out = ip_dev_find(oldflp->fl4_src);
2399 if ((dev_out == NULL) && !(sysctl_ip_nonlocal_bind)) 2399 if (dev_out == NULL)
2400 goto out; 2400 goto out;
2401 2401
2402 /* I removed check for oif == dev_out->oif here. 2402 /* I removed check for oif == dev_out->oif here.
@@ -2407,7 +2407,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
2407 of another iface. --ANK 2407 of another iface. --ANK
2408 */ 2408 */
2409 2409
2410 if (dev_out && oldflp->oif == 0 2410 if (oldflp->oif == 0
2411 && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF))) { 2411 && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
2412 /* Special hack: user can direct multicasts 2412 /* Special hack: user can direct multicasts
2413 and limited broadcast via necessary interface 2413 and limited broadcast via necessary interface
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 86b26539e54b..1260e52ad772 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -276,30 +276,34 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
276 276
277 277
278/* 278/*
279 * Slow start (exponential increase) with 279 * Slow start is used when congestion window is less than slow start
280 * RFC3742 Limited Slow Start (fast linear increase) support. 280 * threshold. This version implements the basic RFC2581 version
281 * and optionally supports:
282 * RFC3742 Limited Slow Start - growth limited to max_ssthresh
283 * RFC3465 Appropriate Byte Counting - growth limited by bytes acknowledged
281 */ 284 */
282void tcp_slow_start(struct tcp_sock *tp) 285void tcp_slow_start(struct tcp_sock *tp)
283{ 286{
284 int cnt = 0; 287 int cnt; /* increase in packets */
285 288
286 if (sysctl_tcp_abc) { 289 /* RFC3465: ABC Slow start
287 /* RFC3465: Slow Start 290 * Increase only after a full MSS of bytes is acked
288 * TCP sender SHOULD increase cwnd by the number of 291 *
289 * previously unacknowledged bytes ACKed by each incoming 292 * TCP sender SHOULD increase cwnd by the number of
290 * acknowledgment, provided the increase is not more than L 293 * previously unacknowledged bytes ACKed by each incoming
291 */ 294 * acknowledgment, provided the increase is not more than L
292 if (tp->bytes_acked < tp->mss_cache) 295 */
293 return; 296 if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
294 } 297 return;
295 298
296 if (sysctl_tcp_max_ssthresh > 0 && 299 if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
297 tp->snd_cwnd > sysctl_tcp_max_ssthresh) 300 cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */
298 cnt += sysctl_tcp_max_ssthresh>>1;
299 else 301 else
300 cnt += tp->snd_cwnd; 302 cnt = tp->snd_cwnd; /* exponential increase */
301 303
302 /* RFC3465: We MAY increase by 2 if discovered delayed ack */ 304 /* RFC3465: ABC
305 * We MAY increase by 2 if discovered delayed ack
306 */
303 if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache) 307 if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
304 cnt <<= 1; 308 cnt <<= 1;
305 tp->bytes_acked = 0; 309 tp->bytes_acked = 0;
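
The rewritten tcp_slow_start() above short-circuits on the ABC bytes-acked check first, then sets cnt either to max_ssthresh/2 (RFC 3742 Limited Slow Start, when snd_cwnd exceeds the threshold) or to snd_cwnd (classic exponential growth), and optionally doubles it for delayed ACKs under ABC. A self-contained userspace rendition of just that cnt computation, with stand-in names for the sysctls and socket fields:

#include <stdio.h>

static unsigned int slow_start_cnt(unsigned int snd_cwnd,
				   unsigned int max_ssthresh,	/* sysctl_tcp_max_ssthresh */
				   int abc,			/* sysctl_tcp_abc */
				   unsigned int bytes_acked,
				   unsigned int mss)
{
	unsigned int cnt;

	if (max_ssthresh > 0 && snd_cwnd > max_ssthresh)
		cnt = max_ssthresh >> 1;	/* RFC 3742: limited (linear) growth */
	else
		cnt = snd_cwnd;			/* classic exponential increase */

	if (abc > 1 && bytes_acked >= 2 * mss)
		cnt <<= 1;			/* RFC 3465: may credit a delayed ACK double */

	return cnt;
}

int main(void)
{
	/* cwnd 200 > max_ssthresh 100: growth capped at 50 */
	printf("%u\n", slow_start_cnt(200, 100, 0, 0, 1460));	/* prints 50 */
	/* cwnd 20 with ABC and >= 2*MSS acked: doubled to 40 */
	printf("%u\n", slow_start_cnt(20, 100, 2, 2920, 1460));	/* prints 40 */
	return 0;
}

The part of the function that applies cnt to snd_cwnd falls outside this hunk.
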
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index b011eb625e49..944d75396fb3 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -989,8 +989,6 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
989 spin_unlock(&rpc_sched_lock); 989 spin_unlock(&rpc_sched_lock);
990} 990}
991 991
992static DECLARE_MUTEX_LOCKED(rpciod_running);
993
994static void rpciod_killall(void) 992static void rpciod_killall(void)
995{ 993{
996 unsigned long flags; 994 unsigned long flags;
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 0d35bc796d00..73075dec83c0 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -134,11 +134,7 @@ EXPORT_SYMBOL(nfsd_debug);
134EXPORT_SYMBOL(nlm_debug); 134EXPORT_SYMBOL(nlm_debug);
135#endif 135#endif
136 136
137extern int register_rpc_pipefs(void);
138extern void unregister_rpc_pipefs(void);
139extern struct cache_detail ip_map_cache, unix_gid_cache; 137extern struct cache_detail ip_map_cache, unix_gid_cache;
140extern int init_socket_xprt(void);
141extern void cleanup_socket_xprt(void);
142 138
143static int __init 139static int __init
144init_sunrpc(void) 140init_sunrpc(void)