-rw-r--r--  arch/i386/kernel/ioport.c | 1
-rw-r--r--  arch/i386/kernel/process.c | 50
-rw-r--r--  arch/i386/kernel/ptrace.c | 5
-rw-r--r--  block/blktrace.c | 5
-rw-r--r--  block/ll_rw_blk.c | 4
-rw-r--r--  drivers/acpi/Kconfig | 6
-rw-r--r--  drivers/acpi/ac.c | 2
-rw-r--r--  drivers/acpi/battery.c | 6
-rw-r--r--  drivers/acpi/bus.c | 4
-rw-r--r--  drivers/acpi/button.c | 4
-rw-r--r--  drivers/acpi/cm_sbs.c | 46
-rw-r--r--  drivers/acpi/ec.c | 2
-rw-r--r--  drivers/acpi/event.c | 2
-rw-r--r--  drivers/acpi/fan.c | 2
-rw-r--r--  drivers/acpi/hotkey.c | 10
-rw-r--r--  drivers/acpi/power.c | 2
-rw-r--r--  drivers/acpi/processor_core.c | 2
-rw-r--r--  drivers/acpi/processor_idle.c | 2
-rw-r--r--  drivers/acpi/sleep/proc.c | 6
-rw-r--r--  drivers/acpi/system.c | 6
-rw-r--r--  drivers/acpi/thermal.c | 10
-rw-r--r--  drivers/atm/Kconfig | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 4
-rw-r--r--  drivers/net/3c59x.c | 166
-rw-r--r--  drivers/net/8139too.c | 5
-rw-r--r--  drivers/net/bnx2.c | 2
-rw-r--r--  drivers/net/chelsio/sge.c | 2
-rw-r--r--  drivers/net/e1000/e1000.h | 10
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 141
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 1772
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 398
-rw-r--r--  drivers/net/e1000/e1000_main.c | 385
-rw-r--r--  drivers/net/e1000/e1000_osdep.h | 13
-rw-r--r--  drivers/net/e1000/e1000_param.c | 199
-rw-r--r--  drivers/net/forcedeth.c | 2
-rw-r--r--  drivers/net/irda/ali-ircc.c | 3
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 2
-rw-r--r--  drivers/net/loopback.c | 2
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 2
-rw-r--r--  drivers/net/sky2.c | 2
-rw-r--r--  drivers/net/tg3.c | 2
-rw-r--r--  drivers/net/typhoon.c | 4
-rw-r--r--  drivers/pci/hotplug/Kconfig | 2
-rw-r--r--  drivers/s390/net/qeth_main.c | 2
-rw-r--r--  include/asm-i386/atomic.h | 30
-rw-r--r--  include/asm-i386/futex.h | 10
-rw-r--r--  include/asm-i386/local.h | 14
-rw-r--r--  include/asm-i386/posix_types.h | 4
-rw-r--r--  include/asm-i386/rwlock.h | 4
-rw-r--r--  include/asm-i386/rwsem.h | 35
-rw-r--r--  include/asm-i386/semaphore.h | 8
-rw-r--r--  include/asm-i386/spinlock.h | 14
-rw-r--r--  include/asm-i386/thread_info.h | 7
-rw-r--r--  include/asm-powerpc/atomic.h | 32
-rw-r--r--  include/asm-powerpc/bitops.h | 16
-rw-r--r--  include/asm-powerpc/system.h | 16
-rw-r--r--  include/linux/blktrace_api.h | 5
-rw-r--r--  include/linux/netdevice.h | 8
-rw-r--r--  include/linux/skbuff.h | 5
-rw-r--r--  include/net/protocol.h | 2
-rw-r--r--  include/net/tcp.h | 1
-rw-r--r--  net/atm/clip.c | 13
-rw-r--r--  net/atm/ipcommon.c | 17
-rw-r--r--  net/ax25/af_ax25.c | 3
-rw-r--r--  net/ax25/ax25_dev.c | 4
-rw-r--r--  net/bridge/br_forward.c | 2
-rw-r--r--  net/bridge/br_netfilter.c | 2
-rw-r--r--  net/core/dev.c | 42
-rw-r--r--  net/decnet/dn_rules.c | 3
-rw-r--r--  net/ipv4/af_inet.c | 36
-rw-r--r--  net/ipv4/fib_rules.c | 4
-rw-r--r--  net/ipv4/ip_output.c | 4
-rw-r--r--  net/ipv4/tcp_ipv4.c | 18
-rw-r--r--  net/ipv4/xfrm4_output.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 4
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 89
-rw-r--r--  net/ipv6/tcp_ipv6.c | 19
-rw-r--r--  net/ipv6/xfrm6_output.c | 2
-rw-r--r--  net/netrom/af_netrom.c | 4
-rw-r--r--  net/rose/af_rose.c | 3
-rw-r--r--  net/sched/act_api.c | 2
81 files changed, 2950 insertions, 833 deletions
diff --git a/arch/i386/kernel/ioport.c b/arch/i386/kernel/ioport.c
index 79026f026b85..498e8bc197d5 100644
--- a/arch/i386/kernel/ioport.c
+++ b/arch/i386/kernel/ioport.c
@@ -79,6 +79,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
79 79
80 memset(bitmap, 0xff, IO_BITMAP_BYTES); 80 memset(bitmap, 0xff, IO_BITMAP_BYTES);
81 t->io_bitmap_ptr = bitmap; 81 t->io_bitmap_ptr = bitmap;
82 set_thread_flag(TIF_IO_BITMAP);
82 } 83 }
83 84
84 /* 85 /*
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 94e2c87edeaa..923bb292f47f 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -359,16 +359,16 @@ EXPORT_SYMBOL(kernel_thread);
359 */ 359 */
360void exit_thread(void) 360void exit_thread(void)
361{ 361{
362 struct task_struct *tsk = current;
363 struct thread_struct *t = &tsk->thread;
364
365 /* The process may have allocated an io port bitmap... nuke it. */ 362 /* The process may have allocated an io port bitmap... nuke it. */
366 if (unlikely(NULL != t->io_bitmap_ptr)) { 363 if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
364 struct task_struct *tsk = current;
365 struct thread_struct *t = &tsk->thread;
367 int cpu = get_cpu(); 366 int cpu = get_cpu();
368 struct tss_struct *tss = &per_cpu(init_tss, cpu); 367 struct tss_struct *tss = &per_cpu(init_tss, cpu);
369 368
370 kfree(t->io_bitmap_ptr); 369 kfree(t->io_bitmap_ptr);
371 t->io_bitmap_ptr = NULL; 370 t->io_bitmap_ptr = NULL;
371 clear_thread_flag(TIF_IO_BITMAP);
372 /* 372 /*
373 * Careful, clear this in the TSS too: 373 * Careful, clear this in the TSS too:
374 */ 374 */
@@ -387,6 +387,7 @@ void flush_thread(void)
387 387
388 memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8); 388 memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
389 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); 389 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
390 clear_tsk_thread_flag(tsk, TIF_DEBUG);
390 /* 391 /*
391 * Forget coprocessor state.. 392 * Forget coprocessor state..
392 */ 393 */
@@ -431,7 +432,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
431 savesegment(gs,p->thread.gs); 432 savesegment(gs,p->thread.gs);
432 433
433 tsk = current; 434 tsk = current;
434 if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) { 435 if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
435 p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); 436 p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
436 if (!p->thread.io_bitmap_ptr) { 437 if (!p->thread.io_bitmap_ptr) {
437 p->thread.io_bitmap_max = 0; 438 p->thread.io_bitmap_max = 0;
@@ -439,6 +440,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
439 } 440 }
440 memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr, 441 memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
441 IO_BITMAP_BYTES); 442 IO_BITMAP_BYTES);
443 set_tsk_thread_flag(p, TIF_IO_BITMAP);
442 } 444 }
443 445
444 /* 446 /*
@@ -533,10 +535,24 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
533 return 1; 535 return 1;
534} 536}
535 537
536static inline void 538static noinline void __switch_to_xtra(struct task_struct *next_p,
537handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss) 539 struct tss_struct *tss)
538{ 540{
539 if (!next->io_bitmap_ptr) { 541 struct thread_struct *next;
542
543 next = &next_p->thread;
544
545 if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
546 set_debugreg(next->debugreg[0], 0);
547 set_debugreg(next->debugreg[1], 1);
548 set_debugreg(next->debugreg[2], 2);
549 set_debugreg(next->debugreg[3], 3);
550 /* no 4 and 5 */
551 set_debugreg(next->debugreg[6], 6);
552 set_debugreg(next->debugreg[7], 7);
553 }
554
555 if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
540 /* 556 /*
541 * Disable the bitmap via an invalid offset. We still cache 557 * Disable the bitmap via an invalid offset. We still cache
542 * the previous bitmap owner and the IO bitmap contents: 558 * the previous bitmap owner and the IO bitmap contents:
@@ -544,6 +560,7 @@ handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
544 tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET; 560 tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
545 return; 561 return;
546 } 562 }
563
547 if (likely(next == tss->io_bitmap_owner)) { 564 if (likely(next == tss->io_bitmap_owner)) {
548 /* 565 /*
549 * Previous owner of the bitmap (hence the bitmap content) 566 * Previous owner of the bitmap (hence the bitmap content)
@@ -671,20 +688,11 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
671 set_iopl_mask(next->iopl); 688 set_iopl_mask(next->iopl);
672 689
673 /* 690 /*
674 * Now maybe reload the debug registers 691 * Now maybe handle debug registers and/or IO bitmaps
675 */ 692 */
676 if (unlikely(next->debugreg[7])) { 693 if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
677 set_debugreg(next->debugreg[0], 0); 694 || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
678 set_debugreg(next->debugreg[1], 1); 695 __switch_to_xtra(next_p, tss);
679 set_debugreg(next->debugreg[2], 2);
680 set_debugreg(next->debugreg[3], 3);
681 /* no 4 and 5 */
682 set_debugreg(next->debugreg[6], 6);
683 set_debugreg(next->debugreg[7], 7);
684 }
685
686 if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
687 handle_io_bitmap(next, tss);
688 696
689 disable_tsc(prev_p, next_p); 697 disable_tsc(prev_p, next_p);
690 698
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index fd7eaf7866e0..d3db03f4085d 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -468,8 +468,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
468 for(i=0; i<4; i++) 468 for(i=0; i<4; i++)
469 if ((0x5f54 >> ((data >> (16 + 4*i)) & 0xf)) & 1) 469 if ((0x5f54 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
470 goto out_tsk; 470 goto out_tsk;
471 if (data)
472 set_tsk_thread_flag(child, TIF_DEBUG);
473 else
474 clear_tsk_thread_flag(child, TIF_DEBUG);
471 } 475 }
472
473 addr -= (long) &dummy->u_debugreg; 476 addr -= (long) &dummy->u_debugreg;
474 addr = addr >> 2; 477 addr = addr >> 2;
475 child->thread.debugreg[addr] = data; 478 child->thread.debugreg[addr] = data;
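The three i386 hunks above move the context-switch slow paths behind per-task thread flags: sys_ioperm() sets TIF_IO_BITMAP when it allocates a bitmap, ptrace sets or clears TIF_DEBUG as debug registers are written, and __switch_to() only calls the out-of-line __switch_to_xtra() when a _TIF_WORK_CTXSW flag is pending. A minimal sketch of that pattern, using the real thread-flag helpers but hypothetical placeholder bodies (not the patched kernel code):

    /* slow path, kept out of line so the common switch stays small */
    static noinline void example_switch_to_xtra(struct task_struct *next_p)
    {
            if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
                    /* reload next_p->thread.debugreg[0..7] into the CPU */
            }
            if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                    /* expose next_p's I/O permission bitmap via the TSS */
            }
    }

    /* fast path in the context switch: one flags test instead of two pointer checks */
    if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
            example_switch_to_xtra(next_p);

    /* producers keep the flags in sync with the state they guard */
    set_thread_flag(TIF_IO_BITMAP);         /* e.g. after allocating an I/O bitmap */
    clear_tsk_thread_flag(tsk, TIF_DEBUG);  /* e.g. when debug registers are reset */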
diff --git a/block/blktrace.c b/block/blktrace.c
index 92925e7d9e6c..b8c0702777ff 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -69,7 +69,7 @@ static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK
69/* 69/*
70 * Bio action bits of interest 70 * Bio action bits of interest
71 */ 71 */
72static u32 bio_act[3] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_ACT(BLK_TC_SYNC) }; 72static u32 bio_act[5] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_ACT(BLK_TC_SYNC), 0, BLK_TC_ACT(BLK_TC_AHEAD) };
73 73
74/* 74/*
75 * More could be added as needed, taking care to increment the decrementer 75 * More could be added as needed, taking care to increment the decrementer
@@ -79,6 +79,8 @@ static u32 bio_act[3] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_AC
79 (((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0)) 79 (((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
80#define trace_sync_bit(rw) \ 80#define trace_sync_bit(rw) \
81 (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1)) 81 (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
82#define trace_ahead_bit(rw) \
83 (((rw) & (1 << BIO_RW_AHEAD)) << (BIO_RW_AHEAD - 0))
82 84
83/* 85/*
84 * The worker for the various blk_add_trace*() types. Fills out a 86 * The worker for the various blk_add_trace*() types. Fills out a
@@ -100,6 +102,7 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
100 what |= ddir_act[rw & WRITE]; 102 what |= ddir_act[rw & WRITE];
101 what |= bio_act[trace_barrier_bit(rw)]; 103 what |= bio_act[trace_barrier_bit(rw)];
102 what |= bio_act[trace_sync_bit(rw)]; 104 what |= bio_act[trace_sync_bit(rw)];
105 what |= bio_act[trace_ahead_bit(rw)];
103 106
104 pid = tsk->pid; 107 pid = tsk->pid;
105 if (unlikely(act_log_check(bt, what, sector, pid))) 108 if (unlikely(act_log_check(bt, what, sector, pid)))
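__blk_add_trace() builds the trace action word by using bits of the rw argument as indexes into the bio_act[] table, so each trace_*_bit() macro must shift its flag down or up to a distinct small index; adding BIO_RW_AHEAD therefore also grows the table to five entries, with index 3 left unused. A standalone check of that arithmetic, assuming the 2.6.x bit positions BIO_RW_AHEAD=1, BIO_RW_BARRIER=2, BIO_RW_SYNC=4:

    #include <stdio.h>

    #define BIO_RW_AHEAD   1   /* assumed bit positions, see lead-in */
    #define BIO_RW_BARRIER 2
    #define BIO_RW_SYNC    4

    #define trace_barrier_bit(rw) (((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
    #define trace_sync_bit(rw)    (((rw) & (1 << BIO_RW_SYNC))    >> (BIO_RW_SYNC - 1))
    #define trace_ahead_bit(rw)   (((rw) & (1 << BIO_RW_AHEAD))   << (BIO_RW_AHEAD - 0))

    int main(void)
    {
            unsigned int rw = (1 << BIO_RW_AHEAD) | (1 << BIO_RW_SYNC);

            /* barrier maps to index 1, sync to index 2, ahead to index 4 when set */
            printf("barrier=%u sync=%u ahead=%u\n",
                   trace_barrier_bit(rw), trace_sync_bit(rw), trace_ahead_bit(rw));
            return 0;   /* prints: barrier=0 sync=2 ahead=4 */
    }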
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index ab17c7224bb6..61d6b3c65b66 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3491,8 +3491,8 @@ EXPORT_SYMBOL(end_request);
3491 3491
3492void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio) 3492void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
3493{ 3493{
3494 /* first three bits are identical in rq->flags and bio->bi_rw */ 3494 /* first two bits are identical in rq->flags and bio->bi_rw */
3495 rq->flags |= (bio->bi_rw & 7); 3495 rq->flags |= (bio->bi_rw & 3);
3496 3496
3497 rq->nr_phys_segments = bio_phys_segments(q, bio); 3497 rq->nr_phys_segments = bio_phys_segments(q, bio);
3498 rq->nr_hw_segments = bio_hw_segments(q, bio); 3498 rq->nr_hw_segments = bio_hw_segments(q, bio);
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index fef7bab12244..56c5ba874623 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -107,7 +107,6 @@ config ACPI_BUTTON
107config ACPI_VIDEO 107config ACPI_VIDEO
108 tristate "Video" 108 tristate "Video"
109 depends on X86 109 depends on X86
110 default y
111 help 110 help
112 This driver implement the ACPI Extensions For Display Adapters 111 This driver implement the ACPI Extensions For Display Adapters
113 for integrated graphics devices on motherboard, as specified in 112 for integrated graphics devices on motherboard, as specified in
@@ -135,8 +134,7 @@ config ACPI_FAN
135 134
136config ACPI_DOCK 135config ACPI_DOCK
137 tristate "Dock" 136 tristate "Dock"
138 depends on !ACPI_IBM_DOCK 137 depends on EXPERIMENTAL
139 default y
140 help 138 help
141 This driver adds support for ACPI controlled docking stations 139 This driver adds support for ACPI controlled docking stations
142 140
@@ -214,6 +212,7 @@ config ACPI_IBM
214config ACPI_IBM_DOCK 212config ACPI_IBM_DOCK
215 bool "Legacy Docking Station Support" 213 bool "Legacy Docking Station Support"
216 depends on ACPI_IBM 214 depends on ACPI_IBM
215 depends on ACPI_DOCK=n
217 default n 216 default n
218 ---help--- 217 ---help---
219 Allows the ibm_acpi driver to handle docking station events. 218 Allows the ibm_acpi driver to handle docking station events.
@@ -357,7 +356,6 @@ config ACPI_SBS
357 tristate "Smart Battery System (EXPERIMENTAL)" 356 tristate "Smart Battery System (EXPERIMENTAL)"
358 depends on X86 && I2C 357 depends on X86 && I2C
359 depends on EXPERIMENTAL 358 depends on EXPERIMENTAL
360 default y
361 help 359 help
362 This driver adds support for the Smart Battery System. 360 This driver adds support for the Smart Battery System.
363 Depends on I2C (Device Drivers ---> I2C support) 361 Depends on I2C (Device Drivers ---> I2C support)
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 24ccf81d135f..96309b9660da 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -72,7 +72,7 @@ struct acpi_ac {
72 unsigned long state; 72 unsigned long state;
73}; 73};
74 74
75static struct file_operations acpi_ac_fops = { 75static const struct file_operations acpi_ac_fops = {
76 .open = acpi_ac_open_fs, 76 .open = acpi_ac_open_fs,
77 .read = seq_read, 77 .read = seq_read,
78 .llseek = seq_lseek, 78 .llseek = seq_lseek,
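This hunk, and the battery, button, ec, event, fan, hotkey, power, processor, sleep, system and thermal hunks that follow, only add the const qualifier to file_operations tables that are filled in once at compile time; marking them const lets the compiler place them in read-only data. The pattern, reduced to a sketch with hypothetical example_* names:

    #include <linux/fs.h>
    #include <linux/seq_file.h>

    static int example_show(struct seq_file *m, void *v)
    {
            seq_puts(m, "example\n");
            return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_show, NULL);
    }

    static const struct file_operations example_fops = {
            .open    = example_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
            .owner   = THIS_MODULE,
    };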
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 24bf4dca88cc..6e5221707d97 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -557,7 +557,7 @@ static int acpi_battery_alarm_open_fs(struct inode *inode, struct file *file)
557 return single_open(file, acpi_battery_read_alarm, PDE(inode)->data); 557 return single_open(file, acpi_battery_read_alarm, PDE(inode)->data);
558} 558}
559 559
560static struct file_operations acpi_battery_info_ops = { 560static const struct file_operations acpi_battery_info_ops = {
561 .open = acpi_battery_info_open_fs, 561 .open = acpi_battery_info_open_fs,
562 .read = seq_read, 562 .read = seq_read,
563 .llseek = seq_lseek, 563 .llseek = seq_lseek,
@@ -565,7 +565,7 @@ static struct file_operations acpi_battery_info_ops = {
565 .owner = THIS_MODULE, 565 .owner = THIS_MODULE,
566}; 566};
567 567
568static struct file_operations acpi_battery_state_ops = { 568static const struct file_operations acpi_battery_state_ops = {
569 .open = acpi_battery_state_open_fs, 569 .open = acpi_battery_state_open_fs,
570 .read = seq_read, 570 .read = seq_read,
571 .llseek = seq_lseek, 571 .llseek = seq_lseek,
@@ -573,7 +573,7 @@ static struct file_operations acpi_battery_state_ops = {
573 .owner = THIS_MODULE, 573 .owner = THIS_MODULE,
574}; 574};
575 575
576static struct file_operations acpi_battery_alarm_ops = { 576static const struct file_operations acpi_battery_alarm_ops = {
577 .open = acpi_battery_alarm_open_fs, 577 .open = acpi_battery_alarm_open_fs,
578 .read = seq_read, 578 .read = seq_read,
579 .write = acpi_battery_write_alarm, 579 .write = acpi_battery_write_alarm,
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index ea5a0496a4fd..b2977695e120 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -192,8 +192,8 @@ int acpi_bus_set_power(acpi_handle handle, int state)
192 /* Make sure this is a valid target state */ 192 /* Make sure this is a valid target state */
193 193
194 if (!device->flags.power_manageable) { 194 if (!device->flags.power_manageable) {
195 printk(KERN_DEBUG "Device `[%s]' is not power manageable", 195 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable",
196 device->kobj.name); 196 device->kobj.name));
197 return -ENODEV; 197 return -ENODEV;
198 } 198 }
199 /* 199 /*
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index fd1ba05eab68..5ef885e82c78 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -87,14 +87,14 @@ struct acpi_button {
87 unsigned long pushed; 87 unsigned long pushed;
88}; 88};
89 89
90static struct file_operations acpi_button_info_fops = { 90static const struct file_operations acpi_button_info_fops = {
91 .open = acpi_button_info_open_fs, 91 .open = acpi_button_info_open_fs,
92 .read = seq_read, 92 .read = seq_read,
93 .llseek = seq_lseek, 93 .llseek = seq_lseek,
94 .release = single_release, 94 .release = single_release,
95}; 95};
96 96
97static struct file_operations acpi_button_state_fops = { 97static const struct file_operations acpi_button_state_fops = {
98 .open = acpi_button_state_open_fs, 98 .open = acpi_button_state_open_fs,
99 .read = seq_read, 99 .read = seq_read,
100 .llseek = seq_lseek, 100 .llseek = seq_lseek,
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
index 574a75a166c5..a01ce6700bfe 100644
--- a/drivers/acpi/cm_sbs.c
+++ b/drivers/acpi/cm_sbs.c
@@ -39,50 +39,43 @@ ACPI_MODULE_NAME("cm_sbs")
39static struct proc_dir_entry *acpi_ac_dir; 39static struct proc_dir_entry *acpi_ac_dir;
40static struct proc_dir_entry *acpi_battery_dir; 40static struct proc_dir_entry *acpi_battery_dir;
41 41
42static struct semaphore cm_sbs_sem; 42static DEFINE_MUTEX(cm_sbs_mutex);
43 43
44static int lock_ac_dir_cnt = 0; 44static int lock_ac_dir_cnt;
45static int lock_battery_dir_cnt = 0; 45static int lock_battery_dir_cnt;
46 46
47struct proc_dir_entry *acpi_lock_ac_dir(void) 47struct proc_dir_entry *acpi_lock_ac_dir(void)
48{ 48{
49 49 mutex_lock(&cm_sbs_mutex);
50 down(&cm_sbs_sem); 50 if (!acpi_ac_dir)
51 if (!acpi_ac_dir) {
52 acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir); 51 acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
53 }
54 if (acpi_ac_dir) { 52 if (acpi_ac_dir) {
55 lock_ac_dir_cnt++; 53 lock_ac_dir_cnt++;
56 } else { 54 } else {
57 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 55 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
58 "Cannot create %s\n", ACPI_AC_CLASS)); 56 "Cannot create %s\n", ACPI_AC_CLASS));
59 } 57 }
60 up(&cm_sbs_sem); 58 mutex_unlock(&cm_sbs_mutex);
61 return acpi_ac_dir; 59 return acpi_ac_dir;
62} 60}
63
64EXPORT_SYMBOL(acpi_lock_ac_dir); 61EXPORT_SYMBOL(acpi_lock_ac_dir);
65 62
66void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param) 63void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
67{ 64{
68 65 mutex_lock(&cm_sbs_mutex);
69 down(&cm_sbs_sem); 66 if (acpi_ac_dir_param)
70 if (acpi_ac_dir_param) {
71 lock_ac_dir_cnt--; 67 lock_ac_dir_cnt--;
72 }
73 if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) { 68 if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
74 remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir); 69 remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
75 acpi_ac_dir = 0; 70 acpi_ac_dir = 0;
76 } 71 }
77 up(&cm_sbs_sem); 72 mutex_unlock(&cm_sbs_mutex);
78} 73}
79
80EXPORT_SYMBOL(acpi_unlock_ac_dir); 74EXPORT_SYMBOL(acpi_unlock_ac_dir);
81 75
82struct proc_dir_entry *acpi_lock_battery_dir(void) 76struct proc_dir_entry *acpi_lock_battery_dir(void)
83{ 77{
84 78 mutex_lock(&cm_sbs_mutex);
85 down(&cm_sbs_sem);
86 if (!acpi_battery_dir) { 79 if (!acpi_battery_dir) {
87 acpi_battery_dir = 80 acpi_battery_dir =
88 proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir); 81 proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
@@ -93,39 +86,28 @@ struct proc_dir_entry *acpi_lock_battery_dir(void)
93 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 86 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
94 "Cannot create %s\n", ACPI_BATTERY_CLASS)); 87 "Cannot create %s\n", ACPI_BATTERY_CLASS));
95 } 88 }
96 up(&cm_sbs_sem); 89 mutex_unlock(&cm_sbs_mutex);
97 return acpi_battery_dir; 90 return acpi_battery_dir;
98} 91}
99
100EXPORT_SYMBOL(acpi_lock_battery_dir); 92EXPORT_SYMBOL(acpi_lock_battery_dir);
101 93
102void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param) 94void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
103{ 95{
104 96 mutex_lock(&cm_sbs_mutex);
105 down(&cm_sbs_sem); 97 if (acpi_battery_dir_param)
106 if (acpi_battery_dir_param) {
107 lock_battery_dir_cnt--; 98 lock_battery_dir_cnt--;
108 }
109 if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param 99 if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
110 && acpi_battery_dir) { 100 && acpi_battery_dir) {
111 remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir); 101 remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
112 acpi_battery_dir = 0; 102 acpi_battery_dir = 0;
113 } 103 }
114 up(&cm_sbs_sem); 104 mutex_unlock(&cm_sbs_mutex);
115 return; 105 return;
116} 106}
117
118EXPORT_SYMBOL(acpi_unlock_battery_dir); 107EXPORT_SYMBOL(acpi_unlock_battery_dir);
119 108
120static int __init acpi_cm_sbs_init(void) 109static int __init acpi_cm_sbs_init(void)
121{ 110{
122
123 if (acpi_disabled)
124 return 0;
125
126 init_MUTEX(&cm_sbs_sem);
127
128 return 0; 111 return 0;
129} 112}
130
131subsys_initcall(acpi_cm_sbs_init); 113subsys_initcall(acpi_cm_sbs_init);
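The cm_sbs.c rewrite replaces a semaphore used purely for mutual exclusion with a statically initialized mutex, which is why the init_MUTEX() call disappears from the initcall. The resulting shape of one of the lock/refcount helpers, reduced to a sketch with made-up names (not the driver code):

    #include <linux/mutex.h>
    #include <linux/proc_fs.h>

    static DEFINE_MUTEX(example_mutex);          /* no runtime initialisation needed */
    static struct proc_dir_entry *example_dir;
    static int example_dir_cnt;

    struct proc_dir_entry *example_lock_dir(void)
    {
            mutex_lock(&example_mutex);
            if (!example_dir)
                    example_dir = proc_mkdir("example", NULL);
            if (example_dir)
                    example_dir_cnt++;
            mutex_unlock(&example_mutex);
            return example_dir;
    }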
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 8c5d7df7d343..e5d796362854 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -929,7 +929,7 @@ static int acpi_ec_info_open_fs(struct inode *inode, struct file *file)
929 return single_open(file, acpi_ec_read_info, PDE(inode)->data); 929 return single_open(file, acpi_ec_read_info, PDE(inode)->data);
930} 930}
931 931
932static struct file_operations acpi_ec_info_ops = { 932static const struct file_operations acpi_ec_info_ops = {
933 .open = acpi_ec_info_open_fs, 933 .open = acpi_ec_info_open_fs,
934 .read = seq_read, 934 .read = seq_read,
935 .llseek = seq_lseek, 935 .llseek = seq_lseek,
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index a901b23e95e7..959a893c8d1f 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -99,7 +99,7 @@ static unsigned int acpi_system_poll_event(struct file *file, poll_table * wait)
99 return 0; 99 return 0;
100} 100}
101 101
102static struct file_operations acpi_system_event_ops = { 102static const struct file_operations acpi_system_event_ops = {
103 .open = acpi_system_open_event, 103 .open = acpi_system_open_event,
104 .read = acpi_system_read_event, 104 .read = acpi_system_read_event,
105 .release = acpi_system_close_event, 105 .release = acpi_system_close_event,
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index daed2460924d..045c89477e59 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -120,7 +120,7 @@ acpi_fan_write_state(struct file *file, const char __user * buffer,
120 return count; 120 return count;
121} 121}
122 122
123static struct file_operations acpi_fan_state_ops = { 123static const struct file_operations acpi_fan_state_ops = {
124 .open = acpi_fan_state_open_fs, 124 .open = acpi_fan_state_open_fs,
125 .read = seq_read, 125 .read = seq_read,
126 .write = acpi_fan_write_state, 126 .write = acpi_fan_write_state,
diff --git a/drivers/acpi/hotkey.c b/drivers/acpi/hotkey.c
index fd81a0f5222f..32c9d88fd196 100644
--- a/drivers/acpi/hotkey.c
+++ b/drivers/acpi/hotkey.c
@@ -184,7 +184,7 @@ static union acpi_hotkey *get_hotkey_by_event(struct
184 *hotkey_list, int event); 184 *hotkey_list, int event);
185 185
186/* event based config */ 186/* event based config */
187static struct file_operations hotkey_config_fops = { 187static const struct file_operations hotkey_config_fops = {
188 .open = hotkey_open_config, 188 .open = hotkey_open_config,
189 .read = seq_read, 189 .read = seq_read,
190 .write = hotkey_write_config, 190 .write = hotkey_write_config,
@@ -193,7 +193,7 @@ static struct file_operations hotkey_config_fops = {
193}; 193};
194 194
195/* polling based config */ 195/* polling based config */
196static struct file_operations hotkey_poll_config_fops = { 196static const struct file_operations hotkey_poll_config_fops = {
197 .open = hotkey_poll_open_config, 197 .open = hotkey_poll_open_config,
198 .read = seq_read, 198 .read = seq_read,
199 .write = hotkey_write_config, 199 .write = hotkey_write_config,
@@ -202,7 +202,7 @@ static struct file_operations hotkey_poll_config_fops = {
202}; 202};
203 203
204/* hotkey driver info */ 204/* hotkey driver info */
205static struct file_operations hotkey_info_fops = { 205static const struct file_operations hotkey_info_fops = {
206 .open = hotkey_info_open_fs, 206 .open = hotkey_info_open_fs,
207 .read = seq_read, 207 .read = seq_read,
208 .llseek = seq_lseek, 208 .llseek = seq_lseek,
@@ -210,7 +210,7 @@ static struct file_operations hotkey_info_fops = {
210}; 210};
211 211
212/* action */ 212/* action */
213static struct file_operations hotkey_action_fops = { 213static const struct file_operations hotkey_action_fops = {
214 .open = hotkey_action_open_fs, 214 .open = hotkey_action_open_fs,
215 .read = seq_read, 215 .read = seq_read,
216 .write = hotkey_execute_aml_method, 216 .write = hotkey_execute_aml_method,
@@ -219,7 +219,7 @@ static struct file_operations hotkey_action_fops = {
219}; 219};
220 220
221/* polling results */ 221/* polling results */
222static struct file_operations hotkey_polling_fops = { 222static const struct file_operations hotkey_polling_fops = {
223 .open = hotkey_polling_open_fs, 223 .open = hotkey_polling_open_fs,
224 .read = seq_read, 224 .read = seq_read,
225 .llseek = seq_lseek, 225 .llseek = seq_lseek,
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 5d3447f4582c..fec225d1b6b7 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -80,7 +80,7 @@ struct acpi_power_resource {
80 80
81static struct list_head acpi_power_resource_list; 81static struct list_head acpi_power_resource_list;
82 82
83static struct file_operations acpi_power_fops = { 83static const struct file_operations acpi_power_fops = {
84 .open = acpi_power_open_fs, 84 .open = acpi_power_open_fs,
85 .read = seq_read, 85 .read = seq_read,
86 .llseek = seq_lseek, 86 .llseek = seq_lseek,
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 52674323b14d..b13d64415b7a 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -102,7 +102,7 @@ static struct acpi_driver acpi_processor_driver = {
102#define INSTALL_NOTIFY_HANDLER 1 102#define INSTALL_NOTIFY_HANDLER 1
103#define UNINSTALL_NOTIFY_HANDLER 2 103#define UNINSTALL_NOTIFY_HANDLER 2
104 104
105static struct file_operations acpi_processor_info_fops = { 105static const struct file_operations acpi_processor_info_fops = {
106 .open = acpi_processor_info_open_fs, 106 .open = acpi_processor_info_open_fs,
107 .read = seq_read, 107 .read = seq_read,
108 .llseek = seq_lseek, 108 .llseek = seq_lseek,
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 8e9c26aae8fe..71066066d626 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1070,7 +1070,7 @@ static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
1070 PDE(inode)->data); 1070 PDE(inode)->data);
1071} 1071}
1072 1072
1073static struct file_operations acpi_processor_power_fops = { 1073static const struct file_operations acpi_processor_power_fops = {
1074 .open = acpi_processor_power_open_fs, 1074 .open = acpi_processor_power_open_fs,
1075 .read = seq_read, 1075 .read = seq_read,
1076 .llseek = seq_lseek, 1076 .llseek = seq_lseek,
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 4696a85a98b9..34962578039d 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -434,7 +434,7 @@ acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file)
434 PDE(inode)->data); 434 PDE(inode)->data);
435} 435}
436 436
437static struct file_operations acpi_system_wakeup_device_fops = { 437static const struct file_operations acpi_system_wakeup_device_fops = {
438 .open = acpi_system_wakeup_device_open_fs, 438 .open = acpi_system_wakeup_device_open_fs,
439 .read = seq_read, 439 .read = seq_read,
440 .write = acpi_system_write_wakeup_device, 440 .write = acpi_system_write_wakeup_device,
@@ -443,7 +443,7 @@ static struct file_operations acpi_system_wakeup_device_fops = {
443}; 443};
444 444
445#ifdef CONFIG_ACPI_SLEEP_PROC_SLEEP 445#ifdef CONFIG_ACPI_SLEEP_PROC_SLEEP
446static struct file_operations acpi_system_sleep_fops = { 446static const struct file_operations acpi_system_sleep_fops = {
447 .open = acpi_system_sleep_open_fs, 447 .open = acpi_system_sleep_open_fs,
448 .read = seq_read, 448 .read = seq_read,
449 .write = acpi_system_write_sleep, 449 .write = acpi_system_write_sleep,
@@ -452,7 +452,7 @@ static struct file_operations acpi_system_sleep_fops = {
452}; 452};
453#endif /* CONFIG_ACPI_SLEEP_PROC_SLEEP */ 453#endif /* CONFIG_ACPI_SLEEP_PROC_SLEEP */
454 454
455static struct file_operations acpi_system_alarm_fops = { 455static const struct file_operations acpi_system_alarm_fops = {
456 .open = acpi_system_alarm_open_fs, 456 .open = acpi_system_alarm_open_fs,
457 .read = seq_read, 457 .read = seq_read,
458 .write = acpi_system_write_alarm, 458 .write = acpi_system_write_alarm,
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index c3bb7faad75e..d86dcb3c2366 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -57,7 +57,7 @@ static int acpi_system_info_open_fs(struct inode *inode, struct file *file)
57 return single_open(file, acpi_system_read_info, PDE(inode)->data); 57 return single_open(file, acpi_system_read_info, PDE(inode)->data);
58} 58}
59 59
60static struct file_operations acpi_system_info_ops = { 60static const struct file_operations acpi_system_info_ops = {
61 .open = acpi_system_info_open_fs, 61 .open = acpi_system_info_open_fs,
62 .read = seq_read, 62 .read = seq_read,
63 .llseek = seq_lseek, 63 .llseek = seq_lseek,
@@ -67,7 +67,7 @@ static struct file_operations acpi_system_info_ops = {
67static ssize_t acpi_system_read_dsdt(struct file *, char __user *, size_t, 67static ssize_t acpi_system_read_dsdt(struct file *, char __user *, size_t,
68 loff_t *); 68 loff_t *);
69 69
70static struct file_operations acpi_system_dsdt_ops = { 70static const struct file_operations acpi_system_dsdt_ops = {
71 .read = acpi_system_read_dsdt, 71 .read = acpi_system_read_dsdt,
72}; 72};
73 73
@@ -94,7 +94,7 @@ acpi_system_read_dsdt(struct file *file,
94static ssize_t acpi_system_read_fadt(struct file *, char __user *, size_t, 94static ssize_t acpi_system_read_fadt(struct file *, char __user *, size_t,
95 loff_t *); 95 loff_t *);
96 96
97static struct file_operations acpi_system_fadt_ops = { 97static const struct file_operations acpi_system_fadt_ops = {
98 .read = acpi_system_read_fadt, 98 .read = acpi_system_read_fadt,
99}; 99};
100 100
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 503c0b99db12..480a31796886 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -176,21 +176,21 @@ struct acpi_thermal {
176 struct timer_list timer; 176 struct timer_list timer;
177}; 177};
178 178
179static struct file_operations acpi_thermal_state_fops = { 179static const struct file_operations acpi_thermal_state_fops = {
180 .open = acpi_thermal_state_open_fs, 180 .open = acpi_thermal_state_open_fs,
181 .read = seq_read, 181 .read = seq_read,
182 .llseek = seq_lseek, 182 .llseek = seq_lseek,
183 .release = single_release, 183 .release = single_release,
184}; 184};
185 185
186static struct file_operations acpi_thermal_temp_fops = { 186static const struct file_operations acpi_thermal_temp_fops = {
187 .open = acpi_thermal_temp_open_fs, 187 .open = acpi_thermal_temp_open_fs,
188 .read = seq_read, 188 .read = seq_read,
189 .llseek = seq_lseek, 189 .llseek = seq_lseek,
190 .release = single_release, 190 .release = single_release,
191}; 191};
192 192
193static struct file_operations acpi_thermal_trip_fops = { 193static const struct file_operations acpi_thermal_trip_fops = {
194 .open = acpi_thermal_trip_open_fs, 194 .open = acpi_thermal_trip_open_fs,
195 .read = seq_read, 195 .read = seq_read,
196 .write = acpi_thermal_write_trip_points, 196 .write = acpi_thermal_write_trip_points,
@@ -198,7 +198,7 @@ static struct file_operations acpi_thermal_trip_fops = {
198 .release = single_release, 198 .release = single_release,
199}; 199};
200 200
201static struct file_operations acpi_thermal_cooling_fops = { 201static const struct file_operations acpi_thermal_cooling_fops = {
202 .open = acpi_thermal_cooling_open_fs, 202 .open = acpi_thermal_cooling_open_fs,
203 .read = seq_read, 203 .read = seq_read,
204 .write = acpi_thermal_write_cooling_mode, 204 .write = acpi_thermal_write_cooling_mode,
@@ -206,7 +206,7 @@ static struct file_operations acpi_thermal_cooling_fops = {
206 .release = single_release, 206 .release = single_release,
207}; 207};
208 208
209static struct file_operations acpi_thermal_polling_fops = { 209static const struct file_operations acpi_thermal_polling_fops = {
210 .open = acpi_thermal_polling_open_fs, 210 .open = acpi_thermal_polling_open_fs,
211 .read = seq_read, 211 .read = seq_read,
212 .write = acpi_thermal_write_polling, 212 .write = acpi_thermal_write_polling,
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 01a9f1cb7743..cfa5af883e13 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -398,7 +398,7 @@ config ATM_FORE200E_USE_TASKLET
398 default n 398 default n
399 help 399 help
400 This defers work to be done by the interrupt handler to a 400 This defers work to be done by the interrupt handler to a
401 tasklet instead of hanlding everything at interrupt time. This 401 tasklet instead of handling everything at interrupt time. This
402 may improve the responsive of the host. 402 may improve the responsive of the host.
403 403
404config ATM_FORE200E_TX_RETRY 404config ATM_FORE200E_TX_RETRY
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1ba4039777e8..8d328186f774 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -423,6 +423,8 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
423 if (cpufreq_parse_governor(str_governor, &new_policy.policy, &new_policy.governor)) 423 if (cpufreq_parse_governor(str_governor, &new_policy.policy, &new_policy.governor))
424 return -EINVAL; 424 return -EINVAL;
425 425
426 lock_cpu_hotplug();
427
426 /* Do not use cpufreq_set_policy here or the user_policy.max 428 /* Do not use cpufreq_set_policy here or the user_policy.max
427 will be wrongly overridden */ 429 will be wrongly overridden */
428 mutex_lock(&policy->lock); 430 mutex_lock(&policy->lock);
@@ -432,6 +434,8 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
432 policy->user_policy.governor = policy->governor; 434 policy->user_policy.governor = policy->governor;
433 mutex_unlock(&policy->lock); 435 mutex_unlock(&policy->lock);
434 436
437 unlock_cpu_hotplug();
438
435 return ret ? ret : count; 439 return ret ? ret : count;
436} 440}
437 441
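store_scaling_governor() now brackets the governor change with the CPU hotplug lock so a CPU cannot be unplugged while the policy is being rewritten; note the ordering, hotplug lock outside the per-policy mutex. A fragmentary sketch of that ordering only, not the full function:

    lock_cpu_hotplug();                  /* pin the set of online CPUs        */
    mutex_lock(&policy->lock);           /* then take the per-policy lock     */
    /* ... update policy/user_policy and switch the governor ...              */
    mutex_unlock(&policy->lock);
    unlock_cpu_hotplug();                /* release in reverse order          */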
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 2819de79442c..80e8ca013e44 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -17,172 +17,6 @@
17 410 Severn Ave., Suite 210 17 410 Severn Ave., Suite 210
18 Annapolis MD 21403 18 Annapolis MD 21403
19 19
20 Linux Kernel Additions:
21
22 0.99H+lk0.9 - David S. Miller - softnet, PCI DMA updates
23 0.99H+lk1.0 - Jeff Garzik <jgarzik@pobox.com>
24 Remove compatibility defines for kernel versions < 2.2.x.
25 Update for new 2.3.x module interface
26 LK1.1.2 (March 19, 2000)
27 * New PCI interface (jgarzik)
28
29 LK1.1.3 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
30 - Merged with 3c575_cb.c
31 - Don't set RxComplete in boomerang interrupt enable reg
32 - spinlock in vortex_timer to protect mdio functions
33 - disable local interrupts around call to vortex_interrupt in
34 vortex_tx_timeout() (So vortex_interrupt can use spin_lock())
35 - Select window 3 in vortex_timer()'s write to Wn3_MAC_Ctrl
36 - In vortex_start_xmit(), move the lock to _after_ we've altered
37 vp->cur_tx and vp->tx_full. This defeats the race between
38 vortex_start_xmit() and vortex_interrupt which was identified
39 by Bogdan Costescu.
40 - Merged back support for six new cards from various sources
41 - Set vortex_have_pci if pci_module_init returns zero (fixes cardbus
42 insertion oops)
43 - Tell it that 3c905C has NWAY for 100bT autoneg
44 - Fix handling of SetStatusEnd in 'Too much work..' code, as
45 per 2.3.99's 3c575_cb (Dave Hinds).
46 - Split ISR into two for vortex & boomerang
47 - Fix MOD_INC/DEC races
48 - Handle resource allocation failures.
49 - Fix 3CCFE575CT LED polarity
50 - Make tx_interrupt_mitigation the default
51
52 LK1.1.4 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
53 - Add extra TxReset to vortex_up() to fix 575_cb hotplug initialisation probs.
54 - Put vortex_info_tbl into __devinitdata
55 - In the vortex_error StatsFull HACK, disable stats in vp->intr_enable as well
56 as in the hardware.
57 - Increased the loop counter in issue_and_wait from 2,000 to 4,000.
58
59 LK1.1.5 28 April 2000, andrewm
60 - Added powerpc defines (John Daniel <jdaniel@etresoft.com> said these work...)
61 - Some extra diagnostics
62 - In vortex_error(), reset the Tx on maxCollisions. Otherwise most
63 chips usually get a Tx timeout.
64 - Added extra_reset module parm
65 - Replaced some inline timer manip with mod_timer
66 (Franois romieu <Francois.Romieu@nic.fr>)
67 - In vortex_up(), don't make Wn3_config initialisation dependent upon has_nway
68 (this came across from 3c575_cb).
69
70 LK1.1.6 06 Jun 2000, andrewm
71 - Backed out the PPC defines.
72 - Use del_timer_sync(), mod_timer().
73 - Fix wrapped ulong comparison in boomerang_rx()
74 - Add IS_TORNADO, use it to suppress 3c905C checksum error msg
75 (Donald Becker, I Lee Hetherington <ilh@sls.lcs.mit.edu>)
76 - Replace union wn3_config with BFINS/BFEXT manipulation for
77 sparc64 (Pete Zaitcev, Peter Jones)
78 - In vortex_error, do_tx_reset and vortex_tx_timeout(Vortex):
79 do a netif_wake_queue() to better recover from errors. (Anders Pedersen,
80 Donald Becker)
81 - Print a warning on out-of-memory (rate limited to 1 per 10 secs)
82 - Added two more Cardbus 575 NICs: 5b57 and 6564 (Paul Wagland)
83
84 LK1.1.7 2 Jul 2000 andrewm
85 - Better handling of shared IRQs
86 - Reset the transmitter on a Tx reclaim error
87 - Fixed crash under OOM during vortex_open() (Mark Hemment)
88 - Fix Rx cessation problem during OOM (help from Mark Hemment)
89 - The spinlocks around the mdio access were blocking interrupts for 300uS.
90 Fix all this to use spin_lock_bh() within mdio_read/write
91 - Only write to TxFreeThreshold if it's a boomerang - other NICs don't
92 have one.
93 - Added 802.3x MAC-layer flow control support
94
95 LK1.1.8 13 Aug 2000 andrewm
96 - Ignore request_region() return value - already reserved if Cardbus.
97 - Merged some additional Cardbus flags from Don's 0.99Qk
98 - Some fixes for 3c556 (Fred Maciel)
99 - Fix for EISA initialisation (Jan Rekorajski)
100 - Renamed MII_XCVR_PWR and EEPROM_230 to align with 3c575_cb and D. Becker's drivers
101 - Fixed MII_XCVR_PWR for 3CCFE575CT
102 - Added INVERT_LED_PWR, used it.
103 - Backed out the extra_reset stuff
104
105 LK1.1.9 12 Sep 2000 andrewm
106 - Backed out the tx_reset_resume flags. It was a no-op.
107 - In vortex_error, don't reset the Tx on txReclaim errors
108 - In vortex_error, don't reset the Tx on maxCollisions errors.
109 Hence backed out all the DownListPtr logic here.
110 - In vortex_error, give Tornado cards a partial TxReset on
111 maxCollisions (David Hinds). Defined MAX_COLLISION_RESET for this.
112 - Redid some driver flags and device names based on pcmcia_cs-3.1.20.
113 - Fixed a bug where, if vp->tx_full is set when the interface
114 is downed, it remains set when the interface is upped. Bad
115 things happen.
116
117 LK1.1.10 17 Sep 2000 andrewm
118 - Added EEPROM_8BIT for 3c555 (Fred Maciel)
119 - Added experimental support for the 3c556B Laptop Hurricane (Louis Gerbarg)
120 - Add HAS_NWAY to "3c900 Cyclone 10Mbps TPO"
121
122 LK1.1.11 13 Nov 2000 andrewm
123 - Dump MOD_INC/DEC_USE_COUNT, use SET_MODULE_OWNER
124
125 LK1.1.12 1 Jan 2001 andrewm (2.4.0-pre1)
126 - Call pci_enable_device before we request our IRQ (Tobias Ringstrom)
127 - Add 3c590 PCI latency timer hack to vortex_probe1 (from 0.99Ra)
128 - Added extended issue_and_wait for the 3c905CX.
129 - Look for an MII on PHY index 24 first (3c905CX oddity).
130 - Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger)
131 - Don't free skbs we don't own on oom path in vortex_open().
132
133 LK1.1.13 27 Jan 2001
134 - Added explicit `medialock' flag so we can truly
135 lock the media type down with `options'.
136 - "check ioremap return and some tidbits" (Arnaldo Carvalho de Melo <acme@conectiva.com.br>)
137 - Added and used EEPROM_NORESET for 3c556B PM resumes.
138 - Fixed leakage of vp->rx_ring.
139 - Break out separate HAS_HWCKSM device capability flag.
140 - Kill vp->tx_full (ANK)
141 - Merge zerocopy fragment handling (ANK?)
142
143 LK1.1.14 15 Feb 2001
144 - Enable WOL. Can be turned on with `enable_wol' module option.
145 - EISA and PCI initialisation fixes (jgarzik, Manfred Spraul)
146 - If a device's internalconfig register reports it has NWAY,
147 use it, even if autoselect is enabled.
148
149 LK1.1.15 6 June 2001 akpm
150 - Prevent double counting of received bytes (Lars Christensen)
151 - Add ethtool support (jgarzik)
152 - Add module parm descriptions (Andrzej M. Krzysztofowicz)
153 - Implemented alloc_etherdev() API
154 - Special-case the 'Tx error 82' message.
155
156 LK1.1.16 18 July 2001 akpm
157 - Make NETIF_F_SG dependent upon nr_free_highpages(), not on CONFIG_HIGHMEM
158 - Lessen verbosity of bootup messages
159 - Fix WOL - use new PM API functions.
160 - Use netif_running() instead of vp->open in suspend/resume.
161 - Don't reset the interface logic on open/close/rmmod. It upsets
162 autonegotiation, and hence DHCP (from 0.99T).
163 - Back out EEPROM_NORESET flag because of the above (we do it for all
164 NICs).
165 - Correct 3c982 identification string
166 - Rename wait_for_completion() to issue_and_wait() to avoid completion.h
167 clash.
168
169 LK1.1.17 18Dec01 akpm
170 - PCI ID 9805 is a Python-T, not a dual-port Cyclone. Apparently.
171 And it has NWAY.
172 - Mask our advertised modes (vp->advertising) with our capabilities
173 (MII reg5) when deciding which duplex mode to use.
174 - Add `global_options' as default for options[]. Ditto global_enable_wol,
175 global_full_duplex.
176
177 LK1.1.18 01Jul02 akpm
178 - Fix for undocumented transceiver power-up bit on some 3c566B's
179 (Donald Becker, Rahul Karnik)
180
181 - See http://www.zip.com.au/~akpm/linux/#3c59x-2.3 for more details.
182 - Also see Documentation/networking/vortex.txt
183
184 LK1.1.19 10Nov02 Marc Zyngier <maz@wild-wind.fr.eu.org>
185 - EISA sysfs integration.
186*/ 20*/
187 21
188/* 22/*
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index cd9718512d1c..e4f4eaff7679 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1709,6 +1709,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
1709 void __iomem *ioaddr = tp->mmio_addr; 1709 void __iomem *ioaddr = tp->mmio_addr;
1710 unsigned int entry; 1710 unsigned int entry;
1711 unsigned int len = skb->len; 1711 unsigned int len = skb->len;
1712 unsigned long flags;
1712 1713
1713 /* Calculate the next Tx descriptor entry. */ 1714 /* Calculate the next Tx descriptor entry. */
1714 entry = tp->cur_tx % NUM_TX_DESC; 1715 entry = tp->cur_tx % NUM_TX_DESC;
@@ -1725,7 +1726,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
1725 return 0; 1726 return 0;
1726 } 1727 }
1727 1728
1728 spin_lock_irq(&tp->lock); 1729 spin_lock_irqsave(&tp->lock, flags);
1729 RTL_W32_F (TxStatus0 + (entry * sizeof (u32)), 1730 RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
1730 tp->tx_flag | max(len, (unsigned int)ETH_ZLEN)); 1731 tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
1731 1732
@@ -1736,7 +1737,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
1736 1737
1737 if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) 1738 if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
1738 netif_stop_queue (dev); 1739 netif_stop_queue (dev);
1739 spin_unlock_irq(&tp->lock); 1740 spin_unlock_irqrestore(&tp->lock, flags);
1740 1741
1741 if (netif_msg_tx_queued(tp)) 1742 if (netif_msg_tx_queued(tp))
1742 printk (KERN_DEBUG "%s: Queued Tx packet size %u to slot %d.\n", 1743 printk (KERN_DEBUG "%s: Queued Tx packet size %u to slot %d.\n",
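The 8139too change converts rtl8139_start_xmit() from spin_lock_irq() to spin_lock_irqsave(): the saved flags mean the unlock path restores the caller's interrupt state instead of unconditionally re-enabling interrupts. The general pattern, as a self-contained sketch with a hypothetical lock and function:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    static void example_touch_shared_state(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&example_lock, flags);        /* records current IRQ state */
            /* ... touch data shared with an interrupt handler ... */
            spin_unlock_irqrestore(&example_lock, flags);   /* restores, never enables   */
    }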
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 64b6a72b4f6a..db73de0d2511 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1639,7 +1639,7 @@ bnx2_tx_int(struct bnx2 *bp)
1639 skb = tx_buf->skb; 1639 skb = tx_buf->skb;
1640#ifdef BCM_TSO 1640#ifdef BCM_TSO
1641 /* partial BD completions possible with TSO packets */ 1641 /* partial BD completions possible with TSO packets */
1642 if (skb_shinfo(skb)->gso_size) { 1642 if (skb_is_gso(skb)) {
1643 u16 last_idx, last_ring_idx; 1643 u16 last_idx, last_ring_idx;
1644 1644
1645 last_idx = sw_cons + 1645 last_idx = sw_cons +
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 87f94d939ff8..61b3754f50ff 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1417,7 +1417,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1417 struct cpl_tx_pkt *cpl; 1417 struct cpl_tx_pkt *cpl;
1418 1418
1419#ifdef NETIF_F_TSO 1419#ifdef NETIF_F_TSO
1420 if (skb_shinfo(skb)->gso_size) { 1420 if (skb_is_gso(skb)) {
1421 int eth_type; 1421 int eth_type;
1422 struct cpl_tx_pkt_lso *hdr; 1422 struct cpl_tx_pkt_lso *hdr;
1423 1423
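bnx2 and the Chelsio sge driver (and several other drivers in this diffstat) swap the open-coded skb_shinfo(skb)->gso_size test for the new skb_is_gso() helper added to include/linux/skbuff.h. The helper is presumably just a thin wrapper along these lines (an inference, not the verbatim header):

    static inline int skb_is_gso(const struct sk_buff *skb)
    {
            return skb_shinfo(skb)->gso_size;
    }

    /* call sites then read as a predicate: */
    if (skb_is_gso(skb)) {
            /* TSO/GSO-specific transmit handling */
    }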
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 3042d33e2d4d..f411bbb44f86 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -68,7 +68,6 @@
68#ifdef NETIF_F_TSO 68#ifdef NETIF_F_TSO
69#include <net/checksum.h> 69#include <net/checksum.h>
70#endif 70#endif
71#include <linux/workqueue.h>
72#include <linux/mii.h> 71#include <linux/mii.h>
73#include <linux/ethtool.h> 72#include <linux/ethtool.h>
74#include <linux/if_vlan.h> 73#include <linux/if_vlan.h>
@@ -143,6 +142,7 @@ struct e1000_adapter;
143 142
144#define AUTO_ALL_MODES 0 143#define AUTO_ALL_MODES 0
145#define E1000_EEPROM_82544_APM 0x0004 144#define E1000_EEPROM_82544_APM 0x0004
145#define E1000_EEPROM_ICH8_APME 0x0004
146#define E1000_EEPROM_APME 0x0400 146#define E1000_EEPROM_APME 0x0400
147 147
148#ifndef E1000_MASTER_SLAVE 148#ifndef E1000_MASTER_SLAVE
@@ -254,7 +254,6 @@ struct e1000_adapter {
254 spinlock_t tx_queue_lock; 254 spinlock_t tx_queue_lock;
255#endif 255#endif
256 atomic_t irq_sem; 256 atomic_t irq_sem;
257 struct work_struct watchdog_task;
258 struct work_struct reset_task; 257 struct work_struct reset_task;
259 uint8_t fc_autoneg; 258 uint8_t fc_autoneg;
260 259
@@ -339,8 +338,14 @@ struct e1000_adapter {
339#ifdef NETIF_F_TSO 338#ifdef NETIF_F_TSO
340 boolean_t tso_force; 339 boolean_t tso_force;
341#endif 340#endif
341 boolean_t smart_power_down; /* phy smart power down */
342 unsigned long flags;
342}; 343};
343 344
345enum e1000_state_t {
346 __E1000_DRIVER_TESTING,
347 __E1000_RESETTING,
348};
344 349
345/* e1000_main.c */ 350/* e1000_main.c */
346extern char e1000_driver_name[]; 351extern char e1000_driver_name[];
@@ -348,6 +353,7 @@ extern char e1000_driver_version[];
348int e1000_up(struct e1000_adapter *adapter); 353int e1000_up(struct e1000_adapter *adapter);
349void e1000_down(struct e1000_adapter *adapter); 354void e1000_down(struct e1000_adapter *adapter);
350void e1000_reset(struct e1000_adapter *adapter); 355void e1000_reset(struct e1000_adapter *adapter);
356void e1000_reinit_locked(struct e1000_adapter *adapter);
351int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); 357int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
352void e1000_free_all_tx_resources(struct e1000_adapter *adapter); 358void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
353int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); 359int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
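e1000.h gains an adapter->flags word, the __E1000_DRIVER_TESTING/__E1000_RESETTING bits and a declaration of e1000_reinit_locked(); the ethtool paths below then call that helper instead of open-coded e1000_down()/e1000_up() pairs so concurrent resets serialize on the flag. Its body is not shown in this section, but judging from how e1000_set_ringparam() uses the bit, it plausibly looks something like this sketch (an assumption, not the actual e1000_main.c code):

    void e1000_reinit_locked(struct e1000_adapter *adapter)
    {
            /* wait for any reset already in progress to finish */
            while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
                    msleep(1);
            e1000_down(adapter);
            e1000_up(adapter);
            clear_bit(__E1000_RESETTING, &adapter->flags);
    }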
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index d19664891768..88a82ba88f57 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -109,7 +109,8 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
109 SUPPORTED_1000baseT_Full| 109 SUPPORTED_1000baseT_Full|
110 SUPPORTED_Autoneg | 110 SUPPORTED_Autoneg |
111 SUPPORTED_TP); 111 SUPPORTED_TP);
112 112 if (hw->phy_type == e1000_phy_ife)
113 ecmd->supported &= ~SUPPORTED_1000baseT_Full;
113 ecmd->advertising = ADVERTISED_TP; 114 ecmd->advertising = ADVERTISED_TP;
114 115
115 if (hw->autoneg == 1) { 116 if (hw->autoneg == 1) {
@@ -203,11 +204,9 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
203 204
204 /* reset the link */ 205 /* reset the link */
205 206
206 if (netif_running(adapter->netdev)) { 207 if (netif_running(adapter->netdev))
207 e1000_down(adapter); 208 e1000_reinit_locked(adapter);
208 e1000_reset(adapter); 209 else
209 e1000_up(adapter);
210 } else
211 e1000_reset(adapter); 210 e1000_reset(adapter);
212 211
213 return 0; 212 return 0;
@@ -254,10 +253,9 @@ e1000_set_pauseparam(struct net_device *netdev,
254 hw->original_fc = hw->fc; 253 hw->original_fc = hw->fc;
255 254
256 if (adapter->fc_autoneg == AUTONEG_ENABLE) { 255 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
257 if (netif_running(adapter->netdev)) { 256 if (netif_running(adapter->netdev))
258 e1000_down(adapter); 257 e1000_reinit_locked(adapter);
259 e1000_up(adapter); 258 else
260 } else
261 e1000_reset(adapter); 259 e1000_reset(adapter);
262 } else 260 } else
263 return ((hw->media_type == e1000_media_type_fiber) ? 261 return ((hw->media_type == e1000_media_type_fiber) ?
@@ -279,10 +277,9 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
279 struct e1000_adapter *adapter = netdev_priv(netdev); 277 struct e1000_adapter *adapter = netdev_priv(netdev);
280 adapter->rx_csum = data; 278 adapter->rx_csum = data;
281 279
282 if (netif_running(netdev)) { 280 if (netif_running(netdev))
283 e1000_down(adapter); 281 e1000_reinit_locked(adapter);
284 e1000_up(adapter); 282 else
285 } else
286 e1000_reset(adapter); 283 e1000_reset(adapter);
287 return 0; 284 return 0;
288} 285}
@@ -577,6 +574,7 @@ e1000_get_drvinfo(struct net_device *netdev,
577 case e1000_82572: 574 case e1000_82572:
578 case e1000_82573: 575 case e1000_82573:
579 case e1000_80003es2lan: 576 case e1000_80003es2lan:
577 case e1000_ich8lan:
580 sprintf(firmware_version, "%d.%d-%d", 578 sprintf(firmware_version, "%d.%d-%d",
581 (eeprom_data & 0xF000) >> 12, 579 (eeprom_data & 0xF000) >> 12,
582 (eeprom_data & 0x0FF0) >> 4, 580 (eeprom_data & 0x0FF0) >> 4,
@@ -631,6 +629,9 @@ e1000_set_ringparam(struct net_device *netdev,
631 tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues; 629 tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
632 rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues; 630 rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
633 631
632 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
633 msleep(1);
634
634 if (netif_running(adapter->netdev)) 635 if (netif_running(adapter->netdev))
635 e1000_down(adapter); 636 e1000_down(adapter);
636 637
@@ -691,9 +692,11 @@ e1000_set_ringparam(struct net_device *netdev,
691 adapter->rx_ring = rx_new; 692 adapter->rx_ring = rx_new;
692 adapter->tx_ring = tx_new; 693 adapter->tx_ring = tx_new;
693 if ((err = e1000_up(adapter))) 694 if ((err = e1000_up(adapter)))
694 return err; 695 goto err_setup;
695 } 696 }
696 697
698 clear_bit(__E1000_RESETTING, &adapter->flags);
699
697 return 0; 700 return 0;
698err_setup_tx: 701err_setup_tx:
699 e1000_free_all_rx_resources(adapter); 702 e1000_free_all_rx_resources(adapter);
@@ -701,6 +704,8 @@ err_setup_rx:
701 adapter->rx_ring = rx_old; 704 adapter->rx_ring = rx_old;
702 adapter->tx_ring = tx_old; 705 adapter->tx_ring = tx_old;
703 e1000_up(adapter); 706 e1000_up(adapter);
707err_setup:
708 clear_bit(__E1000_RESETTING, &adapter->flags);
704 return err; 709 return err;
705} 710}
706 711
@@ -754,6 +759,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
754 toggle = 0x7FFFF3FF; 759 toggle = 0x7FFFF3FF;
755 break; 760 break;
756 case e1000_82573: 761 case e1000_82573:
762 case e1000_ich8lan:
757 toggle = 0x7FFFF033; 763 toggle = 0x7FFFF033;
758 break; 764 break;
759 default: 765 default:
@@ -773,11 +779,12 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
773 } 779 }
774 /* restore previous status */ 780 /* restore previous status */
775 E1000_WRITE_REG(&adapter->hw, STATUS, before); 781 E1000_WRITE_REG(&adapter->hw, STATUS, before);
776 782 if (adapter->hw.mac_type != e1000_ich8lan) {
777 REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); 783 REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
778 REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); 784 REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
779 REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); 785 REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
780 REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); 786 REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
787 }
781 REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); 788 REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
782 REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); 789 REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
783 REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); 790 REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
@@ -790,20 +797,22 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
790 REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); 797 REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);
791 798
792 REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); 799 REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
793 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB); 800 before = (adapter->hw.mac_type == e1000_ich8lan ?
801 0x06C3B33E : 0x06DFB3FE);
802 REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
794 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); 803 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
795 804
796 if (adapter->hw.mac_type >= e1000_82543) { 805 if (adapter->hw.mac_type >= e1000_82543) {
797 806
798 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF); 807 REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
799 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 808 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
800 REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); 809 if (adapter->hw.mac_type != e1000_ich8lan)
810 REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
801 REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 811 REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
802 REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); 812 REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
803 813 value = (adapter->hw.mac_type == e1000_ich8lan ?
804 for (i = 0; i < E1000_RAR_ENTRIES; i++) { 814 E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
805 REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF, 815 for (i = 0; i < value; i++) {
806 0xFFFFFFFF);
807 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, 816 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
808 0xFFFFFFFF); 817 0xFFFFFFFF);
809 } 818 }
@@ -817,7 +826,9 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
817 826
818 } 827 }
819 828
820 for (i = 0; i < E1000_MC_TBL_SIZE; i++) 829 value = (adapter->hw.mac_type == e1000_ich8lan ?
830 E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE);
831 for (i = 0; i < value; i++)
821 REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); 832 REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
822 833
823 *data = 0; 834 *data = 0;
@@ -889,6 +900,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
889 /* Test each interrupt */ 900 /* Test each interrupt */
890 for (; i < 10; i++) { 901 for (; i < 10; i++) {
891 902
903 if (adapter->hw.mac_type == e1000_ich8lan && i == 8)
904 continue;
892 /* Interrupt to test */ 905 /* Interrupt to test */
893 mask = 1 << i; 906 mask = 1 << i;
894 907
@@ -1246,18 +1259,33 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1246 } else if (adapter->hw.phy_type == e1000_phy_gg82563) { 1259 } else if (adapter->hw.phy_type == e1000_phy_gg82563) {
1247 e1000_write_phy_reg(&adapter->hw, 1260 e1000_write_phy_reg(&adapter->hw,
1248 GG82563_PHY_KMRN_MODE_CTRL, 1261 GG82563_PHY_KMRN_MODE_CTRL,
1249 0x1CE); 1262 0x1CC);
1250 } 1263 }
1251 /* force 1000, set loopback */
1252 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140);
1253 1264
1254 /* Now set up the MAC to the same speed/duplex as the PHY. */
1255 ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL); 1265 ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
1256 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 1266
1257 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 1267 if (adapter->hw.phy_type == e1000_phy_ife) {
1258 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 1268 /* force 100, set loopback */
1259 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ 1269 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x6100);
1260 E1000_CTRL_FD); /* Force Duplex to FULL */ 1270
1271 /* Now set up the MAC to the same speed/duplex as the PHY. */
1272 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
1273 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1274 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1275 E1000_CTRL_SPD_100 |/* Force Speed to 100 */
1276 E1000_CTRL_FD); /* Force Duplex to FULL */
1277 } else {
1278 /* force 1000, set loopback */
1279 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140);
1280
1281 /* Now set up the MAC to the same speed/duplex as the PHY. */
1282 ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
1283 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
1284 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1285 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1286 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1287 E1000_CTRL_FD); /* Force Duplex to FULL */
1288 }
1261 1289
1262 if (adapter->hw.media_type == e1000_media_type_copper && 1290 if (adapter->hw.media_type == e1000_media_type_copper &&
1263 adapter->hw.phy_type == e1000_phy_m88) { 1291 adapter->hw.phy_type == e1000_phy_m88) {
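Note: the new IFE branch forces 100 Mb/s full duplex in the PHY control register (0x6100) instead of the gigabit value (0x4140), then mirrors the same forced speed/duplex into the MAC CTRL register. A small sketch of that "make the MAC agree with the forced PHY" step; only the two PHY_CTRL values are taken from the diff, the CTRL bit positions and helper name are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Bit layout mirrors the E1000_CTRL_* names used in the diff; values here
 * are placeholders for illustration. */
#define CTRL_FD        0x00000001u  /* force full duplex */
#define CTRL_SPD_SEL   0x00000300u  /* speed select field */
#define CTRL_SPD_100   0x00000100u
#define CTRL_SPD_1000  0x00000200u
#define CTRL_FRCSPD    0x00000800u  /* honour the speed select field */
#define CTRL_FRCDPX    0x00001000u  /* honour the duplex bit */

/* Return the MAC CTRL value matching a PHY that was forced into loopback. */
static uint32_t mac_ctrl_for_loopback(uint32_t ctrl, bool gigabit_phy)
{
    ctrl &= ~CTRL_SPD_SEL;                       /* clear the old speed */
    ctrl |= CTRL_FRCSPD | CTRL_FRCDPX | CTRL_FD; /* force speed and duplex */
    ctrl |= gigabit_phy ? CTRL_SPD_1000 : CTRL_SPD_100;
    return ctrl;
}

int main(void)
{
    printf("IFE (10/100) : CTRL=0x%08X, PHY_CTRL=0x6100\n",
           (unsigned)mac_ctrl_for_loopback(0, false));
    printf("gigabit PHY  : CTRL=0x%08X, PHY_CTRL=0x4140\n",
           (unsigned)mac_ctrl_for_loopback(0, true));
    return 0;
}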
@@ -1317,6 +1345,7 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
1317 case e1000_82572: 1345 case e1000_82572:
1318 case e1000_82573: 1346 case e1000_82573:
1319 case e1000_80003es2lan: 1347 case e1000_80003es2lan:
1348 case e1000_ich8lan:
1320 return e1000_integrated_phy_loopback(adapter); 1349 return e1000_integrated_phy_loopback(adapter);
1321 break; 1350 break;
1322 1351
@@ -1568,6 +1597,7 @@ e1000_diag_test(struct net_device *netdev,
1568 struct e1000_adapter *adapter = netdev_priv(netdev); 1597 struct e1000_adapter *adapter = netdev_priv(netdev);
1569 boolean_t if_running = netif_running(netdev); 1598 boolean_t if_running = netif_running(netdev);
1570 1599
1600 set_bit(__E1000_DRIVER_TESTING, &adapter->flags);
1571 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1601 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1572 /* Offline tests */ 1602 /* Offline tests */
1573 1603
@@ -1582,7 +1612,8 @@ e1000_diag_test(struct net_device *netdev,
1582 eth_test->flags |= ETH_TEST_FL_FAILED; 1612 eth_test->flags |= ETH_TEST_FL_FAILED;
1583 1613
1584 if (if_running) 1614 if (if_running)
1585 e1000_down(adapter); 1615 /* indicate we're in test mode */
1616 dev_close(netdev);
1586 else 1617 else
1587 e1000_reset(adapter); 1618 e1000_reset(adapter);
1588 1619
@@ -1607,8 +1638,9 @@ e1000_diag_test(struct net_device *netdev,
1607 adapter->hw.autoneg = autoneg; 1638 adapter->hw.autoneg = autoneg;
1608 1639
1609 e1000_reset(adapter); 1640 e1000_reset(adapter);
1641 clear_bit(__E1000_DRIVER_TESTING, &adapter->flags);
1610 if (if_running) 1642 if (if_running)
1611 e1000_up(adapter); 1643 dev_open(netdev);
1612 } else { 1644 } else {
1613 /* Online tests */ 1645 /* Online tests */
1614 if (e1000_link_test(adapter, &data[4])) 1646 if (e1000_link_test(adapter, &data[4]))
@@ -1619,6 +1651,8 @@ e1000_diag_test(struct net_device *netdev,
1619 data[1] = 0; 1651 data[1] = 0;
1620 data[2] = 0; 1652 data[2] = 0;
1621 data[3] = 0; 1653 data[3] = 0;
1654
1655 clear_bit(__E1000_DRIVER_TESTING, &adapter->flags);
1622 } 1656 }
1623 msleep_interruptible(4 * 1000); 1657 msleep_interruptible(4 * 1000);
1624} 1658}
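Note: the hunks above bracket the offline tests with the __E1000_DRIVER_TESTING flag and switch from e1000_down/e1000_up to dev_close/dev_open, so other code paths can tell the interface is in test mode. A minimal sketch of that guard pattern on a plain struct; the flag word, bit name and helpers are simplified stand-ins, and the driver uses atomic set_bit/clear_bit rather than plain bit operations:

#include <stdio.h>
#include <stdbool.h>

struct adapter { unsigned long flags; bool if_running; };

#define DRIVER_TESTING_BIT 0  /* stand-in for __E1000_DRIVER_TESTING */

static void run_offline_tests(struct adapter *a) { (void)a; printf("offline tests\n"); }
static void dev_close_stub(struct adapter *a)    { a->if_running = false; }
static void dev_open_stub(struct adapter *a)     { a->if_running = true; }

static void diag_test(struct adapter *a, bool offline)
{
    a->flags |= 1ul << DRIVER_TESTING_BIT;        /* mark: in test mode */
    if (offline) {
        bool was_running = a->if_running;
        if (was_running)
            dev_close_stub(a);                    /* take the interface down */
        run_offline_tests(a);
        a->flags &= ~(1ul << DRIVER_TESTING_BIT); /* clear before reopening */
        if (was_running)
            dev_open_stub(a);
    } else {
        printf("online tests\n");
        a->flags &= ~(1ul << DRIVER_TESTING_BIT);
    }
}

int main(void)
{
    struct adapter a = { .flags = 0, .if_running = true };
    diag_test(&a, true);
    return a.if_running ? 0 : 1;
}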
@@ -1778,21 +1812,18 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
1778 mod_timer(&adapter->blink_timer, jiffies); 1812 mod_timer(&adapter->blink_timer, jiffies);
1779 msleep_interruptible(data * 1000); 1813 msleep_interruptible(data * 1000);
1780 del_timer_sync(&adapter->blink_timer); 1814 del_timer_sync(&adapter->blink_timer);
1781 } else if (adapter->hw.mac_type < e1000_82573) { 1815 } else if (adapter->hw.phy_type == e1000_phy_ife) {
1782 E1000_WRITE_REG(&adapter->hw, LEDCTL, 1816 if (!adapter->blink_timer.function) {
1783 (E1000_LEDCTL_LED2_BLINK_RATE | 1817 init_timer(&adapter->blink_timer);
1784 E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK | 1818 adapter->blink_timer.function = e1000_led_blink_callback;
1785 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) | 1819 adapter->blink_timer.data = (unsigned long) adapter;
1786 (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) | 1820 }
1787 (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT))); 1821 mod_timer(&adapter->blink_timer, jiffies);
1788 msleep_interruptible(data * 1000); 1822 msleep_interruptible(data * 1000);
1823 del_timer_sync(&adapter->blink_timer);
1824 e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0);
1789 } else { 1825 } else {
1790 E1000_WRITE_REG(&adapter->hw, LEDCTL, 1826 e1000_blink_led_start(&adapter->hw);
1791 (E1000_LEDCTL_LED2_BLINK_RATE |
1792 E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
1793 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
1794 (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
1795 (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
1796 msleep_interruptible(data * 1000); 1827 msleep_interruptible(data * 1000);
1797 } 1828 }
1798 1829
@@ -1807,10 +1838,8 @@ static int
1807e1000_nway_reset(struct net_device *netdev) 1838e1000_nway_reset(struct net_device *netdev)
1808{ 1839{
1809 struct e1000_adapter *adapter = netdev_priv(netdev); 1840 struct e1000_adapter *adapter = netdev_priv(netdev);
1810 if (netif_running(netdev)) { 1841 if (netif_running(netdev))
1811 e1000_down(adapter); 1842 e1000_reinit_locked(adapter);
1812 e1000_up(adapter);
1813 }
1814 return 0; 1843 return 0;
1815} 1844}
1816 1845
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 3959039b16ec..583518ae49ce 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -101,7 +101,8 @@ static void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset,
101 101
102#define E1000_WRITE_REG_IO(a, reg, val) \ 102#define E1000_WRITE_REG_IO(a, reg, val) \
103 e1000_write_reg_io((a), E1000_##reg, val) 103 e1000_write_reg_io((a), E1000_##reg, val)
104static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw); 104static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw,
105 uint16_t duplex);
105static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw); 106static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
106 107
107/* IGP cable length table */ 108/* IGP cable length table */
@@ -156,6 +157,14 @@ e1000_set_phy_type(struct e1000_hw *hw)
156 hw->phy_type = e1000_phy_igp; 157 hw->phy_type = e1000_phy_igp;
157 break; 158 break;
158 } 159 }
160 case IGP03E1000_E_PHY_ID:
161 hw->phy_type = e1000_phy_igp_3;
162 break;
163 case IFE_E_PHY_ID:
164 case IFE_PLUS_E_PHY_ID:
165 case IFE_C_E_PHY_ID:
166 hw->phy_type = e1000_phy_ife;
167 break;
159 case GG82563_E_PHY_ID: 168 case GG82563_E_PHY_ID:
160 if (hw->mac_type == e1000_80003es2lan) { 169 if (hw->mac_type == e1000_80003es2lan) {
161 hw->phy_type = e1000_phy_gg82563; 170 hw->phy_type = e1000_phy_gg82563;
@@ -332,6 +341,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
332 break; 341 break;
333 case E1000_DEV_ID_82541EI: 342 case E1000_DEV_ID_82541EI:
334 case E1000_DEV_ID_82541EI_MOBILE: 343 case E1000_DEV_ID_82541EI_MOBILE:
344 case E1000_DEV_ID_82541ER_LOM:
335 hw->mac_type = e1000_82541; 345 hw->mac_type = e1000_82541;
336 break; 346 break;
337 case E1000_DEV_ID_82541ER: 347 case E1000_DEV_ID_82541ER:
@@ -341,6 +351,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
341 hw->mac_type = e1000_82541_rev_2; 351 hw->mac_type = e1000_82541_rev_2;
342 break; 352 break;
343 case E1000_DEV_ID_82547EI: 353 case E1000_DEV_ID_82547EI:
354 case E1000_DEV_ID_82547EI_MOBILE:
344 hw->mac_type = e1000_82547; 355 hw->mac_type = e1000_82547;
345 break; 356 break;
346 case E1000_DEV_ID_82547GI: 357 case E1000_DEV_ID_82547GI:
@@ -354,6 +365,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
354 case E1000_DEV_ID_82572EI_COPPER: 365 case E1000_DEV_ID_82572EI_COPPER:
355 case E1000_DEV_ID_82572EI_FIBER: 366 case E1000_DEV_ID_82572EI_FIBER:
356 case E1000_DEV_ID_82572EI_SERDES: 367 case E1000_DEV_ID_82572EI_SERDES:
368 case E1000_DEV_ID_82572EI:
357 hw->mac_type = e1000_82572; 369 hw->mac_type = e1000_82572;
358 break; 370 break;
359 case E1000_DEV_ID_82573E: 371 case E1000_DEV_ID_82573E:
@@ -361,16 +373,29 @@ e1000_set_mac_type(struct e1000_hw *hw)
361 case E1000_DEV_ID_82573L: 373 case E1000_DEV_ID_82573L:
362 hw->mac_type = e1000_82573; 374 hw->mac_type = e1000_82573;
363 break; 375 break;
376 case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
377 case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
364 case E1000_DEV_ID_80003ES2LAN_COPPER_DPT: 378 case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
365 case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: 379 case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
366 hw->mac_type = e1000_80003es2lan; 380 hw->mac_type = e1000_80003es2lan;
367 break; 381 break;
382 case E1000_DEV_ID_ICH8_IGP_M_AMT:
383 case E1000_DEV_ID_ICH8_IGP_AMT:
384 case E1000_DEV_ID_ICH8_IGP_C:
385 case E1000_DEV_ID_ICH8_IFE:
386 case E1000_DEV_ID_ICH8_IGP_M:
387 hw->mac_type = e1000_ich8lan;
388 break;
368 default: 389 default:
369 /* Should never have loaded on this device */ 390 /* Should never have loaded on this device */
370 return -E1000_ERR_MAC_TYPE; 391 return -E1000_ERR_MAC_TYPE;
371 } 392 }
372 393
373 switch(hw->mac_type) { 394 switch(hw->mac_type) {
395 case e1000_ich8lan:
396 hw->swfwhw_semaphore_present = TRUE;
397 hw->asf_firmware_present = TRUE;
398 break;
374 case e1000_80003es2lan: 399 case e1000_80003es2lan:
375 hw->swfw_sync_present = TRUE; 400 hw->swfw_sync_present = TRUE;
376 /* fall through */ 401 /* fall through */
@@ -423,6 +448,7 @@ e1000_set_media_type(struct e1000_hw *hw)
423 case e1000_82542_rev2_1: 448 case e1000_82542_rev2_1:
424 hw->media_type = e1000_media_type_fiber; 449 hw->media_type = e1000_media_type_fiber;
425 break; 450 break;
451 case e1000_ich8lan:
426 case e1000_82573: 452 case e1000_82573:
427 /* The STATUS_TBIMODE bit is reserved or reused for this 453
428 * device. 454 * device.
@@ -527,6 +553,14 @@ e1000_reset_hw(struct e1000_hw *hw)
527 } while(timeout); 553 } while(timeout);
528 } 554 }
529 555
556 /* Workaround for ICH8 bit corruption issue in FIFO memory */
557 if (hw->mac_type == e1000_ich8lan) {
558 /* Set Tx and Rx buffer allocation to 8k apiece. */
559 E1000_WRITE_REG(hw, PBA, E1000_PBA_8K);
560 /* Set Packet Buffer Size to 16k. */
561 E1000_WRITE_REG(hw, PBS, E1000_PBS_16K);
562 }
563
530 /* Issue a global reset to the MAC. This will reset the chip's 564 /* Issue a global reset to the MAC. This will reset the chip's
531 * transmit, receive, DMA, and link units. It will not affect 565
532 * the current PCI configuration. The global reset bit is self- 566 * the current PCI configuration. The global reset bit is self-
@@ -550,6 +584,20 @@ e1000_reset_hw(struct e1000_hw *hw)
550 /* Reset is performed on a shadow of the control register */ 584 /* Reset is performed on a shadow of the control register */
551 E1000_WRITE_REG(hw, CTRL_DUP, (ctrl | E1000_CTRL_RST)); 585 E1000_WRITE_REG(hw, CTRL_DUP, (ctrl | E1000_CTRL_RST));
552 break; 586 break;
587 case e1000_ich8lan:
588 if (!hw->phy_reset_disable &&
589 e1000_check_phy_reset_block(hw) == E1000_SUCCESS) {
590 /* e1000_ich8lan PHY HW reset requires MAC CORE reset
591 * at the same time to make sure the interface between
592 * MAC and the external PHY is reset.
593 */
594 ctrl |= E1000_CTRL_PHY_RST;
595 }
596
597 e1000_get_software_flag(hw);
598 E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST));
599 msec_delay(5);
600 break;
553 default: 601 default:
554 E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST)); 602 E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST));
555 break; 603 break;
@@ -591,6 +639,7 @@ e1000_reset_hw(struct e1000_hw *hw)
591 /* fall through */ 639 /* fall through */
592 case e1000_82571: 640 case e1000_82571:
593 case e1000_82572: 641 case e1000_82572:
642 case e1000_ich8lan:
594 case e1000_80003es2lan: 643 case e1000_80003es2lan:
595 ret_val = e1000_get_auto_rd_done(hw); 644 ret_val = e1000_get_auto_rd_done(hw);
596 if(ret_val) 645 if(ret_val)
@@ -633,6 +682,12 @@ e1000_reset_hw(struct e1000_hw *hw)
633 e1000_pci_set_mwi(hw); 682 e1000_pci_set_mwi(hw);
634 } 683 }
635 684
685 if (hw->mac_type == e1000_ich8lan) {
686 uint32_t kab = E1000_READ_REG(hw, KABGTXD);
687 kab |= E1000_KABGTXD_BGSQLBIAS;
688 E1000_WRITE_REG(hw, KABGTXD, kab);
689 }
690
636 return E1000_SUCCESS; 691 return E1000_SUCCESS;
637} 692}
638 693
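Note: on ICH8 the reset path above resizes the Tx/Rx packet buffers (PBA to 8k, PBS to 16k) before the global reset, asserts the PHY reset together with the MAC reset bit so the MAC/PHY interface is reset as one unit, and afterwards sets the bandgap bias bit in KABGTXD. A compressed sketch of that ordering with mocked register helpers; the register indices and constant values are illustrative, only the sequence mirrors the diff:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[64];
static void     wr(unsigned r, uint32_t v) { regs[r] = v; printf("reg[%u] <- 0x%08X\n", r, (unsigned)v); }
static uint32_t rd(unsigned r)             { return regs[r]; }

enum { PBA, PBS, CTRL, KABGTXD };          /* stand-in register indices */
#define PBA_8K            0x0008u
#define PBS_16K           0x0010u
#define CTRL_RST          (1u << 26)
#define CTRL_PHY_RST      (1u << 31)
#define KABGTXD_BGSQLBIAS (1u << 5)

static void ich8_reset_sequence(int phy_reset_allowed)
{
    /* 1. FIFO corruption workaround: shrink the packet buffers first. */
    wr(PBA, PBA_8K);
    wr(PBS, PBS_16K);

    /* 2. Global MAC reset, optionally pulling PHY reset at the same time. */
    uint32_t ctrl = rd(CTRL) | CTRL_RST;
    if (phy_reset_allowed)
        ctrl |= CTRL_PHY_RST;
    wr(CTRL, ctrl);

    /* 3. After reset, re-enable the bandgap bias for the analog front end. */
    wr(KABGTXD, rd(KABGTXD) | KABGTXD_BGSQLBIAS);
}

int main(void) { ich8_reset_sequence(1); return 0; }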
@@ -675,9 +730,12 @@ e1000_init_hw(struct e1000_hw *hw)
675 730
676 /* Disabling VLAN filtering. */ 731 /* Disabling VLAN filtering. */
677 DEBUGOUT("Initializing the IEEE VLAN\n"); 732 DEBUGOUT("Initializing the IEEE VLAN\n");
678 if (hw->mac_type < e1000_82545_rev_3) 733 /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */
679 E1000_WRITE_REG(hw, VET, 0); 734 if (hw->mac_type != e1000_ich8lan) {
680 e1000_clear_vfta(hw); 735 if (hw->mac_type < e1000_82545_rev_3)
736 E1000_WRITE_REG(hw, VET, 0);
737 e1000_clear_vfta(hw);
738 }
681 739
682 /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ 740 /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
683 if(hw->mac_type == e1000_82542_rev2_0) { 741 if(hw->mac_type == e1000_82542_rev2_0) {
@@ -705,8 +763,14 @@ e1000_init_hw(struct e1000_hw *hw)
705 /* Zero out the Multicast HASH table */ 763 /* Zero out the Multicast HASH table */
706 DEBUGOUT("Zeroing the MTA\n"); 764 DEBUGOUT("Zeroing the MTA\n");
707 mta_size = E1000_MC_TBL_SIZE; 765 mta_size = E1000_MC_TBL_SIZE;
708 for(i = 0; i < mta_size; i++) 766 if (hw->mac_type == e1000_ich8lan)
767 mta_size = E1000_MC_TBL_SIZE_ICH8LAN;
768 for(i = 0; i < mta_size; i++) {
709 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 769 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
770 /* use write flush to prevent Memory Write Block (MWB) from
771 * occuring when accessing our register space */
772 E1000_WRITE_FLUSH(hw);
773 }
710 774
711 /* Set the PCI priority bit correctly in the CTRL register. This 775 /* Set the PCI priority bit correctly in the CTRL register. This
712 * determines if the adapter gives priority to receives, or if it 776 * determines if the adapter gives priority to receives, or if it
@@ -744,6 +808,10 @@ e1000_init_hw(struct e1000_hw *hw)
744 break; 808 break;
745 } 809 }
746 810
811 /* More time needed for PHY to initialize */
812 if (hw->mac_type == e1000_ich8lan)
813 msec_delay(15);
814
747 /* Call a subroutine to configure the link and setup flow control. */ 815 /* Call a subroutine to configure the link and setup flow control. */
748 ret_val = e1000_setup_link(hw); 816 ret_val = e1000_setup_link(hw);
749 817
@@ -757,6 +825,7 @@ e1000_init_hw(struct e1000_hw *hw)
757 case e1000_82571: 825 case e1000_82571:
758 case e1000_82572: 826 case e1000_82572:
759 case e1000_82573: 827 case e1000_82573:
828 case e1000_ich8lan:
760 case e1000_80003es2lan: 829 case e1000_80003es2lan:
761 ctrl |= E1000_TXDCTL_COUNT_DESC; 830 ctrl |= E1000_TXDCTL_COUNT_DESC;
762 break; 831 break;
@@ -795,6 +864,7 @@ e1000_init_hw(struct e1000_hw *hw)
795 /* Fall through */ 864 /* Fall through */
796 case e1000_82571: 865 case e1000_82571:
797 case e1000_82572: 866 case e1000_82572:
867 case e1000_ich8lan:
798 ctrl = E1000_READ_REG(hw, TXDCTL1); 868 ctrl = E1000_READ_REG(hw, TXDCTL1);
799 ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; 869 ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
800 if(hw->mac_type >= e1000_82571) 870 if(hw->mac_type >= e1000_82571)
@@ -818,6 +888,11 @@ e1000_init_hw(struct e1000_hw *hw)
818 */ 888 */
819 e1000_clear_hw_cntrs(hw); 889 e1000_clear_hw_cntrs(hw);
820 890
891 /* ICH8 No-snoop bits are opposite polarity.
892 * Set to snoop by default after reset. */
893 if (hw->mac_type == e1000_ich8lan)
894 e1000_set_pci_ex_no_snoop(hw, PCI_EX_82566_SNOOP_ALL);
895
821 if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || 896 if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
822 hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { 897 hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
823 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); 898 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
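Note: on ICH8 the multicast table is smaller (E1000_MC_TBL_SIZE_ICH8LAN) and every write to it is followed by a flush so the chipset does not coalesce the register writes into a Memory Write Block. A tiny sketch of such a flush-after-each-write loop; the table sizes are taken from my reading of the driver headers and the flush stand-in is purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define MC_TBL_SIZE          128  /* assumed size on non-ICH8 parts */
#define MC_TBL_SIZE_ICH8LAN   32  /* assumed smaller table on ICH8 */

static uint32_t mta[MC_TBL_SIZE];

/* In the driver a STATUS register read forces posted writes out;
 * here a volatile access stands in for that flush. */
static void write_flush(void)
{
    volatile uint32_t sink = mta[0];
    (void)sink;
}

static void zero_mta(int is_ich8lan)
{
    int size = is_ich8lan ? MC_TBL_SIZE_ICH8LAN : MC_TBL_SIZE;
    for (int i = 0; i < size; i++) {
        mta[i] = 0;
        write_flush();   /* flush each write so they are not batched */
    }
    printf("cleared %d MTA entries\n", size);
}

int main(void) { zero_mta(1); return 0; }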
@@ -905,6 +980,7 @@ e1000_setup_link(struct e1000_hw *hw)
905 */ 980 */
906 if (hw->fc == e1000_fc_default) { 981 if (hw->fc == e1000_fc_default) {
907 switch (hw->mac_type) { 982 switch (hw->mac_type) {
983 case e1000_ich8lan:
908 case e1000_82573: 984 case e1000_82573:
909 hw->fc = e1000_fc_full; 985 hw->fc = e1000_fc_full;
910 break; 986 break;
@@ -971,9 +1047,12 @@ e1000_setup_link(struct e1000_hw *hw)
971 */ 1047 */
972 DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); 1048 DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
973 1049
974 E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW); 1050 /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */
975 E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH); 1051 if (hw->mac_type != e1000_ich8lan) {
976 E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE); 1052 E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE);
1053 E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH);
1054 E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW);
1055 }
977 1056
978 E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time); 1057 E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time);
979 1058
@@ -1237,12 +1316,13 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
1237 1316
1238 /* Wait 10ms for MAC to configure PHY from eeprom settings */ 1317 /* Wait 10ms for MAC to configure PHY from eeprom settings */
1239 msec_delay(15); 1318 msec_delay(15);
1240 1319 if (hw->mac_type != e1000_ich8lan) {
1241 /* Configure activity LED after PHY reset */ 1320 /* Configure activity LED after PHY reset */
1242 led_ctrl = E1000_READ_REG(hw, LEDCTL); 1321 led_ctrl = E1000_READ_REG(hw, LEDCTL);
1243 led_ctrl &= IGP_ACTIVITY_LED_MASK; 1322 led_ctrl &= IGP_ACTIVITY_LED_MASK;
1244 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); 1323 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
1245 E1000_WRITE_REG(hw, LEDCTL, led_ctrl); 1324 E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
1325 }
1246 1326
1247 /* disable lplu d3 during driver init */ 1327 /* disable lplu d3 during driver init */
1248 ret_val = e1000_set_d3_lplu_state(hw, FALSE); 1328 ret_val = e1000_set_d3_lplu_state(hw, FALSE);
@@ -1478,8 +1558,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
1478 if (ret_val) 1558 if (ret_val)
1479 return ret_val; 1559 return ret_val;
1480 1560
1481 /* Enable Pass False Carrier on the PHY */ 1561 phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
1482 phy_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
1483 1562
1484 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, 1563 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
1485 phy_data); 1564 phy_data);
@@ -1561,28 +1640,40 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
1561 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; 1640 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
1562 if(hw->disable_polarity_correction == 1) 1641 if(hw->disable_polarity_correction == 1)
1563 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 1642 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
1564 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 1643 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
1565 if(ret_val) 1644 if (ret_val)
1566 return ret_val;
1567
1568 /* Force TX_CLK in the Extended PHY Specific Control Register
1569 * to 25MHz clock.
1570 */
1571 ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
1572 if(ret_val)
1573 return ret_val; 1645 return ret_val;
1574 1646
1575 phy_data |= M88E1000_EPSCR_TX_CLK_25;
1576
1577 if (hw->phy_revision < M88E1011_I_REV_4) { 1647 if (hw->phy_revision < M88E1011_I_REV_4) {
1578 /* Configure Master and Slave downshift values */ 1648 /* Force TX_CLK in the Extended PHY Specific Control Register
1579 phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | 1649 * to 25MHz clock.
1650 */
1651 ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
1652 if (ret_val)
1653 return ret_val;
1654
1655 phy_data |= M88E1000_EPSCR_TX_CLK_25;
1656
1657 if ((hw->phy_revision == E1000_REVISION_2) &&
1658 (hw->phy_id == M88E1111_I_PHY_ID)) {
1659 /* Vidalia Phy, set the downshift counter to 5x */
1660 phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
1661 phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
1662 ret_val = e1000_write_phy_reg(hw,
1663 M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
1664 if (ret_val)
1665 return ret_val;
1666 } else {
1667 /* Configure Master and Slave downshift values */
1668 phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
1580 M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); 1669 M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
1581 phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | 1670 phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
1582 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); 1671 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
1583 ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); 1672 ret_val = e1000_write_phy_reg(hw,
1584 if(ret_val) 1673 M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
1585 return ret_val; 1674 if (ret_val)
1675 return ret_val;
1676 }
1586 } 1677 }
1587 1678
1588 /* SW Reset the PHY so all changes take effect */ 1679 /* SW Reset the PHY so all changes take effect */
@@ -1620,6 +1711,10 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
1620 if(hw->autoneg_advertised == 0) 1711 if(hw->autoneg_advertised == 0)
1621 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 1712 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
1622 1713
1714 /* IFE phy only supports 10/100 */
1715 if (hw->phy_type == e1000_phy_ife)
1716 hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
1717
1623 DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); 1718 DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
1624 ret_val = e1000_phy_setup_autoneg(hw); 1719 ret_val = e1000_phy_setup_autoneg(hw);
1625 if(ret_val) { 1720 if(ret_val) {
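Note: because the IFE PHY is 10/100 only, the autoneg path above masks the advertised speeds down to the 10/100 subset before programming the PHY. A one-function sketch of that clamping; the bit names and values below are labelled for illustration rather than taken from the driver headers:

#include <stdint.h>
#include <stdio.h>

#define ADV_10_HALF   0x0001u
#define ADV_10_FULL   0x0002u
#define ADV_100_HALF  0x0004u
#define ADV_100_FULL  0x0008u
#define ADV_1000_FULL 0x0020u

#define ADV_10_100_ALL (ADV_10_HALF | ADV_10_FULL | ADV_100_HALF | ADV_100_FULL)
#define ADV_DEFAULT    (ADV_10_100_ALL | ADV_1000_FULL)

/* Clamp the advertisement for a PHY that cannot do gigabit. */
static uint16_t clamp_advertised(uint16_t advertised, int phy_is_10_100_only)
{
    if (advertised == 0)
        advertised = ADV_DEFAULT;          /* nothing requested: advertise all */
    if (phy_is_10_100_only)
        advertised &= ADV_10_100_ALL;      /* drop 1000BASE-T from the set */
    return advertised;
}

int main(void)
{
    printf("IFE PHY advertises 0x%04X\n", (unsigned)clamp_advertised(0, 1));
    return 0;
}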
@@ -1717,6 +1812,26 @@ e1000_setup_copper_link(struct e1000_hw *hw)
1717 1812
1718 DEBUGFUNC("e1000_setup_copper_link"); 1813 DEBUGFUNC("e1000_setup_copper_link");
1719 1814
1815 switch (hw->mac_type) {
1816 case e1000_80003es2lan:
1817 case e1000_ich8lan:
1818 /* Set the mac to wait the maximum time between each
1819 * iteration and increase the max iterations when
1820 * polling the phy; this fixes erroneous timeouts at 10Mbps. */
1821 ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
1822 if (ret_val)
1823 return ret_val;
1824 ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
1825 if (ret_val)
1826 return ret_val;
1827 reg_data |= 0x3F;
1828 ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
1829 if (ret_val)
1830 return ret_val;
1831 default:
1832 break;
1833 }
1834
1720 /* Check if it is a valid PHY and set PHY mode if necessary. */ 1835 /* Check if it is a valid PHY and set PHY mode if necessary. */
1721 ret_val = e1000_copper_link_preconfig(hw); 1836 ret_val = e1000_copper_link_preconfig(hw);
1722 if(ret_val) 1837 if(ret_val)
@@ -1724,10 +1839,8 @@ e1000_setup_copper_link(struct e1000_hw *hw)
1724 1839
1725 switch (hw->mac_type) { 1840 switch (hw->mac_type) {
1726 case e1000_80003es2lan: 1841 case e1000_80003es2lan:
1727 ret_val = e1000_read_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL, 1842 /* Kumeran registers are written-only */
1728 &reg_data); 1843 reg_data = E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT;
1729 if (ret_val)
1730 return ret_val;
1731 reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING; 1844 reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING;
1732 ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL, 1845 ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL,
1733 reg_data); 1846 reg_data);
@@ -1739,6 +1852,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
1739 } 1852 }
1740 1853
1741 if (hw->phy_type == e1000_phy_igp || 1854 if (hw->phy_type == e1000_phy_igp ||
1855 hw->phy_type == e1000_phy_igp_3 ||
1742 hw->phy_type == e1000_phy_igp_2) { 1856 hw->phy_type == e1000_phy_igp_2) {
1743 ret_val = e1000_copper_link_igp_setup(hw); 1857 ret_val = e1000_copper_link_igp_setup(hw);
1744 if(ret_val) 1858 if(ret_val)
@@ -1803,7 +1917,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
1803* hw - Struct containing variables accessed by shared code 1917* hw - Struct containing variables accessed by shared code
1804******************************************************************************/ 1918******************************************************************************/
1805static int32_t 1919static int32_t
1806e1000_configure_kmrn_for_10_100(struct e1000_hw *hw) 1920e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex)
1807{ 1921{
1808 int32_t ret_val = E1000_SUCCESS; 1922 int32_t ret_val = E1000_SUCCESS;
1809 uint32_t tipg; 1923 uint32_t tipg;
@@ -1823,6 +1937,18 @@ e1000_configure_kmrn_for_10_100(struct e1000_hw *hw)
1823 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100; 1937 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100;
1824 E1000_WRITE_REG(hw, TIPG, tipg); 1938 E1000_WRITE_REG(hw, TIPG, tipg);
1825 1939
1940 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
1941
1942 if (ret_val)
1943 return ret_val;
1944
1945 if (duplex == HALF_DUPLEX)
1946 reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
1947 else
1948 reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
1949
1950 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
1951
1826 return ret_val; 1952 return ret_val;
1827} 1953}
1828 1954
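Note: the 10/100 KMRN configuration now takes the resolved duplex and only leaves "pass false carrier" enabled for half duplex, while the 1000 Mb/s path clears it unconditionally (and the GG82563 link setup no longer sets it). A small read-modify-write sketch of that bit handling; register access is mocked and the bit position is a placeholder, only the duplex logic mirrors the diff:

#include <stdint.h>
#include <stdio.h>

#define KMCR_PASS_FALSE_CARRIER 0x0800u  /* illustrative bit position */

enum duplex { HALF_DUPLEX = 1, FULL_DUPLEX = 2 };

static uint16_t kmrn_mode_ctrl;          /* stands in for GG82563_PHY_KMRN_MODE_CTRL */

static void configure_kmrn_for_speed(int speed_mbps, enum duplex d)
{
    uint16_t reg = kmrn_mode_ctrl;       /* read */

    if (speed_mbps == 1000 || d == FULL_DUPLEX)
        reg &= ~KMCR_PASS_FALSE_CARRIER; /* full duplex or gigabit: disable */
    else
        reg |= KMCR_PASS_FALSE_CARRIER;  /* 10/100 half duplex: enable */

    kmrn_mode_ctrl = reg;                /* write back */
    printf("%4d Mb/s %s: KMRN mode ctrl = 0x%04X\n",
           speed_mbps, d == HALF_DUPLEX ? "half" : "full", (unsigned)reg);
}

int main(void)
{
    configure_kmrn_for_speed(100, HALF_DUPLEX);
    configure_kmrn_for_speed(100, FULL_DUPLEX);
    configure_kmrn_for_speed(1000, FULL_DUPLEX);
    return 0;
}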
@@ -1847,6 +1973,14 @@ e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
1847 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; 1973 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
1848 E1000_WRITE_REG(hw, TIPG, tipg); 1974 E1000_WRITE_REG(hw, TIPG, tipg);
1849 1975
1976 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
1977
1978 if (ret_val)
1979 return ret_val;
1980
1981 reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
1982 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
1983
1850 return ret_val; 1984 return ret_val;
1851} 1985}
1852 1986
@@ -1869,10 +2003,13 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
1869 if(ret_val) 2003 if(ret_val)
1870 return ret_val; 2004 return ret_val;
1871 2005
1872 /* Read the MII 1000Base-T Control Register (Address 9). */ 2006 if (hw->phy_type != e1000_phy_ife) {
1873 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); 2007 /* Read the MII 1000Base-T Control Register (Address 9). */
1874 if(ret_val) 2008 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
1875 return ret_val; 2009 if (ret_val)
2010 return ret_val;
2011 } else
2012 mii_1000t_ctrl_reg=0;
1876 2013
1877 /* Need to parse both autoneg_advertised and fc and set up 2014 /* Need to parse both autoneg_advertised and fc and set up
1878 * the appropriate PHY registers. First we will parse for 2015 * the appropriate PHY registers. First we will parse for
@@ -1923,6 +2060,9 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
1923 if(hw->autoneg_advertised & ADVERTISE_1000_FULL) { 2060 if(hw->autoneg_advertised & ADVERTISE_1000_FULL) {
1924 DEBUGOUT("Advertise 1000mb Full duplex\n"); 2061 DEBUGOUT("Advertise 1000mb Full duplex\n");
1925 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; 2062 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
2063 if (hw->phy_type == e1000_phy_ife) {
2064 DEBUGOUT("e1000_phy_ife is a 10/100 PHY. Gigabit speed is not supported.\n");
2065 }
1926 } 2066 }
1927 2067
1928 /* Check for a software override of the flow control settings, and 2068 /* Check for a software override of the flow control settings, and
@@ -1984,9 +2124,11 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
1984 2124
1985 DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 2125 DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
1986 2126
1987 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); 2127 if (hw->phy_type != e1000_phy_ife) {
1988 if(ret_val) 2128 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
1989 return ret_val; 2129 if (ret_val)
2130 return ret_val;
2131 }
1990 2132
1991 return E1000_SUCCESS; 2133 return E1000_SUCCESS;
1992} 2134}
@@ -2089,6 +2231,18 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
2089 2231
2090 /* Need to reset the PHY or these changes will be ignored */ 2232 /* Need to reset the PHY or these changes will be ignored */
2091 mii_ctrl_reg |= MII_CR_RESET; 2233 mii_ctrl_reg |= MII_CR_RESET;
2234 /* Disable MDI-X support for 10/100 */
2235 } else if (hw->phy_type == e1000_phy_ife) {
2236 ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
2237 if (ret_val)
2238 return ret_val;
2239
2240 phy_data &= ~IFE_PMC_AUTO_MDIX;
2241 phy_data &= ~IFE_PMC_FORCE_MDIX;
2242
2243 ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data);
2244 if (ret_val)
2245 return ret_val;
2092 } else { 2246 } else {
2093 /* Clear Auto-Crossover to force MDI manually. IGP requires MDI 2247 /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
2094 * forced whenever speed or duplex are forced. 2248 * forced whenever speed or duplex are forced.
@@ -2721,8 +2875,12 @@ e1000_check_for_link(struct e1000_hw *hw)
2721 */ 2875 */
2722 if(hw->tbi_compatibility_en) { 2876 if(hw->tbi_compatibility_en) {
2723 uint16_t speed, duplex; 2877 uint16_t speed, duplex;
2724 e1000_get_speed_and_duplex(hw, &speed, &duplex); 2878 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
2725 if(speed != SPEED_1000) { 2879 if (ret_val) {
2880 DEBUGOUT("Error getting link speed and duplex\n");
2881 return ret_val;
2882 }
2883 if (speed != SPEED_1000) {
2726 /* If link speed is not set to gigabit speed, we do not need 2884 /* If link speed is not set to gigabit speed, we do not need
2727 * to enable TBI compatibility. 2885 * to enable TBI compatibility.
2728 */ 2886 */
@@ -2889,7 +3047,13 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
2889 if (*speed == SPEED_1000) 3047 if (*speed == SPEED_1000)
2890 ret_val = e1000_configure_kmrn_for_1000(hw); 3048 ret_val = e1000_configure_kmrn_for_1000(hw);
2891 else 3049 else
2892 ret_val = e1000_configure_kmrn_for_10_100(hw); 3050 ret_val = e1000_configure_kmrn_for_10_100(hw, *duplex);
3051 if (ret_val)
3052 return ret_val;
3053 }
3054
3055 if ((hw->phy_type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
3056 ret_val = e1000_kumeran_lock_loss_workaround(hw);
2893 if (ret_val) 3057 if (ret_val)
2894 return ret_val; 3058 return ret_val;
2895 } 3059 }
@@ -3079,6 +3243,9 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
3079 3243
3080 DEBUGFUNC("e1000_swfw_sync_acquire"); 3244 DEBUGFUNC("e1000_swfw_sync_acquire");
3081 3245
3246 if (hw->swfwhw_semaphore_present)
3247 return e1000_get_software_flag(hw);
3248
3082 if (!hw->swfw_sync_present) 3249 if (!hw->swfw_sync_present)
3083 return e1000_get_hw_eeprom_semaphore(hw); 3250 return e1000_get_hw_eeprom_semaphore(hw);
3084 3251
@@ -3118,6 +3285,11 @@ e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask)
3118 3285
3119 DEBUGFUNC("e1000_swfw_sync_release"); 3286 DEBUGFUNC("e1000_swfw_sync_release");
3120 3287
3288 if (hw->swfwhw_semaphore_present) {
3289 e1000_release_software_flag(hw);
3290 return;
3291 }
3292
3121 if (!hw->swfw_sync_present) { 3293 if (!hw->swfw_sync_present) {
3122 e1000_put_hw_eeprom_semaphore(hw); 3294 e1000_put_hw_eeprom_semaphore(hw);
3123 return; 3295 return;
@@ -3160,7 +3332,8 @@ e1000_read_phy_reg(struct e1000_hw *hw,
3160 if (e1000_swfw_sync_acquire(hw, swfw)) 3332 if (e1000_swfw_sync_acquire(hw, swfw))
3161 return -E1000_ERR_SWFW_SYNC; 3333 return -E1000_ERR_SWFW_SYNC;
3162 3334
3163 if((hw->phy_type == e1000_phy_igp || 3335 if ((hw->phy_type == e1000_phy_igp ||
3336 hw->phy_type == e1000_phy_igp_3 ||
3164 hw->phy_type == e1000_phy_igp_2) && 3337 hw->phy_type == e1000_phy_igp_2) &&
3165 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 3338 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
3166 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 3339 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
@@ -3299,7 +3472,8 @@ e1000_write_phy_reg(struct e1000_hw *hw,
3299 if (e1000_swfw_sync_acquire(hw, swfw)) 3472 if (e1000_swfw_sync_acquire(hw, swfw))
3300 return -E1000_ERR_SWFW_SYNC; 3473 return -E1000_ERR_SWFW_SYNC;
3301 3474
3302 if((hw->phy_type == e1000_phy_igp || 3475 if ((hw->phy_type == e1000_phy_igp ||
3476 hw->phy_type == e1000_phy_igp_3 ||
3303 hw->phy_type == e1000_phy_igp_2) && 3477 hw->phy_type == e1000_phy_igp_2) &&
3304 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 3478 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
3305 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 3479 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
@@ -3514,7 +3688,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
3514 E1000_WRITE_FLUSH(hw); 3688 E1000_WRITE_FLUSH(hw);
3515 3689
3516 if (hw->mac_type >= e1000_82571) 3690 if (hw->mac_type >= e1000_82571)
3517 msec_delay(10); 3691 msec_delay_irq(10);
3518 e1000_swfw_sync_release(hw, swfw); 3692 e1000_swfw_sync_release(hw, swfw);
3519 } else { 3693 } else {
3520 /* Read the Extended Device Control Register, assert the PHY_RESET_DIR 3694 /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
@@ -3544,6 +3718,12 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
3544 ret_val = e1000_get_phy_cfg_done(hw); 3718 ret_val = e1000_get_phy_cfg_done(hw);
3545 e1000_release_software_semaphore(hw); 3719 e1000_release_software_semaphore(hw);
3546 3720
3721 if ((hw->mac_type == e1000_ich8lan) &&
3722 (hw->phy_type == e1000_phy_igp_3)) {
3723 ret_val = e1000_init_lcd_from_nvm(hw);
3724 if (ret_val)
3725 return ret_val;
3726 }
3547 return ret_val; 3727 return ret_val;
3548} 3728}
3549 3729
@@ -3572,9 +3752,11 @@ e1000_phy_reset(struct e1000_hw *hw)
3572 case e1000_82541_rev_2: 3752 case e1000_82541_rev_2:
3573 case e1000_82571: 3753 case e1000_82571:
3574 case e1000_82572: 3754 case e1000_82572:
3755 case e1000_ich8lan:
3575 ret_val = e1000_phy_hw_reset(hw); 3756 ret_val = e1000_phy_hw_reset(hw);
3576 if(ret_val) 3757 if(ret_val)
3577 return ret_val; 3758 return ret_val;
3759
3578 break; 3760 break;
3579 default: 3761 default:
3580 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); 3762 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
@@ -3597,11 +3779,120 @@ e1000_phy_reset(struct e1000_hw *hw)
3597} 3779}
3598 3780
3599/****************************************************************************** 3781/******************************************************************************
3782* Work-around for 82566 power-down: on D3 entry-
3783* 1) disable gigabit link
3784* 2) write VR power-down enable
3785* 3) read it back
3786* if successful continue, else issue LCD reset and repeat
3787*
3788* hw - struct containing variables accessed by shared code
3789******************************************************************************/
3790void
3791e1000_phy_powerdown_workaround(struct e1000_hw *hw)
3792{
3793 int32_t reg;
3794 uint16_t phy_data;
3795 int32_t retry = 0;
3796
3797 DEBUGFUNC("e1000_phy_powerdown_workaround");
3798
3799 if (hw->phy_type != e1000_phy_igp_3)
3800 return;
3801
3802 do {
3803 /* Disable link */
3804 reg = E1000_READ_REG(hw, PHY_CTRL);
3805 E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
3806 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3807
3808 /* Write VR power-down enable */
3809 e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
3810 e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data |
3811 IGP3_VR_CTRL_MODE_SHUT);
3812
3813 /* Read it back and test */
3814 e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
3815 if ((phy_data & IGP3_VR_CTRL_MODE_SHUT) || retry)
3816 break;
3817
3818 /* Issue PHY reset and repeat at most one more time */
3819 reg = E1000_READ_REG(hw, CTRL);
3820 E1000_WRITE_REG(hw, CTRL, reg | E1000_CTRL_PHY_RST);
3821 retry++;
3822 } while (retry);
3823
3824 return;
3825
3826}
3827
3828/******************************************************************************
3829* Work-around for 82566 Kumeran PCS lock loss:
3830* On link status change (i.e. PCI reset, speed change) and link is up and
3831* speed is gigabit-
3832* 0) if workaround is optionally disabled do nothing
3833* 1) wait 1ms for Kumeran link to come up
3834* 2) check Kumeran Diagnostic register PCS lock loss bit
3835* 3) if not set the link is locked (all is good), otherwise...
3836* 4) reset the PHY
3837* 5) repeat up to 10 times
3838* Note: this is only called for IGP3 copper when speed is 1gb.
3839*
3840* hw - struct containing variables accessed by shared code
3841******************************************************************************/
3842int32_t
3843e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
3844{
3845 int32_t ret_val;
3846 int32_t reg;
3847 int32_t cnt;
3848 uint16_t phy_data;
3849
3850 if (hw->kmrn_lock_loss_workaround_disabled)
3851 return E1000_SUCCESS;
3852
3853 /* Make sure link is up before proceeding. If not just return.
3854 * Attempting this while link is negotiating fouls up link
3855 * stability */
3856 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
3857 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
3858
3859 if (phy_data & MII_SR_LINK_STATUS) {
3860 for (cnt = 0; cnt < 10; cnt++) {
3861 /* read once to clear */
3862 ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
3863 if (ret_val)
3864 return ret_val;
3865 /* and again to get new status */
3866 ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
3867 if (ret_val)
3868 return ret_val;
3869
3870 /* check for PCS lock */
3871 if (!(phy_data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
3872 return E1000_SUCCESS;
3873
3874 /* Issue PHY reset */
3875 e1000_phy_hw_reset(hw);
3876 msec_delay_irq(5);
3877 }
3878 /* Disable GigE link negotiation */
3879 reg = E1000_READ_REG(hw, PHY_CTRL);
3880 E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
3881 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3882
3883 /* unable to acquire PCS lock */
3884 return E1000_ERR_PHY;
3885 }
3886
3887 return E1000_SUCCESS;
3888}
3889
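Note: e1000_kumeran_lock_loss_workaround() above follows the steps in its header comment: with link up at gigabit, read the diagnostic register twice (the first read clears the latched bit), return as soon as the PCS-lock-loss bit is clear, otherwise reset the PHY and retry up to ten times, and finally disable gigabit if lock was never regained. A host-side sketch of that retry loop with the hardware replaced by a tiny simulation; all names and the fault model are invented for illustration:

#include <stdio.h>
#include <stdbool.h>

#define PCS_LOCK_LOSS 0x0001

/* Fake PHY: reports lock loss until it has been reset 'resets_needed' times. */
struct fake_phy { int resets_needed; int resets_done; };

static int read_kmrn_diag(struct fake_phy *p)
{
    return (p->resets_done < p->resets_needed) ? PCS_LOCK_LOSS : 0;
}

static void phy_hw_reset(struct fake_phy *p) { p->resets_done++; }

/* Returns 0 on success, -1 if lock could not be regained in 10 tries. */
static int kumeran_lock_loss_workaround(struct fake_phy *p, bool link_up)
{
    if (!link_up)
        return 0;                        /* nothing to do without link */

    for (int attempt = 0; attempt < 10; attempt++) {
        (void)read_kmrn_diag(p);         /* first read clears the latch */
        int diag = read_kmrn_diag(p);    /* second read is the fresh status */
        if (!(diag & PCS_LOCK_LOSS))
            return 0;                    /* PCS is locked: all good */
        phy_hw_reset(p);                 /* otherwise reset and retry */
    }
    printf("giving up: disable gigabit negotiation\n");
    return -1;
}

int main(void)
{
    struct fake_phy phy = { .resets_needed = 3, .resets_done = 0 };
    return kumeran_lock_loss_workaround(&phy, true) ? 1 : 0;
}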
3890/******************************************************************************
3600* Probes the expected PHY address for known PHY IDs 3891* Probes the expected PHY address for known PHY IDs
3601* 3892*
3602* hw - Struct containing variables accessed by shared code 3893* hw - Struct containing variables accessed by shared code
3603******************************************************************************/ 3894******************************************************************************/
3604static int32_t 3895int32_t
3605e1000_detect_gig_phy(struct e1000_hw *hw) 3896e1000_detect_gig_phy(struct e1000_hw *hw)
3606{ 3897{
3607 int32_t phy_init_status, ret_val; 3898 int32_t phy_init_status, ret_val;
@@ -3613,8 +3904,8 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
3613 /* The 82571 firmware may still be configuring the PHY. In this 3904 /* The 82571 firmware may still be configuring the PHY. In this
3614 * case, we cannot access the PHY until the configuration is done. So 3905 * case, we cannot access the PHY until the configuration is done. So
3615 * we explicitly set the PHY values. */ 3906 * we explicitly set the PHY values. */
3616 if(hw->mac_type == e1000_82571 || 3907 if (hw->mac_type == e1000_82571 ||
3617 hw->mac_type == e1000_82572) { 3908 hw->mac_type == e1000_82572) {
3618 hw->phy_id = IGP01E1000_I_PHY_ID; 3909 hw->phy_id = IGP01E1000_I_PHY_ID;
3619 hw->phy_type = e1000_phy_igp_2; 3910 hw->phy_type = e1000_phy_igp_2;
3620 return E1000_SUCCESS; 3911 return E1000_SUCCESS;
@@ -3631,7 +3922,7 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
3631 3922
3632 /* Read the PHY ID Registers to identify which PHY is onboard. */ 3923 /* Read the PHY ID Registers to identify which PHY is onboard. */
3633 ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); 3924 ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
3634 if(ret_val) 3925 if (ret_val)
3635 return ret_val; 3926 return ret_val;
3636 3927
3637 hw->phy_id = (uint32_t) (phy_id_high << 16); 3928 hw->phy_id = (uint32_t) (phy_id_high << 16);
@@ -3669,6 +3960,12 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
3669 case e1000_80003es2lan: 3960 case e1000_80003es2lan:
3670 if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE; 3961 if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE;
3671 break; 3962 break;
3963 case e1000_ich8lan:
3964 if (hw->phy_id == IGP03E1000_E_PHY_ID) match = TRUE;
3965 if (hw->phy_id == IFE_E_PHY_ID) match = TRUE;
3966 if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = TRUE;
3967 if (hw->phy_id == IFE_C_E_PHY_ID) match = TRUE;
3968 break;
3672 default: 3969 default:
3673 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); 3970 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
3674 return -E1000_ERR_CONFIG; 3971 return -E1000_ERR_CONFIG;
@@ -3784,6 +4081,53 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
3784} 4081}
3785 4082
3786/****************************************************************************** 4083/******************************************************************************
4084* Get PHY information from various PHY registers for ife PHY only.
4085*
4086* hw - Struct containing variables accessed by shared code
4087* phy_info - PHY information structure
4088******************************************************************************/
4089int32_t
4090e1000_phy_ife_get_info(struct e1000_hw *hw,
4091 struct e1000_phy_info *phy_info)
4092{
4093 int32_t ret_val;
4094 uint16_t phy_data, polarity;
4095
4096 DEBUGFUNC("e1000_phy_ife_get_info");
4097
4098 phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
4099 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
4100
4101 ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
4102 if (ret_val)
4103 return ret_val;
4104 phy_info->polarity_correction =
4105 (phy_data & IFE_PSC_AUTO_POLARITY_DISABLE) >>
4106 IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT;
4107
4108 if (phy_info->polarity_correction == e1000_polarity_reversal_enabled) {
4109 ret_val = e1000_check_polarity(hw, &polarity);
4110 if (ret_val)
4111 return ret_val;
4112 } else {
4113 /* Polarity is forced. */
4114 polarity = (phy_data & IFE_PSC_FORCE_POLARITY) >>
4115 IFE_PSC_FORCE_POLARITY_SHIFT;
4116 }
4117 phy_info->cable_polarity = polarity;
4118
4119 ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
4120 if (ret_val)
4121 return ret_val;
4122
4123 phy_info->mdix_mode =
4124 (phy_data & (IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX)) >>
4125 IFE_PMC_MDIX_MODE_SHIFT;
4126
4127 return E1000_SUCCESS;
4128}
4129
4130/******************************************************************************
3787* Get PHY information from various PHY registers for m88 PHY only. 4131
3788* 4132*
3789* hw - Struct containing variables accessed by shared code 4133* hw - Struct containing variables accessed by shared code
@@ -3898,9 +4242,12 @@ e1000_phy_get_info(struct e1000_hw *hw,
3898 return -E1000_ERR_CONFIG; 4242 return -E1000_ERR_CONFIG;
3899 } 4243 }
3900 4244
3901 if(hw->phy_type == e1000_phy_igp || 4245 if (hw->phy_type == e1000_phy_igp ||
4246 hw->phy_type == e1000_phy_igp_3 ||
3902 hw->phy_type == e1000_phy_igp_2) 4247 hw->phy_type == e1000_phy_igp_2)
3903 return e1000_phy_igp_get_info(hw, phy_info); 4248 return e1000_phy_igp_get_info(hw, phy_info);
4249 else if (hw->phy_type == e1000_phy_ife)
4250 return e1000_phy_ife_get_info(hw, phy_info);
3904 else 4251 else
3905 return e1000_phy_m88_get_info(hw, phy_info); 4252 return e1000_phy_m88_get_info(hw, phy_info);
3906} 4253}
@@ -4049,6 +4396,35 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4049 eeprom->use_eerd = TRUE; 4396 eeprom->use_eerd = TRUE;
4050 eeprom->use_eewr = FALSE; 4397 eeprom->use_eewr = FALSE;
4051 break; 4398 break;
4399 case e1000_ich8lan:
4400 {
4401 int32_t i = 0;
4402 uint32_t flash_size = E1000_READ_ICH8_REG(hw, ICH8_FLASH_GFPREG);
4403
4404 eeprom->type = e1000_eeprom_ich8;
4405 eeprom->use_eerd = FALSE;
4406 eeprom->use_eewr = FALSE;
4407 eeprom->word_size = E1000_SHADOW_RAM_WORDS;
4408
4409 /* Zero the shadow RAM structure. But don't load it from NVM
4410 * so as to save time for driver init */
4411 if (hw->eeprom_shadow_ram != NULL) {
4412 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4413 hw->eeprom_shadow_ram[i].modified = FALSE;
4414 hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
4415 }
4416 }
4417
4418 hw->flash_base_addr = (flash_size & ICH8_GFPREG_BASE_MASK) *
4419 ICH8_FLASH_SECTOR_SIZE;
4420
4421 hw->flash_bank_size = ((flash_size >> 16) & ICH8_GFPREG_BASE_MASK) + 1;
4422 hw->flash_bank_size -= (flash_size & ICH8_GFPREG_BASE_MASK);
4423 hw->flash_bank_size *= ICH8_FLASH_SECTOR_SIZE;
4424 hw->flash_bank_size /= 2 * sizeof(uint16_t);
4425
4426 break;
4427 }
4052 default: 4428 default:
4053 break; 4429 break;
4054 } 4430 }
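Note: the new e1000_ich8lan case derives the flash base address and the per-bank size from GFPREG: the low field gives the first usable sector, the field at bit 16 gives the last, both in units of 4 KB flash sectors, and the region is then split over two banks and counted in 16-bit words. A standalone sketch of that arithmetic; the 13-bit mask width and the sample register value are assumptions, only the formula mirrors the diff:

#include <stdint.h>
#include <stdio.h>

#define GFPREG_BASE_MASK   0x1FFFu   /* assumed 13-bit sector numbers */
#define FLASH_SECTOR_SIZE  4096u     /* bytes per flash sector */

struct flash_geom { uint32_t base_addr; uint32_t bank_size_words; };

static struct flash_geom ich8_flash_geometry(uint32_t gfpreg)
{
    struct flash_geom g;
    uint32_t first = gfpreg & GFPREG_BASE_MASK;          /* first usable sector */
    uint32_t last  = (gfpreg >> 16) & GFPREG_BASE_MASK;  /* last usable sector  */

    g.base_addr = first * FLASH_SECTOR_SIZE;

    /* The region spans (last - first + 1) sectors, split over two banks,
     * and the driver counts each bank in 16-bit words. */
    uint32_t region_bytes = (last - first + 1) * FLASH_SECTOR_SIZE;
    g.bank_size_words = region_bytes / (2 * sizeof(uint16_t));

    return g;
}

int main(void)
{
    struct flash_geom g = ich8_flash_geometry(0x00070001u); /* sectors 1..7 */
    printf("base=0x%X bank=%u words\n",
           (unsigned)g.base_addr, (unsigned)g.bank_size_words);
    return 0;
}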
@@ -4469,7 +4845,10 @@ e1000_read_eeprom(struct e1000_hw *hw,
4469 return ret_val; 4845 return ret_val;
4470 } 4846 }
4471 4847
4472 if(eeprom->type == e1000_eeprom_spi) { 4848 if (eeprom->type == e1000_eeprom_ich8)
4849 return e1000_read_eeprom_ich8(hw, offset, words, data);
4850
4851 if (eeprom->type == e1000_eeprom_spi) {
4473 uint16_t word_in; 4852 uint16_t word_in;
4474 uint8_t read_opcode = EEPROM_READ_OPCODE_SPI; 4853 uint8_t read_opcode = EEPROM_READ_OPCODE_SPI;
4475 4854
@@ -4636,7 +5015,10 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
4636 5015
4637 DEBUGFUNC("e1000_is_onboard_nvm_eeprom"); 5016 DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
4638 5017
4639 if(hw->mac_type == e1000_82573) { 5018 if (hw->mac_type == e1000_ich8lan)
5019 return FALSE;
5020
5021 if (hw->mac_type == e1000_82573) {
4640 eecd = E1000_READ_REG(hw, EECD); 5022 eecd = E1000_READ_REG(hw, EECD);
4641 5023
4642 /* Isolate bits 15 & 16 */ 5024 /* Isolate bits 15 & 16 */
@@ -4686,8 +5068,22 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
4686 } 5068 }
4687 } 5069 }
4688 5070
4689 for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 5071 if (hw->mac_type == e1000_ich8lan) {
4690 if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { 5072 /* Drivers must allocate the shadow ram structure for the
5073 * EEPROM checksum to be updated. Otherwise, this bit as well
5074 * as the checksum must both be set correctly for this
5075 * validation to pass.
5076 */
5077 e1000_read_eeprom(hw, 0x19, 1, &eeprom_data);
5078 if ((eeprom_data & 0x40) == 0) {
5079 eeprom_data |= 0x40;
5080 e1000_write_eeprom(hw, 0x19, 1, &eeprom_data);
5081 e1000_update_eeprom_checksum(hw);
5082 }
5083 }
5084
5085 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
5086 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
4691 DEBUGOUT("EEPROM Read Error\n"); 5087 DEBUGOUT("EEPROM Read Error\n");
4692 return -E1000_ERR_EEPROM; 5088 return -E1000_ERR_EEPROM;
4693 } 5089 }
@@ -4713,6 +5109,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
4713int32_t 5109int32_t
4714e1000_update_eeprom_checksum(struct e1000_hw *hw) 5110e1000_update_eeprom_checksum(struct e1000_hw *hw)
4715{ 5111{
5112 uint32_t ctrl_ext;
4716 uint16_t checksum = 0; 5113 uint16_t checksum = 0;
4717 uint16_t i, eeprom_data; 5114 uint16_t i, eeprom_data;
4718 5115
@@ -4731,6 +5128,14 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
4731 return -E1000_ERR_EEPROM; 5128 return -E1000_ERR_EEPROM;
4732 } else if (hw->eeprom.type == e1000_eeprom_flash) { 5129 } else if (hw->eeprom.type == e1000_eeprom_flash) {
4733 e1000_commit_shadow_ram(hw); 5130 e1000_commit_shadow_ram(hw);
5131 } else if (hw->eeprom.type == e1000_eeprom_ich8) {
5132 e1000_commit_shadow_ram(hw);
5133 /* Reload the EEPROM, or else modifications will not appear
5134 * until after next adapter reset. */
5135 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
5136 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
5137 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
5138 msec_delay(10);
4734 } 5139 }
4735 return E1000_SUCCESS; 5140 return E1000_SUCCESS;
4736} 5141}
@@ -4770,6 +5175,9 @@ e1000_write_eeprom(struct e1000_hw *hw,
4770 if(eeprom->use_eewr == TRUE) 5175 if(eeprom->use_eewr == TRUE)
4771 return e1000_write_eeprom_eewr(hw, offset, words, data); 5176 return e1000_write_eeprom_eewr(hw, offset, words, data);
4772 5177
5178 if (eeprom->type == e1000_eeprom_ich8)
5179 return e1000_write_eeprom_ich8(hw, offset, words, data);
5180
4773 /* Prepare the EEPROM for writing */ 5181 /* Prepare the EEPROM for writing */
4774 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) 5182 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
4775 return -E1000_ERR_EEPROM; 5183 return -E1000_ERR_EEPROM;
@@ -4957,11 +5365,17 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
4957 uint32_t flop = 0; 5365 uint32_t flop = 0;
4958 uint32_t i = 0; 5366 uint32_t i = 0;
4959 int32_t error = E1000_SUCCESS; 5367 int32_t error = E1000_SUCCESS;
4960 5368 uint32_t old_bank_offset = 0;
4961 /* The flop register will be used to determine if flash type is STM */ 5369 uint32_t new_bank_offset = 0;
4962 flop = E1000_READ_REG(hw, FLOP); 5370 uint32_t sector_retries = 0;
5371 uint8_t low_byte = 0;
5372 uint8_t high_byte = 0;
5373 uint8_t temp_byte = 0;
5374 boolean_t sector_write_failed = FALSE;
4963 5375
4964 if (hw->mac_type == e1000_82573) { 5376 if (hw->mac_type == e1000_82573) {
5377 /* The flop register will be used to determine if flash type is STM */
5378 flop = E1000_READ_REG(hw, FLOP);
4965 for (i=0; i < attempts; i++) { 5379 for (i=0; i < attempts; i++) {
4966 eecd = E1000_READ_REG(hw, EECD); 5380 eecd = E1000_READ_REG(hw, EECD);
4967 if ((eecd & E1000_EECD_FLUPD) == 0) { 5381 if ((eecd & E1000_EECD_FLUPD) == 0) {
@@ -4995,6 +5409,106 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
4995 } 5409 }
4996 } 5410 }
4997 5411
5412 if (hw->mac_type == e1000_ich8lan && hw->eeprom_shadow_ram != NULL) {
5413 /* We're writing to the opposite bank so if we're on bank 1,
5414 * write to bank 0 etc. We also need to erase the segment that
5415 * is going to be written */
5416 if (!(E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL)) {
5417 new_bank_offset = hw->flash_bank_size * 2;
5418 old_bank_offset = 0;
5419 e1000_erase_ich8_4k_segment(hw, 1);
5420 } else {
5421 old_bank_offset = hw->flash_bank_size * 2;
5422 new_bank_offset = 0;
5423 e1000_erase_ich8_4k_segment(hw, 0);
5424 }
5425
5426 do {
5427 sector_write_failed = FALSE;
5428 /* Loop for every byte in the shadow RAM,
5429 * which is in units of words. */
5430 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
5431 /* Determine whether to write the value stored
5432 * in the other NVM bank or a modified value stored
5433 * in the shadow RAM */
5434 if (hw->eeprom_shadow_ram[i].modified == TRUE) {
5435 low_byte = (uint8_t)hw->eeprom_shadow_ram[i].eeprom_word;
5436 e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
5437 &temp_byte);
5438 udelay(100);
5439 error = e1000_verify_write_ich8_byte(hw,
5440 (i << 1) + new_bank_offset,
5441 low_byte);
5442 if (error != E1000_SUCCESS)
5443 sector_write_failed = TRUE;
5444 high_byte =
5445 (uint8_t)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
5446 e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
5447 &temp_byte);
5448 udelay(100);
5449 } else {
5450 e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
5451 &low_byte);
5452 udelay(100);
5453 error = e1000_verify_write_ich8_byte(hw,
5454 (i << 1) + new_bank_offset, low_byte);
5455 if (error != E1000_SUCCESS)
5456 sector_write_failed = TRUE;
5457 e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
5458 &high_byte);
5459 }
5460
5461 /* If the word is 0x13, then make sure the signature bits
5462 * (15:14) are 11b until the commit has completed.
5463 * This will allow us to write 10b which indicates the
5464 * signature is valid. We want to do this after the write
5465 * has completed so that we don't mark the segment valid
5466 * while the write is still in progress */
5467 if (i == E1000_ICH8_NVM_SIG_WORD)
5468 high_byte = E1000_ICH8_NVM_SIG_MASK | high_byte;
5469
5470 error = e1000_verify_write_ich8_byte(hw,
5471 (i << 1) + new_bank_offset + 1, high_byte);
5472 if (error != E1000_SUCCESS)
5473 sector_write_failed = TRUE;
5474
5475 if (sector_write_failed == FALSE) {
5476 /* Clear the now not used entry in the cache */
5477 hw->eeprom_shadow_ram[i].modified = FALSE;
5478 hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
5479 }
5480 }
5481
5482 /* Don't bother writing the segment valid bits if sector
5483 * programming failed. */
5484 if (sector_write_failed == FALSE) {
5485 /* Finally validate the new segment by setting bit 15:14
5486 * to 10b in word 0x13, this can be done without an
5487 * erase as well since these bits are 11 to start with
5488 * and we need to change bit 14 to 0b */
5489 e1000_read_ich8_byte(hw,
5490 E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
5491 &high_byte);
5492 high_byte &= 0xBF;
5493 error = e1000_verify_write_ich8_byte(hw,
5494 E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
5495 high_byte);
5496 if (error != E1000_SUCCESS)
5497 sector_write_failed = TRUE;
5498
5499 /* And invalidate the previously valid segment by setting
5500 * its signature word (0x13) high_byte to 0b. This can be
5501 * done without an erase because flash erase sets all bits
5502 * to 1's. We can write 1's to 0's without an erase */
5503 error = e1000_verify_write_ich8_byte(hw,
5504 E1000_ICH8_NVM_SIG_WORD * 2 + 1 + old_bank_offset,
5505 0);
5506 if (error != E1000_SUCCESS)
5507 sector_write_failed = TRUE;
5508 }
5509 } while (++sector_retries < 10 && sector_write_failed == TRUE);
5510 }
5511
4998 return error; 5512 return error;
4999} 5513}
5000 5514
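Note: the ICH8 commit path above is a classic double-bank update: pick the inactive bank, erase it, copy every shadow-RAM word into it (modified words come from the cache, unmodified ones from the old bank), keep the signature word's bits 15:14 at 11b while copying, then flip them to 10b to validate the new bank and clear the old bank's signature byte. A short sketch of that sequencing on an in-memory model; two byte arrays stand in for the flash banks, word 0x13 is the signature word as in the diff, and the retry/verify handling is omitted:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define WORDS     0x20
#define SIG_WORD  0x13
#define SIG_MASK  0xC0              /* bits 15:14 of the word, seen in the high byte */

struct shadow { uint16_t word; int modified; };

static uint8_t bank[2][WORDS * 2];  /* two flash banks, byte addressed */

static void commit_shadow_ram(struct shadow *sr, int old_bank)
{
    int new_bank = !old_bank;
    memset(bank[new_bank], 0xFF, sizeof(bank[new_bank]));   /* "erase" */

    for (int i = 0; i < WORDS; i++) {
        uint16_t w = sr[i].modified ? sr[i].word
                                    : (uint16_t)(bank[old_bank][2 * i] |
                                                 bank[old_bank][2 * i + 1] << 8);
        uint8_t lo = (uint8_t)w, hi = (uint8_t)(w >> 8);
        if (i == SIG_WORD)
            hi |= SIG_MASK;          /* keep signature invalid (11b) while copying */
        bank[new_bank][2 * i]     = lo;
        bank[new_bank][2 * i + 1] = hi;
        sr[i].modified = 0;          /* entry is now committed */
    }

    /* Validate the new bank: 11b -> 10b only clears a bit, so no erase needed. */
    bank[new_bank][2 * SIG_WORD + 1] &= 0xBF;
    /* Invalidate the old bank by clearing its signature byte (1s -> 0s is fine). */
    bank[old_bank][2 * SIG_WORD + 1] = 0;
}

int main(void)
{
    struct shadow sr[WORDS] = { {0, 0} };
    memset(bank[0], 0xAA, sizeof(bank[0]));
    sr[2].word = 0x1234; sr[2].modified = 1;
    commit_shadow_ram(sr, 0);
    printf("new word2=0x%02X%02X sig=0x%02X\n",
           bank[1][5], bank[1][4], bank[1][2 * SIG_WORD + 1]);
    return 0;
}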
@@ -5102,15 +5616,19 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
5102 * the other port. */ 5616 * the other port. */
5103 if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE)) 5617 if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
5104 rar_num -= 1; 5618 rar_num -= 1;
5619 if (hw->mac_type == e1000_ich8lan)
5620 rar_num = E1000_RAR_ENTRIES_ICH8LAN;
5621
5105 /* Zero out the other 15 receive addresses. */ 5622 /* Zero out the other 15 receive addresses. */
5106 DEBUGOUT("Clearing RAR[1-15]\n"); 5623 DEBUGOUT("Clearing RAR[1-15]\n");
5107 for(i = 1; i < rar_num; i++) { 5624 for(i = 1; i < rar_num; i++) {
5108 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 5625 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
5626 E1000_WRITE_FLUSH(hw);
5109 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 5627 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
5628 E1000_WRITE_FLUSH(hw);
5110 } 5629 }
5111} 5630}
5112 5631
5113#if 0
5114/****************************************************************************** 5632/******************************************************************************
5115 * Updates the MAC's list of multicast addresses. 5633 * Updates the MAC's list of multicast addresses.
5116 * 5634 *
@@ -5145,6 +5663,8 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
5145 /* Clear RAR[1-15] */ 5663 /* Clear RAR[1-15] */
5146 DEBUGOUT(" Clearing RAR[1-15]\n"); 5664 DEBUGOUT(" Clearing RAR[1-15]\n");
5147 num_rar_entry = E1000_RAR_ENTRIES; 5665 num_rar_entry = E1000_RAR_ENTRIES;
5666 if (hw->mac_type == e1000_ich8lan)
5667 num_rar_entry = E1000_RAR_ENTRIES_ICH8LAN;
5148 /* Reserve a spot for the Locally Administered Address to work around 5668 /* Reserve a spot for the Locally Administered Address to work around
5149 * an 82571 issue in which a reset on one port will reload the MAC on 5669 * an 82571 issue in which a reset on one port will reload the MAC on
5150 * the other port. */ 5670 * the other port. */
@@ -5153,14 +5673,19 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
5153 5673
5154 for(i = rar_used_count; i < num_rar_entry; i++) { 5674 for(i = rar_used_count; i < num_rar_entry; i++) {
5155 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 5675 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
5676 E1000_WRITE_FLUSH(hw);
5156 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 5677 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
5678 E1000_WRITE_FLUSH(hw);
5157 } 5679 }
5158 5680
5159 /* Clear the MTA */ 5681 /* Clear the MTA */
5160 DEBUGOUT(" Clearing MTA\n"); 5682 DEBUGOUT(" Clearing MTA\n");
5161 num_mta_entry = E1000_NUM_MTA_REGISTERS; 5683 num_mta_entry = E1000_NUM_MTA_REGISTERS;
5684 if (hw->mac_type == e1000_ich8lan)
5685 num_mta_entry = E1000_NUM_MTA_REGISTERS_ICH8LAN;
5162 for(i = 0; i < num_mta_entry; i++) { 5686 for(i = 0; i < num_mta_entry; i++) {
5163 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 5687 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
5688 E1000_WRITE_FLUSH(hw);
5164 } 5689 }
5165 5690
5166 /* Add the new addresses */ 5691 /* Add the new addresses */
@@ -5194,7 +5719,6 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
5194 } 5719 }
5195 DEBUGOUT("MC Update Complete\n"); 5720 DEBUGOUT("MC Update Complete\n");
5196} 5721}
5197#endif /* 0 */
5198 5722
5199/****************************************************************************** 5723/******************************************************************************
5200 * Hashes an address to determine its location in the multicast table 5724 * Hashes an address to determine its location in the multicast table
@@ -5217,24 +5741,46 @@ e1000_hash_mc_addr(struct e1000_hw *hw,
5217 * LSB MSB 5741 * LSB MSB
5218 */ 5742 */
5219 case 0: 5743 case 0:
5220 /* [47:36] i.e. 0x563 for above example address */ 5744 if (hw->mac_type == e1000_ich8lan) {
5221 hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); 5745 /* [47:38] i.e. 0x158 for above example address */
5746 hash_value = ((mc_addr[4] >> 6) | (((uint16_t) mc_addr[5]) << 2));
5747 } else {
5748 /* [47:36] i.e. 0x563 for above example address */
5749 hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
5750 }
5222 break; 5751 break;
5223 case 1: 5752 case 1:
5224 /* [46:35] i.e. 0xAC6 for above example address */ 5753 if (hw->mac_type == e1000_ich8lan) {
5225 hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5)); 5754 /* [46:37] i.e. 0x2B1 for above example address */
5755 hash_value = ((mc_addr[4] >> 5) | (((uint16_t) mc_addr[5]) << 3));
5756 } else {
5757 /* [46:35] i.e. 0xAC6 for above example address */
5758 hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5));
5759 }
5226 break; 5760 break;
5227 case 2: 5761 case 2:
5228 /* [45:34] i.e. 0x5D8 for above example address */ 5762 if (hw->mac_type == e1000_ich8lan) {
5229 hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); 5763 /*[45:36] i.e. 0x163 for above example address */
5764 hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
5765 } else {
5766 /* [45:34] i.e. 0x5D8 for above example address */
5767 hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
5768 }
5230 break; 5769 break;
5231 case 3: 5770 case 3:
5232 /* [43:32] i.e. 0x634 for above example address */ 5771 if (hw->mac_type == e1000_ich8lan) {
5233 hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8)); 5772 /* [43:34] i.e. 0x18D for above example address */
5773 hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
5774 } else {
5775 /* [43:32] i.e. 0x634 for above example address */
5776 hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8));
5777 }
5234 break; 5778 break;
5235 } 5779 }
5236 5780
5237 hash_value &= 0xFFF; 5781 hash_value &= 0xFFF;
5782 if (hw->mac_type == e1000_ich8lan)
5783 hash_value &= 0x3FF;
5238 5784
5239 return hash_value; 5785 return hash_value;
5240} 5786}
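/*
 * Illustrative sketch, not part of this patch: the case-0 multicast hash
 * computed standalone, comparing the 12-bit filter used by most MACs with
 * the 10-bit filter used by ich8lan above.  The helper name and the
 * is_ich8lan flag are assumptions made only for this example; mc_addr[]
 * is assumed to hold the address in the same byte order the driver uses.
 */
static uint16_t example_mc_hash_case0(const uint8_t *mc_addr, int is_ich8lan)
{
    uint16_t hash;

    if (is_ich8lan)
        /* bits [47:38] of the address, masked to a 10-bit hash */
        hash = ((mc_addr[4] >> 6) | ((uint16_t)mc_addr[5] << 2)) & 0x3FF;
    else
        /* bits [47:36] of the address, masked to a 12-bit hash */
        hash = ((mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4)) & 0xFFF;

    return hash;
}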
@@ -5262,6 +5808,8 @@ e1000_mta_set(struct e1000_hw *hw,
5262 * register are determined by the lower 5 bits of the value. 5808 * register are determined by the lower 5 bits of the value.
5263 */ 5809 */
5264 hash_reg = (hash_value >> 5) & 0x7F; 5810 hash_reg = (hash_value >> 5) & 0x7F;
5811 if (hw->mac_type == e1000_ich8lan)
5812 hash_reg &= 0x1F;
5265 hash_bit = hash_value & 0x1F; 5813 hash_bit = hash_value & 0x1F;
5266 5814
5267 mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg); 5815 mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg);
@@ -5275,9 +5823,12 @@ e1000_mta_set(struct e1000_hw *hw,
5275 if((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) { 5823 if((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
5276 temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1)); 5824 temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1));
5277 E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); 5825 E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
5826 E1000_WRITE_FLUSH(hw);
5278 E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp); 5827 E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp);
5828 E1000_WRITE_FLUSH(hw);
5279 } else { 5829 } else {
5280 E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); 5830 E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
5831 E1000_WRITE_FLUSH(hw);
5281 } 5832 }
5282} 5833}
5283 5834
@@ -5334,7 +5885,9 @@ e1000_rar_set(struct e1000_hw *hw,
5334 } 5885 }
5335 5886
5336 E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); 5887 E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
5888 E1000_WRITE_FLUSH(hw);
5337 E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); 5889 E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
5890 E1000_WRITE_FLUSH(hw);
5338} 5891}
5339 5892
5340/****************************************************************************** 5893/******************************************************************************
@@ -5351,12 +5904,18 @@ e1000_write_vfta(struct e1000_hw *hw,
5351{ 5904{
5352 uint32_t temp; 5905 uint32_t temp;
5353 5906
5354 if((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { 5907 if (hw->mac_type == e1000_ich8lan)
5908 return;
5909
5910 if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
5355 temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); 5911 temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
5356 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); 5912 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
5913 E1000_WRITE_FLUSH(hw);
5357 E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); 5914 E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
5915 E1000_WRITE_FLUSH(hw);
5358 } else { 5916 } else {
5359 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); 5917 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
5918 E1000_WRITE_FLUSH(hw);
5360 } 5919 }
5361} 5920}
5362 5921
@@ -5373,6 +5932,9 @@ e1000_clear_vfta(struct e1000_hw *hw)
5373 uint32_t vfta_offset = 0; 5932 uint32_t vfta_offset = 0;
5374 uint32_t vfta_bit_in_reg = 0; 5933 uint32_t vfta_bit_in_reg = 0;
5375 5934
5935 if (hw->mac_type == e1000_ich8lan)
5936 return;
5937
5376 if (hw->mac_type == e1000_82573) { 5938 if (hw->mac_type == e1000_82573) {
5377 if (hw->mng_cookie.vlan_id != 0) { 5939 if (hw->mng_cookie.vlan_id != 0) {
5378 /* The VFTA is a 4096b bit-field, each identifying a single VLAN 5940 /* The VFTA is a 4096b bit-field, each identifying a single VLAN
@@ -5392,6 +5954,7 @@ e1000_clear_vfta(struct e1000_hw *hw)
5392 * manageability unit */ 5954 * manageability unit */
5393 vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; 5955 vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
5394 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); 5956 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
5957 E1000_WRITE_FLUSH(hw);
5395 } 5958 }
5396} 5959}
5397 5960
@@ -5421,9 +5984,18 @@ e1000_id_led_init(struct e1000_hw * hw)
5421 DEBUGOUT("EEPROM Read Error\n"); 5984 DEBUGOUT("EEPROM Read Error\n");
5422 return -E1000_ERR_EEPROM; 5985 return -E1000_ERR_EEPROM;
5423 } 5986 }
5424 if((eeprom_data== ID_LED_RESERVED_0000) || 5987
5425 (eeprom_data == ID_LED_RESERVED_FFFF)) eeprom_data = ID_LED_DEFAULT; 5988 if ((hw->mac_type == e1000_82573) &&
5426 for(i = 0; i < 4; i++) { 5989 (eeprom_data == ID_LED_RESERVED_82573))
5990 eeprom_data = ID_LED_DEFAULT_82573;
5991 else if ((eeprom_data == ID_LED_RESERVED_0000) ||
5992 (eeprom_data == ID_LED_RESERVED_FFFF)) {
5993 if (hw->mac_type == e1000_ich8lan)
5994 eeprom_data = ID_LED_DEFAULT_ICH8LAN;
5995 else
5996 eeprom_data = ID_LED_DEFAULT;
5997 }
5998 for (i = 0; i < 4; i++) {
5427 temp = (eeprom_data >> (i << 2)) & led_mask; 5999 temp = (eeprom_data >> (i << 2)) & led_mask;
5428 switch(temp) { 6000 switch(temp) {
5429 case ID_LED_ON1_DEF2: 6001 case ID_LED_ON1_DEF2:
@@ -5519,6 +6091,44 @@ e1000_setup_led(struct e1000_hw *hw)
5519} 6091}
5520 6092
5521/****************************************************************************** 6093/******************************************************************************
6094 * Used on 82571 and later Si that has LED blink bits.
6095 * Callers must use their own timer and should have already called
6096 * e1000_id_led_init()
 6097 * Call e1000_cleanup_led() to stop blinking
6098 *
6099 * hw - Struct containing variables accessed by shared code
6100 *****************************************************************************/
6101int32_t
6102e1000_blink_led_start(struct e1000_hw *hw)
6103{
6104 int16_t i;
6105 uint32_t ledctl_blink = 0;
6106
 6107 DEBUGFUNC("e1000_blink_led_start");
6108
6109 if (hw->mac_type < e1000_82571) {
6110 /* Nothing to do */
6111 return E1000_SUCCESS;
6112 }
6113 if (hw->media_type == e1000_media_type_fiber) {
6114 /* always blink LED0 for PCI-E fiber */
6115 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
6116 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
6117 } else {
6118 /* set the blink bit for each LED that's "on" (0x0E) in ledctl_mode2 */
6119 ledctl_blink = hw->ledctl_mode2;
6120 for (i=0; i < 4; i++)
6121 if (((hw->ledctl_mode2 >> (i * 8)) & 0xFF) ==
6122 E1000_LEDCTL_MODE_LED_ON)
6123 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8));
6124 }
6125
6126 E1000_WRITE_REG(hw, LEDCTL, ledctl_blink);
6127
6128 return E1000_SUCCESS;
6129}
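/*
 * Illustrative call pattern, not part of this patch: a caller (for example
 * an ethtool identify handler) drives the blink with its own timing and then
 * restores the LEDs, as the comment above requires.  The helper name and the
 * duration_ms parameter are assumptions for this sketch; a real driver would
 * typically use a timer rather than a blocking delay.
 */
static void example_identify_blink(struct e1000_hw *hw, unsigned int duration_ms)
{
    e1000_setup_led(hw);        /* save LED state and take SW control */
    e1000_blink_led_start(hw);  /* program the LEDCTL blink bits */
    msec_delay(duration_ms);    /* caller-owned wait */
    e1000_cleanup_led(hw);      /* restore the saved LED configuration */
}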
6130
6131/******************************************************************************
 5522 * Restores the saved state of the SW controllable LED. 6132
5523 * 6133 *
5524 * hw - Struct containing variables accessed by shared code 6134 * hw - Struct containing variables accessed by shared code
@@ -5548,6 +6158,10 @@ e1000_cleanup_led(struct e1000_hw *hw)
5548 return ret_val; 6158 return ret_val;
5549 /* Fall Through */ 6159 /* Fall Through */
5550 default: 6160 default:
6161 if (hw->phy_type == e1000_phy_ife) {
6162 e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
6163 break;
6164 }
5551 /* Restore LEDCTL settings */ 6165 /* Restore LEDCTL settings */
5552 E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_default); 6166 E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_default);
5553 break; 6167 break;
@@ -5592,7 +6206,10 @@ e1000_led_on(struct e1000_hw *hw)
 5592 /* Clear SW Definable Pin 0 to turn on the LED */ 6206
5593 ctrl &= ~E1000_CTRL_SWDPIN0; 6207 ctrl &= ~E1000_CTRL_SWDPIN0;
5594 ctrl |= E1000_CTRL_SWDPIO0; 6208 ctrl |= E1000_CTRL_SWDPIO0;
5595 } else if(hw->media_type == e1000_media_type_copper) { 6209 } else if (hw->phy_type == e1000_phy_ife) {
6210 e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
6211 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
6212 } else if (hw->media_type == e1000_media_type_copper) {
5596 E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode2); 6213 E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode2);
5597 return E1000_SUCCESS; 6214 return E1000_SUCCESS;
5598 } 6215 }
@@ -5640,7 +6257,10 @@ e1000_led_off(struct e1000_hw *hw)
 5640 /* Set SW Definable Pin 0 to turn off the LED */ 6257
5641 ctrl |= E1000_CTRL_SWDPIN0; 6258 ctrl |= E1000_CTRL_SWDPIN0;
5642 ctrl |= E1000_CTRL_SWDPIO0; 6259 ctrl |= E1000_CTRL_SWDPIO0;
5643 } else if(hw->media_type == e1000_media_type_copper) { 6260 } else if (hw->phy_type == e1000_phy_ife) {
6261 e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
6262 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
6263 } else if (hw->media_type == e1000_media_type_copper) {
5644 E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1); 6264 E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1);
5645 return E1000_SUCCESS; 6265 return E1000_SUCCESS;
5646 } 6266 }
@@ -5678,12 +6298,16 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
5678 temp = E1000_READ_REG(hw, XOFFRXC); 6298 temp = E1000_READ_REG(hw, XOFFRXC);
5679 temp = E1000_READ_REG(hw, XOFFTXC); 6299 temp = E1000_READ_REG(hw, XOFFTXC);
5680 temp = E1000_READ_REG(hw, FCRUC); 6300 temp = E1000_READ_REG(hw, FCRUC);
6301
6302 if (hw->mac_type != e1000_ich8lan) {
5681 temp = E1000_READ_REG(hw, PRC64); 6303 temp = E1000_READ_REG(hw, PRC64);
5682 temp = E1000_READ_REG(hw, PRC127); 6304 temp = E1000_READ_REG(hw, PRC127);
5683 temp = E1000_READ_REG(hw, PRC255); 6305 temp = E1000_READ_REG(hw, PRC255);
5684 temp = E1000_READ_REG(hw, PRC511); 6306 temp = E1000_READ_REG(hw, PRC511);
5685 temp = E1000_READ_REG(hw, PRC1023); 6307 temp = E1000_READ_REG(hw, PRC1023);
5686 temp = E1000_READ_REG(hw, PRC1522); 6308 temp = E1000_READ_REG(hw, PRC1522);
6309 }
6310
5687 temp = E1000_READ_REG(hw, GPRC); 6311 temp = E1000_READ_REG(hw, GPRC);
5688 temp = E1000_READ_REG(hw, BPRC); 6312 temp = E1000_READ_REG(hw, BPRC);
5689 temp = E1000_READ_REG(hw, MPRC); 6313 temp = E1000_READ_REG(hw, MPRC);
@@ -5703,12 +6327,16 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
5703 temp = E1000_READ_REG(hw, TOTH); 6327 temp = E1000_READ_REG(hw, TOTH);
5704 temp = E1000_READ_REG(hw, TPR); 6328 temp = E1000_READ_REG(hw, TPR);
5705 temp = E1000_READ_REG(hw, TPT); 6329 temp = E1000_READ_REG(hw, TPT);
6330
6331 if (hw->mac_type != e1000_ich8lan) {
5706 temp = E1000_READ_REG(hw, PTC64); 6332 temp = E1000_READ_REG(hw, PTC64);
5707 temp = E1000_READ_REG(hw, PTC127); 6333 temp = E1000_READ_REG(hw, PTC127);
5708 temp = E1000_READ_REG(hw, PTC255); 6334 temp = E1000_READ_REG(hw, PTC255);
5709 temp = E1000_READ_REG(hw, PTC511); 6335 temp = E1000_READ_REG(hw, PTC511);
5710 temp = E1000_READ_REG(hw, PTC1023); 6336 temp = E1000_READ_REG(hw, PTC1023);
5711 temp = E1000_READ_REG(hw, PTC1522); 6337 temp = E1000_READ_REG(hw, PTC1522);
6338 }
6339
5712 temp = E1000_READ_REG(hw, MPTC); 6340 temp = E1000_READ_REG(hw, MPTC);
5713 temp = E1000_READ_REG(hw, BPTC); 6341 temp = E1000_READ_REG(hw, BPTC);
5714 6342
@@ -5731,6 +6359,9 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
5731 6359
5732 temp = E1000_READ_REG(hw, IAC); 6360 temp = E1000_READ_REG(hw, IAC);
5733 temp = E1000_READ_REG(hw, ICRXOC); 6361 temp = E1000_READ_REG(hw, ICRXOC);
6362
6363 if (hw->mac_type == e1000_ich8lan) return;
6364
5734 temp = E1000_READ_REG(hw, ICRXPTC); 6365 temp = E1000_READ_REG(hw, ICRXPTC);
5735 temp = E1000_READ_REG(hw, ICRXATC); 6366 temp = E1000_READ_REG(hw, ICRXATC);
5736 temp = E1000_READ_REG(hw, ICTXPTC); 6367 temp = E1000_READ_REG(hw, ICTXPTC);
@@ -5911,6 +6542,7 @@ e1000_get_bus_info(struct e1000_hw *hw)
5911 hw->bus_width = e1000_bus_width_pciex_1; 6542 hw->bus_width = e1000_bus_width_pciex_1;
5912 break; 6543 break;
5913 case e1000_82571: 6544 case e1000_82571:
6545 case e1000_ich8lan:
5914 case e1000_80003es2lan: 6546 case e1000_80003es2lan:
5915 hw->bus_type = e1000_bus_type_pci_express; 6547 hw->bus_type = e1000_bus_type_pci_express;
5916 hw->bus_speed = e1000_bus_speed_2500; 6548 hw->bus_speed = e1000_bus_speed_2500;
@@ -5948,8 +6580,6 @@ e1000_get_bus_info(struct e1000_hw *hw)
5948 break; 6580 break;
5949 } 6581 }
5950} 6582}
5951
5952#if 0
5953/****************************************************************************** 6583/******************************************************************************
5954 * Reads a value from one of the devices registers using port I/O (as opposed 6584 * Reads a value from one of the devices registers using port I/O (as opposed
5955 * memory mapped I/O). Only 82544 and newer devices support port I/O. 6585 * memory mapped I/O). Only 82544 and newer devices support port I/O.
@@ -5967,7 +6597,6 @@ e1000_read_reg_io(struct e1000_hw *hw,
5967 e1000_io_write(hw, io_addr, offset); 6597 e1000_io_write(hw, io_addr, offset);
5968 return e1000_io_read(hw, io_data); 6598 return e1000_io_read(hw, io_data);
5969} 6599}
5970#endif /* 0 */
5971 6600
5972/****************************************************************************** 6601/******************************************************************************
5973 * Writes a value to one of the devices registers using port I/O (as opposed to 6602 * Writes a value to one of the devices registers using port I/O (as opposed to
@@ -6012,8 +6641,6 @@ e1000_get_cable_length(struct e1000_hw *hw,
6012{ 6641{
6013 int32_t ret_val; 6642 int32_t ret_val;
6014 uint16_t agc_value = 0; 6643 uint16_t agc_value = 0;
6015 uint16_t cur_agc, min_agc = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
6016 uint16_t max_agc = 0;
6017 uint16_t i, phy_data; 6644 uint16_t i, phy_data;
6018 uint16_t cable_length; 6645 uint16_t cable_length;
6019 6646
@@ -6086,6 +6713,8 @@ e1000_get_cable_length(struct e1000_hw *hw,
6086 break; 6713 break;
6087 } 6714 }
6088 } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ 6715 } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
6716 uint16_t cur_agc_value;
6717 uint16_t min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
6089 uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = 6718 uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
6090 {IGP01E1000_PHY_AGC_A, 6719 {IGP01E1000_PHY_AGC_A,
6091 IGP01E1000_PHY_AGC_B, 6720 IGP01E1000_PHY_AGC_B,
@@ -6098,23 +6727,23 @@ e1000_get_cable_length(struct e1000_hw *hw,
6098 if(ret_val) 6727 if(ret_val)
6099 return ret_val; 6728 return ret_val;
6100 6729
6101 cur_agc = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; 6730 cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
6102 6731
6103 /* Array bound check. */ 6732 /* Value bound check. */
6104 if((cur_agc >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || 6733 if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
6105 (cur_agc == 0)) 6734 (cur_agc_value == 0))
6106 return -E1000_ERR_PHY; 6735 return -E1000_ERR_PHY;
6107 6736
6108 agc_value += cur_agc; 6737 agc_value += cur_agc_value;
6109 6738
6110 /* Update minimal AGC value. */ 6739 /* Update minimal AGC value. */
6111 if(min_agc > cur_agc) 6740 if (min_agc_value > cur_agc_value)
6112 min_agc = cur_agc; 6741 min_agc_value = cur_agc_value;
6113 } 6742 }
6114 6743
6115 /* Remove the minimal AGC result for length < 50m */ 6744 /* Remove the minimal AGC result for length < 50m */
6116 if(agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { 6745 if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
6117 agc_value -= min_agc; 6746 agc_value -= min_agc_value;
6118 6747
6119 /* Get the average length of the remaining 3 channels */ 6748 /* Get the average length of the remaining 3 channels */
6120 agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); 6749 agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
@@ -6130,7 +6759,10 @@ e1000_get_cable_length(struct e1000_hw *hw,
6130 IGP01E1000_AGC_RANGE) : 0; 6759 IGP01E1000_AGC_RANGE) : 0;
6131 *max_length = e1000_igp_cable_length_table[agc_value] + 6760 *max_length = e1000_igp_cable_length_table[agc_value] +
6132 IGP01E1000_AGC_RANGE; 6761 IGP01E1000_AGC_RANGE;
6133 } else if (hw->phy_type == e1000_phy_igp_2) { 6762 } else if (hw->phy_type == e1000_phy_igp_2 ||
6763 hw->phy_type == e1000_phy_igp_3) {
6764 uint16_t cur_agc_index, max_agc_index = 0;
6765 uint16_t min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
6134 uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = 6766 uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
6135 {IGP02E1000_PHY_AGC_A, 6767 {IGP02E1000_PHY_AGC_A,
6136 IGP02E1000_PHY_AGC_B, 6768 IGP02E1000_PHY_AGC_B,
@@ -6145,19 +6777,27 @@ e1000_get_cable_length(struct e1000_hw *hw,
 6145 /* Getting bits 15:9, which represent the combination of coarse and 6777 /* Getting bits 15:9, which represent the combination of coarse and
6146 * fine gain values. The result is a number that can be put into 6778 * fine gain values. The result is a number that can be put into
6147 * the lookup table to obtain the approximate cable length. */ 6779 * the lookup table to obtain the approximate cable length. */
6148 cur_agc = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & 6780 cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
6149 IGP02E1000_AGC_LENGTH_MASK; 6781 IGP02E1000_AGC_LENGTH_MASK;
6150 6782
6151 /* Remove min & max AGC values from calculation. */ 6783 /* Array index bound check. */
6152 if (e1000_igp_2_cable_length_table[min_agc] > e1000_igp_2_cable_length_table[cur_agc]) 6784 if ((cur_agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE) ||
6153 min_agc = cur_agc; 6785 (cur_agc_index == 0))
6154 if (e1000_igp_2_cable_length_table[max_agc] < e1000_igp_2_cable_length_table[cur_agc]) 6786 return -E1000_ERR_PHY;
6155 max_agc = cur_agc;
6156 6787
6157 agc_value += e1000_igp_2_cable_length_table[cur_agc]; 6788 /* Remove min & max AGC values from calculation. */
6789 if (e1000_igp_2_cable_length_table[min_agc_index] >
6790 e1000_igp_2_cable_length_table[cur_agc_index])
6791 min_agc_index = cur_agc_index;
6792 if (e1000_igp_2_cable_length_table[max_agc_index] <
6793 e1000_igp_2_cable_length_table[cur_agc_index])
6794 max_agc_index = cur_agc_index;
6795
6796 agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
6158 } 6797 }
6159 6798
6160 agc_value -= (e1000_igp_2_cable_length_table[min_agc] + e1000_igp_2_cable_length_table[max_agc]); 6799 agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
6800 e1000_igp_2_cable_length_table[max_agc_index]);
6161 agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); 6801 agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
6162 6802
6163 /* Calculate cable length with the error range of +/- 10 meters. */ 6803 /* Calculate cable length with the error range of +/- 10 meters. */
@@ -6203,7 +6843,8 @@ e1000_check_polarity(struct e1000_hw *hw,
6203 return ret_val; 6843 return ret_val;
6204 *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >> 6844 *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >>
6205 M88E1000_PSSR_REV_POLARITY_SHIFT; 6845 M88E1000_PSSR_REV_POLARITY_SHIFT;
6206 } else if(hw->phy_type == e1000_phy_igp || 6846 } else if (hw->phy_type == e1000_phy_igp ||
6847 hw->phy_type == e1000_phy_igp_3 ||
6207 hw->phy_type == e1000_phy_igp_2) { 6848 hw->phy_type == e1000_phy_igp_2) {
6208 /* Read the Status register to check the speed */ 6849 /* Read the Status register to check the speed */
6209 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, 6850 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
@@ -6229,6 +6870,13 @@ e1000_check_polarity(struct e1000_hw *hw,
6229 * 100 Mbps this bit is always 0) */ 6870 * 100 Mbps this bit is always 0) */
6230 *polarity = phy_data & IGP01E1000_PSSR_POLARITY_REVERSED; 6871 *polarity = phy_data & IGP01E1000_PSSR_POLARITY_REVERSED;
6231 } 6872 }
6873 } else if (hw->phy_type == e1000_phy_ife) {
6874 ret_val = e1000_read_phy_reg(hw, IFE_PHY_EXTENDED_STATUS_CONTROL,
6875 &phy_data);
6876 if (ret_val)
6877 return ret_val;
6878 *polarity = (phy_data & IFE_PESC_POLARITY_REVERSED) >>
6879 IFE_PESC_POLARITY_REVERSED_SHIFT;
6232 } 6880 }
6233 return E1000_SUCCESS; 6881 return E1000_SUCCESS;
6234} 6882}
@@ -6256,7 +6904,8 @@ e1000_check_downshift(struct e1000_hw *hw)
6256 6904
6257 DEBUGFUNC("e1000_check_downshift"); 6905 DEBUGFUNC("e1000_check_downshift");
6258 6906
6259 if(hw->phy_type == e1000_phy_igp || 6907 if (hw->phy_type == e1000_phy_igp ||
6908 hw->phy_type == e1000_phy_igp_3 ||
6260 hw->phy_type == e1000_phy_igp_2) { 6909 hw->phy_type == e1000_phy_igp_2) {
6261 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, 6910 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
6262 &phy_data); 6911 &phy_data);
@@ -6273,6 +6922,9 @@ e1000_check_downshift(struct e1000_hw *hw)
6273 6922
6274 hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> 6923 hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
6275 M88E1000_PSSR_DOWNSHIFT_SHIFT; 6924 M88E1000_PSSR_DOWNSHIFT_SHIFT;
6925 } else if (hw->phy_type == e1000_phy_ife) {
6926 /* e1000_phy_ife supports 10/100 speed only */
6927 hw->speed_downgraded = FALSE;
6276 } 6928 }
6277 6929
6278 return E1000_SUCCESS; 6930 return E1000_SUCCESS;
@@ -6317,7 +6969,9 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
6317 6969
6318 if(speed == SPEED_1000) { 6970 if(speed == SPEED_1000) {
6319 6971
6320 e1000_get_cable_length(hw, &min_length, &max_length); 6972 ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
6973 if (ret_val)
6974 return ret_val;
6321 6975
6322 if((hw->dsp_config_state == e1000_dsp_config_enabled) && 6976 if((hw->dsp_config_state == e1000_dsp_config_enabled) &&
6323 min_length >= e1000_igp_cable_length_50) { 6977 min_length >= e1000_igp_cable_length_50) {
@@ -6525,20 +7179,27 @@ static int32_t
6525e1000_set_d3_lplu_state(struct e1000_hw *hw, 7179e1000_set_d3_lplu_state(struct e1000_hw *hw,
6526 boolean_t active) 7180 boolean_t active)
6527{ 7181{
7182 uint32_t phy_ctrl = 0;
6528 int32_t ret_val; 7183 int32_t ret_val;
6529 uint16_t phy_data; 7184 uint16_t phy_data;
6530 DEBUGFUNC("e1000_set_d3_lplu_state"); 7185 DEBUGFUNC("e1000_set_d3_lplu_state");
6531 7186
6532 if(hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2) 7187 if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2
7188 && hw->phy_type != e1000_phy_igp_3)
6533 return E1000_SUCCESS; 7189 return E1000_SUCCESS;
6534 7190
6535 /* During driver activity LPLU should not be used or it will attain link 7191 /* During driver activity LPLU should not be used or it will attain link
6536 * from the lowest speeds starting from 10Mbps. The capability is used for 7192 * from the lowest speeds starting from 10Mbps. The capability is used for
6537 * Dx transitions and states */ 7193 * Dx transitions and states */
6538 if(hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) { 7194 if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
6539 ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); 7195 ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
6540 if(ret_val) 7196 if (ret_val)
6541 return ret_val; 7197 return ret_val;
7198 } else if (hw->mac_type == e1000_ich8lan) {
7199 /* MAC writes into PHY register based on the state transition
7200 * and start auto-negotiation. SW driver can overwrite the settings
7201 * in CSR PHY power control E1000_PHY_CTRL register. */
7202 phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
6542 } else { 7203 } else {
6543 ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); 7204 ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
6544 if(ret_val) 7205 if(ret_val)
@@ -6553,11 +7214,16 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
6553 if(ret_val) 7214 if(ret_val)
6554 return ret_val; 7215 return ret_val;
6555 } else { 7216 } else {
7217 if (hw->mac_type == e1000_ich8lan) {
7218 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
7219 E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
7220 } else {
6556 phy_data &= ~IGP02E1000_PM_D3_LPLU; 7221 phy_data &= ~IGP02E1000_PM_D3_LPLU;
6557 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, 7222 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
6558 phy_data); 7223 phy_data);
6559 if (ret_val) 7224 if (ret_val)
6560 return ret_val; 7225 return ret_val;
7226 }
6561 } 7227 }
6562 7228
6563 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during 7229 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
@@ -6593,17 +7259,22 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
6593 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { 7259 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
6594 7260
6595 if(hw->mac_type == e1000_82541_rev_2 || 7261 if(hw->mac_type == e1000_82541_rev_2 ||
6596 hw->mac_type == e1000_82547_rev_2) { 7262 hw->mac_type == e1000_82547_rev_2) {
6597 phy_data |= IGP01E1000_GMII_FLEX_SPD; 7263 phy_data |= IGP01E1000_GMII_FLEX_SPD;
6598 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); 7264 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
6599 if(ret_val) 7265 if(ret_val)
6600 return ret_val; 7266 return ret_val;
6601 } else { 7267 } else {
7268 if (hw->mac_type == e1000_ich8lan) {
7269 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
7270 E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
7271 } else {
6602 phy_data |= IGP02E1000_PM_D3_LPLU; 7272 phy_data |= IGP02E1000_PM_D3_LPLU;
6603 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, 7273 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
6604 phy_data); 7274 phy_data);
6605 if (ret_val) 7275 if (ret_val)
6606 return ret_val; 7276 return ret_val;
7277 }
6607 } 7278 }
6608 7279
6609 /* When LPLU is enabled we should disable SmartSpeed */ 7280 /* When LPLU is enabled we should disable SmartSpeed */
@@ -6638,6 +7309,7 @@ static int32_t
6638e1000_set_d0_lplu_state(struct e1000_hw *hw, 7309e1000_set_d0_lplu_state(struct e1000_hw *hw,
6639 boolean_t active) 7310 boolean_t active)
6640{ 7311{
7312 uint32_t phy_ctrl = 0;
6641 int32_t ret_val; 7313 int32_t ret_val;
6642 uint16_t phy_data; 7314 uint16_t phy_data;
6643 DEBUGFUNC("e1000_set_d0_lplu_state"); 7315 DEBUGFUNC("e1000_set_d0_lplu_state");
@@ -6645,15 +7317,24 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
6645 if(hw->mac_type <= e1000_82547_rev_2) 7317 if(hw->mac_type <= e1000_82547_rev_2)
6646 return E1000_SUCCESS; 7318 return E1000_SUCCESS;
6647 7319
7320 if (hw->mac_type == e1000_ich8lan) {
7321 phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
7322 } else {
6648 ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); 7323 ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
6649 if(ret_val) 7324 if(ret_val)
6650 return ret_val; 7325 return ret_val;
7326 }
6651 7327
6652 if (!active) { 7328 if (!active) {
7329 if (hw->mac_type == e1000_ich8lan) {
7330 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
7331 E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
7332 } else {
6653 phy_data &= ~IGP02E1000_PM_D0_LPLU; 7333 phy_data &= ~IGP02E1000_PM_D0_LPLU;
6654 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); 7334 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
6655 if (ret_val) 7335 if (ret_val)
6656 return ret_val; 7336 return ret_val;
7337 }
6657 7338
6658 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during 7339 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
6659 * Dx states where the power conservation is most important. During 7340 * Dx states where the power conservation is most important. During
@@ -6686,10 +7367,15 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
6686 7367
6687 } else { 7368 } else {
6688 7369
7370 if (hw->mac_type == e1000_ich8lan) {
7371 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
7372 E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
7373 } else {
6689 phy_data |= IGP02E1000_PM_D0_LPLU; 7374 phy_data |= IGP02E1000_PM_D0_LPLU;
6690 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); 7375 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
6691 if (ret_val) 7376 if (ret_val)
6692 return ret_val; 7377 return ret_val;
7378 }
6693 7379
6694 /* When LPLU is enabled we should disable SmartSpeed */ 7380 /* When LPLU is enabled we should disable SmartSpeed */
6695 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); 7381 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
@@ -6928,8 +7614,10 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
6928 7614
6929 length >>= 2; 7615 length >>= 2;
6930 /* The device driver writes the relevant command block into the ram area. */ 7616 /* The device driver writes the relevant command block into the ram area. */
6931 for (i = 0; i < length; i++) 7617 for (i = 0; i < length; i++) {
6932 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i)); 7618 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i));
7619 E1000_WRITE_FLUSH(hw);
7620 }
6933 7621
6934 return E1000_SUCCESS; 7622 return E1000_SUCCESS;
6935} 7623}
@@ -6961,15 +7649,18 @@ e1000_mng_write_commit(
6961 * returns - TRUE when the mode is IAMT or FALSE. 7649 * returns - TRUE when the mode is IAMT or FALSE.
6962 ****************************************************************************/ 7650 ****************************************************************************/
6963boolean_t 7651boolean_t
6964e1000_check_mng_mode( 7652e1000_check_mng_mode(struct e1000_hw *hw)
6965 struct e1000_hw *hw)
6966{ 7653{
6967 uint32_t fwsm; 7654 uint32_t fwsm;
6968 7655
6969 fwsm = E1000_READ_REG(hw, FWSM); 7656 fwsm = E1000_READ_REG(hw, FWSM);
6970 7657
6971 if((fwsm & E1000_FWSM_MODE_MASK) == 7658 if (hw->mac_type == e1000_ich8lan) {
6972 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) 7659 if ((fwsm & E1000_FWSM_MODE_MASK) ==
7660 (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
7661 return TRUE;
7662 } else if ((fwsm & E1000_FWSM_MODE_MASK) ==
7663 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
6973 return TRUE; 7664 return TRUE;
6974 7665
6975 return FALSE; 7666 return FALSE;
@@ -7209,7 +7900,6 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw)
7209 E1000_WRITE_REG(hw, CTRL, ctrl); 7900 E1000_WRITE_REG(hw, CTRL, ctrl);
7210} 7901}
7211 7902
7212#if 0
7213/*************************************************************************** 7903/***************************************************************************
7214 * 7904 *
7215 * Enables PCI-Express master access. 7905 * Enables PCI-Express master access.
@@ -7233,7 +7923,6 @@ e1000_enable_pciex_master(struct e1000_hw *hw)
7233 ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE; 7923 ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE;
7234 E1000_WRITE_REG(hw, CTRL, ctrl); 7924 E1000_WRITE_REG(hw, CTRL, ctrl);
7235} 7925}
7236#endif /* 0 */
7237 7926
7238/******************************************************************************* 7927/*******************************************************************************
7239 * 7928 *
@@ -7299,8 +7988,10 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
7299 case e1000_82572: 7988 case e1000_82572:
7300 case e1000_82573: 7989 case e1000_82573:
7301 case e1000_80003es2lan: 7990 case e1000_80003es2lan:
7302 while(timeout) { 7991 case e1000_ich8lan:
7303 if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break; 7992 while (timeout) {
7993 if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD)
7994 break;
7304 else msec_delay(1); 7995 else msec_delay(1);
7305 timeout--; 7996 timeout--;
7306 } 7997 }
@@ -7340,7 +8031,7 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
7340 8031
7341 switch (hw->mac_type) { 8032 switch (hw->mac_type) {
7342 default: 8033 default:
7343 msec_delay(10); 8034 msec_delay_irq(10);
7344 break; 8035 break;
7345 case e1000_80003es2lan: 8036 case e1000_80003es2lan:
7346 /* Separate *_CFG_DONE_* bit for each port */ 8037 /* Separate *_CFG_DONE_* bit for each port */
@@ -7523,6 +8214,13 @@ int32_t
7523e1000_check_phy_reset_block(struct e1000_hw *hw) 8214e1000_check_phy_reset_block(struct e1000_hw *hw)
7524{ 8215{
7525 uint32_t manc = 0; 8216 uint32_t manc = 0;
8217 uint32_t fwsm = 0;
8218
8219 if (hw->mac_type == e1000_ich8lan) {
8220 fwsm = E1000_READ_REG(hw, FWSM);
8221 return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS
8222 : E1000_BLK_PHY_RESET;
8223 }
7526 8224
7527 if (hw->mac_type > e1000_82547_rev_2) 8225 if (hw->mac_type > e1000_82547_rev_2)
7528 manc = E1000_READ_REG(hw, MANC); 8226 manc = E1000_READ_REG(hw, MANC);
@@ -7549,6 +8247,8 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
7549 if((fwsm & E1000_FWSM_MODE_MASK) != 0) 8247 if((fwsm & E1000_FWSM_MODE_MASK) != 0)
7550 return TRUE; 8248 return TRUE;
7551 break; 8249 break;
8250 case e1000_ich8lan:
8251 return TRUE;
7552 default: 8252 default:
7553 break; 8253 break;
7554 } 8254 }
@@ -7556,4 +8256,846 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
7556} 8256}
7557 8257
7558 8258
8259/******************************************************************************
8260 * Configure PCI-Ex no-snoop
8261 *
8262 * hw - Struct containing variables accessed by shared code.
8263 * no_snoop - Bitmap of no-snoop events.
8264 *
8265 * returns: E1000_SUCCESS
8266 *
8267 *****************************************************************************/
8268int32_t
8269e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
8270{
8271 uint32_t gcr_reg = 0;
8272
8273 DEBUGFUNC("e1000_set_pci_ex_no_snoop");
8274
8275 if (hw->bus_type == e1000_bus_type_unknown)
8276 e1000_get_bus_info(hw);
8277
8278 if (hw->bus_type != e1000_bus_type_pci_express)
8279 return E1000_SUCCESS;
8280
8281 if (no_snoop) {
8282 gcr_reg = E1000_READ_REG(hw, GCR);
8283 gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL);
8284 gcr_reg |= no_snoop;
8285 E1000_WRITE_REG(hw, GCR, gcr_reg);
8286 }
8287 if (hw->mac_type == e1000_ich8lan) {
8288 uint32_t ctrl_ext;
8289
8290 E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL);
8291
8292 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
8293 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
8294 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
8295 }
8296
8297 return E1000_SUCCESS;
8298}
8299
8300/***************************************************************************
8301 *
8302 * Get software semaphore FLAG bit (SWFLAG).
8303 * SWFLAG is used to synchronize the access to all shared resource between
8304 * SW, FW and HW.
8305 *
8306 * hw: Struct containing variables accessed by shared code
8307 *
8308 ***************************************************************************/
8309int32_t
8310e1000_get_software_flag(struct e1000_hw *hw)
8311{
8312 int32_t timeout = PHY_CFG_TIMEOUT;
8313 uint32_t extcnf_ctrl;
8314
8315 DEBUGFUNC("e1000_get_software_flag");
8316
8317 if (hw->mac_type == e1000_ich8lan) {
8318 while (timeout) {
8319 extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
8320 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8321 E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
8322
8323 extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
8324 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8325 break;
8326 msec_delay_irq(1);
8327 timeout--;
8328 }
8329
8330 if (!timeout) {
8331 DEBUGOUT("FW or HW locks the resource too long.\n");
8332 return -E1000_ERR_CONFIG;
8333 }
8334 }
8335
8336 return E1000_SUCCESS;
8337}
8338
8339/***************************************************************************
8340 *
8341 * Release software semaphore FLAG bit (SWFLAG).
8342 * SWFLAG is used to synchronize the access to all shared resource between
8343 * SW, FW and HW.
8344 *
8345 * hw: Struct containing variables accessed by shared code
8346 *
8347 ***************************************************************************/
8348void
8349e1000_release_software_flag(struct e1000_hw *hw)
8350{
8351 uint32_t extcnf_ctrl;
8352
8353 DEBUGFUNC("e1000_release_software_flag");
8354
8355 if (hw->mac_type == e1000_ich8lan) {
8356 extcnf_ctrl= E1000_READ_REG(hw, EXTCNF_CTRL);
8357 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8358 E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
8359 }
8360
8361 return;
8362}
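/*
 * Sketch, not part of this patch: every ICH8 NVM access in the functions
 * below brackets its flash work with the SWFLAG semaphore so that SW, FW and
 * HW accesses are arbitrated.  The helper name is an assumption for this
 * example only.
 */
static int32_t example_swflag_usage(struct e1000_hw *hw)
{
    int32_t error;

    error = e1000_get_software_flag(hw);    /* acquire SWFLAG */
    if (error != E1000_SUCCESS)
        return error;

    /* ... perform flash read/write/erase cycles here ... */

    e1000_release_software_flag(hw);        /* always release when done */
    return E1000_SUCCESS;
}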
8363
8364/***************************************************************************
8365 *
 8366 * Disable dynamic power down mode in ife PHY.
 8367 * It can be used to work around the band-gap problem.
8368 *
8369 * hw: Struct containing variables accessed by shared code
8370 *
8371 ***************************************************************************/
8372int32_t
8373e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
8374{
8375 uint16_t phy_data;
8376 int32_t ret_val = E1000_SUCCESS;
8377
8378 DEBUGFUNC("e1000_ife_disable_dynamic_power_down");
8379
8380 if (hw->phy_type == e1000_phy_ife) {
8381 ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
8382 if (ret_val)
8383 return ret_val;
8384
8385 phy_data |= IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN;
8386 ret_val = e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, phy_data);
8387 }
8388
8389 return ret_val;
8390}
8391
8392/***************************************************************************
8393 *
 8394 * Enable dynamic power down mode in ife PHY.
 8395 * It can be used to work around the band-gap problem.
8396 *
8397 * hw: Struct containing variables accessed by shared code
8398 *
8399 ***************************************************************************/
8400int32_t
8401e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
8402{
8403 uint16_t phy_data;
8404 int32_t ret_val = E1000_SUCCESS;
8405
8406 DEBUGFUNC("e1000_ife_enable_dynamic_power_down");
8407
8408 if (hw->phy_type == e1000_phy_ife) {
8409 ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
8410 if (ret_val)
8411 return ret_val;
8412
8413 phy_data &= ~IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN;
8414 ret_val = e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, phy_data);
8415 }
8416
8417 return ret_val;
8418}
8419
8420/******************************************************************************
8421 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8422 * register.
8423 *
8424 * hw - Struct containing variables accessed by shared code
8425 * offset - offset of word in the EEPROM to read
8426 * data - word read from the EEPROM
8427 * words - number of words to read
8428 *****************************************************************************/
8429int32_t
8430e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8431 uint16_t *data)
8432{
8433 int32_t error = E1000_SUCCESS;
8434 uint32_t flash_bank = 0;
8435 uint32_t act_offset = 0;
8436 uint32_t bank_offset = 0;
8437 uint16_t word = 0;
8438 uint16_t i = 0;
8439
8440 /* We need to know which is the valid flash bank. In the event
8441 * that we didn't allocate eeprom_shadow_ram, we may not be
8442 * managing flash_bank. So it cannot be trusted and needs
8443 * to be updated with each read.
8444 */
8445 /* Value of bit 22 corresponds to the flash bank we're on. */
8446 flash_bank = (E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL) ? 1 : 0;
8447
8448 /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
8449 bank_offset = flash_bank * (hw->flash_bank_size * 2);
8450
8451 error = e1000_get_software_flag(hw);
8452 if (error != E1000_SUCCESS)
8453 return error;
8454
8455 for (i = 0; i < words; i++) {
8456 if (hw->eeprom_shadow_ram != NULL &&
8457 hw->eeprom_shadow_ram[offset+i].modified == TRUE) {
8458 data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word;
8459 } else {
8460 /* The NVM part needs a byte offset, hence * 2 */
8461 act_offset = bank_offset + ((offset + i) * 2);
8462 error = e1000_read_ich8_word(hw, act_offset, &word);
8463 if (error != E1000_SUCCESS)
8464 break;
8465 data[i] = word;
8466 }
8467 }
8468
8469 e1000_release_software_flag(hw);
8470
8471 return error;
8472}
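/*
 * Worked sketch, not part of this patch: how a word offset is turned into a
 * byte address inside the currently valid flash bank, exactly as done in the
 * read loop above.  flash_bank_size is in words, hence the two "* 2" byte
 * conversions.  The helper name is an assumption for this example only.
 */
static uint32_t example_ich8_byte_address(struct e1000_hw *hw, uint16_t word_offset)
{
    uint32_t flash_bank, bank_offset;

    /* Bank 1 is in use when EECD.SEC1VAL is set, otherwise bank 0. */
    flash_bank = (E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL) ? 1 : 0;
    bank_offset = flash_bank * (hw->flash_bank_size * 2);

    return bank_offset + ((uint32_t)word_offset * 2);
}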
8473
8474/******************************************************************************
8475 * Writes a 16 bit word or words to the EEPROM using the ICH8's flash access
8476 * register. Actually, writes are written to the shadow ram cache in the hw
8477 * structure hw->e1000_shadow_ram. e1000_commit_shadow_ram flushes this to
8478 * the NVM, which occurs when the NVM checksum is updated.
8479 *
8480 * hw - Struct containing variables accessed by shared code
8481 * offset - offset of word in the EEPROM to write
8482 * words - number of words to write
8483 * data - words to write to the EEPROM
8484 *****************************************************************************/
8485int32_t
8486e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8487 uint16_t *data)
8488{
8489 uint32_t i = 0;
8490 int32_t error = E1000_SUCCESS;
8491
8492 error = e1000_get_software_flag(hw);
8493 if (error != E1000_SUCCESS)
8494 return error;
8495
8496 /* A driver can write to the NVM only if it has eeprom_shadow_ram
8497 * allocated. Subsequent reads to the modified words are read from
8498 * this cached structure as well. Writes will only go into this
8499 * cached structure unless it's followed by a call to
8500 * e1000_update_eeprom_checksum() where it will commit the changes
8501 * and clear the "modified" field.
8502 */
8503 if (hw->eeprom_shadow_ram != NULL) {
8504 for (i = 0; i < words; i++) {
8505 if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
8506 hw->eeprom_shadow_ram[offset+i].modified = TRUE;
8507 hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
8508 } else {
8509 error = -E1000_ERR_EEPROM;
8510 break;
8511 }
8512 }
8513 } else {
8514 /* Drivers have the option to not allocate eeprom_shadow_ram as long
8515 * as they don't perform any NVM writes. An attempt in doing so
8516 * will result in this error.
8517 */
8518 error = -E1000_ERR_EEPROM;
8519 }
8520
8521 e1000_release_software_flag(hw);
8522
8523 return error;
8524}
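/*
 * Sketch, not part of this patch: writes on ICH8 only land in the shadow RAM
 * cache, so a typical sequence stores the word and then commits it via the
 * checksum update, as described in the comment above.  The helper name and
 * the offset/value parameters are assumptions for this example only.
 */
static int32_t example_ich8_nvm_write(struct e1000_hw *hw,
                                      uint16_t offset, uint16_t value)
{
    int32_t ret;

    ret = e1000_write_eeprom_ich8(hw, offset, 1, &value);  /* cached only */
    if (ret != E1000_SUCCESS)
        return ret;

    /* Commits modified shadow-RAM words to flash and clears "modified". */
    return e1000_update_eeprom_checksum(hw);
}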
8525
8526/******************************************************************************
8527 * This function does initial flash setup so that a new read/write/erase cycle
8528 * can be started.
8529 *
8530 * hw - The pointer to the hw structure
8531 ****************************************************************************/
8532int32_t
8533e1000_ich8_cycle_init(struct e1000_hw *hw)
8534{
8535 union ich8_hws_flash_status hsfsts;
8536 int32_t error = E1000_ERR_EEPROM;
8537 int32_t i = 0;
8538
8539 DEBUGFUNC("e1000_ich8_cycle_init");
8540
8541 hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
8542
 8543 /* Maybe check the Flash Descriptor Valid bit in Hw status */
8544 if (hsfsts.hsf_status.fldesvalid == 0) {
8545 DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.");
8546 return error;
8547 }
8548
8549 /* Clear FCERR in Hw status by writing 1 */
8550 /* Clear DAEL in Hw status by writing a 1 */
8551 hsfsts.hsf_status.flcerr = 1;
8552 hsfsts.hsf_status.dael = 1;
8553
8554 E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
8555
 8556 /* Either we should have a hardware SPI cycle-in-progress bit to check
 8557 * against in order to start a new cycle, or the FDONE bit should be changed
 8558 * in the hardware so that it is 1 after hardware reset, which can then be
 8559 * used as an indication whether a cycle is in progress or has been
 8560 * completed. We should also have some software semaphore mechanism to
 8561 * guard FDONE or the cycle-in-progress bit so that access to those bits by
 8562 * two threads can be serialized, or a way so that two threads don't
 8563 * start the cycle at the same time */
8564
8565 if (hsfsts.hsf_status.flcinprog == 0) {
8566 /* There is no cycle running at present, so we can start a cycle */
8567 /* Begin by setting Flash Cycle Done. */
8568 hsfsts.hsf_status.flcdone = 1;
8569 E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
8570 error = E1000_SUCCESS;
8571 } else {
 8572 /* otherwise poll for some time so the current cycle has a chance
8573 * to end before giving up. */
8574 for (i = 0; i < ICH8_FLASH_COMMAND_TIMEOUT; i++) {
8575 hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
8576 if (hsfsts.hsf_status.flcinprog == 0) {
8577 error = E1000_SUCCESS;
8578 break;
8579 }
8580 udelay(1);
8581 }
8582 if (error == E1000_SUCCESS) {
 8583 /* The previous cycle ended within the timeout,
 8584 * so now set the Flash Cycle Done. */
8585 hsfsts.hsf_status.flcdone = 1;
8586 E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
8587 } else {
8588 DEBUGOUT("Flash controller busy, cannot get access");
8589 }
8590 }
8591 return error;
8592}
8593
8594/******************************************************************************
8595 * This function starts a flash cycle and waits for its completion
8596 *
8597 * hw - The pointer to the hw structure
8598 ****************************************************************************/
8599int32_t
8600e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
8601{
8602 union ich8_hws_flash_ctrl hsflctl;
8603 union ich8_hws_flash_status hsfsts;
8604 int32_t error = E1000_ERR_EEPROM;
8605 uint32_t i = 0;
8606
8607 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8608 hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
8609 hsflctl.hsf_ctrl.flcgo = 1;
8610 E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
8611
8612 /* wait till FDONE bit is set to 1 */
8613 do {
8614 hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
8615 if (hsfsts.hsf_status.flcdone == 1)
8616 break;
8617 udelay(1);
8618 i++;
8619 } while (i < timeout);
8620 if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) {
8621 error = E1000_SUCCESS;
8622 }
8623 return error;
8624}
8625
8626/******************************************************************************
8627 * Reads a byte or word from the NVM using the ICH8 flash access registers.
8628 *
8629 * hw - The pointer to the hw structure
8630 * index - The index of the byte or word to read.
8631 * size - Size of data to read, 1=byte 2=word
8632 * data - Pointer to the word to store the value read.
8633 *****************************************************************************/
8634int32_t
8635e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
8636 uint32_t size, uint16_t* data)
8637{
8638 union ich8_hws_flash_status hsfsts;
8639 union ich8_hws_flash_ctrl hsflctl;
8640 uint32_t flash_linear_address;
8641 uint32_t flash_data = 0;
8642 int32_t error = -E1000_ERR_EEPROM;
8643 int32_t count = 0;
8644
8645 DEBUGFUNC("e1000_read_ich8_data");
8646
8647 if (size < 1 || size > 2 || data == 0x0 ||
8648 index > ICH8_FLASH_LINEAR_ADDR_MASK)
8649 return error;
8650
8651 flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) +
8652 hw->flash_base_addr;
8653
8654 do {
8655 udelay(1);
8656 /* Steps */
8657 error = e1000_ich8_cycle_init(hw);
8658 if (error != E1000_SUCCESS)
8659 break;
8660
8661 hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
8662 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8663 hsflctl.hsf_ctrl.fldbcount = size - 1;
8664 hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_READ;
8665 E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
8666
8667 /* Write the last 24 bits of index into Flash Linear address field in
8668 * Flash Address */
8669 /* TODO: TBD maybe check the index against the size of flash */
8670
8671 E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
8672
8673 error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT);
8674
 8675 /* Check if FCERR is set to 1; if so, clear it and try the whole
 8676 * sequence a few more times, else read in (shift in) the Flash Data0;
 8677 * the order is least significant byte first */
8678 if (error == E1000_SUCCESS) {
8679 flash_data = E1000_READ_ICH8_REG(hw, ICH8_FLASH_FDATA0);
8680 if (size == 1) {
8681 *data = (uint8_t)(flash_data & 0x000000FF);
8682 } else if (size == 2) {
8683 *data = (uint16_t)(flash_data & 0x0000FFFF);
8684 }
8685 break;
8686 } else {
8687 /* If we've gotten here, then things are probably completely hosed,
8688 * but if the error condition is detected, it won't hurt to give
8689 * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times.
8690 */
8691 hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
8692 if (hsfsts.hsf_status.flcerr == 1) {
8693 /* Repeat for some time before giving up. */
8694 continue;
8695 } else if (hsfsts.hsf_status.flcdone == 0) {
8696 DEBUGOUT("Timeout error - flash cycle did not complete.");
8697 break;
8698 }
8699 }
8700 } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT);
8701
8702 return error;
8703}
8704
8705/******************************************************************************
 8706 * Writes one or two bytes to the NVM using the ICH8 flash access registers.
8707 *
8708 * hw - The pointer to the hw structure
 8709 * index - The index of the byte/word to write.
 8710 * size - Size of data to write, 1=byte 2=word
8711 * data - The byte(s) to write to the NVM.
8712 *****************************************************************************/
8713int32_t
8714e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
8715 uint16_t data)
8716{
8717 union ich8_hws_flash_status hsfsts;
8718 union ich8_hws_flash_ctrl hsflctl;
8719 uint32_t flash_linear_address;
8720 uint32_t flash_data = 0;
8721 int32_t error = -E1000_ERR_EEPROM;
8722 int32_t count = 0;
8723
8724 DEBUGFUNC("e1000_write_ich8_data");
8725
8726 if (size < 1 || size > 2 || data > size * 0xff ||
8727 index > ICH8_FLASH_LINEAR_ADDR_MASK)
8728 return error;
8729
8730 flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) +
8731 hw->flash_base_addr;
8732
8733 do {
8734 udelay(1);
8735 /* Steps */
8736 error = e1000_ich8_cycle_init(hw);
8737 if (error != E1000_SUCCESS)
8738 break;
8739
8740 hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
8741 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
 8742 hsflctl.hsf_ctrl.fldbcount = size - 1;
8743 hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_WRITE;
8744 E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
8745
8746 /* Write the last 24 bits of index into Flash Linear address field in
8747 * Flash Address */
8748 E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
8749
8750 if (size == 1)
8751 flash_data = (uint32_t)data & 0x00FF;
8752 else
8753 flash_data = (uint32_t)data;
8754
8755 E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FDATA0, flash_data);
8756
 8757 /* Check if FCERR is set to 1; if so, clear it and try the whole
 8758 * sequence a few more times, else we are done */
8759 error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT);
8760 if (error == E1000_SUCCESS) {
8761 break;
8762 } else {
8763 /* If we're here, then things are most likely completely hosed,
8764 * but if the error condition is detected, it won't hurt to give
8765 * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times.
8766 */
8767 hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
8768 if (hsfsts.hsf_status.flcerr == 1) {
8769 /* Repeat for some time before giving up. */
8770 continue;
8771 } else if (hsfsts.hsf_status.flcdone == 0) {
8772 DEBUGOUT("Timeout error - flash cycle did not complete.");
8773 break;
8774 }
8775 }
8776 } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT);
8777
8778 return error;
8779}
8780
8781/******************************************************************************
8782 * Reads a single byte from the NVM using the ICH8 flash access registers.
8783 *
8784 * hw - pointer to e1000_hw structure
8785 * index - The index of the byte to read.
8786 * data - Pointer to a byte to store the value read.
8787 *****************************************************************************/
8788int32_t
8789e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
8790{
8791 int32_t status = E1000_SUCCESS;
8792 uint16_t word = 0;
8793
8794 status = e1000_read_ich8_data(hw, index, 1, &word);
8795 if (status == E1000_SUCCESS) {
8796 *data = (uint8_t)word;
8797 }
8798
8799 return status;
8800}
8801
8802/******************************************************************************
8803 * Writes a single byte to the NVM using the ICH8 flash access registers.
8804 * Performs verification by reading back the value and then going through
8805 * a retry algorithm before giving up.
8806 *
8807 * hw - pointer to e1000_hw structure
8808 * index - The index of the byte to write.
8809 * byte - The byte to write to the NVM.
8810 *****************************************************************************/
8811int32_t
8812e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
8813{
8814 int32_t error = E1000_SUCCESS;
8815 int32_t program_retries;
8816 uint8_t temp_byte;
8817
8818 e1000_write_ich8_byte(hw, index, byte);
8819 udelay(100);
8820
8821 for (program_retries = 0; program_retries < 100; program_retries++) {
8822 e1000_read_ich8_byte(hw, index, &temp_byte);
8823 if (temp_byte == byte)
8824 break;
8825 udelay(10);
8826 e1000_write_ich8_byte(hw, index, byte);
8827 udelay(100);
8828 }
8829 if (program_retries == 100)
8830 error = E1000_ERR_EEPROM;
8831
8832 return error;
8833}
8834
8835/******************************************************************************
8836 * Writes a single byte to the NVM using the ICH8 flash access registers.
8837 *
8838 * hw - pointer to e1000_hw structure
 8839 * index - The index of the byte to write.
8840 * data - The byte to write to the NVM.
8841 *****************************************************************************/
8842int32_t
8843e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
8844{
8845 int32_t status = E1000_SUCCESS;
8846 uint16_t word = (uint16_t)data;
8847
8848 status = e1000_write_ich8_data(hw, index, 1, word);
8849
8850 return status;
8851}
8852
8853/******************************************************************************
8854 * Reads a word from the NVM using the ICH8 flash access registers.
8855 *
8856 * hw - pointer to e1000_hw structure
8857 * index - The starting byte index of the word to read.
8858 * data - Pointer to a word to store the value read.
8859 *****************************************************************************/
8860int32_t
8861e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
8862{
8863 int32_t status = E1000_SUCCESS;
8864 status = e1000_read_ich8_data(hw, index, 2, data);
8865 return status;
8866}
8867
8868/******************************************************************************
8869 * Writes a word to the NVM using the ICH8 flash access registers.
8870 *
8871 * hw - pointer to e1000_hw structure
 8872 * index - The starting byte index of the word to write.
8873 * data - The word to write to the NVM.
8874 *****************************************************************************/
8875int32_t
8876e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
8877{
8878 int32_t status = E1000_SUCCESS;
8879 status = e1000_write_ich8_data(hw, index, 2, data);
8880 return status;
8881}
8882
8883/******************************************************************************
8884 * Erases the bank specified. Each bank is a 4k block. Segments are 0 based.
8885 * segment N is 4096 * N + flash_reg_addr.
8886 *
8887 * hw - pointer to e1000_hw structure
8888 * segment - 0 for first segment, 1 for second segment, etc.
8889 *****************************************************************************/
8890int32_t
8891e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment)
8892{
8893 union ich8_hws_flash_status hsfsts;
8894 union ich8_hws_flash_ctrl hsflctl;
8895 uint32_t flash_linear_address;
8896 int32_t count = 0;
8897 int32_t error = E1000_ERR_EEPROM;
8898 int32_t iteration, seg_size;
8899 int32_t sector_size;
8900 int32_t j = 0;
8901 int32_t error_flag = 0;
8902
8903 hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
8904
8905 /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */
8906 /* 00: The Hw sector is 256 bytes, hence we need to erase 16
8907 * consecutive sectors. The start index for the nth Hw sector can be
8908 * calculated as = segment * 4096 + n * 256
8909 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
8910 * The start index for the nth Hw sector can be calculated
8911 * as = segment * 4096
8912 * 10: Error condition
 8913 * 11: The Hw sector is 64K bytes, larger than the 4K requested;
 8914 * handled below as a single erase */
8915 if (hsfsts.hsf_status.berasesz == 0x0) {
8916 /* Hw sector size 256 */
8917 sector_size = seg_size = ICH8_FLASH_SEG_SIZE_256;
8918 iteration = ICH8_FLASH_SECTOR_SIZE / ICH8_FLASH_SEG_SIZE_256;
8919 } else if (hsfsts.hsf_status.berasesz == 0x1) {
8920 sector_size = seg_size = ICH8_FLASH_SEG_SIZE_4K;
8921 iteration = 1;
8922 } else if (hsfsts.hsf_status.berasesz == 0x3) {
8923 sector_size = seg_size = ICH8_FLASH_SEG_SIZE_64K;
8924 iteration = 1;
8925 } else {
8926 return error;
8927 }
8928
8929 for (j = 0; j < iteration ; j++) {
8930 do {
8931 count++;
8932 /* Steps */
8933 error = e1000_ich8_cycle_init(hw);
8934 if (error != E1000_SUCCESS) {
8935 error_flag = 1;
8936 break;
8937 }
8938
8939 /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash
8940 * Control */
8941 hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
8942 hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_ERASE;
8943 E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
8944
8945 /* Write the last 24 bits of an index within the block into Flash
8946 * Linear address field in Flash Address. This probably needs to
8947 * be calculated here based off the on-chip segment size and the
8948 * software segment size assumed (4K) */
8949 /* TBD */
8950 flash_linear_address = segment * sector_size + j * seg_size;
8951 flash_linear_address &= ICH8_FLASH_LINEAR_ADDR_MASK;
8952 flash_linear_address += hw->flash_base_addr;
8953
8954 E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
8955
8956 error = e1000_ich8_flash_cycle(hw, 1000000);
 8957			/* Check if FCERR is set. If it is, clear it and try the whole
 8958			 * sequence a few more times; otherwise we are done. */
8959 if (error == E1000_SUCCESS) {
8960 break;
8961 } else {
8962 hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
8963 if (hsfsts.hsf_status.flcerr == 1) {
8964 /* repeat for some time before giving up */
8965 continue;
8966 } else if (hsfsts.hsf_status.flcdone == 0) {
8967 error_flag = 1;
8968 break;
8969 }
8970 }
8971 } while ((count < ICH8_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
8972 if (error_flag == 1)
8973 break;
8974 }
8975 if (error_flag != 1)
8976 error = E1000_SUCCESS;
8977 return error;
8978}
8979
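/* Worked example of the erase addressing above (assumes the 4-KB erase-block
 * case, berasesz == 01, so sector_size == seg_size == ICH8_FLASH_SEG_SIZE_4K
 * and iteration == 1; hw->flash_base_addr is ignored). Standalone userspace
 * sketch, not driver code. */
#include <stdint.h>
#include <stdio.h>

#define ICH8_FLASH_SEG_SIZE_4K      4096
#define ICH8_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF

int main(void)
{
	uint32_t segment = 3;
	/* single iteration (j == 0): linear address is just segment * 4096 */
	uint32_t addr = (segment * ICH8_FLASH_SEG_SIZE_4K) & ICH8_FLASH_LINEAR_ADDR_MASK;

	printf("segment %u -> flash linear address 0x%05X\n", segment, (unsigned)addr); /* 0x03000 */
	return 0;
}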
8980/******************************************************************************
8981 *
8982 * Reverse duplex setting without breaking the link.
8983 *
8984 * hw: Struct containing variables accessed by shared code
8985 *
8986 *****************************************************************************/
8987int32_t
8988e1000_duplex_reversal(struct e1000_hw *hw)
8989{
8990 int32_t ret_val;
8991 uint16_t phy_data;
8992
8993 if (hw->phy_type != e1000_phy_igp_3)
8994 return E1000_SUCCESS;
8995
8996 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
8997 if (ret_val)
8998 return ret_val;
8999
9000 phy_data ^= MII_CR_FULL_DUPLEX;
9001
9002 ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
9003 if (ret_val)
9004 return ret_val;
9005
9006 ret_val = e1000_read_phy_reg(hw, IGP3E1000_PHY_MISC_CTRL, &phy_data);
9007 if (ret_val)
9008 return ret_val;
9009
9010 phy_data |= IGP3_PHY_MISC_DUPLEX_MANUAL_SET;
9011 ret_val = e1000_write_phy_reg(hw, IGP3E1000_PHY_MISC_CTRL, phy_data);
9012
9013 return ret_val;
9014}
9015
9016int32_t
9017e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
9018 uint32_t cnf_base_addr, uint32_t cnf_size)
9019{
9020 uint32_t ret_val = E1000_SUCCESS;
9021 uint16_t word_addr, reg_data, reg_addr;
9022 uint16_t i;
9023
9024 /* cnf_base_addr is in DWORD */
9025 word_addr = (uint16_t)(cnf_base_addr << 1);
9026
 9027	/* cnf_size is given in DWORDs */
9028 for (i = 0; i < cnf_size; i++) {
9029 ret_val = e1000_read_eeprom(hw, (word_addr + i*2), 1, &reg_data);
9030 if (ret_val)
9031 return ret_val;
9032
9033 ret_val = e1000_read_eeprom(hw, (word_addr + i*2 + 1), 1, &reg_addr);
9034 if (ret_val)
9035 return ret_val;
9036
9037 ret_val = e1000_get_software_flag(hw);
9038 if (ret_val != E1000_SUCCESS)
9039 return ret_val;
9040
9041 ret_val = e1000_write_phy_reg_ex(hw, (uint32_t)reg_addr, reg_data);
9042
9043 e1000_release_software_flag(hw);
9044 }
9045
9046 return ret_val;
9047}
9048
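/* Sketch of the NVM config-region layout walked by the loop above (assumed:
 * each entry is one DWORD = a data word followed by a PHY register address
 * word). Standalone userspace illustration; the base address is made up. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cnf_base_addr = 0x20;                       /* in DWORDs (example) */
	uint16_t word_addr = (uint16_t)(cnf_base_addr << 1); /* DWORD index -> word index */
	uint16_t i;

	for (i = 0; i < 3; i++)
		printf("entry %u: data word 0x%04X, reg-addr word 0x%04X\n",
		       i, (unsigned)(word_addr + i * 2), (unsigned)(word_addr + i * 2 + 1));
	return 0;
}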
9049
9050int32_t
9051e1000_init_lcd_from_nvm(struct e1000_hw *hw)
9052{
9053 uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop;
9054
9055 if (hw->phy_type != e1000_phy_igp_3)
9056 return E1000_SUCCESS;
9057
 9058	/* Check if SW needs to configure the PHY */
9059 reg_data = E1000_READ_REG(hw, FEXTNVM);
9060 if (!(reg_data & FEXTNVM_SW_CONFIG))
9061 return E1000_SUCCESS;
9062
 9063	/* Wait for basic configuration to complete before proceeding */
9064 loop = 0;
9065 do {
9066 reg_data = E1000_READ_REG(hw, STATUS) & E1000_STATUS_LAN_INIT_DONE;
9067 udelay(100);
9068 loop++;
9069 } while ((!reg_data) && (loop < 50));
9070
9071 /* Clear the Init Done bit for the next init event */
9072 reg_data = E1000_READ_REG(hw, STATUS);
9073 reg_data &= ~E1000_STATUS_LAN_INIT_DONE;
9074 E1000_WRITE_REG(hw, STATUS, reg_data);
9075
9076 /* Make sure HW does not configure LCD from PHY extended configuration
9077 before SW configuration */
9078 reg_data = E1000_READ_REG(hw, EXTCNF_CTRL);
9079 if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) {
9080 reg_data = E1000_READ_REG(hw, EXTCNF_SIZE);
9081 cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH;
9082 cnf_size >>= 16;
9083 if (cnf_size) {
9084 reg_data = E1000_READ_REG(hw, EXTCNF_CTRL);
9085 cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER;
9086 /* cnf_base_addr is in DWORD */
9087 cnf_base_addr >>= 16;
9088
9089 /* Configure LCD from extended configuration region. */
9090 ret_val = e1000_init_lcd_from_nvm_config_region(hw, cnf_base_addr,
9091 cnf_size);
9092 if (ret_val)
9093 return ret_val;
9094 }
9095 }
9096
9097 return E1000_SUCCESS;
9098}
9099
9100
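/* Sketch of the EXTCNF mask-and-shift used above (field masks as defined in
 * e1000_hw.h after this patch; the register values are made-up examples). */
#include <stdint.h>
#include <stdio.h>

#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000
#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000

int main(void)
{
	uint32_t extcnf_size = 0x00120000;  /* example EXTCNF_SIZE value */
	uint32_t extcnf_ctrl = 0x00400000;  /* example EXTCNF_CTRL value */
	uint32_t cnf_size = (extcnf_size & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH) >> 16;
	uint32_t cnf_base = (extcnf_ctrl & E1000_EXTCNF_CTRL_EXT_CNF_POINTER) >> 16;

	printf("cnf_size = %u DWORDs, cnf_base_addr = 0x%03X DWORDs\n",
	       (unsigned)cnf_size, (unsigned)cnf_base);
	return 0;
}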
7559 9101
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 467c9ed944f8..f9341e3276b3 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -62,6 +62,7 @@ typedef enum {
62 e1000_82572, 62 e1000_82572,
63 e1000_82573, 63 e1000_82573,
64 e1000_80003es2lan, 64 e1000_80003es2lan,
65 e1000_ich8lan,
65 e1000_num_macs 66 e1000_num_macs
66} e1000_mac_type; 67} e1000_mac_type;
67 68
@@ -70,6 +71,7 @@ typedef enum {
70 e1000_eeprom_spi, 71 e1000_eeprom_spi,
71 e1000_eeprom_microwire, 72 e1000_eeprom_microwire,
72 e1000_eeprom_flash, 73 e1000_eeprom_flash,
74 e1000_eeprom_ich8,
73 e1000_eeprom_none, /* No NVM support */ 75 e1000_eeprom_none, /* No NVM support */
74 e1000_num_eeprom_types 76 e1000_num_eeprom_types
75} e1000_eeprom_type; 77} e1000_eeprom_type;
@@ -98,6 +100,11 @@ typedef enum {
98 e1000_fc_default = 0xFF 100 e1000_fc_default = 0xFF
99} e1000_fc_type; 101} e1000_fc_type;
100 102
103struct e1000_shadow_ram {
104 uint16_t eeprom_word;
105 boolean_t modified;
106};
107
101/* PCI bus types */ 108/* PCI bus types */
102typedef enum { 109typedef enum {
103 e1000_bus_type_unknown = 0, 110 e1000_bus_type_unknown = 0,
@@ -218,6 +225,8 @@ typedef enum {
218 e1000_phy_igp, 225 e1000_phy_igp,
219 e1000_phy_igp_2, 226 e1000_phy_igp_2,
220 e1000_phy_gg82563, 227 e1000_phy_gg82563,
228 e1000_phy_igp_3,
229 e1000_phy_ife,
221 e1000_phy_undefined = 0xFF 230 e1000_phy_undefined = 0xFF
222} e1000_phy_type; 231} e1000_phy_type;
223 232
@@ -313,6 +322,10 @@ int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy
313int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); 322int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
314int32_t e1000_phy_hw_reset(struct e1000_hw *hw); 323int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
315int32_t e1000_phy_reset(struct e1000_hw *hw); 324int32_t e1000_phy_reset(struct e1000_hw *hw);
325void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
326int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
327int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size);
328int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
316int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); 329int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
317int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); 330int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
318int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data); 331int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data);
@@ -331,6 +344,7 @@ uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
331#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ 344#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
332#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ 345#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
333#define E1000_MNG_IAMT_MODE 0x3 346#define E1000_MNG_IAMT_MODE 0x3
347#define E1000_MNG_ICH_IAMT_MODE 0x2
334#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ 348#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
335 349
336#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ 350#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
@@ -388,6 +402,8 @@ int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num);
388int32_t e1000_read_mac_addr(struct e1000_hw * hw); 402int32_t e1000_read_mac_addr(struct e1000_hw * hw);
389int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask); 403int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
390void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); 404void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
405void e1000_release_software_flag(struct e1000_hw *hw);
406int32_t e1000_get_software_flag(struct e1000_hw *hw);
391 407
392/* Filters (multicast, vlan, receive) */ 408/* Filters (multicast, vlan, receive) */
393void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count); 409void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
@@ -401,6 +417,7 @@ int32_t e1000_setup_led(struct e1000_hw *hw);
401int32_t e1000_cleanup_led(struct e1000_hw *hw); 417int32_t e1000_cleanup_led(struct e1000_hw *hw);
402int32_t e1000_led_on(struct e1000_hw *hw); 418int32_t e1000_led_on(struct e1000_hw *hw);
403int32_t e1000_led_off(struct e1000_hw *hw); 419int32_t e1000_led_off(struct e1000_hw *hw);
420int32_t e1000_blink_led_start(struct e1000_hw *hw);
404 421
405/* Adaptive IFS Functions */ 422/* Adaptive IFS Functions */
406 423
@@ -422,6 +439,29 @@ int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
422int32_t e1000_get_software_semaphore(struct e1000_hw *hw); 439int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
423void e1000_release_software_semaphore(struct e1000_hw *hw); 440void e1000_release_software_semaphore(struct e1000_hw *hw);
424int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); 441int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
442int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop);
443
444int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index,
445 uint8_t *data);
446int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
447 uint8_t byte);
448int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
449 uint8_t byte);
450int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index,
451 uint16_t *data);
452int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
453 uint32_t size, uint16_t *data);
454int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
455 uint16_t words, uint16_t *data);
456int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
457 uint16_t words, uint16_t *data);
458int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment);
459
460
461#define E1000_READ_REG_IO(a, reg) \
462 e1000_read_reg_io((a), E1000_##reg)
463#define E1000_WRITE_REG_IO(a, reg, val) \
464 e1000_write_reg_io((a), E1000_##reg, val)
425 465
426/* PCI Device IDs */ 466/* PCI Device IDs */
427#define E1000_DEV_ID_82542 0x1000 467#define E1000_DEV_ID_82542 0x1000
@@ -446,6 +486,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
446#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D 486#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
447#define E1000_DEV_ID_82541EI 0x1013 487#define E1000_DEV_ID_82541EI 0x1013
448#define E1000_DEV_ID_82541EI_MOBILE 0x1018 488#define E1000_DEV_ID_82541EI_MOBILE 0x1018
489#define E1000_DEV_ID_82541ER_LOM 0x1014
449#define E1000_DEV_ID_82541ER 0x1078 490#define E1000_DEV_ID_82541ER 0x1078
450#define E1000_DEV_ID_82547GI 0x1075 491#define E1000_DEV_ID_82547GI 0x1075
451#define E1000_DEV_ID_82541GI 0x1076 492#define E1000_DEV_ID_82541GI 0x1076
@@ -457,18 +498,28 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
457#define E1000_DEV_ID_82546GB_PCIE 0x108A 498#define E1000_DEV_ID_82546GB_PCIE 0x108A
458#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 499#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
459#define E1000_DEV_ID_82547EI 0x1019 500#define E1000_DEV_ID_82547EI 0x1019
501#define E1000_DEV_ID_82547EI_MOBILE 0x101A
460#define E1000_DEV_ID_82571EB_COPPER 0x105E 502#define E1000_DEV_ID_82571EB_COPPER 0x105E
461#define E1000_DEV_ID_82571EB_FIBER 0x105F 503#define E1000_DEV_ID_82571EB_FIBER 0x105F
462#define E1000_DEV_ID_82571EB_SERDES 0x1060 504#define E1000_DEV_ID_82571EB_SERDES 0x1060
463#define E1000_DEV_ID_82572EI_COPPER 0x107D 505#define E1000_DEV_ID_82572EI_COPPER 0x107D
464#define E1000_DEV_ID_82572EI_FIBER 0x107E 506#define E1000_DEV_ID_82572EI_FIBER 0x107E
465#define E1000_DEV_ID_82572EI_SERDES 0x107F 507#define E1000_DEV_ID_82572EI_SERDES 0x107F
508#define E1000_DEV_ID_82572EI 0x10B9
466#define E1000_DEV_ID_82573E 0x108B 509#define E1000_DEV_ID_82573E 0x108B
467#define E1000_DEV_ID_82573E_IAMT 0x108C 510#define E1000_DEV_ID_82573E_IAMT 0x108C
468#define E1000_DEV_ID_82573L 0x109A 511#define E1000_DEV_ID_82573L 0x109A
469#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 512#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
470#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 513#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
471#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 514#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
515#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
516#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
517
518#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
519#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
520#define E1000_DEV_ID_ICH8_IGP_C 0x104B
521#define E1000_DEV_ID_ICH8_IFE 0x104C
522#define E1000_DEV_ID_ICH8_IGP_M 0x104D
472 523
473 524
474#define NODE_ADDRESS_SIZE 6 525#define NODE_ADDRESS_SIZE 6
@@ -539,6 +590,14 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
539 E1000_IMS_RXSEQ | \ 590 E1000_IMS_RXSEQ | \
540 E1000_IMS_LSC) 591 E1000_IMS_LSC)
541 592
593/* Additional interrupts need to be handled for e1000_ich8lan:
594 DSW = The FW changed the status of the DISSW bit in FWSM
595 PHYINT = The LAN connected device generates an interrupt
596 EPRST = Manageability reset event */
597#define IMS_ICH8LAN_ENABLE_MASK (\
598 E1000_IMS_DSW | \
599 E1000_IMS_PHYINT | \
600 E1000_IMS_EPRST)
542 601
543/* Number of high/low register pairs in the RAR. The RAR (Receive Address 602/* Number of high/low register pairs in the RAR. The RAR (Receive Address
544 * Registers) holds the directed and multicast addresses that we monitor. We 603 * Registers) holds the directed and multicast addresses that we monitor. We
@@ -546,6 +605,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
546 * E1000_RAR_ENTRIES - 1 multicast addresses. 605 * E1000_RAR_ENTRIES - 1 multicast addresses.
547 */ 606 */
548#define E1000_RAR_ENTRIES 15 607#define E1000_RAR_ENTRIES 15
608#define E1000_RAR_ENTRIES_ICH8LAN 7
549 609
550#define MIN_NUMBER_OF_DESCRIPTORS 8 610#define MIN_NUMBER_OF_DESCRIPTORS 8
551#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 611#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8
@@ -767,6 +827,9 @@ struct e1000_data_desc {
767#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ 827#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
768#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ 828#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
769 829
830#define E1000_NUM_UNICAST_ICH8LAN 7
831#define E1000_MC_TBL_SIZE_ICH8LAN 32
832
770 833
771/* Receive Address Register */ 834/* Receive Address Register */
772struct e1000_rar { 835struct e1000_rar {
@@ -776,6 +839,7 @@ struct e1000_rar {
776 839
777/* Number of entries in the Multicast Table Array (MTA). */ 840/* Number of entries in the Multicast Table Array (MTA). */
778#define E1000_NUM_MTA_REGISTERS 128 841#define E1000_NUM_MTA_REGISTERS 128
842#define E1000_NUM_MTA_REGISTERS_ICH8LAN 32
779 843
780/* IPv4 Address Table Entry */ 844/* IPv4 Address Table Entry */
781struct e1000_ipv4_at_entry { 845struct e1000_ipv4_at_entry {
@@ -786,6 +850,7 @@ struct e1000_ipv4_at_entry {
786/* Four wakeup IP addresses are supported */ 850/* Four wakeup IP addresses are supported */
787#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 851#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4
788#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 852#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX
853#define E1000_IP4AT_SIZE_ICH8LAN 3
789#define E1000_IP6AT_SIZE 1 854#define E1000_IP6AT_SIZE 1
790 855
791/* IPv6 Address Table Entry */ 856/* IPv6 Address Table Entry */
@@ -844,6 +909,7 @@ struct e1000_ffvt_entry {
844#define E1000_FLA 0x0001C /* Flash Access - RW */ 909#define E1000_FLA 0x0001C /* Flash Access - RW */
845#define E1000_MDIC 0x00020 /* MDI Control - RW */ 910#define E1000_MDIC 0x00020 /* MDI Control - RW */
846#define E1000_SCTL 0x00024 /* SerDes Control - RW */ 911#define E1000_SCTL 0x00024 /* SerDes Control - RW */
912#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */
847#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ 913#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
848#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ 914#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
849#define E1000_FCT 0x00030 /* Flow Control Type - RW */ 915#define E1000_FCT 0x00030 /* Flow Control Type - RW */
@@ -872,6 +938,8 @@ struct e1000_ffvt_entry {
872#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ 938#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
873#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ 939#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
874#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ 940#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
941#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
942#define FEXTNVM_SW_CONFIG 0x0001
875#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ 943#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
876#define E1000_PBS 0x01008 /* Packet Buffer Size */ 944#define E1000_PBS 0x01008 /* Packet Buffer Size */
877#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ 945#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
@@ -899,11 +967,13 @@ struct e1000_ffvt_entry {
899#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ 967#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */
900#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ 968#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */
901#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ 969#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */
902#define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */ 970#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */
971#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */
903#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ 972#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
904#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ 973#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
905#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ 974#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
906#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ 975#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
976#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
907#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ 977#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
908#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ 978#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
909#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ 979#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
@@ -1050,6 +1120,7 @@ struct e1000_ffvt_entry {
1050#define E1000_82542_FLA E1000_FLA 1120#define E1000_82542_FLA E1000_FLA
1051#define E1000_82542_MDIC E1000_MDIC 1121#define E1000_82542_MDIC E1000_MDIC
1052#define E1000_82542_SCTL E1000_SCTL 1122#define E1000_82542_SCTL E1000_SCTL
1123#define E1000_82542_FEXTNVM E1000_FEXTNVM
1053#define E1000_82542_FCAL E1000_FCAL 1124#define E1000_82542_FCAL E1000_FCAL
1054#define E1000_82542_FCAH E1000_FCAH 1125#define E1000_82542_FCAH E1000_FCAH
1055#define E1000_82542_FCT E1000_FCT 1126#define E1000_82542_FCT E1000_FCT
@@ -1073,6 +1144,19 @@ struct e1000_ffvt_entry {
1073#define E1000_82542_RDLEN0 E1000_82542_RDLEN 1144#define E1000_82542_RDLEN0 E1000_82542_RDLEN
1074#define E1000_82542_RDH0 E1000_82542_RDH 1145#define E1000_82542_RDH0 E1000_82542_RDH
1075#define E1000_82542_RDT0 E1000_82542_RDT 1146#define E1000_82542_RDT0 E1000_82542_RDT
1147#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication
1148 * RX Control - RW */
1149#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
1150#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */
1151#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */
1152#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */
1153#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */
1154#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */
1155#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */
1156#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */
1157#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */
1158#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */
1159#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */
1076#define E1000_82542_RDTR1 0x00130 1160#define E1000_82542_RDTR1 0x00130
1077#define E1000_82542_RDBAL1 0x00138 1161#define E1000_82542_RDBAL1 0x00138
1078#define E1000_82542_RDBAH1 0x0013C 1162#define E1000_82542_RDBAH1 0x0013C
@@ -1110,11 +1194,14 @@ struct e1000_ffvt_entry {
1110#define E1000_82542_FLOP E1000_FLOP 1194#define E1000_82542_FLOP E1000_FLOP
1111#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL 1195#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL
1112#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE 1196#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE
1197#define E1000_82542_PHY_CTRL E1000_PHY_CTRL
1113#define E1000_82542_ERT E1000_ERT 1198#define E1000_82542_ERT E1000_ERT
1114#define E1000_82542_RXDCTL E1000_RXDCTL 1199#define E1000_82542_RXDCTL E1000_RXDCTL
1200#define E1000_82542_RXDCTL1 E1000_RXDCTL1
1115#define E1000_82542_RADV E1000_RADV 1201#define E1000_82542_RADV E1000_RADV
1116#define E1000_82542_RSRPD E1000_RSRPD 1202#define E1000_82542_RSRPD E1000_RSRPD
1117#define E1000_82542_TXDMAC E1000_TXDMAC 1203#define E1000_82542_TXDMAC E1000_TXDMAC
1204#define E1000_82542_KABGTXD E1000_KABGTXD
1118#define E1000_82542_TDFHS E1000_TDFHS 1205#define E1000_82542_TDFHS E1000_TDFHS
1119#define E1000_82542_TDFTS E1000_TDFTS 1206#define E1000_82542_TDFTS E1000_TDFTS
1120#define E1000_82542_TDFPC E1000_TDFPC 1207#define E1000_82542_TDFPC E1000_TDFPC
@@ -1310,13 +1397,16 @@ struct e1000_hw_stats {
1310 1397
1311/* Structure containing variables used by the shared code (e1000_hw.c) */ 1398/* Structure containing variables used by the shared code (e1000_hw.c) */
1312struct e1000_hw { 1399struct e1000_hw {
1313 uint8_t __iomem *hw_addr; 1400 uint8_t *hw_addr;
1314 uint8_t *flash_address; 1401 uint8_t *flash_address;
1315 e1000_mac_type mac_type; 1402 e1000_mac_type mac_type;
1316 e1000_phy_type phy_type; 1403 e1000_phy_type phy_type;
1317 uint32_t phy_init_script; 1404 uint32_t phy_init_script;
1318 e1000_media_type media_type; 1405 e1000_media_type media_type;
1319 void *back; 1406 void *back;
1407 struct e1000_shadow_ram *eeprom_shadow_ram;
1408 uint32_t flash_bank_size;
1409 uint32_t flash_base_addr;
1320 e1000_fc_type fc; 1410 e1000_fc_type fc;
1321 e1000_bus_speed bus_speed; 1411 e1000_bus_speed bus_speed;
1322 e1000_bus_width bus_width; 1412 e1000_bus_width bus_width;
@@ -1328,6 +1418,7 @@ struct e1000_hw {
1328 uint32_t asf_firmware_present; 1418 uint32_t asf_firmware_present;
1329 uint32_t eeprom_semaphore_present; 1419 uint32_t eeprom_semaphore_present;
1330 uint32_t swfw_sync_present; 1420 uint32_t swfw_sync_present;
1421 uint32_t swfwhw_semaphore_present;
1331 unsigned long io_base; 1422 unsigned long io_base;
1332 uint32_t phy_id; 1423 uint32_t phy_id;
1333 uint32_t phy_revision; 1424 uint32_t phy_revision;
@@ -1387,6 +1478,7 @@ struct e1000_hw {
1387 boolean_t in_ifs_mode; 1478 boolean_t in_ifs_mode;
1388 boolean_t mng_reg_access_disabled; 1479 boolean_t mng_reg_access_disabled;
1389 boolean_t leave_av_bit_off; 1480 boolean_t leave_av_bit_off;
1481 boolean_t kmrn_lock_loss_workaround_disabled;
1390}; 1482};
1391 1483
1392 1484
@@ -1435,6 +1527,7 @@ struct e1000_hw {
1435#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ 1527#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */
1436#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ 1528#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
1437#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ 1529#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
1530#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */
1438 1531
1439/* Device Status */ 1532/* Device Status */
1440#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ 1533#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
@@ -1449,6 +1542,8 @@ struct e1000_hw {
1449#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ 1542#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
1450#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ 1543#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
1451#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ 1544#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
1545#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion
1546 by EEPROM/Flash */
1452#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ 1547#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
1453#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ 1548#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */
1454#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ 1549#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
@@ -1506,6 +1601,10 @@ struct e1000_hw {
1506#define E1000_STM_OPCODE 0xDB00 1601#define E1000_STM_OPCODE 0xDB00
1507#define E1000_HICR_FW_RESET 0xC0 1602#define E1000_HICR_FW_RESET 0xC0
1508 1603
1604#define E1000_SHADOW_RAM_WORDS 2048
1605#define E1000_ICH8_NVM_SIG_WORD 0x13
1606#define E1000_ICH8_NVM_SIG_MASK 0xC0
1607
1509/* EEPROM Read */ 1608/* EEPROM Read */
1510#define E1000_EERD_START 0x00000001 /* Start Read */ 1609#define E1000_EERD_START 0x00000001 /* Start Read */
1511#define E1000_EERD_DONE 0x00000010 /* Read Done */ 1610#define E1000_EERD_DONE 0x00000010 /* Read Done */
@@ -1551,7 +1650,6 @@ struct e1000_hw {
1551#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 1650#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
1552#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 1651#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
1553#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 1652#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
1554#define E1000_CTRL_EXT_CANC 0x04000000 /* Interrupt delay cancellation */
1555#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ 1653#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
1556#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ 1654#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
1557#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ 1655#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
@@ -1591,12 +1689,31 @@ struct e1000_hw {
1591#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 1689#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800
1592 1690
1593/* In-Band Control */ 1691/* In-Band Control */
1692#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500
1594#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 1693#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010
1595 1694
1596/* Half-Duplex Control */ 1695/* Half-Duplex Control */
1597#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 1696#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004
1598#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 1697#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000
1599 1698
1699#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E
1700
1701#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000
1702#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000
1703
1704#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000
1705#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000
1706#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003
1707
1708#define E1000_KABGTXD_BGSQLBIAS 0x00050000
1709
1710#define E1000_PHY_CTRL_SPD_EN 0x00000001
1711#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
1712#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
1713#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
1714#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
1715#define E1000_PHY_CTRL_B2B_EN 0x00000080
1716
1600/* LED Control */ 1717/* LED Control */
1601#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F 1718#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
1602#define E1000_LEDCTL_LED0_MODE_SHIFT 0 1719#define E1000_LEDCTL_LED0_MODE_SHIFT 0
@@ -1666,6 +1783,9 @@ struct e1000_hw {
1666#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ 1783#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
1667#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ 1784#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
1668#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ 1785#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
1786#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */
1787#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */
 1788#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
1669 1789
1670/* Interrupt Cause Set */ 1790/* Interrupt Cause Set */
1671#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 1791#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1692,6 +1812,9 @@ struct e1000_hw {
1692#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ 1812#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
1693#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ 1813#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
1694#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ 1814#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
1815#define E1000_ICS_DSW E1000_ICR_DSW
1816#define E1000_ICS_PHYINT E1000_ICR_PHYINT
1817#define E1000_ICS_EPRST E1000_ICR_EPRST
1695 1818
1696/* Interrupt Mask Set */ 1819/* Interrupt Mask Set */
1697#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 1820#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1718,6 +1841,9 @@ struct e1000_hw {
1718#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ 1841#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
1719#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ 1842#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
1720#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ 1843#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
1844#define E1000_IMS_DSW E1000_ICR_DSW
1845#define E1000_IMS_PHYINT E1000_ICR_PHYINT
1846#define E1000_IMS_EPRST E1000_ICR_EPRST
1721 1847
1722/* Interrupt Mask Clear */ 1848/* Interrupt Mask Clear */
1723#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 1849#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1744,6 +1870,9 @@ struct e1000_hw {
1744#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ 1870#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
1745#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ 1871#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
1746#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ 1872#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
1873#define E1000_IMC_DSW E1000_ICR_DSW
1874#define E1000_IMC_PHYINT E1000_ICR_PHYINT
1875#define E1000_IMC_EPRST E1000_ICR_EPRST
1747 1876
1748/* Receive Control */ 1877/* Receive Control */
1749#define E1000_RCTL_RST 0x00000001 /* Software reset */ 1878#define E1000_RCTL_RST 0x00000001 /* Software reset */
@@ -1918,9 +2047,10 @@ struct e1000_hw {
1918#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 2047#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
1919#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 2048#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
1920#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 2049#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
1921#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00040000 2050#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
1922#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 2051#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000
1923#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 2052#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
2053#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
1924 2054
1925/* Definitions for power management and wakeup registers */ 2055/* Definitions for power management and wakeup registers */
1926/* Wake Up Control */ 2056/* Wake Up Control */
@@ -2010,6 +2140,15 @@ struct e1000_hw {
2010#define E1000_FWSM_MODE_SHIFT 1 2140#define E1000_FWSM_MODE_SHIFT 1
2011#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ 2141#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */
2012 2142
2143#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */
2144#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */
2145#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */
2146#define E1000_FWSM_SKUEL_SHIFT 29
2147#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */
2148#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */
2149#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
 2150#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */
2151
2013/* FFLT Debug Register */ 2152/* FFLT Debug Register */
2014#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ 2153#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */
2015 2154
@@ -2082,6 +2221,8 @@ struct e1000_host_command_info {
2082 E1000_GCR_TXDSCW_NO_SNOOP | \ 2221 E1000_GCR_TXDSCW_NO_SNOOP | \
2083 E1000_GCR_TXDSCR_NO_SNOOP) 2222 E1000_GCR_TXDSCR_NO_SNOOP)
2084 2223
2224#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL
2225
2085#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 2226#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
2086/* Function Active and Power State to MNG */ 2227/* Function Active and Power State to MNG */
2087#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 2228#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
@@ -2140,8 +2281,10 @@ struct e1000_host_command_info {
2140#define EEPROM_PHY_CLASS_WORD 0x0007 2281#define EEPROM_PHY_CLASS_WORD 0x0007
2141#define EEPROM_INIT_CONTROL1_REG 0x000A 2282#define EEPROM_INIT_CONTROL1_REG 0x000A
2142#define EEPROM_INIT_CONTROL2_REG 0x000F 2283#define EEPROM_INIT_CONTROL2_REG 0x000F
2284#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010
2143#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 2285#define EEPROM_INIT_CONTROL3_PORT_B 0x0014
2144#define EEPROM_INIT_3GIO_3 0x001A 2286#define EEPROM_INIT_3GIO_3 0x001A
2287#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020
2145#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 2288#define EEPROM_INIT_CONTROL3_PORT_A 0x0024
2146#define EEPROM_CFG 0x0012 2289#define EEPROM_CFG 0x0012
2147#define EEPROM_FLASH_VERSION 0x0032 2290#define EEPROM_FLASH_VERSION 0x0032
@@ -2153,10 +2296,16 @@ struct e1000_host_command_info {
2153/* Word definitions for ID LED Settings */ 2296/* Word definitions for ID LED Settings */
2154#define ID_LED_RESERVED_0000 0x0000 2297#define ID_LED_RESERVED_0000 0x0000
2155#define ID_LED_RESERVED_FFFF 0xFFFF 2298#define ID_LED_RESERVED_FFFF 0xFFFF
2299#define ID_LED_RESERVED_82573 0xF746
2300#define ID_LED_DEFAULT_82573 0x1811
2156#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ 2301#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
2157 (ID_LED_OFF1_OFF2 << 8) | \ 2302 (ID_LED_OFF1_OFF2 << 8) | \
2158 (ID_LED_DEF1_DEF2 << 4) | \ 2303 (ID_LED_DEF1_DEF2 << 4) | \
2159 (ID_LED_DEF1_DEF2)) 2304 (ID_LED_DEF1_DEF2))
2305#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
2306 (ID_LED_DEF1_OFF2 << 8) | \
2307 (ID_LED_DEF1_ON2 << 4) | \
2308 (ID_LED_DEF1_DEF2))
2160#define ID_LED_DEF1_DEF2 0x1 2309#define ID_LED_DEF1_DEF2 0x1
2161#define ID_LED_DEF1_ON2 0x2 2310#define ID_LED_DEF1_ON2 0x2
2162#define ID_LED_DEF1_OFF2 0x3 2311#define ID_LED_DEF1_OFF2 0x3
@@ -2191,6 +2340,11 @@ struct e1000_host_command_info {
2191#define EEPROM_WORD0F_ASM_DIR 0x2000 2340#define EEPROM_WORD0F_ASM_DIR 0x2000
2192#define EEPROM_WORD0F_ANE 0x0800 2341#define EEPROM_WORD0F_ANE 0x0800
2193#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 2342#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0
2343#define EEPROM_WORD0F_LPLU 0x0001
2344
2345/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */
2346#define EEPROM_WORD1020_GIGA_DISABLE 0x0010
2347#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008
2194 2348
2195/* Mask bits for fields in Word 0x1a of the EEPROM */ 2349/* Mask bits for fields in Word 0x1a of the EEPROM */
2196#define EEPROM_WORD1A_ASPM_MASK 0x000C 2350#define EEPROM_WORD1A_ASPM_MASK 0x000C
@@ -2265,23 +2419,29 @@ struct e1000_host_command_info {
2265#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 2419#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010
2266#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 2420#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
2267#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 2421#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040
2268#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x1FFF0000 2422#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000
2269 2423
2270#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF 2424#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF
2271#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 2425#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00
2272#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 2426#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000
2427#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
2428#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
2273 2429
2274/* PBA constants */ 2430/* PBA constants */
2431#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */
2275#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ 2432#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */
2276#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ 2433#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
2277#define E1000_PBA_22K 0x0016 2434#define E1000_PBA_22K 0x0016
2278#define E1000_PBA_24K 0x0018 2435#define E1000_PBA_24K 0x0018
2279#define E1000_PBA_30K 0x001E 2436#define E1000_PBA_30K 0x001E
2280#define E1000_PBA_32K 0x0020 2437#define E1000_PBA_32K 0x0020
2438#define E1000_PBA_34K 0x0022
2281#define E1000_PBA_38K 0x0026 2439#define E1000_PBA_38K 0x0026
2282#define E1000_PBA_40K 0x0028 2440#define E1000_PBA_40K 0x0028
2283#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ 2441#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */
2284 2442
2443#define E1000_PBS_16K E1000_PBA_16K
2444
2285/* Flow Control Constants */ 2445/* Flow Control Constants */
2286#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 2446#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
2287#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 2447#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
@@ -2336,7 +2496,7 @@ struct e1000_host_command_info {
2336/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ 2496/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */
2337#define AUTO_READ_DONE_TIMEOUT 10 2497#define AUTO_READ_DONE_TIMEOUT 10
2338/* Number of milliseconds we wait for PHY configuration done after MAC reset */ 2498/* Number of milliseconds we wait for PHY configuration done after MAC reset */
2339#define PHY_CFG_TIMEOUT 40 2499#define PHY_CFG_TIMEOUT 100
2340 2500
2341#define E1000_TX_BUFFER_SIZE ((uint32_t)1514) 2501#define E1000_TX_BUFFER_SIZE ((uint32_t)1514)
2342 2502
@@ -2764,6 +2924,17 @@ struct e1000_host_command_info {
2764#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ 2924#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
2765#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ 2925#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */
2766 2926
2927/* M88EC018 Rev 2 specific DownShift settings */
2928#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
2929#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000
2930#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200
2931#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400
2932#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600
2933#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
2934#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00
2935#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00
2936#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00
2937
2767/* IGP01E1000 Specific Port Config Register - R/W */ 2938/* IGP01E1000 Specific Port Config Register - R/W */
2768#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 2939#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010
2769#define IGP01E1000_PSCFR_PRE_EN 0x0020 2940#define IGP01E1000_PSCFR_PRE_EN 0x0020
@@ -2990,6 +3161,221 @@ struct e1000_host_command_info {
2990#define L1LXT971A_PHY_ID 0x001378E0 3161#define L1LXT971A_PHY_ID 0x001378E0
2991#define GG82563_E_PHY_ID 0x01410CA0 3162#define GG82563_E_PHY_ID 0x01410CA0
2992 3163
3164
3165/* Bits...
3166 * 15-5: page
3167 * 4-0: register offset
3168 */
3169#define PHY_PAGE_SHIFT 5
3170#define PHY_REG(page, reg) \
3171 (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
3172
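/* Worked example of the paged-register encoding above: IGP3_PHY_PORT_CTRL is
 * PHY_REG(769, 17) = (769 << 5) | 17 = 0x6031. Standalone sketch; the value
 * of MAX_PHY_REG_ADDRESS (0x1F) is assumed from elsewhere in this header. */
#include <stdio.h>

#define MAX_PHY_REG_ADDRESS 0x1F   /* assumed, defined elsewhere in e1000_hw.h */
#define PHY_PAGE_SHIFT      5
#define PHY_REG(page, reg) \
	(((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))

int main(void)
{
	printf("IGP3_PHY_PORT_CTRL = 0x%04X\n", PHY_REG(769, 17)); /* prints 0x6031 */
	return 0;
}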
3173#define IGP3_PHY_PORT_CTRL \
3174 PHY_REG(769, 17) /* Port General Configuration */
3175#define IGP3_PHY_RATE_ADAPT_CTRL \
3176 PHY_REG(769, 25) /* Rate Adapter Control Register */
3177
3178#define IGP3_KMRN_FIFO_CTRL_STATS \
3179 PHY_REG(770, 16) /* KMRN FIFO's control/status register */
3180#define IGP3_KMRN_POWER_MNG_CTRL \
3181 PHY_REG(770, 17) /* KMRN Power Management Control Register */
3182#define IGP3_KMRN_INBAND_CTRL \
3183 PHY_REG(770, 18) /* KMRN Inband Control Register */
3184#define IGP3_KMRN_DIAG \
3185 PHY_REG(770, 19) /* KMRN Diagnostic register */
3186#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */
3187#define IGP3_KMRN_ACK_TIMEOUT \
3188 PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */
3189
3190#define IGP3_VR_CTRL \
3191 PHY_REG(776, 18) /* Voltage regulator control register */
3192#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */
3193
3194#define IGP3_CAPABILITY \
3195 PHY_REG(776, 19) /* IGP3 Capability Register */
3196
3197/* Capabilities for SKU Control */
3198#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */
3199#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */
3200#define IGP3_CAP_ASF 0x0004 /* Support ASF */
3201#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */
3202#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */
3203#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */
3204#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */
3205#define IGP3_CAP_RSS 0x0080 /* Support RSS */
3206#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */
3207#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */
3208
3209#define IGP3_PPC_JORDAN_EN 0x0001
3210#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002
3211
3212#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001
3213#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E
3214#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020
3215#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040
3216
3217#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. Ctrl register */
3218#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */
3219
3220#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18)
3221#define IGP3_KMRN_EC_DIS_INBAND 0x0080
3222
3223#define IGP03E1000_E_PHY_ID 0x02A80390
3224#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */
3225#define IFE_PLUS_E_PHY_ID 0x02A80320
3226#define IFE_C_E_PHY_ID 0x02A80310
3227
3228#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */
3229#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */
3230#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */
 3231#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */
3232#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */
3233#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */
3234#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */
3235#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */
3236#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */
3237#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */
3238#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */
3239#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */
3240#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */
3241
 3242#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */
3243#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */
3244#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */
3245#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */
3246#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */
3247#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */
3248#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */
3249#define IFE_PESC_POLARITY_REVERSED_SHIFT 8
3250
 3251#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */
3252#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */
3253#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */
3254#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */
3255#define IFE_PSC_FORCE_POLARITY_SHIFT 5
3256#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4
3257
3258#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */
 3259#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
3260#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
 3261#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */
3262#define IFE_PMC_MDIX_MODE_SHIFT 6
3263#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */
3264
3265#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */
3266#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */
3267#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */
3268#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */
3269#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */
3270#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */
3271#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */
3272#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */
3273#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */
3274#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
3275#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
3276
 3277#define ICH8_FLASH_COMMAND_TIMEOUT 500 /* 500 ms, should be adjusted */
 3278#define ICH8_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles, should be adjusted */
3279#define ICH8_FLASH_SEG_SIZE_256 256
3280#define ICH8_FLASH_SEG_SIZE_4K 4096
3281#define ICH8_FLASH_SEG_SIZE_64K 65536
3282
3283#define ICH8_CYCLE_READ 0x0
3284#define ICH8_CYCLE_RESERVED 0x1
3285#define ICH8_CYCLE_WRITE 0x2
3286#define ICH8_CYCLE_ERASE 0x3
3287
3288#define ICH8_FLASH_GFPREG 0x0000
3289#define ICH8_FLASH_HSFSTS 0x0004
3290#define ICH8_FLASH_HSFCTL 0x0006
3291#define ICH8_FLASH_FADDR 0x0008
3292#define ICH8_FLASH_FDATA0 0x0010
3293#define ICH8_FLASH_FRACC 0x0050
3294#define ICH8_FLASH_FREG0 0x0054
3295#define ICH8_FLASH_FREG1 0x0058
3296#define ICH8_FLASH_FREG2 0x005C
3297#define ICH8_FLASH_FREG3 0x0060
3298#define ICH8_FLASH_FPR0 0x0074
3299#define ICH8_FLASH_FPR1 0x0078
3300#define ICH8_FLASH_SSFSTS 0x0090
3301#define ICH8_FLASH_SSFCTL 0x0092
3302#define ICH8_FLASH_PREOP 0x0094
3303#define ICH8_FLASH_OPTYPE 0x0096
3304#define ICH8_FLASH_OPMENU 0x0098
3305
3306#define ICH8_FLASH_REG_MAPSIZE 0x00A0
3307#define ICH8_FLASH_SECTOR_SIZE 4096
3308#define ICH8_GFPREG_BASE_MASK 0x1FFF
3309#define ICH8_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
3310
3311/* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
3312/* Offset 04h HSFSTS */
3313union ich8_hws_flash_status {
3314 struct ich8_hsfsts {
3315#ifdef E1000_BIG_ENDIAN
3316 uint16_t reserved2 :6;
3317 uint16_t fldesvalid :1;
3318 uint16_t flockdn :1;
3319 uint16_t flcdone :1;
3320 uint16_t flcerr :1;
3321 uint16_t dael :1;
3322 uint16_t berasesz :2;
3323 uint16_t flcinprog :1;
3324 uint16_t reserved1 :2;
3325#else
3326 uint16_t flcdone :1; /* bit 0 Flash Cycle Done */
3327 uint16_t flcerr :1; /* bit 1 Flash Cycle Error */
3328 uint16_t dael :1; /* bit 2 Direct Access error Log */
3329 uint16_t berasesz :2; /* bit 4:3 Block/Sector Erase Size */
3330 uint16_t flcinprog :1; /* bit 5 flash SPI cycle in Progress */
 3331 uint16_t reserved1 :2; /* bit 7:6 Reserved */
 3332 uint16_t reserved2 :6; /* bit 13:8 Reserved */
3333 uint16_t fldesvalid :1; /* bit 14 Flash Descriptor Valid */
3334 uint16_t flockdn :1; /* bit 15 Flash Configuration Lock-Down */
3335#endif
3336 } hsf_status;
3337 uint16_t regval;
3338};
3339
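/* Sketch only (not driver code): how the status union above is typically
 * consumed - poll HSFSTS until the flash cycle completes or reports an error.
 * E1000_READ_ICH8_REG16, udelay() and the error codes are as used elsewhere
 * in this driver; the timeout granularity here is an assumption. */
static int32_t e1000_example_wait_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
{
	union ich8_hws_flash_status hsfsts;
	uint32_t i;

	for (i = 0; i < timeout; i++) {
		hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcerr == 1)
			return E1000_ERR_EEPROM;    /* cycle reported an error */
		if (hsfsts.hsf_status.flcdone == 1)
			return E1000_SUCCESS;       /* cycle finished */
		udelay(1);
	}
	return E1000_ERR_EEPROM;                    /* timed out */
}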
3340/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
 3341/* Offset 06h HSFCTL */
3342union ich8_hws_flash_ctrl {
3343 struct ich8_hsflctl {
3344#ifdef E1000_BIG_ENDIAN
3345 uint16_t fldbcount :2;
3346 uint16_t flockdn :6;
3347 uint16_t flcgo :1;
3348 uint16_t flcycle :2;
3349 uint16_t reserved :5;
3350#else
3351 uint16_t flcgo :1; /* 0 Flash Cycle Go */
3352 uint16_t flcycle :2; /* 2:1 Flash Cycle */
3353 uint16_t reserved :5; /* 7:3 Reserved */
3354 uint16_t fldbcount :2; /* 9:8 Flash Data Byte Count */
 3355 uint16_t flockdn :6; /* 15:10 Flash Lock-Down and reserved bits */
3356#endif
3357 } hsf_ctrl;
3358 uint16_t regval;
3359};
3360
3361/* ICH8 Flash Region Access Permissions */
3362union ich8_hws_flash_regacc {
3363 struct ich8_flracc {
3364#ifdef E1000_BIG_ENDIAN
3365 uint32_t gmwag :8;
3366 uint32_t gmrag :8;
3367 uint32_t grwa :8;
3368 uint32_t grra :8;
3369#else
3370 uint32_t grra :8; /* 0:7 GbE region Read Access */
3371 uint32_t grwa :8; /* 8:15 GbE region Write Access */
3372 uint32_t gmrag :8; /* 23:16 GbE Master Read Access Grant */
3373 uint32_t gmwag :8; /* 31:24 GbE Master Write Access Grant */
3374#endif
3375 } hsf_flregacc;
3376 uint16_t regval;
3377};
3378
2993/* Miscellaneous PHY bit definitions. */ 3379/* Miscellaneous PHY bit definitions. */
2994#define PHY_PREAMBLE 0xFFFFFFFF 3380#define PHY_PREAMBLE 0xFFFFFFFF
2995#define PHY_SOF 0x01 3381#define PHY_SOF 0x01
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index f77624f5f17b..6e7d31bacf4d 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
36#else 36#else
37#define DRIVERNAPI "-NAPI" 37#define DRIVERNAPI "-NAPI"
38#endif 38#endif
39#define DRV_VERSION "7.0.38-k4"DRIVERNAPI 39#define DRV_VERSION "7.1.9-k2"DRIVERNAPI
40char e1000_driver_version[] = DRV_VERSION; 40char e1000_driver_version[] = DRV_VERSION;
41static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 41static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
42 42
@@ -73,6 +73,11 @@ static struct pci_device_id e1000_pci_tbl[] = {
73 INTEL_E1000_ETHERNET_DEVICE(0x1026), 73 INTEL_E1000_ETHERNET_DEVICE(0x1026),
74 INTEL_E1000_ETHERNET_DEVICE(0x1027), 74 INTEL_E1000_ETHERNET_DEVICE(0x1027),
75 INTEL_E1000_ETHERNET_DEVICE(0x1028), 75 INTEL_E1000_ETHERNET_DEVICE(0x1028),
76 INTEL_E1000_ETHERNET_DEVICE(0x1049),
77 INTEL_E1000_ETHERNET_DEVICE(0x104A),
78 INTEL_E1000_ETHERNET_DEVICE(0x104B),
79 INTEL_E1000_ETHERNET_DEVICE(0x104C),
80 INTEL_E1000_ETHERNET_DEVICE(0x104D),
76 INTEL_E1000_ETHERNET_DEVICE(0x105E), 81 INTEL_E1000_ETHERNET_DEVICE(0x105E),
77 INTEL_E1000_ETHERNET_DEVICE(0x105F), 82 INTEL_E1000_ETHERNET_DEVICE(0x105F),
78 INTEL_E1000_ETHERNET_DEVICE(0x1060), 83 INTEL_E1000_ETHERNET_DEVICE(0x1060),
@@ -96,6 +101,8 @@ static struct pci_device_id e1000_pci_tbl[] = {
96 INTEL_E1000_ETHERNET_DEVICE(0x109A), 101 INTEL_E1000_ETHERNET_DEVICE(0x109A),
97 INTEL_E1000_ETHERNET_DEVICE(0x10B5), 102 INTEL_E1000_ETHERNET_DEVICE(0x10B5),
98 INTEL_E1000_ETHERNET_DEVICE(0x10B9), 103 INTEL_E1000_ETHERNET_DEVICE(0x10B9),
104 INTEL_E1000_ETHERNET_DEVICE(0x10BA),
105 INTEL_E1000_ETHERNET_DEVICE(0x10BB),
99 /* required last entry */ 106 /* required last entry */
100 {0,} 107 {0,}
101}; 108};
@@ -133,7 +140,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
133static void e1000_set_multi(struct net_device *netdev); 140static void e1000_set_multi(struct net_device *netdev);
134static void e1000_update_phy_info(unsigned long data); 141static void e1000_update_phy_info(unsigned long data);
135static void e1000_watchdog(unsigned long data); 142static void e1000_watchdog(unsigned long data);
136static void e1000_watchdog_task(struct e1000_adapter *adapter);
137static void e1000_82547_tx_fifo_stall(unsigned long data); 143static void e1000_82547_tx_fifo_stall(unsigned long data);
138static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 144static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
139static struct net_device_stats * e1000_get_stats(struct net_device *netdev); 145static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
@@ -178,8 +184,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
178static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 184static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
179static void e1000_restore_vlan(struct e1000_adapter *adapter); 185static void e1000_restore_vlan(struct e1000_adapter *adapter);
180 186
181#ifdef CONFIG_PM
182static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); 187static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
188#ifdef CONFIG_PM
183static int e1000_resume(struct pci_dev *pdev); 189static int e1000_resume(struct pci_dev *pdev);
184#endif 190#endif
185static void e1000_shutdown(struct pci_dev *pdev); 191static void e1000_shutdown(struct pci_dev *pdev);
@@ -206,8 +212,8 @@ static struct pci_driver e1000_driver = {
206 .probe = e1000_probe, 212 .probe = e1000_probe,
207 .remove = __devexit_p(e1000_remove), 213 .remove = __devexit_p(e1000_remove),
 208 /* Power Management Hooks */ 214 /* Power Management Hooks */
209#ifdef CONFIG_PM
210 .suspend = e1000_suspend, 215 .suspend = e1000_suspend,
216#ifdef CONFIG_PM
211 .resume = e1000_resume, 217 .resume = e1000_resume,
212#endif 218#endif
213 .shutdown = e1000_shutdown, 219 .shutdown = e1000_shutdown,
@@ -261,6 +267,44 @@ e1000_exit_module(void)
261 267
262module_exit(e1000_exit_module); 268module_exit(e1000_exit_module);
263 269
270static int e1000_request_irq(struct e1000_adapter *adapter)
271{
272 struct net_device *netdev = adapter->netdev;
273 int flags, err = 0;
274
275 flags = IRQF_SHARED;
276#ifdef CONFIG_PCI_MSI
277 if (adapter->hw.mac_type > e1000_82547_rev_2) {
278 adapter->have_msi = TRUE;
279 if ((err = pci_enable_msi(adapter->pdev))) {
280 DPRINTK(PROBE, ERR,
281 "Unable to allocate MSI interrupt Error: %d\n", err);
282 adapter->have_msi = FALSE;
283 }
284 }
285 if (adapter->have_msi)
286 flags &= ~SA_SHIRQ;
287#endif
288 if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
289 netdev->name, netdev)))
290 DPRINTK(PROBE, ERR,
291 "Unable to allocate interrupt Error: %d\n", err);
292
293 return err;
294}
295
296static void e1000_free_irq(struct e1000_adapter *adapter)
297{
298 struct net_device *netdev = adapter->netdev;
299
300 free_irq(adapter->pdev->irq, netdev);
301
302#ifdef CONFIG_PCI_MSI
303 if (adapter->have_msi)
304 pci_disable_msi(adapter->pdev);
305#endif
306}
307
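Editor's note: the helper above seeds flags with IRQF_SHARED and then clears SA_SHIRQ once MSI is enabled; in this kernel series SA_SHIRQ appears to be the deprecated alias of IRQF_SHARED, so both names denote the same bit and the intent is simply that an MSI vector, being exclusive to the device, is not requested as shared. A sketch of the same logic written with a single flag name (illustrative only, not part of the patch):

static int e1000_request_irq_sketch(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long flags = IRQF_SHARED;
	int err;

#ifdef CONFIG_PCI_MSI
	if (adapter->hw.mac_type > e1000_82547_rev_2 &&
	    !pci_enable_msi(adapter->pdev)) {
		adapter->have_msi = TRUE;
		flags &= ~IRQF_SHARED;	/* an MSI vector is never shared */
	}
#endif
	err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
			  netdev->name, netdev);
	if (err)
		DPRINTK(PROBE, ERR,
			"Unable to allocate interrupt Error: %d\n", err);
	return err;
}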
264/** 308/**
265 * e1000_irq_disable - Mask off interrupt generation on the NIC 309 * e1000_irq_disable - Mask off interrupt generation on the NIC
266 * @adapter: board private structure 310 * @adapter: board private structure
@@ -329,6 +373,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
329{ 373{
330 uint32_t ctrl_ext; 374 uint32_t ctrl_ext;
331 uint32_t swsm; 375 uint32_t swsm;
376 uint32_t extcnf;
332 377
 333 /* Let firmware take over control of h/w */ 378 /* Let firmware take over control of h/w */
334 switch (adapter->hw.mac_type) { 379 switch (adapter->hw.mac_type) {
@@ -343,6 +388,11 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
343 swsm = E1000_READ_REG(&adapter->hw, SWSM); 388 swsm = E1000_READ_REG(&adapter->hw, SWSM);
344 E1000_WRITE_REG(&adapter->hw, SWSM, 389 E1000_WRITE_REG(&adapter->hw, SWSM,
345 swsm & ~E1000_SWSM_DRV_LOAD); 390 swsm & ~E1000_SWSM_DRV_LOAD);
391 case e1000_ich8lan:
392 extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT);
393 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
394 extcnf & ~E1000_CTRL_EXT_DRV_LOAD);
395 break;
346 default: 396 default:
347 break; 397 break;
348 } 398 }
@@ -364,6 +414,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
364{ 414{
365 uint32_t ctrl_ext; 415 uint32_t ctrl_ext;
366 uint32_t swsm; 416 uint32_t swsm;
417 uint32_t extcnf;
367 /* Let firmware know the driver has taken over */ 418 /* Let firmware know the driver has taken over */
368 switch (adapter->hw.mac_type) { 419 switch (adapter->hw.mac_type) {
369 case e1000_82571: 420 case e1000_82571:
@@ -378,6 +429,11 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
378 E1000_WRITE_REG(&adapter->hw, SWSM, 429 E1000_WRITE_REG(&adapter->hw, SWSM,
379 swsm | E1000_SWSM_DRV_LOAD); 430 swsm | E1000_SWSM_DRV_LOAD);
380 break; 431 break;
432 case e1000_ich8lan:
433 extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL);
434 E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL,
435 extcnf | E1000_EXTCNF_CTRL_SWFLAG);
436 break;
381 default: 437 default:
382 break; 438 break;
383 } 439 }
@@ -387,18 +443,10 @@ int
387e1000_up(struct e1000_adapter *adapter) 443e1000_up(struct e1000_adapter *adapter)
388{ 444{
389 struct net_device *netdev = adapter->netdev; 445 struct net_device *netdev = adapter->netdev;
390 int i, err; 446 int i;
391 447
392 /* hardware has been reset, we need to reload some things */ 448 /* hardware has been reset, we need to reload some things */
393 449
394 /* Reset the PHY if it was previously powered down */
395 if (adapter->hw.media_type == e1000_media_type_copper) {
396 uint16_t mii_reg;
397 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
398 if (mii_reg & MII_CR_POWER_DOWN)
399 e1000_phy_hw_reset(&adapter->hw);
400 }
401
402 e1000_set_multi(netdev); 450 e1000_set_multi(netdev);
403 451
404 e1000_restore_vlan(adapter); 452 e1000_restore_vlan(adapter);
@@ -415,24 +463,6 @@ e1000_up(struct e1000_adapter *adapter)
415 E1000_DESC_UNUSED(ring)); 463 E1000_DESC_UNUSED(ring));
416 } 464 }
417 465
418#ifdef CONFIG_PCI_MSI
419 if (adapter->hw.mac_type > e1000_82547_rev_2) {
420 adapter->have_msi = TRUE;
421 if ((err = pci_enable_msi(adapter->pdev))) {
422 DPRINTK(PROBE, ERR,
423 "Unable to allocate MSI interrupt Error: %d\n", err);
424 adapter->have_msi = FALSE;
425 }
426 }
427#endif
428 if ((err = request_irq(adapter->pdev->irq, &e1000_intr,
429 IRQF_SHARED | IRQF_SAMPLE_RANDOM,
430 netdev->name, netdev))) {
431 DPRINTK(PROBE, ERR,
432 "Unable to allocate interrupt Error: %d\n", err);
433 return err;
434 }
435
436 adapter->tx_queue_len = netdev->tx_queue_len; 466 adapter->tx_queue_len = netdev->tx_queue_len;
437 467
438 mod_timer(&adapter->watchdog_timer, jiffies); 468 mod_timer(&adapter->watchdog_timer, jiffies);
@@ -445,21 +475,60 @@ e1000_up(struct e1000_adapter *adapter)
445 return 0; 475 return 0;
446} 476}
447 477
478/**
479 * e1000_power_up_phy - restore link in case the phy was powered down
480 * @adapter: address of board private structure
481 *
482 * The phy may be powered down to save power and turn off link when the
483 * driver is unloaded and wake on lan is not enabled (among others)
484 * *** this routine MUST be followed by a call to e1000_reset ***
485 *
486 **/
487
488static void e1000_power_up_phy(struct e1000_adapter *adapter)
489{
490 uint16_t mii_reg = 0;
491
492 /* Just clear the power down bit to wake the phy back up */
493 if (adapter->hw.media_type == e1000_media_type_copper) {
494 /* according to the manual, the phy will retain its
495 * settings across a power-down/up cycle */
496 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
497 mii_reg &= ~MII_CR_POWER_DOWN;
498 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
499 }
500}
501
502static void e1000_power_down_phy(struct e1000_adapter *adapter)
503{
504 boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
505 e1000_check_mng_mode(&adapter->hw);
506 /* Power down the PHY so no link is implied when interface is down
507 * The PHY cannot be powered down if any of the following is TRUE
508 * (a) WoL is enabled
509 * (b) AMT is active
510 * (c) SoL/IDER session is active */
511 if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
512 adapter->hw.mac_type != e1000_ich8lan &&
513 adapter->hw.media_type == e1000_media_type_copper &&
514 !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
515 !mng_mode_enabled &&
516 !e1000_check_phy_reset_block(&adapter->hw)) {
517 uint16_t mii_reg = 0;
518 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
519 mii_reg |= MII_CR_POWER_DOWN;
520 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
521 mdelay(1);
522 }
523}
524
448void 525void
449e1000_down(struct e1000_adapter *adapter) 526e1000_down(struct e1000_adapter *adapter)
450{ 527{
451 struct net_device *netdev = adapter->netdev; 528 struct net_device *netdev = adapter->netdev;
452 boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
453 e1000_check_mng_mode(&adapter->hw);
454 529
455 e1000_irq_disable(adapter); 530 e1000_irq_disable(adapter);
456 531
457 free_irq(adapter->pdev->irq, netdev);
458#ifdef CONFIG_PCI_MSI
459 if (adapter->hw.mac_type > e1000_82547_rev_2 &&
460 adapter->have_msi == TRUE)
461 pci_disable_msi(adapter->pdev);
462#endif
463 del_timer_sync(&adapter->tx_fifo_stall_timer); 532 del_timer_sync(&adapter->tx_fifo_stall_timer);
464 del_timer_sync(&adapter->watchdog_timer); 533 del_timer_sync(&adapter->watchdog_timer);
465 del_timer_sync(&adapter->phy_info_timer); 534 del_timer_sync(&adapter->phy_info_timer);
@@ -476,23 +545,17 @@ e1000_down(struct e1000_adapter *adapter)
476 e1000_reset(adapter); 545 e1000_reset(adapter);
477 e1000_clean_all_tx_rings(adapter); 546 e1000_clean_all_tx_rings(adapter);
478 e1000_clean_all_rx_rings(adapter); 547 e1000_clean_all_rx_rings(adapter);
548}
479 549
480 /* Power down the PHY so no link is implied when interface is down * 550void
481 * The PHY cannot be powered down if any of the following is TRUE * 551e1000_reinit_locked(struct e1000_adapter *adapter)
482 * (a) WoL is enabled 552{
483 * (b) AMT is active 553 WARN_ON(in_interrupt());
484 * (c) SoL/IDER session is active */ 554 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
485 if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 && 555 msleep(1);
486 adapter->hw.media_type == e1000_media_type_copper && 556 e1000_down(adapter);
487 !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) && 557 e1000_up(adapter);
488 !mng_mode_enabled && 558 clear_bit(__E1000_RESETTING, &adapter->flags);
489 !e1000_check_phy_reset_block(&adapter->hw)) {
490 uint16_t mii_reg;
491 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
492 mii_reg |= MII_CR_POWER_DOWN;
493 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
494 mdelay(1);
495 }
496} 559}
497 560
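Editor's note: later hunks in this file convert e1000_reset_task(), e1000_change_mtu() and e1000_mii_ioctl() from back-to-back e1000_down()/e1000_up() calls to this helper; the __E1000_RESETTING bit in adapter->flags acts as a sleeping "reset in progress" lock so those paths cannot interleave. A typical caller, mirroring the converted sites further down, then reads:

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);	/* serialized down + up */
	else
		e1000_reset(adapter);		/* interface not running */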
498void 561void
@@ -518,6 +581,9 @@ e1000_reset(struct e1000_adapter *adapter)
518 case e1000_82573: 581 case e1000_82573:
519 pba = E1000_PBA_12K; 582 pba = E1000_PBA_12K;
520 break; 583 break;
584 case e1000_ich8lan:
585 pba = E1000_PBA_8K;
586 break;
521 default: 587 default:
522 pba = E1000_PBA_48K; 588 pba = E1000_PBA_48K;
523 break; 589 break;
@@ -542,6 +608,12 @@ e1000_reset(struct e1000_adapter *adapter)
542 /* Set the FC high water mark to 90% of the FIFO size. 608 /* Set the FC high water mark to 90% of the FIFO size.
543 * Required to clear last 3 LSB */ 609 * Required to clear last 3 LSB */
544 fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8; 610 fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
611 /* We can't use 90% on small FIFOs because the remainder
612 * would be less than 1 full frame. In this case, we size
613 * it to allow at least a full frame above the high water
614 * mark. */
615 if (pba < E1000_PBA_16K)
616 fc_high_water_mark = (pba * 1024) - 1600;
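Editor's note, as a worked example of the small-FIFO cap above (assuming the PBA constants are in units of 1 KB and a 1518-byte maximum standard frame): for the ICH8's E1000_PBA_8K the 90% rule would give ((8 * 9216) / 10) & 0xFFF8 = 7368 bytes, leaving only 8192 - 7368 = 824 bytes above the high-water mark, which is less than one full frame; the (pba * 1024) - 1600 rule instead yields 6592 bytes, preserving a 1600-byte cushion.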
545 617
546 adapter->hw.fc_high_water = fc_high_water_mark; 618 adapter->hw.fc_high_water = fc_high_water_mark;
547 adapter->hw.fc_low_water = fc_high_water_mark - 8; 619 adapter->hw.fc_low_water = fc_high_water_mark - 8;
@@ -564,6 +636,23 @@ e1000_reset(struct e1000_adapter *adapter)
564 636
565 e1000_reset_adaptive(&adapter->hw); 637 e1000_reset_adaptive(&adapter->hw);
566 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 638 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
639
640 if (!adapter->smart_power_down &&
641 (adapter->hw.mac_type == e1000_82571 ||
642 adapter->hw.mac_type == e1000_82572)) {
643 uint16_t phy_data = 0;
644 /* speed up time to link by disabling smart power down, ignore
645 * the return value of this function because there is nothing
646 * different we would do if it failed */
647 e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
648 &phy_data);
649 phy_data &= ~IGP02E1000_PM_SPD;
650 e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
651 phy_data);
652 }
653
654 if (adapter->hw.mac_type < e1000_ich8lan)
 655 /* FIXME: this code is duplicated and wrong for PCI Express */
567 if (adapter->en_mng_pt) { 656 if (adapter->en_mng_pt) {
568 manc = E1000_READ_REG(&adapter->hw, MANC); 657 manc = E1000_READ_REG(&adapter->hw, MANC);
569 manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST); 658 manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
@@ -590,6 +679,7 @@ e1000_probe(struct pci_dev *pdev,
590 struct net_device *netdev; 679 struct net_device *netdev;
591 struct e1000_adapter *adapter; 680 struct e1000_adapter *adapter;
592 unsigned long mmio_start, mmio_len; 681 unsigned long mmio_start, mmio_len;
682 unsigned long flash_start, flash_len;
593 683
594 static int cards_found = 0; 684 static int cards_found = 0;
595 static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */ 685 static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */
@@ -599,10 +689,12 @@ e1000_probe(struct pci_dev *pdev,
599 if ((err = pci_enable_device(pdev))) 689 if ((err = pci_enable_device(pdev)))
600 return err; 690 return err;
601 691
602 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { 692 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
693 !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
603 pci_using_dac = 1; 694 pci_using_dac = 1;
604 } else { 695 } else {
605 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { 696 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
697 (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
606 E1000_ERR("No usable DMA configuration, aborting\n"); 698 E1000_ERR("No usable DMA configuration, aborting\n");
607 return err; 699 return err;
608 } 700 }
@@ -682,6 +774,19 @@ e1000_probe(struct pci_dev *pdev,
682 if ((err = e1000_sw_init(adapter))) 774 if ((err = e1000_sw_init(adapter)))
683 goto err_sw_init; 775 goto err_sw_init;
684 776
777 /* Flash BAR mapping must happen after e1000_sw_init
778 * because it depends on mac_type */
779 if ((adapter->hw.mac_type == e1000_ich8lan) &&
780 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
781 flash_start = pci_resource_start(pdev, 1);
782 flash_len = pci_resource_len(pdev, 1);
783 adapter->hw.flash_address = ioremap(flash_start, flash_len);
784 if (!adapter->hw.flash_address) {
785 err = -EIO;
786 goto err_flashmap;
787 }
788 }
789
685 if ((err = e1000_check_phy_reset_block(&adapter->hw))) 790 if ((err = e1000_check_phy_reset_block(&adapter->hw)))
686 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); 791 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
687 792
@@ -700,6 +805,8 @@ e1000_probe(struct pci_dev *pdev,
700 NETIF_F_HW_VLAN_TX | 805 NETIF_F_HW_VLAN_TX |
701 NETIF_F_HW_VLAN_RX | 806 NETIF_F_HW_VLAN_RX |
702 NETIF_F_HW_VLAN_FILTER; 807 NETIF_F_HW_VLAN_FILTER;
808 if (adapter->hw.mac_type == e1000_ich8lan)
809 netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
703 } 810 }
704 811
705#ifdef NETIF_F_TSO 812#ifdef NETIF_F_TSO
@@ -715,11 +822,17 @@ e1000_probe(struct pci_dev *pdev,
715 if (pci_using_dac) 822 if (pci_using_dac)
716 netdev->features |= NETIF_F_HIGHDMA; 823 netdev->features |= NETIF_F_HIGHDMA;
717 824
718 /* hard_start_xmit is safe against parallel locking */
719 netdev->features |= NETIF_F_LLTX; 825 netdev->features |= NETIF_F_LLTX;
720 826
721 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); 827 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
722 828
829 /* initialize eeprom parameters */
830
831 if (e1000_init_eeprom_params(&adapter->hw)) {
832 E1000_ERR("EEPROM initialization failed\n");
833 return -EIO;
834 }
835
723 /* before reading the EEPROM, reset the controller to 836 /* before reading the EEPROM, reset the controller to
724 * put the device in a known good starting state */ 837 * put the device in a known good starting state */
725 838
@@ -758,9 +871,6 @@ e1000_probe(struct pci_dev *pdev,
758 adapter->watchdog_timer.function = &e1000_watchdog; 871 adapter->watchdog_timer.function = &e1000_watchdog;
759 adapter->watchdog_timer.data = (unsigned long) adapter; 872 adapter->watchdog_timer.data = (unsigned long) adapter;
760 873
761 INIT_WORK(&adapter->watchdog_task,
762 (void (*)(void *))e1000_watchdog_task, adapter);
763
764 init_timer(&adapter->phy_info_timer); 874 init_timer(&adapter->phy_info_timer);
765 adapter->phy_info_timer.function = &e1000_update_phy_info; 875 adapter->phy_info_timer.function = &e1000_update_phy_info;
766 adapter->phy_info_timer.data = (unsigned long) adapter; 876 adapter->phy_info_timer.data = (unsigned long) adapter;
@@ -790,6 +900,11 @@ e1000_probe(struct pci_dev *pdev,
790 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); 900 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
791 eeprom_apme_mask = E1000_EEPROM_82544_APM; 901 eeprom_apme_mask = E1000_EEPROM_82544_APM;
792 break; 902 break;
903 case e1000_ich8lan:
904 e1000_read_eeprom(&adapter->hw,
905 EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
906 eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
907 break;
793 case e1000_82546: 908 case e1000_82546:
794 case e1000_82546_rev_3: 909 case e1000_82546_rev_3:
795 case e1000_82571: 910 case e1000_82571:
@@ -849,6 +964,9 @@ e1000_probe(struct pci_dev *pdev,
849 return 0; 964 return 0;
850 965
851err_register: 966err_register:
967 if (adapter->hw.flash_address)
968 iounmap(adapter->hw.flash_address);
969err_flashmap:
852err_sw_init: 970err_sw_init:
853err_eeprom: 971err_eeprom:
854 iounmap(adapter->hw.hw_addr); 972 iounmap(adapter->hw.hw_addr);
@@ -882,6 +1000,7 @@ e1000_remove(struct pci_dev *pdev)
882 flush_scheduled_work(); 1000 flush_scheduled_work();
883 1001
884 if (adapter->hw.mac_type >= e1000_82540 && 1002 if (adapter->hw.mac_type >= e1000_82540 &&
1003 adapter->hw.mac_type != e1000_ich8lan &&
885 adapter->hw.media_type == e1000_media_type_copper) { 1004 adapter->hw.media_type == e1000_media_type_copper) {
886 manc = E1000_READ_REG(&adapter->hw, MANC); 1005 manc = E1000_READ_REG(&adapter->hw, MANC);
887 if (manc & E1000_MANC_SMBUS_EN) { 1006 if (manc & E1000_MANC_SMBUS_EN) {
@@ -910,6 +1029,8 @@ e1000_remove(struct pci_dev *pdev)
910#endif 1029#endif
911 1030
912 iounmap(adapter->hw.hw_addr); 1031 iounmap(adapter->hw.hw_addr);
1032 if (adapter->hw.flash_address)
1033 iounmap(adapter->hw.flash_address);
913 pci_release_regions(pdev); 1034 pci_release_regions(pdev);
914 1035
915 free_netdev(netdev); 1036 free_netdev(netdev);
@@ -960,13 +1081,6 @@ e1000_sw_init(struct e1000_adapter *adapter)
960 return -EIO; 1081 return -EIO;
961 } 1082 }
962 1083
963 /* initialize eeprom parameters */
964
965 if (e1000_init_eeprom_params(hw)) {
966 E1000_ERR("EEPROM initialization failed\n");
967 return -EIO;
968 }
969
970 switch (hw->mac_type) { 1084 switch (hw->mac_type) {
971 default: 1085 default:
972 break; 1086 break;
@@ -1078,6 +1192,10 @@ e1000_open(struct net_device *netdev)
1078 struct e1000_adapter *adapter = netdev_priv(netdev); 1192 struct e1000_adapter *adapter = netdev_priv(netdev);
1079 int err; 1193 int err;
1080 1194
1195 /* disallow open during test */
1196 if (test_bit(__E1000_DRIVER_TESTING, &adapter->flags))
1197 return -EBUSY;
1198
1081 /* allocate transmit descriptors */ 1199 /* allocate transmit descriptors */
1082 1200
1083 if ((err = e1000_setup_all_tx_resources(adapter))) 1201 if ((err = e1000_setup_all_tx_resources(adapter)))
@@ -1088,6 +1206,12 @@ e1000_open(struct net_device *netdev)
1088 if ((err = e1000_setup_all_rx_resources(adapter))) 1206 if ((err = e1000_setup_all_rx_resources(adapter)))
1089 goto err_setup_rx; 1207 goto err_setup_rx;
1090 1208
1209 err = e1000_request_irq(adapter);
1210 if (err)
1211 goto err_up;
1212
1213 e1000_power_up_phy(adapter);
1214
1091 if ((err = e1000_up(adapter))) 1215 if ((err = e1000_up(adapter)))
1092 goto err_up; 1216 goto err_up;
1093 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 1217 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -1131,7 +1255,10 @@ e1000_close(struct net_device *netdev)
1131{ 1255{
1132 struct e1000_adapter *adapter = netdev_priv(netdev); 1256 struct e1000_adapter *adapter = netdev_priv(netdev);
1133 1257
1258 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1134 e1000_down(adapter); 1259 e1000_down(adapter);
1260 e1000_power_down_phy(adapter);
1261 e1000_free_irq(adapter);
1135 1262
1136 e1000_free_all_tx_resources(adapter); 1263 e1000_free_all_tx_resources(adapter);
1137 e1000_free_all_rx_resources(adapter); 1264 e1000_free_all_rx_resources(adapter);
@@ -1189,8 +1316,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
1189 int size; 1316 int size;
1190 1317
1191 size = sizeof(struct e1000_buffer) * txdr->count; 1318 size = sizeof(struct e1000_buffer) * txdr->count;
1192 1319 txdr->buffer_info = vmalloc(size);
1193 txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
1194 if (!txdr->buffer_info) { 1320 if (!txdr->buffer_info) {
1195 DPRINTK(PROBE, ERR, 1321 DPRINTK(PROBE, ERR,
1196 "Unable to allocate memory for the transmit descriptor ring\n"); 1322 "Unable to allocate memory for the transmit descriptor ring\n");
@@ -1302,11 +1428,11 @@ e1000_configure_tx(struct e1000_adapter *adapter)
1302 tdba = adapter->tx_ring[0].dma; 1428 tdba = adapter->tx_ring[0].dma;
1303 tdlen = adapter->tx_ring[0].count * 1429 tdlen = adapter->tx_ring[0].count *
1304 sizeof(struct e1000_tx_desc); 1430 sizeof(struct e1000_tx_desc);
1305 E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
1306 E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
1307 E1000_WRITE_REG(hw, TDLEN, tdlen); 1431 E1000_WRITE_REG(hw, TDLEN, tdlen);
1308 E1000_WRITE_REG(hw, TDH, 0); 1432 E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
1433 E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
1309 E1000_WRITE_REG(hw, TDT, 0); 1434 E1000_WRITE_REG(hw, TDT, 0);
1435 E1000_WRITE_REG(hw, TDH, 0);
1310 adapter->tx_ring[0].tdh = E1000_TDH; 1436 adapter->tx_ring[0].tdh = E1000_TDH;
1311 adapter->tx_ring[0].tdt = E1000_TDT; 1437 adapter->tx_ring[0].tdt = E1000_TDT;
1312 break; 1438 break;
@@ -1418,7 +1544,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1418 int size, desc_len; 1544 int size, desc_len;
1419 1545
1420 size = sizeof(struct e1000_buffer) * rxdr->count; 1546 size = sizeof(struct e1000_buffer) * rxdr->count;
1421 rxdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); 1547 rxdr->buffer_info = vmalloc(size);
1422 if (!rxdr->buffer_info) { 1548 if (!rxdr->buffer_info) {
1423 DPRINTK(PROBE, ERR, 1549 DPRINTK(PROBE, ERR,
1424 "Unable to allocate memory for the receive descriptor ring\n"); 1550 "Unable to allocate memory for the receive descriptor ring\n");
@@ -1560,9 +1686,6 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1560 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 1686 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1561 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); 1687 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
1562 1688
1563 if (adapter->hw.mac_type > e1000_82543)
1564 rctl |= E1000_RCTL_SECRC;
1565
1566 if (adapter->hw.tbi_compatibility_on == 1) 1689 if (adapter->hw.tbi_compatibility_on == 1)
1567 rctl |= E1000_RCTL_SBP; 1690 rctl |= E1000_RCTL_SBP;
1568 else 1691 else
@@ -1628,7 +1751,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1628 rfctl |= E1000_RFCTL_IPV6_DIS; 1751 rfctl |= E1000_RFCTL_IPV6_DIS;
1629 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); 1752 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
1630 1753
1631 rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; 1754 rctl |= E1000_RCTL_DTYP_PS;
1632 1755
1633 psrctl |= adapter->rx_ps_bsize0 >> 1756 psrctl |= adapter->rx_ps_bsize0 >>
1634 E1000_PSRCTL_BSIZE0_SHIFT; 1757 E1000_PSRCTL_BSIZE0_SHIFT;
@@ -1712,11 +1835,11 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1712 case 1: 1835 case 1:
1713 default: 1836 default:
1714 rdba = adapter->rx_ring[0].dma; 1837 rdba = adapter->rx_ring[0].dma;
1715 E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
1716 E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
1717 E1000_WRITE_REG(hw, RDLEN, rdlen); 1838 E1000_WRITE_REG(hw, RDLEN, rdlen);
1718 E1000_WRITE_REG(hw, RDH, 0); 1839 E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
1840 E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
1719 E1000_WRITE_REG(hw, RDT, 0); 1841 E1000_WRITE_REG(hw, RDT, 0);
1842 E1000_WRITE_REG(hw, RDH, 0);
1720 adapter->rx_ring[0].rdh = E1000_RDH; 1843 adapter->rx_ring[0].rdh = E1000_RDH;
1721 adapter->rx_ring[0].rdt = E1000_RDT; 1844 adapter->rx_ring[0].rdt = E1000_RDT;
1722 break; 1845 break;
@@ -1741,9 +1864,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1741 E1000_WRITE_REG(hw, RXCSUM, rxcsum); 1864 E1000_WRITE_REG(hw, RXCSUM, rxcsum);
1742 } 1865 }
1743 1866
1744 if (hw->mac_type == e1000_82573)
1745 E1000_WRITE_REG(hw, ERT, 0x0100);
1746
1747 /* Enable Receives */ 1867 /* Enable Receives */
1748 E1000_WRITE_REG(hw, RCTL, rctl); 1868 E1000_WRITE_REG(hw, RCTL, rctl);
1749} 1869}
@@ -2083,6 +2203,12 @@ e1000_set_multi(struct net_device *netdev)
2083 uint32_t rctl; 2203 uint32_t rctl;
2084 uint32_t hash_value; 2204 uint32_t hash_value;
2085 int i, rar_entries = E1000_RAR_ENTRIES; 2205 int i, rar_entries = E1000_RAR_ENTRIES;
2206 int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
2207 E1000_NUM_MTA_REGISTERS_ICH8LAN :
2208 E1000_NUM_MTA_REGISTERS;
2209
2210 if (adapter->hw.mac_type == e1000_ich8lan)
2211 rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
2086 2212
2087 /* reserve RAR[14] for LAA over-write work-around */ 2213 /* reserve RAR[14] for LAA over-write work-around */
2088 if (adapter->hw.mac_type == e1000_82571) 2214 if (adapter->hw.mac_type == e1000_82571)
@@ -2121,14 +2247,18 @@ e1000_set_multi(struct net_device *netdev)
2121 mc_ptr = mc_ptr->next; 2247 mc_ptr = mc_ptr->next;
2122 } else { 2248 } else {
2123 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); 2249 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2250 E1000_WRITE_FLUSH(hw);
2124 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); 2251 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2252 E1000_WRITE_FLUSH(hw);
2125 } 2253 }
2126 } 2254 }
2127 2255
2128 /* clear the old settings from the multicast hash table */ 2256 /* clear the old settings from the multicast hash table */
2129 2257
2130 for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++) 2258 for (i = 0; i < mta_reg_count; i++) {
2131 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 2259 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
2260 E1000_WRITE_FLUSH(hw);
2261 }
2132 2262
2133 /* load any remaining addresses into the hash table */ 2263 /* load any remaining addresses into the hash table */
2134 2264
@@ -2201,19 +2331,19 @@ static void
2201e1000_watchdog(unsigned long data) 2331e1000_watchdog(unsigned long data)
2202{ 2332{
2203 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 2333 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2204
2205 /* Do the rest outside of interrupt context */
2206 schedule_work(&adapter->watchdog_task);
2207}
2208
2209static void
2210e1000_watchdog_task(struct e1000_adapter *adapter)
2211{
2212 struct net_device *netdev = adapter->netdev; 2334 struct net_device *netdev = adapter->netdev;
2213 struct e1000_tx_ring *txdr = adapter->tx_ring; 2335 struct e1000_tx_ring *txdr = adapter->tx_ring;
2214 uint32_t link, tctl; 2336 uint32_t link, tctl;
2215 2337 int32_t ret_val;
2216 e1000_check_for_link(&adapter->hw); 2338
2339 ret_val = e1000_check_for_link(&adapter->hw);
2340 if ((ret_val == E1000_ERR_PHY) &&
2341 (adapter->hw.phy_type == e1000_phy_igp_3) &&
2342 (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
2343 /* See e1000_kumeran_lock_loss_workaround() */
2344 DPRINTK(LINK, INFO,
2345 "Gigabit has been disabled, downgrading speed\n");
2346 }
2217 if (adapter->hw.mac_type == e1000_82573) { 2347 if (adapter->hw.mac_type == e1000_82573) {
2218 e1000_enable_tx_pkt_filtering(&adapter->hw); 2348 e1000_enable_tx_pkt_filtering(&adapter->hw);
2219 if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) 2349 if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
@@ -2394,7 +2524,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2394 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 2524 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
2395 int err; 2525 int err;
2396 2526
2397 if (skb_shinfo(skb)->gso_size) { 2527 if (skb_is_gso(skb)) {
2398 if (skb_header_cloned(skb)) { 2528 if (skb_header_cloned(skb)) {
2399 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2529 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2400 if (err) 2530 if (err)
@@ -2519,7 +2649,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2519 * tso gets written back prematurely before the data is fully 2649 * tso gets written back prematurely before the data is fully
2520 * DMA'd to the controller */ 2650 * DMA'd to the controller */
2521 if (!skb->data_len && tx_ring->last_tx_tso && 2651 if (!skb->data_len && tx_ring->last_tx_tso &&
2522 !skb_shinfo(skb)->gso_size) { 2652 !skb_is_gso(skb)) {
2523 tx_ring->last_tx_tso = 0; 2653 tx_ring->last_tx_tso = 0;
2524 size -= 4; 2654 size -= 4;
2525 } 2655 }
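Editor's note: the conversions in this file (and in forcedeth.c further down) replace open-coded skb_shinfo(skb)->gso_size tests with the new skb_is_gso() helper. The helper is assumed to be a thin inline in <linux/skbuff.h> along the following lines; this is a sketch of its expected shape, not text taken from the patch:

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}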
@@ -2779,9 +2909,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2779 case e1000_82571: 2909 case e1000_82571:
2780 case e1000_82572: 2910 case e1000_82572:
2781 case e1000_82573: 2911 case e1000_82573:
2912 case e1000_ich8lan:
2782 pull_size = min((unsigned int)4, skb->data_len); 2913 pull_size = min((unsigned int)4, skb->data_len);
2783 if (!__pskb_pull_tail(skb, pull_size)) { 2914 if (!__pskb_pull_tail(skb, pull_size)) {
2784 printk(KERN_ERR 2915 DPRINTK(DRV, ERR,
2785 "__pskb_pull_tail failed.\n"); 2916 "__pskb_pull_tail failed.\n");
2786 dev_kfree_skb_any(skb); 2917 dev_kfree_skb_any(skb);
2787 return NETDEV_TX_OK; 2918 return NETDEV_TX_OK;
@@ -2806,8 +2937,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2806 2937
2807#ifdef NETIF_F_TSO 2938#ifdef NETIF_F_TSO
2808 /* Controller Erratum workaround */ 2939 /* Controller Erratum workaround */
2809 if (!skb->data_len && tx_ring->last_tx_tso && 2940 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
2810 !skb_shinfo(skb)->gso_size)
2811 count++; 2941 count++;
2812#endif 2942#endif
2813 2943
@@ -2919,8 +3049,7 @@ e1000_reset_task(struct net_device *netdev)
2919{ 3049{
2920 struct e1000_adapter *adapter = netdev_priv(netdev); 3050 struct e1000_adapter *adapter = netdev_priv(netdev);
2921 3051
2922 e1000_down(adapter); 3052 e1000_reinit_locked(adapter);
2923 e1000_up(adapter);
2924} 3053}
2925 3054
2926/** 3055/**
@@ -2964,6 +3093,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
2964 /* Adapter-specific max frame size limits. */ 3093 /* Adapter-specific max frame size limits. */
2965 switch (adapter->hw.mac_type) { 3094 switch (adapter->hw.mac_type) {
2966 case e1000_undefined ... e1000_82542_rev2_1: 3095 case e1000_undefined ... e1000_82542_rev2_1:
3096 case e1000_ich8lan:
2967 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { 3097 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
2968 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); 3098 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
2969 return -EINVAL; 3099 return -EINVAL;
@@ -3026,10 +3156,8 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3026 3156
3027 netdev->mtu = new_mtu; 3157 netdev->mtu = new_mtu;
3028 3158
3029 if (netif_running(netdev)) { 3159 if (netif_running(netdev))
3030 e1000_down(adapter); 3160 e1000_reinit_locked(adapter);
3031 e1000_up(adapter);
3032 }
3033 3161
3034 adapter->hw.max_frame_size = max_frame; 3162 adapter->hw.max_frame_size = max_frame;
3035 3163
@@ -3074,12 +3202,15 @@ e1000_update_stats(struct e1000_adapter *adapter)
3074 adapter->stats.bprc += E1000_READ_REG(hw, BPRC); 3202 adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
3075 adapter->stats.mprc += E1000_READ_REG(hw, MPRC); 3203 adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
3076 adapter->stats.roc += E1000_READ_REG(hw, ROC); 3204 adapter->stats.roc += E1000_READ_REG(hw, ROC);
3205
3206 if (adapter->hw.mac_type != e1000_ich8lan) {
3077 adapter->stats.prc64 += E1000_READ_REG(hw, PRC64); 3207 adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
3078 adapter->stats.prc127 += E1000_READ_REG(hw, PRC127); 3208 adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
3079 adapter->stats.prc255 += E1000_READ_REG(hw, PRC255); 3209 adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
3080 adapter->stats.prc511 += E1000_READ_REG(hw, PRC511); 3210 adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
3081 adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023); 3211 adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
3082 adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522); 3212 adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
3213 }
3083 3214
3084 adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS); 3215 adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
3085 adapter->stats.mpc += E1000_READ_REG(hw, MPC); 3216 adapter->stats.mpc += E1000_READ_REG(hw, MPC);
@@ -3107,12 +3238,16 @@ e1000_update_stats(struct e1000_adapter *adapter)
3107 adapter->stats.totl += E1000_READ_REG(hw, TOTL); 3238 adapter->stats.totl += E1000_READ_REG(hw, TOTL);
3108 adapter->stats.toth += E1000_READ_REG(hw, TOTH); 3239 adapter->stats.toth += E1000_READ_REG(hw, TOTH);
3109 adapter->stats.tpr += E1000_READ_REG(hw, TPR); 3240 adapter->stats.tpr += E1000_READ_REG(hw, TPR);
3241
3242 if (adapter->hw.mac_type != e1000_ich8lan) {
3110 adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64); 3243 adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
3111 adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127); 3244 adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
3112 adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255); 3245 adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
3113 adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511); 3246 adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
3114 adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023); 3247 adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
3115 adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522); 3248 adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
3249 }
3250
3116 adapter->stats.mptc += E1000_READ_REG(hw, MPTC); 3251 adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
3117 adapter->stats.bptc += E1000_READ_REG(hw, BPTC); 3252 adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
3118 3253
@@ -3134,6 +3269,8 @@ e1000_update_stats(struct e1000_adapter *adapter)
3134 if (hw->mac_type > e1000_82547_rev_2) { 3269 if (hw->mac_type > e1000_82547_rev_2) {
3135 adapter->stats.iac += E1000_READ_REG(hw, IAC); 3270 adapter->stats.iac += E1000_READ_REG(hw, IAC);
3136 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); 3271 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
3272
3273 if (adapter->hw.mac_type != e1000_ich8lan) {
3137 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); 3274 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
3138 adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC); 3275 adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
3139 adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC); 3276 adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
@@ -3141,6 +3278,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
3141 adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC); 3278 adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
3142 adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC); 3279 adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
3143 adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC); 3280 adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
3281 }
3144 } 3282 }
3145 3283
3146 /* Fill out the OS statistics structure */ 3284 /* Fill out the OS statistics structure */
@@ -3547,7 +3685,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3547 /* All receives must fit into a single buffer */ 3685 /* All receives must fit into a single buffer */
3548 E1000_DBG("%s: Receive packet consumed multiple" 3686 E1000_DBG("%s: Receive packet consumed multiple"
3549 " buffers\n", netdev->name); 3687 " buffers\n", netdev->name);
3550 dev_kfree_skb_irq(skb); 3688 /* recycle */
 3689 buffer_info->skb = skb;
3551 goto next_desc; 3690 goto next_desc;
3552 } 3691 }
3553 3692
@@ -3675,7 +3814,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3675 buffer_info = &rx_ring->buffer_info[i]; 3814 buffer_info = &rx_ring->buffer_info[i];
3676 3815
3677 while (staterr & E1000_RXD_STAT_DD) { 3816 while (staterr & E1000_RXD_STAT_DD) {
3678 buffer_info = &rx_ring->buffer_info[i];
3679 ps_page = &rx_ring->ps_page[i]; 3817 ps_page = &rx_ring->ps_page[i];
3680 ps_page_dma = &rx_ring->ps_page_dma[i]; 3818 ps_page_dma = &rx_ring->ps_page_dma[i];
3681#ifdef CONFIG_E1000_NAPI 3819#ifdef CONFIG_E1000_NAPI
@@ -4180,10 +4318,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4180 return retval; 4318 return retval;
4181 } 4319 }
4182 } 4320 }
4183 if (netif_running(adapter->netdev)) { 4321 if (netif_running(adapter->netdev))
4184 e1000_down(adapter); 4322 e1000_reinit_locked(adapter);
4185 e1000_up(adapter); 4323 else
4186 } else
4187 e1000_reset(adapter); 4324 e1000_reset(adapter);
4188 break; 4325 break;
4189 case M88E1000_PHY_SPEC_CTRL: 4326 case M88E1000_PHY_SPEC_CTRL:
@@ -4200,10 +4337,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4200 case PHY_CTRL: 4337 case PHY_CTRL:
4201 if (mii_reg & MII_CR_POWER_DOWN) 4338 if (mii_reg & MII_CR_POWER_DOWN)
4202 break; 4339 break;
4203 if (netif_running(adapter->netdev)) { 4340 if (netif_running(adapter->netdev))
4204 e1000_down(adapter); 4341 e1000_reinit_locked(adapter);
4205 e1000_up(adapter); 4342 else
4206 } else
4207 e1000_reset(adapter); 4343 e1000_reset(adapter);
4208 break; 4344 break;
4209 } 4345 }
@@ -4277,18 +4413,21 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
4277 ctrl |= E1000_CTRL_VME; 4413 ctrl |= E1000_CTRL_VME;
4278 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); 4414 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
4279 4415
4416 if (adapter->hw.mac_type != e1000_ich8lan) {
4280 /* enable VLAN receive filtering */ 4417 /* enable VLAN receive filtering */
4281 rctl = E1000_READ_REG(&adapter->hw, RCTL); 4418 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4282 rctl |= E1000_RCTL_VFE; 4419 rctl |= E1000_RCTL_VFE;
4283 rctl &= ~E1000_RCTL_CFIEN; 4420 rctl &= ~E1000_RCTL_CFIEN;
4284 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 4421 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4285 e1000_update_mng_vlan(adapter); 4422 e1000_update_mng_vlan(adapter);
4423 }
4286 } else { 4424 } else {
4287 /* disable VLAN tag insert/strip */ 4425 /* disable VLAN tag insert/strip */
4288 ctrl = E1000_READ_REG(&adapter->hw, CTRL); 4426 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
4289 ctrl &= ~E1000_CTRL_VME; 4427 ctrl &= ~E1000_CTRL_VME;
4290 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); 4428 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
4291 4429
4430 if (adapter->hw.mac_type != e1000_ich8lan) {
4292 /* disable VLAN filtering */ 4431 /* disable VLAN filtering */
4293 rctl = E1000_READ_REG(&adapter->hw, RCTL); 4432 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4294 rctl &= ~E1000_RCTL_VFE; 4433 rctl &= ~E1000_RCTL_VFE;
@@ -4297,6 +4436,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
4297 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 4436 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4298 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 4437 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4299 } 4438 }
4439 }
4300 } 4440 }
4301 4441
4302 e1000_irq_enable(adapter); 4442 e1000_irq_enable(adapter);
@@ -4458,12 +4598,16 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4458 struct e1000_adapter *adapter = netdev_priv(netdev); 4598 struct e1000_adapter *adapter = netdev_priv(netdev);
4459 uint32_t ctrl, ctrl_ext, rctl, manc, status; 4599 uint32_t ctrl, ctrl_ext, rctl, manc, status;
4460 uint32_t wufc = adapter->wol; 4600 uint32_t wufc = adapter->wol;
4601#ifdef CONFIG_PM
4461 int retval = 0; 4602 int retval = 0;
4603#endif
4462 4604
4463 netif_device_detach(netdev); 4605 netif_device_detach(netdev);
4464 4606
4465 if (netif_running(netdev)) 4607 if (netif_running(netdev)) {
4608 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
4466 e1000_down(adapter); 4609 e1000_down(adapter);
4610 }
4467 4611
4468#ifdef CONFIG_PM 4612#ifdef CONFIG_PM
4469 /* Implement our own version of pci_save_state(pdev) because pci- 4613 /* Implement our own version of pci_save_state(pdev) because pci-
@@ -4521,7 +4665,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4521 pci_enable_wake(pdev, PCI_D3cold, 0); 4665 pci_enable_wake(pdev, PCI_D3cold, 0);
4522 } 4666 }
4523 4667
4668 /* FIXME: this code is incorrect for PCI Express */
4524 if (adapter->hw.mac_type >= e1000_82540 && 4669 if (adapter->hw.mac_type >= e1000_82540 &&
4670 adapter->hw.mac_type != e1000_ich8lan &&
4525 adapter->hw.media_type == e1000_media_type_copper) { 4671 adapter->hw.media_type == e1000_media_type_copper) {
4526 manc = E1000_READ_REG(&adapter->hw, MANC); 4672 manc = E1000_READ_REG(&adapter->hw, MANC);
4527 if (manc & E1000_MANC_SMBUS_EN) { 4673 if (manc & E1000_MANC_SMBUS_EN) {
@@ -4532,6 +4678,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4532 } 4678 }
4533 } 4679 }
4534 4680
4681 if (adapter->hw.phy_type == e1000_phy_igp_3)
4682 e1000_phy_powerdown_workaround(&adapter->hw);
4683
4535 /* Release control of h/w to f/w. If f/w is AMT enabled, this 4684 /* Release control of h/w to f/w. If f/w is AMT enabled, this
4536 * would have already happened in close and is redundant. */ 4685 * would have already happened in close and is redundant. */
4537 e1000_release_hw_control(adapter); 4686 e1000_release_hw_control(adapter);
@@ -4567,7 +4716,9 @@ e1000_resume(struct pci_dev *pdev)
4567 4716
4568 netif_device_attach(netdev); 4717 netif_device_attach(netdev);
4569 4718
4719 /* FIXME: this code is incorrect for PCI Express */
4570 if (adapter->hw.mac_type >= e1000_82540 && 4720 if (adapter->hw.mac_type >= e1000_82540 &&
4721 adapter->hw.mac_type != e1000_ich8lan &&
4571 adapter->hw.media_type == e1000_media_type_copper) { 4722 adapter->hw.media_type == e1000_media_type_copper) {
4572 manc = E1000_READ_REG(&adapter->hw, MANC); 4723 manc = E1000_READ_REG(&adapter->hw, MANC);
4573 manc &= ~(E1000_MANC_ARP_EN); 4724 manc &= ~(E1000_MANC_ARP_EN);
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 048d052be29d..2d3e8b06cab0 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -127,4 +127,17 @@ typedef enum {
127 127
128#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS) 128#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS)
129 129
130#define E1000_WRITE_ICH8_REG(a, reg, value) ( \
131 writel((value), ((a)->flash_address + reg)))
132
133#define E1000_READ_ICH8_REG(a, reg) ( \
134 readl((a)->flash_address + reg))
135
136#define E1000_WRITE_ICH8_REG16(a, reg, value) ( \
137 writew((value), ((a)->flash_address + reg)))
138
139#define E1000_READ_ICH8_REG16(a, reg) ( \
140 readw((a)->flash_address + reg))
141
142
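Editor's note: these wrappers issue plain readl/writel/readw/writew MMIO accesses against (a)->flash_address, the mapping that e1000_probe() creates earlier in this diff by ioremap()ing PCI BAR 1 on ICH8 parts. A minimal illustrative user, reusing the HSFSTS union from e1000_hw.h (the function name and the ICH8_FLASH_HSFSTS define are assumptions here):

static inline boolean_t ich8_flash_cycle_done(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;

	hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
	return hsfsts.hsf_status.flcdone;
}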
130#endif /* _E1000_OSDEP_H_ */ 143#endif /* _E1000_OSDEP_H_ */
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index e55f8969a0fb..0ef413172c68 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -45,6 +45,16 @@
45 */ 45 */
46 46
47#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } 47#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
48/* Module Parameters are always initialized to -1, so that the driver
 49 * can tell the difference between no user-specified value and the
50 * user asking for the default value.
51 * The true default values are loaded in when e1000_check_options is called.
52 *
53 * This is a GCC extension to ANSI C.
54 * See the item "Labeled Elements in Initializers" in the section
55 * "Extensions to the C Language Family" of the GCC documentation.
56 */
57
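Editor's note: the "labeled elements" the comment above refers to is GCC's range designator for array initializers. A standalone illustration with hypothetical names (not taken from the driver) of the construct, and of how the per-board arrays are then filled from comma-separated module parameters:

#define MAX_NIC 32
/* every slot 0..MAX_NIC starts at -1, i.e. "not set by the user" */
static int tx_descriptors[MAX_NIC + 1] = { [0 ... MAX_NIC] = -1 };

/*
 * With module_param_array(), loading the driver with e.g.
 * "modprobe e1000 TxDescriptors=256,4096" overwrites entries 0 and 1
 * and leaves the rest at -1, which is how e1000_check_options()
 * distinguishes "unset" from "explicitly set to the default".
 */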
48#define E1000_PARAM(X, desc) \ 58#define E1000_PARAM(X, desc) \
49 static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ 59 static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
50 static int num_##X = 0; \ 60 static int num_##X = 0; \
@@ -183,6 +193,24 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
183 193
184E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); 194E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
185 195
196/* Enable Smart Power Down of the PHY
197 *
198 * Valid Range: 0, 1
199 *
200 * Default Value: 0 (disabled)
201 */
202
203E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
204
205/* Enable Kumeran Lock Loss workaround
206 *
207 * Valid Range: 0, 1
208 *
209 * Default Value: 1 (enabled)
210 */
211
212E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
213
186#define AUTONEG_ADV_DEFAULT 0x2F 214#define AUTONEG_ADV_DEFAULT 0x2F
187#define AUTONEG_ADV_MASK 0x2F 215#define AUTONEG_ADV_MASK 0x2F
188#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL 216#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
@@ -296,6 +324,7 @@ e1000_check_options(struct e1000_adapter *adapter)
296 DPRINTK(PROBE, NOTICE, 324 DPRINTK(PROBE, NOTICE,
297 "Warning: no configuration for board #%i\n", bd); 325 "Warning: no configuration for board #%i\n", bd);
298 DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); 326 DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
327 bd = E1000_MAX_NIC;
299 } 328 }
300 329
301 { /* Transmit Descriptor Count */ 330 { /* Transmit Descriptor Count */
@@ -313,14 +342,9 @@ e1000_check_options(struct e1000_adapter *adapter)
313 opt.arg.r.max = mac_type < e1000_82544 ? 342 opt.arg.r.max = mac_type < e1000_82544 ?
314 E1000_MAX_TXD : E1000_MAX_82544_TXD; 343 E1000_MAX_TXD : E1000_MAX_82544_TXD;
315 344
316 if (num_TxDescriptors > bd) { 345 tx_ring->count = TxDescriptors[bd];
317 tx_ring->count = TxDescriptors[bd]; 346 e1000_validate_option(&tx_ring->count, &opt, adapter);
318 e1000_validate_option(&tx_ring->count, &opt, adapter); 347 E1000_ROUNDUP(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);
319 E1000_ROUNDUP(tx_ring->count,
320 REQ_TX_DESCRIPTOR_MULTIPLE);
321 } else {
322 tx_ring->count = opt.def;
323 }
324 for (i = 0; i < adapter->num_tx_queues; i++) 348 for (i = 0; i < adapter->num_tx_queues; i++)
325 tx_ring[i].count = tx_ring->count; 349 tx_ring[i].count = tx_ring->count;
326 } 350 }
@@ -339,14 +363,9 @@ e1000_check_options(struct e1000_adapter *adapter)
339 opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD : 363 opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
340 E1000_MAX_82544_RXD; 364 E1000_MAX_82544_RXD;
341 365
342 if (num_RxDescriptors > bd) { 366 rx_ring->count = RxDescriptors[bd];
343 rx_ring->count = RxDescriptors[bd]; 367 e1000_validate_option(&rx_ring->count, &opt, adapter);
344 e1000_validate_option(&rx_ring->count, &opt, adapter); 368 E1000_ROUNDUP(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE);
345 E1000_ROUNDUP(rx_ring->count,
346 REQ_RX_DESCRIPTOR_MULTIPLE);
347 } else {
348 rx_ring->count = opt.def;
349 }
350 for (i = 0; i < adapter->num_rx_queues; i++) 369 for (i = 0; i < adapter->num_rx_queues; i++)
351 rx_ring[i].count = rx_ring->count; 370 rx_ring[i].count = rx_ring->count;
352 } 371 }
@@ -358,13 +377,9 @@ e1000_check_options(struct e1000_adapter *adapter)
358 .def = OPTION_ENABLED 377 .def = OPTION_ENABLED
359 }; 378 };
360 379
361 if (num_XsumRX > bd) { 380 int rx_csum = XsumRX[bd];
362 int rx_csum = XsumRX[bd]; 381 e1000_validate_option(&rx_csum, &opt, adapter);
363 e1000_validate_option(&rx_csum, &opt, adapter); 382 adapter->rx_csum = rx_csum;
364 adapter->rx_csum = rx_csum;
365 } else {
366 adapter->rx_csum = opt.def;
367 }
368 } 383 }
369 { /* Flow Control */ 384 { /* Flow Control */
370 385
@@ -384,13 +399,9 @@ e1000_check_options(struct e1000_adapter *adapter)
384 .p = fc_list }} 399 .p = fc_list }}
385 }; 400 };
386 401
387 if (num_FlowControl > bd) { 402 int fc = FlowControl[bd];
388 int fc = FlowControl[bd]; 403 e1000_validate_option(&fc, &opt, adapter);
389 e1000_validate_option(&fc, &opt, adapter); 404 adapter->hw.fc = adapter->hw.original_fc = fc;
390 adapter->hw.fc = adapter->hw.original_fc = fc;
391 } else {
392 adapter->hw.fc = adapter->hw.original_fc = opt.def;
393 }
394 } 405 }
395 { /* Transmit Interrupt Delay */ 406 { /* Transmit Interrupt Delay */
396 struct e1000_option opt = { 407 struct e1000_option opt = {
@@ -402,13 +413,8 @@ e1000_check_options(struct e1000_adapter *adapter)
402 .max = MAX_TXDELAY }} 413 .max = MAX_TXDELAY }}
403 }; 414 };
404 415
405 if (num_TxIntDelay > bd) { 416 adapter->tx_int_delay = TxIntDelay[bd];
406 adapter->tx_int_delay = TxIntDelay[bd]; 417 e1000_validate_option(&adapter->tx_int_delay, &opt, adapter);
407 e1000_validate_option(&adapter->tx_int_delay, &opt,
408 adapter);
409 } else {
410 adapter->tx_int_delay = opt.def;
411 }
412 } 418 }
413 { /* Transmit Absolute Interrupt Delay */ 419 { /* Transmit Absolute Interrupt Delay */
414 struct e1000_option opt = { 420 struct e1000_option opt = {
@@ -420,13 +426,9 @@ e1000_check_options(struct e1000_adapter *adapter)
420 .max = MAX_TXABSDELAY }} 426 .max = MAX_TXABSDELAY }}
421 }; 427 };
422 428
423 if (num_TxAbsIntDelay > bd) { 429 adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
424 adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; 430 e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
425 e1000_validate_option(&adapter->tx_abs_int_delay, &opt, 431 adapter);
426 adapter);
427 } else {
428 adapter->tx_abs_int_delay = opt.def;
429 }
430 } 432 }
431 { /* Receive Interrupt Delay */ 433 { /* Receive Interrupt Delay */
432 struct e1000_option opt = { 434 struct e1000_option opt = {
@@ -438,13 +440,8 @@ e1000_check_options(struct e1000_adapter *adapter)
438 .max = MAX_RXDELAY }} 440 .max = MAX_RXDELAY }}
439 }; 441 };
440 442
441 if (num_RxIntDelay > bd) { 443 adapter->rx_int_delay = RxIntDelay[bd];
442 adapter->rx_int_delay = RxIntDelay[bd]; 444 e1000_validate_option(&adapter->rx_int_delay, &opt, adapter);
443 e1000_validate_option(&adapter->rx_int_delay, &opt,
444 adapter);
445 } else {
446 adapter->rx_int_delay = opt.def;
447 }
448 } 445 }
449 { /* Receive Absolute Interrupt Delay */ 446 { /* Receive Absolute Interrupt Delay */
450 struct e1000_option opt = { 447 struct e1000_option opt = {
@@ -456,13 +453,9 @@ e1000_check_options(struct e1000_adapter *adapter)
456 .max = MAX_RXABSDELAY }} 453 .max = MAX_RXABSDELAY }}
457 }; 454 };
458 455
459 if (num_RxAbsIntDelay > bd) { 456 adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
460 adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; 457 e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
461 e1000_validate_option(&adapter->rx_abs_int_delay, &opt, 458 adapter);
462 adapter);
463 } else {
464 adapter->rx_abs_int_delay = opt.def;
465 }
466 } 459 }
467 { /* Interrupt Throttling Rate */ 460 { /* Interrupt Throttling Rate */
468 struct e1000_option opt = { 461 struct e1000_option opt = {
@@ -474,26 +467,44 @@ e1000_check_options(struct e1000_adapter *adapter)
474 .max = MAX_ITR }} 467 .max = MAX_ITR }}
475 }; 468 };
476 469
477 if (num_InterruptThrottleRate > bd) { 470 adapter->itr = InterruptThrottleRate[bd];
478 adapter->itr = InterruptThrottleRate[bd]; 471 switch (adapter->itr) {
479 switch (adapter->itr) { 472 case 0:
480 case 0: 473 DPRINTK(PROBE, INFO, "%s turned off\n", opt.name);
481 DPRINTK(PROBE, INFO, "%s turned off\n", 474 break;
482 opt.name); 475 case 1:
483 break; 476 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
484 case 1: 477 opt.name);
485 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", 478 break;
486 opt.name); 479 default:
487 break; 480 e1000_validate_option(&adapter->itr, &opt, adapter);
488 default: 481 break;
489 e1000_validate_option(&adapter->itr, &opt,
490 adapter);
491 break;
492 }
493 } else {
494 adapter->itr = opt.def;
495 } 482 }
496 } 483 }
484 { /* Smart Power Down */
485 struct e1000_option opt = {
486 .type = enable_option,
487 .name = "PHY Smart Power Down",
488 .err = "defaulting to Disabled",
489 .def = OPTION_DISABLED
490 };
491
492 int spd = SmartPowerDownEnable[bd];
493 e1000_validate_option(&spd, &opt, adapter);
494 adapter->smart_power_down = spd;
495 }
496 { /* Kumeran Lock Loss Workaround */
497 struct e1000_option opt = {
498 .type = enable_option,
499 .name = "Kumeran Lock Loss Workaround",
500 .err = "defaulting to Enabled",
501 .def = OPTION_ENABLED
502 };
503
504 int kmrn_lock_loss = KumeranLockLoss[bd];
505 e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
506 adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
507 }
497 508
498 switch (adapter->hw.media_type) { 509 switch (adapter->hw.media_type) {
499 case e1000_media_type_fiber: 510 case e1000_media_type_fiber:
@@ -519,17 +530,18 @@ static void __devinit
519e1000_check_fiber_options(struct e1000_adapter *adapter) 530e1000_check_fiber_options(struct e1000_adapter *adapter)
520{ 531{
521 int bd = adapter->bd_number; 532 int bd = adapter->bd_number;
522 if (num_Speed > bd) { 533 bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
534 if ((Speed[bd] != OPTION_UNSET)) {
523 DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " 535 DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
524 "parameter ignored\n"); 536 "parameter ignored\n");
525 } 537 }
526 538
527 if (num_Duplex > bd) { 539 if ((Duplex[bd] != OPTION_UNSET)) {
528 DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " 540 DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
529 "parameter ignored\n"); 541 "parameter ignored\n");
530 } 542 }
531 543
532 if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { 544 if ((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) {
533 DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " 545 DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
534 "not valid for fiber adapters, " 546 "not valid for fiber adapters, "
535 "parameter ignored\n"); 547 "parameter ignored\n");
@@ -548,6 +560,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
548{ 560{
549 int speed, dplx, an; 561 int speed, dplx, an;
550 int bd = adapter->bd_number; 562 int bd = adapter->bd_number;
563 bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
551 564
552 { /* Speed */ 565 { /* Speed */
553 struct e1000_opt_list speed_list[] = {{ 0, "" }, 566 struct e1000_opt_list speed_list[] = {{ 0, "" },
@@ -564,12 +577,8 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
564 .p = speed_list }} 577 .p = speed_list }}
565 }; 578 };
566 579
567 if (num_Speed > bd) { 580 speed = Speed[bd];
568 speed = Speed[bd]; 581 e1000_validate_option(&speed, &opt, adapter);
569 e1000_validate_option(&speed, &opt, adapter);
570 } else {
571 speed = opt.def;
572 }
573 } 582 }
574 { /* Duplex */ 583 { /* Duplex */
575 struct e1000_opt_list dplx_list[] = {{ 0, "" }, 584 struct e1000_opt_list dplx_list[] = {{ 0, "" },
@@ -591,15 +600,11 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
591 "Speed/Duplex/AutoNeg parameter ignored.\n"); 600 "Speed/Duplex/AutoNeg parameter ignored.\n");
592 return; 601 return;
593 } 602 }
594 if (num_Duplex > bd) { 603 dplx = Duplex[bd];
595 dplx = Duplex[bd]; 604 e1000_validate_option(&dplx, &opt, adapter);
596 e1000_validate_option(&dplx, &opt, adapter);
597 } else {
598 dplx = opt.def;
599 }
600 } 605 }
601 606
602 if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { 607 if (AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) {
603 DPRINTK(PROBE, INFO, 608 DPRINTK(PROBE, INFO,
604 "AutoNeg specified along with Speed or Duplex, " 609 "AutoNeg specified along with Speed or Duplex, "
605 "parameter ignored\n"); 610 "parameter ignored\n");
@@ -648,19 +653,15 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
648 .p = an_list }} 653 .p = an_list }}
649 }; 654 };
650 655
651 if (num_AutoNeg > bd) { 656 an = AutoNeg[bd];
652 an = AutoNeg[bd]; 657 e1000_validate_option(&an, &opt, adapter);
653 e1000_validate_option(&an, &opt, adapter);
654 } else {
655 an = opt.def;
656 }
657 adapter->hw.autoneg_advertised = an; 658 adapter->hw.autoneg_advertised = an;
658 } 659 }
659 660
660 switch (speed + dplx) { 661 switch (speed + dplx) {
661 case 0: 662 case 0:
662 adapter->hw.autoneg = adapter->fc_autoneg = 1; 663 adapter->hw.autoneg = adapter->fc_autoneg = 1;
663 if ((num_Speed > bd) && (speed != 0 || dplx != 0)) 664 if (Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET)
664 DPRINTK(PROBE, INFO, 665 DPRINTK(PROBE, INFO,
665 "Speed and duplex autonegotiation enabled\n"); 666 "Speed and duplex autonegotiation enabled\n");
666 break; 667 break;
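
The e1000_param.c changes above drop the num_<Option> > bd guards: the option arrays are pre-filled with OPTION_UNSET, the board index is clamped to E1000_MAX_NIC, and e1000_validate_option() is left to apply the default when an entry is unset. A minimal sketch of that per-board module-option pattern, with hypothetical names rather than the driver's real macros:

    /* Hedged sketch: MAX_NIC and OPTION_UNSET here are illustrative, not e1000's. */
    #define MAX_NIC      32
    #define OPTION_UNSET -1

    static int Speed[MAX_NIC + 1] = { [0 ... MAX_NIC] = OPTION_UNSET };

    static void check_speed(int bd)
    {
            int speed;

            bd = bd > MAX_NIC ? MAX_NIC : bd;   /* never index past the array */
            speed = Speed[bd];                  /* OPTION_UNSET if the user gave nothing */
            if (speed == OPTION_UNSET)
                    speed = 0;                  /* default: autonegotiate */
            /* ...range-check and store into the adapter... */
    }
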
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 037d870712ff..ad81ec68f887 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1495,7 +1495,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1495 np->tx_skbuff[nr] = skb; 1495 np->tx_skbuff[nr] = skb;
1496 1496
1497#ifdef NETIF_F_TSO 1497#ifdef NETIF_F_TSO
1498 if (skb_shinfo(skb)->gso_size) 1498 if (skb_is_gso(skb))
1499 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 1499 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
1500 else 1500 else
1501#endif 1501#endif
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index bf1fca5a3fa0..e3c8cd5eca67 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -146,7 +146,7 @@ static int __init ali_ircc_init(void)
146{ 146{
147 ali_chip_t *chip; 147 ali_chip_t *chip;
148 chipio_t info; 148 chipio_t info;
149 int ret = -ENODEV; 149 int ret;
150 int cfg, cfg_base; 150 int cfg, cfg_base;
151 int reg, revision; 151 int reg, revision;
152 int i = 0; 152 int i = 0;
@@ -160,6 +160,7 @@ static int __init ali_ircc_init(void)
160 return ret; 160 return ret;
161 } 161 }
162 162
163 ret = -ENODEV;
163 164
164 /* Probe for all the ALi chipsets we know about */ 165 /* Probe for all the ALi chipsets we know about */
165 for (chip= chips; chip->name; chip++, i++) 166 for (chip= chips; chip->name; chip++, i++)
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index b91e082483f6..7eb08d929139 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1173,7 +1173,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1173 uint16_t ipcse, tucse, mss; 1173 uint16_t ipcse, tucse, mss;
1174 int err; 1174 int err;
1175 1175
1176 if(likely(skb_shinfo(skb)->gso_size)) { 1176 if (likely(skb_is_gso(skb))) {
1177 if (skb_header_cloned(skb)) { 1177 if (skb_header_cloned(skb)) {
1178 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1178 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1179 if (err) 1179 if (err)
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 43fef7de8cb9..997cbce9af6e 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
139#endif 139#endif
140 140
141#ifdef LOOPBACK_TSO 141#ifdef LOOPBACK_TSO
142 if (skb_shinfo(skb)->gso_size) { 142 if (skb_is_gso(skb)) {
143 BUG_ON(skb->protocol != htons(ETH_P_IP)); 143 BUG_ON(skb->protocol != htons(ETH_P_IP));
144 BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); 144 BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
145 145
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index f4c8fd373b9b..ee1de971a712 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2116,7 +2116,7 @@ abort_linearize:
2116 } 2116 }
2117 idx = (idx + 1) & tx->mask; 2117 idx = (idx + 1) & tx->mask;
2118 } while (idx != last_idx); 2118 } while (idx != last_idx);
2119 if (skb_shinfo(skb)->gso_size) { 2119 if (skb_is_gso(skb)) {
2120 printk(KERN_ERR 2120 printk(KERN_ERR
2121 "myri10ge: %s: TSO but wanted to linearize?!?!?\n", 2121 "myri10ge: %s: TSO but wanted to linearize?!?!?\n",
2122 mgp->dev->name); 2122 mgp->dev->name);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 418f169a6a31..31093760aa1e 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1159,7 +1159,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
1159 count = sizeof(dma_addr_t) / sizeof(u32); 1159 count = sizeof(dma_addr_t) / sizeof(u32);
1160 count += skb_shinfo(skb)->nr_frags * count; 1160 count += skb_shinfo(skb)->nr_frags * count;
1161 1161
1162 if (skb_shinfo(skb)->gso_size) 1162 if (skb_is_gso(skb))
1163 ++count; 1163 ++count;
1164 1164
1165 if (skb->ip_summed == CHECKSUM_HW) 1165 if (skb->ip_summed == CHECKSUM_HW)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f645921aff8b..ce6f3be86da0 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -10078,6 +10078,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
10078 static struct pci_device_id write_reorder_chipsets[] = { 10078 static struct pci_device_id write_reorder_chipsets[] = {
10079 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 10079 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10080 PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 10080 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10081 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10082 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10081 { PCI_DEVICE(PCI_VENDOR_ID_VIA, 10083 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10082 PCI_DEVICE_ID_VIA_8385_0) }, 10084 PCI_DEVICE_ID_VIA_8385_0) },
10083 { }, 10085 { },
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 063816f2b11e..4103c37172f9 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -805,7 +805,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
805 * If problems develop with TSO, check this first. 805 * If problems develop with TSO, check this first.
806 */ 806 */
807 numDesc = skb_shinfo(skb)->nr_frags + 1; 807 numDesc = skb_shinfo(skb)->nr_frags + 1;
808 if(skb_tso_size(skb)) 808 if (skb_is_gso(skb))
809 numDesc++; 809 numDesc++;
810 810
811 /* When checking for free space in the ring, we need to also 811 /* When checking for free space in the ring, we need to also
@@ -845,7 +845,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
845 TYPHOON_TX_PF_VLAN_TAG_SHIFT); 845 TYPHOON_TX_PF_VLAN_TAG_SHIFT);
846 } 846 }
847 847
848 if(skb_tso_size(skb)) { 848 if (skb_is_gso(skb)) {
849 first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT; 849 first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
850 first_txd->numDesc++; 850 first_txd->numDesc++;
851 851
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 222a1cc4aa28..d305d212ad6c 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -77,7 +77,7 @@ config HOTPLUG_PCI_IBM
77 77
78config HOTPLUG_PCI_ACPI 78config HOTPLUG_PCI_ACPI
79 tristate "ACPI PCI Hotplug driver" 79 tristate "ACPI PCI Hotplug driver"
80 depends on ACPI && HOTPLUG_PCI 80 depends on ACPI_DOCK && HOTPLUG_PCI
81 help 81 help
82 Say Y here if you have a system that supports PCI Hotplug using 82 Say Y here if you have a system that supports PCI Hotplug using
83 ACPI. 83 ACPI.
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 8e8963f15731..329e12c1fae4 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -4457,7 +4457,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
4457 queue = card->qdio.out_qs 4457 queue = card->qdio.out_qs
4458 [qeth_get_priority_queue(card, skb, ipv, cast_type)]; 4458 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
4459 4459
4460 if (skb_shinfo(skb)->gso_size) 4460 if (skb_is_gso(skb))
4461 large_send = card->options.large_send; 4461 large_send = card->options.large_send;
4462 4462
4463 /*are we able to do TSO ? If so ,prepare and send it from here */ 4463 /*are we able to do TSO ? If so ,prepare and send it from here */
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 4f061fa73794..51a166242522 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -46,8 +46,8 @@ static __inline__ void atomic_add(int i, atomic_t *v)
46{ 46{
47 __asm__ __volatile__( 47 __asm__ __volatile__(
48 LOCK_PREFIX "addl %1,%0" 48 LOCK_PREFIX "addl %1,%0"
49 :"=m" (v->counter) 49 :"+m" (v->counter)
50 :"ir" (i), "m" (v->counter)); 50 :"ir" (i));
51} 51}
52 52
53/** 53/**
@@ -61,8 +61,8 @@ static __inline__ void atomic_sub(int i, atomic_t *v)
61{ 61{
62 __asm__ __volatile__( 62 __asm__ __volatile__(
63 LOCK_PREFIX "subl %1,%0" 63 LOCK_PREFIX "subl %1,%0"
64 :"=m" (v->counter) 64 :"+m" (v->counter)
65 :"ir" (i), "m" (v->counter)); 65 :"ir" (i));
66} 66}
67 67
68/** 68/**
@@ -80,8 +80,8 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
80 80
81 __asm__ __volatile__( 81 __asm__ __volatile__(
82 LOCK_PREFIX "subl %2,%0; sete %1" 82 LOCK_PREFIX "subl %2,%0; sete %1"
83 :"=m" (v->counter), "=qm" (c) 83 :"+m" (v->counter), "=qm" (c)
84 :"ir" (i), "m" (v->counter) : "memory"); 84 :"ir" (i) : "memory");
85 return c; 85 return c;
86} 86}
87 87
@@ -95,8 +95,7 @@ static __inline__ void atomic_inc(atomic_t *v)
95{ 95{
96 __asm__ __volatile__( 96 __asm__ __volatile__(
97 LOCK_PREFIX "incl %0" 97 LOCK_PREFIX "incl %0"
98 :"=m" (v->counter) 98 :"+m" (v->counter));
99 :"m" (v->counter));
100} 99}
101 100
102/** 101/**
@@ -109,8 +108,7 @@ static __inline__ void atomic_dec(atomic_t *v)
109{ 108{
110 __asm__ __volatile__( 109 __asm__ __volatile__(
111 LOCK_PREFIX "decl %0" 110 LOCK_PREFIX "decl %0"
112 :"=m" (v->counter) 111 :"+m" (v->counter));
113 :"m" (v->counter));
114} 112}
115 113
116/** 114/**
@@ -127,8 +125,8 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
127 125
128 __asm__ __volatile__( 126 __asm__ __volatile__(
129 LOCK_PREFIX "decl %0; sete %1" 127 LOCK_PREFIX "decl %0; sete %1"
130 :"=m" (v->counter), "=qm" (c) 128 :"+m" (v->counter), "=qm" (c)
131 :"m" (v->counter) : "memory"); 129 : : "memory");
132 return c != 0; 130 return c != 0;
133} 131}
134 132
@@ -146,8 +144,8 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
146 144
147 __asm__ __volatile__( 145 __asm__ __volatile__(
148 LOCK_PREFIX "incl %0; sete %1" 146 LOCK_PREFIX "incl %0; sete %1"
149 :"=m" (v->counter), "=qm" (c) 147 :"+m" (v->counter), "=qm" (c)
150 :"m" (v->counter) : "memory"); 148 : : "memory");
151 return c != 0; 149 return c != 0;
152} 150}
153 151
@@ -166,8 +164,8 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
166 164
167 __asm__ __volatile__( 165 __asm__ __volatile__(
168 LOCK_PREFIX "addl %2,%0; sets %1" 166 LOCK_PREFIX "addl %2,%0; sets %1"
169 :"=m" (v->counter), "=qm" (c) 167 :"+m" (v->counter), "=qm" (c)
170 :"ir" (i), "m" (v->counter) : "memory"); 168 :"ir" (i) : "memory");
171 return c; 169 return c;
172} 170}
173 171
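
Every asm change in this file is the same substitution: the memory operand that used to appear twice, once as an "=m" output and again as an "m" input, becomes a single "+m" read-write operand, so GCC still knows the location is both read and modified but the constraint list names it only once. A freestanding sketch of the idiom (plain user-space C, not the kernel's atomic_t):

    /* Read-modify-write with one "+m" operand; the older style listed the
     * same location twice, as an "=m" output and an "m" input. */
    static inline void example_add(int i, int *counter)
    {
            __asm__ __volatile__(
                    "lock; addl %1,%0"
                    : "+m" (*counter)
                    : "ir" (i));
    }
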
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
index 7b8ceefd010f..946d97cfea23 100644
--- a/include/asm-i386/futex.h
+++ b/include/asm-i386/futex.h
@@ -20,8 +20,8 @@
20 .align 8\n\ 20 .align 8\n\
21 .long 1b,3b\n\ 21 .long 1b,3b\n\
22 .previous" \ 22 .previous" \
23 : "=r" (oldval), "=r" (ret), "=m" (*uaddr) \ 23 : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
24 : "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0)) 24 : "i" (-EFAULT), "0" (oparg), "1" (0))
25 25
26#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ 26#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
27 __asm__ __volatile ( \ 27 __asm__ __volatile ( \
@@ -38,9 +38,9 @@
38 .align 8\n\ 38 .align 8\n\
39 .long 1b,4b,2b,4b\n\ 39 .long 1b,4b,2b,4b\n\
40 .previous" \ 40 .previous" \
41 : "=&a" (oldval), "=&r" (ret), "=m" (*uaddr), \ 41 : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \
42 "=&r" (tem) \ 42 "=&r" (tem) \
43 : "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0)) 43 : "r" (oparg), "i" (-EFAULT), "1" (0))
44 44
45static inline int 45static inline int
46futex_atomic_op_inuser (int encoded_op, int __user *uaddr) 46futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
@@ -123,7 +123,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
123 " .long 1b,3b \n" 123 " .long 1b,3b \n"
124 " .previous \n" 124 " .previous \n"
125 125
126 : "=a" (oldval), "=m" (*uaddr) 126 : "=a" (oldval), "+m" (*uaddr)
127 : "i" (-EFAULT), "r" (newval), "0" (oldval) 127 : "i" (-EFAULT), "r" (newval), "0" (oldval)
128 : "memory" 128 : "memory"
129 ); 129 );
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
index 3b4998c51d08..12060e22f7e2 100644
--- a/include/asm-i386/local.h
+++ b/include/asm-i386/local.h
@@ -17,32 +17,30 @@ static __inline__ void local_inc(local_t *v)
17{ 17{
18 __asm__ __volatile__( 18 __asm__ __volatile__(
19 "incl %0" 19 "incl %0"
20 :"=m" (v->counter) 20 :"+m" (v->counter));
21 :"m" (v->counter));
22} 21}
23 22
24static __inline__ void local_dec(local_t *v) 23static __inline__ void local_dec(local_t *v)
25{ 24{
26 __asm__ __volatile__( 25 __asm__ __volatile__(
27 "decl %0" 26 "decl %0"
28 :"=m" (v->counter) 27 :"+m" (v->counter));
29 :"m" (v->counter));
30} 28}
31 29
32static __inline__ void local_add(long i, local_t *v) 30static __inline__ void local_add(long i, local_t *v)
33{ 31{
34 __asm__ __volatile__( 32 __asm__ __volatile__(
35 "addl %1,%0" 33 "addl %1,%0"
36 :"=m" (v->counter) 34 :"+m" (v->counter)
37 :"ir" (i), "m" (v->counter)); 35 :"ir" (i));
38} 36}
39 37
40static __inline__ void local_sub(long i, local_t *v) 38static __inline__ void local_sub(long i, local_t *v)
41{ 39{
42 __asm__ __volatile__( 40 __asm__ __volatile__(
43 "subl %1,%0" 41 "subl %1,%0"
44 :"=m" (v->counter) 42 :"+m" (v->counter)
45 :"ir" (i), "m" (v->counter)); 43 :"ir" (i));
46} 44}
47 45
48/* On x86, these are no better than the atomic variants. */ 46/* On x86, these are no better than the atomic variants. */
diff --git a/include/asm-i386/posix_types.h b/include/asm-i386/posix_types.h
index 4e47ed059ad6..133e31e7dfde 100644
--- a/include/asm-i386/posix_types.h
+++ b/include/asm-i386/posix_types.h
@@ -51,12 +51,12 @@ typedef struct {
51#undef __FD_SET 51#undef __FD_SET
52#define __FD_SET(fd,fdsetp) \ 52#define __FD_SET(fd,fdsetp) \
53 __asm__ __volatile__("btsl %1,%0": \ 53 __asm__ __volatile__("btsl %1,%0": \
54 "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) 54 "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
55 55
56#undef __FD_CLR 56#undef __FD_CLR
57#define __FD_CLR(fd,fdsetp) \ 57#define __FD_CLR(fd,fdsetp) \
58 __asm__ __volatile__("btrl %1,%0": \ 58 __asm__ __volatile__("btrl %1,%0": \
59 "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) 59 "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
60 60
61#undef __FD_ISSET 61#undef __FD_ISSET
62#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \ 62#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
index 94f00195d543..96b0bef2ea56 100644
--- a/include/asm-i386/rwlock.h
+++ b/include/asm-i386/rwlock.h
@@ -37,7 +37,7 @@
37 "popl %%eax\n\t" \ 37 "popl %%eax\n\t" \
38 "1:\n", \ 38 "1:\n", \
39 "subl $1,%0\n\t", \ 39 "subl $1,%0\n\t", \
40 "=m" (*(volatile int *)rw) : : "memory") 40 "+m" (*(volatile int *)rw) : : "memory")
41 41
42#define __build_read_lock(rw, helper) do { \ 42#define __build_read_lock(rw, helper) do { \
43 if (__builtin_constant_p(rw)) \ 43 if (__builtin_constant_p(rw)) \
@@ -63,7 +63,7 @@
63 "popl %%eax\n\t" \ 63 "popl %%eax\n\t" \
64 "1:\n", \ 64 "1:\n", \
65 "subl $" RW_LOCK_BIAS_STR ",%0\n\t", \ 65 "subl $" RW_LOCK_BIAS_STR ",%0\n\t", \
66 "=m" (*(volatile int *)rw) : : "memory") 66 "+m" (*(volatile int *)rw) : : "memory")
67 67
68#define __build_write_lock(rw, helper) do { \ 68#define __build_write_lock(rw, helper) do { \
69 if (__builtin_constant_p(rw)) \ 69 if (__builtin_constant_p(rw)) \
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index 2f07601562e7..43113f5608eb 100644
--- a/include/asm-i386/rwsem.h
+++ b/include/asm-i386/rwsem.h
@@ -111,8 +111,8 @@ LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value
111 " jmp 1b\n" 111 " jmp 1b\n"
112 LOCK_SECTION_END 112 LOCK_SECTION_END
113 "# ending down_read\n\t" 113 "# ending down_read\n\t"
114 : "=m"(sem->count) 114 : "+m" (sem->count)
115 : "a"(sem), "m"(sem->count) 115 : "a" (sem)
116 : "memory", "cc"); 116 : "memory", "cc");
117} 117}
118 118
@@ -133,8 +133,8 @@ LOCK_PREFIX " cmpxchgl %2,%0\n\t"
133 " jnz 1b\n\t" 133 " jnz 1b\n\t"
134 "2:\n\t" 134 "2:\n\t"
135 "# ending __down_read_trylock\n\t" 135 "# ending __down_read_trylock\n\t"
136 : "+m"(sem->count), "=&a"(result), "=&r"(tmp) 136 : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
137 : "i"(RWSEM_ACTIVE_READ_BIAS) 137 : "i" (RWSEM_ACTIVE_READ_BIAS)
138 : "memory", "cc"); 138 : "memory", "cc");
139 return result>=0 ? 1 : 0; 139 return result>=0 ? 1 : 0;
140} 140}
@@ -161,8 +161,8 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the
161 " jmp 1b\n" 161 " jmp 1b\n"
162 LOCK_SECTION_END 162 LOCK_SECTION_END
163 "# ending down_write" 163 "# ending down_write"
164 : "=m"(sem->count), "=d"(tmp) 164 : "+m" (sem->count), "=d" (tmp)
165 : "a"(sem), "1"(tmp), "m"(sem->count) 165 : "a" (sem), "1" (tmp)
166 : "memory", "cc"); 166 : "memory", "cc");
167} 167}
168 168
@@ -205,8 +205,8 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old valu
205 " jmp 1b\n" 205 " jmp 1b\n"
206 LOCK_SECTION_END 206 LOCK_SECTION_END
207 "# ending __up_read\n" 207 "# ending __up_read\n"
208 : "=m"(sem->count), "=d"(tmp) 208 : "+m" (sem->count), "=d" (tmp)
209 : "a"(sem), "1"(tmp), "m"(sem->count) 209 : "a" (sem), "1" (tmp)
210 : "memory", "cc"); 210 : "memory", "cc");
211} 211}
212 212
@@ -231,8 +231,8 @@ LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 ->
231 " jmp 1b\n" 231 " jmp 1b\n"
232 LOCK_SECTION_END 232 LOCK_SECTION_END
233 "# ending __up_write\n" 233 "# ending __up_write\n"
234 : "=m"(sem->count) 234 : "+m" (sem->count)
235 : "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count) 235 : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
236 : "memory", "cc", "edx"); 236 : "memory", "cc", "edx");
237} 237}
238 238
@@ -256,8 +256,8 @@ LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001
256 " jmp 1b\n" 256 " jmp 1b\n"
257 LOCK_SECTION_END 257 LOCK_SECTION_END
258 "# ending __downgrade_write\n" 258 "# ending __downgrade_write\n"
259 : "=m"(sem->count) 259 : "+m" (sem->count)
260 : "a"(sem), "i"(-RWSEM_WAITING_BIAS), "m"(sem->count) 260 : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
261 : "memory", "cc"); 261 : "memory", "cc");
262} 262}
263 263
@@ -268,8 +268,8 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
268{ 268{
269 __asm__ __volatile__( 269 __asm__ __volatile__(
270LOCK_PREFIX "addl %1,%0" 270LOCK_PREFIX "addl %1,%0"
271 : "=m"(sem->count) 271 : "+m" (sem->count)
272 : "ir"(delta), "m"(sem->count)); 272 : "ir" (delta));
273} 273}
274 274
275/* 275/*
@@ -280,10 +280,9 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
280 int tmp = delta; 280 int tmp = delta;
281 281
282 __asm__ __volatile__( 282 __asm__ __volatile__(
283LOCK_PREFIX "xadd %0,(%2)" 283LOCK_PREFIX "xadd %0,%1"
284 : "+r"(tmp), "=m"(sem->count) 284 : "+r" (tmp), "+m" (sem->count)
285 : "r"(sem), "m"(sem->count) 285 : : "memory");
286 : "memory");
287 286
288 return tmp+delta; 287 return tmp+delta;
289} 288}
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index f7a0f310c524..d51e800acf29 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -107,7 +107,7 @@ static inline void down(struct semaphore * sem)
107 "call __down_failed\n\t" 107 "call __down_failed\n\t"
108 "jmp 1b\n" 108 "jmp 1b\n"
109 LOCK_SECTION_END 109 LOCK_SECTION_END
110 :"=m" (sem->count) 110 :"+m" (sem->count)
111 : 111 :
112 :"memory","ax"); 112 :"memory","ax");
113} 113}
@@ -132,7 +132,7 @@ static inline int down_interruptible(struct semaphore * sem)
132 "call __down_failed_interruptible\n\t" 132 "call __down_failed_interruptible\n\t"
133 "jmp 1b\n" 133 "jmp 1b\n"
134 LOCK_SECTION_END 134 LOCK_SECTION_END
135 :"=a" (result), "=m" (sem->count) 135 :"=a" (result), "+m" (sem->count)
136 : 136 :
137 :"memory"); 137 :"memory");
138 return result; 138 return result;
@@ -157,7 +157,7 @@ static inline int down_trylock(struct semaphore * sem)
157 "call __down_failed_trylock\n\t" 157 "call __down_failed_trylock\n\t"
158 "jmp 1b\n" 158 "jmp 1b\n"
159 LOCK_SECTION_END 159 LOCK_SECTION_END
160 :"=a" (result), "=m" (sem->count) 160 :"=a" (result), "+m" (sem->count)
161 : 161 :
162 :"memory"); 162 :"memory");
163 return result; 163 return result;
@@ -182,7 +182,7 @@ static inline void up(struct semaphore * sem)
182 "jmp 1b\n" 182 "jmp 1b\n"
183 LOCK_SECTION_END 183 LOCK_SECTION_END
184 ".subsection 0\n" 184 ".subsection 0\n"
185 :"=m" (sem->count) 185 :"+m" (sem->count)
186 : 186 :
187 :"memory","ax"); 187 :"memory","ax");
188} 188}
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 87c40f830653..d816c62a7a1d 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -65,7 +65,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
65 alternative_smp( 65 alternative_smp(
66 __raw_spin_lock_string, 66 __raw_spin_lock_string,
67 __raw_spin_lock_string_up, 67 __raw_spin_lock_string_up,
68 "=m" (lock->slock) : : "memory"); 68 "+m" (lock->slock) : : "memory");
69} 69}
70 70
71/* 71/*
@@ -79,7 +79,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
79 alternative_smp( 79 alternative_smp(
80 __raw_spin_lock_string_flags, 80 __raw_spin_lock_string_flags,
81 __raw_spin_lock_string_up, 81 __raw_spin_lock_string_up,
82 "=m" (lock->slock) : "r" (flags) : "memory"); 82 "+m" (lock->slock) : "r" (flags) : "memory");
83} 83}
84#endif 84#endif
85 85
@@ -88,7 +88,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
88 char oldval; 88 char oldval;
89 __asm__ __volatile__( 89 __asm__ __volatile__(
90 "xchgb %b0,%1" 90 "xchgb %b0,%1"
91 :"=q" (oldval), "=m" (lock->slock) 91 :"=q" (oldval), "+m" (lock->slock)
92 :"0" (0) : "memory"); 92 :"0" (0) : "memory");
93 return oldval > 0; 93 return oldval > 0;
94} 94}
@@ -104,7 +104,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
104 104
105#define __raw_spin_unlock_string \ 105#define __raw_spin_unlock_string \
106 "movb $1,%0" \ 106 "movb $1,%0" \
107 :"=m" (lock->slock) : : "memory" 107 :"+m" (lock->slock) : : "memory"
108 108
109 109
110static inline void __raw_spin_unlock(raw_spinlock_t *lock) 110static inline void __raw_spin_unlock(raw_spinlock_t *lock)
@@ -118,7 +118,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
118 118
119#define __raw_spin_unlock_string \ 119#define __raw_spin_unlock_string \
120 "xchgb %b0, %1" \ 120 "xchgb %b0, %1" \
121 :"=q" (oldval), "=m" (lock->slock) \ 121 :"=q" (oldval), "+m" (lock->slock) \
122 :"0" (oldval) : "memory" 122 :"0" (oldval) : "memory"
123 123
124static inline void __raw_spin_unlock(raw_spinlock_t *lock) 124static inline void __raw_spin_unlock(raw_spinlock_t *lock)
@@ -199,13 +199,13 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
199 199
200static inline void __raw_read_unlock(raw_rwlock_t *rw) 200static inline void __raw_read_unlock(raw_rwlock_t *rw)
201{ 201{
202 asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory"); 202 asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
203} 203}
204 204
205static inline void __raw_write_unlock(raw_rwlock_t *rw) 205static inline void __raw_write_unlock(raw_rwlock_t *rw)
206{ 206{
207 asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0" 207 asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
208 : "=m" (rw->lock) : : "memory"); 208 : "+m" (rw->lock) : : "memory");
209} 209}
210 210
211#endif /* __ASM_SPINLOCK_H */ 211#endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 2833fa2c0dd0..54d6d7aea938 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -140,6 +140,8 @@ static inline struct thread_info *current_thread_info(void)
140#define TIF_SECCOMP 8 /* secure computing */ 140#define TIF_SECCOMP 8 /* secure computing */
141#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ 141#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
142#define TIF_MEMDIE 16 142#define TIF_MEMDIE 16
143#define TIF_DEBUG 17 /* uses debug registers */
144#define TIF_IO_BITMAP 18 /* uses I/O bitmap */
143 145
144#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 146#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
145#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 147#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -151,6 +153,8 @@ static inline struct thread_info *current_thread_info(void)
151#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 153#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
152#define _TIF_SECCOMP (1<<TIF_SECCOMP) 154#define _TIF_SECCOMP (1<<TIF_SECCOMP)
153#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 155#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
156#define _TIF_DEBUG (1<<TIF_DEBUG)
157#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP)
154 158
155/* work to do on interrupt/exception return */ 159/* work to do on interrupt/exception return */
156#define _TIF_WORK_MASK \ 160#define _TIF_WORK_MASK \
@@ -159,6 +163,9 @@ static inline struct thread_info *current_thread_info(void)
159/* work to do on any return to u-space */ 163/* work to do on any return to u-space */
160#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) 164#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
161 165
166/* flags to check in __switch_to() */
167#define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP)
168
162/* 169/*
163 * Thread-synchronous status. 170 * Thread-synchronous status.
164 * 171 *
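
TIF_DEBUG and TIF_IO_BITMAP let the context-switch path test one combined mask, _TIF_WORK_CTXSW, and only then branch to the slower per-task handling of debug registers and the I/O bitmap. An illustrative check, with a hypothetical helper name rather than the actual process.c code:

    /* Illustrative only; handle_ctxsw_work() is a hypothetical helper. */
    if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
            handle_ctxsw_work(next_p, tss);   /* reload debug regs, copy I/O bitmap */
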
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index bb3c0ab7e667..53283e2540b3 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -27,8 +27,8 @@ static __inline__ void atomic_add(int a, atomic_t *v)
27 PPC405_ERR77(0,%3) 27 PPC405_ERR77(0,%3)
28" stwcx. %0,0,%3 \n\ 28" stwcx. %0,0,%3 \n\
29 bne- 1b" 29 bne- 1b"
30 : "=&r" (t), "=m" (v->counter) 30 : "=&r" (t), "+m" (v->counter)
31 : "r" (a), "r" (&v->counter), "m" (v->counter) 31 : "r" (a), "r" (&v->counter)
32 : "cc"); 32 : "cc");
33} 33}
34 34
@@ -63,8 +63,8 @@ static __inline__ void atomic_sub(int a, atomic_t *v)
63 PPC405_ERR77(0,%3) 63 PPC405_ERR77(0,%3)
64" stwcx. %0,0,%3 \n\ 64" stwcx. %0,0,%3 \n\
65 bne- 1b" 65 bne- 1b"
66 : "=&r" (t), "=m" (v->counter) 66 : "=&r" (t), "+m" (v->counter)
67 : "r" (a), "r" (&v->counter), "m" (v->counter) 67 : "r" (a), "r" (&v->counter)
68 : "cc"); 68 : "cc");
69} 69}
70 70
@@ -97,8 +97,8 @@ static __inline__ void atomic_inc(atomic_t *v)
97 PPC405_ERR77(0,%2) 97 PPC405_ERR77(0,%2)
98" stwcx. %0,0,%2 \n\ 98" stwcx. %0,0,%2 \n\
99 bne- 1b" 99 bne- 1b"
100 : "=&r" (t), "=m" (v->counter) 100 : "=&r" (t), "+m" (v->counter)
101 : "r" (&v->counter), "m" (v->counter) 101 : "r" (&v->counter)
102 : "cc"); 102 : "cc");
103} 103}
104 104
@@ -141,8 +141,8 @@ static __inline__ void atomic_dec(atomic_t *v)
141 PPC405_ERR77(0,%2)\ 141 PPC405_ERR77(0,%2)\
142" stwcx. %0,0,%2\n\ 142" stwcx. %0,0,%2\n\
143 bne- 1b" 143 bne- 1b"
144 : "=&r" (t), "=m" (v->counter) 144 : "=&r" (t), "+m" (v->counter)
145 : "r" (&v->counter), "m" (v->counter) 145 : "r" (&v->counter)
146 : "cc"); 146 : "cc");
147} 147}
148 148
@@ -253,8 +253,8 @@ static __inline__ void atomic64_add(long a, atomic64_t *v)
253 add %0,%2,%0\n\ 253 add %0,%2,%0\n\
254 stdcx. %0,0,%3 \n\ 254 stdcx. %0,0,%3 \n\
255 bne- 1b" 255 bne- 1b"
256 : "=&r" (t), "=m" (v->counter) 256 : "=&r" (t), "+m" (v->counter)
257 : "r" (a), "r" (&v->counter), "m" (v->counter) 257 : "r" (a), "r" (&v->counter)
258 : "cc"); 258 : "cc");
259} 259}
260 260
@@ -287,8 +287,8 @@ static __inline__ void atomic64_sub(long a, atomic64_t *v)
287 subf %0,%2,%0\n\ 287 subf %0,%2,%0\n\
288 stdcx. %0,0,%3 \n\ 288 stdcx. %0,0,%3 \n\
289 bne- 1b" 289 bne- 1b"
290 : "=&r" (t), "=m" (v->counter) 290 : "=&r" (t), "+m" (v->counter)
291 : "r" (a), "r" (&v->counter), "m" (v->counter) 291 : "r" (a), "r" (&v->counter)
292 : "cc"); 292 : "cc");
293} 293}
294 294
@@ -319,8 +319,8 @@ static __inline__ void atomic64_inc(atomic64_t *v)
319 addic %0,%0,1\n\ 319 addic %0,%0,1\n\
320 stdcx. %0,0,%2 \n\ 320 stdcx. %0,0,%2 \n\
321 bne- 1b" 321 bne- 1b"
322 : "=&r" (t), "=m" (v->counter) 322 : "=&r" (t), "+m" (v->counter)
323 : "r" (&v->counter), "m" (v->counter) 323 : "r" (&v->counter)
324 : "cc"); 324 : "cc");
325} 325}
326 326
@@ -361,8 +361,8 @@ static __inline__ void atomic64_dec(atomic64_t *v)
361 addic %0,%0,-1\n\ 361 addic %0,%0,-1\n\
362 stdcx. %0,0,%2\n\ 362 stdcx. %0,0,%2\n\
363 bne- 1b" 363 bne- 1b"
364 : "=&r" (t), "=m" (v->counter) 364 : "=&r" (t), "+m" (v->counter)
365 : "r" (&v->counter), "m" (v->counter) 365 : "r" (&v->counter)
366 : "cc"); 366 : "cc");
367} 367}
368 368
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index 76e2f08c3c83..c341063d0804 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -65,8 +65,8 @@ static __inline__ void set_bit(int nr, volatile unsigned long *addr)
65 PPC405_ERR77(0,%3) 65 PPC405_ERR77(0,%3)
66 PPC_STLCX "%0,0,%3\n" 66 PPC_STLCX "%0,0,%3\n"
67 "bne- 1b" 67 "bne- 1b"
68 : "=&r"(old), "=m"(*p) 68 : "=&r" (old), "+m" (*p)
69 : "r"(mask), "r"(p), "m"(*p) 69 : "r" (mask), "r" (p)
70 : "cc" ); 70 : "cc" );
71} 71}
72 72
@@ -82,8 +82,8 @@ static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
82 PPC405_ERR77(0,%3) 82 PPC405_ERR77(0,%3)
83 PPC_STLCX "%0,0,%3\n" 83 PPC_STLCX "%0,0,%3\n"
84 "bne- 1b" 84 "bne- 1b"
85 : "=&r"(old), "=m"(*p) 85 : "=&r" (old), "+m" (*p)
86 : "r"(mask), "r"(p), "m"(*p) 86 : "r" (mask), "r" (p)
87 : "cc" ); 87 : "cc" );
88} 88}
89 89
@@ -99,8 +99,8 @@ static __inline__ void change_bit(int nr, volatile unsigned long *addr)
99 PPC405_ERR77(0,%3) 99 PPC405_ERR77(0,%3)
100 PPC_STLCX "%0,0,%3\n" 100 PPC_STLCX "%0,0,%3\n"
101 "bne- 1b" 101 "bne- 1b"
102 : "=&r"(old), "=m"(*p) 102 : "=&r" (old), "+m" (*p)
103 : "r"(mask), "r"(p), "m"(*p) 103 : "r" (mask), "r" (p)
104 : "cc" ); 104 : "cc" );
105} 105}
106 106
@@ -179,8 +179,8 @@ static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
179 "or %0,%0,%2\n" 179 "or %0,%0,%2\n"
180 PPC_STLCX "%0,0,%3\n" 180 PPC_STLCX "%0,0,%3\n"
181 "bne- 1b" 181 "bne- 1b"
182 : "=&r" (old), "=m" (*addr) 182 : "=&r" (old), "+m" (*addr)
183 : "r" (mask), "r" (addr), "m" (*addr) 183 : "r" (mask), "r" (addr)
184 : "cc"); 184 : "cc");
185} 185}
186 186
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index d075725bf444..c6569516ba35 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -220,8 +220,8 @@ __xchg_u32(volatile void *p, unsigned long val)
220" stwcx. %3,0,%2 \n\ 220" stwcx. %3,0,%2 \n\
221 bne- 1b" 221 bne- 1b"
222 ISYNC_ON_SMP 222 ISYNC_ON_SMP
223 : "=&r" (prev), "=m" (*(volatile unsigned int *)p) 223 : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
224 : "r" (p), "r" (val), "m" (*(volatile unsigned int *)p) 224 : "r" (p), "r" (val)
225 : "cc", "memory"); 225 : "cc", "memory");
226 226
227 return prev; 227 return prev;
@@ -240,8 +240,8 @@ __xchg_u64(volatile void *p, unsigned long val)
240" stdcx. %3,0,%2 \n\ 240" stdcx. %3,0,%2 \n\
241 bne- 1b" 241 bne- 1b"
242 ISYNC_ON_SMP 242 ISYNC_ON_SMP
243 : "=&r" (prev), "=m" (*(volatile unsigned long *)p) 243 : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
244 : "r" (p), "r" (val), "m" (*(volatile unsigned long *)p) 244 : "r" (p), "r" (val)
245 : "cc", "memory"); 245 : "cc", "memory");
246 246
247 return prev; 247 return prev;
@@ -299,8 +299,8 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
299 ISYNC_ON_SMP 299 ISYNC_ON_SMP
300 "\n\ 300 "\n\
3012:" 3012:"
302 : "=&r" (prev), "=m" (*p) 302 : "=&r" (prev), "+m" (*p)
303 : "r" (p), "r" (old), "r" (new), "m" (*p) 303 : "r" (p), "r" (old), "r" (new)
304 : "cc", "memory"); 304 : "cc", "memory");
305 305
306 return prev; 306 return prev;
@@ -322,8 +322,8 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
322 ISYNC_ON_SMP 322 ISYNC_ON_SMP
323 "\n\ 323 "\n\
3242:" 3242:"
325 : "=&r" (prev), "=m" (*p) 325 : "=&r" (prev), "+m" (*p)
326 : "r" (p), "r" (old), "r" (new), "m" (*p) 326 : "r" (p), "r" (old), "r" (new)
327 : "cc", "memory"); 327 : "cc", "memory");
328 328
329 return prev; 329 return prev;
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index a7e8cef73d15..7520cc1ff9e2 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -11,7 +11,7 @@ enum blktrace_cat {
11 BLK_TC_READ = 1 << 0, /* reads */ 11 BLK_TC_READ = 1 << 0, /* reads */
12 BLK_TC_WRITE = 1 << 1, /* writes */ 12 BLK_TC_WRITE = 1 << 1, /* writes */
13 BLK_TC_BARRIER = 1 << 2, /* barrier */ 13 BLK_TC_BARRIER = 1 << 2, /* barrier */
14 BLK_TC_SYNC = 1 << 3, /* barrier */ 14 BLK_TC_SYNC = 1 << 3, /* sync IO */
15 BLK_TC_QUEUE = 1 << 4, /* queueing/merging */ 15 BLK_TC_QUEUE = 1 << 4, /* queueing/merging */
16 BLK_TC_REQUEUE = 1 << 5, /* requeueing */ 16 BLK_TC_REQUEUE = 1 << 5, /* requeueing */
17 BLK_TC_ISSUE = 1 << 6, /* issue */ 17 BLK_TC_ISSUE = 1 << 6, /* issue */
@@ -19,6 +19,7 @@ enum blktrace_cat {
19 BLK_TC_FS = 1 << 8, /* fs requests */ 19 BLK_TC_FS = 1 << 8, /* fs requests */
20 BLK_TC_PC = 1 << 9, /* pc requests */ 20 BLK_TC_PC = 1 << 9, /* pc requests */
21 BLK_TC_NOTIFY = 1 << 10, /* special message */ 21 BLK_TC_NOTIFY = 1 << 10, /* special message */
22 BLK_TC_AHEAD = 1 << 11, /* readahead */
22 23
23 BLK_TC_END = 1 << 15, /* only 16-bits, reminder */ 24 BLK_TC_END = 1 << 15, /* only 16-bits, reminder */
24}; 25};
@@ -147,7 +148,7 @@ static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
147 u32 what) 148 u32 what)
148{ 149{
149 struct blk_trace *bt = q->blk_trace; 150 struct blk_trace *bt = q->blk_trace;
150 int rw = rq->flags & 0x07; 151 int rw = rq->flags & 0x03;
151 152
152 if (likely(!bt)) 153 if (likely(!bt))
153 return; 154 return;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 85f99f60deea..76cc099c8580 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -549,6 +549,7 @@ struct packet_type {
549 struct net_device *); 549 struct net_device *);
550 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 550 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
551 int features); 551 int features);
552 int (*gso_send_check)(struct sk_buff *skb);
552 void *af_packet_priv; 553 void *af_packet_priv;
553 struct list_head list; 554 struct list_head list;
554}; 555};
@@ -1001,13 +1002,14 @@ static inline int net_gso_ok(int features, int gso_type)
1001 1002
1002static inline int skb_gso_ok(struct sk_buff *skb, int features) 1003static inline int skb_gso_ok(struct sk_buff *skb, int features)
1003{ 1004{
1004 return net_gso_ok(features, skb_shinfo(skb)->gso_size ? 1005 return net_gso_ok(features, skb_shinfo(skb)->gso_type);
1005 skb_shinfo(skb)->gso_type : 0);
1006} 1006}
1007 1007
1008static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) 1008static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
1009{ 1009{
1010 return !skb_gso_ok(skb, dev->features); 1010 return skb_is_gso(skb) &&
1011 (!skb_gso_ok(skb, dev->features) ||
1012 unlikely(skb->ip_summed != CHECKSUM_HW));
1011} 1013}
1012 1014
1013#endif /* __KERNEL__ */ 1015#endif /* __KERNEL__ */
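
netif_needs_gso() now answers a narrower question: software GSO is required only when the skb actually carries GSO state and either the device lacks the matching feature or the packet was not handed over with CHECKSUM_HW. An illustrative transmit-path use of the helper pair, assuming the caller has a software-fallback branch and an error label of its own:

    /* Illustrative sketch of a transmit path falling back to software GSO. */
    if (netif_needs_gso(dev, skb)) {
            struct sk_buff *segs = skb_gso_segment(skb, dev->features);

            if (IS_ERR(segs))
                    goto drop;          /* hypothetical error label */
            /* ...queue each segment from the returned list to the device... */
    }
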
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3597b4f14389..0bf31b83578c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1455,5 +1455,10 @@ static inline void skb_init_secmark(struct sk_buff *skb)
1455{ } 1455{ }
1456#endif 1456#endif
1457 1457
1458static inline int skb_is_gso(const struct sk_buff *skb)
1459{
1460 return skb_shinfo(skb)->gso_size;
1461}
1462
1458#endif /* __KERNEL__ */ 1463#endif /* __KERNEL__ */
1459#endif /* _LINUX_SKBUFF_H */ 1464#endif /* _LINUX_SKBUFF_H */
diff --git a/include/net/protocol.h b/include/net/protocol.h
index a225d6371cb1..c643bce64e55 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -36,6 +36,7 @@
36struct net_protocol { 36struct net_protocol {
37 int (*handler)(struct sk_buff *skb); 37 int (*handler)(struct sk_buff *skb);
38 void (*err_handler)(struct sk_buff *skb, u32 info); 38 void (*err_handler)(struct sk_buff *skb, u32 info);
39 int (*gso_send_check)(struct sk_buff *skb);
39 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 40 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
40 int features); 41 int features);
41 int no_policy; 42 int no_policy;
@@ -51,6 +52,7 @@ struct inet6_protocol
51 int type, int code, int offset, 52 int type, int code, int offset,
52 __u32 info); 53 __u32 info);
53 54
55 int (*gso_send_check)(struct sk_buff *skb);
54 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 56 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
55 int features); 57 int features);
56 58
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 3cd803b0d7a5..0720bddff1e9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1086,6 +1086,7 @@ extern struct request_sock_ops tcp_request_sock_ops;
1086 1086
1087extern int tcp_v4_destroy_sock(struct sock *sk); 1087extern int tcp_v4_destroy_sock(struct sock *sk);
1088 1088
1089extern int tcp_v4_gso_send_check(struct sk_buff *skb);
1089extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features); 1090extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
1090 1091
1091#ifdef CONFIG_PROC_FS 1092#ifdef CONFIG_PROC_FS
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 121bf6f49148..2e62105d91bd 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -962,7 +962,6 @@ static struct file_operations arp_seq_fops = {
962 962
963static int __init atm_clip_init(void) 963static int __init atm_clip_init(void)
964{ 964{
965 struct proc_dir_entry *p;
966 neigh_table_init_no_netlink(&clip_tbl); 965 neigh_table_init_no_netlink(&clip_tbl);
967 966
968 clip_tbl_hook = &clip_tbl; 967 clip_tbl_hook = &clip_tbl;
@@ -972,9 +971,15 @@ static int __init atm_clip_init(void)
972 971
973 setup_timer(&idle_timer, idle_timer_check, 0); 972 setup_timer(&idle_timer, idle_timer_check, 0);
974 973
975 p = create_proc_entry("arp", S_IRUGO, atm_proc_root); 974#ifdef CONFIG_PROC_FS
976 if (p) 975 {
977 p->proc_fops = &arp_seq_fops; 976 struct proc_dir_entry *p;
977
978 p = create_proc_entry("arp", S_IRUGO, atm_proc_root);
979 if (p)
980 p->proc_fops = &arp_seq_fops;
981 }
982#endif
978 983
979 return 0; 984 return 0;
980} 985}
diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c
index 4b1faca5013f..1d3de42fada0 100644
--- a/net/atm/ipcommon.c
+++ b/net/atm/ipcommon.c
@@ -25,22 +25,27 @@
25/* 25/*
26 * skb_migrate appends the list at "from" to "to", emptying "from" in the 26 * skb_migrate appends the list at "from" to "to", emptying "from" in the
27 * process. skb_migrate is atomic with respect to all other skb operations on 27 * process. skb_migrate is atomic with respect to all other skb operations on
28 * "from" and "to". Note that it locks both lists at the same time, so beware 28 * "from" and "to". Note that it locks both lists at the same time, so to deal
29 * of potential deadlocks. 29 * with the lock ordering, the locks are taken in address order.
30 * 30 *
31 * This function should live in skbuff.c or skbuff.h. 31 * This function should live in skbuff.c or skbuff.h.
32 */ 32 */
33 33
34 34
35void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to) 35void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to)
36{ 36{
37 unsigned long flags; 37 unsigned long flags;
38 struct sk_buff *skb_from = (struct sk_buff *) from; 38 struct sk_buff *skb_from = (struct sk_buff *) from;
39 struct sk_buff *skb_to = (struct sk_buff *) to; 39 struct sk_buff *skb_to = (struct sk_buff *) to;
40 struct sk_buff *prev; 40 struct sk_buff *prev;
41 41
42 spin_lock_irqsave(&from->lock,flags); 42 if ((unsigned long) from < (unsigned long) to) {
43 spin_lock(&to->lock); 43 spin_lock_irqsave(&from->lock, flags);
44 spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING);
45 } else {
46 spin_lock_irqsave(&to->lock, flags);
47 spin_lock_nested(&from->lock, SINGLE_DEPTH_NESTING);
48 }
44 prev = from->prev; 49 prev = from->prev;
45 from->next->prev = to->prev; 50 from->next->prev = to->prev;
46 prev->next = skb_to; 51 prev->next = skb_to;
@@ -51,7 +56,7 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
51 from->prev = skb_from; 56 from->prev = skb_from;
52 from->next = skb_from; 57 from->next = skb_from;
53 from->qlen = 0; 58 from->qlen = 0;
54 spin_unlock_irqrestore(&from->lock,flags); 59 spin_unlock_irqrestore(&from->lock, flags);
55} 60}
56 61
57 62
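
skb_migrate() now avoids an AB-BA deadlock by taking the two queue locks in ascending address order and annotating the inner acquisition with spin_lock_nested(), since both locks belong to the same lockdep class. The same idiom works for any pair of same-class locks; a minimal sketch, assuming the caller never passes the same lock twice:

    /* Lock two objects of one lock class: order by address so concurrent
     * callers agree on the order, and tell lockdep the second lock nests. */
    static void lock_pair(spinlock_t *a, spinlock_t *b)
    {
            if (a < b) {
                    spin_lock(a);
                    spin_lock_nested(b, SINGLE_DEPTH_NESTING);
            } else {
                    spin_lock(b);
                    spin_lock_nested(a, SINGLE_DEPTH_NESTING);
            }
    }
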
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 10a3c0aa8398..f12be2acf9bc 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -486,10 +486,9 @@ ax25_cb *ax25_create_cb(void)
486{ 486{
487 ax25_cb *ax25; 487 ax25_cb *ax25;
488 488
489 if ((ax25 = kmalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL) 489 if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
490 return NULL; 490 return NULL;
491 491
492 memset(ax25, 0x00, sizeof(*ax25));
493 atomic_set(&ax25->refcount, 1); 492 atomic_set(&ax25->refcount, 1);
494 493
495 skb_queue_head_init(&ax25->write_queue); 494 skb_queue_head_init(&ax25->write_queue);
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 47e6e790bd67..b787678220ff 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -55,15 +55,13 @@ void ax25_dev_device_up(struct net_device *dev)
55{ 55{
56 ax25_dev *ax25_dev; 56 ax25_dev *ax25_dev;
57 57
58 if ((ax25_dev = kmalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) { 58 if ((ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) {
59 printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n"); 59 printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n");
60 return; 60 return;
61 } 61 }
62 62
63 ax25_unregister_sysctl(); 63 ax25_unregister_sysctl();
64 64
65 memset(ax25_dev, 0x00, sizeof(*ax25_dev));
66
67 dev->ax25_ptr = ax25_dev; 65 dev->ax25_ptr = ax25_dev;
68 ax25_dev->dev = dev; 66 ax25_dev->dev = dev;
69 dev_hold(dev); 67 dev_hold(dev);
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 8be9f2123e54..6ccd32b30809 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -35,7 +35,7 @@ static inline unsigned packet_length(const struct sk_buff *skb)
35int br_dev_queue_push_xmit(struct sk_buff *skb) 35int br_dev_queue_push_xmit(struct sk_buff *skb)
36{ 36{
37 /* drop mtu oversized packets except gso */ 37 /* drop mtu oversized packets except gso */
38 if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->gso_size) 38 if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
39 kfree_skb(skb); 39 kfree_skb(skb);
40 else { 40 else {
41#ifdef CONFIG_BRIDGE_NETFILTER 41#ifdef CONFIG_BRIDGE_NETFILTER
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 8298a5179aef..cbc8a389a0a8 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -761,7 +761,7 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
761{ 761{
762 if (skb->protocol == htons(ETH_P_IP) && 762 if (skb->protocol == htons(ETH_P_IP) &&
763 skb->len > skb->dev->mtu && 763 skb->len > skb->dev->mtu &&
764 !skb_shinfo(skb)->gso_size) 764 !skb_is_gso(skb))
765 return ip_fragment(skb, br_dev_queue_push_xmit); 765 return ip_fragment(skb, br_dev_queue_push_xmit);
766 else 766 else
767 return br_dev_queue_push_xmit(skb); 767 return br_dev_queue_push_xmit(skb);
diff --git a/net/core/dev.c b/net/core/dev.c
index 066a60a75280..4d2b5167d7f5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1162,9 +1162,17 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
1162 unsigned int csum; 1162 unsigned int csum;
1163 int ret = 0, offset = skb->h.raw - skb->data; 1163 int ret = 0, offset = skb->h.raw - skb->data;
1164 1164
1165 if (inward) { 1165 if (inward)
1166 skb->ip_summed = CHECKSUM_NONE; 1166 goto out_set_summed;
1167 goto out; 1167
1168 if (unlikely(skb_shinfo(skb)->gso_size)) {
1169 static int warned;
1170
1171 WARN_ON(!warned);
1172 warned = 1;
1173
1174 /* Let GSO fix up the checksum. */
1175 goto out_set_summed;
1168 } 1176 }
1169 1177
1170 if (skb_cloned(skb)) { 1178 if (skb_cloned(skb)) {
@@ -1181,6 +1189,8 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
1181 BUG_ON(skb->csum + 2 > offset); 1189 BUG_ON(skb->csum + 2 > offset);
1182 1190
1183 *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); 1191 *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
1192
1193out_set_summed:
1184 skb->ip_summed = CHECKSUM_NONE; 1194 skb->ip_summed = CHECKSUM_NONE;
1185out: 1195out:
1186 return ret; 1196 return ret;
@@ -1201,17 +1211,35 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1201 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 1211 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1202 struct packet_type *ptype; 1212 struct packet_type *ptype;
1203 int type = skb->protocol; 1213 int type = skb->protocol;
1214 int err;
1204 1215
1205 BUG_ON(skb_shinfo(skb)->frag_list); 1216 BUG_ON(skb_shinfo(skb)->frag_list);
1206 BUG_ON(skb->ip_summed != CHECKSUM_HW);
1207 1217
1208 skb->mac.raw = skb->data; 1218 skb->mac.raw = skb->data;
1209 skb->mac_len = skb->nh.raw - skb->data; 1219 skb->mac_len = skb->nh.raw - skb->data;
1210 __skb_pull(skb, skb->mac_len); 1220 __skb_pull(skb, skb->mac_len);
1211 1221
1222 if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
1223 static int warned;
1224
1225 WARN_ON(!warned);
1226 warned = 1;
1227
1228 if (skb_header_cloned(skb) &&
1229 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1230 return ERR_PTR(err);
1231 }
1232
1212 rcu_read_lock(); 1233 rcu_read_lock();
1213 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) { 1234 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
1214 if (ptype->type == type && !ptype->dev && ptype->gso_segment) { 1235 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1236 if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
1237 err = ptype->gso_send_check(skb);
1238 segs = ERR_PTR(err);
1239 if (err || skb_gso_ok(skb, features))
1240 break;
1241 __skb_push(skb, skb->data - skb->nh.raw);
1242 }
1215 segs = ptype->gso_segment(skb, features); 1243 segs = ptype->gso_segment(skb, features);
1216 break; 1244 break;
1217 } 1245 }
@@ -1727,7 +1755,7 @@ static int ing_filter(struct sk_buff *skb)
1727 if (dev->qdisc_ingress) { 1755 if (dev->qdisc_ingress) {
1728 __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd); 1756 __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
1729 if (MAX_RED_LOOP < ttl++) { 1757 if (MAX_RED_LOOP < ttl++) {
1730 printk("Redir loop detected Dropping packet (%s->%s)\n", 1758 printk(KERN_WARNING "Redir loop detected Dropping packet (%s->%s)\n",
1731 skb->input_dev->name, skb->dev->name); 1759 skb->input_dev->name, skb->dev->name);
1732 return TC_ACT_SHOT; 1760 return TC_ACT_SHOT;
1733 } 1761 }
@@ -2922,7 +2950,7 @@ int register_netdevice(struct net_device *dev)
2922 /* Fix illegal SG+CSUM combinations. */ 2950 /* Fix illegal SG+CSUM combinations. */
2923 if ((dev->features & NETIF_F_SG) && 2951 if ((dev->features & NETIF_F_SG) &&
2924 !(dev->features & NETIF_F_ALL_CSUM)) { 2952 !(dev->features & NETIF_F_ALL_CSUM)) {
2925 printk("%s: Dropping NETIF_F_SG since no checksum feature.\n", 2953 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
2926 dev->name); 2954 dev->name);
2927 dev->features &= ~NETIF_F_SG; 2955 dev->features &= ~NETIF_F_SG;
2928 } 2956 }
@@ -2930,7 +2958,7 @@ int register_netdevice(struct net_device *dev)
2930 /* TSO requires that SG is present as well. */ 2958 /* TSO requires that SG is present as well. */
2931 if ((dev->features & NETIF_F_TSO) && 2959 if ((dev->features & NETIF_F_TSO) &&
2932 !(dev->features & NETIF_F_SG)) { 2960 !(dev->features & NETIF_F_SG)) {
2933 printk("%s: Dropping NETIF_F_TSO since no SG feature.\n", 2961 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
2934 dev->name); 2962 dev->name);
2935 dev->features &= ~NETIF_F_TSO; 2963 dev->features &= ~NETIF_F_TSO;
2936 } 2964 }
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 06e785fe5757..22f321d9bf9d 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -399,9 +399,10 @@ int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
399 rcu_read_lock(); 399 rcu_read_lock();
400 hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) { 400 hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
401 if (idx < s_idx) 401 if (idx < s_idx)
402 continue; 402 goto next;
403 if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0) 403 if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0)
404 break; 404 break;
405next:
405 idx++; 406 idx++;
406 } 407 }
407 rcu_read_unlock(); 408 rcu_read_unlock();
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 318d4674faa1..c84a32070f8d 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1097,6 +1097,40 @@ int inet_sk_rebuild_header(struct sock *sk)
1097 1097
1098EXPORT_SYMBOL(inet_sk_rebuild_header); 1098EXPORT_SYMBOL(inet_sk_rebuild_header);
1099 1099
1100static int inet_gso_send_check(struct sk_buff *skb)
1101{
1102 struct iphdr *iph;
1103 struct net_protocol *ops;
1104 int proto;
1105 int ihl;
1106 int err = -EINVAL;
1107
1108 if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
1109 goto out;
1110
1111 iph = skb->nh.iph;
1112 ihl = iph->ihl * 4;
1113 if (ihl < sizeof(*iph))
1114 goto out;
1115
1116 if (unlikely(!pskb_may_pull(skb, ihl)))
1117 goto out;
1118
1119 skb->h.raw = __skb_pull(skb, ihl);
1120 iph = skb->nh.iph;
1121 proto = iph->protocol & (MAX_INET_PROTOS - 1);
1122 err = -EPROTONOSUPPORT;
1123
1124 rcu_read_lock();
1125 ops = rcu_dereference(inet_protos[proto]);
1126 if (likely(ops && ops->gso_send_check))
1127 err = ops->gso_send_check(skb);
1128 rcu_read_unlock();
1129
1130out:
1131 return err;
1132}
1133
1100static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) 1134static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
1101{ 1135{
1102 struct sk_buff *segs = ERR_PTR(-EINVAL); 1136 struct sk_buff *segs = ERR_PTR(-EINVAL);
@@ -1162,6 +1196,7 @@ static struct net_protocol igmp_protocol = {
1162static struct net_protocol tcp_protocol = { 1196static struct net_protocol tcp_protocol = {
1163 .handler = tcp_v4_rcv, 1197 .handler = tcp_v4_rcv,
1164 .err_handler = tcp_v4_err, 1198 .err_handler = tcp_v4_err,
1199 .gso_send_check = tcp_v4_gso_send_check,
1165 .gso_segment = tcp_tso_segment, 1200 .gso_segment = tcp_tso_segment,
1166 .no_policy = 1, 1201 .no_policy = 1,
1167}; 1202};
@@ -1208,6 +1243,7 @@ static int ipv4_proc_init(void);
1208static struct packet_type ip_packet_type = { 1243static struct packet_type ip_packet_type = {
1209 .type = __constant_htons(ETH_P_IP), 1244 .type = __constant_htons(ETH_P_IP),
1210 .func = ip_rcv, 1245 .func = ip_rcv,
1246 .gso_send_check = inet_gso_send_check,
1211 .gso_segment = inet_gso_segment, 1247 .gso_segment = inet_gso_segment,
1212}; 1248};
1213 1249
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 6c642d11d4ca..773b12ba4e3c 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -457,13 +457,13 @@ int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
457 457
458 rcu_read_lock(); 458 rcu_read_lock();
459 hlist_for_each_entry(r, node, &fib_rules, hlist) { 459 hlist_for_each_entry(r, node, &fib_rules, hlist) {
460
461 if (idx < s_idx) 460 if (idx < s_idx)
462 continue; 461 goto next;
463 if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid, 462 if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid,
464 cb->nlh->nlmsg_seq, 463 cb->nlh->nlmsg_seq,
465 RTM_NEWRULE, NLM_F_MULTI) < 0) 464 RTM_NEWRULE, NLM_F_MULTI) < 0)
466 break; 465 break;
466next:
467 idx++; 467 idx++;
468 } 468 }
469 rcu_read_unlock(); 469 rcu_read_unlock();
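
The dn_rules.c and fib_rules.c hunks above fix the same dump-resume bug: skipping already-sent entries with continue also skipped the idx++, so idx never advanced and a resumed dump kept skipping every remaining entry. The goto next keeps the index moving for skipped entries as well; a generic sketch of the pattern, with hypothetical fill_one() and rules-list names:

    /* Resume-safe netlink-style dump loop: idx advances even for entries
     * skipped because they precede the resume point (start_idx). */
    idx = 0;
    list_for_each_entry(r, &rules, list) {
            if (idx < start_idx)
                    goto next;
            if (fill_one(skb, r) < 0)
                    break;
    next:
            idx++;
    }
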
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ca0e714613fb..7c9f9a6421b8 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -209,7 +209,7 @@ static inline int ip_finish_output(struct sk_buff *skb)
209 return dst_output(skb); 209 return dst_output(skb);
210 } 210 }
211#endif 211#endif
212 if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) 212 if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
213 return ip_fragment(skb, ip_finish_output2); 213 return ip_fragment(skb, ip_finish_output2);
214 else 214 else
215 return ip_finish_output2(skb); 215 return ip_finish_output2(skb);
@@ -1095,7 +1095,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
1095 while (size > 0) { 1095 while (size > 0) {
1096 int i; 1096 int i;
1097 1097
1098 if (skb_shinfo(skb)->gso_size) 1098 if (skb_is_gso(skb))
1099 len = size; 1099 len = size;
1100 else { 1100 else {
1101 1101
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5a886e6efbbe..a891133f00e4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -496,6 +496,24 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
496 } 496 }
497} 497}
498 498
499int tcp_v4_gso_send_check(struct sk_buff *skb)
500{
501 struct iphdr *iph;
502 struct tcphdr *th;
503
504 if (!pskb_may_pull(skb, sizeof(*th)))
505 return -EINVAL;
506
507 iph = skb->nh.iph;
508 th = skb->h.th;
509
510 th->check = 0;
511 th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
512 skb->csum = offsetof(struct tcphdr, check);
513 skb->ip_summed = CHECKSUM_HW;
514 return 0;
515}
516
499/* 517/*
500 * This routine will send an RST to the other tcp. 518 * This routine will send an RST to the other tcp.
501 * 519 *
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 193363e22932..d16f863cf687 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -134,7 +134,7 @@ static int xfrm4_output_finish(struct sk_buff *skb)
134 } 134 }
135#endif 135#endif
136 136
137 if (!skb_shinfo(skb)->gso_size) 137 if (!skb_is_gso(skb))
138 return xfrm4_output_finish2(skb); 138 return xfrm4_output_finish2(skb);
139 139
140 skb->protocol = htons(ETH_P_IP); 140 skb->protocol = htons(ETH_P_IP);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2c5b44575af0..3bc74ce78800 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *skb)
147 147
148int ip6_output(struct sk_buff *skb) 148int ip6_output(struct sk_buff *skb)
149{ 149{
150 if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) || 150 if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
151 dst_allfrag(skb->dst)) 151 dst_allfrag(skb->dst))
152 return ip6_fragment(skb, ip6_output2); 152 return ip6_fragment(skb, ip6_output2);
153 else 153 else
@@ -229,7 +229,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
229 skb->priority = sk->sk_priority; 229 skb->priority = sk->sk_priority;
230 230
231 mtu = dst_mtu(dst); 231 mtu = dst_mtu(dst);
232 if ((skb->len <= mtu) || ipfragok || skb_shinfo(skb)->gso_size) { 232 if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
233 IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); 233 IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
234 return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, 234 return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
235 dst_output); 235 dst_output);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 0c17dec11c8d..43327264e69c 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -57,29 +57,11 @@
57 57
58DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly; 58DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly;
59 59
60static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) 60static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
61 int proto)
61{ 62{
62 struct sk_buff *segs = ERR_PTR(-EINVAL); 63 struct inet6_protocol *ops = NULL;
63 struct ipv6hdr *ipv6h;
64 struct inet6_protocol *ops;
65 int proto;
66 64
67 if (unlikely(skb_shinfo(skb)->gso_type &
68 ~(SKB_GSO_UDP |
69 SKB_GSO_DODGY |
70 SKB_GSO_TCP_ECN |
71 SKB_GSO_TCPV6 |
72 0)))
73 goto out;
74
75 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
76 goto out;
77
78 ipv6h = skb->nh.ipv6h;
79 proto = ipv6h->nexthdr;
80 __skb_pull(skb, sizeof(*ipv6h));
81
82 rcu_read_lock();
83 for (;;) { 65 for (;;) {
84 struct ipv6_opt_hdr *opth; 66 struct ipv6_opt_hdr *opth;
85 int len; 67 int len;
@@ -88,30 +70,80 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
88 ops = rcu_dereference(inet6_protos[proto]); 70 ops = rcu_dereference(inet6_protos[proto]);
89 71
90 if (unlikely(!ops)) 72 if (unlikely(!ops))
91 goto unlock; 73 break;
92 74
93 if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) 75 if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
94 break; 76 break;
95 } 77 }
96 78
97 if (unlikely(!pskb_may_pull(skb, 8))) 79 if (unlikely(!pskb_may_pull(skb, 8)))
98 goto unlock; 80 break;
99 81
100 opth = (void *)skb->data; 82 opth = (void *)skb->data;
101 len = opth->hdrlen * 8 + 8; 83 len = opth->hdrlen * 8 + 8;
102 84
103 if (unlikely(!pskb_may_pull(skb, len))) 85 if (unlikely(!pskb_may_pull(skb, len)))
104 goto unlock; 86 break;
105 87
106 proto = opth->nexthdr; 88 proto = opth->nexthdr;
107 __skb_pull(skb, len); 89 __skb_pull(skb, len);
108 } 90 }
109 91
110 skb->h.raw = skb->data; 92 return ops;
111 if (likely(ops->gso_segment)) 93}
112 segs = ops->gso_segment(skb, features); 94
95static int ipv6_gso_send_check(struct sk_buff *skb)
96{
97 struct ipv6hdr *ipv6h;
98 struct inet6_protocol *ops;
99 int err = -EINVAL;
100
101 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
102 goto out;
113 103
114unlock: 104 ipv6h = skb->nh.ipv6h;
105 __skb_pull(skb, sizeof(*ipv6h));
106 err = -EPROTONOSUPPORT;
107
108 rcu_read_lock();
109 ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
110 if (likely(ops && ops->gso_send_check)) {
111 skb->h.raw = skb->data;
112 err = ops->gso_send_check(skb);
113 }
114 rcu_read_unlock();
115
116out:
117 return err;
118}
119
120static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
121{
122 struct sk_buff *segs = ERR_PTR(-EINVAL);
123 struct ipv6hdr *ipv6h;
124 struct inet6_protocol *ops;
125
126 if (unlikely(skb_shinfo(skb)->gso_type &
127 ~(SKB_GSO_UDP |
128 SKB_GSO_DODGY |
129 SKB_GSO_TCP_ECN |
130 SKB_GSO_TCPV6 |
131 0)))
132 goto out;
133
134 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
135 goto out;
136
137 ipv6h = skb->nh.ipv6h;
138 __skb_pull(skb, sizeof(*ipv6h));
139 segs = ERR_PTR(-EPROTONOSUPPORT);
140
141 rcu_read_lock();
142 ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
143 if (likely(ops && ops->gso_segment)) {
144 skb->h.raw = skb->data;
145 segs = ops->gso_segment(skb, features);
146 }
115 rcu_read_unlock(); 147 rcu_read_unlock();
116 148
117 if (unlikely(IS_ERR(segs))) 149 if (unlikely(IS_ERR(segs)))
@@ -130,6 +162,7 @@ out:
130static struct packet_type ipv6_packet_type = { 162static struct packet_type ipv6_packet_type = {
131 .type = __constant_htons(ETH_P_IPV6), 163 .type = __constant_htons(ETH_P_IPV6),
132 .func = ipv6_rcv, 164 .func = ipv6_rcv,
165 .gso_send_check = ipv6_gso_send_check,
133 .gso_segment = ipv6_gso_segment, 166 .gso_segment = ipv6_gso_segment,
134}; 167};
135 168
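
With both .gso_send_check and .gso_segment available on struct packet_type, the software GSO path can repair checksums for packets that reach it without CHECKSUM_HW before segmenting them. The helper below is hypothetical (gso_segment_with_check() is not a kernel function) and only illustrates the calling convention that skb_gso_segment() in net/core/dev.c is expected to follow:

static struct sk_buff *gso_segment_with_check(struct packet_type *ptype,
					      struct sk_buff *skb, int features)
{
	/* no pending hardware checksum: let the protocol recompute the
	 * pseudo-header checksum via the new gso_send_check hook first */
	if (unlikely(skb->ip_summed != CHECKSUM_HW) && ptype->gso_send_check) {
		int err = ptype->gso_send_check(skb);
		if (err)
			return ERR_PTR(err);
	}
	return ptype->gso_segment(skb, features);
}
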
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5bdcb9002cf7..923989d0520d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -552,6 +552,24 @@ static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
552 } 552 }
553} 553}
554 554
555static int tcp_v6_gso_send_check(struct sk_buff *skb)
556{
557 struct ipv6hdr *ipv6h;
558 struct tcphdr *th;
559
560 if (!pskb_may_pull(skb, sizeof(*th)))
561 return -EINVAL;
562
563 ipv6h = skb->nh.ipv6h;
564 th = skb->h.th;
565
566 th->check = 0;
567 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
568 IPPROTO_TCP, 0);
569 skb->csum = offsetof(struct tcphdr, check);
570 skb->ip_summed = CHECKSUM_HW;
571 return 0;
572}
555 573
556static void tcp_v6_send_reset(struct sk_buff *skb) 574static void tcp_v6_send_reset(struct sk_buff *skb)
557{ 575{
@@ -1603,6 +1621,7 @@ struct proto tcpv6_prot = {
1603static struct inet6_protocol tcpv6_protocol = { 1621static struct inet6_protocol tcpv6_protocol = {
1604 .handler = tcp_v6_rcv, 1622 .handler = tcp_v6_rcv,
1605 .err_handler = tcp_v6_err, 1623 .err_handler = tcp_v6_err,
1624 .gso_send_check = tcp_v6_gso_send_check,
1606 .gso_segment = tcp_tso_segment, 1625 .gso_segment = tcp_tso_segment,
1607 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 1626 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1608}; 1627};
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 48fccb1eca08..0eea60ea9ebc 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -122,7 +122,7 @@ static int xfrm6_output_finish(struct sk_buff *skb)
122{ 122{
123 struct sk_buff *segs; 123 struct sk_buff *segs;
124 124
125 if (!skb_shinfo(skb)->gso_size) 125 if (!skb_is_gso(skb))
126 return xfrm6_output_finish2(skb); 126 return xfrm6_output_finish2(skb);
127 127
128 skb->protocol = htons(ETH_P_IP); 128 skb->protocol = htons(ETH_P_IP);
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 389a4119e1b4..ecc796878f38 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1382,14 +1382,12 @@ static int __init nr_proto_init(void)
1382 return -1; 1382 return -1;
1383 } 1383 }
1384 1384
1385 dev_nr = kmalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL); 1385 dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL);
1386 if (dev_nr == NULL) { 1386 if (dev_nr == NULL) {
1387 printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n"); 1387 printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
1388 return -1; 1388 return -1;
1389 } 1389 }
1390 1390
1391 memset(dev_nr, 0x00, nr_ndevs * sizeof(struct net_device *));
1392
1393 for (i = 0; i < nr_ndevs; i++) { 1391 for (i = 0; i < nr_ndevs; i++) {
1394 char name[IFNAMSIZ]; 1392 char name[IFNAMSIZ];
1395 struct net_device *dev; 1393 struct net_device *dev;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index d0a67bb31363..c115295ab431 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1490,14 +1490,13 @@ static int __init rose_proto_init(void)
1490 1490
1491 rose_callsign = null_ax25_address; 1491 rose_callsign = null_ax25_address;
1492 1492
1493 dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); 1493 dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
1494 if (dev_rose == NULL) { 1494 if (dev_rose == NULL) {
1495 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); 1495 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
1496 rc = -ENOMEM; 1496 rc = -ENOMEM;
1497 goto out_proto_unregister; 1497 goto out_proto_unregister;
1498 } 1498 }
1499 1499
1500 memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*));
1501 for (i = 0; i < rose_ndevs; i++) { 1500 for (i = 0; i < rose_ndevs; i++) {
1502 struct net_device *dev; 1501 struct net_device *dev;
1503 char name[IFNAMSIZ]; 1502 char name[IFNAMSIZ];
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 599423cc9d0d..0972247a839c 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -602,8 +602,8 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid)
602 return err; 602 return err;
603 603
604rtattr_failure: 604rtattr_failure:
605 module_put(a->ops->owner);
606nlmsg_failure: 605nlmsg_failure:
606 module_put(a->ops->owner);
607err_out: 607err_out:
608 kfree_skb(skb); 608 kfree_skb(skb);
609 kfree(a); 609 kfree(a);