-rw-r--r--  Documentation/block/00-INDEX | 2
-rw-r--r--  Documentation/block/as-iosched.txt | 172
-rw-r--r--  Documentation/kvm/api.txt | 10
-rw-r--r--  Documentation/sound/alsa/Procfile.txt | 2
-rw-r--r--  Documentation/vgaarbiter.txt | 2
-rw-r--r--  arch/ia64/kvm/vcpu.h | 9
-rw-r--r--  arch/ia64/kvm/vmm.c | 4
-rw-r--r--  arch/ia64/kvm/vtlb.c | 2
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 13
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c | 22
-rw-r--r--  arch/x86/include/asm/kvm.h | 4
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h | 20
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 12
-rw-r--r--  arch/x86/kvm/lapic.c | 1
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 18
-rw-r--r--  arch/x86/kvm/x86.c | 12
-rw-r--r--  arch/x86/pci/bus_numa.c | 2
-rw-r--r--  block/blk-barrier.c | 2
-rw-r--r--  block/blk-settings.c | 121
-rw-r--r--  block/cfq-iosched.c | 67
-rw-r--r--  drivers/block/DAC960.c | 2
-rw-r--r--  drivers/block/aoe/aoecmd.c | 17
-rw-r--r--  drivers/block/drbd/drbd_int.h | 2
-rw-r--r--  drivers/block/drbd/drbd_main.c | 5
-rw-r--r--  drivers/block/drbd/drbd_proc.c | 2
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 1
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 2
-rw-r--r--  drivers/block/mg_disk.c | 2
-rw-r--r--  drivers/char/hw_random/core.c | 5
-rw-r--r--  drivers/md/md.c | 42
-rw-r--r--  drivers/net/3c507.c | 4
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/benet/be.h | 1
-rw-r--r--  drivers/net/benet/be_cmds.c | 36
-rw-r--r--  drivers/net/benet/be_cmds.h | 16
-rw-r--r--  drivers/net/benet/be_ethtool.c | 77
-rw-r--r--  drivers/net/bnx2x_main.c | 2
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 2
-rw-r--r--  drivers/net/gianfar.c | 13
-rw-r--r--  drivers/net/ibmlana.c | 3
-rw-r--r--  drivers/net/igb/e1000_82575.c | 4
-rw-r--r--  drivers/net/igb/e1000_phy.c | 9
-rw-r--r--  drivers/net/igb/igb_ethtool.c | 2
-rw-r--r--  drivers/net/igb/igb_main.c | 9
-rw-r--r--  drivers/net/igbvf/netdev.c | 3
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 5
-rw-r--r--  drivers/net/pcnet32.c | 3
-rw-r--r--  drivers/net/sfc/efx.c | 6
-rw-r--r--  drivers/net/sfc/falcon.c | 1
-rw-r--r--  drivers/net/sfc/falcon_xmac.c | 38
-rw-r--r--  drivers/net/sfc/mcdi_phy.c | 93
-rw-r--r--  drivers/net/sfc/net_driver.h | 1
-rw-r--r--  drivers/net/sfc/nic.c | 2
-rw-r--r--  drivers/net/sfc/qt202x_phy.c | 238
-rw-r--r--  drivers/net/sfc/siena.c | 1
-rw-r--r--  drivers/net/sfc/tenxpress.c | 138
-rw-r--r--  drivers/net/sfc/tx.c | 4
-rw-r--r--  drivers/net/tun.c | 6
-rw-r--r--  drivers/net/ucc_geth.c | 42
-rw-r--r--  drivers/net/via-rhine.c | 41
-rw-r--r--  drivers/net/vxge/vxge-main.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 18
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 20
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 42
-rw-r--r--  drivers/net/wireless/b43/dma.c | 197
-rw-r--r--  drivers/net/wireless/b43/dma.h | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 15
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000-hw.h | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rs.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-csr.h | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-dev.h | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.c | 37
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-hcmd.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rx.c | 56
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-scan.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 68
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/iwm.h | 4
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/netdev.c | 2
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/rx.c | 2
-rw-r--r--  drivers/net/wireless/libertas/mesh.c | 4
-rw-r--r--  drivers/net/wireless/libertas/scan.c | 22
-rw-r--r--  drivers/net/wireless/libertas/wext.c | 2
-rw-r--r--  drivers/net/wireless/libertas_tf/main.c | 1
-rw-r--r--  drivers/net/wireless/orinoco/wext.c | 6
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800.h | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 17
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 5
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180_dev.c | 1
-rw-r--r--  drivers/net/wireless/wl12xx/wl1251_boot.c | 2
-rw-r--r--  drivers/net/wireless/wl12xx/wl1271_cmd.c | 4
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.c | 140
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.h | 3
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 3
-rw-r--r--  drivers/pci/hotplug/shpchp.h | 2
-rw-r--r--  drivers/pci/intel-iommu.c | 6
-rw-r--r--  drivers/pci/intr_remapping.c | 2
-rw-r--r--  drivers/pci/pci-acpi.c | 10
-rw-r--r--  drivers/pci/pci.c | 30
-rw-r--r--  drivers/pci/pci.h | 8
-rw-r--r--  drivers/pci/pcie/aer/Kconfig.debug | 4
-rw-r--r--  drivers/pci/pcie/aer/aer_inject.c | 6
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c | 2
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_acpi.c | 2
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 6
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_errprint.c | 4
-rw-r--r--  drivers/pci/pcie/aspm.c | 4
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 2
-rw-r--r--  drivers/pci/quirks.c | 57
-rw-r--r--  drivers/pci/search.c | 6
-rw-r--r--  drivers/pcmcia/cardbus.c | 2
-rw-r--r--  drivers/platform/x86/dell-wmi.c | 9
-rw-r--r--  drivers/platform/x86/wmi.c | 6
-rw-r--r--  fs/namei.c | 1
-rw-r--r--  include/linux/blkdev.h | 17
-rw-r--r--  include/linux/ieee80211.h | 2
-rw-r--r--  include/linux/inetdevice.h | 1
-rw-r--r--  include/linux/kfifo.h | 2
-rw-r--r--  include/linux/pci.h | 3
-rw-r--r--  include/linux/sysctl.h | 1
-rw-r--r--  include/net/mac80211.h | 8
-rw-r--r--  include/scsi/libsrp.h | 1
-rw-r--r--  mm/mmap.c | 40
-rw-r--r--  mm/nommu.c | 25
-rw-r--r--  mm/slab.c | 4
-rw-r--r--  mm/util.c | 44
-rw-r--r--  net/core/pktgen.c | 9
-rw-r--r--  net/ipv4/devinet.c | 1
-rw-r--r--  net/ipv4/fib_frontend.c | 2
-rw-r--r--  net/mac80211/ht.c | 25
-rw-r--r--  net/mac80211/ibss.c | 4
-rw-r--r--  net/mac80211/main.c | 2
-rw-r--r--  net/mac80211/mlme.c | 8
-rw-r--r--  net/mac80211/tx.c | 4
-rw-r--r--  net/mac80211/util.c | 12
-rw-r--r--  net/wireless/mlme.c | 13
-rw-r--r--  net/wireless/scan.c | 13
-rw-r--r--  net/xfrm/xfrm_policy.c | 2
-rw-r--r--  sound/arm/aaci.c | 3
-rw-r--r--  sound/core/pcm_native.c | 8
-rw-r--r--  sound/pci/hda/hda_beep.c | 16
-rw-r--r--  sound/pci/hda/hda_codec.c | 10
-rw-r--r--  sound/pci/hda/hda_codec.h | 1
-rw-r--r--  sound/pci/hda/hda_intel.c | 21
-rw-r--r--  sound/pci/hda/patch_analog.c | 16
-rw-r--r--  sound/pci/hda/patch_sigmatel.c | 18
-rw-r--r--  virt/kvm/assigned-dev.c | 6
-rw-r--r--  virt/kvm/kvm_main.c | 5
158 files changed, 1352 insertions, 1310 deletions
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
index 961a0513f8c3..a406286f6f3e 100644
--- a/Documentation/block/00-INDEX
+++ b/Documentation/block/00-INDEX
@@ -1,7 +1,5 @@
100-INDEX 100-INDEX
2 - This file 2 - This file
3as-iosched.txt
4 - Anticipatory IO scheduler
5barrier.txt 3barrier.txt
6 - I/O Barriers 4 - I/O Barriers
7biodoc.txt 5biodoc.txt
diff --git a/Documentation/block/as-iosched.txt b/Documentation/block/as-iosched.txt
deleted file mode 100644
index 738b72be128e..000000000000
--- a/Documentation/block/as-iosched.txt
+++ /dev/null
@@ -1,172 +0,0 @@
1Anticipatory IO scheduler
2-------------------------
3Nick Piggin <piggin@cyberone.com.au> 13 Sep 2003
4
5Attention! Database servers, especially those using "TCQ" disks should
6investigate performance with the 'deadline' IO scheduler. Any system with high
7disk performance requirements should do so, in fact.
8
9If you see unusual performance characteristics of your disk systems, or you
10see big performance regressions versus the deadline scheduler, please email
11me. Database users don't bother unless you're willing to test a lot of patches
12from me ;) its a known issue.
13
14Also, users with hardware RAID controllers, doing striping, may find
15highly variable performance results with using the as-iosched. The
16as-iosched anticipatory implementation is based on the notion that a disk
17device has only one physical seeking head. A striped RAID controller
18actually has a head for each physical device in the logical RAID device.
19
20However, setting the antic_expire (see tunable parameters below) produces
21very similar behavior to the deadline IO scheduler.
22
23Selecting IO schedulers
24-----------------------
25Refer to Documentation/block/switching-sched.txt for information on
26selecting an io scheduler on a per-device basis.
27
28Anticipatory IO scheduler Policies
29----------------------------------
30The as-iosched implementation implements several layers of policies
31to determine when an IO request is dispatched to the disk controller.
32Here are the policies outlined, in order of application.
33
341. one-way Elevator algorithm.
35
36The elevator algorithm is similar to that used in deadline scheduler, with
37the addition that it allows limited backward movement of the elevator
38(i.e. seeks backwards). A seek backwards can occur when choosing between
39two IO requests where one is behind the elevator's current position, and
40the other is in front of the elevator's position. If the seek distance to
41the request in back of the elevator is less than half the seek distance to
42the request in front of the elevator, then the request in back can be chosen.
43Backward seeks are also limited to a maximum of MAXBACK (1024*1024) sectors.
44This favors forward movement of the elevator, while allowing opportunistic
45"short" backward seeks.
46
472. FIFO expiration times for reads and for writes.
48
49This is again very similar to the deadline IO scheduler. The expiration
50times for requests on these lists is tunable using the parameters read_expire
51and write_expire discussed below. When a read or a write expires in this way,
52the IO scheduler will interrupt its current elevator sweep or read anticipation
53to service the expired request.
54
553. Read and write request batching
56
57A batch is a collection of read requests or a collection of write
58requests. The as scheduler alternates dispatching read and write batches
59to the driver. In the case a read batch, the scheduler submits read
60requests to the driver as long as there are read requests to submit, and
61the read batch time limit has not been exceeded (read_batch_expire).
62The read batch time limit begins counting down only when there are
63competing write requests pending.
64
65In the case of a write batch, the scheduler submits write requests to
66the driver as long as there are write requests available, and the
67write batch time limit has not been exceeded (write_batch_expire).
68However, the length of write batches will be gradually shortened
69when read batches frequently exceed their time limit.
70
71When changing between batch types, the scheduler waits for all requests
72from the previous batch to complete before scheduling requests for the
73next batch.
74
75The read and write fifo expiration times described in policy 2 above
76are checked only when in scheduling IO of a batch for the corresponding
77(read/write) type. So for example, the read FIFO timeout values are
78tested only during read batches. Likewise, the write FIFO timeout
79values are tested only during write batches. For this reason,
80it is generally not recommended for the read batch time
81to be longer than the write expiration time, nor for the write batch
82time to exceed the read expiration time (see tunable parameters below).
83
84When the IO scheduler changes from a read to a write batch,
85it begins the elevator from the request that is on the head of the
86write expiration FIFO. Likewise, when changing from a write batch to
87a read batch, scheduler begins the elevator from the first entry
88on the read expiration FIFO.
89
904. Read anticipation.
91
92Read anticipation occurs only when scheduling a read batch.
93This implementation of read anticipation allows only one read request
94to be dispatched to the disk controller at a time. In
95contrast, many write requests may be dispatched to the disk controller
96at a time during a write batch. It is this characteristic that can make
97the anticipatory scheduler perform anomalously with controllers supporting
98TCQ, or with hardware striped RAID devices. Setting the antic_expire
99queue parameter (see below) to zero disables this behavior, and the
100anticipatory scheduler behaves essentially like the deadline scheduler.
101
102When read anticipation is enabled (antic_expire is not zero), reads
103are dispatched to the disk controller one at a time.
104At the end of each read request, the IO scheduler examines its next
105candidate read request from its sorted read list. If that next request
106is from the same process as the request that just completed,
107or if the next request in the queue is "very close" to the
108just completed request, it is dispatched immediately. Otherwise,
109statistics (average think time, average seek distance) on the process
110that submitted the just completed request are examined. If it seems
111likely that that process will submit another request soon, and that
112request is likely to be near the just completed request, then the IO
113scheduler will stop dispatching more read requests for up to (antic_expire)
114milliseconds, hoping that process will submit a new request near the one
115that just completed. If such a request is made, then it is dispatched
116immediately. If the antic_expire wait time expires, then the IO scheduler
117will dispatch the next read request from the sorted read queue.
118
119To decide whether an anticipatory wait is worthwhile, the scheduler
120maintains statistics for each process that can be used to compute
121mean "think time" (the time between read requests), and mean seek
122distance for that process. One observation is that these statistics
123are associated with each process, but those statistics are not associated
124with a specific IO device. So for example, if a process is doing IO
125on several file systems on separate devices, the statistics will be
126a combination of IO behavior from all those devices.
127
128
129Tuning the anticipatory IO scheduler
130------------------------------------
131When using 'as', the anticipatory IO scheduler there are 5 parameters under
132/sys/block/*/queue/iosched/. All are units of milliseconds.
133
134The parameters are:
135* read_expire
136 Controls how long until a read request becomes "expired". It also controls the
137 interval between which expired requests are served, so set to 50, a request
138 might take anywhere < 100ms to be serviced _if_ it is the next on the
139 expired list. Obviously request expiration strategies won't make the disk
140 go faster. The result basically equates to the timeslice a single reader
141 gets in the presence of other IO. 100*((seek time / read_expire) + 1) is
142 very roughly the % streaming read efficiency your disk should get with
143 multiple readers.
144
145* read_batch_expire
146 Controls how much time a batch of reads is given before pending writes are
147 served. A higher value is more efficient. This might be set below read_expire
148 if writes are to be given higher priority than reads, but reads are to be
149 as efficient as possible when there are no writes. Generally though, it
150 should be some multiple of read_expire.
151
152* write_expire, and
153* write_batch_expire are equivalent to the above, for writes.
154
155* antic_expire
156 Controls the maximum amount of time we can anticipate a good read (one
157 with a short seek distance from the most recently completed request) before
158 giving up. Many other factors may cause anticipation to be stopped early,
159 or some processes will not be "anticipated" at all. Should be a bit higher
160 for big seek time devices though not a linear correspondence - most
161 processes have only a few ms thinktime.
162
163In addition to the tunables above there is a read-only file named est_time
164which, when read, will show:
165
166 - The probability of a task exiting without a cooperating task
167 submitting an anticipated IO.
168
169 - The current mean think time.
170
171 - The seek distance used to determine if an incoming IO is better.
172
diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index e1a114161027..2811e452f756 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -685,7 +685,7 @@ struct kvm_vcpu_events {
685 __u8 pad; 685 __u8 pad;
686 } nmi; 686 } nmi;
687 __u32 sipi_vector; 687 __u32 sipi_vector;
688 __u32 flags; /* must be zero */ 688 __u32 flags;
689}; 689};
690 690
6914.30 KVM_SET_VCPU_EVENTS 6914.30 KVM_SET_VCPU_EVENTS
@@ -701,6 +701,14 @@ vcpu.
701 701
702See KVM_GET_VCPU_EVENTS for the data structure. 702See KVM_GET_VCPU_EVENTS for the data structure.
703 703
704Fields that may be modified asynchronously by running VCPUs can be excluded
705from the update. These fields are nmi.pending and sipi_vector. Keep the
706corresponding bits in the flags field cleared to suppress overwriting the
707current in-kernel state. The bits are:
708
709KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
710KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector
711
704 712
7055. The kvm_run structure 7135. The kvm_run structure
706 714
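
As an illustration outside the patch, a minimal userspace sketch of how the new flag bits might be used with the KVM_SET_VCPU_EVENTS ioctl; vcpu_fd is assumed to come from a prior KVM_CREATE_VCPU call, and the surrounding setup is omitted.

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Sketch only: update nmi.pending while leaving sipi_vector under kernel
 * control. Fields without a VALID flag (exception, interrupt, nmi.injected,
 * nmi.masked) are still written from the struct, so fetch the current state
 * first and send it back unchanged. */
static int clear_pending_nmi(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;

	events.nmi.pending = 0;
	/* Transfer only nmi.pending; keep the SIPI vector bit cleared so the
	 * in-kernel sipi_vector is not overwritten. */
	events.flags = KVM_VCPUEVENT_VALID_NMI_PENDING;

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}
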
diff --git a/Documentation/sound/alsa/Procfile.txt b/Documentation/sound/alsa/Procfile.txt
index 719a819f8cc2..07301de12cc4 100644
--- a/Documentation/sound/alsa/Procfile.txt
+++ b/Documentation/sound/alsa/Procfile.txt
@@ -95,7 +95,7 @@ card*/pcm*/xrun_debug
95 It takes an integer value, can be changed by writing to this 95 It takes an integer value, can be changed by writing to this
96 file, such as 96 file, such as
97 97
98 # cat 5 > /proc/asound/card0/pcm0p/xrun_debug 98 # echo 5 > /proc/asound/card0/pcm0p/xrun_debug
99 99
100 The value consists of the following bit flags: 100 The value consists of the following bit flags:
101 bit 0 = Enable XRUN/jiffies debug messages 101 bit 0 = Enable XRUN/jiffies debug messages
diff --git a/Documentation/vgaarbiter.txt b/Documentation/vgaarbiter.txt
index 987f9b0a5ece..43a9b0694fdd 100644
--- a/Documentation/vgaarbiter.txt
+++ b/Documentation/vgaarbiter.txt
@@ -103,7 +103,7 @@ I.2 libpciaccess
103---------------- 103----------------
104 104
105To use the vga arbiter char device it was implemented an API inside the 105To use the vga arbiter char device it was implemented an API inside the
106libpciaccess library. One fieldd was added to struct pci_device (each device 106libpciaccess library. One field was added to struct pci_device (each device
107on the system): 107on the system):
108 108
109 /* the type of resource decoded by the device */ 109 /* the type of resource decoded by the device */
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index 360724d3ae69..988911b4cc7a 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -388,6 +388,9 @@ static inline u64 __gpfn_is_io(u64 gpfn)
388#define _vmm_raw_spin_lock(x) do {}while(0) 388#define _vmm_raw_spin_lock(x) do {}while(0)
389#define _vmm_raw_spin_unlock(x) do {}while(0) 389#define _vmm_raw_spin_unlock(x) do {}while(0)
390#else 390#else
391typedef struct {
392 volatile unsigned int lock;
393} vmm_spinlock_t;
391#define _vmm_raw_spin_lock(x) \ 394#define _vmm_raw_spin_lock(x) \
392 do { \ 395 do { \
393 __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ 396 __u32 *ia64_spinlock_ptr = (__u32 *) (x); \
@@ -405,12 +408,12 @@ static inline u64 __gpfn_is_io(u64 gpfn)
405 408
406#define _vmm_raw_spin_unlock(x) \ 409#define _vmm_raw_spin_unlock(x) \
407 do { barrier(); \ 410 do { barrier(); \
408 ((spinlock_t *)x)->raw_lock.lock = 0; } \ 411 ((vmm_spinlock_t *)x)->lock = 0; } \
409while (0) 412while (0)
410#endif 413#endif
411 414
412void vmm_spin_lock(spinlock_t *lock); 415void vmm_spin_lock(vmm_spinlock_t *lock);
413void vmm_spin_unlock(spinlock_t *lock); 416void vmm_spin_unlock(vmm_spinlock_t *lock);
414enum { 417enum {
415 I_TLB = 1, 418 I_TLB = 1,
416 D_TLB = 2 419 D_TLB = 2
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
index f4b4c899bb6c..7a62f75778c5 100644
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -60,12 +60,12 @@ static void __exit kvm_vmm_exit(void)
60 return ; 60 return ;
61} 61}
62 62
63void vmm_spin_lock(spinlock_t *lock) 63void vmm_spin_lock(vmm_spinlock_t *lock)
64{ 64{
65 _vmm_raw_spin_lock(lock); 65 _vmm_raw_spin_lock(lock);
66} 66}
67 67
68void vmm_spin_unlock(spinlock_t *lock) 68void vmm_spin_unlock(vmm_spinlock_t *lock)
69{ 69{
70 _vmm_raw_spin_unlock(lock); 70 _vmm_raw_spin_unlock(lock);
71} 71}
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 20b3852f7a6e..4332f7ee5203 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -182,7 +182,7 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
182{ 182{
183 u64 i, dirty_pages = 1; 183 u64 i, dirty_pages = 1;
184 u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT; 184 u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
185 spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa); 185 vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
186 void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE; 186 void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
187 187
188 dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT; 188 dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index e8dfdbd9327a..cadbed679fbb 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1107,6 +1107,12 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
1107 list_for_each_entry(dev, &bus->devices, bus_list) { 1107 list_for_each_entry(dev, &bus->devices, bus_list) {
1108 struct dev_archdata *sd = &dev->dev.archdata; 1108 struct dev_archdata *sd = &dev->dev.archdata;
1109 1109
1110 /* Cardbus can call us to add new devices to a bus, so ignore
1111 * those who are already fully discovered
1112 */
1113 if (dev->is_added)
1114 continue;
1115
1110 /* Setup OF node pointer in archdata */ 1116 /* Setup OF node pointer in archdata */
1111 sd->of_node = pci_device_to_OF_node(dev); 1117 sd->of_node = pci_device_to_OF_node(dev);
1112 1118
@@ -1147,6 +1153,13 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
1147} 1153}
1148EXPORT_SYMBOL(pcibios_fixup_bus); 1154EXPORT_SYMBOL(pcibios_fixup_bus);
1149 1155
1156void __devinit pci_fixup_cardbus(struct pci_bus *bus)
1157{
1158 /* Now fixup devices on that bus */
1159 pcibios_setup_bus_devices(bus);
1160}
1161
1162
1150static int skip_isa_ioresource_align(struct pci_dev *dev) 1163static int skip_isa_ioresource_align(struct pci_dev *dev)
1151{ 1164{
1152 if ((ppc_pci_flags & PPC_PCI_CAN_SKIP_ISA_ALIGN) && 1165 if ((ppc_pci_flags & PPC_PCI_CAN_SKIP_ISA_ALIGN) &&
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 5598f88f142e..e4beeb371a73 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -390,6 +390,26 @@ static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
390{ 390{
391 u64 rb = 0, rs = 0; 391 u64 rb = 0, rs = 0;
392 392
393 /*
394 * According to Book3 2.01 mtsrin is implemented as:
395 *
396 * The SLB entry specified by (RB)32:35 is loaded from register
397 * RS, as follows.
398 *
399 * SLBE Bit Source SLB Field
400 *
401 * 0:31 0x0000_0000 ESID-0:31
402 * 32:35 (RB)32:35 ESID-32:35
403 * 36 0b1 V
404 * 37:61 0x00_0000|| 0b0 VSID-0:24
405 * 62:88 (RS)37:63 VSID-25:51
406 * 89:91 (RS)33:35 Ks Kp N
407 * 92 (RS)36 L ((RS)36 must be 0b0)
408 * 93 0b0 C
409 */
410
411 dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);
412
393 /* ESID = srnum */ 413 /* ESID = srnum */
394 rb |= (srnum & 0xf) << 28; 414 rb |= (srnum & 0xf) << 28;
395 /* Set the valid bit */ 415 /* Set the valid bit */
@@ -400,7 +420,7 @@ static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
400 /* VSID = VSID */ 420 /* VSID = VSID */
401 rs |= (value & 0xfffffff) << 12; 421 rs |= (value & 0xfffffff) << 12;
402 /* flags = flags */ 422 /* flags = flags */
403 rs |= ((value >> 27) & 0xf) << 9; 423 rs |= ((value >> 28) & 0x7) << 9;
404 424
405 kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb); 425 kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
406} 426}
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index 950df434763f..f46b79f6c16c 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -254,6 +254,10 @@ struct kvm_reinject_control {
254 __u8 reserved[31]; 254 __u8 reserved[31];
255}; 255};
256 256
257/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
258#define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001
259#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
260
257/* for KVM_GET/SET_VCPU_EVENTS */ 261/* for KVM_GET/SET_VCPU_EVENTS */
258struct kvm_vcpu_events { 262struct kvm_vcpu_events {
259 struct { 263 struct {
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 811bfabc80b7..bcdb708993d2 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -338,6 +338,18 @@ static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long o
338 return UV_GLOBAL_GRU_MMR_BASE | offset | (pnode << uv_hub_info->m_val); 338 return UV_GLOBAL_GRU_MMR_BASE | offset | (pnode << uv_hub_info->m_val);
339} 339}
340 340
341static inline void uv_write_global_mmr8(int pnode, unsigned long offset,
342 unsigned char val)
343{
344 writeb(val, uv_global_mmr64_address(pnode, offset));
345}
346
347static inline unsigned char uv_read_global_mmr8(int pnode,
348 unsigned long offset)
349{
350 return readb(uv_global_mmr64_address(pnode, offset));
351}
352
341/* 353/*
342 * Access hub local MMRs. Faster than using global space but only local MMRs 354 * Access hub local MMRs. Faster than using global space but only local MMRs
343 * are accessible. 355 * are accessible.
@@ -457,11 +469,17 @@ static inline void uv_set_scir_bits(unsigned char value)
457 } 469 }
458} 470}
459 471
472static inline unsigned long uv_scir_offset(int apicid)
473{
474 return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f);
475}
476
460static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value) 477static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
461{ 478{
462 if (uv_cpu_hub_info(cpu)->scir.state != value) { 479 if (uv_cpu_hub_info(cpu)->scir.state != value) {
480 uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
481 uv_cpu_hub_info(cpu)->scir.offset, value);
463 uv_cpu_hub_info(cpu)->scir.state = value; 482 uv_cpu_hub_info(cpu)->scir.state = value;
464 uv_write_local_mmr8(uv_cpu_hub_info(cpu)->scir.offset, value);
465 } 483 }
466} 484}
467 485
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index d56b0efb2057..5f92494dab61 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -629,8 +629,10 @@ void __init uv_system_init(void)
629 uv_rtc_init(); 629 uv_rtc_init();
630 630
631 for_each_present_cpu(cpu) { 631 for_each_present_cpu(cpu) {
632 int apicid = per_cpu(x86_cpu_to_apicid, cpu);
633
632 nid = cpu_to_node(cpu); 634 nid = cpu_to_node(cpu);
633 pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu)); 635 pnode = uv_apicid_to_pnode(apicid);
634 blade = boot_pnode_to_blade(pnode); 636 blade = boot_pnode_to_blade(pnode);
635 lcpu = uv_blade_info[blade].nr_possible_cpus; 637 lcpu = uv_blade_info[blade].nr_possible_cpus;
636 uv_blade_info[blade].nr_possible_cpus++; 638 uv_blade_info[blade].nr_possible_cpus++;
@@ -651,15 +653,13 @@ void __init uv_system_init(void)
651 uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra; 653 uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
652 uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; 654 uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
653 uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id; 655 uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
654 uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu; 656 uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
655 uv_node_to_blade[nid] = blade; 657 uv_node_to_blade[nid] = blade;
656 uv_cpu_to_blade[cpu] = blade; 658 uv_cpu_to_blade[cpu] = blade;
657 max_pnode = max(pnode, max_pnode); 659 max_pnode = max(pnode, max_pnode);
658 660
659 printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, " 661 printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
660 "lcpu %d, blade %d\n", 662 cpu, apicid, pnode, nid, lcpu, blade);
661 cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
662 lcpu, blade);
663 } 663 }
664 664
665 /* Add blade/pnode info for nodes without cpus */ 665 /* Add blade/pnode info for nodes without cpus */
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index cd60c0bd1b32..3063a0c4858b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1150,6 +1150,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
1150 hrtimer_cancel(&apic->lapic_timer.timer); 1150 hrtimer_cancel(&apic->lapic_timer.timer);
1151 update_divide_count(apic); 1151 update_divide_count(apic);
1152 start_apic_timer(apic); 1152 start_apic_timer(apic);
1153 apic->irr_pending = true;
1153} 1154}
1154 1155
1155void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) 1156void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a6017132fba8..58a0f1e88596 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -455,8 +455,6 @@ out_unlock:
455static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) 455static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
456{ 456{
457 struct kvm_shadow_walk_iterator iterator; 457 struct kvm_shadow_walk_iterator iterator;
458 pt_element_t gpte;
459 gpa_t pte_gpa = -1;
460 int level; 458 int level;
461 u64 *sptep; 459 u64 *sptep;
462 int need_flush = 0; 460 int need_flush = 0;
@@ -470,10 +468,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
470 if (level == PT_PAGE_TABLE_LEVEL || 468 if (level == PT_PAGE_TABLE_LEVEL ||
471 ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) || 469 ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
472 ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) { 470 ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
473 struct kvm_mmu_page *sp = page_header(__pa(sptep));
474
475 pte_gpa = (sp->gfn << PAGE_SHIFT);
476 pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
477 471
478 if (is_shadow_present_pte(*sptep)) { 472 if (is_shadow_present_pte(*sptep)) {
479 rmap_remove(vcpu->kvm, sptep); 473 rmap_remove(vcpu->kvm, sptep);
@@ -492,18 +486,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
492 if (need_flush) 486 if (need_flush)
493 kvm_flush_remote_tlbs(vcpu->kvm); 487 kvm_flush_remote_tlbs(vcpu->kvm);
494 spin_unlock(&vcpu->kvm->mmu_lock); 488 spin_unlock(&vcpu->kvm->mmu_lock);
495
496 if (pte_gpa == -1)
497 return;
498 if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
499 sizeof(pt_element_t)))
500 return;
501 if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
502 if (mmu_topup_memory_caches(vcpu))
503 return;
504 kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
505 sizeof(pt_element_t), 0);
506 }
507} 489}
508 490
509static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr) 491static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9d068966fb2a..6651dbf58675 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1913,7 +1913,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
1913 1913
1914 events->sipi_vector = vcpu->arch.sipi_vector; 1914 events->sipi_vector = vcpu->arch.sipi_vector;
1915 1915
1916 events->flags = 0; 1916 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
1917 | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
1917 1918
1918 vcpu_put(vcpu); 1919 vcpu_put(vcpu);
1919} 1920}
@@ -1921,7 +1922,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
1921static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, 1922static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
1922 struct kvm_vcpu_events *events) 1923 struct kvm_vcpu_events *events)
1923{ 1924{
1924 if (events->flags) 1925 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
1926 | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
1925 return -EINVAL; 1927 return -EINVAL;
1926 1928
1927 vcpu_load(vcpu); 1929 vcpu_load(vcpu);
@@ -1938,10 +1940,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
1938 kvm_pic_clear_isr_ack(vcpu->kvm); 1940 kvm_pic_clear_isr_ack(vcpu->kvm);
1939 1941
1940 vcpu->arch.nmi_injected = events->nmi.injected; 1942 vcpu->arch.nmi_injected = events->nmi.injected;
1941 vcpu->arch.nmi_pending = events->nmi.pending; 1943 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
1944 vcpu->arch.nmi_pending = events->nmi.pending;
1942 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); 1945 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
1943 1946
1944 vcpu->arch.sipi_vector = events->sipi_vector; 1947 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
1948 vcpu->arch.sipi_vector = events->sipi_vector;
1945 1949
1946 vcpu_put(vcpu); 1950 vcpu_put(vcpu);
1947 1951
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index 145df00e0387..f939d603adfa 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -51,7 +51,7 @@ void x86_pci_root_bus_res_quirks(struct pci_bus *b)
51 } 51 }
52} 52}
53 53
54void __init update_res(struct pci_root_info *info, size_t start, 54void __devinit update_res(struct pci_root_info *info, size_t start,
55 size_t end, unsigned long flags, int merge) 55 size_t end, unsigned long flags, int merge)
56{ 56{
57 int i; 57 int i;
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 8873b9b439ff..8618d8996fea 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -402,7 +402,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
402 * our current implementations need. If we'll ever need 402 * our current implementations need. If we'll ever need
403 * more the interface will need revisiting. 403 * more the interface will need revisiting.
404 */ 404 */
405 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 405 page = alloc_page(gfp_mask | __GFP_ZERO);
406 if (!page) 406 if (!page)
407 goto out_free_bio; 407 goto out_free_bio;
408 if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size) 408 if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 6ae118d6e193..d52d4adc440b 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -505,21 +505,30 @@ static unsigned int lcm(unsigned int a, unsigned int b)
505 505
506/** 506/**
507 * blk_stack_limits - adjust queue_limits for stacked devices 507 * blk_stack_limits - adjust queue_limits for stacked devices
508 * @t: the stacking driver limits (top) 508 * @t: the stacking driver limits (top device)
509 * @b: the underlying queue limits (bottom) 509 * @b: the underlying queue limits (bottom, component device)
510 * @offset: offset to beginning of data within component device 510 * @offset: offset to beginning of data within component device
511 * 511 *
512 * Description: 512 * Description:
513 * Merges two queue_limit structs. Returns 0 if alignment didn't 513 * This function is used by stacking drivers like MD and DM to ensure
514 * change. Returns -1 if adding the bottom device caused 514 * that all component devices have compatible block sizes and
515 * misalignment. 515 * alignments. The stacking driver must provide a queue_limits
516 * struct (top) and then iteratively call the stacking function for
517 * all component (bottom) devices. The stacking function will
518 * attempt to combine the values and ensure proper alignment.
519 *
520 * Returns 0 if the top and bottom queue_limits are compatible. The
521 * top device's block sizes and alignment offsets may be adjusted to
522 * ensure alignment with the bottom device. If no compatible sizes
523 * and alignments exist, -1 is returned and the resulting top
524 * queue_limits will have the misaligned flag set to indicate that
525 * the alignment_offset is undefined.
516 */ 526 */
517int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, 527int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
518 sector_t offset) 528 sector_t offset)
519{ 529{
520 int ret; 530 sector_t alignment;
521 531 unsigned int top, bottom;
522 ret = 0;
523 532
524 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); 533 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
525 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); 534 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
@@ -537,6 +546,22 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
537 t->max_segment_size = min_not_zero(t->max_segment_size, 546 t->max_segment_size = min_not_zero(t->max_segment_size,
538 b->max_segment_size); 547 b->max_segment_size);
539 548
549 alignment = queue_limit_alignment_offset(b, offset);
550
551 /* Bottom device has different alignment. Check that it is
552 * compatible with the current top alignment.
553 */
554 if (t->alignment_offset != alignment) {
555
556 top = max(t->physical_block_size, t->io_min)
557 + t->alignment_offset;
558 bottom = max(b->physical_block_size, b->io_min) + alignment;
559
560 /* Verify that top and bottom intervals line up */
561 if (max(top, bottom) & (min(top, bottom) - 1))
562 t->misaligned = 1;
563 }
564
540 t->logical_block_size = max(t->logical_block_size, 565 t->logical_block_size = max(t->logical_block_size,
541 b->logical_block_size); 566 b->logical_block_size);
542 567
@@ -544,54 +569,64 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
544 b->physical_block_size); 569 b->physical_block_size);
545 570
546 t->io_min = max(t->io_min, b->io_min); 571 t->io_min = max(t->io_min, b->io_min);
572 t->io_opt = lcm(t->io_opt, b->io_opt);
573
547 t->no_cluster |= b->no_cluster; 574 t->no_cluster |= b->no_cluster;
548 t->discard_zeroes_data &= b->discard_zeroes_data; 575 t->discard_zeroes_data &= b->discard_zeroes_data;
549 576
550 /* Bottom device offset aligned? */ 577 /* Physical block size a multiple of the logical block size? */
551 if (offset && 578 if (t->physical_block_size & (t->logical_block_size - 1)) {
552 (offset & (b->physical_block_size - 1)) != b->alignment_offset) { 579 t->physical_block_size = t->logical_block_size;
553 t->misaligned = 1; 580 t->misaligned = 1;
554 ret = -1;
555 } 581 }
556 582
557 /* 583 /* Minimum I/O a multiple of the physical block size? */
558 * Temporarily disable discard granularity. It's currently buggy 584 if (t->io_min & (t->physical_block_size - 1)) {
559 * since we default to 0 for discard_granularity, hence this 585 t->io_min = t->physical_block_size;
560 * "failure" will always trigger for non-zero offsets. 586 t->misaligned = 1;
561 */
562#if 0
563 if (offset &&
564 (offset & (b->discard_granularity - 1)) != b->discard_alignment) {
565 t->discard_misaligned = 1;
566 ret = -1;
567 } 587 }
568#endif
569
570 /* If top has no alignment offset, inherit from bottom */
571 if (!t->alignment_offset)
572 t->alignment_offset =
573 b->alignment_offset & (b->physical_block_size - 1);
574 588
575 if (!t->discard_alignment) 589 /* Optimal I/O a multiple of the physical block size? */
576 t->discard_alignment = 590 if (t->io_opt & (t->physical_block_size - 1)) {
577 b->discard_alignment & (b->discard_granularity - 1); 591 t->io_opt = 0;
578
579 /* Top device aligned on logical block boundary? */
580 if (t->alignment_offset & (t->logical_block_size - 1)) {
581 t->misaligned = 1; 592 t->misaligned = 1;
582 ret = -1;
583 } 593 }
584 594
585 /* Find lcm() of optimal I/O size and granularity */ 595 /* Find lowest common alignment_offset */
586 t->io_opt = lcm(t->io_opt, b->io_opt); 596 t->alignment_offset = lcm(t->alignment_offset, alignment)
587 t->discard_granularity = lcm(t->discard_granularity, 597 & (max(t->physical_block_size, t->io_min) - 1);
588 b->discard_granularity);
589 598
590 /* Verify that optimal I/O size is a multiple of io_min */ 599 /* Verify that new alignment_offset is on a logical block boundary */
591 if (t->io_min && t->io_opt % t->io_min) 600 if (t->alignment_offset & (t->logical_block_size - 1))
592 ret = -1; 601 t->misaligned = 1;
602
603 /* Discard alignment and granularity */
604 if (b->discard_granularity) {
605 unsigned int granularity = b->discard_granularity;
606 offset &= granularity - 1;
607
608 alignment = (granularity + b->discard_alignment - offset)
609 & (granularity - 1);
610
611 if (t->discard_granularity != 0 &&
612 t->discard_alignment != alignment) {
613 top = t->discard_granularity + t->discard_alignment;
614 bottom = b->discard_granularity + alignment;
615
616 /* Verify that top and bottom intervals line up */
617 if (max(top, bottom) & (min(top, bottom) - 1))
618 t->discard_misaligned = 1;
619 }
620
621 t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
622 b->max_discard_sectors);
623 t->discard_granularity = max(t->discard_granularity,
624 b->discard_granularity);
625 t->discard_alignment = lcm(t->discard_alignment, alignment) &
626 (t->discard_granularity - 1);
627 }
593 628
594 return ret; 629 return t->misaligned ? -1 : 0;
595} 630}
596EXPORT_SYMBOL(blk_stack_limits); 631EXPORT_SYMBOL(blk_stack_limits);
597 632
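
The "do the intervals line up?" test that blk_stack_limits() now applies can be exercised on its own in plain C; the device sizes below are made-up examples, not values taken from the patch.

#include <stdio.h>

/* An interval is max(physical_block_size, io_min) + alignment_offset.
 * Two devices are considered aligned when the larger interval is a
 * multiple of the smaller, power-of-two one, i.e. when
 * max(top, bottom) & (min(top, bottom) - 1) is zero. */
static unsigned int interval(unsigned int phys, unsigned int io_min,
			     unsigned int align)
{
	return (phys > io_min ? phys : io_min) + align;
}

int main(void)
{
	unsigned int top = interval(4096, 4096, 0);	/* hypothetical top device */
	unsigned int bottom = interval(512, 512, 3584);	/* hypothetical bottom device */
	unsigned int hi = top > bottom ? top : bottom;
	unsigned int lo = top > bottom ? bottom : top;

	printf("top=%u bottom=%u misaligned=%d\n",
	       top, bottom, (hi & (lo - 1)) ? 1 : 0);
	return 0;
}
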
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e2f80463ed0d..918c7fd9aeb1 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -208,8 +208,6 @@ struct cfq_data {
208 /* Root service tree for cfq_groups */ 208 /* Root service tree for cfq_groups */
209 struct cfq_rb_root grp_service_tree; 209 struct cfq_rb_root grp_service_tree;
210 struct cfq_group root_group; 210 struct cfq_group root_group;
211 /* Number of active cfq groups on group service tree */
212 int nr_groups;
213 211
214 /* 212 /*
215 * The priority currently being served 213 * The priority currently being served
@@ -294,8 +292,7 @@ static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
294 292
295static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg, 293static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
296 enum wl_prio_t prio, 294 enum wl_prio_t prio,
297 enum wl_type_t type, 295 enum wl_type_t type)
298 struct cfq_data *cfqd)
299{ 296{
300 if (!cfqg) 297 if (!cfqg)
301 return NULL; 298 return NULL;
@@ -842,7 +839,6 @@ cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
842 839
843 __cfq_group_service_tree_add(st, cfqg); 840 __cfq_group_service_tree_add(st, cfqg);
844 cfqg->on_st = true; 841 cfqg->on_st = true;
845 cfqd->nr_groups++;
846 st->total_weight += cfqg->weight; 842 st->total_weight += cfqg->weight;
847} 843}
848 844
@@ -863,7 +859,6 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
863 859
864 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group"); 860 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
865 cfqg->on_st = false; 861 cfqg->on_st = false;
866 cfqd->nr_groups--;
867 st->total_weight -= cfqg->weight; 862 st->total_weight -= cfqg->weight;
868 if (!RB_EMPTY_NODE(&cfqg->rb_node)) 863 if (!RB_EMPTY_NODE(&cfqg->rb_node))
869 cfq_rb_erase(&cfqg->rb_node, st); 864 cfq_rb_erase(&cfqg->rb_node, st);
@@ -1150,7 +1145,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1150#endif 1145#endif
1151 1146
1152 service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq), 1147 service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1153 cfqq_type(cfqq), cfqd); 1148 cfqq_type(cfqq));
1154 if (cfq_class_idle(cfqq)) { 1149 if (cfq_class_idle(cfqq)) {
1155 rb_key = CFQ_IDLE_DELAY; 1150 rb_key = CFQ_IDLE_DELAY;
1156 parent = rb_last(&service_tree->rb); 1151 parent = rb_last(&service_tree->rb);
@@ -1513,9 +1508,6 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1513 struct cfq_io_context *cic; 1508 struct cfq_io_context *cic;
1514 struct cfq_queue *cfqq; 1509 struct cfq_queue *cfqq;
1515 1510
1516 /* Deny merge if bio and rq don't belong to same cfq group */
1517 if ((RQ_CFQQ(rq))->cfqg != cfq_get_cfqg(cfqd, 0))
1518 return false;
1519 /* 1511 /*
1520 * Disallow merge of a sync bio into an async request. 1512 * Disallow merge of a sync bio into an async request.
1521 */ 1513 */
@@ -1616,7 +1608,7 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1616{ 1608{
1617 struct cfq_rb_root *service_tree = 1609 struct cfq_rb_root *service_tree =
1618 service_tree_for(cfqd->serving_group, cfqd->serving_prio, 1610 service_tree_for(cfqd->serving_group, cfqd->serving_prio,
1619 cfqd->serving_type, cfqd); 1611 cfqd->serving_type);
1620 1612
1621 if (!cfqd->rq_queued) 1613 if (!cfqd->rq_queued)
1622 return NULL; 1614 return NULL;
@@ -1675,13 +1667,17 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1675#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR) 1667#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
1676 1668
1677static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1669static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1678 struct request *rq) 1670 struct request *rq, bool for_preempt)
1679{ 1671{
1680 sector_t sdist = cfqq->seek_mean; 1672 sector_t sdist = cfqq->seek_mean;
1681 1673
1682 if (!sample_valid(cfqq->seek_samples)) 1674 if (!sample_valid(cfqq->seek_samples))
1683 sdist = CFQQ_SEEK_THR; 1675 sdist = CFQQ_SEEK_THR;
1684 1676
1677 /* if seek_mean is big, using it as close criteria is meaningless */
1678 if (sdist > CFQQ_SEEK_THR && !for_preempt)
1679 sdist = CFQQ_SEEK_THR;
1680
1685 return cfq_dist_from_last(cfqd, rq) <= sdist; 1681 return cfq_dist_from_last(cfqd, rq) <= sdist;
1686} 1682}
1687 1683
@@ -1709,7 +1705,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1709 * will contain the closest sector. 1705 * will contain the closest sector.
1710 */ 1706 */
1711 __cfqq = rb_entry(parent, struct cfq_queue, p_node); 1707 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1712 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) 1708 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
1713 return __cfqq; 1709 return __cfqq;
1714 1710
1715 if (blk_rq_pos(__cfqq->next_rq) < sector) 1711 if (blk_rq_pos(__cfqq->next_rq) < sector)
@@ -1720,7 +1716,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1720 return NULL; 1716 return NULL;
1721 1717
1722 __cfqq = rb_entry(node, struct cfq_queue, p_node); 1718 __cfqq = rb_entry(node, struct cfq_queue, p_node);
1723 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) 1719 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
1724 return __cfqq; 1720 return __cfqq;
1725 1721
1726 return NULL; 1722 return NULL;
@@ -1963,8 +1959,7 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
1963} 1959}
1964 1960
1965static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd, 1961static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
1966 struct cfq_group *cfqg, enum wl_prio_t prio, 1962 struct cfq_group *cfqg, enum wl_prio_t prio)
1967 bool prio_changed)
1968{ 1963{
1969 struct cfq_queue *queue; 1964 struct cfq_queue *queue;
1970 int i; 1965 int i;
@@ -1972,24 +1967,9 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
1972 unsigned long lowest_key = 0; 1967 unsigned long lowest_key = 0;
1973 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD; 1968 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
1974 1969
1975 if (prio_changed) { 1970 for (i = 0; i <= SYNC_WORKLOAD; ++i) {
1976 /* 1971 /* select the one with lowest rb_key */
1977 * When priorities switched, we prefer starting 1972 queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
1978 * from SYNC_NOIDLE (first choice), or just SYNC
1979 * over ASYNC
1980 */
1981 if (service_tree_for(cfqg, prio, cur_best, cfqd)->count)
1982 return cur_best;
1983 cur_best = SYNC_WORKLOAD;
1984 if (service_tree_for(cfqg, prio, cur_best, cfqd)->count)
1985 return cur_best;
1986
1987 return ASYNC_WORKLOAD;
1988 }
1989
1990 for (i = 0; i < 3; ++i) {
1991 /* otherwise, select the one with lowest rb_key */
1992 queue = cfq_rb_first(service_tree_for(cfqg, prio, i, cfqd));
1993 if (queue && 1973 if (queue &&
1994 (!key_valid || time_before(queue->rb_key, lowest_key))) { 1974 (!key_valid || time_before(queue->rb_key, lowest_key))) {
1995 lowest_key = queue->rb_key; 1975 lowest_key = queue->rb_key;
@@ -2003,8 +1983,6 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
2003 1983
2004static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg) 1984static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2005{ 1985{
2006 enum wl_prio_t previous_prio = cfqd->serving_prio;
2007 bool prio_changed;
2008 unsigned slice; 1986 unsigned slice;
2009 unsigned count; 1987 unsigned count;
2010 struct cfq_rb_root *st; 1988 struct cfq_rb_root *st;
@@ -2032,24 +2010,19 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2032 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload 2010 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2033 * expiration time 2011 * expiration time
2034 */ 2012 */
2035 prio_changed = (cfqd->serving_prio != previous_prio); 2013 st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2036 st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type,
2037 cfqd);
2038 count = st->count; 2014 count = st->count;
2039 2015
2040 /* 2016 /*
2041 * If priority didn't change, check workload expiration, 2017 * check workload expiration, and that we still have other queues ready
2042 * and that we still have other queues ready
2043 */ 2018 */
2044 if (!prio_changed && count && 2019 if (count && !time_after(jiffies, cfqd->workload_expires))
2045 !time_after(jiffies, cfqd->workload_expires))
2046 return; 2020 return;
2047 2021
2048 /* otherwise select new workload type */ 2022 /* otherwise select new workload type */
2049 cfqd->serving_type = 2023 cfqd->serving_type =
2050 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio, prio_changed); 2024 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2051 st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type, 2025 st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2052 cfqd);
2053 count = st->count; 2026 count = st->count;
2054 2027
2055 /* 2028 /*
@@ -3143,7 +3116,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3143 * if this request is as-good as one we would expect from the 3116 * if this request is as-good as one we would expect from the
3144 * current cfqq, let it preempt 3117 * current cfqq, let it preempt
3145 */ 3118 */
3146 if (cfq_rq_close(cfqd, cfqq, rq)) 3119 if (cfq_rq_close(cfqd, cfqq, rq, true))
3147 return true; 3120 return true;
3148 3121
3149 return false; 3122 return false;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index eb4fa1943944..ce1fa923c414 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -7101,7 +7101,7 @@ static struct DAC960_privdata DAC960_BA_privdata = {
7101 7101
7102static struct DAC960_privdata DAC960_LP_privdata = { 7102static struct DAC960_privdata DAC960_LP_privdata = {
7103 .HardwareType = DAC960_LP_Controller, 7103 .HardwareType = DAC960_LP_Controller,
7104 .FirmwareType = DAC960_LP_Controller, 7104 .FirmwareType = DAC960_V2_Controller,
7105 .InterruptHandler = DAC960_LP_InterruptHandler, 7105 .InterruptHandler = DAC960_LP_InterruptHandler,
7106 .MemoryWindowSize = DAC960_LP_RegisterWindowSize, 7106 .MemoryWindowSize = DAC960_LP_RegisterWindowSize,
7107}; 7107};
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 13bb69d2abb3..64a223b0cc22 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -735,21 +735,6 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
735 part_stat_unlock(); 735 part_stat_unlock();
736} 736}
737 737
738/*
739 * Ensure we don't create aliases in VI caches
740 */
741static inline void
742killalias(struct bio *bio)
743{
744 struct bio_vec *bv;
745 int i;
746
747 if (bio_data_dir(bio) == READ)
748 __bio_for_each_segment(bv, bio, i, 0) {
749 flush_dcache_page(bv->bv_page);
750 }
751}
752
753void 738void
754aoecmd_ata_rsp(struct sk_buff *skb) 739aoecmd_ata_rsp(struct sk_buff *skb)
755{ 740{
@@ -871,7 +856,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
871 if (buf->flags & BUFFL_FAIL) 856 if (buf->flags & BUFFL_FAIL)
872 bio_endio(buf->bio, -EIO); 857 bio_endio(buf->bio, -EIO);
873 else { 858 else {
874 killalias(buf->bio); 859 bio_flush_dcache_pages(buf->bio);
875 bio_endio(buf->bio, 0); 860 bio_endio(buf->bio, 0);
876 } 861 }
877 mempool_free(buf, d->bufpool); 862 mempool_free(buf, d->bufpool);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 2312d782fe99..c97558763430 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1490,7 +1490,7 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
1490 1490
1491/* drbd_proc.c */ 1491/* drbd_proc.c */
1492extern struct proc_dir_entry *drbd_proc; 1492extern struct proc_dir_entry *drbd_proc;
1493extern struct file_operations drbd_proc_fops; 1493extern const struct file_operations drbd_proc_fops;
1494extern const char *drbd_conn_str(enum drbd_conns s); 1494extern const char *drbd_conn_str(enum drbd_conns s);
1495extern const char *drbd_role_str(enum drbd_role s); 1495extern const char *drbd_role_str(enum drbd_role s);
1496 1496
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 157d1e4343c2..9348f33f6242 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -27,7 +27,6 @@
27 */ 27 */
28 28
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/version.h>
31#include <linux/drbd.h> 30#include <linux/drbd.h>
32#include <asm/uaccess.h> 31#include <asm/uaccess.h>
33#include <asm/types.h> 32#include <asm/types.h>
@@ -151,7 +150,7 @@ wait_queue_head_t drbd_pp_wait;
151 150
152DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5); 151DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
153 152
154static struct block_device_operations drbd_ops = { 153static const struct block_device_operations drbd_ops = {
155 .owner = THIS_MODULE, 154 .owner = THIS_MODULE,
156 .open = drbd_open, 155 .open = drbd_open,
157 .release = drbd_release, 156 .release = drbd_release,
@@ -3623,7 +3622,7 @@ _drbd_fault_random(struct fault_random_state *rsp)
3623{ 3622{
3624 long refresh; 3623 long refresh;
3625 3624
3626 if (--rsp->count < 0) { 3625 if (!rsp->count--) {
3627 get_random_bytes(&refresh, sizeof(refresh)); 3626 get_random_bytes(&refresh, sizeof(refresh));
3628 rsp->state += refresh; 3627 rsp->state += refresh;
3629 rsp->count = FAULT_RANDOM_REFRESH; 3628 rsp->count = FAULT_RANDOM_REFRESH;
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index bdd0b4943b10..df8ad9660d8f 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -38,7 +38,7 @@ static int drbd_proc_open(struct inode *inode, struct file *file);
38 38
39 39
40struct proc_dir_entry *drbd_proc; 40struct proc_dir_entry *drbd_proc;
41struct file_operations drbd_proc_fops = { 41const struct file_operations drbd_proc_fops = {
42 .owner = THIS_MODULE, 42 .owner = THIS_MODULE,
43 .open = drbd_proc_open, 43 .open = drbd_proc_open,
44 .read = seq_read, 44 .read = seq_read,
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c548f24f54a1..259c1351b152 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -28,7 +28,6 @@
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29#include <net/sock.h> 29#include <net/sock.h>
30 30
31#include <linux/version.h>
32#include <linux/drbd.h> 31#include <linux/drbd.h>
33#include <linux/fs.h> 32#include <linux/fs.h>
34#include <linux/file.h> 33#include <linux/file.h>
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index ed8796f1112d..b453c2bca3be 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -24,7 +24,6 @@
24 */ 24 */
25 25
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/version.h>
28#include <linux/drbd.h> 27#include <linux/drbd.h>
29#include <linux/sched.h> 28#include <linux/sched.h>
30#include <linux/smp_lock.h> 29#include <linux/smp_lock.h>
@@ -34,7 +33,6 @@
34#include <linux/mm_inline.h> 33#include <linux/mm_inline.h>
35#include <linux/slab.h> 34#include <linux/slab.h>
36#include <linux/random.h> 35#include <linux/random.h>
37#include <linux/mm.h>
38#include <linux/string.h> 36#include <linux/string.h>
39#include <linux/scatterlist.h> 37#include <linux/scatterlist.h>
40 38
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index e0339aaa1815..02b2583df7fc 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -860,7 +860,7 @@ static int mg_probe(struct platform_device *plat_dev)
860 err = -EINVAL; 860 err = -EINVAL;
861 goto probe_err_2; 861 goto probe_err_2;
862 } 862 }
863 host->dev_base = ioremap(rsc->start , rsc->end + 1); 863 host->dev_base = ioremap(rsc->start, resource_size(rsc));
864 if (!host->dev_base) { 864 if (!host->dev_base) {
865 printk(KERN_ERR "%s:%d ioremap fail\n", 865 printk(KERN_ERR "%s:%d ioremap fail\n",
866 __func__, __LINE__); 866 __func__, __LINE__);
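
The mg_disk hunk above swaps a hand-rolled mapping length for resource_size(). A minimal userspace sketch of why end - start + 1 is the length of an inclusive resource range; the struct below is a simplified stand-in, not the kernel's struct resource:

#include <stdio.h>

/* Simplified userspace model of a memory resource; the kernel's
 * struct resource carries more fields, but start/end are inclusive
 * bounds in both cases. */
struct resource {
	unsigned long start;
	unsigned long end;	/* inclusive */
};

/* Mirrors the kernel helper: length of an inclusive [start, end] range. */
static unsigned long resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	struct resource rsc = { .start = 0x80000000UL, .end = 0x80000fffUL };

	/* Correct mapping length: 0x1000 bytes. */
	printf("resource_size = %#lx\n", resource_size(&rsc));

	/* The old expression rsc.end + 1 is only a length when start == 0;
	 * here it would ask ioremap() for roughly 2GB instead of one page. */
	printf("rsc.end + 1    = %#lx\n", rsc.end + 1);
	return 0;
}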
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index e989f67bb61f..3d9c61e5acbf 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -158,10 +158,11 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
158 goto out; 158 goto out;
159 } 159 }
160 } 160 }
161out_unlock:
162 mutex_unlock(&rng_mutex);
163out: 161out:
164 return ret ? : err; 162 return ret ? : err;
163out_unlock:
164 mutex_unlock(&rng_mutex);
165 goto out;
165} 166}
166 167
167 168
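
The hw_random hunk above moves the out_unlock label below the common out exit, so the plain exit path no longer runs mutex_unlock() for iterations that never took the lock. A minimal sketch of the same "unlock only where you locked" shape, using a hypothetical read_sketch() and pthreads rather than the kernel API:

#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <sys/types.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical reader loosely shaped like rng_dev_read(): the lock is
 * taken per iteration, so the shared exit must not unlock, and error
 * paths that do hold the lock jump to out_unlock, which unlocks and
 * then reaches the common exit via goto. */
static ssize_t read_sketch(char *buf, size_t len)
{
	ssize_t ret = 0;
	int err = 0;

	while (len) {
		if (pthread_mutex_lock(&lock)) {
			err = -EINTR;		/* never locked: plain exit */
			goto out;
		}
		if (buf == NULL) {
			err = -EFAULT;		/* locked: unlock first */
			goto out_unlock;
		}
		buf++;
		len--;
		ret++;
		pthread_mutex_unlock(&lock);
	}
out:
	return ret ? ret : err;
out_unlock:
	pthread_mutex_unlock(&lock);
	goto out;
}

int main(void)
{
	char buf[4] = { 0 };

	return read_sketch(buf, sizeof(buf)) == (ssize_t)sizeof(buf) ? 0 : 1;
}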
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f4f5f82f9f53..dd3dfe42d5a9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -386,7 +386,9 @@ static void mddev_put(mddev_t *mddev)
386 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) 386 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
387 return; 387 return;
388 if (!mddev->raid_disks && list_empty(&mddev->disks) && 388 if (!mddev->raid_disks && list_empty(&mddev->disks) &&
389 !mddev->hold_active) { 389 mddev->ctime == 0 && !mddev->hold_active) {
390 /* Array is not configured at all, and not held active,
391 * so destroy it */
390 list_del(&mddev->all_mddevs); 392 list_del(&mddev->all_mddevs);
391 if (mddev->gendisk) { 393 if (mddev->gendisk) {
392 /* we did a probe so need to clean up. 394 /* we did a probe so need to clean up.
@@ -4355,7 +4357,7 @@ static int do_md_run(mddev_t * mddev)
4355 mddev->barriers_work = 1; 4357 mddev->barriers_work = 1;
4356 mddev->ok_start_degraded = start_dirty_degraded; 4358 mddev->ok_start_degraded = start_dirty_degraded;
4357 4359
4358 if (start_readonly) 4360 if (start_readonly && mddev->ro == 0)
4359 mddev->ro = 2; /* read-only, but switch on first write */ 4361 mddev->ro = 2; /* read-only, but switch on first write */
4360 4362
4361 err = mddev->pers->run(mddev); 4363 err = mddev->pers->run(mddev);
@@ -4419,33 +4421,6 @@ static int do_md_run(mddev_t * mddev)
4419 4421
4420 set_capacity(disk, mddev->array_sectors); 4422 set_capacity(disk, mddev->array_sectors);
4421 4423
4422 /* If there is a partially-recovered drive we need to
4423 * start recovery here. If we leave it to md_check_recovery,
4424 * it will remove the drives and not do the right thing
4425 */
4426 if (mddev->degraded && !mddev->sync_thread) {
4427 int spares = 0;
4428 list_for_each_entry(rdev, &mddev->disks, same_set)
4429 if (rdev->raid_disk >= 0 &&
4430 !test_bit(In_sync, &rdev->flags) &&
4431 !test_bit(Faulty, &rdev->flags))
4432 /* complete an interrupted recovery */
4433 spares++;
4434 if (spares && mddev->pers->sync_request) {
4435 mddev->recovery = 0;
4436 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4437 mddev->sync_thread = md_register_thread(md_do_sync,
4438 mddev,
4439 "resync");
4440 if (!mddev->sync_thread) {
4441 printk(KERN_ERR "%s: could not start resync"
4442 " thread...\n",
4443 mdname(mddev));
4444 /* leave the spares where they are, it shouldn't hurt */
4445 mddev->recovery = 0;
4446 }
4447 }
4448 }
4449 md_wakeup_thread(mddev->thread); 4424 md_wakeup_thread(mddev->thread);
4450 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 4425 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4451 4426
@@ -5262,6 +5237,10 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
5262 mddev->minor_version = info->minor_version; 5237 mddev->minor_version = info->minor_version;
5263 mddev->patch_version = info->patch_version; 5238 mddev->patch_version = info->patch_version;
5264 mddev->persistent = !info->not_persistent; 5239 mddev->persistent = !info->not_persistent;
5240 /* ensure mddev_put doesn't delete this now that there
5241 * is some minimal configuration.
5242 */
5243 mddev->ctime = get_seconds();
5265 return 0; 5244 return 0;
5266 } 5245 }
5267 mddev->major_version = MD_MAJOR_VERSION; 5246 mddev->major_version = MD_MAJOR_VERSION;
@@ -6494,10 +6473,11 @@ void md_do_sync(mddev_t *mddev)
6494 mddev->curr_resync = 2; 6473 mddev->curr_resync = 2;
6495 6474
6496 try_again: 6475 try_again:
6497 if (kthread_should_stop()) { 6476 if (kthread_should_stop())
6498 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6477 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6478
6479 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6499 goto skip; 6480 goto skip;
6500 }
6501 for_each_mddev(mddev2, tmp) { 6481 for_each_mddev(mddev2, tmp) {
6502 if (mddev2 == mddev) 6482 if (mddev2 == mddev)
6503 continue; 6483 continue;
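
The mddev_put() hunk above adds ctime == 0 to the "not configured at all" test, and set_array_info() now stamps ctime, so an array that has only been through the creation ioctl is not destroyed by the next put. A small userspace sketch of that idea with a hypothetical refcounted array_dev, not the real mddev_t:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Hypothetical stand-in for mddev_t: destroyed on the last put only
 * while it is completely unconfigured (no disks, no creation time). */
struct array_dev {
	int refs;
	int raid_disks;
	long ctime;		/* 0 until some minimal configuration exists */
};

static void dev_put(struct array_dev *dev)
{
	if (--dev->refs)
		return;
	/* Mirrors the patched test: ctime == 0 is part of "not configured
	 * at all", so a device that went through set_array_info() survives. */
	if (dev->raid_disks == 0 && dev->ctime == 0) {
		printf("destroying unconfigured device\n");
		free(dev);
	}
}

static void set_array_info(struct array_dev *dev)
{
	/* Minimal configuration: stamp ctime so dev_put() keeps the device. */
	dev->ctime = (long)time(NULL);
}

int main(void)
{
	struct array_dev *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 1;
	dev->refs = 1;
	set_array_info(dev);
	dev_put(dev);		/* refs hit 0, but ctime != 0: kept alive */
	printf("still alive, ctime=%ld\n", dev->ctime);
	free(dev);		/* sketch only: release it explicitly here */
	return 0;
}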
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index fbc231153e55..77cf0901a441 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -56,6 +56,7 @@ static const char version[] =
56#include <linux/errno.h> 56#include <linux/errno.h>
57#include <linux/netdevice.h> 57#include <linux/netdevice.h>
58#include <linux/etherdevice.h> 58#include <linux/etherdevice.h>
59#include <linux/if_ether.h>
59#include <linux/skbuff.h> 60#include <linux/skbuff.h>
60#include <linux/slab.h> 61#include <linux/slab.h>
61#include <linux/init.h> 62#include <linux/init.h>
@@ -734,8 +735,7 @@ static void init_82586_mem(struct net_device *dev)
734 memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10); 735 memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10);
735 736
736 /* Fill in the station address. */ 737 /* Fill in the station address. */
737 memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, 738 memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN);
738 sizeof(dev->dev_addr));
739 739
740 /* The Tx-block list is written as needed. We just set up the values. */ 740 /* The Tx-block list is written as needed. We just set up the values. */
741 lp->tx_cmd_link = IDLELOOP + 4; 741 lp->tx_cmd_link = IDLELOOP + 4;
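
The 3c507 hunk above (and the matching ibmlana and pcnet32 changes further down) replaces sizeof(dev->dev_addr) with ETH_ALEN: dev_addr is a MAX_ADDR_LEN-sized buffer, so sizeof copies the whole buffer rather than the 6-byte station address. A runnable sketch with a simplified stand-in for struct net_device; the ETH_ALEN/MAX_ADDR_LEN values mirror the kernel's, the struct itself is hypothetical:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN	6	/* octets in one Ethernet address */
#define MAX_ADDR_LEN	32	/* largest supported hardware address */

/* Simplified stand-in for struct net_device: dev_addr is sized for the
 * largest supported hardware address, not for Ethernet specifically. */
struct net_device {
	unsigned char dev_addr[MAX_ADDR_LEN];
};

int main(void)
{
	struct net_device dev;
	unsigned char nic_sa[ETH_ALEN];

	memset(&dev, 0, sizeof(dev));
	memcpy(dev.dev_addr, "\x00\x1b\x21\xaa\xbb\xcc", ETH_ALEN);

	/* sizeof(dev.dev_addr) is 32 here: copying that many bytes would
	 * write 26 bytes of padding past a 6-byte station-address slot. */
	printf("sizeof(dev.dev_addr) = %zu, ETH_ALEN = %d\n",
	       sizeof(dev.dev_addr), ETH_ALEN);

	/* What the patched drivers do: copy exactly one MAC address. */
	memcpy(nic_sa, dev.dev_addr, ETH_ALEN);
	printf("copied %02x:%02x:%02x:%02x:%02x:%02x\n",
	       nic_sa[0], nic_sa[1], nic_sa[2], nic_sa[3], nic_sa[4], nic_sa[5]);
	return 0;
}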
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e58a65391ad2..dd9a09c72dff 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2346,6 +2346,7 @@ config GELIC_NET
2346 2346
2347config GELIC_WIRELESS 2347config GELIC_WIRELESS
2348 bool "PS3 Wireless support" 2348 bool "PS3 Wireless support"
2349 depends on WLAN
2349 depends on GELIC_NET 2350 depends on GELIC_NET
2350 select WIRELESS_EXT 2351 select WIRELESS_EXT
2351 help 2352 help
@@ -2358,6 +2359,7 @@ config GELIC_WIRELESS
2358config GELIC_WIRELESS_OLD_PSK_INTERFACE 2359config GELIC_WIRELESS_OLD_PSK_INTERFACE
2359 bool "PS3 Wireless private PSK interface (OBSOLETE)" 2360 bool "PS3 Wireless private PSK interface (OBSOLETE)"
2360 depends on GELIC_WIRELESS 2361 depends on GELIC_WIRELESS
2362 select WEXT_PRIV
2361 help 2363 help
2362 This option retains the obsolete private interface to pass 2364 This option retains the obsolete private interface to pass
2363 the PSK from user space programs to the driver. The PSK 2365 the PSK from user space programs to the driver. The PSK
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 9e56014d27ed..9fd8e5ecd5d7 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -275,6 +275,7 @@ struct be_adapter {
275 u32 tx_fc; /* Tx flow control */ 275 u32 tx_fc; /* Tx flow control */
276 int link_speed; 276 int link_speed;
277 u8 port_type; 277 u8 port_type;
278 u8 transceiver;
278}; 279};
279 280
280extern const struct ethtool_ops be_ethtool_ops; 281extern const struct ethtool_ops be_ethtool_ops;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1b68bd98dc0c..102ade134165 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1479,6 +1479,41 @@ err:
1479 return status; 1479 return status;
1480} 1480}
1481 1481
1482int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1483 u8 loopback_type, u8 enable)
1484{
1485 struct be_mcc_wrb *wrb;
1486 struct be_cmd_req_set_lmode *req;
1487 int status;
1488
1489 spin_lock_bh(&adapter->mcc_lock);
1490
1491 wrb = wrb_from_mccq(adapter);
1492 if (!wrb) {
1493 status = -EBUSY;
1494 goto err;
1495 }
1496
1497 req = embedded_payload(wrb);
1498
1499 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1500 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
1501
1502 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1503 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
1504 sizeof(*req));
1505
1506 req->src_port = port_num;
1507 req->dest_port = port_num;
1508 req->loopback_type = loopback_type;
1509 req->loopback_state = enable;
1510
1511 status = be_mcc_notify_wait(adapter);
1512err:
1513 spin_unlock_bh(&adapter->mcc_lock);
1514 return status;
1515}
1516
1482int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, 1517int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
1483 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern) 1518 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
1484{ 1519{
@@ -1501,6 +1536,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
1501 1536
1502 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 1537 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1503 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req)); 1538 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
1539 req->hdr.timeout = 4;
1504 1540
1505 req->pattern = cpu_to_le64(pattern); 1541 req->pattern = cpu_to_le64(pattern);
1506 req->src_port = cpu_to_le32(port_num); 1542 req->src_port = cpu_to_le32(port_num);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 92b87ef156ed..c002b8391b4d 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -155,6 +155,7 @@ struct be_mcc_mailbox {
155 155
156#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17 156#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
157#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18 157#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
158#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
158 159
159struct be_cmd_req_hdr { 160struct be_cmd_req_hdr {
160 u8 opcode; /* dword 0 */ 161 u8 opcode; /* dword 0 */
@@ -821,6 +822,19 @@ struct be_cmd_resp_loopback_test {
821 u32 ticks_compl; 822 u32 ticks_compl;
822}; 823};
823 824
825struct be_cmd_req_set_lmode {
826 struct be_cmd_req_hdr hdr;
827 u8 src_port;
828 u8 dest_port;
829 u8 loopback_type;
830 u8 loopback_state;
831};
832
833struct be_cmd_resp_set_lmode {
834 struct be_cmd_resp_hdr resp_hdr;
835 u8 rsvd0[4];
836};
837
824/********************** DDR DMA test *********************/ 838/********************** DDR DMA test *********************/
825struct be_cmd_req_ddrdma_test { 839struct be_cmd_req_ddrdma_test {
826 struct be_cmd_req_hdr hdr; 840 struct be_cmd_req_hdr hdr;
@@ -912,3 +926,5 @@ extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
912 u32 num_pkts, u64 pattern); 926 u32 num_pkts, u64 pattern);
913extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, 927extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
914 u32 byte_cnt, struct be_dma_mem *cmd); 928 u32 byte_cnt, struct be_dma_mem *cmd);
929extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
930 u8 loopback_type, u8 enable);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 298b92cbd689..5d001c4deac1 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -118,6 +118,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
118#define BE_MAC_LOOPBACK 0x0 118#define BE_MAC_LOOPBACK 0x0
119#define BE_PHY_LOOPBACK 0x1 119#define BE_PHY_LOOPBACK 0x1
120#define BE_ONE_PORT_EXT_LOOPBACK 0x2 120#define BE_ONE_PORT_EXT_LOOPBACK 0x2
121#define BE_NO_LOOPBACK 0xff
121 122
122static void 123static void
123be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 124be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
@@ -339,28 +340,50 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
339 340
340 status = be_cmd_read_port_type(adapter, adapter->port_num, 341 status = be_cmd_read_port_type(adapter, adapter->port_num,
341 &connector); 342 &connector);
342 switch (connector) { 343 if (!status) {
343 case 7: 344 switch (connector) {
344 ecmd->port = PORT_FIBRE; 345 case 7:
345 break; 346 ecmd->port = PORT_FIBRE;
346 default: 347 ecmd->transceiver = XCVR_EXTERNAL;
347 ecmd->port = PORT_TP; 348 break;
348 break; 349 case 0:
350 ecmd->port = PORT_TP;
351 ecmd->transceiver = XCVR_EXTERNAL;
352 break;
353 default:
354 ecmd->port = PORT_TP;
355 ecmd->transceiver = XCVR_INTERNAL;
356 break;
357 }
358 } else {
359 ecmd->port = PORT_AUI;
360 ecmd->transceiver = XCVR_INTERNAL;
349 } 361 }
350 362
351 /* Save for future use */ 363 /* Save for future use */
352 adapter->link_speed = ecmd->speed; 364 adapter->link_speed = ecmd->speed;
353 adapter->port_type = ecmd->port; 365 adapter->port_type = ecmd->port;
366 adapter->transceiver = ecmd->transceiver;
354 } else { 367 } else {
355 ecmd->speed = adapter->link_speed; 368 ecmd->speed = adapter->link_speed;
356 ecmd->port = adapter->port_type; 369 ecmd->port = adapter->port_type;
370 ecmd->transceiver = adapter->transceiver;
357 } 371 }
358 372
359 ecmd->duplex = DUPLEX_FULL; 373 ecmd->duplex = DUPLEX_FULL;
360 ecmd->autoneg = AUTONEG_DISABLE; 374 ecmd->autoneg = AUTONEG_DISABLE;
361 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
362 ecmd->phy_address = adapter->port_num; 375 ecmd->phy_address = adapter->port_num;
363 ecmd->transceiver = XCVR_INTERNAL; 376 switch (ecmd->port) {
377 case PORT_FIBRE:
378 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
379 break;
380 case PORT_TP:
381 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
382 break;
383 case PORT_AUI:
384 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
385 break;
386 }
364 387
365 return 0; 388 return 0;
366} 389}
@@ -489,6 +512,19 @@ err:
489 return ret; 512 return ret;
490} 513}
491 514
515static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
516 u64 *status)
517{
518 be_cmd_set_loopback(adapter, adapter->port_num,
519 loopback_type, 1);
520 *status = be_cmd_loopback_test(adapter, adapter->port_num,
521 loopback_type, 1500,
522 2, 0xabc);
523 be_cmd_set_loopback(adapter, adapter->port_num,
524 BE_NO_LOOPBACK, 1);
525 return *status;
526}
527
492static void 528static void
493be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) 529be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
494{ 530{
@@ -497,23 +533,18 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
497 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); 533 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
498 534
499 if (test->flags & ETH_TEST_FL_OFFLINE) { 535 if (test->flags & ETH_TEST_FL_OFFLINE) {
500 data[0] = be_cmd_loopback_test(adapter, adapter->port_num, 536 if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
501 BE_MAC_LOOPBACK, 1500, 537 &data[0]) != 0) {
502 2, 0xabc);
503 if (data[0] != 0)
504 test->flags |= ETH_TEST_FL_FAILED; 538 test->flags |= ETH_TEST_FL_FAILED;
505 539 }
506 data[1] = be_cmd_loopback_test(adapter, adapter->port_num, 540 if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
507 BE_PHY_LOOPBACK, 1500, 541 &data[1]) != 0) {
508 2, 0xabc);
509 if (data[1] != 0)
510 test->flags |= ETH_TEST_FL_FAILED; 542 test->flags |= ETH_TEST_FL_FAILED;
511 543 }
512 data[2] = be_cmd_loopback_test(adapter, adapter->port_num, 544 if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
513 BE_ONE_PORT_EXT_LOOPBACK, 545 &data[2]) != 0) {
514 1500, 2, 0xabc);
515 if (data[2] != 0)
516 test->flags |= ETH_TEST_FL_FAILED; 546 test->flags |= ETH_TEST_FL_FAILED;
547 }
517 548
518 data[3] = be_test_ddr_dma(adapter); 549 data[3] = be_test_ddr_dma(adapter);
519 if (data[3] != 0) 550 if (data[3] != 0)
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 77ba13520d87..306c2b8165e2 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -7593,6 +7593,8 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7593 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) { 7593 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7594 bnx2x_set_iscsi_eth_mac_addr(bp, 1); 7594 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7595 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET; 7595 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7596 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
7597 CNIC_SB_ID(bp));
7596 } 7598 }
7597 mutex_unlock(&bp->cnic_mutex); 7599 mutex_unlock(&bp->cnic_mutex);
7598#endif 7600#endif
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 0fb7a4964e75..822f586d72af 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1580,7 +1580,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 1580	// check if any partner replies 1580	// check if any partner replies
1581 if (best->is_individual) { 1581 if (best->is_individual) {
1582 pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n", 1582 pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
1583 best->slave->dev->master->name); 1583 best->slave ? best->slave->dev->master->name : "NULL");
1584 } 1584 }
1585 1585
1586 best->is_active = 1; 1586 best->is_active = 1;
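
The bond_3ad hunk above guards the warning against best->slave being NULL before dereferencing it in the log message. A tiny sketch of the same guarded dereference, with slimmed-down hypothetical structs rather than the real bonding types:

#include <stdio.h>

/* Hypothetical, slimmed-down versions of the bonding structures, just
 * enough to show the guarded dereference in the warning path. */
struct net_dev { const char *name; };
struct slave { struct net_dev *dev; };
struct aggregator { struct slave *slave; int is_individual; };

static void warn_no_partner(const struct aggregator *best)
{
	/* best->slave may legitimately be NULL here; dereferencing it
	 * unconditionally was the crash the patch avoids. */
	printf("Warning: no 802.3ad response from the link partner (%s)\n",
	       best->slave ? best->slave->dev->name : "NULL");
}

int main(void)
{
	struct aggregator orphan = { .slave = NULL, .is_individual = 1 };

	warn_no_partner(&orphan);
	return 0;
}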
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index e0620d084644..8bd3c9f17532 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -143,7 +143,6 @@ void gfar_start(struct net_device *dev);
143static void gfar_clear_exact_match(struct net_device *dev); 143static void gfar_clear_exact_match(struct net_device *dev);
144static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); 144static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
145static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 145static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
146u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
147 146
148MODULE_AUTHOR("Freescale Semiconductor, Inc"); 147MODULE_AUTHOR("Freescale Semiconductor, Inc");
149MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 148MODULE_DESCRIPTION("Gianfar Ethernet Driver");
@@ -455,7 +454,6 @@ static const struct net_device_ops gfar_netdev_ops = {
455 .ndo_set_multicast_list = gfar_set_multi, 454 .ndo_set_multicast_list = gfar_set_multi,
456 .ndo_tx_timeout = gfar_timeout, 455 .ndo_tx_timeout = gfar_timeout,
457 .ndo_do_ioctl = gfar_ioctl, 456 .ndo_do_ioctl = gfar_ioctl,
458 .ndo_select_queue = gfar_select_queue,
459 .ndo_get_stats = gfar_get_stats, 457 .ndo_get_stats = gfar_get_stats,
460 .ndo_vlan_rx_register = gfar_vlan_rx_register, 458 .ndo_vlan_rx_register = gfar_vlan_rx_register,
461 .ndo_set_mac_address = eth_mac_addr, 459 .ndo_set_mac_address = eth_mac_addr,
@@ -506,10 +504,6 @@ static inline int gfar_uses_fcb(struct gfar_private *priv)
506 return priv->vlgrp || priv->rx_csum_enable; 504 return priv->vlgrp || priv->rx_csum_enable;
507} 505}
508 506
509u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
510{
511 return skb_get_queue_mapping(skb);
512}
513static void free_tx_pointers(struct gfar_private *priv) 507static void free_tx_pointers(struct gfar_private *priv)
514{ 508{
515 int i = 0; 509 int i = 0;
@@ -2470,10 +2464,11 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2470 fcb = (struct rxfcb *)skb->data; 2464 fcb = (struct rxfcb *)skb->data;
2471 2465
2472 /* Remove the FCB from the skb */ 2466 /* Remove the FCB from the skb */
2473 skb_set_queue_mapping(skb, fcb->rq);
2474 /* Remove the padded bytes, if there are any */ 2467 /* Remove the padded bytes, if there are any */
2475 if (amount_pull) 2468 if (amount_pull) {
2469 skb_record_rx_queue(skb, fcb->rq);
2476 skb_pull(skb, amount_pull); 2470 skb_pull(skb, amount_pull);
2471 }
2477 2472
2478 if (priv->rx_csum_enable) 2473 if (priv->rx_csum_enable)
2479 gfar_rx_checksum(skb, fcb); 2474 gfar_rx_checksum(skb, fcb);
@@ -2554,7 +2549,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2554 /* Remove the FCS from the packet length */ 2549 /* Remove the FCS from the packet length */
2555 skb_put(skb, pkt_len); 2550 skb_put(skb, pkt_len);
2556 rx_queue->stats.rx_bytes += pkt_len; 2551 rx_queue->stats.rx_bytes += pkt_len;
2557 2552 skb_record_rx_queue(skb, rx_queue->qindex);
2558 gfar_process_frame(dev, skb, amount_pull); 2553 gfar_process_frame(dev, skb, amount_pull);
2559 2554
2560 } else { 2555 } else {
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 090a6d3af112..052c74091d91 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -87,6 +87,7 @@ History:
87#include <linux/module.h> 87#include <linux/module.h>
88#include <linux/netdevice.h> 88#include <linux/netdevice.h>
89#include <linux/etherdevice.h> 89#include <linux/etherdevice.h>
90#include <linux/if_ether.h>
90#include <linux/skbuff.h> 91#include <linux/skbuff.h>
91#include <linux/bitops.h> 92#include <linux/bitops.h>
92 93
@@ -988,7 +989,7 @@ static int __devinit ibmlana_init_one(struct device *kdev)
988 989
989 /* copy out MAC address */ 990 /* copy out MAC address */
990 991
991 for (z = 0; z < sizeof(dev->dev_addr); z++) 992 for (z = 0; z < ETH_ALEN; z++)
992 dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z); 993 dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z);
993 994
994 /* print config */ 995 /* print config */
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index e8e9e9194a88..c505b50d1fa3 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -1096,9 +1096,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1096 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); 1096 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1097 } else { 1097 } else {
1098 /* Set PCS register for forced link */ 1098 /* Set PCS register for forced link */
1099 reg |= E1000_PCS_LCTL_FSD | /* Force Speed */ 1099 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
1100 E1000_PCS_LCTL_FORCE_LINK | /* Force Link */
1101 E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */
1102 1100
1103 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); 1101 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1104 } 1102 }
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index 5c9d73e9bb8d..3670a66401b8 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -457,15 +457,6 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
457 phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; 457 phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
458 458
459 ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); 459 ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
460 if (ret_val)
461 goto out;
462
463 /* Set number of link attempts before downshift */
464 ret_val = phy->ops.read_reg(hw, I82580_CTRL_REG, &phy_data);
465 if (ret_val)
466 goto out;
467 phy_data &= ~I82580_CTRL_DOWNSHIFT_MASK;
468 ret_val = phy->ops.write_reg(hw, I82580_CTRL_REG, phy_data);
469 460
470out: 461out:
471 return ret_val; 462 return ret_val;
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index ac9d5272650d..f771a6c08777 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1795,7 +1795,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
1795 /* dual port cards only support WoL on port A from now on 1795 /* dual port cards only support WoL on port A from now on
1796 * unless it was enabled in the eeprom for port B 1796 * unless it was enabled in the eeprom for port B
1797 * so exclude FUNC_1 ports from having WoL enabled */ 1797 * so exclude FUNC_1 ports from having WoL enabled */
1798 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 && 1798 if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
1799 !adapter->eeprom_wol) { 1799 !adapter->eeprom_wol) {
1800 wol->supported = 0; 1800 wol->supported = 0;
1801 break; 1801 break;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 78963a0e128d..933c64ff2465 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1306,13 +1306,8 @@ void igb_reset(struct igb_adapter *adapter)
1306 hwm = min(((pba << 10) * 9 / 10), 1306 hwm = min(((pba << 10) * 9 / 10),
1307 ((pba << 10) - 2 * adapter->max_frame_size)); 1307 ((pba << 10) - 2 * adapter->max_frame_size));
1308 1308
1309 if (mac->type < e1000_82576) { 1309 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1310 fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ 1310 fc->low_water = fc->high_water - 16;
1311 fc->low_water = fc->high_water - 8;
1312 } else {
1313 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1314 fc->low_water = fc->high_water - 16;
1315 }
1316 fc->pause_time = 0xFFFF; 1311 fc->pause_time = 0xFFFF;
1317 fc->send_xon = 1; 1312 fc->send_xon = 1;
1318 fc->current_mode = fc->requested_mode; 1313 fc->current_mode = fc->requested_mode;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index e9dd95f136aa..0dbd0320023a 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -2763,7 +2763,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2763 err = hw->mac.ops.reset_hw(hw); 2763 err = hw->mac.ops.reset_hw(hw);
2764 if (err) { 2764 if (err) {
2765 dev_info(&pdev->dev, 2765 dev_info(&pdev->dev,
2766 "PF still in reset state, assigning new address\n"); 2766 "PF still in reset state, assigning new address."
2767 " Is the PF interface up?\n");
2767 random_ether_addr(hw->mac.addr); 2768 random_ether_addr(hw->mac.addr);
2768 } else { 2769 } else {
2769 err = hw->mac.ops.read_mac_addr(hw); 2770 err = hw->mac.ops.read_mac_addr(hw);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index bd64387563f0..1a2ea621e371 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -4373,6 +4373,11 @@ static int ixgbe_resume(struct pci_dev *pdev)
4373 4373
4374 pci_set_power_state(pdev, PCI_D0); 4374 pci_set_power_state(pdev, PCI_D0);
4375 pci_restore_state(pdev); 4375 pci_restore_state(pdev);
4376 /*
4377 * pci_restore_state clears dev->state_saved so call
4378 * pci_save_state to restore it.
4379 */
4380 pci_save_state(pdev);
4376 4381
4377 err = pci_enable_device_mem(pdev); 4382 err = pci_enable_device_mem(pdev);
4378 if (err) { 4383 if (err) {
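
The ixgbe hunk above calls pci_save_state() immediately after pci_restore_state() because the latter clears dev->state_saved. A minimal resume-path sketch of that ordering for a hypothetical foo driver; this is a fragment meant to be built as part of a driver of the same vintage, not a standalone program:

#include <linux/pci.h>

/*
 * Resume-path sketch following the ixgbe fix above: pci_restore_state()
 * clears pdev->state_saved, so the state is saved again right away to
 * keep a later suspend or reset working.
 */
static int foo_resume(struct pci_dev *pdev)
{
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state() cleared state_saved; re-save it now. */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);
	return 0;
}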
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index dcc67a35e8f2..e154677ff706 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -45,6 +45,7 @@ static const char *const version =
45#include <linux/crc32.h> 45#include <linux/crc32.h>
46#include <linux/netdevice.h> 46#include <linux/netdevice.h>
47#include <linux/etherdevice.h> 47#include <linux/etherdevice.h>
48#include <linux/if_ether.h>
48#include <linux/skbuff.h> 49#include <linux/skbuff.h>
49#include <linux/spinlock.h> 50#include <linux/spinlock.h>
50#include <linux/moduleparam.h> 51#include <linux/moduleparam.h>
@@ -1765,7 +1766,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1765 1766
1766 /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ 1767 /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
1767 if (!is_valid_ether_addr(dev->perm_addr)) 1768 if (!is_valid_ether_addr(dev->perm_addr))
1768 memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); 1769 memset(dev->dev_addr, 0, ETH_ALEN);
1769 1770
1770 if (pcnet32_debug & NETIF_MSG_PROBE) { 1771 if (pcnet32_debug & NETIF_MSG_PROBE) {
1771 printk(" %pM", dev->dev_addr); 1772 printk(" %pM", dev->dev_addr);
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index f983e3b507cc..103e8b0e2a0d 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -741,14 +741,14 @@ static int efx_probe_port(struct efx_nic *efx)
741 741
742 EFX_LOG(efx, "create port\n"); 742 EFX_LOG(efx, "create port\n");
743 743
744 if (phy_flash_cfg)
745 efx->phy_mode = PHY_MODE_SPECIAL;
746
744 /* Connect up MAC/PHY operations table */ 747 /* Connect up MAC/PHY operations table */
745 rc = efx->type->probe_port(efx); 748 rc = efx->type->probe_port(efx);
746 if (rc) 749 if (rc)
747 goto err; 750 goto err;
748 751
749 if (phy_flash_cfg)
750 efx->phy_mode = PHY_MODE_SPECIAL;
751
752 /* Sanity check MAC address */ 752 /* Sanity check MAC address */
753 if (is_valid_ether_addr(efx->mac_address)) { 753 if (is_valid_ether_addr(efx->mac_address)) {
754 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); 754 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 17afcd26e870..9d009c46e962 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -925,6 +925,7 @@ static int falcon_probe_port(struct efx_nic *efx)
925 925
926static void falcon_remove_port(struct efx_nic *efx) 926static void falcon_remove_port(struct efx_nic *efx)
927{ 927{
928 efx->phy_op->remove(efx);
928 efx_nic_free_buffer(efx, &efx->stats_buffer); 929 efx_nic_free_buffer(efx, &efx->stats_buffer);
929} 930}
930 931
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 3da933f8f079..8ccab2c67a20 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -111,16 +111,12 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
111 efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK); 111 efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
112} 112}
113 113
114/* Get status of XAUI link */ 114static bool falcon_xgxs_link_ok(struct efx_nic *efx)
115static bool falcon_xaui_link_ok(struct efx_nic *efx)
116{ 115{
117 efx_oword_t reg; 116 efx_oword_t reg;
118 bool align_done, link_ok = false; 117 bool align_done, link_ok = false;
119 int sync_status; 118 int sync_status;
120 119
121 if (LOOPBACK_INTERNAL(efx))
122 return true;
123
124 /* Read link status */ 120 /* Read link status */
125 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT); 121 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
126 122
@@ -135,14 +131,24 @@ static bool falcon_xaui_link_ok(struct efx_nic *efx)
135 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES); 131 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
136 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT); 132 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
137 133
138 /* If the link is up, then check the phy side of the xaui link */
139 if (efx->link_state.up && link_ok)
140 if (efx->mdio.mmds & (1 << MDIO_MMD_PHYXS))
141 link_ok = efx_mdio_phyxgxs_lane_sync(efx);
142
143 return link_ok; 134 return link_ok;
144} 135}
145 136
137static bool falcon_xmac_link_ok(struct efx_nic *efx)
138{
139 /*
140 * Check MAC's XGXS link status except when using XGMII loopback
141 * which bypasses the XGXS block.
142 * If possible, check PHY's XGXS link status except when using
143 * MAC loopback.
144 */
145 return (efx->loopback_mode == LOOPBACK_XGMII ||
146 falcon_xgxs_link_ok(efx)) &&
147 (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
148 LOOPBACK_INTERNAL(efx) ||
149 efx_mdio_phyxgxs_lane_sync(efx));
150}
151
146void falcon_reconfigure_xmac_core(struct efx_nic *efx) 152void falcon_reconfigure_xmac_core(struct efx_nic *efx)
147{ 153{
148 unsigned int max_frame_len; 154 unsigned int max_frame_len;
@@ -245,9 +251,9 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
245 251
246 252
247/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */ 253/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
248static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries) 254static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
249{ 255{
250 bool mac_up = falcon_xaui_link_ok(efx); 256 bool mac_up = falcon_xmac_link_ok(efx);
251 257
252 if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS || 258 if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
253 efx_phy_mode_disabled(efx->phy_mode)) 259 efx_phy_mode_disabled(efx->phy_mode))
@@ -261,7 +267,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
261 falcon_reset_xaui(efx); 267 falcon_reset_xaui(efx);
262 udelay(200); 268 udelay(200);
263 269
264 mac_up = falcon_xaui_link_ok(efx); 270 mac_up = falcon_xmac_link_ok(efx);
265 --tries; 271 --tries;
266 } 272 }
267 273
@@ -272,7 +278,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
272 278
273static bool falcon_xmac_check_fault(struct efx_nic *efx) 279static bool falcon_xmac_check_fault(struct efx_nic *efx)
274{ 280{
275 return !falcon_check_xaui_link_up(efx, 5); 281 return !falcon_xmac_link_ok_retry(efx, 5);
276} 282}
277 283
278static int falcon_reconfigure_xmac(struct efx_nic *efx) 284static int falcon_reconfigure_xmac(struct efx_nic *efx)
@@ -284,7 +290,7 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx)
284 290
285 falcon_reconfigure_mac_wrapper(efx); 291 falcon_reconfigure_mac_wrapper(efx);
286 292
287 efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 5); 293 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
288 falcon_mask_status_intr(efx, true); 294 falcon_mask_status_intr(efx, true);
289 295
290 return 0; 296 return 0;
@@ -357,7 +363,7 @@ void falcon_poll_xmac(struct efx_nic *efx)
357 return; 363 return;
358 364
359 falcon_mask_status_intr(efx, false); 365 falcon_mask_status_intr(efx, false);
360 efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 1); 366 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
361 falcon_mask_status_intr(efx, true); 367 falcon_mask_status_intr(efx, true);
362} 368}
363 369
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 0e1bcc5a0d52..eb694af7a473 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -304,31 +304,47 @@ static u32 mcdi_to_ethtool_media(u32 media)
304 304
305static int efx_mcdi_phy_probe(struct efx_nic *efx) 305static int efx_mcdi_phy_probe(struct efx_nic *efx)
306{ 306{
307 struct efx_mcdi_phy_cfg *phy_cfg; 307 struct efx_mcdi_phy_cfg *phy_data;
308 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
309 u32 caps;
308 int rc; 310 int rc;
309 311
310 /* TODO: Move phy_data initialisation to 312 /* Initialise and populate phy_data */
311 * phy_op->probe/remove, rather than init/fini */ 313 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
312 phy_cfg = kzalloc(sizeof(*phy_cfg), GFP_KERNEL); 314 if (phy_data == NULL)
313 if (phy_cfg == NULL) { 315 return -ENOMEM;
314 rc = -ENOMEM; 316
315 goto fail_alloc; 317 rc = efx_mcdi_get_phy_cfg(efx, phy_data);
316 }
317 rc = efx_mcdi_get_phy_cfg(efx, phy_cfg);
318 if (rc != 0) 318 if (rc != 0)
319 goto fail; 319 goto fail;
320 320
321 efx->phy_type = phy_cfg->type; 321 /* Read initial link advertisement */
322 BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
323 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
324 outbuf, sizeof(outbuf), NULL);
325 if (rc)
326 goto fail;
327
328 /* Fill out nic state */
329 efx->phy_data = phy_data;
330 efx->phy_type = phy_data->type;
322 331
323 efx->mdio_bus = phy_cfg->channel; 332 efx->mdio_bus = phy_data->channel;
324 efx->mdio.prtad = phy_cfg->port; 333 efx->mdio.prtad = phy_data->port;
325 efx->mdio.mmds = phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); 334 efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
326 efx->mdio.mode_support = 0; 335 efx->mdio.mode_support = 0;
327 if (phy_cfg->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) 336 if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
328 efx->mdio.mode_support |= MDIO_SUPPORTS_C22; 337 efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
329 if (phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) 338 if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
330 efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 339 efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
331 340
341 caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
342 if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
343 efx->link_advertising =
344 mcdi_to_ethtool_cap(phy_data->media, caps);
345 else
346 phy_data->forced_cap = caps;
347
332 /* Assert that we can map efx -> mcdi loopback modes */ 348 /* Assert that we can map efx -> mcdi loopback modes */
333 BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE); 349 BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
334 BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA); 350 BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
@@ -365,46 +381,6 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
365 * but by convention we don't */ 381 * but by convention we don't */
366 efx->loopback_modes &= ~(1 << LOOPBACK_NONE); 382 efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
367 383
368 kfree(phy_cfg);
369
370 return 0;
371
372fail:
373 kfree(phy_cfg);
374fail_alloc:
375 return rc;
376}
377
378static int efx_mcdi_phy_init(struct efx_nic *efx)
379{
380 struct efx_mcdi_phy_cfg *phy_data;
381 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
382 u32 caps;
383 int rc;
384
385 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
386 if (phy_data == NULL)
387 return -ENOMEM;
388
389 rc = efx_mcdi_get_phy_cfg(efx, phy_data);
390 if (rc != 0)
391 goto fail;
392
393 efx->phy_data = phy_data;
394
395 BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
396 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
397 outbuf, sizeof(outbuf), NULL);
398 if (rc)
399 goto fail;
400
401 caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
402 if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
403 efx->link_advertising =
404 mcdi_to_ethtool_cap(phy_data->media, caps);
405 else
406 phy_data->forced_cap = caps;
407
408 return 0; 384 return 0;
409 385
410fail: 386fail:
@@ -504,7 +480,7 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
504 return !efx_link_state_equal(&efx->link_state, &old_state); 480 return !efx_link_state_equal(&efx->link_state, &old_state);
505} 481}
506 482
507static void efx_mcdi_phy_fini(struct efx_nic *efx) 483static void efx_mcdi_phy_remove(struct efx_nic *efx)
508{ 484{
509 struct efx_mcdi_phy_data *phy_data = efx->phy_data; 485 struct efx_mcdi_phy_data *phy_data = efx->phy_data;
510 486
@@ -586,10 +562,11 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
586 562
587struct efx_phy_operations efx_mcdi_phy_ops = { 563struct efx_phy_operations efx_mcdi_phy_ops = {
588 .probe = efx_mcdi_phy_probe, 564 .probe = efx_mcdi_phy_probe,
589 .init = efx_mcdi_phy_init, 565 .init = efx_port_dummy_op_int,
590 .reconfigure = efx_mcdi_phy_reconfigure, 566 .reconfigure = efx_mcdi_phy_reconfigure,
591 .poll = efx_mcdi_phy_poll, 567 .poll = efx_mcdi_phy_poll,
592 .fini = efx_mcdi_phy_fini, 568 .fini = efx_port_dummy_op_void,
569 .remove = efx_mcdi_phy_remove,
593 .get_settings = efx_mcdi_phy_get_settings, 570 .get_settings = efx_mcdi_phy_get_settings,
594 .set_settings = efx_mcdi_phy_set_settings, 571 .set_settings = efx_mcdi_phy_set_settings,
595 .run_tests = NULL, 572 .run_tests = NULL,
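
The mcdi_phy changes above (and the qt202x and tenxpress changes below) move PHY private-data allocation from the init hook into probe and free it in a new remove hook, leaving init/fini as dummy ops. A minimal userspace sketch of that ops-table lifecycle, with hypothetical names throughout rather than the real efx_phy_operations:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical device and ops table, shaped like efx_phy_operations:
 * probe/remove own the private data, init/fini only touch device state. */
struct phy_dev { void *phy_data; };

struct phy_ops {
	int  (*probe)(struct phy_dev *);
	int  (*init)(struct phy_dev *);
	void (*fini)(struct phy_dev *);
	void (*remove)(struct phy_dev *);
};

static int demo_probe(struct phy_dev *dev)
{
	dev->phy_data = calloc(1, 64);	/* allocated once, at probe time */
	return dev->phy_data ? 0 : -1;
}

static void demo_remove(struct phy_dev *dev)
{
	free(dev->phy_data);		/* freed exactly once, at remove */
	dev->phy_data = NULL;
}

/* init/fini no longer manage memory, so they can be shared dummies. */
static int  dummy_op_int(struct phy_dev *dev)  { (void)dev; return 0; }
static void dummy_op_void(struct phy_dev *dev) { (void)dev; }

static const struct phy_ops demo_phy_ops = {
	.probe  = demo_probe,
	.init   = dummy_op_int,
	.fini   = dummy_op_void,
	.remove = demo_remove,
};

int main(void)
{
	struct phy_dev dev = { 0 };

	if (demo_phy_ops.probe(&dev))
		return 1;
	/* init/fini may now run any number of times without leaking. */
	demo_phy_ops.init(&dev);
	demo_phy_ops.fini(&dev);
	demo_phy_ops.init(&dev);
	demo_phy_ops.fini(&dev);
	demo_phy_ops.remove(&dev);
	puts("probe/init/fini/remove lifecycle ok");
	return 0;
}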
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 34c381f009b7..d5aab5b3fa06 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -524,6 +524,7 @@ struct efx_phy_operations {
524 int (*probe) (struct efx_nic *efx); 524 int (*probe) (struct efx_nic *efx);
525 int (*init) (struct efx_nic *efx); 525 int (*init) (struct efx_nic *efx);
526 void (*fini) (struct efx_nic *efx); 526 void (*fini) (struct efx_nic *efx);
527 void (*remove) (struct efx_nic *efx);
527 int (*reconfigure) (struct efx_nic *efx); 528 int (*reconfigure) (struct efx_nic *efx);
528 bool (*poll) (struct efx_nic *efx); 529 bool (*poll) (struct efx_nic *efx);
529 void (*get_settings) (struct efx_nic *efx, 530 void (*get_settings) (struct efx_nic *efx,
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index a577be227862..db44224ed2ca 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1576,6 +1576,8 @@ void efx_nic_init_common(struct efx_nic *efx)
1576 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); 1576 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1577 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 1577 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1578 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); 1578 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1579 /* Disable hardware watchdog which can misfire */
1580 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1579 /* Squash TX of packets of 16 bytes or less */ 1581 /* Squash TX of packets of 16 bytes or less */
1580 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1582 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1581 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); 1583 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 3800fc791b2f..ff8f0a417fa3 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -33,6 +33,9 @@
33#define PCS_FW_HEARTBEAT_REG 0xd7ee 33#define PCS_FW_HEARTBEAT_REG 0xd7ee
34#define PCS_FW_HEARTB_LBN 0 34#define PCS_FW_HEARTB_LBN 0
35#define PCS_FW_HEARTB_WIDTH 8 35#define PCS_FW_HEARTB_WIDTH 8
36#define PCS_FW_PRODUCT_CODE_1 0xd7f0
37#define PCS_FW_VERSION_1 0xd7f3
38#define PCS_FW_BUILD_1 0xd7f6
36#define PCS_UC8051_STATUS_REG 0xd7fd 39#define PCS_UC8051_STATUS_REG 0xd7fd
37#define PCS_UC_STATUS_LBN 0 40#define PCS_UC_STATUS_LBN 0
38#define PCS_UC_STATUS_WIDTH 8 41#define PCS_UC_STATUS_WIDTH 8
@@ -52,14 +55,24 @@ void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
52 55
53struct qt202x_phy_data { 56struct qt202x_phy_data {
54 enum efx_phy_mode phy_mode; 57 enum efx_phy_mode phy_mode;
58 bool bug17190_in_bad_state;
59 unsigned long bug17190_timer;
60 u32 firmware_ver;
55}; 61};
56 62
57#define QT2022C2_MAX_RESET_TIME 500 63#define QT2022C2_MAX_RESET_TIME 500
58#define QT2022C2_RESET_WAIT 10 64#define QT2022C2_RESET_WAIT 10
59 65
60static int qt2025c_wait_reset(struct efx_nic *efx) 66#define QT2025C_MAX_HEARTB_TIME (5 * HZ)
67#define QT2025C_HEARTB_WAIT 100
68#define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10)
69#define QT2025C_FWSTART_WAIT 100
70
71#define BUG17190_INTERVAL (2 * HZ)
72
73static int qt2025c_wait_heartbeat(struct efx_nic *efx)
61{ 74{
62 unsigned long timeout = jiffies + 10 * HZ; 75 unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME;
63 int reg, old_counter = 0; 76 int reg, old_counter = 0;
64 77
65 /* Wait for firmware heartbeat to start */ 78 /* Wait for firmware heartbeat to start */
@@ -74,11 +87,25 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
74 old_counter = counter; 87 old_counter = counter;
75 else if (counter != old_counter) 88 else if (counter != old_counter)
76 break; 89 break;
77 if (time_after(jiffies, timeout)) 90 if (time_after(jiffies, timeout)) {
91 /* Some cables have EEPROMs that conflict with the
92 * PHY's on-board EEPROM so it cannot load firmware */
93 EFX_ERR(efx, "If an SFP+ direct attach cable is"
94 " connected, please check that it complies"
95 " with the SFP+ specification\n");
78 return -ETIMEDOUT; 96 return -ETIMEDOUT;
79 msleep(10); 97 }
98 msleep(QT2025C_HEARTB_WAIT);
80 } 99 }
81 100
101 return 0;
102}
103
104static int qt2025c_wait_fw_status_good(struct efx_nic *efx)
105{
106 unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME;
107 int reg;
108
82 /* Wait for firmware status to look good */ 109 /* Wait for firmware status to look good */
83 for (;;) { 110 for (;;) {
84 reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG); 111 reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
@@ -90,7 +117,178 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
90 break; 117 break;
91 if (time_after(jiffies, timeout)) 118 if (time_after(jiffies, timeout))
92 return -ETIMEDOUT; 119 return -ETIMEDOUT;
120 msleep(QT2025C_FWSTART_WAIT);
121 }
122
123 return 0;
124}
125
126static void qt2025c_restart_firmware(struct efx_nic *efx)
127{
128 /* Restart microcontroller execution of firmware from RAM */
129 efx_mdio_write(efx, 3, 0xe854, 0x00c0);
130 efx_mdio_write(efx, 3, 0xe854, 0x0040);
131 msleep(50);
132}
133
134static int qt2025c_wait_reset(struct efx_nic *efx)
135{
136 int rc;
137
138 rc = qt2025c_wait_heartbeat(efx);
139 if (rc != 0)
140 return rc;
141
142 rc = qt2025c_wait_fw_status_good(efx);
143 if (rc == -ETIMEDOUT) {
144 /* Bug 17689: occasionally heartbeat starts but firmware status
145 * code never progresses beyond 0x00. Try again, once, after
146 * restarting execution of the firmware image. */
147 EFX_LOG(efx, "bashing QT2025C microcontroller\n");
148 qt2025c_restart_firmware(efx);
149 rc = qt2025c_wait_heartbeat(efx);
150 if (rc != 0)
151 return rc;
152 rc = qt2025c_wait_fw_status_good(efx);
153 }
154
155 return rc;
156}
157
158static void qt2025c_firmware_id(struct efx_nic *efx)
159{
160 struct qt202x_phy_data *phy_data = efx->phy_data;
161 u8 firmware_id[9];
162 size_t i;
163
164 for (i = 0; i < sizeof(firmware_id); i++)
165 firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
166 PCS_FW_PRODUCT_CODE_1 + i);
167 EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
168 (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
169 firmware_id[3] >> 4, firmware_id[3] & 0xf,
170 firmware_id[4], firmware_id[5],
171 firmware_id[6], firmware_id[7], firmware_id[8]);
172 phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
173 ((firmware_id[3] & 0x0f) << 16) |
174 (firmware_id[4] << 8) | firmware_id[5];
175}
176
177static void qt2025c_bug17190_workaround(struct efx_nic *efx)
178{
179 struct qt202x_phy_data *phy_data = efx->phy_data;
180
181 /* The PHY can get stuck in a state where it reports PHY_XS and PMA/PMD
182 * layers up, but PCS down (no block_lock). If we notice this state
183 * persisting for a couple of seconds, we switch PMA/PMD loopback
184 * briefly on and then off again, which is normally sufficient to
185 * recover it.
186 */
187 if (efx->link_state.up ||
188 !efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
189 phy_data->bug17190_in_bad_state = false;
190 return;
191 }
192
193 if (!phy_data->bug17190_in_bad_state) {
194 phy_data->bug17190_in_bad_state = true;
195 phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
196 return;
197 }
198
199 if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
200 EFX_LOG(efx, "bashing QT2025C PMA/PMD\n");
201 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
202 MDIO_PMA_CTRL1_LOOPBACK, true);
93 msleep(100); 203 msleep(100);
204 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
205 MDIO_PMA_CTRL1_LOOPBACK, false);
206 phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
207 }
208}
209
210static int qt2025c_select_phy_mode(struct efx_nic *efx)
211{
212 struct qt202x_phy_data *phy_data = efx->phy_data;
213 struct falcon_board *board = falcon_board(efx);
214 int reg, rc, i;
215 uint16_t phy_op_mode;
216
217 /* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+
218 * Self-Configure mode. Don't attempt any switching if we encounter
219 * older firmware. */
220 if (phy_data->firmware_ver < 0x02000100)
221 return 0;
222
223 /* In general we will get optimal behaviour in "SFP+ Self-Configure"
224 * mode; however, that powers down most of the PHY when no module is
225 * present, so we must use a different mode (any fixed mode will do)
226 * to be sure that loopbacks will work. */
227 phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020;
228
229 /* Only change mode if really necessary */
230 reg = efx_mdio_read(efx, 1, 0xc319);
231 if ((reg & 0x0038) == phy_op_mode)
232 return 0;
233 EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode);
234
235 /* This sequence replicates the register writes configured in the boot
236 * EEPROM (including the differences between board revisions), except
237 * that the operating mode is changed, and the PHY is prevented from
238 * unnecessarily reloading the main firmware image again. */
239 efx_mdio_write(efx, 1, 0xc300, 0x0000);
240 /* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9
241 * STOPs onto the firmware/module I2C bus to reset it, varies across
242 * board revisions, as the bus is connected to different GPIO/LED
243 * outputs on the PHY.) */
244 if (board->major == 0 && board->minor < 2) {
245 efx_mdio_write(efx, 1, 0xc303, 0x4498);
246 for (i = 0; i < 9; i++) {
247 efx_mdio_write(efx, 1, 0xc303, 0x4488);
248 efx_mdio_write(efx, 1, 0xc303, 0x4480);
249 efx_mdio_write(efx, 1, 0xc303, 0x4490);
250 efx_mdio_write(efx, 1, 0xc303, 0x4498);
251 }
252 } else {
253 efx_mdio_write(efx, 1, 0xc303, 0x0920);
254 efx_mdio_write(efx, 1, 0xd008, 0x0004);
255 for (i = 0; i < 9; i++) {
256 efx_mdio_write(efx, 1, 0xc303, 0x0900);
257 efx_mdio_write(efx, 1, 0xd008, 0x0005);
258 efx_mdio_write(efx, 1, 0xc303, 0x0920);
259 efx_mdio_write(efx, 1, 0xd008, 0x0004);
260 }
261 efx_mdio_write(efx, 1, 0xc303, 0x4900);
262 }
263 efx_mdio_write(efx, 1, 0xc303, 0x4900);
264 efx_mdio_write(efx, 1, 0xc302, 0x0004);
265 efx_mdio_write(efx, 1, 0xc316, 0x0013);
266 efx_mdio_write(efx, 1, 0xc318, 0x0054);
267 efx_mdio_write(efx, 1, 0xc319, phy_op_mode);
268 efx_mdio_write(efx, 1, 0xc31a, 0x0098);
269 efx_mdio_write(efx, 3, 0x0026, 0x0e00);
270 efx_mdio_write(efx, 3, 0x0027, 0x0013);
271 efx_mdio_write(efx, 3, 0x0028, 0xa528);
272 efx_mdio_write(efx, 1, 0xd006, 0x000a);
273 efx_mdio_write(efx, 1, 0xd007, 0x0009);
274 efx_mdio_write(efx, 1, 0xd008, 0x0004);
275 /* This additional write is not present in the boot EEPROM. It
276 * prevents the PHY's internal boot ROM doing another pointless (and
277 * slow) reload of the firmware image (the microcontroller's code
278 * memory is not affected by the microcontroller reset). */
279 efx_mdio_write(efx, 1, 0xc317, 0x00ff);
280 efx_mdio_write(efx, 1, 0xc300, 0x0002);
281 msleep(20);
282
283 /* Restart microcontroller execution of firmware from RAM */
284 qt2025c_restart_firmware(efx);
285
286 /* Wait for the microcontroller to be ready again */
287 rc = qt2025c_wait_reset(efx);
288 if (rc < 0) {
289 EFX_ERR(efx, "PHY microcontroller reset during mode switch "
290 "timed out\n");
291 return rc;
94 } 292 }
95 293
96 return 0; 294 return 0;
@@ -137,6 +335,16 @@ static int qt202x_reset_phy(struct efx_nic *efx)
137 335
138static int qt202x_phy_probe(struct efx_nic *efx) 336static int qt202x_phy_probe(struct efx_nic *efx)
139{ 337{
338 struct qt202x_phy_data *phy_data;
339
340 phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
341 if (!phy_data)
342 return -ENOMEM;
343 efx->phy_data = phy_data;
344 phy_data->phy_mode = efx->phy_mode;
345 phy_data->bug17190_in_bad_state = false;
346 phy_data->bug17190_timer = 0;
347
140 efx->mdio.mmds = QT202X_REQUIRED_DEVS; 348 efx->mdio.mmds = QT202X_REQUIRED_DEVS;
141 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 349 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
142 efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS; 350 efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
@@ -145,7 +353,6 @@ static int qt202x_phy_probe(struct efx_nic *efx)
145 353
146static int qt202x_phy_init(struct efx_nic *efx) 354static int qt202x_phy_init(struct efx_nic *efx)
147{ 355{
148 struct qt202x_phy_data *phy_data;
149 u32 devid; 356 u32 devid;
150 int rc; 357 int rc;
151 358
@@ -155,17 +362,14 @@ static int qt202x_phy_init(struct efx_nic *efx)
155 return rc; 362 return rc;
156 } 363 }
157 364
158 phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
159 if (!phy_data)
160 return -ENOMEM;
161 efx->phy_data = phy_data;
162
163 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); 365 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
164 EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n", 366 EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
165 devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid), 367 devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
166 efx_mdio_id_rev(devid)); 368 efx_mdio_id_rev(devid));
167 369
168 phy_data->phy_mode = efx->phy_mode; 370 if (efx->phy_type == PHY_TYPE_QT2025C)
371 qt2025c_firmware_id(efx);
372
169 return 0; 373 return 0;
170} 374}
171 375
@@ -183,6 +387,9 @@ static bool qt202x_phy_poll(struct efx_nic *efx)
183 efx->link_state.fd = true; 387 efx->link_state.fd = true;
184 efx->link_state.fc = efx->wanted_fc; 388 efx->link_state.fc = efx->wanted_fc;
185 389
390 if (efx->phy_type == PHY_TYPE_QT2025C)
391 qt2025c_bug17190_workaround(efx);
392
186 return efx->link_state.up != was_up; 393 return efx->link_state.up != was_up;
187} 394}
188 395
@@ -191,6 +398,10 @@ static int qt202x_phy_reconfigure(struct efx_nic *efx)
191 struct qt202x_phy_data *phy_data = efx->phy_data; 398 struct qt202x_phy_data *phy_data = efx->phy_data;
192 399
193 if (efx->phy_type == PHY_TYPE_QT2025C) { 400 if (efx->phy_type == PHY_TYPE_QT2025C) {
401 int rc = qt2025c_select_phy_mode(efx);
402 if (rc)
403 return rc;
404
194 /* There are several different register bits which can 405 /* There are several different register bits which can
195 * disable TX (and save power) on direct-attach cables 406 * disable TX (and save power) on direct-attach cables
196 * or optical transceivers, varying somewhat between 407 * or optical transceivers, varying somewhat between
@@ -224,7 +435,7 @@ static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecm
224 mdio45_ethtool_gset(&efx->mdio, ecmd); 435 mdio45_ethtool_gset(&efx->mdio, ecmd);
225} 436}
226 437
227static void qt202x_phy_fini(struct efx_nic *efx) 438static void qt202x_phy_remove(struct efx_nic *efx)
228{ 439{
229 /* Free the context block */ 440 /* Free the context block */
230 kfree(efx->phy_data); 441 kfree(efx->phy_data);
@@ -236,7 +447,8 @@ struct efx_phy_operations falcon_qt202x_phy_ops = {
236 .init = qt202x_phy_init, 447 .init = qt202x_phy_init,
237 .reconfigure = qt202x_phy_reconfigure, 448 .reconfigure = qt202x_phy_reconfigure,
238 .poll = qt202x_phy_poll, 449 .poll = qt202x_phy_poll,
239 .fini = qt202x_phy_fini, 450 .fini = efx_port_dummy_op_void,
451 .remove = qt202x_phy_remove,
240 .get_settings = qt202x_phy_get_settings, 452 .get_settings = qt202x_phy_get_settings,
241 .set_settings = efx_mdio_set_settings, 453 .set_settings = efx_mdio_set_settings,
242}; 454};
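
The qt202x hunk above has qt2025c_firmware_id() pack the major/minor/patch/build bytes into one u32 so qt2025c_select_phy_mode() can compare against 0x02000100 (firmware 2.0.1.0). A small runnable sketch of that packing and comparison; the register bytes below are made up for illustration:

#include <stdio.h>
#include <stdint.h>

/* Packs PHY firmware version bytes the way qt2025c_firmware_id() does:
 * byte 3 holds the major/minor nibbles, bytes 4 and 5 patch and build. */
static uint32_t pack_fw_version(const uint8_t id[9])
{
	return ((uint32_t)(id[3] & 0xf0) << 20) |
	       ((uint32_t)(id[3] & 0x0f) << 16) |
	       ((uint32_t)id[4] << 8) |
	       id[5];
}

int main(void)
{
	/* Made-up register contents corresponding to firmware 2.0.2.5. */
	uint8_t id[9] = { 0x20, 0x25, 1, 0x20, 0x02, 0x05, 10, 1, 7 };
	uint32_t ver = pack_fw_version(id);

	printf("firmware_ver = 0x%08x\n", ver);
	/* Only 2.0.1.0 or newer firmware supports SFP+ Self-Configure mode. */
	printf("self-configure supported: %s\n",
	       ver >= 0x02000100 ? "yes" : "no");
	return 0;
}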
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index de07a4f031b2..f8c6771e66d8 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -133,6 +133,7 @@ static int siena_probe_port(struct efx_nic *efx)
133 133
134void siena_remove_port(struct efx_nic *efx) 134void siena_remove_port(struct efx_nic *efx)
135{ 135{
136 efx->phy_op->remove(efx);
136 efx_nic_free_buffer(efx, &efx->stats_buffer); 137 efx_nic_free_buffer(efx, &efx->stats_buffer);
137} 138}
138 139
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index ca11572a49a9..3009c297c135 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -202,10 +202,14 @@ static ssize_t set_phy_short_reach(struct device *dev,
202 int rc; 202 int rc;
203 203
204 rtnl_lock(); 204 rtnl_lock();
205 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR, 205 if (efx->state != STATE_RUNNING) {
206 MDIO_PMA_10GBT_TXPWR_SHORT, 206 rc = -EBUSY;
207 count != 0 && *buf != '0'); 207 } else {
208 rc = efx_reconfigure_port(efx); 208 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
209 MDIO_PMA_10GBT_TXPWR_SHORT,
210 count != 0 && *buf != '0');
211 rc = efx_reconfigure_port(efx);
212 }
209 rtnl_unlock(); 213 rtnl_unlock();
210 214
211 return rc < 0 ? rc : (ssize_t)count; 215 return rc < 0 ? rc : (ssize_t)count;
@@ -298,36 +302,62 @@ static int tenxpress_init(struct efx_nic *efx)
298 return 0; 302 return 0;
299} 303}
300 304
301static int sfx7101_phy_probe(struct efx_nic *efx) 305static int tenxpress_phy_probe(struct efx_nic *efx)
302{ 306{
303 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS; 307 struct tenxpress_phy_data *phy_data;
304 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 308 int rc;
305 efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS; 309
306 return 0; 310 /* Allocate phy private storage */
307} 311 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
312 if (!phy_data)
313 return -ENOMEM;
314 efx->phy_data = phy_data;
315 phy_data->phy_mode = efx->phy_mode;
316
317 /* Create any special files */
318 if (efx->phy_type == PHY_TYPE_SFT9001B) {
319 rc = device_create_file(&efx->pci_dev->dev,
320 &dev_attr_phy_short_reach);
321 if (rc)
322 goto fail;
323 }
324
325 if (efx->phy_type == PHY_TYPE_SFX7101) {
326 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
327 efx->mdio.mode_support = MDIO_SUPPORTS_C45;
328
329 efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
330
331 efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
332 ADVERTISED_10000baseT_Full);
333 } else {
334 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
335 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
336
337 efx->loopback_modes = (SFT9001_LOOPBACKS |
338 FALCON_XMAC_LOOPBACKS |
339 FALCON_GMAC_LOOPBACKS);
340
341 efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
342 ADVERTISED_10000baseT_Full |
343 ADVERTISED_1000baseT_Full |
344 ADVERTISED_100baseT_Full);
345 }
308 346
309static int sft9001_phy_probe(struct efx_nic *efx)
310{
311 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
312 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
313 efx->loopback_modes = (SFT9001_LOOPBACKS | FALCON_XMAC_LOOPBACKS |
314 FALCON_GMAC_LOOPBACKS);
315 return 0; 347 return 0;
348
349fail:
350 kfree(efx->phy_data);
351 efx->phy_data = NULL;
352 return rc;
316} 353}
317 354
318static int tenxpress_phy_init(struct efx_nic *efx) 355static int tenxpress_phy_init(struct efx_nic *efx)
319{ 356{
320 struct tenxpress_phy_data *phy_data; 357 int rc;
321 int rc = 0;
322 358
323 falcon_board(efx)->type->init_phy(efx); 359 falcon_board(efx)->type->init_phy(efx);
324 360
325 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
326 if (!phy_data)
327 return -ENOMEM;
328 efx->phy_data = phy_data;
329 phy_data->phy_mode = efx->phy_mode;
330
331 if (!(efx->phy_mode & PHY_MODE_SPECIAL)) { 361 if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
332 if (efx->phy_type == PHY_TYPE_SFT9001A) { 362 if (efx->phy_type == PHY_TYPE_SFT9001A) {
333 int reg; 363 int reg;
@@ -341,44 +371,27 @@ static int tenxpress_phy_init(struct efx_nic *efx)
341 371
342 rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS); 372 rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
343 if (rc < 0) 373 if (rc < 0)
344 goto fail; 374 return rc;
345 375
346 rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); 376 rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
347 if (rc < 0) 377 if (rc < 0)
348 goto fail; 378 return rc;
349 } 379 }
350 380
351 rc = tenxpress_init(efx); 381 rc = tenxpress_init(efx);
352 if (rc < 0) 382 if (rc < 0)
353 goto fail; 383 return rc;
354 384
355 /* Initialise advertising flags */ 385 /* Reinitialise flow control settings */
356 efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
357 ADVERTISED_10000baseT_Full);
358 if (efx->phy_type != PHY_TYPE_SFX7101)
359 efx->link_advertising |= (ADVERTISED_1000baseT_Full |
360 ADVERTISED_100baseT_Full);
361 efx_link_set_wanted_fc(efx, efx->wanted_fc); 386 efx_link_set_wanted_fc(efx, efx->wanted_fc);
362 efx_mdio_an_reconfigure(efx); 387 efx_mdio_an_reconfigure(efx);
363 388
364 if (efx->phy_type == PHY_TYPE_SFT9001B) {
365 rc = device_create_file(&efx->pci_dev->dev,
366 &dev_attr_phy_short_reach);
367 if (rc)
368 goto fail;
369 }
370
371 schedule_timeout_uninterruptible(HZ / 5); /* 200ms */ 389 schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
372 390
373 /* Let XGXS and SerDes out of reset */ 391 /* Let XGXS and SerDes out of reset */
374 falcon_reset_xaui(efx); 392 falcon_reset_xaui(efx);
375 393
376 return 0; 394 return 0;
377
378 fail:
379 kfree(efx->phy_data);
380 efx->phy_data = NULL;
381 return rc;
382} 395}
383 396
384/* Perform a "special software reset" on the PHY. The caller is 397/* Perform a "special software reset" on the PHY. The caller is
@@ -589,25 +602,26 @@ static bool tenxpress_phy_poll(struct efx_nic *efx)
589 return !efx_link_state_equal(&efx->link_state, &old_state); 602 return !efx_link_state_equal(&efx->link_state, &old_state);
590} 603}
591 604
592static void tenxpress_phy_fini(struct efx_nic *efx) 605static void sfx7101_phy_fini(struct efx_nic *efx)
593{ 606{
594 int reg; 607 int reg;
595 608
609 /* Power down the LNPGA */
610 reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
611 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
612
613 /* Waiting here ensures that the board fini, which can turn
614 * off the power to the PHY, won't get run until the LNPGA
615 * powerdown has been given long enough to complete. */
616 schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
617}
618
619static void tenxpress_phy_remove(struct efx_nic *efx)
620{
596 if (efx->phy_type == PHY_TYPE_SFT9001B) 621 if (efx->phy_type == PHY_TYPE_SFT9001B)
597 device_remove_file(&efx->pci_dev->dev, 622 device_remove_file(&efx->pci_dev->dev,
598 &dev_attr_phy_short_reach); 623 &dev_attr_phy_short_reach);
599 624
600 if (efx->phy_type == PHY_TYPE_SFX7101) {
601 /* Power down the LNPGA */
602 reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
603 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
604
605 /* Waiting here ensures that the board fini, which can turn
606 * off the power to the PHY, won't get run until the LNPGA
607 * powerdown has been given long enough to complete. */
608 schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
609 }
610
611 kfree(efx->phy_data); 625 kfree(efx->phy_data);
612 efx->phy_data = NULL; 626 efx->phy_data = NULL;
613} 627}
@@ -819,11 +833,12 @@ static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
819} 833}
820 834
821struct efx_phy_operations falcon_sfx7101_phy_ops = { 835struct efx_phy_operations falcon_sfx7101_phy_ops = {
822 .probe = sfx7101_phy_probe, 836 .probe = tenxpress_phy_probe,
823 .init = tenxpress_phy_init, 837 .init = tenxpress_phy_init,
824 .reconfigure = tenxpress_phy_reconfigure, 838 .reconfigure = tenxpress_phy_reconfigure,
825 .poll = tenxpress_phy_poll, 839 .poll = tenxpress_phy_poll,
826 .fini = tenxpress_phy_fini, 840 .fini = sfx7101_phy_fini,
841 .remove = tenxpress_phy_remove,
827 .get_settings = tenxpress_get_settings, 842 .get_settings = tenxpress_get_settings,
828 .set_settings = tenxpress_set_settings, 843 .set_settings = tenxpress_set_settings,
829 .set_npage_adv = sfx7101_set_npage_adv, 844 .set_npage_adv = sfx7101_set_npage_adv,
@@ -832,11 +847,12 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
832}; 847};
833 848
834struct efx_phy_operations falcon_sft9001_phy_ops = { 849struct efx_phy_operations falcon_sft9001_phy_ops = {
835 .probe = sft9001_phy_probe, 850 .probe = tenxpress_phy_probe,
836 .init = tenxpress_phy_init, 851 .init = tenxpress_phy_init,
837 .reconfigure = tenxpress_phy_reconfigure, 852 .reconfigure = tenxpress_phy_reconfigure,
838 .poll = tenxpress_phy_poll, 853 .poll = tenxpress_phy_poll,
839 .fini = tenxpress_phy_fini, 854 .fini = efx_port_dummy_op_void,
855 .remove = tenxpress_phy_remove,
840 .get_settings = tenxpress_get_settings, 856 .get_settings = tenxpress_get_settings,
841 .set_settings = tenxpress_set_settings, 857 .set_settings = tenxpress_set_settings,
842 .set_npage_adv = sft9001_set_npage_adv, 858 .set_npage_adv = sft9001_set_npage_adv,
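The sfc hunks above all implement one split: PHY driver-state allocation moves from .init into a new .probe hook, and the matching kfree() moves from .fini into .remove, with .fini left for work that actually touches the hardware (or efx_port_dummy_op_void when there is none). A minimal sketch of the resulting pairing, condensed from the tenxpress hunks (error paths and the mdio/loopback/advertising setup omitted):

        static int tenxpress_phy_probe(struct efx_nic *efx)
        {
                struct tenxpress_phy_data *phy_data;

                /* Allocate driver-private state once, at probe time. */
                phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
                if (!phy_data)
                        return -ENOMEM;
                efx->phy_data = phy_data;
                phy_data->phy_mode = efx->phy_mode;
                return 0;
        }

        static void tenxpress_phy_remove(struct efx_nic *efx)
        {
                /* Unconditional undo of probe; .fini no longer frees this,
                 * so a skipped or failed init cannot leave a stale free. */
                kfree(efx->phy_data);
                efx->phy_data = NULL;
        }

With this pairing, siena_remove_port() (and its falcon equivalent) can call efx->phy_op->remove(efx) unconditionally, as the siena.c hunk does.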
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index e669f94e821b..a8b70ef6d817 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -821,8 +821,6 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
821 EFX_TXQ_MASK]; 821 EFX_TXQ_MASK];
822 efx_tsoh_free(tx_queue, buffer); 822 efx_tsoh_free(tx_queue, buffer);
823 EFX_BUG_ON_PARANOID(buffer->skb); 823 EFX_BUG_ON_PARANOID(buffer->skb);
824 buffer->len = 0;
825 buffer->continuation = true;
826 if (buffer->unmap_len) { 824 if (buffer->unmap_len) {
827 unmap_addr = (buffer->dma_addr + buffer->len - 825 unmap_addr = (buffer->dma_addr + buffer->len -
828 buffer->unmap_len); 826 buffer->unmap_len);
@@ -836,6 +834,8 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
836 PCI_DMA_TODEVICE); 834 PCI_DMA_TODEVICE);
837 buffer->unmap_len = 0; 835 buffer->unmap_len = 0;
838 } 836 }
837 buffer->len = 0;
838 buffer->continuation = true;
839 } 839 }
840} 840}
841 841
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 01e99f22210e..2834a01bae24 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -849,13 +849,13 @@ static void tun_sock_write_space(struct sock *sk)
849 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 849 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
850 wake_up_interruptible_sync(sk->sk_sleep); 850 wake_up_interruptible_sync(sk->sk_sleep);
851 851
852 tun = container_of(sk, struct tun_sock, sk)->tun; 852 tun = tun_sk(sk)->tun;
853 kill_fasync(&tun->fasync, SIGIO, POLL_OUT); 853 kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
854} 854}
855 855
856static void tun_sock_destruct(struct sock *sk) 856static void tun_sock_destruct(struct sock *sk)
857{ 857{
858 free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev); 858 free_netdev(tun_sk(sk)->tun->dev);
859} 859}
860 860
861static struct proto tun_proto = { 861static struct proto tun_proto = {
@@ -990,7 +990,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
990 sk->sk_write_space = tun_sock_write_space; 990 sk->sk_write_space = tun_sock_write_space;
991 sk->sk_sndbuf = INT_MAX; 991 sk->sk_sndbuf = INT_MAX;
992 992
993 container_of(sk, struct tun_sock, sk)->tun = tun; 993 tun_sk(sk)->tun = tun;
994 994
995 security_tun_dev_post_create(sk); 995 security_tun_dev_post_create(sk);
996 996
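tun_sk() itself is defined outside this excerpt; from its three uses above it is evidently a typed wrapper around the container_of() idiom, recovering the enclosing tun_sock from the embedded struct sock. A standalone, userspace illustration of that idiom (types and field names here are simplified stand-ins, not the driver's real definitions):

        #include <stddef.h>
        #include <stdio.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct sock { int refcnt; };
        struct tun_struct { const char *name; };

        struct tun_sock {                /* sock embedded in a larger object */
                struct sock sk;
                struct tun_struct *tun;
        };

        static struct tun_sock *tun_sk(struct sock *sk)
        {
                return container_of(sk, struct tun_sock, sk);
        }

        int main(void)
        {
                struct tun_struct tun = { .name = "tun0" };
                struct tun_sock ts = { .sk = { .refcnt = 1 }, .tun = &tun };
                struct sock *sk = &ts.sk;       /* what the callbacks receive */

                printf("%s\n", tun_sk(sk)->tun->name);  /* prints: tun0 */
                return 0;
        }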
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index afaf088b72ea..41ad2f3697c7 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1563,7 +1563,10 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
1563 1563
1564static void ugeth_quiesce(struct ucc_geth_private *ugeth) 1564static void ugeth_quiesce(struct ucc_geth_private *ugeth)
1565{ 1565{
1566 /* Wait for and prevent any further xmits. */ 1566 /* Prevent any further xmits, plus detach the device. */
1567 netif_device_detach(ugeth->ndev);
1568
1569 /* Wait for any current xmits to finish. */
1567 netif_tx_disable(ugeth->ndev); 1570 netif_tx_disable(ugeth->ndev);
1568 1571
1569 /* Disable the interrupt to avoid NAPI rescheduling. */ 1572 /* Disable the interrupt to avoid NAPI rescheduling. */
@@ -1577,7 +1580,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
1577{ 1580{
1578 napi_enable(&ugeth->napi); 1581 napi_enable(&ugeth->napi);
1579 enable_irq(ugeth->ug_info->uf_info.irq); 1582 enable_irq(ugeth->ug_info->uf_info.irq);
1580 netif_tx_wake_all_queues(ugeth->ndev); 1583 netif_device_attach(ugeth->ndev);
1581} 1584}
1582 1585
1583/* Called every time the controller might need to be made 1586/* Called every time the controller might need to be made
@@ -1648,25 +1651,28 @@ static void adjust_link(struct net_device *dev)
1648 ugeth->oldspeed = phydev->speed; 1651 ugeth->oldspeed = phydev->speed;
1649 } 1652 }
1650 1653
1651 /*
1652 * To change the MAC configuration we need to disable the
1653 * controller. To do so, we have to either grab ugeth->lock,
1654 * which is a bad idea since 'graceful stop' commands might
1655 * take quite a while, or we can quiesce driver's activity.
1656 */
1657 ugeth_quiesce(ugeth);
1658 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
1659
1660 out_be32(&ug_regs->maccfg2, tempval);
1661 out_be32(&uf_regs->upsmr, upsmr);
1662
1663 ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
1664 ugeth_activate(ugeth);
1665
1666 if (!ugeth->oldlink) { 1654 if (!ugeth->oldlink) {
1667 new_state = 1; 1655 new_state = 1;
1668 ugeth->oldlink = 1; 1656 ugeth->oldlink = 1;
1669 } 1657 }
1658
1659 if (new_state) {
1660 /*
1661 * To change the MAC configuration we need to disable
1662 * the controller. To do so, we have to either grab
1663 * ugeth->lock, which is a bad idea since 'graceful
1664 * stop' commands might take quite a while, or we can
1665 * quiesce driver's activity.
1666 */
1667 ugeth_quiesce(ugeth);
1668 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
1669
1670 out_be32(&ug_regs->maccfg2, tempval);
1671 out_be32(&uf_regs->upsmr, upsmr);
1672
1673 ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
1674 ugeth_activate(ugeth);
1675 }
1670 } else if (ugeth->oldlink) { 1676 } else if (ugeth->oldlink) {
1671 new_state = 1; 1677 new_state = 1;
1672 ugeth->oldlink = 0; 1678 ugeth->oldlink = 0;
@@ -3273,7 +3279,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3273 /* Handle the transmitted buffer and release */ 3279 /* Handle the transmitted buffer and release */
3274 /* the BD to be used with the current frame */ 3280 /* the BD to be used with the current frame */
3275 3281
3276 if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) 3282 if (bd == ugeth->txBd[txQ]) /* queue empty? */
3277 break; 3283 break;
3278 3284
3279 dev->stats.tx_packets++; 3285 dev->stats.tx_packets++;
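Taken together, the ucc_geth hunks leave the quiesce/activate pair looking roughly like this (condensed from the hunks above; the disable_irq()/napi_disable() tail is inferred from its comment and the mirror-image activate path). The device is detached before transmits are drained, so no new frame can slip in between the two steps, and activate undoes everything in reverse order:

        static void ugeth_quiesce(struct ucc_geth_private *ugeth)
        {
                /* Prevent any further xmits, plus detach the device. */
                netif_device_detach(ugeth->ndev);

                /* Wait for any current xmits to finish. */
                netif_tx_disable(ugeth->ndev);

                /* Disable the interrupt to avoid NAPI rescheduling. */
                disable_irq(ugeth->ug_info->uf_info.irq);
                napi_disable(&ugeth->napi);
        }

        static void ugeth_activate(struct ucc_geth_private *ugeth)
        {
                napi_enable(&ugeth->napi);
                enable_irq(ugeth->ug_info->uf_info.irq);
                netif_device_attach(ugeth->ndev); /* replaces netif_tx_wake_all_queues() */
        }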
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 593e01f64e9b..611b80435955 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -102,6 +102,7 @@ static const int multicast_filter_limit = 32;
102#include <linux/ethtool.h> 102#include <linux/ethtool.h>
103#include <linux/crc32.h> 103#include <linux/crc32.h>
104#include <linux/bitops.h> 104#include <linux/bitops.h>
105#include <linux/workqueue.h>
105#include <asm/processor.h> /* Processor type for cache alignment. */ 106#include <asm/processor.h> /* Processor type for cache alignment. */
106#include <asm/io.h> 107#include <asm/io.h>
107#include <asm/irq.h> 108#include <asm/irq.h>
@@ -389,6 +390,7 @@ struct rhine_private {
389 struct net_device *dev; 390 struct net_device *dev;
390 struct napi_struct napi; 391 struct napi_struct napi;
391 spinlock_t lock; 392 spinlock_t lock;
393 struct work_struct reset_task;
392 394
393 /* Frequently used values: keep some adjacent for cache effect. */ 395 /* Frequently used values: keep some adjacent for cache effect. */
394 u32 quirks; 396 u32 quirks;
@@ -407,6 +409,7 @@ struct rhine_private {
407static int mdio_read(struct net_device *dev, int phy_id, int location); 409static int mdio_read(struct net_device *dev, int phy_id, int location);
408static void mdio_write(struct net_device *dev, int phy_id, int location, int value); 410static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
409static int rhine_open(struct net_device *dev); 411static int rhine_open(struct net_device *dev);
412static void rhine_reset_task(struct work_struct *work);
410static void rhine_tx_timeout(struct net_device *dev); 413static void rhine_tx_timeout(struct net_device *dev);
411static netdev_tx_t rhine_start_tx(struct sk_buff *skb, 414static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
412 struct net_device *dev); 415 struct net_device *dev);
@@ -775,6 +778,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
775 dev->irq = pdev->irq; 778 dev->irq = pdev->irq;
776 779
777 spin_lock_init(&rp->lock); 780 spin_lock_init(&rp->lock);
781 INIT_WORK(&rp->reset_task, rhine_reset_task);
782
778 rp->mii_if.dev = dev; 783 rp->mii_if.dev = dev;
779 rp->mii_if.mdio_read = mdio_read; 784 rp->mii_if.mdio_read = mdio_read;
780 rp->mii_if.mdio_write = mdio_write; 785 rp->mii_if.mdio_write = mdio_write;
@@ -1179,22 +1184,18 @@ static int rhine_open(struct net_device *dev)
1179 return 0; 1184 return 0;
1180} 1185}
1181 1186
1182static void rhine_tx_timeout(struct net_device *dev) 1187static void rhine_reset_task(struct work_struct *work)
1183{ 1188{
1184 struct rhine_private *rp = netdev_priv(dev); 1189 struct rhine_private *rp = container_of(work, struct rhine_private,
1185 void __iomem *ioaddr = rp->base; 1190 reset_task);
1186 1191 struct net_device *dev = rp->dev;
1187 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
1188 "%4.4x, resetting...\n",
1189 dev->name, ioread16(ioaddr + IntrStatus),
1190 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1191 1192
1192 /* protect against concurrent rx interrupts */ 1193 /* protect against concurrent rx interrupts */
1193 disable_irq(rp->pdev->irq); 1194 disable_irq(rp->pdev->irq);
1194 1195
1195 napi_disable(&rp->napi); 1196 napi_disable(&rp->napi);
1196 1197
1197 spin_lock(&rp->lock); 1198 spin_lock_bh(&rp->lock);
1198 1199
1199 /* clear all descriptors */ 1200 /* clear all descriptors */
1200 free_tbufs(dev); 1201 free_tbufs(dev);
@@ -1206,7 +1207,7 @@ static void rhine_tx_timeout(struct net_device *dev)
1206 rhine_chip_reset(dev); 1207 rhine_chip_reset(dev);
1207 init_registers(dev); 1208 init_registers(dev);
1208 1209
1209 spin_unlock(&rp->lock); 1210 spin_unlock_bh(&rp->lock);
1210 enable_irq(rp->pdev->irq); 1211 enable_irq(rp->pdev->irq);
1211 1212
1212 dev->trans_start = jiffies; 1213 dev->trans_start = jiffies;
@@ -1214,6 +1215,19 @@ static void rhine_tx_timeout(struct net_device *dev)
1214 netif_wake_queue(dev); 1215 netif_wake_queue(dev);
1215} 1216}
1216 1217
1218static void rhine_tx_timeout(struct net_device *dev)
1219{
1220 struct rhine_private *rp = netdev_priv(dev);
1221 void __iomem *ioaddr = rp->base;
1222
1223 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
1224 "%4.4x, resetting...\n",
1225 dev->name, ioread16(ioaddr + IntrStatus),
1226 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1227
1228 schedule_work(&rp->reset_task);
1229}
1230
1217static netdev_tx_t rhine_start_tx(struct sk_buff *skb, 1231static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1218 struct net_device *dev) 1232 struct net_device *dev)
1219{ 1233{
@@ -1830,10 +1844,11 @@ static int rhine_close(struct net_device *dev)
1830 struct rhine_private *rp = netdev_priv(dev); 1844 struct rhine_private *rp = netdev_priv(dev);
1831 void __iomem *ioaddr = rp->base; 1845 void __iomem *ioaddr = rp->base;
1832 1846
1833 spin_lock_irq(&rp->lock);
1834
1835 netif_stop_queue(dev);
1836 napi_disable(&rp->napi); 1847 napi_disable(&rp->napi);
1848 cancel_work_sync(&rp->reset_task);
1849 netif_stop_queue(dev);
1850
1851 spin_lock_irq(&rp->lock);
1837 1852
1838 if (debug > 1) 1853 if (debug > 1)
1839 printk(KERN_DEBUG "%s: Shutting down ethercard, " 1854 printk(KERN_DEBUG "%s: Shutting down ethercard, "
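The via-rhine rework is a standard instance of deferring recovery out of ndo_tx_timeout(): the watchdog calls the timeout handler in atomic context, where napi_disable() and other sleeping calls must not be used, so the handler now only logs and schedules a work item, and the actual reset runs later in process context. That is also why the lock becomes spin_lock_bh() and why rhine_close() must cancel_work_sync() before tearing anything down. The bare pattern, with hypothetical foo_* names:

        #include <linux/netdevice.h>
        #include <linux/workqueue.h>

        struct foo_priv {
                struct work_struct reset_task;
                /* ... */
        };

        static void foo_reset_task(struct work_struct *work)
        {
                struct foo_priv *fp = container_of(work, struct foo_priv,
                                                   reset_task);

                /* Heavy recovery: may sleep, disable NAPI, reset the chip... */
                (void)fp;
        }

        static void foo_tx_timeout(struct net_device *dev)
        {
                struct foo_priv *fp = netdev_priv(dev);

                schedule_work(&fp->reset_task);  /* defer to process context */
        }

        /* probe:  INIT_WORK(&fp->reset_task, foo_reset_task);
         * close:  cancel_work_sync(&fp->reset_task);  before freeing. */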
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index f1c4b2a1e867..0fdfd58a35a1 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4087,21 +4087,21 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4087 goto _exit0; 4087 goto _exit0;
4088 } 4088 }
4089 4089
4090 if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) { 4090 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4091 vxge_debug_ll_config(VXGE_TRACE, 4091 vxge_debug_ll_config(VXGE_TRACE,
4092 "%s : using 64bit DMA", __func__); 4092 "%s : using 64bit DMA", __func__);
4093 4093
4094 high_dma = 1; 4094 high_dma = 1;
4095 4095
4096 if (pci_set_consistent_dma_mask(pdev, 4096 if (pci_set_consistent_dma_mask(pdev,
4097 0xffffffffffffffffULL)) { 4097 DMA_BIT_MASK(64))) {
4098 vxge_debug_init(VXGE_ERR, 4098 vxge_debug_init(VXGE_ERR,
4099 "%s : unable to obtain 64bit DMA for " 4099 "%s : unable to obtain 64bit DMA for "
4100 "consistent allocations", __func__); 4100 "consistent allocations", __func__);
4101 ret = -ENOMEM; 4101 ret = -ENOMEM;
4102 goto _exit1; 4102 goto _exit1;
4103 } 4103 }
4104 } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) { 4104 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
4105 vxge_debug_ll_config(VXGE_TRACE, 4105 vxge_debug_ll_config(VXGE_TRACE,
4106 "%s : using 32bit DMA", __func__); 4106 "%s : using 32bit DMA", __func__);
4107 } else { 4107 } else {
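DMA_BIT_MASK(n) is simply an n-bit all-ones constant (from <linux/dma-mapping.h>), so the vxge hunk changes no behaviour: DMA_BIT_MASK(64) equals 0xffffffffffffffffULL and DMA_BIT_MASK(32) equals 0xffffffff; it only states the intent more clearly. A throwaway userspace check, with the macro body written out for illustration:

        #include <stdio.h>

        #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

        int main(void)
        {
                printf("%#llx\n", DMA_BIT_MASK(64));   /* 0xffffffffffffffff */
                printf("%#llx\n", DMA_BIT_MASK(32));   /* 0xffffffff */
                return 0;
        }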
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index a4c086f069b1..e63b7c40d0ee 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1903,17 +1903,6 @@ accept:
1903 rxs->noise = sc->ah->ah_noise_floor; 1903 rxs->noise = sc->ah->ah_noise_floor;
1904 rxs->signal = rxs->noise + rs.rs_rssi; 1904 rxs->signal = rxs->noise + rs.rs_rssi;
1905 1905
1906 /* An rssi of 35 indicates you should be able use
1907 * 54 Mbps reliably. A more elaborate scheme can be used
1908 * here but it requires a map of SNR/throughput for each
1909 * possible mode used */
1910 rxs->qual = rs.rs_rssi * 100 / 35;
1911
1912 /* rssi can be more than 35 though, anything above that
1913 * should be considered at 100% */
1914 if (rxs->qual > 100)
1915 rxs->qual = 100;
1916
1917 rxs->antenna = rs.rs_antenna; 1906 rxs->antenna = rs.rs_antenna;
1918 rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate); 1907 rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
1919 rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs); 1908 rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
@@ -2381,6 +2370,9 @@ ath5k_init(struct ath5k_softc *sc)
2381 */ 2370 */
2382 ath5k_stop_locked(sc); 2371 ath5k_stop_locked(sc);
2383 2372
2373 /* Set PHY calibration interval */
2374 ah->ah_cal_intval = ath5k_calinterval;
2375
2384 /* 2376 /*
2385 * The basic interface to setting the hardware in a good 2377 * The basic interface to setting the hardware in a good
2386 * state is ``reset''. On return the hardware is known to 2378 * state is ``reset''. On return the hardware is known to
@@ -2408,10 +2400,6 @@ ath5k_init(struct ath5k_softc *sc)
2408 2400
2409 /* Set ack to be sent at low bit-rates */ 2401 /* Set ack to be sent at low bit-rates */
2410 ath5k_hw_set_ack_bitrate_high(ah, false); 2402 ath5k_hw_set_ack_bitrate_high(ah, false);
2411
2412 /* Set PHY calibration inteval */
2413 ah->ah_cal_intval = ath5k_calinterval;
2414
2415 ret = 0; 2403 ret = 0;
2416done: 2404done:
2417 mmiowb(); 2405 mmiowb();
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 71b84d91dcff..efc420cd42bf 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -186,7 +186,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
186 wait = wait_time; 186 wait = wait_time;
187 while (ath9k_hw_numtxpending(ah, q)) { 187 while (ath9k_hw_numtxpending(ah, q)) {
188 if ((--wait) == 0) { 188 if ((--wait) == 0) {
189 ath_print(common, ATH_DBG_QUEUE, 189 ath_print(common, ATH_DBG_FATAL,
190 "Failed to stop TX DMA in 100 " 190 "Failed to stop TX DMA in 100 "
191 "msec after killing last frame\n"); 191 "msec after killing last frame\n");
192 break; 192 break;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 0c87771383f0..e185479e295e 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -77,6 +77,9 @@
77#define ATH9K_TXERR_XTXOP 0x08 77#define ATH9K_TXERR_XTXOP 0x08
78#define ATH9K_TXERR_TIMER_EXPIRED 0x10 78#define ATH9K_TXERR_TIMER_EXPIRED 0x10
79#define ATH9K_TX_ACKED 0x20 79#define ATH9K_TX_ACKED 0x20
80#define ATH9K_TXERR_MASK \
81 (ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO | \
82 ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED)
80 83
81#define ATH9K_TX_BA 0x01 84#define ATH9K_TX_BA 0x01
82#define ATH9K_TX_PWRMGMT 0x02 85#define ATH9K_TX_PWRMGMT 0x02
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c48743452515..996eb90263cc 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1973,6 +1973,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1973 struct ieee80211_hw *hw = sc->hw; 1973 struct ieee80211_hw *hw = sc->hw;
1974 int r; 1974 int r;
1975 1975
1976 /* Stop ANI */
1977 del_timer_sync(&common->ani.timer);
1978
1976 ath9k_hw_set_interrupts(ah, 0); 1979 ath9k_hw_set_interrupts(ah, 0);
1977 ath_drain_all_txq(sc, retry_tx); 1980 ath_drain_all_txq(sc, retry_tx);
1978 ath_stoprecv(sc); 1981 ath_stoprecv(sc);
@@ -2014,6 +2017,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
2014 } 2017 }
2015 } 2018 }
2016 2019
2020 /* Start ANI */
2021 ath_start_ani(common);
2022
2017 return r; 2023 return r;
2018} 2024}
2019 2025
@@ -2508,6 +2514,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2508 return; /* another wiphy still in use */ 2514 return; /* another wiphy still in use */
2509 } 2515 }
2510 2516
2517 /* Ensure HW is awake when we try to shut it down. */
2518 ath9k_ps_wakeup(sc);
2519
2511 if (ah->btcoex_hw.enabled) { 2520 if (ah->btcoex_hw.enabled) {
2512 ath9k_hw_btcoex_disable(ah); 2521 ath9k_hw_btcoex_disable(ah);
2513 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) 2522 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -2528,6 +2537,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2528 /* disable HAL and put h/w to sleep */ 2537 /* disable HAL and put h/w to sleep */
2529 ath9k_hw_disable(ah); 2538 ath9k_hw_disable(ah);
2530 ath9k_hw_configpcipowersave(ah, 1, 1); 2539 ath9k_hw_configpcipowersave(ah, 1, 1);
2540 ath9k_ps_restore(sc);
2541
2542 /* Finally, put the chip in FULL SLEEP mode */
2531 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP); 2543 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
2532 2544
2533 sc->sc_flags |= SC_OP_INVALID; 2545 sc->sc_flags |= SC_OP_INVALID;
@@ -2641,8 +2653,10 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
2641 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || 2653 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
2642 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || 2654 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
2643 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) { 2655 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
2656 ath9k_ps_wakeup(sc);
2644 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 2657 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2645 ath_beacon_return(sc, avp); 2658 ath_beacon_return(sc, avp);
2659 ath9k_ps_restore(sc);
2646 } 2660 }
2647 2661
2648 sc->sc_flags &= ~SC_OP_BEACONS; 2662 sc->sc_flags &= ~SC_OP_BEACONS;
@@ -3091,15 +3105,21 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
3091 case IEEE80211_AMPDU_RX_STOP: 3105 case IEEE80211_AMPDU_RX_STOP:
3092 break; 3106 break;
3093 case IEEE80211_AMPDU_TX_START: 3107 case IEEE80211_AMPDU_TX_START:
3108 ath9k_ps_wakeup(sc);
3094 ath_tx_aggr_start(sc, sta, tid, ssn); 3109 ath_tx_aggr_start(sc, sta, tid, ssn);
3095 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); 3110 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3111 ath9k_ps_restore(sc);
3096 break; 3112 break;
3097 case IEEE80211_AMPDU_TX_STOP: 3113 case IEEE80211_AMPDU_TX_STOP:
3114 ath9k_ps_wakeup(sc);
3098 ath_tx_aggr_stop(sc, sta, tid); 3115 ath_tx_aggr_stop(sc, sta, tid);
3099 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 3116 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3117 ath9k_ps_restore(sc);
3100 break; 3118 break;
3101 case IEEE80211_AMPDU_TX_OPERATIONAL: 3119 case IEEE80211_AMPDU_TX_OPERATIONAL:
3120 ath9k_ps_wakeup(sc);
3102 ath_tx_aggr_resume(sc, sta, tid); 3121 ath_tx_aggr_resume(sc, sta, tid);
3122 ath9k_ps_restore(sc);
3103 break; 3123 break;
3104 default: 3124 default:
3105 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 3125 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
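All of the main.c hunks enforce the same invariant: any path that programs the hardware (stopping the beacon queue, aggregation start/stop/resume, the final shutdown sequence) must hold a power-save wakeup reference, i.e. be bracketed as

        ath9k_ps_wakeup(sc);
        /* ... register/DMA work while the chip is guaranteed awake ... */
        ath9k_ps_restore(sc);

otherwise the chip may be in network sleep while its registers are written. The ath_reset() change applies the same idea to ANI: its timer is stopped before the reset and restarted afterwards, so calibration never races with the reset itself.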
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 5321f735e5a0..f7af5ea54753 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -96,7 +96,7 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
96 pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm); 96 pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
97} 97}
98 98
99const static struct ath_bus_ops ath_pci_bus_ops = { 99static const struct ath_bus_ops ath_pci_bus_ops = {
100 .read_cachesize = ath_pci_read_cachesize, 100 .read_cachesize = ath_pci_read_cachesize,
101 .cleanup = ath_pci_cleanup, 101 .cleanup = ath_pci_cleanup,
102 .eeprom_read = ath_pci_eeprom_read, 102 .eeprom_read = ath_pci_eeprom_read,
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 2a11cc57ceea..fa12b9060b0b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1108,11 +1108,11 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1108 if (npend) { 1108 if (npend) {
1109 int r; 1109 int r;
1110 1110
1111 ath_print(common, ATH_DBG_XMIT, 1111 ath_print(common, ATH_DBG_FATAL,
1112 "Unable to stop TxDMA. Reset HAL!\n"); 1112 "Unable to stop TxDMA. Reset HAL!\n");
1113 1113
1114 spin_lock_bh(&sc->sc_resetlock); 1114 spin_lock_bh(&sc->sc_resetlock);
1115 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true); 1115 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
1116 if (r) 1116 if (r)
1117 ath_print(common, ATH_DBG_FATAL, 1117 ath_print(common, ATH_DBG_FATAL,
1118 "Unable to reset hardware; reset status %d\n", 1118 "Unable to reset hardware; reset status %d\n",
@@ -1414,17 +1414,9 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
1414 * For HT capable stations, we save tidno for later use. 1414 * For HT capable stations, we save tidno for later use.
1415 * We also override seqno set by upper layer with the one 1415 * We also override seqno set by upper layer with the one
1416 * in tx aggregation state. 1416 * in tx aggregation state.
1417 *
1418 * If fragmentation is on, the sequence number is
1419 * not overridden, since it has been
1420 * incremented by the fragmentation routine.
1421 *
1422 * FIXME: check if the fragmentation threshold exceeds
1423 * IEEE80211 max.
1424 */ 1417 */
1425 tid = ATH_AN_2_TID(an, bf->bf_tidno); 1418 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1426 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << 1419 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1427 IEEE80211_SEQ_SEQ_SHIFT);
1428 bf->bf_seqno = tid->seq_next; 1420 bf->bf_seqno = tid->seq_next;
1429 INCR(tid->seq_next, IEEE80211_SEQ_MAX); 1421 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1430} 1422}
@@ -1636,7 +1628,8 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1636 bf->bf_keyix = ATH9K_TXKEYIX_INVALID; 1628 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1637 } 1629 }
1638 1630
1639 if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR)) 1631 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1632 (sc->sc_flags & SC_OP_TXAGGR))
1640 assign_aggr_tid_seqno(skb, bf); 1633 assign_aggr_tid_seqno(skb, bf);
1641 1634
1642 bf->bf_mpdu = skb; 1635 bf->bf_mpdu = skb;
@@ -1780,7 +1773,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1780 struct ath_wiphy *aphy = hw->priv; 1773 struct ath_wiphy *aphy = hw->priv;
1781 struct ath_softc *sc = aphy->sc; 1774 struct ath_softc *sc = aphy->sc;
1782 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1775 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1783 int hdrlen, padsize; 1776 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1777 int padpos, padsize;
1784 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1778 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1785 struct ath_tx_control txctl; 1779 struct ath_tx_control txctl;
1786 1780
@@ -1792,7 +1786,6 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1792 * BSSes. 1786 * BSSes.
1793 */ 1787 */
1794 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 1788 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1795 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1796 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) 1789 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1797 sc->tx.seq_no += 0x10; 1790 sc->tx.seq_no += 0x10;
1798 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 1791 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
@@ -1800,9 +1793,9 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1800 } 1793 }
1801 1794
1802 /* Add the padding after the header if this is not already done */ 1795 /* Add the padding after the header if this is not already done */
1803 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1796 padpos = ath9k_cmn_padpos(hdr->frame_control);
1804 if (hdrlen & 3) { 1797 padsize = padpos & 3;
1805 padsize = hdrlen % 4; 1798 if (padsize && skb->len>padpos) {
1806 if (skb_headroom(skb) < padsize) { 1799 if (skb_headroom(skb) < padsize) {
1807 ath_print(common, ATH_DBG_XMIT, 1800 ath_print(common, ATH_DBG_XMIT,
1808 "TX CABQ padding failed\n"); 1801 "TX CABQ padding failed\n");
@@ -1810,7 +1803,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1810 return; 1803 return;
1811 } 1804 }
1812 skb_push(skb, padsize); 1805 skb_push(skb, padsize);
1813 memmove(skb->data, skb->data + padsize, hdrlen); 1806 memmove(skb->data, skb->data + padsize, padpos);
1814 } 1807 }
1815 1808
1816 txctl.txq = sc->beacon.cabq; 1809 txctl.txq = sc->beacon.cabq;
@@ -1838,7 +1831,8 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1838 struct ieee80211_hw *hw = sc->hw; 1831 struct ieee80211_hw *hw = sc->hw;
1839 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1832 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1840 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1833 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1841 int hdrlen, padsize; 1834 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
1835 int padpos, padsize;
1842 1836
1843 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); 1837 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
1844 1838
@@ -1853,14 +1847,14 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1853 tx_info->flags |= IEEE80211_TX_STAT_ACK; 1847 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1854 } 1848 }
1855 1849
1856 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1850 padpos = ath9k_cmn_padpos(hdr->frame_control);
1857 padsize = hdrlen & 3; 1851 padsize = padpos & 3;
1858 if (padsize && hdrlen >= 24) { 1852 if (padsize && skb->len>padpos+padsize) {
1859 /* 1853 /*
1860 * Remove MAC header padding before giving the frame back to 1854 * Remove MAC header padding before giving the frame back to
1861 * mac80211. 1855 * mac80211.
1862 */ 1856 */
1863 memmove(skb->data + padsize, skb->data, hdrlen); 1857 memmove(skb->data + padsize, skb->data, padpos);
1864 skb_pull(skb, padsize); 1858 skb_pull(skb, padsize);
1865 } 1859 }
1866 1860
@@ -2078,7 +2072,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2078 &txq->axq_q, lastbf->list.prev); 2072 &txq->axq_q, lastbf->list.prev);
2079 2073
2080 txq->axq_depth--; 2074 txq->axq_depth--;
2081 txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_FILT); 2075 txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
2082 txq->axq_tx_inprogress = false; 2076 txq->axq_tx_inprogress = false;
2083 spin_unlock_bh(&txq->axq_lock); 2077 spin_unlock_bh(&txq->axq_lock);
2084 2078
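The padding hunks swap the old "pad when the header length is not a multiple of 4" logic for a position computed by ath9k_cmn_padpos(hdr->frame_control) (the helper itself is outside this excerpt), which also covers QoS and 4-address frames; padsize is still just the distance from that position to the next 4-byte boundary. For the usual data/management header lengths (24, 26, 30 or 32 bytes), (padpos & 3) is always 0 or 2 and equals the required pad. A standalone check of the arithmetic:

        #include <stdio.h>

        int main(void)
        {
                int lengths[] = { 24, 26, 30, 32 };  /* common 802.11 header sizes */
                int i;

                for (i = 0; i < 4; i++) {
                        int padpos = lengths[i];
                        int padsize = padpos & 3;
                        printf("header %2d -> pad %d -> payload at %2d\n",
                               padpos, padsize, padpos + padsize);
                }
                return 0;
        }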
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 027be275e035..88d1fd02d40a 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -383,160 +383,44 @@ static inline
383 } 383 }
384} 384}
385 385
386/* Check if a DMA region fits the device constraints.
387 * Returns true, if the region is OK for usage with this device. */
388static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
389 dma_addr_t addr, size_t size)
390{
391 switch (ring->type) {
392 case B43_DMA_30BIT:
393 if ((u64)addr + size > (1ULL << 30))
394 return 0;
395 break;
396 case B43_DMA_32BIT:
397 if ((u64)addr + size > (1ULL << 32))
398 return 0;
399 break;
400 case B43_DMA_64BIT:
401 /* Currently we can't have addresses beyond
402 * 64bit in the kernel. */
403 break;
404 }
405 return 1;
406}
407
408#define is_4k_aligned(addr) (((u64)(addr) & 0x0FFFull) == 0)
409#define is_8k_aligned(addr) (((u64)(addr) & 0x1FFFull) == 0)
410
411static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
412 dma_addr_t dmaaddr, size_t size)
413{
414 ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
415 free_pages((unsigned long)base, get_order(size));
416}
417
418static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring,
419 dma_addr_t *dmaaddr, size_t size,
420 gfp_t gfp_flags)
421{
422 void *base;
423
424 base = (void *)__get_free_pages(gfp_flags, get_order(size));
425 if (!base)
426 return NULL;
427 memset(base, 0, size);
428 *dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
429 DMA_TO_DEVICE);
430 if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
431 free_pages((unsigned long)base, get_order(size));
432 return NULL;
433 }
434
435 return base;
436}
437
438static void * b43_get_and_map_ringmem(struct b43_dmaring *ring,
439 dma_addr_t *dmaaddr, size_t size)
440{
441 void *base;
442
443 base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
444 GFP_KERNEL);
445 if (!base) {
446 b43err(ring->dev->wl, "Failed to allocate or map pages "
447 "for DMA ringmemory\n");
448 return NULL;
449 }
450 if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
451 /* The memory does not fit our device constraints.
452 * Retry with GFP_DMA set to get lower memory. */
453 b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
454 base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
455 GFP_KERNEL | GFP_DMA);
456 if (!base) {
457 b43err(ring->dev->wl, "Failed to allocate or map pages "
458 "in the GFP_DMA region for DMA ringmemory\n");
459 return NULL;
460 }
461 if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
462 b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
463 b43err(ring->dev->wl, "Failed to allocate DMA "
464 "ringmemory that fits device constraints\n");
465 return NULL;
466 }
467 }
468 /* We expect the memory to be 4k aligned, at least. */
469 if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
470 b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
471 return NULL;
472 }
473
474 return base;
475}
476
477static int alloc_ringmemory(struct b43_dmaring *ring) 386static int alloc_ringmemory(struct b43_dmaring *ring)
478{ 387{
479 unsigned int required; 388 gfp_t flags = GFP_KERNEL;
480 void *base; 389
481 dma_addr_t dmaaddr; 390 /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
482 391 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
483 /* There are several requirements to the descriptor ring memory: 392 * has shown that 4K is sufficient for the latter as long as the buffer
484 * - The memory region needs to fit the address constraints for the 393 * does not cross an 8K boundary.
485 * device (same as for frame buffers). 394 *
486 * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned. 395 * For unknown reasons - possibly a hardware error - the BCM4311 rev
487 * - For 64bit DMA devices, the descriptor ring must be 8k aligned. 396 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
397 * which accounts for the GFP_DMA flag below.
398 *
399 * The flags here must match the flags in free_ringmemory below!
488 */ 400 */
489
490 if (ring->type == B43_DMA_64BIT) 401 if (ring->type == B43_DMA_64BIT)
491 required = ring->nr_slots * sizeof(struct b43_dmadesc64); 402 flags |= GFP_DMA;
492 else 403 ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
493 required = ring->nr_slots * sizeof(struct b43_dmadesc32); 404 B43_DMA_RINGMEMSIZE,
494 if (B43_WARN_ON(required > 0x1000)) 405 &(ring->dmabase), flags);
406 if (!ring->descbase) {
407 b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
495 return -ENOMEM; 408 return -ENOMEM;
496
497 ring->alloc_descsize = 0x1000;
498 base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
499 if (!base)
500 return -ENOMEM;
501 ring->alloc_descbase = base;
502 ring->alloc_dmabase = dmaaddr;
503
504 if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
505 /* We're on <=32bit DMA, or we already got 8k aligned memory.
506 * That's all we need, so we're fine. */
507 ring->descbase = base;
508 ring->dmabase = dmaaddr;
509 return 0;
510 }
511 b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);
512
513 /* Ok, we failed at the 8k alignment requirement.
514 * Try to force-align the memory region now. */
515 ring->alloc_descsize = 0x2000;
516 base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
517 if (!base)
518 return -ENOMEM;
519 ring->alloc_descbase = base;
520 ring->alloc_dmabase = dmaaddr;
521
522 if (is_8k_aligned(dmaaddr)) {
523 /* We're already 8k aligned. That Ok, too. */
524 ring->descbase = base;
525 ring->dmabase = dmaaddr;
526 return 0;
527 } 409 }
528 /* Force-align it to 8k */ 410 memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
529 ring->descbase = (void *)((u8 *)base + 0x1000);
530 ring->dmabase = dmaaddr + 0x1000;
531 B43_WARN_ON(!is_8k_aligned(ring->dmabase));
532 411
533 return 0; 412 return 0;
534} 413}
535 414
536static void free_ringmemory(struct b43_dmaring *ring) 415static void free_ringmemory(struct b43_dmaring *ring)
537{ 416{
538 b43_unmap_and_free_ringmem(ring, ring->alloc_descbase, 417 gfp_t flags = GFP_KERNEL;
539 ring->alloc_dmabase, ring->alloc_descsize); 418
419 if (ring->type == B43_DMA_64BIT)
420 flags |= GFP_DMA;
421
422 ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
423 ring->descbase, ring->dmabase, flags);
540} 424}
541 425
542/* Reset the RX DMA channel */ 426/* Reset the RX DMA channel */
@@ -646,14 +530,29 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
646 if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr))) 530 if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
647 return 1; 531 return 1;
648 532
649 if (!b43_dma_address_ok(ring, addr, buffersize)) { 533 switch (ring->type) {
650 /* We can't support this address. Unmap it again. */ 534 case B43_DMA_30BIT:
651 unmap_descbuffer(ring, addr, buffersize, dma_to_device); 535 if ((u64)addr + buffersize > (1ULL << 30))
652 return 1; 536 goto address_error;
537 break;
538 case B43_DMA_32BIT:
539 if ((u64)addr + buffersize > (1ULL << 32))
540 goto address_error;
541 break;
542 case B43_DMA_64BIT:
543 /* Currently we can't have addresses beyond
544 * 64bit in the kernel. */
545 break;
653 } 546 }
654 547
655 /* The address is OK. */ 548 /* The address is OK. */
656 return 0; 549 return 0;
550
551address_error:
552 /* We can't support this address. Unmap it again. */
553 unmap_descbuffer(ring, addr, buffersize, dma_to_device);
554
555 return 1;
657} 556}
658 557
659static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb) 558static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
@@ -715,9 +614,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
715 meta->dmaaddr = dmaaddr; 614 meta->dmaaddr = dmaaddr;
716 ring->ops->fill_descriptor(ring, desc, dmaaddr, 615 ring->ops->fill_descriptor(ring, desc, dmaaddr,
717 ring->rx_buffersize, 0, 0, 0); 616 ring->rx_buffersize, 0, 0, 0);
718 ssb_dma_sync_single_for_device(ring->dev->dev,
719 ring->alloc_dmabase,
720 ring->alloc_descsize, DMA_TO_DEVICE);
721 617
722 return 0; 618 return 0;
723} 619}
@@ -1354,9 +1250,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1354 } 1250 }
1355 /* Now transfer the whole frame. */ 1251 /* Now transfer the whole frame. */
1356 wmb(); 1252 wmb();
1357 ssb_dma_sync_single_for_device(ring->dev->dev,
1358 ring->alloc_dmabase,
1359 ring->alloc_descsize, DMA_TO_DEVICE);
1360 ops->poke_tx(ring, next_slot(ring, slot)); 1253 ops->poke_tx(ring, next_slot(ring, slot));
1361 return 0; 1254 return 0;
1362 1255
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index e607b392314c..f7ab37c4cdbc 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -157,6 +157,7 @@ struct b43_dmadesc_generic {
157} __attribute__ ((__packed__)); 157} __attribute__ ((__packed__));
158 158
159/* Misc DMA constants */ 159/* Misc DMA constants */
160#define B43_DMA_RINGMEMSIZE PAGE_SIZE
160#define B43_DMA0_RX_FRAMEOFFSET 30 161#define B43_DMA0_RX_FRAMEOFFSET 30
161 162
162/* DMA engine tuning knobs */ 163/* DMA engine tuning knobs */
@@ -246,12 +247,6 @@ struct b43_dmaring {
246 /* The QOS priority assigned to this ring. Only used for TX rings. 247 /* The QOS priority assigned to this ring. Only used for TX rings.
247 * This is the mac80211 "queue" value. */ 248 * This is the mac80211 "queue" value. */
248 u8 queue_prio; 249 u8 queue_prio;
249 /* Pointers and size of the originally allocated and mapped memory
250 * region for the descriptor ring. */
251 void *alloc_descbase;
252 dma_addr_t alloc_dmabase;
253 unsigned int alloc_descsize;
254 /* Pointer to our wireless device. */
255 struct b43_wldev *dev; 250 struct b43_wldev *dev;
256#ifdef CONFIG_B43_DEBUG 251#ifdef CONFIG_B43_DEBUG
257 /* Maximum number of used slots. */ 252 /* Maximum number of used slots. */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 7da1dab933d9..234891d8cc10 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -681,19 +681,13 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
681 snr = rx_stats_sig_avg / rx_stats_noise_diff; 681 snr = rx_stats_sig_avg / rx_stats_noise_diff;
682 rx_status.noise = rx_status.signal - 682 rx_status.noise = rx_status.signal -
683 iwl3945_calc_db_from_ratio(snr); 683 iwl3945_calc_db_from_ratio(snr);
684 rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal,
685 rx_status.noise);
686
687 /* If noise info not available, calculate signal quality indicator (%)
688 * using just the dBm signal level. */
689 } else { 684 } else {
690 rx_status.noise = priv->last_rx_noise; 685 rx_status.noise = priv->last_rx_noise;
691 rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal, 0);
692 } 686 }
693 687
694 688
695 IWL_DEBUG_STATS(priv, "Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n", 689 IWL_DEBUG_STATS(priv, "Rssi %d noise %d sig_avg %d noise_diff %d\n",
696 rx_status.signal, rx_status.noise, rx_status.qual, 690 rx_status.signal, rx_status.noise,
697 rx_stats_sig_avg, rx_stats_noise_diff); 691 rx_stats_sig_avg, rx_stats_noise_diff);
698 692
699 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); 693 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
@@ -1835,8 +1829,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
1835 rc = -EIO; 1829 rc = -EIO;
1836 } 1830 }
1837 1831
1838 priv->alloc_rxb_page--; 1832 iwl_free_pages(priv, cmd.reply_page);
1839 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
1840 1833
1841 return rc; 1834 return rc;
1842} 1835}
@@ -2836,6 +2829,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
2836 .use_isr_legacy = true, 2829 .use_isr_legacy = true,
2837 .ht_greenfield_support = false, 2830 .ht_greenfield_support = false,
2838 .led_compensation = 64, 2831 .led_compensation = 64,
2832 .broken_powersave = true,
2839}; 2833};
2840 2834
2841static struct iwl_cfg iwl3945_abg_cfg = { 2835static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2852,6 +2846,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
2852 .use_isr_legacy = true, 2846 .use_isr_legacy = true,
2853 .ht_greenfield_support = false, 2847 .ht_greenfield_support = false,
2854 .led_compensation = 64, 2848 .led_compensation = 64,
2849 .broken_powersave = true,
2855}; 2850};
2856 2851
2857struct pci_device_id iwl3945_hw_card_ids[] = { 2852struct pci_device_id iwl3945_hw_card_ids[] = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index ecc23ec1f6a4..531fa125f5a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -222,7 +222,6 @@ struct iwl3945_ibss_seq {
222 * 222 *
223 *****************************************************************************/ 223 *****************************************************************************/
224extern int iwl3945_calc_db_from_ratio(int sig_ratio); 224extern int iwl3945_calc_db_from_ratio(int sig_ratio);
225extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm);
226extern void iwl3945_rx_replenish(void *data); 225extern void iwl3945_rx_replenish(void *data);
227extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 226extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
228extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, 227extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 386513b601f5..484c5fdf7c2a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -1204,7 +1204,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
1204 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info); 1204 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
1205 1205
1206 /* calculate tx gain adjustment based on power supply voltage */ 1206 /* calculate tx gain adjustment based on power supply voltage */
1207 voltage = priv->calib_info->voltage; 1207 voltage = le16_to_cpu(priv->calib_info->voltage);
1208 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage); 1208 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
1209 voltage_compensation = 1209 voltage_compensation =
1210 iwl4965_get_voltage_compensation(voltage, init_voltage); 1210 iwl4965_get_voltage_compensation(voltage, init_voltage);
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 4ef6804a455a..bc056e9ab85f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -92,11 +92,15 @@
92 92
93static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) 93static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
94{ 94{
95 u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv, 95 u16 temperature, voltage;
96 EEPROM_5000_TEMPERATURE); 96 __le16 *temp_calib =
97 /* offset = temperature - voltage / coef */ 97 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);
98 s32 offset = (s32)(temp_calib[0] - temp_calib[1] / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); 98
99 return offset; 99 temperature = le16_to_cpu(temp_calib[0]);
100 voltage = le16_to_cpu(temp_calib[1]);
101
102 /* offset = temp - volt / coeff */
103 return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
100} 104}
101 105
102/* Fixed (non-configurable) rx data from phy */ 106/* Fixed (non-configurable) rx data from phy */
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index e2f8615c8c9b..33a5866538e7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -333,14 +333,15 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
333static int iwl5000_set_Xtal_calib(struct iwl_priv *priv) 333static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
334{ 334{
335 struct iwl_calib_xtal_freq_cmd cmd; 335 struct iwl_calib_xtal_freq_cmd cmd;
336 u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL); 336 __le16 *xtal_calib =
337 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
337 338
338 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD; 339 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
339 cmd.hdr.first_group = 0; 340 cmd.hdr.first_group = 0;
340 cmd.hdr.groups_num = 1; 341 cmd.hdr.groups_num = 1;
341 cmd.hdr.data_valid = 1; 342 cmd.hdr.data_valid = 1;
342 cmd.cap_pin1 = (u8)xtal_calib[0]; 343 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
343 cmd.cap_pin2 = (u8)xtal_calib[1]; 344 cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
344 return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL], 345 return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
345 (u8 *)&cmd, sizeof(cmd)); 346 (u8 *)&cmd, sizeof(cmd));
346} 347}
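The iwl-5000-hw.h and iwl-5000.c hunks (and the eeprom changes further down) are all the same fix: the EEPROM/OTP image is stored in the card's little-endian byte order, so 16-bit calibration words must go through le16_to_cpu() before use instead of being cast to a native u16, which only happened to work on little-endian hosts. The conversion itself is trivial; a standalone sketch with the byte handling written out (the kernel's __le16/le16_to_cpu add sparse type-checking on top of this):

        #include <stdio.h>
        #include <stdint.h>

        /* Read a little-endian 16-bit word from raw EEPROM bytes,
         * independent of the host's byte order. */
        static uint16_t le16_read(const uint8_t *p)
        {
                return (uint16_t)(p[0] | (p[1] << 8));
        }

        int main(void)
        {
                /* Illustrative temperature/voltage calibration pair as it
                 * would sit in the image: 0x012c (300) then 0x0010 (16). */
                uint8_t eeprom[4] = { 0x2c, 0x01, 0x10, 0x00 };

                printf("temperature=%u voltage=%u\n",
                       le16_read(&eeprom[0]), le16_read(&eeprom[2]));
                return 0;
        }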
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index fe511cbf012e..b93e49158196 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -150,7 +150,7 @@ static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
150}; 150};
151 151
152/* mbps, mcs */ 152/* mbps, mcs */
153const static struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { 153static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
154 { "1", "BPSK DSSS"}, 154 { "1", "BPSK DSSS"},
155 { "2", "QPSK DSSS"}, 155 { "2", "QPSK DSSS"},
156 {"5.5", "BPSK CCK"}, 156 {"5.5", "BPSK CCK"},
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index b8377efb3ba7..1c9866daf815 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1842,7 +1842,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1842 } 1842 }
1843 1843
1844#ifdef CONFIG_IWLWIFI_DEBUG 1844#ifdef CONFIG_IWLWIFI_DEBUG
1845 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)) 1845 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1846 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) 1846 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1847 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; 1847 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1848#else 1848#else
@@ -3173,7 +3173,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
3173 3173
3174 priv->ibss_beacon = NULL; 3174 priv->ibss_beacon = NULL;
3175 3175
3176 spin_lock_init(&priv->lock);
3177 spin_lock_init(&priv->sta_lock); 3176 spin_lock_init(&priv->sta_lock);
3178 spin_lock_init(&priv->hcmd_lock); 3177 spin_lock_init(&priv->hcmd_lock);
3179 3178
@@ -3361,10 +3360,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3361 (unsigned long long) pci_resource_len(pdev, 0)); 3360 (unsigned long long) pci_resource_len(pdev, 0));
3362 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); 3361 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
3363 3362
3364 /* this spin lock will be used in apm_ops.init and EEPROM access 3363 /* these spin locks will be used in apm_ops.init and EEPROM access
3365 * we should init now 3364 * we should init now
3366 */ 3365 */
3367 spin_lock_init(&priv->reg_lock); 3366 spin_lock_init(&priv->reg_lock);
3367 spin_lock_init(&priv->lock);
3368 iwl_hw_detect(priv); 3368 iwl_hw_detect(priv);
3369 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n", 3369 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
3370 priv->cfg->name, priv->hw_rev); 3370 priv->cfg->name, priv->hw_rev);
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index a7bfae01f19b..1ec8cb4d5eae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -77,8 +77,7 @@
77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing 77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing
78 * the CSR registers. 78 * the CSR registers.
79 * 79 *
80 * NOTE: Newer devices using one-time-programmable (OTP) memory 80 * NOTE: Device does need to be awake in order to read this memory
81 * require device to be awake in order to read this memory
82 * via CSR_EEPROM and CSR_OTP registers 81 * via CSR_EEPROM and CSR_OTP registers
83 */ 82 */
84#define CSR_BASE (0x000) 83#define CSR_BASE (0x000)
@@ -111,9 +110,8 @@
111/* 110/*
112 * EEPROM and OTP (one-time-programmable) memory reads 111 * EEPROM and OTP (one-time-programmable) memory reads
113 * 112 *
114 * NOTE: For (newer) devices using OTP, device must be awake, initialized via 113 * NOTE: Device must be awake, initialized via apm_ops.init(),
115 * apm_ops.init() in order to read. Older devices (3945/4965/5000) 114 * in order to read.
116 * use EEPROM and do not require this.
117 */ 115 */
118#define CSR_EEPROM_REG (CSR_BASE+0x02c) 116#define CSR_EEPROM_REG (CSR_BASE+0x02c)
119#define CSR_EEPROM_GP (CSR_BASE+0x030) 117#define CSR_EEPROM_GP (CSR_BASE+0x030)
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 2673e9a4db92..165d1f6e2dd9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1168,7 +1168,7 @@ struct iwl_priv {
1168 u32 last_beacon_time; 1168 u32 last_beacon_time;
1169 u64 last_tsf; 1169 u64 last_tsf;
1170 1170
1171 /* eeprom */ 1171 /* eeprom -- this is in the card's little endian byte order */
1172 u8 *eeprom; 1172 u8 *eeprom;
1173 int nvm_device_type; 1173 int nvm_device_type;
1174 struct iwl_eeprom_calib_info *calib_info; 1174 struct iwl_eeprom_calib_info *calib_info;
@@ -1353,4 +1353,15 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch)
1353 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0; 1353 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
1354} 1354}
1355 1355
1356static inline void __iwl_free_pages(struct iwl_priv *priv, struct page *page)
1357{
1358 __free_pages(page, priv->hw_params.rx_page_order);
1359 priv->alloc_rxb_page--;
1360}
1361
1362static inline void iwl_free_pages(struct iwl_priv *priv, unsigned long page)
1363{
1364 free_pages(page, priv->hw_params.rx_page_order);
1365 priv->alloc_rxb_page--;
1366}
1356#endif /* __iwl_dev_h__ */ 1367#endif /* __iwl_dev_h__ */
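
The two inline helpers added here pair the page free with the priv->alloc_rxb_page accounting, so the later hunks (iwl-hcmd.c, iwl-rx.c, iwl-scan.c, iwl-sta.c, iwl3945-base.c) can replace open-coded free_pages()/__free_pages() plus counter updates with a single call. A small userspace sketch of the same pattern, with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    struct tracker {
        int outstanding;            /* analogue of priv->alloc_rxb_page */
    };

    static void *tracked_alloc(struct tracker *t, size_t size)
    {
        void *p = malloc(size);
        if (p)
            t->outstanding++;
        return p;
    }

    /* The release and its bookkeeping always travel together. */
    static void tracked_free(struct tracker *t, void *p)
    {
        if (!p)
            return;
        free(p);
        t->outstanding--;
    }

    int main(void)
    {
        struct tracker t = { 0 };
        void *buf = tracked_alloc(&t, 4096);

        tracked_free(&t, buf);
        printf("outstanding: %d\n", t.outstanding);
        return 0;
    }
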
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 3946e5c03f81..4a30969689ff 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -370,7 +370,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
370 return ret; 370 return ret;
371} 371}
372 372
373static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data) 373static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
374{ 374{
375 int ret = 0; 375 int ret = 0;
376 u32 r; 376 u32 r;
@@ -404,7 +404,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
404 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); 404 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
405 IWL_ERR(priv, "Correctable OTP ECC error, continue read\n"); 405 IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
406 } 406 }
407 *eeprom_data = le16_to_cpu((__force __le16)(r >> 16)); 407 *eeprom_data = cpu_to_le16(r >> 16);
408 return 0; 408 return 0;
409} 409}
410 410
@@ -413,7 +413,8 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
413 */ 413 */
414static bool iwl_is_otp_empty(struct iwl_priv *priv) 414static bool iwl_is_otp_empty(struct iwl_priv *priv)
415{ 415{
416 u16 next_link_addr = 0, link_value; 416 u16 next_link_addr = 0;
417 __le16 link_value;
417 bool is_empty = false; 418 bool is_empty = false;
418 419
419 /* locate the beginning of OTP link list */ 420 /* locate the beginning of OTP link list */
@@ -443,7 +444,8 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv)
443static int iwl_find_otp_image(struct iwl_priv *priv, 444static int iwl_find_otp_image(struct iwl_priv *priv,
444 u16 *validblockaddr) 445 u16 *validblockaddr)
445{ 446{
446 u16 next_link_addr = 0, link_value = 0, valid_addr; 447 u16 next_link_addr = 0, valid_addr;
448 __le16 link_value = 0;
447 int usedblocks = 0; 449 int usedblocks = 0;
448 450
449 /* set addressing mode to absolute to traverse the link list */ 451 /* set addressing mode to absolute to traverse the link list */
@@ -463,7 +465,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
463 * check for more block on the link list 465 * check for more block on the link list
464 */ 466 */
465 valid_addr = next_link_addr; 467 valid_addr = next_link_addr;
466 next_link_addr = link_value * sizeof(u16); 468 next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
467 IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n", 469 IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n",
468 usedblocks, next_link_addr); 470 usedblocks, next_link_addr);
469 if (iwl_read_otp_word(priv, next_link_addr, &link_value)) 471 if (iwl_read_otp_word(priv, next_link_addr, &link_value))
@@ -497,7 +499,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
497 */ 499 */
498int iwl_eeprom_init(struct iwl_priv *priv) 500int iwl_eeprom_init(struct iwl_priv *priv)
499{ 501{
500 u16 *e; 502 __le16 *e;
501 u32 gp = iwl_read32(priv, CSR_EEPROM_GP); 503 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
502 int sz; 504 int sz;
503 int ret; 505 int ret;
@@ -516,12 +518,9 @@ int iwl_eeprom_init(struct iwl_priv *priv)
516 ret = -ENOMEM; 518 ret = -ENOMEM;
517 goto alloc_err; 519 goto alloc_err;
518 } 520 }
519 e = (u16 *)priv->eeprom; 521 e = (__le16 *)priv->eeprom;
520 522
521 if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) { 523 priv->cfg->ops->lib->apm_ops.init(priv);
522 /* OTP reads require powered-up chip */
523 priv->cfg->ops->lib->apm_ops.init(priv);
524 }
525 524
526 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv); 525 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
527 if (ret < 0) { 526 if (ret < 0) {
@@ -562,7 +561,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
562 } 561 }
563 for (addr = validblockaddr; addr < validblockaddr + sz; 562 for (addr = validblockaddr; addr < validblockaddr + sz;
564 addr += sizeof(u16)) { 563 addr += sizeof(u16)) {
565 u16 eeprom_data; 564 __le16 eeprom_data;
566 565
567 ret = iwl_read_otp_word(priv, addr, &eeprom_data); 566 ret = iwl_read_otp_word(priv, addr, &eeprom_data);
568 if (ret) 567 if (ret)
@@ -570,13 +569,6 @@ int iwl_eeprom_init(struct iwl_priv *priv)
570 e[cache_addr / 2] = eeprom_data; 569 e[cache_addr / 2] = eeprom_data;
571 cache_addr += sizeof(u16); 570 cache_addr += sizeof(u16);
572 } 571 }
573
574 /*
575 * Now that OTP reads are complete, reset chip to save
576 * power until we load uCode during "up".
577 */
578 priv->cfg->ops->lib->apm_ops.stop(priv);
579
580 } else { 572 } else {
581 /* eeprom is an array of 16bit values */ 573 /* eeprom is an array of 16bit values */
582 for (addr = 0; addr < sz; addr += sizeof(u16)) { 574 for (addr = 0; addr < sz; addr += sizeof(u16)) {
@@ -594,7 +586,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
594 goto done; 586 goto done;
595 } 587 }
596 r = _iwl_read_direct32(priv, CSR_EEPROM_REG); 588 r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
597 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16)); 589 e[addr / 2] = cpu_to_le16(r >> 16);
598 } 590 }
599 } 591 }
600 ret = 0; 592 ret = 0;
@@ -603,6 +595,8 @@ done:
603err: 595err:
604 if (ret) 596 if (ret)
605 iwl_eeprom_free(priv); 597 iwl_eeprom_free(priv);
598 /* Reset chip to save power until we load uCode during "up". */
599 priv->cfg->ops->lib->apm_ops.stop(priv);
606alloc_err: 600alloc_err:
607 return ret; 601 return ret;
608} 602}
@@ -755,7 +749,8 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
755 ch_info->ht40_eeprom = *eeprom_ch; 749 ch_info->ht40_eeprom = *eeprom_ch;
756 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg; 750 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
757 ch_info->ht40_flags = eeprom_ch->flags; 751 ch_info->ht40_flags = eeprom_ch->flags;
758 ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel; 752 if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
753 ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
759 754
760 return 0; 755 return 0;
761} 756}
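
The changes in this file keep the cached EEPROM/OTP image in the card's own little-endian byte order (__le16 cells, filled with cpu_to_le16()) and defer le16_to_cpu() to the points where individual words are consumed, so the cached image has the same layout on big- and little-endian hosts; they also run apm_ops.init() unconditionally before the read and move apm_ops.stop() to the common exit path. A standalone sketch of pulling 16-bit little-endian words out of such a cached image (illustration only, not the driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Portable userspace equivalent of le16_to_cpu() on a byte buffer. */
    static uint16_t get_le16(const uint8_t *p)
    {
        return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
    }

    int main(void)
    {
        /* Pretend this is a little-endian NVM image cached from the device. */
        const uint8_t image[4] = { 0x34, 0x12, 0x00, 0x80 };

        printf("word 0 = 0x%04x\n", get_le16(&image[0]));  /* 0x1234 */
        printf("word 1 = 0x%04x\n", get_le16(&image[2]));  /* 0x8000 */
        return 0;
    }
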
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 5cd2b66bbe45..0cd9c02ee044 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -137,7 +137,7 @@ struct iwl_eeprom_channel {
137 * 137 *
138 */ 138 */
139struct iwl_eeprom_enhanced_txpwr { 139struct iwl_eeprom_enhanced_txpwr {
140 u16 common; 140 __le16 common;
141 s8 chain_a_max; 141 s8 chain_a_max;
142 s8 chain_b_max; 142 s8 chain_b_max;
143 s8 chain_c_max; 143 s8 chain_c_max;
@@ -360,7 +360,7 @@ struct iwl_eeprom_calib_subband_info {
360struct iwl_eeprom_calib_info { 360struct iwl_eeprom_calib_info {
361 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */ 361 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
362 u8 saturation_power52; /* half-dBm */ 362 u8 saturation_power52; /* half-dBm */
363 s16 voltage; /* signed */ 363 __le16 voltage; /* signed */
364 struct iwl_eeprom_calib_subband_info 364 struct iwl_eeprom_calib_subband_info
365 band_info[EEPROM_TX_POWER_BANDS]; 365 band_info[EEPROM_TX_POWER_BANDS];
366} __attribute__ ((packed)); 366} __attribute__ ((packed));
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index a23165948202..30e9ea6d54ec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -234,7 +234,7 @@ cancel:
234 } 234 }
235fail: 235fail:
236 if (cmd->reply_page) { 236 if (cmd->reply_page) {
237 free_pages(cmd->reply_page, priv->hw_params.rx_page_order); 237 iwl_free_pages(priv, cmd->reply_page);
238 cmd->reply_page = 0; 238 cmd->reply_page = 0;
239 } 239 }
240out: 240out:
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 6090bc15a6d5..6f36b6e79f5e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -345,10 +345,8 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
345 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 345 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
346 PAGE_SIZE << priv->hw_params.rx_page_order, 346 PAGE_SIZE << priv->hw_params.rx_page_order,
347 PCI_DMA_FROMDEVICE); 347 PCI_DMA_FROMDEVICE);
348 __free_pages(rxq->pool[i].page, 348 __iwl_free_pages(priv, rxq->pool[i].page);
349 priv->hw_params.rx_page_order);
350 rxq->pool[i].page = NULL; 349 rxq->pool[i].page = NULL;
351 priv->alloc_rxb_page--;
352 } 350 }
353 } 351 }
354 352
@@ -416,9 +414,7 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
416 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 414 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
417 PAGE_SIZE << priv->hw_params.rx_page_order, 415 PAGE_SIZE << priv->hw_params.rx_page_order,
418 PCI_DMA_FROMDEVICE); 416 PCI_DMA_FROMDEVICE);
419 priv->alloc_rxb_page--; 417 __iwl_free_pages(priv, rxq->pool[i].page);
420 __free_pages(rxq->pool[i].page,
421 priv->hw_params.rx_page_order);
422 rxq->pool[i].page = NULL; 418 rxq->pool[i].page = NULL;
423 } 419 }
424 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 420 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -654,47 +650,6 @@ void iwl_reply_statistics(struct iwl_priv *priv,
654} 650}
655EXPORT_SYMBOL(iwl_reply_statistics); 651EXPORT_SYMBOL(iwl_reply_statistics);
656 652
657#define PERFECT_RSSI (-20) /* dBm */
658#define WORST_RSSI (-95) /* dBm */
659#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
660
661/* Calculate an indication of rx signal quality (a percentage, not dBm!).
662 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
663 * about formulas used below. */
664static int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm)
665{
666 int sig_qual;
667 int degradation = PERFECT_RSSI - rssi_dbm;
668
669 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
670 * as indicator; formula is (signal dbm - noise dbm).
671 * SNR at or above 40 is a great signal (100%).
672 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
673 * Weakest usable signal is usually 10 - 15 dB SNR. */
674 if (noise_dbm) {
675 if (rssi_dbm - noise_dbm >= 40)
676 return 100;
677 else if (rssi_dbm < noise_dbm)
678 return 0;
679 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
680
681 /* Else use just the signal level.
682 * This formula is a least squares fit of data points collected and
683 * compared with a reference system that had a percentage (%) display
684 * for signal quality. */
685 } else
686 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
687 (15 * RSSI_RANGE + 62 * degradation)) /
688 (RSSI_RANGE * RSSI_RANGE);
689
690 if (sig_qual > 100)
691 sig_qual = 100;
692 else if (sig_qual < 1)
693 sig_qual = 0;
694
695 return sig_qual;
696}
697
698/* Calc max signal level (dBm) among 3 possible receivers */ 653/* Calc max signal level (dBm) among 3 possible receivers */
699static inline int iwl_calc_rssi(struct iwl_priv *priv, 654static inline int iwl_calc_rssi(struct iwl_priv *priv,
700 struct iwl_rx_phy_res *rx_resp) 655 struct iwl_rx_phy_res *rx_resp)
@@ -1105,11 +1060,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1105 if (iwl_is_associated(priv) && 1060 if (iwl_is_associated(priv) &&
1106 !test_bit(STATUS_SCANNING, &priv->status)) { 1061 !test_bit(STATUS_SCANNING, &priv->status)) {
1107 rx_status.noise = priv->last_rx_noise; 1062 rx_status.noise = priv->last_rx_noise;
1108 rx_status.qual = iwl_calc_sig_qual(rx_status.signal,
1109 rx_status.noise);
1110 } else { 1063 } else {
1111 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE; 1064 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
1112 rx_status.qual = iwl_calc_sig_qual(rx_status.signal, 0);
1113 } 1065 }
1114 1066
1115 /* Reset beacon noise level if not associated. */ 1067 /* Reset beacon noise level if not associated. */
@@ -1122,8 +1074,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1122 iwl_dbg_report_frame(priv, phy_res, len, header, 1); 1074 iwl_dbg_report_frame(priv, phy_res, len, header, 1);
1123#endif 1075#endif
1124 iwl_dbg_log_rx_data_frame(priv, len, header); 1076 iwl_dbg_log_rx_data_frame(priv, len, header);
1125 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, qual %d, TSF %llu\n", 1077 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
1126 rx_status.signal, rx_status.noise, rx_status.qual, 1078 rx_status.signal, rx_status.noise,
1127 (unsigned long long)rx_status.mactime); 1079 (unsigned long long)rx_status.mactime);
1128 1080
1129 /* 1081 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index a2b2b8315ff9..fa1c89ba6459 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -144,8 +144,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
144 clear_bit(STATUS_SCAN_HW, &priv->status); 144 clear_bit(STATUS_SCAN_HW, &priv->status);
145 } 145 }
146 146
147 priv->alloc_rxb_page--; 147 iwl_free_pages(priv, cmd.reply_page);
148 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
149 148
150 return ret; 149 return ret;
151} 150}
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index cd6a6901216e..cde09a890b73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -164,9 +164,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
164 break; 164 break;
165 } 165 }
166 } 166 }
167 167 iwl_free_pages(priv, cmd.reply_page);
168 priv->alloc_rxb_page--;
169 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
170 168
171 return ret; 169 return ret;
172} 170}
@@ -391,9 +389,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
391 break; 389 break;
392 } 390 }
393 } 391 }
394 392 iwl_free_pages(priv, cmd.reply_page);
395 priv->alloc_rxb_page--;
396 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
397 393
398 return ret; 394 return ret;
399} 395}
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 00da5e152d46..87ce2bd292c7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -407,13 +407,14 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
407 int txq_id; 407 int txq_id;
408 408
409 /* Tx queues */ 409 /* Tx queues */
410 if (priv->txq) 410 if (priv->txq) {
411 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; 411 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
412 txq_id++) 412 txq_id++)
413 if (txq_id == IWL_CMD_QUEUE_NUM) 413 if (txq_id == IWL_CMD_QUEUE_NUM)
414 iwl_cmd_queue_free(priv); 414 iwl_cmd_queue_free(priv);
415 else 415 else
416 iwl_tx_queue_free(priv, txq_id); 416 iwl_tx_queue_free(priv, txq_id);
417 }
417 iwl_free_dma_ptr(priv, &priv->kw); 418 iwl_free_dma_ptr(priv, &priv->kw);
418 419
419 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); 420 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 2a28a1f8b1fe..f8e4e4b18d02 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -548,6 +548,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
548 txq = &priv->txq[txq_id]; 548 txq = &priv->txq[txq_id];
549 q = &txq->q; 549 q = &txq->q;
550 550
551 if ((iwl_queue_space(q) < q->high_mark))
552 goto drop;
553
551 spin_lock_irqsave(&priv->lock, flags); 554 spin_lock_irqsave(&priv->lock, flags);
552 555
553 idx = get_cmd_index(q, q->write_ptr, 0); 556 idx = get_cmd_index(q, q->write_ptr, 0);
@@ -812,7 +815,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
812 break; 815 break;
813 } 816 }
814 817
815 free_pages(cmd.reply_page, priv->hw_params.rx_page_order); 818 iwl_free_pages(priv, cmd.reply_page);
816 819
817 return rc; 820 return rc;
818} 821}
@@ -1198,9 +1201,7 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1198 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 1201 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1199 PAGE_SIZE << priv->hw_params.rx_page_order, 1202 PAGE_SIZE << priv->hw_params.rx_page_order,
1200 PCI_DMA_FROMDEVICE); 1203 PCI_DMA_FROMDEVICE);
1201 priv->alloc_rxb_page--; 1204 __iwl_free_pages(priv, rxq->pool[i].page);
1202 __free_pages(rxq->pool[i].page,
1203 priv->hw_params.rx_page_order);
1204 rxq->pool[i].page = NULL; 1205 rxq->pool[i].page = NULL;
1205 } 1206 }
1206 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 1207 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -1247,10 +1248,8 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
1247 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 1248 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1248 PAGE_SIZE << priv->hw_params.rx_page_order, 1249 PAGE_SIZE << priv->hw_params.rx_page_order,
1249 PCI_DMA_FROMDEVICE); 1250 PCI_DMA_FROMDEVICE);
1250 __free_pages(rxq->pool[i].page, 1251 __iwl_free_pages(priv, rxq->pool[i].page);
1251 priv->hw_params.rx_page_order);
1252 rxq->pool[i].page = NULL; 1252 rxq->pool[i].page = NULL;
1253 priv->alloc_rxb_page--;
1254 } 1253 }
1255 } 1254 }
1256 1255
@@ -1300,47 +1299,6 @@ int iwl3945_calc_db_from_ratio(int sig_ratio)
1300 return (int)ratio2dB[sig_ratio]; 1299 return (int)ratio2dB[sig_ratio];
1301} 1300}
1302 1301
1303#define PERFECT_RSSI (-20) /* dBm */
1304#define WORST_RSSI (-95) /* dBm */
1305#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
1306
1307/* Calculate an indication of rx signal quality (a percentage, not dBm!).
1308 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
1309 * about formulas used below. */
1310int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm)
1311{
1312 int sig_qual;
1313 int degradation = PERFECT_RSSI - rssi_dbm;
1314
1315 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
1316 * as indicator; formula is (signal dbm - noise dbm).
1317 * SNR at or above 40 is a great signal (100%).
1318 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
1319 * Weakest usable signal is usually 10 - 15 dB SNR. */
1320 if (noise_dbm) {
1321 if (rssi_dbm - noise_dbm >= 40)
1322 return 100;
1323 else if (rssi_dbm < noise_dbm)
1324 return 0;
1325 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
1326
1327 /* Else use just the signal level.
1328 * This formula is a least squares fit of data points collected and
1329 * compared with a reference system that had a percentage (%) display
1330 * for signal quality. */
1331 } else
1332 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
1333 (15 * RSSI_RANGE + 62 * degradation)) /
1334 (RSSI_RANGE * RSSI_RANGE);
1335
1336 if (sig_qual > 100)
1337 sig_qual = 100;
1338 else if (sig_qual < 1)
1339 sig_qual = 0;
1340
1341 return sig_qual;
1342}
1343
1344/** 1302/**
1345 * iwl3945_rx_handle - Main entry function for receiving responses from uCode 1303 * iwl3945_rx_handle - Main entry function for receiving responses from uCode
1346 * 1304 *
@@ -1688,7 +1646,7 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1688 } 1646 }
1689 1647
1690#ifdef CONFIG_IWLWIFI_DEBUG 1648#ifdef CONFIG_IWLWIFI_DEBUG
1691 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)) 1649 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1692 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) 1650 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
1693 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; 1651 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
1694#else 1652#else
@@ -3867,7 +3825,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3867 priv->retry_rate = 1; 3825 priv->retry_rate = 1;
3868 priv->ibss_beacon = NULL; 3826 priv->ibss_beacon = NULL;
3869 3827
3870 spin_lock_init(&priv->lock);
3871 spin_lock_init(&priv->sta_lock); 3828 spin_lock_init(&priv->sta_lock);
3872 spin_lock_init(&priv->hcmd_lock); 3829 spin_lock_init(&priv->hcmd_lock);
3873 3830
@@ -3936,9 +3893,11 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3936 /* Tell mac80211 our characteristics */ 3893 /* Tell mac80211 our characteristics */
3937 hw->flags = IEEE80211_HW_SIGNAL_DBM | 3894 hw->flags = IEEE80211_HW_SIGNAL_DBM |
3938 IEEE80211_HW_NOISE_DBM | 3895 IEEE80211_HW_NOISE_DBM |
3939 IEEE80211_HW_SPECTRUM_MGMT | 3896 IEEE80211_HW_SPECTRUM_MGMT;
3940 IEEE80211_HW_SUPPORTS_PS | 3897
3941 IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 3898 if (!priv->cfg->broken_powersave)
3899 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
3900 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
3942 3901
3943 hw->wiphy->interface_modes = 3902 hw->wiphy->interface_modes =
3944 BIT(NL80211_IFTYPE_STATION) | 3903 BIT(NL80211_IFTYPE_STATION) |
@@ -4057,10 +4016,11 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4057 * PCI Tx retries from interfering with C3 CPU state */ 4016 * PCI Tx retries from interfering with C3 CPU state */
4058 pci_write_config_byte(pdev, 0x41, 0x00); 4017 pci_write_config_byte(pdev, 0x41, 0x00);
4059 4018
4060 /* this spin lock will be used in apm_ops.init and EEPROM access 4019 /* these spin locks will be used in apm_ops.init and EEPROM access
4061 * we should init now 4020 * we should init now
4062 */ 4021 */
4063 spin_lock_init(&priv->reg_lock); 4022 spin_lock_init(&priv->reg_lock);
4023 spin_lock_init(&priv->lock);
4064 4024
4065 /*********************** 4025 /***********************
4066 * 4. Read EEPROM 4026 * 4. Read EEPROM
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 5a26bb05a33a..842811142bef 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -268,7 +268,7 @@ struct iwm_priv {
268 268
269 struct sk_buff_head rx_list; 269 struct sk_buff_head rx_list;
270 struct list_head rx_tickets; 270 struct list_head rx_tickets;
271 struct list_head rx_packets[IWM_RX_ID_HASH]; 271 struct list_head rx_packets[IWM_RX_ID_HASH + 1];
272 struct workqueue_struct *rx_wq; 272 struct workqueue_struct *rx_wq;
273 struct work_struct rx_worker; 273 struct work_struct rx_worker;
274 274
@@ -349,7 +349,7 @@ int iwm_up(struct iwm_priv *iwm);
349int iwm_down(struct iwm_priv *iwm); 349int iwm_down(struct iwm_priv *iwm);
350 350
351/* TX API */ 351/* TX API */
352u16 iwm_tid_to_queue(u16 tid); 352int iwm_tid_to_queue(u16 tid);
353void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages); 353void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages);
354void iwm_tx_worker(struct work_struct *work); 354void iwm_tx_worker(struct work_struct *work);
355int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 355int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index e4f0f8705f65..c4c0d23c63ec 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -76,7 +76,7 @@ static int iwm_stop(struct net_device *ndev)
76 */ 76 */
77static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; 77static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
78 78
79u16 iwm_tid_to_queue(u16 tid) 79int iwm_tid_to_queue(u16 tid)
80{ 80{
81 if (tid > IWM_UMAC_TID_NR - 2) 81 if (tid > IWM_UMAC_TID_NR - 2)
82 return -EINVAL; 82 return -EINVAL;
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 1c57c1f72cba..6d6ed7485175 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -1126,7 +1126,7 @@ static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
1126 1126
1127 if (!stop) { 1127 if (!stop) {
1128 struct iwm_tx_queue *txq; 1128 struct iwm_tx_queue *txq;
1129 u16 queue = iwm_tid_to_queue(bit); 1129 int queue = iwm_tid_to_queue(bit);
1130 1130
1131 if (queue < 0) 1131 if (queue < 0)
1132 continue; 1132 continue;
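
These three iwmc3200wifi hunks fix a signedness problem: iwm_tid_to_queue() returns -EINVAL for an out-of-range TID, but with a u16 return type (and a u16 local in the rx path) the caller's "if (queue < 0)" test could never fire, so both the prototype and the local become int; the related iwm.h change also sizes rx_packets[] as IWM_RX_ID_HASH + 1, apparently so an index equal to IWM_RX_ID_HASH stays in bounds. A tiny standalone demonstration of the bug class, not the driver code:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Old shape of the helper: an unsigned type cannot carry -EINVAL. */
    static int tid_to_queue(uint16_t tid)
    {
        if (tid > 6)
            return -EINVAL;
        return tid / 2;
    }

    int main(void)
    {
        uint16_t as_u16 = (uint16_t)tid_to_queue(9);  /* -EINVAL wraps to a large value */
        int as_int = tid_to_queue(9);                 /* stays negative */

        printf("stored in u16: %u  (a \"< 0\" check can never fire)\n", as_u16);
        printf("stored in int: %d  (the caller's \"if (queue < 0)\" works)\n", as_int);
        return 0;
    }
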
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 2f91c9b808af..92b7a357a5e4 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -2,6 +2,7 @@
2#include <linux/delay.h> 2#include <linux/delay.h>
3#include <linux/etherdevice.h> 3#include <linux/etherdevice.h>
4#include <linux/netdevice.h> 4#include <linux/netdevice.h>
5#include <linux/if_ether.h>
5#include <linux/if_arp.h> 6#include <linux/if_arp.h>
6#include <linux/kthread.h> 7#include <linux/kthread.h>
7#include <linux/kfifo.h> 8#include <linux/kfifo.h>
@@ -351,8 +352,7 @@ int lbs_add_mesh(struct lbs_private *priv)
351 352
352 mesh_dev->netdev_ops = &mesh_netdev_ops; 353 mesh_dev->netdev_ops = &mesh_netdev_ops;
353 mesh_dev->ethtool_ops = &lbs_ethtool_ops; 354 mesh_dev->ethtool_ops = &lbs_ethtool_ops;
354 memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, 355 memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN);
355 sizeof(priv->dev->dev_addr));
356 356
357 SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent); 357 SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
358 358
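
In this libertas hunk the mesh interface copies ETH_ALEN bytes instead of sizeof(priv->dev->dev_addr); dev_addr is reached through a pointer in this kernel, so that sizeof() yields the pointer size (4 or 8), not the six bytes of a MAC address. A short illustration of the difference (hypothetical types, not the driver code):

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    struct fake_netdev {
        unsigned char *dev_addr;    /* the address lives behind a pointer */
    };

    int main(void)
    {
        unsigned char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        unsigned char copy[ETH_ALEN] = { 0 };
        struct fake_netdev dev = { .dev_addr = mac };

        printf("sizeof(dev.dev_addr) = %zu (pointer size)\n", sizeof(dev.dev_addr));
        printf("ETH_ALEN             = %d (what should be copied)\n", ETH_ALEN);

        memcpy(copy, dev.dev_addr, ETH_ALEN);   /* copy the address, not the pointer */
        printf("first bytes copied: %02x:%02x\n", copy[0], copy[1]);
        return 0;
    }
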
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index c6a6c042b82f..b0b1c7841500 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -567,11 +567,8 @@ int lbs_scan_networks(struct lbs_private *priv, int full_scan)
567 chan_count = lbs_scan_create_channel_list(priv, chan_list); 567 chan_count = lbs_scan_create_channel_list(priv, chan_list);
568 568
569 netif_stop_queue(priv->dev); 569 netif_stop_queue(priv->dev);
570 netif_carrier_off(priv->dev); 570 if (priv->mesh_dev)
571 if (priv->mesh_dev) {
572 netif_stop_queue(priv->mesh_dev); 571 netif_stop_queue(priv->mesh_dev);
573 netif_carrier_off(priv->mesh_dev);
574 }
575 572
576 /* Prepare to continue an interrupted scan */ 573 /* Prepare to continue an interrupted scan */
577 lbs_deb_scan("chan_count %d, scan_channel %d\n", 574 lbs_deb_scan("chan_count %d, scan_channel %d\n",
@@ -635,16 +632,13 @@ out2:
635 priv->scan_channel = 0; 632 priv->scan_channel = 0;
636 633
637out: 634out:
638 if (priv->connect_status == LBS_CONNECTED) { 635 if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
639 netif_carrier_on(priv->dev); 636 netif_wake_queue(priv->dev);
640 if (!priv->tx_pending_len) 637
641 netif_wake_queue(priv->dev); 638 if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED) &&
642 } 639 !priv->tx_pending_len)
643 if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED)) { 640 netif_wake_queue(priv->mesh_dev);
644 netif_carrier_on(priv->mesh_dev); 641
645 if (!priv->tx_pending_len)
646 netif_wake_queue(priv->mesh_dev);
647 }
648 kfree(chan_list); 642 kfree(chan_list);
649 643
650 lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); 644 lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index a8eb9e1fcf36..4b1aab593a84 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -2025,10 +2025,8 @@ static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
2025 if (priv->connect_status == LBS_CONNECTED) { 2025 if (priv->connect_status == LBS_CONNECTED) {
2026 memcpy(extra, priv->curbssparams.ssid, 2026 memcpy(extra, priv->curbssparams.ssid,
2027 priv->curbssparams.ssid_len); 2027 priv->curbssparams.ssid_len);
2028 extra[priv->curbssparams.ssid_len] = '\0';
2029 } else { 2028 } else {
2030 memset(extra, 0, 32); 2029 memset(extra, 0, 32);
2031 extra[priv->curbssparams.ssid_len] = '\0';
2032 } 2030 }
2033 /* 2031 /*
2034 * If none, we may want to get the one that was set 2032 * If none, we may want to get the one that was set
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 019431d2f8a9..26a1abd5bb03 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -495,7 +495,6 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
495 stats.band = IEEE80211_BAND_2GHZ; 495 stats.band = IEEE80211_BAND_2GHZ;
496 stats.signal = prxpd->snr; 496 stats.signal = prxpd->snr;
497 stats.noise = prxpd->nf; 497 stats.noise = prxpd->nf;
498 stats.qual = prxpd->snr - prxpd->nf;
499 /* Marvell rate index has a hole at value 4 */ 498 /* Marvell rate index has a hole at value 4 */
500 if (prxpd->rx_rate > 4) 499 if (prxpd->rx_rate > 4)
501 --prxpd->rx_rate; 500 --prxpd->rx_rate;
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 7698fdd6a3a2..31ca241f7753 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -23,7 +23,7 @@
23#define MAX_RID_LEN 1024 23#define MAX_RID_LEN 1024
24 24
25/* Helper routine to record keys 25/* Helper routine to record keys
26 * Do not call from interrupt context */ 26 * It is called under orinoco_lock so it may not sleep */
27static int orinoco_set_key(struct orinoco_private *priv, int index, 27static int orinoco_set_key(struct orinoco_private *priv, int index,
28 enum orinoco_alg alg, const u8 *key, int key_len, 28 enum orinoco_alg alg, const u8 *key, int key_len,
29 const u8 *seq, int seq_len) 29 const u8 *seq, int seq_len)
@@ -32,14 +32,14 @@ static int orinoco_set_key(struct orinoco_private *priv, int index,
32 kzfree(priv->keys[index].seq); 32 kzfree(priv->keys[index].seq);
33 33
34 if (key_len) { 34 if (key_len) {
35 priv->keys[index].key = kzalloc(key_len, GFP_KERNEL); 35 priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC);
36 if (!priv->keys[index].key) 36 if (!priv->keys[index].key)
37 goto nomem; 37 goto nomem;
38 } else 38 } else
39 priv->keys[index].key = NULL; 39 priv->keys[index].key = NULL;
40 40
41 if (seq_len) { 41 if (seq_len) {
42 priv->keys[index].seq = kzalloc(seq_len, GFP_KERNEL); 42 priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC);
43 if (!priv->keys[index].seq) 43 if (!priv->keys[index].seq)
44 goto free_key; 44 goto free_key;
45 } else 45 } else
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index c5fe867665e6..1a7eae357fef 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -1323,7 +1323,7 @@
1323#define PAIRWISE_KEY_ENTRY(__idx) \ 1323#define PAIRWISE_KEY_ENTRY(__idx) \
1324 ( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) ) 1324 ( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
1325#define MAC_IVEIV_ENTRY(__idx) \ 1325#define MAC_IVEIV_ENTRY(__idx) \
1326 ( MAC_IVEIV_TABLE_BASE + ((__idx) & sizeof(struct mac_iveiv_entry)) ) 1326 ( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
1327#define MAC_WCID_ATTR_ENTRY(__idx) \ 1327#define MAC_WCID_ATTR_ENTRY(__idx) \
1328 ( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) ) 1328 ( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
1329#define SHARED_KEY_ENTRY(__idx) \ 1329#define SHARED_KEY_ENTRY(__idx) \
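
The rt2800.h change is a one-character fix: MAC_IVEIV_ENTRY() computed its offset with '&' where the neighbouring table macros use '*', so most indices collapsed onto the table base. A small standalone demonstration (the base address and entry layout here are made up):

    #include <stdio.h>

    struct mac_iveiv_entry {
        unsigned char iv[8];
    };

    #define TABLE_BASE 0x6000u      /* hypothetical table base */
    #define ENTRY_WRONG(idx) (TABLE_BASE + ((idx) & sizeof(struct mac_iveiv_entry)))
    #define ENTRY_RIGHT(idx) (TABLE_BASE + ((idx) * sizeof(struct mac_iveiv_entry)))

    int main(void)
    {
        unsigned int idx;

        for (idx = 0; idx < 4; idx++)
            printf("idx %u: with '&' 0x%04x, with '*' 0x%04x\n", idx,
                   (unsigned int)ENTRY_WRONG(idx),
                   (unsigned int)ENTRY_RIGHT(idx));
        return 0;
    }
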
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index eb1e1d00bec3..27bf887f1453 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -37,7 +37,7 @@
37#include <linux/module.h> 37#include <linux/module.h>
38 38
39#include "rt2x00.h" 39#include "rt2x00.h"
40#ifdef CONFIG_RT2800USB 40#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
41#include "rt2x00usb.h" 41#include "rt2x00usb.h"
42#endif 42#endif
43#include "rt2800lib.h" 43#include "rt2800lib.h"
@@ -1121,7 +1121,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1121 1121
1122 if (rt2x00_intf_is_usb(rt2x00dev)) { 1122 if (rt2x00_intf_is_usb(rt2x00dev)) {
1123 rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000); 1123 rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
1124#ifdef CONFIG_RT2800USB 1124#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
1125 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, 1125 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
1126 USB_MODE_RESET, REGISTER_TIMEOUT); 1126 USB_MODE_RESET, REGISTER_TIMEOUT);
1127#endif 1127#endif
@@ -2022,6 +2022,12 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2022 u16 eeprom; 2022 u16 eeprom;
2023 2023
2024 /* 2024 /*
2025 * Disable powersaving as default on PCI devices.
2026 */
2027 if (rt2x00_intf_is_pci(rt2x00dev))
2028 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
2029
2030 /*
2025 * Initialize all hw fields. 2031 * Initialize all hw fields.
2026 */ 2032 */
2027 rt2x00dev->hw->flags = 2033 rt2x00dev->hw->flags =
@@ -2074,8 +2080,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2074 IEEE80211_HT_CAP_SGI_20 | 2080 IEEE80211_HT_CAP_SGI_20 |
2075 IEEE80211_HT_CAP_SGI_40 | 2081 IEEE80211_HT_CAP_SGI_40 |
2076 IEEE80211_HT_CAP_TX_STBC | 2082 IEEE80211_HT_CAP_TX_STBC |
2077 IEEE80211_HT_CAP_RX_STBC | 2083 IEEE80211_HT_CAP_RX_STBC;
2078 IEEE80211_HT_CAP_PSMP_SUPPORT;
2079 spec->ht.ampdu_factor = 3; 2084 spec->ht.ampdu_factor = 3;
2080 spec->ht.ampdu_density = 4; 2085 spec->ht.ampdu_density = 4;
2081 spec->ht.mcs.tx_params = 2086 spec->ht.mcs.tx_params =
@@ -2140,8 +2145,8 @@ static void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
2140 rt2800_register_multiread(rt2x00dev, offset, 2145 rt2800_register_multiread(rt2x00dev, offset,
2141 &iveiv_entry, sizeof(iveiv_entry)); 2146 &iveiv_entry, sizeof(iveiv_entry));
2142 2147
2143 memcpy(&iveiv_entry.iv[0], iv16, sizeof(iv16)); 2148 memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16));
2144 memcpy(&iveiv_entry.iv[4], iv32, sizeof(iv32)); 2149 memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32));
2145} 2150}
2146 2151
2147static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 2152static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
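
The preprocessor change in rt2800lib.c follows the Kconfig tristate convention: when rt2800usb is built as a module, the generated config defines CONFIG_RT2800USB_MODULE rather than CONFIG_RT2800USB, so a plain #ifdef CONFIG_RT2800USB silently dropped the USB-specific paths from =m builds; testing both symbols covers y and m (later kernels wrap this pair of tests as IS_ENABLED()). A compilable sketch of the idiom, pretending the generated header defined the =m symbol:

    #include <stdio.h>

    /* Stand-in for what the generated config header provides in an =m build. */
    #define CONFIG_RT2800USB_MODULE 1

    #if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
    #define HAVE_RT2800USB 1
    #else
    #define HAVE_RT2800USB 0
    #endif

    int main(void)
    {
        printf("USB-specific code compiled in: %s\n", HAVE_RT2800USB ? "yes" : "no");
        return 0;
    }

The same file also fixes rt2800_get_tkip_seq() to copy from the hardware IV/EIV entry into the caller's iv16/iv32; as the hunk shows, the old code copied in the opposite direction and sized the copies with sizeof() of the pointers.
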
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index af85d18cdbe7..ab95346cf6a3 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -922,6 +922,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
922 { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) }, 922 { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
923 { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) }, 923 { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
924 { USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) }, 924 { USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) },
925 { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) },
925 /* Logitec */ 926 /* Logitec */
926 { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) }, 927 { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
927 { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) }, 928 { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 687e17dc2e9f..0ca589306d71 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2539,6 +2539,11 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2539 unsigned int i; 2539 unsigned int i;
2540 2540
2541 /* 2541 /*
2542 * Disable powersaving as default.
2543 */
2544 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
2545
2546 /*
2542 * Initialize all hw fields. 2547 * Initialize all hw fields.
2543 */ 2548 */
2544 rt2x00dev->hw->flags = 2549 rt2x00dev->hw->flags =
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index a1a3dd15c664..8a40a1439984 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -132,7 +132,6 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
132 132
133 rx_status.antenna = (flags2 >> 15) & 1; 133 rx_status.antenna = (flags2 >> 15) & 1;
134 /* TODO: improve signal/rssi reporting */ 134 /* TODO: improve signal/rssi reporting */
135 rx_status.qual = flags2 & 0xFF;
136 rx_status.signal = (flags2 >> 8) & 0x7F; 135 rx_status.signal = (flags2 >> 8) & 0x7F;
137 /* XXX: is this correct? */ 136 /* XXX: is this correct? */
138 rx_status.rate_idx = (flags >> 20) & 0xF; 137 rx_status.rate_idx = (flags >> 20) & 0xF;
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c
index 2e733e7bdfd4..28a808674080 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.c
@@ -256,7 +256,7 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
256 } 256 }
257 } 257 }
258 258
259 if (loop >= INIT_LOOP) { 259 if (loop > INIT_LOOP) {
260 wl1251_error("timeout waiting for the hardware to " 260 wl1251_error("timeout waiting for the hardware to "
261 "complete initialization"); 261 "complete initialization");
262 return -EIO; 262 return -EIO;
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index 886a9bc39cc1..c3385b3d246c 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -777,7 +777,7 @@ out:
777 return ret; 777 return ret;
778} 778}
779 779
780static int wl1271_build_basic_rates(char *rates, u8 band) 780static int wl1271_build_basic_rates(u8 *rates, u8 band)
781{ 781{
782 u8 index = 0; 782 u8 index = 0;
783 783
@@ -804,7 +804,7 @@ static int wl1271_build_basic_rates(char *rates, u8 band)
804 return index; 804 return index;
805} 805}
806 806
807static int wl1271_build_extended_rates(char *rates, u8 band) 807static int wl1271_build_extended_rates(u8 *rates, u8 band)
808{ 808{
809 u8 index = 0; 809 u8 index = 0;
810 810
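
wl1271_build_basic_rates() and wl1271_build_extended_rates() now take u8 * instead of char *, presumably because rate bytes can have the top bit set (0x80 is the usual basic-rate flag in 802.11 supported-rates encoding) and plain char, which is signed on the common ABIs, would turn those values negative. A small standalone illustration of the contrast:

    #include <stdio.h>

    int main(void)
    {
        /* 0x82 = 1 Mb/s with the basic-rate flag; explicit signed/unsigned types
         * make the contrast visible regardless of the platform's plain-char default. */
        signed char s_rate = (signed char)0x82;
        unsigned char u_rate = 0x82;

        printf("as signed char:   %d\n", s_rate);              /* -126 after promotion */
        printf("as unsigned char: %u\n", (unsigned)u_rate);    /* 130 */
        printf("s_rate >= 0x80 ? %s\n", s_rate >= 0x80 ? "yes" : "no");  /* no */
        printf("u_rate >= 0x80 ? %s\n", u_rate >= 0x80 ? "yes" : "no");  /* yes */
        return 0;
    }
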
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index dfa1b9bc22c8..7ca95c414fa8 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -1325,151 +1325,11 @@ int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates)
1325 return r; 1325 return r;
1326} 1326}
1327 1327
1328static int ofdm_qual_db(u8 status_quality, u8 zd_rate, unsigned int size)
1329{
1330 static const u16 constants[] = {
1331 715, 655, 585, 540, 470, 410, 360, 315,
1332 270, 235, 205, 175, 150, 125, 105, 85,
1333 65, 50, 40, 25, 15
1334 };
1335
1336 int i;
1337 u32 x;
1338
1339 /* It seems that their quality parameter is somehow per signal
1340 * and is now transferred per bit.
1341 */
1342 switch (zd_rate) {
1343 case ZD_OFDM_RATE_6M:
1344 case ZD_OFDM_RATE_12M:
1345 case ZD_OFDM_RATE_24M:
1346 size *= 2;
1347 break;
1348 case ZD_OFDM_RATE_9M:
1349 case ZD_OFDM_RATE_18M:
1350 case ZD_OFDM_RATE_36M:
1351 case ZD_OFDM_RATE_54M:
1352 size *= 4;
1353 size /= 3;
1354 break;
1355 case ZD_OFDM_RATE_48M:
1356 size *= 3;
1357 size /= 2;
1358 break;
1359 default:
1360 return -EINVAL;
1361 }
1362
1363 x = (10000 * status_quality)/size;
1364 for (i = 0; i < ARRAY_SIZE(constants); i++) {
1365 if (x > constants[i])
1366 break;
1367 }
1368
1369 switch (zd_rate) {
1370 case ZD_OFDM_RATE_6M:
1371 case ZD_OFDM_RATE_9M:
1372 i += 3;
1373 break;
1374 case ZD_OFDM_RATE_12M:
1375 case ZD_OFDM_RATE_18M:
1376 i += 5;
1377 break;
1378 case ZD_OFDM_RATE_24M:
1379 case ZD_OFDM_RATE_36M:
1380 i += 9;
1381 break;
1382 case ZD_OFDM_RATE_48M:
1383 case ZD_OFDM_RATE_54M:
1384 i += 15;
1385 break;
1386 default:
1387 return -EINVAL;
1388 }
1389
1390 return i;
1391}
1392
1393static int ofdm_qual_percent(u8 status_quality, u8 zd_rate, unsigned int size)
1394{
1395 int r;
1396
1397 r = ofdm_qual_db(status_quality, zd_rate, size);
1398 ZD_ASSERT(r >= 0);
1399 if (r < 0)
1400 r = 0;
1401
1402 r = (r * 100)/29;
1403 return r <= 100 ? r : 100;
1404}
1405
1406static unsigned int log10times100(unsigned int x)
1407{
1408 static const u8 log10[] = {
1409 0,
1410 0, 30, 47, 60, 69, 77, 84, 90, 95, 100,
1411 104, 107, 111, 114, 117, 120, 123, 125, 127, 130,
1412 132, 134, 136, 138, 139, 141, 143, 144, 146, 147,
1413 149, 150, 151, 153, 154, 155, 156, 157, 159, 160,
1414 161, 162, 163, 164, 165, 166, 167, 168, 169, 169,
1415 170, 171, 172, 173, 174, 174, 175, 176, 177, 177,
1416 178, 179, 179, 180, 181, 181, 182, 183, 183, 184,
1417 185, 185, 186, 186, 187, 188, 188, 189, 189, 190,
1418 190, 191, 191, 192, 192, 193, 193, 194, 194, 195,
1419 195, 196, 196, 197, 197, 198, 198, 199, 199, 200,
1420 200, 200, 201, 201, 202, 202, 202, 203, 203, 204,
1421 204, 204, 205, 205, 206, 206, 206, 207, 207, 207,
1422 208, 208, 208, 209, 209, 210, 210, 210, 211, 211,
1423 211, 212, 212, 212, 213, 213, 213, 213, 214, 214,
1424 214, 215, 215, 215, 216, 216, 216, 217, 217, 217,
1425 217, 218, 218, 218, 219, 219, 219, 219, 220, 220,
1426 220, 220, 221, 221, 221, 222, 222, 222, 222, 223,
1427 223, 223, 223, 224, 224, 224, 224,
1428 };
1429
1430 return x < ARRAY_SIZE(log10) ? log10[x] : 225;
1431}
1432
1433enum {
1434 MAX_CCK_EVM_DB = 45,
1435};
1436
1437static int cck_evm_db(u8 status_quality)
1438{
1439 return (20 * log10times100(status_quality)) / 100;
1440}
1441
1442static int cck_snr_db(u8 status_quality)
1443{
1444 int r = MAX_CCK_EVM_DB - cck_evm_db(status_quality);
1445 ZD_ASSERT(r >= 0);
1446 return r;
1447}
1448
1449static int cck_qual_percent(u8 status_quality)
1450{
1451 int r;
1452
1453 r = cck_snr_db(status_quality);
1454 r = (100*r)/17;
1455 return r <= 100 ? r : 100;
1456}
1457
1458static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame) 1328static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame)
1459{ 1329{
1460 return ZD_OFDM | zd_ofdm_plcp_header_rate(rx_frame); 1330 return ZD_OFDM | zd_ofdm_plcp_header_rate(rx_frame);
1461} 1331}
1462 1332
1463u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
1464 const struct rx_status *status)
1465{
1466 return (status->frame_status&ZD_RX_OFDM) ?
1467 ofdm_qual_percent(status->signal_quality_ofdm,
1468 zd_rate_from_ofdm_plcp_header(rx_frame),
1469 size) :
1470 cck_qual_percent(status->signal_quality_cck);
1471}
1472
1473/** 1333/**
1474 * zd_rx_rate - report zd-rate 1334 * zd_rx_rate - report zd-rate
1475 * @rx_frame - received frame 1335 * @rx_frame - received frame
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 9fd8f3508d66..f8bbf7d302ae 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -929,9 +929,6 @@ static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
929 929
930struct rx_status; 930struct rx_status;
931 931
932u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
933 const struct rx_status *status);
934
935u8 zd_rx_rate(const void *rx_frame, const struct rx_status *status); 932u8 zd_rx_rate(const void *rx_frame, const struct rx_status *status);
936 933
937struct zd_mc_hash { 934struct zd_mc_hash {
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index cf51e8f8174b..8ebf5c33955d 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -828,9 +828,6 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
828 stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq; 828 stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
829 stats.band = IEEE80211_BAND_2GHZ; 829 stats.band = IEEE80211_BAND_2GHZ;
830 stats.signal = status->signal_strength; 830 stats.signal = status->signal_strength;
831 stats.qual = zd_rx_qual_percent(buffer,
832 length - sizeof(struct rx_status),
833 status);
834 831
835 rate = zd_rx_rate(buffer, status); 832 rate = zd_rx_rate(buffer, status);
836 833
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index bd588eb8e922..8e210cd76e55 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -121,7 +121,7 @@ struct controller {
121#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450 121#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450
122#define PCI_DEVICE_ID_AMD_POGO_7458 0x7458 122#define PCI_DEVICE_ID_AMD_POGO_7458 0x7458
123 123
124/* AMD PCIX bridge registers */ 124/* AMD PCI-X bridge registers */
125#define PCIX_MEM_BASE_LIMIT_OFFSET 0x1C 125#define PCIX_MEM_BASE_LIMIT_OFFSET 0x1C
126#define PCIX_MISCII_OFFSET 0x48 126#define PCIX_MISCII_OFFSET 0x48
127#define PCIX_MISC_BRIDGE_ERRORS_OFFSET 0x80 127#define PCIX_MISC_BRIDGE_ERRORS_OFFSET 0x80
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index e56f9bed6f2b..417312528ddf 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -305,7 +305,7 @@ struct device_domain_info {
305 int segment; /* PCI domain */ 305 int segment; /* PCI domain */
306 u8 bus; /* PCI bus number */ 306 u8 bus; /* PCI bus number */
307 u8 devfn; /* PCI devfn number */ 307 u8 devfn; /* PCI devfn number */
308 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ 308 struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
309 struct intel_iommu *iommu; /* IOMMU used by this device */ 309 struct intel_iommu *iommu; /* IOMMU used by this device */
310 struct dmar_domain *domain; /* pointer to domain */ 310 struct dmar_domain *domain; /* pointer to domain */
311}; 311};
@@ -1604,7 +1604,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1604 return ret; 1604 return ret;
1605 parent = parent->bus->self; 1605 parent = parent->bus->self;
1606 } 1606 }
1607 if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */ 1607 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
1608 return domain_context_mapping_one(domain, 1608 return domain_context_mapping_one(domain,
1609 pci_domain_nr(tmp->subordinate), 1609 pci_domain_nr(tmp->subordinate),
1610 tmp->subordinate->number, 0, 1610 tmp->subordinate->number, 0,
@@ -3325,7 +3325,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3325 parent->devfn); 3325 parent->devfn);
3326 parent = parent->bus->self; 3326 parent = parent->bus->self;
3327 } 3327 }
3328 if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */ 3328 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
3329 iommu_detach_dev(iommu, 3329 iommu_detach_dev(iommu,
3330 tmp->subordinate->number, 0); 3330 tmp->subordinate->number, 0);
3331 else /* this is a legacy PCI bridge */ 3331 else /* this is a legacy PCI bridge */
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 8b65a489581b..95b849130ad4 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -528,7 +528,7 @@ int set_msi_sid(struct irte *irte, struct pci_dev *dev)
528 528
529 bridge = pci_find_upstream_pcie_bridge(dev); 529 bridge = pci_find_upstream_pcie_bridge(dev);
530 if (bridge) { 530 if (bridge) {
531 if (pci_is_pcie(bridge))/* this is a PCIE-to-PCI/PCIX bridge */ 531 if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
532 set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, 532 set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
533 (bridge->bus->number << 8) | dev->bus->number); 533 (bridge->bus->number << 8) | dev->bus->number);
534 else /* this is a legacy PCI bridge */ 534 else /* this is a legacy PCI bridge */
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index cc617ddd33d0..7e2829538a4c 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -112,11 +112,7 @@ static bool acpi_pci_can_wakeup(struct pci_dev *dev)
112static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable) 112static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
113{ 113{
114 while (bus->parent) { 114 while (bus->parent) {
115 struct pci_dev *bridge = bus->self; 115 if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable))
116 int ret;
117
118 ret = acpi_pm_device_sleep_wake(&bridge->dev, enable);
119 if (!ret || pci_is_pcie(bridge))
120 return; 116 return;
121 bus = bus->parent; 117 bus = bus->parent;
122 } 118 }
@@ -131,9 +127,7 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
131 if (acpi_pci_can_wakeup(dev)) 127 if (acpi_pci_can_wakeup(dev))
132 return acpi_pm_device_sleep_wake(&dev->dev, enable); 128 return acpi_pm_device_sleep_wake(&dev->dev, enable);
133 129
134 if (!pci_is_pcie(dev)) 130 acpi_pci_propagate_wakeup_enable(dev->bus, enable);
135 acpi_pci_propagate_wakeup_enable(dev->bus, enable);
136
137 return 0; 131 return 0;
138} 132}
139 133
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0bc27e059019..864e703cf737 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1153,11 +1153,11 @@ pci_disable_device(struct pci_dev *dev)
1153 1153
1154/** 1154/**
1155 * pcibios_set_pcie_reset_state - set reset state for device dev 1155 * pcibios_set_pcie_reset_state - set reset state for device dev
1156 * @dev: the PCI-E device reset 1156 * @dev: the PCIe device reset
1157 * @state: Reset state to enter into 1157 * @state: Reset state to enter into
1158 * 1158 *
1159 * 1159 *
1160 * Sets the PCI-E reset state for the device. This is the default 1160 * Sets the PCIe reset state for the device. This is the default
1161 * implementation. Architecture implementations can override this. 1161 * implementation. Architecture implementations can override this.
1162 */ 1162 */
1163int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev, 1163int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
@@ -1168,7 +1168,7 @@ int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
1168 1168
1169/** 1169/**
1170 * pci_set_pcie_reset_state - set reset state for device dev 1170 * pci_set_pcie_reset_state - set reset state for device dev
1171 * @dev: the PCI-E device reset 1171 * @dev: the PCIe device reset
1172 * @state: Reset state to enter into 1172 * @state: Reset state to enter into
1173 * 1173 *
1174 * 1174 *
@@ -2284,6 +2284,21 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
2284 return 0; 2284 return 0;
2285} 2285}
2286 2286
2287static int pci_dev_specific_reset(struct pci_dev *dev, int probe)
2288{
2289 struct pci_dev_reset_methods *i;
2290
2291 for (i = pci_dev_reset_methods; i->reset; i++) {
2292 if ((i->vendor == dev->vendor ||
2293 i->vendor == (u16)PCI_ANY_ID) &&
2294 (i->device == dev->device ||
2295 i->device == (u16)PCI_ANY_ID))
2296 return i->reset(dev, probe);
2297 }
2298
2299 return -ENOTTY;
2300}
2301
2287static int pci_dev_reset(struct pci_dev *dev, int probe) 2302static int pci_dev_reset(struct pci_dev *dev, int probe)
2288{ 2303{
2289 int rc; 2304 int rc;
@@ -2296,6 +2311,10 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
2296 down(&dev->dev.sem); 2311 down(&dev->dev.sem);
2297 } 2312 }
2298 2313
2314 rc = pci_dev_specific_reset(dev, probe);
2315 if (rc != -ENOTTY)
2316 goto done;
2317
2299 rc = pcie_flr(dev, probe); 2318 rc = pcie_flr(dev, probe);
2300 if (rc != -ENOTTY) 2319 if (rc != -ENOTTY)
2301 goto done; 2320 goto done;
@@ -2779,6 +2798,11 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
2779 return 1; 2798 return 1;
2780} 2799}
2781 2800
2801void __weak pci_fixup_cardbus(struct pci_bus *bus)
2802{
2803}
2804EXPORT_SYMBOL(pci_fixup_cardbus);
2805
2782static int __init pci_setup(char *str) 2806static int __init pci_setup(char *str)
2783{ 2807{
2784 while (str) { 2808 while (str) {
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 33ed8e0aba1e..709eaa4fee51 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -313,4 +313,12 @@ static inline int pci_resource_alignment(struct pci_dev *dev,
313 313
314extern void pci_enable_acs(struct pci_dev *dev); 314extern void pci_enable_acs(struct pci_dev *dev);
315 315
316struct pci_dev_reset_methods {
317 u16 vendor;
318 u16 device;
319 int (*reset)(struct pci_dev *dev, int probe);
320};
321
322extern struct pci_dev_reset_methods pci_dev_reset_methods[];
323
316#endif /* DRIVERS_PCI_H */ 324#endif /* DRIVERS_PCI_H */
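
pci.h gains the pci_dev_reset_methods table that pci_dev_specific_reset(), added in pci.c above, walks before trying FLR and the other generic resets: each entry names a vendor/device pair, with (u16)PCI_ANY_ID acting as a wildcard, and the first matching entry's callback handles the reset. A standalone sketch of the same wildcard match-table dispatch (table contents are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define ANY_ID 0xffffu                  /* analogue of (u16)PCI_ANY_ID */

    struct reset_method {
        uint16_t vendor;
        uint16_t device;
        int (*reset)(int probe);
    };

    static int demo_reset(int probe)
    {
        return probe ? 0 : 42;              /* stand-in for a device-specific reset */
    }

    static const struct reset_method methods[] = {
        { 0x8086, ANY_ID, demo_reset },     /* hypothetical: any device of vendor 0x8086 */
        { 0, 0, NULL },                     /* terminator */
    };

    static int specific_reset(uint16_t vendor, uint16_t device, int probe)
    {
        const struct reset_method *i;

        for (i = methods; i->reset; i++)
            if ((i->vendor == vendor || i->vendor == ANY_ID) &&
                (i->device == device || i->device == ANY_ID))
                return i->reset(probe);
        return -1;                          /* like -ENOTTY: fall back to generic resets */
    }

    int main(void)
    {
        printf("matching device:     %d\n", specific_reset(0x8086, 0x1234, 1));
        printf("non-matching device: %d\n", specific_reset(0x10de, 0x1234, 1));
        return 0;
    }
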
diff --git a/drivers/pci/pcie/aer/Kconfig.debug b/drivers/pci/pcie/aer/Kconfig.debug
index b8c925c1f6aa..9142949734f5 100644
--- a/drivers/pci/pcie/aer/Kconfig.debug
+++ b/drivers/pci/pcie/aer/Kconfig.debug
@@ -3,14 +3,14 @@
3# 3#
4 4
5config PCIEAER_INJECT 5config PCIEAER_INJECT
6 tristate "PCIE AER error injector support" 6 tristate "PCIe AER error injector support"
7 depends on PCIEAER 7 depends on PCIEAER
8 default n 8 default n
9 help 9 help
10 This enables PCI Express Root Port Advanced Error Reporting 10 This enables PCI Express Root Port Advanced Error Reporting
11 (AER) software error injector. 11 (AER) software error injector.
12 12
13 Debuging PCIE AER code is quite difficult because it is hard 13 Debugging PCIe AER code is quite difficult because it is hard
14 to trigger various real hardware errors. Software based 14 to trigger various real hardware errors. Software based
15 error injection can fake almost all kinds of errors with the 15 error injection can fake almost all kinds of errors with the
16 help of a user space helper tool aer-inject, which can be 16 help of a user space helper tool aer-inject, which can be
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 7fcd5331b14c..797d47809f7a 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * PCIE AER software error injection support. 2 * PCIe AER software error injection support.
3 * 3 *
4 * Debuging PCIE AER code is quite difficult because it is hard to 4 * Debuging PCIe AER code is quite difficult because it is hard to
5 * trigger various real hardware errors. Software based error 5 * trigger various real hardware errors. Software based error
6 * injection can fake almost all kinds of errors with the help of a 6 * injection can fake almost all kinds of errors with the help of a
7 * user space helper tool aer-inject, which can be gotten from: 7 * user space helper tool aer-inject, which can be gotten from:
@@ -484,5 +484,5 @@ static void __exit aer_inject_exit(void)
484module_init(aer_inject_init); 484module_init(aer_inject_init);
485module_exit(aer_inject_exit); 485module_exit(aer_inject_exit);
486 486
487MODULE_DESCRIPTION("PCIE AER software error injector"); 487MODULE_DESCRIPTION("PCIe AER software error injector");
488MODULE_LICENSE("GPL"); 488MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 97a345927b55..21f215f4daa3 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -155,7 +155,7 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
155 mutex_init(&rpc->rpc_mutex); 155 mutex_init(&rpc->rpc_mutex);
156 init_waitqueue_head(&rpc->wait_release); 156 init_waitqueue_head(&rpc->wait_release);
157 157
158 /* Use PCIE bus function to store rpc into PCIE device */ 158 /* Use PCIe bus function to store rpc into PCIe device */
159 set_service_data(dev, rpc); 159 set_service_data(dev, rpc);
160 160
161 return rpc; 161 return rpc;
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 8edb2f300e8f..04814087658d 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -24,7 +24,7 @@
24 * 24 *
25 * @return: Zero on success. Nonzero otherwise. 25 * @return: Zero on success. Nonzero otherwise.
26 * 26 *
27 * Invoked when PCIE bus loads AER service driver. To avoid conflict with 27 * Invoked when PCIe bus loads AER service driver. To avoid conflict with
28 * BIOS AER support requires BIOS to yield AER control to OS native driver. 28 * BIOS AER support requires BIOS to yield AER control to OS native driver.
29 **/ 29 **/
30int aer_osc_setup(struct pcie_device *pciedev) 30int aer_osc_setup(struct pcie_device *pciedev)
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index ae672ca80333..c843a799814d 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -587,7 +587,7 @@ static void handle_error_source(struct pcie_device *aerdev,
587 * aer_enable_rootport - enable Root Port's interrupts when receiving messages 587 * aer_enable_rootport - enable Root Port's interrupts when receiving messages
588 * @rpc: pointer to a Root Port data structure 588 * @rpc: pointer to a Root Port data structure
589 * 589 *
590 * Invoked when PCIE bus loads AER service driver. 590 * Invoked when PCIe bus loads AER service driver.
591 */ 591 */
592void aer_enable_rootport(struct aer_rpc *rpc) 592void aer_enable_rootport(struct aer_rpc *rpc)
593{ 593{
@@ -597,7 +597,7 @@ void aer_enable_rootport(struct aer_rpc *rpc)
597 u32 reg32; 597 u32 reg32;
598 598
599 pos = pci_pcie_cap(pdev); 599 pos = pci_pcie_cap(pdev);
600 /* Clear PCIE Capability's Device Status */ 600 /* Clear PCIe Capability's Device Status */
601 pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16); 601 pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16);
602 pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16); 602 pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16);
603 603
@@ -631,7 +631,7 @@ void aer_enable_rootport(struct aer_rpc *rpc)
631 * disable_root_aer - disable Root Port's interrupts when receiving messages 631 * disable_root_aer - disable Root Port's interrupts when receiving messages
632 * @rpc: pointer to a Root Port data structure 632 * @rpc: pointer to a Root Port data structure
633 * 633 *
634 * Invoked when PCIE bus unloads AER service driver. 634 * Invoked when PCIe bus unloads AER service driver.
635 */ 635 */
636static void disable_root_aer(struct aer_rpc *rpc) 636static void disable_root_aer(struct aer_rpc *rpc)
637{ 637{
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 44acde72294f..9d3e4c8d0184 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -184,7 +184,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
184 184
185 if (info->status == 0) { 185 if (info->status == 0) {
186 AER_PR(info, dev, 186 AER_PR(info, dev,
187 "PCIE Bus Error: severity=%s, type=Unaccessible, " 187 "PCIe Bus Error: severity=%s, type=Unaccessible, "
188 "id=%04x(Unregistered Agent ID)\n", 188 "id=%04x(Unregistered Agent ID)\n",
189 aer_error_severity_string[info->severity], id); 189 aer_error_severity_string[info->severity], id);
190 } else { 190 } else {
@@ -194,7 +194,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
194 agent = AER_GET_AGENT(info->severity, info->status); 194 agent = AER_GET_AGENT(info->severity, info->status);
195 195
196 AER_PR(info, dev, 196 AER_PR(info, dev,
197 "PCIE Bus Error: severity=%s, type=%s, id=%04x(%s)\n", 197 "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
198 aer_error_severity_string[info->severity], 198 aer_error_severity_string[info->severity],
199 aer_error_layer[layer], id, aer_agent_string[agent]); 199 aer_error_layer[layer], id, aer_agent_string[agent]);
200 200
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 5a01fc7fbf05..be53d98fa384 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * File: drivers/pci/pcie/aspm.c 2 * File: drivers/pci/pcie/aspm.c
3 * Enabling PCIE link L0s/L1 state and Clock Power Management 3 * Enabling PCIe link L0s/L1 state and Clock Power Management
4 * 4 *
5 * Copyright (C) 2007 Intel 5 * Copyright (C) 2007 Intel
6 * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com) 6 * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
@@ -499,7 +499,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
499 int pos; 499 int pos;
500 u32 reg32; 500 u32 reg32;
501 /* 501 /*
502 * Some functions in a slot might not all be PCIE functions, 502 * Some functions in a slot might not all be PCIe functions,
503 * very strange. Disable ASPM for the whole slot 503 * very strange. Disable ASPM for the whole slot
504 */ 504 */
505 list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { 505 list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index a49452e2aed9..34d65172a4d7 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -24,7 +24,7 @@
24 */ 24 */
25#define DRIVER_VERSION "v1.0" 25#define DRIVER_VERSION "v1.0"
26#define DRIVER_AUTHOR "tom.l.nguyen@intel.com" 26#define DRIVER_AUTHOR "tom.l.nguyen@intel.com"
27#define DRIVER_DESC "PCIE Port Bus Driver" 27#define DRIVER_DESC "PCIe Port Bus Driver"
28MODULE_AUTHOR(DRIVER_AUTHOR); 28MODULE_AUTHOR(DRIVER_AUTHOR);
29MODULE_DESCRIPTION(DRIVER_DESC); 29MODULE_DESCRIPTION(DRIVER_DESC);
30MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 7cfa7c38d318..8726698866b1 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2629,13 +2629,68 @@ static int __init pci_apply_final_quirks(void)
2629 if (!pci_cache_line_size) { 2629 if (!pci_cache_line_size) {
2630 printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n", 2630 printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
2631 cls << 2, pci_dfl_cache_line_size << 2); 2631 cls << 2, pci_dfl_cache_line_size << 2);
2632 pci_cache_line_size = cls; 2632 pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
2633 } 2633 }
2634 2634
2635 return 0; 2635 return 0;
2636} 2636}
2637 2637
2638fs_initcall_sync(pci_apply_final_quirks); 2638fs_initcall_sync(pci_apply_final_quirks);
2639
2640/*
2641 * The following are device-specific reset methods which can be used to
2642 * reset a single function if other methods (e.g. FLR, PM D0->D3) are
2643 * not available.
2644 */
2645static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
2646{
2647 int pos;
2648
2649 /* only implement PCI_CLASS_SERIAL_USB at present */
2650 if (dev->class == PCI_CLASS_SERIAL_USB) {
2651 pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
2652 if (!pos)
2653 return -ENOTTY;
2654
2655 if (probe)
2656 return 0;
2657
2658 pci_write_config_byte(dev, pos + 0x4, 1);
2659 msleep(100);
2660
2661 return 0;
2662 } else {
2663 return -ENOTTY;
2664 }
2665}
2666
2667static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
2668{
2669 int pos;
2670
2671 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
2672 if (!pos)
2673 return -ENOTTY;
2674
2675 if (probe)
2676 return 0;
2677
2678 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
2679 PCI_EXP_DEVCTL_BCR_FLR);
2680 msleep(100);
2681
2682 return 0;
2683}
2684
2685#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed
2686
2687struct pci_dev_reset_methods pci_dev_reset_methods[] = {
2688 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
2689 reset_intel_82599_sfp_virtfn },
2690 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
2691 reset_intel_generic_dev },
2692 { 0 }
2693};
2639#else 2694#else
2640void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} 2695void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
2641#endif 2696#endif
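The new pci_dev_reset_methods[] table pairs vendor/device IDs with a reset callback so a single function can still be reset when FLR or a PM D0->D3 cycle is unavailable; a caller is expected to walk the table before falling back to the generic resets. A hedged sketch of such a walk follows; the struct field names (vendor, device, reset) and the helper name are assumptions for illustration, not the actual lookup code.

/* Hedged sketch: consult the quirk table above before falling back to
 * generic resets.  Field names (vendor, device, reset) and the helper
 * name are assumptions for illustration only.
 */
#include <linux/errno.h>
#include <linux/pci.h>

extern struct pci_dev_reset_methods pci_dev_reset_methods[];

static int try_device_specific_reset(struct pci_dev *dev, int probe)
{
	const struct pci_dev_reset_methods *i;

	for (i = pci_dev_reset_methods; i->reset; i++) {
		if ((i->vendor == dev->vendor || i->vendor == (u16)PCI_ANY_ID) &&
		    (i->device == dev->device || i->device == (u16)PCI_ANY_ID))
			return i->reset(dev, probe);
	}
	return -ENOTTY;		/* no quirk matched; caller tries FLR/PM reset */
}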
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 6dae87143258..4a471dc4f4b9 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -15,9 +15,9 @@
15 15
16DECLARE_RWSEM(pci_bus_sem); 16DECLARE_RWSEM(pci_bus_sem);
17/* 17/*
18 * find the upstream PCIE-to-PCI bridge of a PCI device 18 * find the upstream PCIe-to-PCI bridge of a PCI device
19 * if the device is PCIE, return NULL 19 * if the device is PCIE, return NULL
20 * if the device isn't connected to a PCIE bridge (that is its parent is a 20 * if the device isn't connected to a PCIe bridge (that is its parent is a
21 * legacy PCI bridge and the bridge is directly connected to bus 0), return its 21 * legacy PCI bridge and the bridge is directly connected to bus 0), return its
22 * parent 22 * parent
23 */ 23 */
@@ -37,7 +37,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
37 tmp = pdev; 37 tmp = pdev;
38 continue; 38 continue;
39 } 39 }
40 /* PCI device should connect to a PCIE bridge */ 40 /* PCI device should connect to a PCIe bridge */
41 if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) { 41 if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) {
42 /* Busted hardware? */ 42 /* Busted hardware? */
43 WARN_ON_ONCE(1); 43 WARN_ON_ONCE(1);
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index cdf50f3bc2df..d99f846451a3 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -222,7 +222,7 @@ int __ref cb_alloc(struct pcmcia_socket *s)
222 unsigned int max, pass; 222 unsigned int max, pass;
223 223
224 s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0)); 224 s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
225/* pcibios_fixup_bus(bus); */ 225 pci_fixup_cardbus(bus);
226 226
227 max = bus->secondary; 227 max = bus->secondary;
228 for (pass = 0; pass < 2; pass++) 228 for (pass = 0; pass < 2; pass++)
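cb_alloc() now calls pci_fixup_cardbus() in place of the long commented-out pcibios_fixup_bus() call, and the prototype added to include/linux/pci.h further down is meant to be overridable per architecture. A minimal sketch of the weak-default pattern follows; the empty body is an assumption, not the actual generic implementation.

/* Sketch only: a no-op weak default matching the pci.h declaration below;
 * the empty body is an assumption, not the actual generic implementation.
 */
#include <linux/pci.h>

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
	/* nothing to fix up on architectures without CardBus quirks */
}

A platform that needs per-device setup behind a CardBus bridge then ships a strong pci_fixup_cardbus() with the same signature, which the linker picks over the weak default.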
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 916ccb2b316c..4c7e70299d6b 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -323,6 +323,7 @@ static int __init dell_wmi_input_setup(void)
323static int __init dell_wmi_init(void) 323static int __init dell_wmi_init(void)
324{ 324{
325 int err; 325 int err;
326 acpi_status status;
326 327
327 if (!wmi_has_guid(DELL_EVENT_GUID)) { 328 if (!wmi_has_guid(DELL_EVENT_GUID)) {
328 printk(KERN_WARNING "dell-wmi: No known WMI GUID found\n"); 329 printk(KERN_WARNING "dell-wmi: No known WMI GUID found\n");
@@ -336,14 +337,14 @@ static int __init dell_wmi_init(void)
336 if (err) 337 if (err)
337 return err; 338 return err;
338 339
339 err = wmi_install_notify_handler(DELL_EVENT_GUID, 340 status = wmi_install_notify_handler(DELL_EVENT_GUID,
340 dell_wmi_notify, NULL); 341 dell_wmi_notify, NULL);
341 if (err) { 342 if (ACPI_FAILURE(status)) {
342 input_unregister_device(dell_wmi_input_dev); 343 input_unregister_device(dell_wmi_input_dev);
343 printk(KERN_ERR 344 printk(KERN_ERR
344 "dell-wmi: Unable to register notify handler - %d\n", 345 "dell-wmi: Unable to register notify handler - %d\n",
345 err); 346 status);
346 return err; 347 return -ENODEV;
347 } 348 }
348 349
349 return 0; 350 return 0;
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 9f93d6c0f510..cc9ad740bda1 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -492,8 +492,7 @@ wmi_notify_handler handler, void *data)
492 if (!guid || !handler) 492 if (!guid || !handler)
493 return AE_BAD_PARAMETER; 493 return AE_BAD_PARAMETER;
494 494
495 find_guid(guid, &block); 495 if (!find_guid(guid, &block))
496 if (!block)
497 return AE_NOT_EXIST; 496 return AE_NOT_EXIST;
498 497
499 if (block->handler) 498 if (block->handler)
@@ -521,8 +520,7 @@ acpi_status wmi_remove_notify_handler(const char *guid)
521 if (!guid) 520 if (!guid)
522 return AE_BAD_PARAMETER; 521 return AE_BAD_PARAMETER;
523 522
524 find_guid(guid, &block); 523 if (!find_guid(guid, &block))
525 if (!block)
526 return AE_NOT_EXIST; 524 return AE_NOT_EXIST;
527 525
528 if (!block->handler) 526 if (!block->handler)
diff --git a/fs/namei.c b/fs/namei.c
index 68921d9b5302..b55440baf7ab 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -232,6 +232,7 @@ int generic_permission(struct inode *inode, int mask,
232 /* 232 /*
233 * Searching includes executable on directories, else just read. 233 * Searching includes executable on directories, else just read.
234 */ 234 */
235 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
235 if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) 236 if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
236 if (capable(CAP_DAC_READ_SEARCH)) 237 if (capable(CAP_DAC_READ_SEARCH))
237 return 0; 238 return 0;
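The added masking matters because callers can pass extra flag bits such as MAY_APPEND or MAY_OPEN alongside the permission bits; without clamping, the exact-equality test mask == MAY_READ would never take the CAP_DAC_READ_SEARCH fast path for such callers. A small standalone illustration (the constants mirror include/linux/fs.h):

/* Illustration only; the MAY_* values mirror include/linux/fs.h. */
#include <stdio.h>

#define MAY_EXEC   0x01
#define MAY_WRITE  0x02
#define MAY_READ   0x04
#define MAY_APPEND 0x08

static int read_search_fastpath_applies(unsigned int mask)
{
	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;	/* the new clamp */
	return mask == MAY_READ;
}

int main(void)
{
	/* caller passed MAY_APPEND too; with the clamp the fast path still applies */
	printf("%d\n", read_search_fastpath_applies(MAY_READ | MAY_APPEND));
	return 0;
}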
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 784a919aa0d0..9b98173a8184 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -845,7 +845,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
845 * blk_rq_err_bytes() : bytes left till the next error boundary 845 * blk_rq_err_bytes() : bytes left till the next error boundary
846 * blk_rq_sectors() : sectors left in the entire request 846 * blk_rq_sectors() : sectors left in the entire request
847 * blk_rq_cur_sectors() : sectors left in the current segment 847 * blk_rq_cur_sectors() : sectors left in the current segment
848 * blk_rq_err_sectors() : sectors left till the next error boundary
849 */ 848 */
850static inline sector_t blk_rq_pos(const struct request *rq) 849static inline sector_t blk_rq_pos(const struct request *rq)
851{ 850{
@@ -874,11 +873,6 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
874 return blk_rq_cur_bytes(rq) >> 9; 873 return blk_rq_cur_bytes(rq) >> 9;
875} 874}
876 875
877static inline unsigned int blk_rq_err_sectors(const struct request *rq)
878{
879 return blk_rq_err_bytes(rq) >> 9;
880}
881
882/* 876/*
883 * Request issue related functions. 877 * Request issue related functions.
884 */ 878 */
@@ -1116,11 +1110,18 @@ static inline int queue_alignment_offset(struct request_queue *q)
1116 return q->limits.alignment_offset; 1110 return q->limits.alignment_offset;
1117} 1111}
1118 1112
1113static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset)
1114{
1115 unsigned int granularity = max(lim->physical_block_size, lim->io_min);
1116
1117 offset &= granularity - 1;
1118 return (granularity + lim->alignment_offset - offset) & (granularity - 1);
1119}
1120
1119static inline int queue_sector_alignment_offset(struct request_queue *q, 1121static inline int queue_sector_alignment_offset(struct request_queue *q,
1120 sector_t sector) 1122 sector_t sector)
1121{ 1123{
1122 return ((sector << 9) - q->limits.alignment_offset) 1124 return queue_limit_alignment_offset(&q->limits, sector << 9);
1123 & (q->limits.io_min - 1);
1124} 1125}
1125 1126
1126static inline int bdev_alignment_offset(struct block_device *bdev) 1127static inline int bdev_alignment_offset(struct block_device *bdev)
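queue_limit_alignment_offset() derives the granularity from the larger of the physical block size and the minimum I/O size instead of using io_min alone, which keeps the offset correct when io_min is smaller than the physical block. A standalone worked example with made-up limits:

/* Standalone illustration of the new calculation; the limits are made up. */
#include <stdio.h>

static unsigned int align_off(unsigned int phys, unsigned int io_min,
			      unsigned int alignment_offset,
			      unsigned long long offset)
{
	unsigned int granularity = phys > io_min ? phys : io_min;

	offset &= granularity - 1;
	return (granularity + alignment_offset - offset) & (granularity - 1);
}

int main(void)
{
	/* 4 KiB physical sectors, 512-byte io_min, partition shifted by 3584 bytes */
	printf("%u\n", align_off(4096, 512, 3584, 7168));	/* prints 512 */
	return 0;
}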
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index d9724a28c0c2..163c840437d6 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -832,7 +832,7 @@ struct ieee80211_ht_cap {
832#define IEEE80211_HT_CAP_DELAY_BA 0x0400 832#define IEEE80211_HT_CAP_DELAY_BA 0x0400
833#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 833#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
834#define IEEE80211_HT_CAP_DSSSCCK40 0x1000 834#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
835#define IEEE80211_HT_CAP_PSMP_SUPPORT 0x2000 835#define IEEE80211_HT_CAP_RESERVED 0x2000
836#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000 836#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000
837#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000 837#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000
838 838
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 699e85c01a4d..b2304929434e 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -81,6 +81,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
81#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) 81#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
82#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) 82#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
83#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER) 83#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
84#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK)
84#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ 85#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
85 ACCEPT_SOURCE_ROUTE) 86 ACCEPT_SOURCE_ROUTE)
86#define IN_DEV_ACCEPT_LOCAL(in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL) 87#define IN_DEV_ACCEPT_LOCAL(in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL)
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 3d44e9c65a8e..7c6b32a1421c 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -81,7 +81,7 @@ union { \
81} 81}
82 82
83/** 83/**
84 * INIT_KFIFO - Initialize a kfifo declared by DECLARED_KFIFO 84 * INIT_KFIFO - Initialize a kfifo declared by DECLARE_KFIFO
85 * @name: name of the declared kfifo datatype 85 * @name: name of the declared kfifo datatype
86 */ 86 */
87#define INIT_KFIFO(name) \ 87#define INIT_KFIFO(name) \
diff --git a/include/linux/pci.h b/include/linux/pci.h
index bf1e67080849..5da0690d9cee 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -566,6 +566,9 @@ void pcibios_align_resource(void *, struct resource *, resource_size_t,
566 resource_size_t); 566 resource_size_t);
567void pcibios_update_irq(struct pci_dev *, int irq); 567void pcibios_update_irq(struct pci_dev *, int irq);
568 568
569/* Weak but can be overridden by arch */
570void pci_fixup_cardbus(struct pci_bus *);
571
569/* Generic PCI functions used internally */ 572/* Generic PCI functions used internally */
570 573
571extern struct pci_bus *pci_find_bus(int domain, int busnr); 574extern struct pci_bus *pci_find_bus(int domain, int busnr);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 877ba039e6a4..bd27fbc9db62 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -482,6 +482,7 @@ enum
482 NET_IPV4_CONF_ARP_ACCEPT=21, 482 NET_IPV4_CONF_ARP_ACCEPT=21,
483 NET_IPV4_CONF_ARP_NOTIFY=22, 483 NET_IPV4_CONF_ARP_NOTIFY=22,
484 NET_IPV4_CONF_ACCEPT_LOCAL=23, 484 NET_IPV4_CONF_ACCEPT_LOCAL=23,
485 NET_IPV4_CONF_SRC_VMARK=24,
485 __NET_IPV4_CONF_MAX 486 __NET_IPV4_CONF_MAX
486}; 487};
487 488
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 2aff4906b2ae..0bf369752274 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -547,7 +547,6 @@ enum mac80211_rx_flags {
547 * unspecified depending on the hardware capabilities flags 547 * unspecified depending on the hardware capabilities flags
548 * @IEEE80211_HW_SIGNAL_* 548 * @IEEE80211_HW_SIGNAL_*
549 * @noise: noise when receiving this frame, in dBm. 549 * @noise: noise when receiving this frame, in dBm.
550 * @qual: overall signal quality indication, in percent (0-100).
551 * @antenna: antenna used 550 * @antenna: antenna used
552 * @rate_idx: index of data rate into band's supported rates or MCS index if 551 * @rate_idx: index of data rate into band's supported rates or MCS index if
553 * HT rates are use (RX_FLAG_HT) 552 * HT rates are use (RX_FLAG_HT)
@@ -559,7 +558,6 @@ struct ieee80211_rx_status {
559 int freq; 558 int freq;
560 int signal; 559 int signal;
561 int noise; 560 int noise;
562 int __deprecated qual;
563 int antenna; 561 int antenna;
564 int rate_idx; 562 int rate_idx;
565 int flag; 563 int flag;
@@ -1737,6 +1735,12 @@ static inline void ieee80211_rx_ni(struct ieee80211_hw *hw,
1737 local_bh_enable(); 1735 local_bh_enable();
1738} 1736}
1739 1737
1738/*
1739 * The TX headroom reserved by mac80211 for its own tx_status functions.
1740 * This is enough for the radiotap header.
1741 */
1742#define IEEE80211_TX_STATUS_HEADROOM 13
1743
1740/** 1744/**
1741 * ieee80211_tx_status - transmit status callback 1745 * ieee80211_tx_status - transmit status callback
1742 * 1746 *
diff --git a/include/scsi/libsrp.h b/include/scsi/libsrp.h
index 07e3adde21d9..f4105c91af53 100644
--- a/include/scsi/libsrp.h
+++ b/include/scsi/libsrp.h
@@ -2,6 +2,7 @@
2#define __LIBSRP_H__ 2#define __LIBSRP_H__
3 3
4#include <linux/list.h> 4#include <linux/list.h>
5#include <linux/kfifo.h>
5#include <scsi/scsi_cmnd.h> 6#include <scsi/scsi_cmnd.h>
6#include <scsi/scsi_host.h> 7#include <scsi/scsi_host.h>
7#include <scsi/srp.h> 8#include <scsi/srp.h>
diff --git a/mm/mmap.c b/mm/mmap.c
index d9c77b2dbe9d..ee2298936fe6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1043,6 +1043,46 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1043} 1043}
1044EXPORT_SYMBOL(do_mmap_pgoff); 1044EXPORT_SYMBOL(do_mmap_pgoff);
1045 1045
1046SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1047 unsigned long, prot, unsigned long, flags,
1048 unsigned long, fd, unsigned long, pgoff)
1049{
1050 struct file *file = NULL;
1051 unsigned long retval = -EBADF;
1052
1053 if (!(flags & MAP_ANONYMOUS)) {
1054 if (unlikely(flags & MAP_HUGETLB))
1055 return -EINVAL;
1056 file = fget(fd);
1057 if (!file)
1058 goto out;
1059 } else if (flags & MAP_HUGETLB) {
1060 struct user_struct *user = NULL;
1061 /*
1062 * VM_NORESERVE is used because the reservations will be
1063 * taken when vm_ops->mmap() is called
1064 * A dummy user value is used because we are not locking
1065 * memory so no accounting is necessary
1066 */
1067 len = ALIGN(len, huge_page_size(&default_hstate));
1068 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
1069 &user, HUGETLB_ANONHUGE_INODE);
1070 if (IS_ERR(file))
1071 return PTR_ERR(file);
1072 }
1073
1074 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1075
1076 down_write(&current->mm->mmap_sem);
1077 retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1078 up_write(&current->mm->mmap_sem);
1079
1080 if (file)
1081 fput(file);
1082out:
1083 return retval;
1084}
1085
1046/* 1086/*
1047 * Some shared mappings will want the pages marked read-only 1087 * Some shared mappings will want the pages marked read-only
1048 * to track write events. If so, we'll downgrade vm_page_prot 1088 * to track write events. If so, we'll downgrade vm_page_prot
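sys_mmap_pgoff() gives every architecture one common mmap entry point (the nommu copy follows below), including the MAP_HUGETLB path that backs an anonymous mapping with a hugetlbfs file. From userspace nothing changes; a minimal caller of that path looks roughly like this, assuming huge pages have already been reserved (e.g. via vm.nr_hugepages):

/* Userspace illustration of the MAP_HUGETLB path handled by the new syscall. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000	/* x86 value; assumption for older headers */
#endif

int main(void)
{
	size_t len = 2UL * 1024 * 1024;			/* one 2 MiB huge page */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");				/* e.g. no huge pages reserved */
		return EXIT_FAILURE;
	}
	munmap(p, len);
	return 0;
}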
diff --git a/mm/nommu.c b/mm/nommu.c
index 8687973462bb..6f9248f89bde 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1398,6 +1398,31 @@ error_getting_region:
1398} 1398}
1399EXPORT_SYMBOL(do_mmap_pgoff); 1399EXPORT_SYMBOL(do_mmap_pgoff);
1400 1400
1401SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1402 unsigned long, prot, unsigned long, flags,
1403 unsigned long, fd, unsigned long, pgoff)
1404{
1405 struct file *file = NULL;
1406 unsigned long retval = -EBADF;
1407
1408 if (!(flags & MAP_ANONYMOUS)) {
1409 file = fget(fd);
1410 if (!file)
1411 goto out;
1412 }
1413
1414 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1415
1416 down_write(&current->mm->mmap_sem);
1417 retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1418 up_write(&current->mm->mmap_sem);
1419
1420 if (file)
1421 fput(file);
1422out:
1423 return retval;
1424}
1425
1401/* 1426/*
1402 * split a vma into two pieces at address 'addr', a new vma is allocated either 1427 * split a vma into two pieces at address 'addr', a new vma is allocated either
1403 * for the first part or the tail. 1428 * for the first part or the tail.
diff --git a/mm/slab.c b/mm/slab.c
index 7d41f15b48d3..7451bdacaf18 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -654,7 +654,7 @@ static void init_node_lock_keys(int q)
654 654
655 l3 = s->cs_cachep->nodelists[q]; 655 l3 = s->cs_cachep->nodelists[q];
656 if (!l3 || OFF_SLAB(s->cs_cachep)) 656 if (!l3 || OFF_SLAB(s->cs_cachep))
657 return; 657 continue;
658 lockdep_set_class(&l3->list_lock, &on_slab_l3_key); 658 lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
659 alc = l3->alien; 659 alc = l3->alien;
660 /* 660 /*
@@ -665,7 +665,7 @@ static void init_node_lock_keys(int q)
665 * for alloc_alien_cache, 665 * for alloc_alien_cache,
666 */ 666 */
667 if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC) 667 if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
668 return; 668 continue;
669 for_each_node(r) { 669 for_each_node(r) {
670 if (alc[r]) 670 if (alc[r])
671 lockdep_set_class(&alc[r]->lock, 671 lockdep_set_class(&alc[r]->lock,
diff --git a/mm/util.c b/mm/util.c
index b377ce430803..7c35ad95f927 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -4,10 +4,6 @@
4#include <linux/module.h> 4#include <linux/module.h>
5#include <linux/err.h> 5#include <linux/err.h>
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/hugetlb.h>
8#include <linux/syscalls.h>
9#include <linux/mman.h>
10#include <linux/file.h>
11#include <asm/uaccess.h> 7#include <asm/uaccess.h>
12 8
13#define CREATE_TRACE_POINTS 9#define CREATE_TRACE_POINTS
@@ -272,46 +268,6 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start,
272} 268}
273EXPORT_SYMBOL_GPL(get_user_pages_fast); 269EXPORT_SYMBOL_GPL(get_user_pages_fast);
274 270
275SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
276 unsigned long, prot, unsigned long, flags,
277 unsigned long, fd, unsigned long, pgoff)
278{
279 struct file * file = NULL;
280 unsigned long retval = -EBADF;
281
282 if (!(flags & MAP_ANONYMOUS)) {
283 if (unlikely(flags & MAP_HUGETLB))
284 return -EINVAL;
285 file = fget(fd);
286 if (!file)
287 goto out;
288 } else if (flags & MAP_HUGETLB) {
289 struct user_struct *user = NULL;
290 /*
291 * VM_NORESERVE is used because the reservations will be
292 * taken when vm_ops->mmap() is called
293 * A dummy user value is used because we are not locking
294 * memory so no accounting is necessary
295 */
296 len = ALIGN(len, huge_page_size(&default_hstate));
297 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
298 &user, HUGETLB_ANONHUGE_INODE);
299 if (IS_ERR(file))
300 return PTR_ERR(file);
301 }
302
303 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
304
305 down_write(&current->mm->mmap_sem);
306 retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
307 up_write(&current->mm->mmap_sem);
308
309 if (file)
310 fput(file);
311out:
312 return retval;
313}
314
315/* Tracepoints definitions. */ 271/* Tracepoints definitions. */
316EXPORT_TRACEPOINT_SYMBOL(kmalloc); 272EXPORT_TRACEPOINT_SYMBOL(kmalloc);
317EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc); 273EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a23b45f08ec9..de0c2c726420 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -250,8 +250,7 @@ struct pktgen_dev {
250 __u64 count; /* Default No packets to send */ 250 __u64 count; /* Default No packets to send */
251 __u64 sofar; /* How many pkts we've sent so far */ 251 __u64 sofar; /* How many pkts we've sent so far */
252 __u64 tx_bytes; /* How many bytes we've transmitted */ 252 __u64 tx_bytes; /* How many bytes we've transmitted */
253 __u64 errors; /* Errors when trying to transmit, 253 __u64 errors; /* Errors when trying to transmit, */
254 pkts will be re-sent */
255 254
256 /* runtime counters relating to clone_skb */ 255 /* runtime counters relating to clone_skb */
257 256
@@ -3465,6 +3464,12 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3465 pkt_dev->seq_num++; 3464 pkt_dev->seq_num++;
3466 pkt_dev->tx_bytes += pkt_dev->last_pkt_size; 3465 pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
3467 break; 3466 break;
3467 case NET_XMIT_DROP:
3468 case NET_XMIT_CN:
3469 case NET_XMIT_POLICED:
3470 /* skb has been consumed */
3471 pkt_dev->errors++;
3472 break;
3468 default: /* Drivers are not supposed to return other values! */ 3473 default: /* Drivers are not supposed to return other values! */
3469 if (net_ratelimit()) 3474 if (net_ratelimit())
3470 pr_info("pktgen: %s xmit error: %d\n", 3475 pr_info("pktgen: %s xmit error: %d\n",
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5cdbc102a418..040c4f05b653 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1397,6 +1397,7 @@ static struct devinet_sysctl_table {
1397 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE, 1397 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
1398 "accept_source_route"), 1398 "accept_source_route"),
1399 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"), 1399 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
1400 DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
1400 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"), 1401 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
1401 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"), 1402 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
1402 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"), 1403 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 3323168ee52d..82dbf711d6d0 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -252,6 +252,8 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
252 no_addr = in_dev->ifa_list == NULL; 252 no_addr = in_dev->ifa_list == NULL;
253 rpf = IN_DEV_RPFILTER(in_dev); 253 rpf = IN_DEV_RPFILTER(in_dev);
254 accept_local = IN_DEV_ACCEPT_LOCAL(in_dev); 254 accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
255 if (mark && !IN_DEV_SRC_VMARK(in_dev))
256 fl.mark = 0;
255 } 257 }
256 rcu_read_unlock(); 258 rcu_read_unlock();
257 259
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 3787455fb696..d7dcee680728 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -34,9 +34,28 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
34 34
35 ht_cap->ht_supported = true; 35 ht_cap->ht_supported = true;
36 36
37 ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) & sband->ht_cap.cap; 37 /*
38 ht_cap->cap &= ~IEEE80211_HT_CAP_SM_PS; 38 * The bits listed in this expression should be
39 ht_cap->cap |= sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS; 39 * the same for the peer and us, if the station
40 * advertises more than we do, we can't use those, thus
41 * we mask them out.
42 */
43 ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) &
44 (sband->ht_cap.cap |
45 ~(IEEE80211_HT_CAP_LDPC_CODING |
46 IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
47 IEEE80211_HT_CAP_GRN_FLD |
48 IEEE80211_HT_CAP_SGI_20 |
49 IEEE80211_HT_CAP_SGI_40 |
50 IEEE80211_HT_CAP_DSSSCCK40));
51 /*
52 * The STBC bits are asymmetric -- if we don't have
53 * TX then mask out the peer's RX and vice versa.
54 */
55 if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
56 ht_cap->cap &= ~IEEE80211_HT_CAP_RX_STBC;
57 if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC))
58 ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
40 59
41 ampdu_info = ht_cap_ie->ampdu_params_info; 60 ampdu_info = ht_cap_ie->ampdu_params_info;
42 ht_cap->ampdu_factor = 61 ht_cap->ampdu_factor =
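The new expression keeps a peer HT capability bit only if we advertise it too, for the bits that must match on both ends, and crosses TX/RX for STBC since those pair asymmetrically. A standalone illustration over a few of the bits (constants mirror include/linux/ieee80211.h; the scenario is made up):

/* Standalone illustration; the constants mirror include/linux/ieee80211.h. */
#include <stdio.h>

#define HT_CAP_SGI_20   0x0020
#define HT_CAP_SGI_40   0x0040
#define HT_CAP_TX_STBC  0x0080
#define HT_CAP_RX_STBC  0x0300

int main(void)
{
	unsigned int ours = HT_CAP_SGI_20 | HT_CAP_TX_STBC;	/* what we support */
	unsigned int peer = HT_CAP_SGI_20 | HT_CAP_SGI_40 | HT_CAP_RX_STBC;
	unsigned int symmetric = HT_CAP_SGI_20 | HT_CAP_SGI_40;

	/* keep symmetric bits only where both sides advertise them */
	unsigned int cap = peer & (ours | ~symmetric);

	/* STBC pairs asymmetrically: our TX matches the peer's RX and vice versa */
	if (!(ours & HT_CAP_TX_STBC))
		cap &= ~HT_CAP_RX_STBC;
	if (!(ours & HT_CAP_RX_STBC))
		cap &= ~HT_CAP_TX_STBC;

	printf("0x%04x\n", cap);	/* SGI_20 and RX_STBC kept, SGI_40 dropped */
	return 0;
}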
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 10d13856f86c..1f2db647bb5c 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -382,6 +382,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
382struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, 382struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
383 u8 *bssid,u8 *addr, u32 supp_rates) 383 u8 *bssid,u8 *addr, u32 supp_rates)
384{ 384{
385 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
385 struct ieee80211_local *local = sdata->local; 386 struct ieee80211_local *local = sdata->local;
386 struct sta_info *sta; 387 struct sta_info *sta;
387 int band = local->hw.conf.channel->band; 388 int band = local->hw.conf.channel->band;
@@ -397,6 +398,9 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
397 return NULL; 398 return NULL;
398 } 399 }
399 400
401 if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH)
402 return NULL;
403
400 if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) 404 if (compare_ether_addr(bssid, sdata->u.ibss.bssid))
401 return NULL; 405 return NULL;
402 406
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 8116d1a96a4a..0d2d94881f1f 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -515,6 +515,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
515 * and we need some headroom for passing the frame to monitor 515 * and we need some headroom for passing the frame to monitor
516 * interfaces, but never both at the same time. 516 * interfaces, but never both at the same time.
517 */ 517 */
518 BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM !=
519 sizeof(struct ieee80211_tx_status_rtap_hdr));
518 local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom, 520 local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom,
519 sizeof(struct ieee80211_tx_status_rtap_hdr)); 521 sizeof(struct ieee80211_tx_status_rtap_hdr));
520 522
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index d8d50fb5e823..c79e59f82fd9 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -915,6 +915,14 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
915 sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | 915 sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
916 IEEE80211_STA_BEACON_POLL); 916 IEEE80211_STA_BEACON_POLL);
917 917
918 /*
919 * Always handle WMM once after association regardless
920 * of the first value the AP uses. Setting -1 here has
921 * that effect because the AP value is an unsigned
922 * 4-bit value.
923 */
924 sdata->u.mgd.wmm_last_param_set = -1;
925
918 ieee80211_led_assoc(local, 1); 926 ieee80211_led_assoc(local, 1);
919 927
920 sdata->vif.bss_conf.assoc = 1; 928 sdata->vif.bss_conf.assoc = 1;
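Seeding wmm_last_param_set with -1 works because the AP's parameter-set count is an unsigned 4-bit field (0-15), so the first comparison after (re)association can never match and the WMM parameters are programmed at least once. A tiny standalone illustration of the sentinel:

/* Standalone illustration: -1 never matches an unsigned 4-bit count. */
#include <stdio.h>

int main(void)
{
	int cached = -1;		/* value seeded at association time */
	unsigned int count = 0;		/* parameter-set count from the AP, 0..15 */

	if (cached != (int)count)	/* always true the first time around */
		cached = count;		/* ...so the WMM parameters get applied */

	printf("%d\n", cached);
	return 0;
}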
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8834cc93c716..27ceaefd7bc8 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1419,6 +1419,10 @@ static bool need_dynamic_ps(struct ieee80211_local *local)
1419 if (!local->ps_sdata) 1419 if (!local->ps_sdata)
1420 return false; 1420 return false;
1421 1421
1422 /* No point if we're going to suspend */
1423 if (local->quiescing)
1424 return false;
1425
1422 return true; 1426 return true;
1423} 1427}
1424 1428
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 78a6e924c7e1..dc76267e436e 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1039,7 +1039,19 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1039 1039
1040 /* restart hardware */ 1040 /* restart hardware */
1041 if (local->open_count) { 1041 if (local->open_count) {
1042 /*
1043 * Upon resume hardware can sometimes be goofy due to
1044 * various platform / driver / bus issues, so restarting
1045 * the device may at times not work immediately. Propagate
1046 * the error.
1047 */
1042 res = drv_start(local); 1048 res = drv_start(local);
1049 if (res) {
1050 WARN(local->suspended, "Hardware became unavailable "
1051      "upon resume. This could be a software issue "
1052      "prior to suspend or a hardware issue\n");
1053 return res;
1054 }
1043 1055
1044 ieee80211_led_radio(local, true); 1056 ieee80211_led_radio(local, true);
1045 } 1057 }
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 1001db4912f7..82e6002c8d67 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -93,7 +93,18 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
93 } 93 }
94 } 94 }
95 95
96 WARN_ON(!bss); 96 /*
97 * We might be coming here because the driver reported
98 * a successful association at the same time as the
99 * user requested a deauth. In that case, we will have
100 * removed the BSS from the auth_bsses list due to the
101 * deauth request when the assoc response makes it. If
102 * the two code paths acquire the lock the other way
103 * around, that's just the standard situation of a
104 * deauth being requested while connected.
105 */
106 if (!bss)
107 goto out;
97 } else if (wdev->conn) { 108 } else if (wdev->conn) {
98 cfg80211_sme_failed_assoc(wdev); 109 cfg80211_sme_failed_assoc(wdev);
99 /* 110 /*
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 12dfa62aad18..0c2cbbebca95 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -601,7 +601,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
601 struct cfg80211_registered_device *rdev; 601 struct cfg80211_registered_device *rdev;
602 struct wiphy *wiphy; 602 struct wiphy *wiphy;
603 struct iw_scan_req *wreq = NULL; 603 struct iw_scan_req *wreq = NULL;
604 struct cfg80211_scan_request *creq; 604 struct cfg80211_scan_request *creq = NULL;
605 int i, err, n_channels = 0; 605 int i, err, n_channels = 0;
606 enum ieee80211_band band; 606 enum ieee80211_band band;
607 607
@@ -694,8 +694,10 @@ int cfg80211_wext_siwscan(struct net_device *dev,
694 /* translate "Scan for SSID" request */ 694 /* translate "Scan for SSID" request */
695 if (wreq) { 695 if (wreq) {
696 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { 696 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
697 if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) 697 if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) {
698 return -EINVAL; 698 err = -EINVAL;
699 goto out;
700 }
699 memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len); 701 memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len);
700 creq->ssids[0].ssid_len = wreq->essid_len; 702 creq->ssids[0].ssid_len = wreq->essid_len;
701 } 703 }
@@ -707,12 +709,15 @@ int cfg80211_wext_siwscan(struct net_device *dev,
707 err = rdev->ops->scan(wiphy, dev, creq); 709 err = rdev->ops->scan(wiphy, dev, creq);
708 if (err) { 710 if (err) {
709 rdev->scan_req = NULL; 711 rdev->scan_req = NULL;
710 kfree(creq); 712 /* creq will be freed below */
711 } else { 713 } else {
712 nl80211_send_scan_start(rdev, dev); 714 nl80211_send_scan_start(rdev, dev);
715 /* creq now owned by driver */
716 creq = NULL;
713 dev_hold(dev); 717 dev_hold(dev);
714 } 718 }
715 out: 719 out:
720 kfree(creq);
716 cfg80211_unlock_rdev(rdev); 721 cfg80211_unlock_rdev(rdev);
717 return err; 722 return err;
718} 723}
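The scan fix funnels every exit through a single kfree(creq) and clears the pointer once rdev->ops->scan() has taken ownership, so the success path frees nothing and every error path, including the new -EINVAL case, frees exactly once. A hedged standalone sketch of that ownership-transfer pattern (all names are illustrative):

/* Hedged sketch of the single-exit ownership pattern; names are illustrative. */
#include <stdio.h>
#include <stdlib.h>

struct request { int n_channels; };

/* Stand-in for the driver hook: it takes ownership of req on success. */
static int hand_to_driver(struct request *req)
{
	free(req);			/* pretend the driver keeps/releases it */
	return 0;
}

static int build_and_submit(int valid)
{
	struct request *req = calloc(1, sizeof(*req));
	int err = 0;

	if (!req)
		return -1;
	if (!valid) {
		err = -1;
		goto out;		/* no early return: always hit the cleanup */
	}
	err = hand_to_driver(req);
	if (!err)
		req = NULL;		/* ownership transferred; shared free is a no-op */
out:
	free(req);			/* frees only while we still own the request */
	return err;
}

int main(void)
{
	printf("%d %d\n", build_and_submit(0), build_and_submit(1));
	return 0;
}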
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index cb81ca35b0d6..4725a549ad4d 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1445,7 +1445,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1445 if (!dev) 1445 if (!dev)
1446 goto free_dst; 1446 goto free_dst;
1447 1447
1448 /* Copy neighbout for reachability confirmation */ 1448 /* Copy neighbour for reachability confirmation */
1449 dst0->neighbour = neigh_clone(dst->neighbour); 1449 dst0->neighbour = neigh_clone(dst->neighbour);
1450 1450
1451 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len); 1451 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index c5699863643b..656e474dca47 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -441,6 +441,7 @@ static int aaci_pcm_hw_params(struct snd_pcm_substream *substream,
441 struct snd_pcm_hw_params *params) 441 struct snd_pcm_hw_params *params)
442{ 442{
443 int err; 443 int err;
444 struct aaci *aaci = substream->private_data;
444 445
445 aaci_pcm_hw_free(substream); 446 aaci_pcm_hw_free(substream);
446 if (aacirun->pcm_open) { 447 if (aacirun->pcm_open) {
@@ -560,7 +561,6 @@ static int aaci_pcm_open(struct snd_pcm_substream *substream)
560static int aaci_pcm_playback_hw_params(struct snd_pcm_substream *substream, 561static int aaci_pcm_playback_hw_params(struct snd_pcm_substream *substream,
561 struct snd_pcm_hw_params *params) 562 struct snd_pcm_hw_params *params)
562{ 563{
563 struct aaci *aaci = substream->private_data;
564 struct aaci_runtime *aacirun = substream->runtime->private_data; 564 struct aaci_runtime *aacirun = substream->runtime->private_data;
565 unsigned int channels = params_channels(params); 565 unsigned int channels = params_channels(params);
566 int ret; 566 int ret;
@@ -659,7 +659,6 @@ static struct snd_pcm_ops aaci_playback_ops = {
659static int aaci_pcm_capture_hw_params(struct snd_pcm_substream *substream, 659static int aaci_pcm_capture_hw_params(struct snd_pcm_substream *substream,
660 struct snd_pcm_hw_params *params) 660 struct snd_pcm_hw_params *params)
661{ 661{
662 struct aaci *aaci = substream->private_data;
663 struct aaci_runtime *aacirun = substream->runtime->private_data; 662 struct aaci_runtime *aacirun = substream->runtime->private_data;
664 int ret; 663 int ret;
665 664
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 29ab46a12e11..25b0641e6b8c 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1918,13 +1918,13 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
1918 1918
1919 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE, 1919 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
1920 hw->rate_min, hw->rate_max); 1920 hw->rate_min, hw->rate_max);
1921 if (err < 0) 1921 if (err < 0)
1922 return err; 1922 return err;
1923 1923
1924 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 1924 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
1925 hw->period_bytes_min, hw->period_bytes_max); 1925 hw->period_bytes_min, hw->period_bytes_max);
1926 if (err < 0) 1926 if (err < 0)
1927 return err; 1927 return err;
1928 1928
1929 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS, 1929 err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
1930 hw->periods_min, hw->periods_max); 1930 hw->periods_min, hw->periods_max);
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index 5fe34a8d8c81..e4581a42ace5 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -42,7 +42,7 @@ static void snd_hda_generate_beep(struct work_struct *work)
42 return; 42 return;
43 43
44 /* generate tone */ 44 /* generate tone */
45 snd_hda_codec_write_cache(codec, beep->nid, 0, 45 snd_hda_codec_write(codec, beep->nid, 0,
46 AC_VERB_SET_BEEP_CONTROL, beep->tone); 46 AC_VERB_SET_BEEP_CONTROL, beep->tone);
47} 47}
48 48
@@ -119,7 +119,7 @@ static void snd_hda_do_detach(struct hda_beep *beep)
119 beep->dev = NULL; 119 beep->dev = NULL;
120 cancel_work_sync(&beep->beep_work); 120 cancel_work_sync(&beep->beep_work);
121 /* turn off beep for sure */ 121 /* turn off beep for sure */
122 snd_hda_codec_write_cache(beep->codec, beep->nid, 0, 122 snd_hda_codec_write(beep->codec, beep->nid, 0,
123 AC_VERB_SET_BEEP_CONTROL, 0); 123 AC_VERB_SET_BEEP_CONTROL, 0);
124} 124}
125 125
@@ -192,7 +192,7 @@ int snd_hda_enable_beep_device(struct hda_codec *codec, int enable)
192 beep->enabled = enable; 192 beep->enabled = enable;
193 if (!enable) { 193 if (!enable) {
194 /* turn off beep */ 194 /* turn off beep */
195 snd_hda_codec_write_cache(beep->codec, beep->nid, 0, 195 snd_hda_codec_write(beep->codec, beep->nid, 0,
196 AC_VERB_SET_BEEP_CONTROL, 0); 196 AC_VERB_SET_BEEP_CONTROL, 0);
197 } 197 }
198 if (beep->mode == HDA_BEEP_MODE_SWREG) { 198 if (beep->mode == HDA_BEEP_MODE_SWREG) {
@@ -239,8 +239,12 @@ int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
239 mutex_init(&beep->mutex); 239 mutex_init(&beep->mutex);
240 240
241 if (beep->mode == HDA_BEEP_MODE_ON) { 241 if (beep->mode == HDA_BEEP_MODE_ON) {
242 beep->enabled = 1; 242 int err = snd_hda_do_attach(beep);
243 snd_hda_do_register(&beep->register_work); 243 if (err < 0) {
244 kfree(beep);
245 codec->beep = NULL;
246 return err;
247 }
244 } 248 }
245 249
246 return 0; 250 return 0;
@@ -253,7 +257,7 @@ void snd_hda_detach_beep_device(struct hda_codec *codec)
253 if (beep) { 257 if (beep) {
254 cancel_work_sync(&beep->register_work); 258 cancel_work_sync(&beep->register_work);
255 cancel_delayed_work(&beep->unregister_work); 259 cancel_delayed_work(&beep->unregister_work);
256 if (beep->enabled) 260 if (beep->dev)
257 snd_hda_do_detach(beep); 261 snd_hda_do_detach(beep);
258 codec->beep = NULL; 262 codec->beep = NULL;
259 kfree(beep); 263 kfree(beep);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 950ee5cfcacf..f98b47cd6cfb 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1327,11 +1327,13 @@ EXPORT_SYMBOL_HDA(snd_hda_query_pin_caps);
1327 */ 1327 */
1328u32 snd_hda_pin_sense(struct hda_codec *codec, hda_nid_t nid) 1328u32 snd_hda_pin_sense(struct hda_codec *codec, hda_nid_t nid)
1329{ 1329{
1330 u32 pincap = snd_hda_query_pin_caps(codec, nid); 1330 u32 pincap;
1331
1332 if (pincap & AC_PINCAP_TRIG_REQ) /* need trigger? */
1333 snd_hda_codec_read(codec, nid, 0, AC_VERB_SET_PIN_SENSE, 0);
1334 1331
1332 if (!codec->no_trigger_sense) {
1333 pincap = snd_hda_query_pin_caps(codec, nid);
1334 if (pincap & AC_PINCAP_TRIG_REQ) /* need trigger? */
1335 snd_hda_codec_read(codec, nid, 0, AC_VERB_SET_PIN_SENSE, 0);
1336 }
1335 return snd_hda_codec_read(codec, nid, 0, 1337 return snd_hda_codec_read(codec, nid, 0,
1336 AC_VERB_GET_PIN_SENSE, 0); 1338 AC_VERB_GET_PIN_SENSE, 0);
1337} 1339}
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 1d541b7f5547..0a770a28e71f 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -817,6 +817,7 @@ struct hda_codec {
817 unsigned int pin_amp_workaround:1; /* pin out-amp takes index 817 unsigned int pin_amp_workaround:1; /* pin out-amp takes index
818 * (e.g. Conexant codecs) 818 * (e.g. Conexant codecs)
819 */ 819 */
820 unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */
820#ifdef CONFIG_SND_HDA_POWER_SAVE 821#ifdef CONFIG_SND_HDA_POWER_SAVE
821 unsigned int power_on :1; /* current (global) power-state */ 822 unsigned int power_on :1; /* current (global) power-state */
822 unsigned int power_transition :1; /* power-state in transition */ 823 unsigned int power_transition :1; /* power-state in transition */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index ff8ad46cc50e..ec9c348336cc 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -356,6 +356,7 @@ struct azx_dev {
356 */ 356 */
357 unsigned char stream_tag; /* assigned stream */ 357 unsigned char stream_tag; /* assigned stream */
358 unsigned char index; /* stream index */ 358 unsigned char index; /* stream index */
359 int device; /* last device number assigned to */
359 360
360 unsigned int opened :1; 361 unsigned int opened :1;
361 unsigned int running :1; 362 unsigned int running :1;
@@ -1441,10 +1442,13 @@ static int __devinit azx_codec_configure(struct azx *chip)
1441 */ 1442 */
1442 1443
1443/* assign a stream for the PCM */ 1444/* assign a stream for the PCM */
1444static inline struct azx_dev *azx_assign_device(struct azx *chip, int stream) 1445static inline struct azx_dev *
1446azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
1445{ 1447{
1446 int dev, i, nums; 1448 int dev, i, nums;
1447 if (stream == SNDRV_PCM_STREAM_PLAYBACK) { 1449 struct azx_dev *res = NULL;
1450
1451 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1448 dev = chip->playback_index_offset; 1452 dev = chip->playback_index_offset;
1449 nums = chip->playback_streams; 1453 nums = chip->playback_streams;
1450 } else { 1454 } else {
@@ -1453,10 +1457,15 @@ static inline struct azx_dev *azx_assign_device(struct azx *chip, int stream)
1453 } 1457 }
1454 for (i = 0; i < nums; i++, dev++) 1458 for (i = 0; i < nums; i++, dev++)
1455 if (!chip->azx_dev[dev].opened) { 1459 if (!chip->azx_dev[dev].opened) {
1456 chip->azx_dev[dev].opened = 1; 1460 res = &chip->azx_dev[dev];
1457 return &chip->azx_dev[dev]; 1461 if (res->device == substream->pcm->device)
1462 break;
1458 } 1463 }
1459 return NULL; 1464 if (res) {
1465 res->opened = 1;
1466 res->device = substream->pcm->device;
1467 }
1468 return res;
1460} 1469}
1461 1470
1462/* release the assigned stream */ 1471/* release the assigned stream */
@@ -1505,7 +1514,7 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
1505 int err; 1514 int err;
1506 1515
1507 mutex_lock(&chip->open_mutex); 1516 mutex_lock(&chip->open_mutex);
1508 azx_dev = azx_assign_device(chip, substream->stream); 1517 azx_dev = azx_assign_device(chip, substream);
1509 if (azx_dev == NULL) { 1518 if (azx_dev == NULL) {
1510 mutex_unlock(&chip->open_mutex); 1519 mutex_unlock(&chip->open_mutex);
1511 return -EBUSY; 1520 return -EBUSY;
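azx_assign_device() now records which PCM device number last used each hardware stream and, among the currently free streams, prefers the one that device used before, falling back to any free stream otherwise. A standalone sketch of just that selection logic (types and values are made up):

/* Standalone sketch of "prefer the stream this PCM device used last". */
#include <stdio.h>

struct stream { int opened; int last_device; };

static struct stream *pick_stream(struct stream *s, int n, int device)
{
	struct stream *res = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (s[i].opened)
			continue;
		res = &s[i];			/* remember some free stream */
		if (res->last_device == device)
			break;			/* exact match wins immediately */
	}
	if (res) {
		res->opened = 1;
		res->last_device = device;
	}
	return res;
}

int main(void)
{
	struct stream s[3] = { { 0, 0 }, { 0, 2 }, { 0, 1 } };

	printf("picked index %ld\n", (long)(pick_stream(s, 3, 2) - s));	/* prints 1 */
	return 0;
}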
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 1a36137e13ec..69a941c7b158 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -1186,6 +1186,8 @@ static int patch_ad1986a(struct hda_codec *codec)
1186 */ 1186 */
1187 spec->multiout.no_share_stream = 1; 1187 spec->multiout.no_share_stream = 1;
1188 1188
1189 codec->no_trigger_sense = 1;
1190
1189 return 0; 1191 return 0;
1190} 1192}
1191 1193
@@ -1371,6 +1373,8 @@ static int patch_ad1983(struct hda_codec *codec)
1371 1373
1372 codec->patch_ops = ad198x_patch_ops; 1374 codec->patch_ops = ad198x_patch_ops;
1373 1375
1376 codec->no_trigger_sense = 1;
1377
1374 return 0; 1378 return 0;
1375} 1379}
1376 1380
@@ -1813,6 +1817,9 @@ static int patch_ad1981(struct hda_codec *codec)
1813 codec->patch_ops.unsol_event = ad1981_hp_unsol_event; 1817 codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
1814 break; 1818 break;
1815 } 1819 }
1820
1821 codec->no_trigger_sense = 1;
1822
1816 return 0; 1823 return 0;
1817} 1824}
1818 1825
@@ -3118,6 +3125,8 @@ static int patch_ad1988(struct hda_codec *codec)
3118#endif 3125#endif
3119 spec->vmaster_nid = 0x04; 3126 spec->vmaster_nid = 0x04;
3120 3127
3128 codec->no_trigger_sense = 1;
3129
3121 return 0; 3130 return 0;
3122} 3131}
3123 3132
@@ -3330,6 +3339,8 @@ static int patch_ad1884(struct hda_codec *codec)
3330 3339
3331 codec->patch_ops = ad198x_patch_ops; 3340 codec->patch_ops = ad198x_patch_ops;
3332 3341
3342 codec->no_trigger_sense = 1;
3343
3333 return 0; 3344 return 0;
3334} 3345}
3335 3346
@@ -4287,6 +4298,8 @@ static int patch_ad1884a(struct hda_codec *codec)
4287 break; 4298 break;
4288 } 4299 }
4289 4300
4301 codec->no_trigger_sense = 1;
4302
4290 return 0; 4303 return 0;
4291} 4304}
4292 4305
@@ -4623,6 +4636,9 @@ static int patch_ad1882(struct hda_codec *codec)
4623 spec->mixers[2] = ad1882_6stack_mixers; 4636 spec->mixers[2] = ad1882_6stack_mixers;
4624 break; 4637 break;
4625 } 4638 }
4639
4640 codec->no_trigger_sense = 1;
4641
4626 return 0; 4642 return 0;
4627} 4643}
4628 4644
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index eeda7beeb57a..2291a8396817 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -4453,14 +4453,7 @@ static inline int get_pin_presence(struct hda_codec *codec, hda_nid_t nid)
4453{ 4453{
4454 if (!nid) 4454 if (!nid)
4455 return 0; 4455 return 0;
4456 /* NOTE: we can't use snd_hda_jack_detect() here because STAC/IDT 4456 return snd_hda_jack_detect(codec, nid);
4457 * codecs behave wrongly when SET_PIN_SENSE is triggered, although
4458 * the pincap gives TRIG_REQ bit.
4459 */
4460 if (snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_SENSE, 0) &
4461 AC_PINSENSE_PRESENCE)
4462 return 1;
4463 return 0;
4464} 4457}
4465 4458
4466static void stac92xx_line_out_detect(struct hda_codec *codec, 4459static void stac92xx_line_out_detect(struct hda_codec *codec,
@@ -4962,6 +4955,7 @@ static int patch_stac9200(struct hda_codec *codec)
4962 if (spec == NULL) 4955 if (spec == NULL)
4963 return -ENOMEM; 4956 return -ENOMEM;
4964 4957
4958 codec->no_trigger_sense = 1;
4965 codec->spec = spec; 4959 codec->spec = spec;
4966 spec->num_pins = ARRAY_SIZE(stac9200_pin_nids); 4960 spec->num_pins = ARRAY_SIZE(stac9200_pin_nids);
4967 spec->pin_nids = stac9200_pin_nids; 4961 spec->pin_nids = stac9200_pin_nids;
@@ -5024,6 +5018,7 @@ static int patch_stac925x(struct hda_codec *codec)
5024 if (spec == NULL) 5018 if (spec == NULL)
5025 return -ENOMEM; 5019 return -ENOMEM;
5026 5020
5021 codec->no_trigger_sense = 1;
5027 codec->spec = spec; 5022 codec->spec = spec;
5028 spec->num_pins = ARRAY_SIZE(stac925x_pin_nids); 5023 spec->num_pins = ARRAY_SIZE(stac925x_pin_nids);
5029 spec->pin_nids = stac925x_pin_nids; 5024 spec->pin_nids = stac925x_pin_nids;
@@ -5108,6 +5103,7 @@ static int patch_stac92hd73xx(struct hda_codec *codec)
5108 if (spec == NULL) 5103 if (spec == NULL)
5109 return -ENOMEM; 5104 return -ENOMEM;
5110 5105
5106 codec->no_trigger_sense = 1;
5111 codec->spec = spec; 5107 codec->spec = spec;
5112 codec->slave_dig_outs = stac92hd73xx_slave_dig_outs; 5108 codec->slave_dig_outs = stac92hd73xx_slave_dig_outs;
5113 spec->num_pins = ARRAY_SIZE(stac92hd73xx_pin_nids); 5109 spec->num_pins = ARRAY_SIZE(stac92hd73xx_pin_nids);
@@ -5255,6 +5251,7 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
5255 if (spec == NULL) 5251 if (spec == NULL)
5256 return -ENOMEM; 5252 return -ENOMEM;
5257 5253
5254 codec->no_trigger_sense = 1;
5258 codec->spec = spec; 5255 codec->spec = spec;
5259 codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs; 5256 codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs;
5260 spec->digbeep_nid = 0x21; 5257 spec->digbeep_nid = 0x21;
@@ -5418,6 +5415,7 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
5418 if (spec == NULL) 5415 if (spec == NULL)
5419 return -ENOMEM; 5416 return -ENOMEM;
5420 5417
5418 codec->no_trigger_sense = 1;
5421 codec->spec = spec; 5419 codec->spec = spec;
5422 codec->patch_ops = stac92xx_patch_ops; 5420 codec->patch_ops = stac92xx_patch_ops;
5423 spec->num_pins = STAC92HD71BXX_NUM_PINS; 5421 spec->num_pins = STAC92HD71BXX_NUM_PINS;
@@ -5661,6 +5659,7 @@ static int patch_stac922x(struct hda_codec *codec)
5661 if (spec == NULL) 5659 if (spec == NULL)
5662 return -ENOMEM; 5660 return -ENOMEM;
5663 5661
5662 codec->no_trigger_sense = 1;
5664 codec->spec = spec; 5663 codec->spec = spec;
5665 spec->num_pins = ARRAY_SIZE(stac922x_pin_nids); 5664 spec->num_pins = ARRAY_SIZE(stac922x_pin_nids);
5666 spec->pin_nids = stac922x_pin_nids; 5665 spec->pin_nids = stac922x_pin_nids;
@@ -5764,6 +5763,7 @@ static int patch_stac927x(struct hda_codec *codec)
5764 if (spec == NULL) 5763 if (spec == NULL)
5765 return -ENOMEM; 5764 return -ENOMEM;
5766 5765
5766 codec->no_trigger_sense = 1;
5767 codec->spec = spec; 5767 codec->spec = spec;
5768 codec->slave_dig_outs = stac927x_slave_dig_outs; 5768 codec->slave_dig_outs = stac927x_slave_dig_outs;
5769 spec->num_pins = ARRAY_SIZE(stac927x_pin_nids); 5769 spec->num_pins = ARRAY_SIZE(stac927x_pin_nids);
@@ -5898,6 +5898,7 @@ static int patch_stac9205(struct hda_codec *codec)
5898 if (spec == NULL) 5898 if (spec == NULL)
5899 return -ENOMEM; 5899 return -ENOMEM;
5900 5900
5901 codec->no_trigger_sense = 1;
5901 codec->spec = spec; 5902 codec->spec = spec;
5902 spec->num_pins = ARRAY_SIZE(stac9205_pin_nids); 5903 spec->num_pins = ARRAY_SIZE(stac9205_pin_nids);
5903 spec->pin_nids = stac9205_pin_nids; 5904 spec->pin_nids = stac9205_pin_nids;
@@ -6053,6 +6054,7 @@ static int patch_stac9872(struct hda_codec *codec)
6053 spec = kzalloc(sizeof(*spec), GFP_KERNEL); 6054 spec = kzalloc(sizeof(*spec), GFP_KERNEL);
6054 if (spec == NULL) 6055 if (spec == NULL)
6055 return -ENOMEM; 6056 return -ENOMEM;
6057 codec->no_trigger_sense = 1;
6056 codec->spec = spec; 6058 codec->spec = spec;
6057 spec->num_pins = ARRAY_SIZE(stac9872_pin_nids); 6059 spec->num_pins = ARRAY_SIZE(stac9872_pin_nids);
6058 spec->pin_nids = stac9872_pin_nids; 6060 spec->pin_nids = stac9872_pin_nids;
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index fd9c097b760a..f73de631e3ee 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -508,8 +508,8 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
508 struct kvm_assigned_dev_kernel *match; 508 struct kvm_assigned_dev_kernel *match;
509 struct pci_dev *dev; 509 struct pci_dev *dev;
510 510
511 down_read(&kvm->slots_lock);
512 mutex_lock(&kvm->lock); 511 mutex_lock(&kvm->lock);
512 down_read(&kvm->slots_lock);
513 513
514 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, 514 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
515 assigned_dev->assigned_dev_id); 515 assigned_dev->assigned_dev_id);
@@ -573,8 +573,8 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
573 } 573 }
574 574
575out: 575out:
576 mutex_unlock(&kvm->lock);
577 up_read(&kvm->slots_lock); 576 up_read(&kvm->slots_lock);
577 mutex_unlock(&kvm->lock);
578 return r; 578 return r;
579out_list_del: 579out_list_del:
580 list_del(&match->list); 580 list_del(&match->list);
@@ -585,8 +585,8 @@ out_put:
585 pci_dev_put(dev); 585 pci_dev_put(dev);
586out_free: 586out_free:
587 kfree(match); 587 kfree(match);
588 mutex_unlock(&kvm->lock);
589 up_read(&kvm->slots_lock); 588 up_read(&kvm->slots_lock);
589 mutex_unlock(&kvm->lock);
590 return r; 590 return r;
591} 591}
592 592
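The ioctl now takes kvm->lock before kvm->slots_lock and drops them in the reverse order, matching the lock-ordering comment updated in kvm_main.c below. A generic standalone illustration of keeping such a fixed two-level hierarchy; plain mutexes are used here for brevity, whereas slots_lock is really a rw-semaphore taken for read:

/* Generic illustration of a fixed two-level lock hierarchy. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;	/* think kvm->lock  */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;	/* think slots_lock */

static void assign_device(void)
{
	pthread_mutex_lock(&outer);		/* always taken first  */
	pthread_mutex_lock(&inner);		/* always taken second */

	/* ... look up and attach the device ... */

	pthread_mutex_unlock(&inner);		/* released in reverse order */
	pthread_mutex_unlock(&outer);
}

int main(void)
{
	assign_device();
	puts("done");
	return 0;
}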
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b5af88167613..a944be392d6e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -64,7 +64,7 @@ MODULE_LICENSE("GPL");
64/* 64/*
65 * Ordering of locks: 65 * Ordering of locks:
66 * 66 *
67 * kvm->slots_lock --> kvm->lock --> kvm->irq_lock 67 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
68 */ 68 */
69 69
70DEFINE_SPINLOCK(kvm_lock); 70DEFINE_SPINLOCK(kvm_lock);
@@ -406,8 +406,11 @@ static struct kvm *kvm_create_vm(void)
406out: 406out:
407 return kvm; 407 return kvm;
408 408
409#if defined(KVM_COALESCED_MMIO_PAGE_OFFSET) || \
410 (defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER))
409out_err: 411out_err:
410 hardware_disable_all(); 412 hardware_disable_all();
413#endif
411out_err_nodisable: 414out_err_nodisable:
412 kfree(kvm); 415 kfree(kvm);
413 return ERR_PTR(r); 416 return ERR_PTR(r);