aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/char/agp/intel-agp.c21
-rw-r--r--drivers/firewire/fw-card.c149
-rw-r--r--drivers/firewire/fw-cdev.c1044
-rw-r--r--drivers/firewire/fw-device.c203
-rw-r--r--drivers/firewire/fw-device.h23
-rw-r--r--drivers/firewire/fw-iso.c227
-rw-r--r--drivers/firewire/fw-ohci.c260
-rw-r--r--drivers/firewire/fw-sbp2.c57
-rw-r--r--drivers/firewire/fw-topology.c29
-rw-r--r--drivers/firewire/fw-topology.h19
-rw-r--r--drivers/firewire/fw-transaction.c185
-rw-r--r--drivers/firewire/fw-transaction.h138
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/drm_debugfs.c235
-rw-r--r--drivers/gpu/drm/drm_drv.c12
-rw-r--r--drivers/gpu/drm/drm_info.c328
-rw-r--r--drivers/gpu/drm/drm_proc.c721
-rw-r--r--drivers/gpu/drm/drm_stub.c15
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c116
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h21
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c898
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debugfs.c257
-rw-r--r--drivers/gpu/drm/i915/i915_gem_proc.c334
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c31
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h22
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h12
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c66
-rw-r--r--drivers/gpu/drm/i915/intel_display.c406
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c2
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c148
-rw-r--r--drivers/ieee1394/csr.c8
-rw-r--r--drivers/ieee1394/dv1394.c2
-rw-r--r--drivers/ieee1394/eth1394.c4
-rw-r--r--drivers/ieee1394/highlevel.c2
-rw-r--r--drivers/ieee1394/nodemgr.c4
-rw-r--r--drivers/ieee1394/nodemgr.h2
-rw-r--r--drivers/ieee1394/raw1394.c14
-rw-r--r--drivers/ieee1394/sbp2.c9
-rw-r--r--drivers/ieee1394/video1394.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c2
-rw-r--r--drivers/media/dvb/firewire/firedtv-avc.c10
-rw-r--r--drivers/mtd/mtdsuper.c7
-rw-r--r--drivers/net/3c503.c3
-rw-r--r--drivers/net/Kconfig8
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/ac3200.c22
-rw-r--r--drivers/net/appletalk/cops.c45
-rw-r--r--drivers/net/appletalk/ltpc.c38
-rw-r--r--drivers/net/at1700.c19
-rw-r--r--drivers/net/benet/be_main.c28
-rw-r--r--drivers/net/cs89x0.c28
-rw-r--r--drivers/net/cxgb3/adapter.h4
-rw-r--r--drivers/net/cxgb3/common.h4
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c12
-rw-r--r--drivers/net/cxgb3/sge.c164
-rw-r--r--drivers/net/cxgb3/t3_hw.c80
-rw-r--r--drivers/net/depca.c19
-rw-r--r--drivers/net/eepro.c17
-rw-r--r--drivers/net/eexpress.c17
-rw-r--r--drivers/net/eth16i.c18
-rw-r--r--drivers/net/ethoc.c1112
-rw-r--r--drivers/net/ewrk3.c19
-rw-r--r--drivers/net/gianfar.c48
-rw-r--r--drivers/net/ibmlana.c17
-rw-r--r--drivers/net/irda/donauboe.c12
-rw-r--r--drivers/net/lance.c19
-rw-r--r--drivers/net/lp486e.c17
-rw-r--r--drivers/net/ni52.c21
-rw-r--r--drivers/net/ni65.c75
-rw-r--r--drivers/net/seeq8005.c17
-rw-r--r--drivers/net/smc-ultra.c5
-rw-r--r--drivers/net/smc-ultra32.c23
-rw-r--r--drivers/net/smc9194.c17
-rw-r--r--drivers/net/smsc911x.c5
-rw-r--r--drivers/net/tokenring/madgemc.c11
-rw-r--r--drivers/net/tokenring/proteon.c9
-rw-r--r--drivers/net/tokenring/skisa.c9
-rw-r--r--drivers/net/tokenring/smctr.c16
-rw-r--r--drivers/net/ucc_geth.c21
-rw-r--r--drivers/net/wan/sdla.c36
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/Makefile1
-rw-r--r--drivers/net/wireless/ar9170/Kconfig17
-rw-r--r--drivers/net/wireless/ar9170/Makefile3
-rw-r--r--drivers/net/wireless/ar9170/ar9170.h209
-rw-r--r--drivers/net/wireless/ar9170/cmd.c129
-rw-r--r--drivers/net/wireless/ar9170/cmd.h91
-rw-r--r--drivers/net/wireless/ar9170/eeprom.h179
-rw-r--r--drivers/net/wireless/ar9170/hw.h417
-rw-r--r--drivers/net/wireless/ar9170/led.c171
-rw-r--r--drivers/net/wireless/ar9170/mac.c452
-rw-r--r--drivers/net/wireless/ar9170/main.c1671
-rw-r--r--drivers/net/wireless/ar9170/phy.c1240
-rw-r--r--drivers/net/wireless/ar9170/usb.c748
-rw-r--r--drivers/net/wireless/ar9170/usb.h74
-rw-r--r--drivers/net/wireless/arlan-main.c21
-rw-r--r--drivers/net/wireless/ath5k/ath5k.h35
-rw-r--r--drivers/net/wireless/ath5k/attach.c2
-rw-r--r--drivers/net/wireless/ath5k/base.c46
-rw-r--r--drivers/net/wireless/ath5k/base.h2
-rw-r--r--drivers/net/wireless/ath5k/desc.c4
-rw-r--r--drivers/net/wireless/ath5k/eeprom.c774
-rw-r--r--drivers/net/wireless/ath5k/eeprom.h128
-rw-r--r--drivers/net/wireless/ath5k/initvals.c4
-rw-r--r--drivers/net/wireless/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath5k/phy.c1170
-rw-r--r--drivers/net/wireless/ath5k/reg.h19
-rw-r--r--drivers/net/wireless/ath5k/reset.c35
-rw-r--r--drivers/net/wireless/ath9k/ahb.c2
-rw-r--r--drivers/net/wireless/ath9k/ani.c2
-rw-r--r--drivers/net/wireless/ath9k/ani.h2
-rw-r--r--drivers/net/wireless/ath9k/ath9k.h9
-rw-r--r--drivers/net/wireless/ath9k/beacon.c56
-rw-r--r--drivers/net/wireless/ath9k/calib.c2
-rw-r--r--drivers/net/wireless/ath9k/calib.h2
-rw-r--r--drivers/net/wireless/ath9k/debug.c2
-rw-r--r--drivers/net/wireless/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath9k/eeprom.c308
-rw-r--r--drivers/net/wireless/ath9k/eeprom.h5
-rw-r--r--drivers/net/wireless/ath9k/hw.c14
-rw-r--r--drivers/net/wireless/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath9k/initvals.h2
-rw-r--r--drivers/net/wireless/ath9k/mac.c2
-rw-r--r--drivers/net/wireless/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath9k/main.c47
-rw-r--r--drivers/net/wireless/ath9k/pci.c20
-rw-r--r--drivers/net/wireless/ath9k/phy.c2
-rw-r--r--drivers/net/wireless/ath9k/phy.h2
-rw-r--r--drivers/net/wireless/ath9k/rc.c23
-rw-r--r--drivers/net/wireless/ath9k/rc.h2
-rw-r--r--drivers/net/wireless/ath9k/recv.c9
-rw-r--r--drivers/net/wireless/ath9k/reg.h2
-rw-r--r--drivers/net/wireless/ath9k/regd.c2
-rw-r--r--drivers/net/wireless/ath9k/regd.h2
-rw-r--r--drivers/net/wireless/ath9k/regd_common.h2
-rw-r--r--drivers/net/wireless/ath9k/xmit.c78
-rw-r--r--drivers/net/wireless/b43/main.c2
-rw-r--r--drivers/net/wireless/b43/xmit.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c179
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c54
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h52
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c38
-rw-r--r--drivers/net/wireless/libertas/radiotap.h10
-rw-r--r--drivers/net/wireless/libertas/rx.c12
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c4
-rw-r--r--drivers/net/wireless/p54/Kconfig39
-rw-r--r--drivers/net/wireless/p54/p54common.c16
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/wavelan.c79
-rw-r--r--drivers/net/wireless/wavelan.p.h9
-rw-r--r--drivers/ssb/Kconfig16
-rw-r--r--drivers/ssb/b43_pci_bridge.c1
165 files changed, 13423 insertions, 3528 deletions
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 4373adb2119a..9d9490e22e07 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -26,6 +26,10 @@
26#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 26#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
27#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC 27#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
28#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE 28#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
29#define PCI_DEVICE_ID_INTEL_IGDGM_HB 0xA010
30#define PCI_DEVICE_ID_INTEL_IGDGM_IG 0xA011
31#define PCI_DEVICE_ID_INTEL_IGDG_HB 0xA000
32#define PCI_DEVICE_ID_INTEL_IGDG_IG 0xA001
29#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 33#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
30#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 34#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
31#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 35#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
@@ -60,7 +64,12 @@
60 64
61#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ 65#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
62 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ 66 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
63 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB) 67 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
68 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
69 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
70
71#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
72 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
64 73
65#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \ 74#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
66 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ 75 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
@@ -510,7 +519,7 @@ static void intel_i830_init_gtt_entries(void)
510 size = 512; 519 size = 512;
511 } 520 }
512 size += 4; /* add in BIOS popup space */ 521 size += 4; /* add in BIOS popup space */
513 } else if (IS_G33) { 522 } else if (IS_G33 && !IS_IGD) {
514 /* G33's GTT size defined in gmch_ctrl */ 523 /* G33's GTT size defined in gmch_ctrl */
515 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { 524 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
516 case G33_PGETBL_SIZE_1M: 525 case G33_PGETBL_SIZE_1M:
@@ -526,7 +535,7 @@ static void intel_i830_init_gtt_entries(void)
526 size = 512; 535 size = 512;
527 } 536 }
528 size += 4; 537 size += 4;
529 } else if (IS_G4X) { 538 } else if (IS_G4X || IS_IGD) {
530 /* On 4 series hardware, GTT stolen is separate from graphics 539 /* On 4 series hardware, GTT stolen is separate from graphics
531 * stolen, ignore it in stolen gtt entries counting. However, 540 * stolen, ignore it in stolen gtt entries counting. However,
532 * 4KB of the stolen memory doesn't get mapped to the GTT. 541 * 4KB of the stolen memory doesn't get mapped to the GTT.
@@ -2161,6 +2170,10 @@ static const struct intel_driver_description {
2161 NULL, &intel_g33_driver }, 2170 NULL, &intel_g33_driver },
2162 { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", 2171 { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
2163 NULL, &intel_g33_driver }, 2172 NULL, &intel_g33_driver },
2173 { PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD",
2174 NULL, &intel_g33_driver },
2175 { PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD",
2176 NULL, &intel_g33_driver },
2164 { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, 2177 { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
2165 "Mobile Intel® GM45 Express", NULL, &intel_i965_driver }, 2178 "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
2166 { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0, 2179 { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
@@ -2355,6 +2368,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
2355 ID(PCI_DEVICE_ID_INTEL_82945G_HB), 2368 ID(PCI_DEVICE_ID_INTEL_82945G_HB),
2356 ID(PCI_DEVICE_ID_INTEL_82945GM_HB), 2369 ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
2357 ID(PCI_DEVICE_ID_INTEL_82945GME_HB), 2370 ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
2371 ID(PCI_DEVICE_ID_INTEL_IGDGM_HB),
2372 ID(PCI_DEVICE_ID_INTEL_IGDG_HB),
2358 ID(PCI_DEVICE_ID_INTEL_82946GZ_HB), 2373 ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
2359 ID(PCI_DEVICE_ID_INTEL_82G35_HB), 2374 ID(PCI_DEVICE_ID_INTEL_82G35_HB),
2360 ID(PCI_DEVICE_ID_INTEL_82965Q_HB), 2375 ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index a5dd7a665aa8..8b8c8c22f0fc 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -63,8 +63,7 @@ static int descriptor_count;
63#define BIB_CMC ((1) << 30) 63#define BIB_CMC ((1) << 30)
64#define BIB_IMC ((1) << 31) 64#define BIB_IMC ((1) << 31)
65 65
66static u32 * 66static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
67generate_config_rom(struct fw_card *card, size_t *config_rom_length)
68{ 67{
69 struct fw_descriptor *desc; 68 struct fw_descriptor *desc;
70 static u32 config_rom[256]; 69 static u32 config_rom[256];
@@ -128,8 +127,7 @@ generate_config_rom(struct fw_card *card, size_t *config_rom_length)
128 return config_rom; 127 return config_rom;
129} 128}
130 129
131static void 130static void update_config_roms(void)
132update_config_roms(void)
133{ 131{
134 struct fw_card *card; 132 struct fw_card *card;
135 u32 *config_rom; 133 u32 *config_rom;
@@ -141,8 +139,7 @@ update_config_roms(void)
141 } 139 }
142} 140}
143 141
144int 142int fw_core_add_descriptor(struct fw_descriptor *desc)
145fw_core_add_descriptor(struct fw_descriptor *desc)
146{ 143{
147 size_t i; 144 size_t i;
148 145
@@ -171,8 +168,7 @@ fw_core_add_descriptor(struct fw_descriptor *desc)
171 return 0; 168 return 0;
172} 169}
173 170
174void 171void fw_core_remove_descriptor(struct fw_descriptor *desc)
175fw_core_remove_descriptor(struct fw_descriptor *desc)
176{ 172{
177 mutex_lock(&card_mutex); 173 mutex_lock(&card_mutex);
178 174
@@ -185,12 +181,30 @@ fw_core_remove_descriptor(struct fw_descriptor *desc)
185 mutex_unlock(&card_mutex); 181 mutex_unlock(&card_mutex);
186} 182}
187 183
184static int set_broadcast_channel(struct device *dev, void *data)
185{
186 fw_device_set_broadcast_channel(fw_device(dev), (long)data);
187 return 0;
188}
189
190static void allocate_broadcast_channel(struct fw_card *card, int generation)
191{
192 int channel, bandwidth = 0;
193
194 fw_iso_resource_manage(card, generation, 1ULL << 31,
195 &channel, &bandwidth, true);
196 if (channel == 31) {
197 card->broadcast_channel_allocated = true;
198 device_for_each_child(card->device, (void *)(long)generation,
199 set_broadcast_channel);
200 }
201}
202
188static const char gap_count_table[] = { 203static const char gap_count_table[] = {
189 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40 204 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
190}; 205};
191 206
192void 207void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
193fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
194{ 208{
195 int scheduled; 209 int scheduled;
196 210
@@ -200,37 +214,38 @@ fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
200 fw_card_put(card); 214 fw_card_put(card);
201} 215}
202 216
203static void 217static void fw_card_bm_work(struct work_struct *work)
204fw_card_bm_work(struct work_struct *work)
205{ 218{
206 struct fw_card *card = container_of(work, struct fw_card, work.work); 219 struct fw_card *card = container_of(work, struct fw_card, work.work);
207 struct fw_device *root_device; 220 struct fw_device *root_device;
208 struct fw_node *root_node, *local_node; 221 struct fw_node *root_node;
209 unsigned long flags; 222 unsigned long flags;
210 int root_id, new_root_id, irm_id, gap_count, generation, grace, rcode; 223 int root_id, new_root_id, irm_id, local_id;
224 int gap_count, generation, grace, rcode;
211 bool do_reset = false; 225 bool do_reset = false;
212 bool root_device_is_running; 226 bool root_device_is_running;
213 bool root_device_is_cmc; 227 bool root_device_is_cmc;
214 __be32 lock_data[2]; 228 __be32 lock_data[2];
215 229
216 spin_lock_irqsave(&card->lock, flags); 230 spin_lock_irqsave(&card->lock, flags);
217 local_node = card->local_node;
218 root_node = card->root_node;
219 231
220 if (local_node == NULL) { 232 if (card->local_node == NULL) {
221 spin_unlock_irqrestore(&card->lock, flags); 233 spin_unlock_irqrestore(&card->lock, flags);
222 goto out_put_card; 234 goto out_put_card;
223 } 235 }
224 fw_node_get(local_node);
225 fw_node_get(root_node);
226 236
227 generation = card->generation; 237 generation = card->generation;
238 root_node = card->root_node;
239 fw_node_get(root_node);
228 root_device = root_node->data; 240 root_device = root_node->data;
229 root_device_is_running = root_device && 241 root_device_is_running = root_device &&
230 atomic_read(&root_device->state) == FW_DEVICE_RUNNING; 242 atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
231 root_device_is_cmc = root_device && root_device->cmc; 243 root_device_is_cmc = root_device && root_device->cmc;
232 root_id = root_node->node_id; 244 root_id = root_node->node_id;
233 grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); 245 irm_id = card->irm_node->node_id;
246 local_id = card->local_node->node_id;
247
248 grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
234 249
235 if (is_next_generation(generation, card->bm_generation) || 250 if (is_next_generation(generation, card->bm_generation) ||
236 (card->bm_generation != generation && grace)) { 251 (card->bm_generation != generation && grace)) {
@@ -246,16 +261,15 @@ fw_card_bm_work(struct work_struct *work)
246 * next generation. 261 * next generation.
247 */ 262 */
248 263
249 irm_id = card->irm_node->node_id;
250 if (!card->irm_node->link_on) { 264 if (!card->irm_node->link_on) {
251 new_root_id = local_node->node_id; 265 new_root_id = local_id;
252 fw_notify("IRM has link off, making local node (%02x) root.\n", 266 fw_notify("IRM has link off, making local node (%02x) root.\n",
253 new_root_id); 267 new_root_id);
254 goto pick_me; 268 goto pick_me;
255 } 269 }
256 270
257 lock_data[0] = cpu_to_be32(0x3f); 271 lock_data[0] = cpu_to_be32(0x3f);
258 lock_data[1] = cpu_to_be32(local_node->node_id); 272 lock_data[1] = cpu_to_be32(local_id);
259 273
260 spin_unlock_irqrestore(&card->lock, flags); 274 spin_unlock_irqrestore(&card->lock, flags);
261 275
@@ -269,9 +283,14 @@ fw_card_bm_work(struct work_struct *work)
269 goto out; 283 goto out;
270 284
271 if (rcode == RCODE_COMPLETE && 285 if (rcode == RCODE_COMPLETE &&
272 lock_data[0] != cpu_to_be32(0x3f)) 286 lock_data[0] != cpu_to_be32(0x3f)) {
273 /* Somebody else is BM, let them do the work. */ 287
288 /* Somebody else is BM. Only act as IRM. */
289 if (local_id == irm_id)
290 allocate_broadcast_channel(card, generation);
291
274 goto out; 292 goto out;
293 }
275 294
276 spin_lock_irqsave(&card->lock, flags); 295 spin_lock_irqsave(&card->lock, flags);
277 296
@@ -282,19 +301,18 @@ fw_card_bm_work(struct work_struct *work)
282 * do a bus reset and pick the local node as 301 * do a bus reset and pick the local node as
283 * root, and thus, IRM. 302 * root, and thus, IRM.
284 */ 303 */
285 new_root_id = local_node->node_id; 304 new_root_id = local_id;
286 fw_notify("BM lock failed, making local node (%02x) root.\n", 305 fw_notify("BM lock failed, making local node (%02x) root.\n",
287 new_root_id); 306 new_root_id);
288 goto pick_me; 307 goto pick_me;
289 } 308 }
290 } else if (card->bm_generation != generation) { 309 } else if (card->bm_generation != generation) {
291 /* 310 /*
292 * OK, we weren't BM in the last generation, and it's 311 * We weren't BM in the last generation, and the last
293 * less than 100ms since last bus reset. Reschedule 312 * bus reset is less than 125ms ago. Reschedule this job.
294 * this task 100ms from now.
295 */ 313 */
296 spin_unlock_irqrestore(&card->lock, flags); 314 spin_unlock_irqrestore(&card->lock, flags);
297 fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 10)); 315 fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
298 goto out; 316 goto out;
299 } 317 }
300 318
@@ -310,7 +328,7 @@ fw_card_bm_work(struct work_struct *work)
310 * Either link_on is false, or we failed to read the 328 * Either link_on is false, or we failed to read the
311 * config rom. In either case, pick another root. 329 * config rom. In either case, pick another root.
312 */ 330 */
313 new_root_id = local_node->node_id; 331 new_root_id = local_id;
314 } else if (!root_device_is_running) { 332 } else if (!root_device_is_running) {
315 /* 333 /*
316 * If we haven't probed this device yet, bail out now 334 * If we haven't probed this device yet, bail out now
@@ -332,7 +350,7 @@ fw_card_bm_work(struct work_struct *work)
332 * successfully read the config rom, but it's not 350 * successfully read the config rom, but it's not
333 * cycle master capable. 351 * cycle master capable.
334 */ 352 */
335 new_root_id = local_node->node_id; 353 new_root_id = local_id;
336 } 354 }
337 355
338 pick_me: 356 pick_me:
@@ -363,25 +381,28 @@ fw_card_bm_work(struct work_struct *work)
363 card->index, new_root_id, gap_count); 381 card->index, new_root_id, gap_count);
364 fw_send_phy_config(card, new_root_id, generation, gap_count); 382 fw_send_phy_config(card, new_root_id, generation, gap_count);
365 fw_core_initiate_bus_reset(card, 1); 383 fw_core_initiate_bus_reset(card, 1);
384 /* Will allocate broadcast channel after the reset. */
385 } else {
386 if (local_id == irm_id)
387 allocate_broadcast_channel(card, generation);
366 } 388 }
389
367 out: 390 out:
368 fw_node_put(root_node); 391 fw_node_put(root_node);
369 fw_node_put(local_node);
370 out_put_card: 392 out_put_card:
371 fw_card_put(card); 393 fw_card_put(card);
372} 394}
373 395
374static void 396static void flush_timer_callback(unsigned long data)
375flush_timer_callback(unsigned long data)
376{ 397{
377 struct fw_card *card = (struct fw_card *)data; 398 struct fw_card *card = (struct fw_card *)data;
378 399
379 fw_flush_transactions(card); 400 fw_flush_transactions(card);
380} 401}
381 402
382void 403void fw_card_initialize(struct fw_card *card,
383fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, 404 const struct fw_card_driver *driver,
384 struct device *device) 405 struct device *device)
385{ 406{
386 static atomic_t index = ATOMIC_INIT(-1); 407 static atomic_t index = ATOMIC_INIT(-1);
387 408
@@ -406,13 +427,12 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
406} 427}
407EXPORT_SYMBOL(fw_card_initialize); 428EXPORT_SYMBOL(fw_card_initialize);
408 429
409int 430int fw_card_add(struct fw_card *card,
410fw_card_add(struct fw_card *card, 431 u32 max_receive, u32 link_speed, u64 guid)
411 u32 max_receive, u32 link_speed, u64 guid)
412{ 432{
413 u32 *config_rom; 433 u32 *config_rom;
414 size_t length; 434 size_t length;
415 int err; 435 int ret;
416 436
417 card->max_receive = max_receive; 437 card->max_receive = max_receive;
418 card->link_speed = link_speed; 438 card->link_speed = link_speed;
@@ -423,13 +443,14 @@ fw_card_add(struct fw_card *card,
423 list_add_tail(&card->link, &card_list); 443 list_add_tail(&card->link, &card_list);
424 mutex_unlock(&card_mutex); 444 mutex_unlock(&card_mutex);
425 445
426 err = card->driver->enable(card, config_rom, length); 446 ret = card->driver->enable(card, config_rom, length);
427 if (err < 0) { 447 if (ret < 0) {
428 mutex_lock(&card_mutex); 448 mutex_lock(&card_mutex);
429 list_del(&card->link); 449 list_del(&card->link);
430 mutex_unlock(&card_mutex); 450 mutex_unlock(&card_mutex);
431 } 451 }
432 return err; 452
453 return ret;
433} 454}
434EXPORT_SYMBOL(fw_card_add); 455EXPORT_SYMBOL(fw_card_add);
435 456
@@ -442,23 +463,20 @@ EXPORT_SYMBOL(fw_card_add);
442 * dummy driver just fails all IO. 463 * dummy driver just fails all IO.
443 */ 464 */
444 465
445static int 466static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
446dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
447{ 467{
448 BUG(); 468 BUG();
449 return -1; 469 return -1;
450} 470}
451 471
452static int 472static int dummy_update_phy_reg(struct fw_card *card, int address,
453dummy_update_phy_reg(struct fw_card *card, int address, 473 int clear_bits, int set_bits)
454 int clear_bits, int set_bits)
455{ 474{
456 return -ENODEV; 475 return -ENODEV;
457} 476}
458 477
459static int 478static int dummy_set_config_rom(struct fw_card *card,
460dummy_set_config_rom(struct fw_card *card, 479 u32 *config_rom, size_t length)
461 u32 *config_rom, size_t length)
462{ 480{
463 /* 481 /*
464 * We take the card out of card_list before setting the dummy 482 * We take the card out of card_list before setting the dummy
@@ -468,27 +486,23 @@ dummy_set_config_rom(struct fw_card *card,
468 return -1; 486 return -1;
469} 487}
470 488
471static void 489static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
472dummy_send_request(struct fw_card *card, struct fw_packet *packet)
473{ 490{
474 packet->callback(packet, card, -ENODEV); 491 packet->callback(packet, card, -ENODEV);
475} 492}
476 493
477static void 494static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
478dummy_send_response(struct fw_card *card, struct fw_packet *packet)
479{ 495{
480 packet->callback(packet, card, -ENODEV); 496 packet->callback(packet, card, -ENODEV);
481} 497}
482 498
483static int 499static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
484dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
485{ 500{
486 return -ENOENT; 501 return -ENOENT;
487} 502}
488 503
489static int 504static int dummy_enable_phys_dma(struct fw_card *card,
490dummy_enable_phys_dma(struct fw_card *card, 505 int node_id, int generation)
491 int node_id, int generation)
492{ 506{
493 return -ENODEV; 507 return -ENODEV;
494} 508}
@@ -503,16 +517,14 @@ static struct fw_card_driver dummy_driver = {
503 .enable_phys_dma = dummy_enable_phys_dma, 517 .enable_phys_dma = dummy_enable_phys_dma,
504}; 518};
505 519
506void 520void fw_card_release(struct kref *kref)
507fw_card_release(struct kref *kref)
508{ 521{
509 struct fw_card *card = container_of(kref, struct fw_card, kref); 522 struct fw_card *card = container_of(kref, struct fw_card, kref);
510 523
511 complete(&card->done); 524 complete(&card->done);
512} 525}
513 526
514void 527void fw_core_remove_card(struct fw_card *card)
515fw_core_remove_card(struct fw_card *card)
516{ 528{
517 card->driver->update_phy_reg(card, 4, 529 card->driver->update_phy_reg(card, 4,
518 PHY_LINK_ACTIVE | PHY_CONTENDER, 0); 530 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
@@ -536,8 +548,7 @@ fw_core_remove_card(struct fw_card *card)
536} 548}
537EXPORT_SYMBOL(fw_core_remove_card); 549EXPORT_SYMBOL(fw_core_remove_card);
538 550
539int 551int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
540fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
541{ 552{
542 int reg = short_reset ? 5 : 1; 553 int reg = short_reset ? 5 : 1;
543 int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET; 554 int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index ed03234cbea8..7eb6594cc3e5 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -18,87 +18,162 @@
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */ 19 */
20 20
21#include <linux/module.h> 21#include <linux/compat.h>
22#include <linux/kernel.h> 22#include <linux/delay.h>
23#include <linux/wait.h>
24#include <linux/errno.h>
25#include <linux/device.h> 23#include <linux/device.h>
26#include <linux/vmalloc.h> 24#include <linux/errno.h>
25#include <linux/firewire-cdev.h>
26#include <linux/idr.h>
27#include <linux/jiffies.h>
28#include <linux/kernel.h>
29#include <linux/kref.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/mutex.h>
27#include <linux/poll.h> 33#include <linux/poll.h>
28#include <linux/preempt.h> 34#include <linux/preempt.h>
35#include <linux/spinlock.h>
29#include <linux/time.h> 36#include <linux/time.h>
30#include <linux/delay.h> 37#include <linux/vmalloc.h>
31#include <linux/mm.h> 38#include <linux/wait.h>
32#include <linux/idr.h> 39#include <linux/workqueue.h>
33#include <linux/compat.h> 40
34#include <linux/firewire-cdev.h>
35#include <asm/system.h> 41#include <asm/system.h>
36#include <asm/uaccess.h> 42#include <asm/uaccess.h>
37#include "fw-transaction.h" 43
38#include "fw-topology.h"
39#include "fw-device.h" 44#include "fw-device.h"
45#include "fw-topology.h"
46#include "fw-transaction.h"
47
48struct client {
49 u32 version;
50 struct fw_device *device;
51
52 spinlock_t lock;
53 bool in_shutdown;
54 struct idr resource_idr;
55 struct list_head event_list;
56 wait_queue_head_t wait;
57 u64 bus_reset_closure;
58
59 struct fw_iso_context *iso_context;
60 u64 iso_closure;
61 struct fw_iso_buffer buffer;
62 unsigned long vm_start;
40 63
41struct client;
42struct client_resource {
43 struct list_head link; 64 struct list_head link;
44 void (*release)(struct client *client, struct client_resource *r); 65 struct kref kref;
45 u32 handle;
46}; 66};
47 67
68static inline void client_get(struct client *client)
69{
70 kref_get(&client->kref);
71}
72
73static void client_release(struct kref *kref)
74{
75 struct client *client = container_of(kref, struct client, kref);
76
77 fw_device_put(client->device);
78 kfree(client);
79}
80
81static void client_put(struct client *client)
82{
83 kref_put(&client->kref, client_release);
84}
85
86struct client_resource;
87typedef void (*client_resource_release_fn_t)(struct client *,
88 struct client_resource *);
89struct client_resource {
90 client_resource_release_fn_t release;
91 int handle;
92};
93
94struct address_handler_resource {
95 struct client_resource resource;
96 struct fw_address_handler handler;
97 __u64 closure;
98 struct client *client;
99};
100
101struct outbound_transaction_resource {
102 struct client_resource resource;
103 struct fw_transaction transaction;
104};
105
106struct inbound_transaction_resource {
107 struct client_resource resource;
108 struct fw_request *request;
109 void *data;
110 size_t length;
111};
112
113struct descriptor_resource {
114 struct client_resource resource;
115 struct fw_descriptor descriptor;
116 u32 data[0];
117};
118
119struct iso_resource {
120 struct client_resource resource;
121 struct client *client;
122 /* Schedule work and access todo only with client->lock held. */
123 struct delayed_work work;
124 enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
125 ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
126 int generation;
127 u64 channels;
128 s32 bandwidth;
129 struct iso_resource_event *e_alloc, *e_dealloc;
130};
131
132static void schedule_iso_resource(struct iso_resource *);
133static void release_iso_resource(struct client *, struct client_resource *);
134
48/* 135/*
49 * dequeue_event() just kfree()'s the event, so the event has to be 136 * dequeue_event() just kfree()'s the event, so the event has to be
50 * the first field in the struct. 137 * the first field in a struct XYZ_event.
51 */ 138 */
52
53struct event { 139struct event {
54 struct { void *data; size_t size; } v[2]; 140 struct { void *data; size_t size; } v[2];
55 struct list_head link; 141 struct list_head link;
56}; 142};
57 143
58struct bus_reset { 144struct bus_reset_event {
59 struct event event; 145 struct event event;
60 struct fw_cdev_event_bus_reset reset; 146 struct fw_cdev_event_bus_reset reset;
61}; 147};
62 148
63struct response { 149struct outbound_transaction_event {
64 struct event event; 150 struct event event;
65 struct fw_transaction transaction;
66 struct client *client; 151 struct client *client;
67 struct client_resource resource; 152 struct outbound_transaction_resource r;
68 struct fw_cdev_event_response response; 153 struct fw_cdev_event_response response;
69}; 154};
70 155
71struct iso_interrupt { 156struct inbound_transaction_event {
72 struct event event; 157 struct event event;
73 struct fw_cdev_event_iso_interrupt interrupt; 158 struct fw_cdev_event_request request;
74}; 159};
75 160
76struct client { 161struct iso_interrupt_event {
77 u32 version; 162 struct event event;
78 struct fw_device *device; 163 struct fw_cdev_event_iso_interrupt interrupt;
79 spinlock_t lock; 164};
80 u32 resource_handle;
81 struct list_head resource_list;
82 struct list_head event_list;
83 wait_queue_head_t wait;
84 u64 bus_reset_closure;
85
86 struct fw_iso_context *iso_context;
87 u64 iso_closure;
88 struct fw_iso_buffer buffer;
89 unsigned long vm_start;
90 165
91 struct list_head link; 166struct iso_resource_event {
167 struct event event;
168 struct fw_cdev_event_iso_resource resource;
92}; 169};
93 170
94static inline void __user * 171static inline void __user *u64_to_uptr(__u64 value)
95u64_to_uptr(__u64 value)
96{ 172{
97 return (void __user *)(unsigned long)value; 173 return (void __user *)(unsigned long)value;
98} 174}
99 175
100static inline __u64 176static inline __u64 uptr_to_u64(void __user *ptr)
101uptr_to_u64(void __user *ptr)
102{ 177{
103 return (__u64)(unsigned long)ptr; 178 return (__u64)(unsigned long)ptr;
104} 179}
@@ -107,7 +182,6 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
107{ 182{
108 struct fw_device *device; 183 struct fw_device *device;
109 struct client *client; 184 struct client *client;
110 unsigned long flags;
111 185
112 device = fw_device_get_by_devt(inode->i_rdev); 186 device = fw_device_get_by_devt(inode->i_rdev);
113 if (device == NULL) 187 if (device == NULL)
@@ -125,16 +199,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
125 } 199 }
126 200
127 client->device = device; 201 client->device = device;
128 INIT_LIST_HEAD(&client->event_list);
129 INIT_LIST_HEAD(&client->resource_list);
130 spin_lock_init(&client->lock); 202 spin_lock_init(&client->lock);
203 idr_init(&client->resource_idr);
204 INIT_LIST_HEAD(&client->event_list);
131 init_waitqueue_head(&client->wait); 205 init_waitqueue_head(&client->wait);
206 kref_init(&client->kref);
132 207
133 file->private_data = client; 208 file->private_data = client;
134 209
135 spin_lock_irqsave(&device->card->lock, flags); 210 mutex_lock(&device->client_list_mutex);
136 list_add_tail(&client->link, &device->client_list); 211 list_add_tail(&client->link, &device->client_list);
137 spin_unlock_irqrestore(&device->card->lock, flags); 212 mutex_unlock(&device->client_list_mutex);
138 213
139 return 0; 214 return 0;
140} 215}
@@ -150,68 +225,69 @@ static void queue_event(struct client *client, struct event *event,
150 event->v[1].size = size1; 225 event->v[1].size = size1;
151 226
152 spin_lock_irqsave(&client->lock, flags); 227 spin_lock_irqsave(&client->lock, flags);
153 list_add_tail(&event->link, &client->event_list); 228 if (client->in_shutdown)
229 kfree(event);
230 else
231 list_add_tail(&event->link, &client->event_list);
154 spin_unlock_irqrestore(&client->lock, flags); 232 spin_unlock_irqrestore(&client->lock, flags);
155 233
156 wake_up_interruptible(&client->wait); 234 wake_up_interruptible(&client->wait);
157} 235}
158 236
159static int 237static int dequeue_event(struct client *client,
160dequeue_event(struct client *client, char __user *buffer, size_t count) 238 char __user *buffer, size_t count)
161{ 239{
162 unsigned long flags;
163 struct event *event; 240 struct event *event;
164 size_t size, total; 241 size_t size, total;
165 int i, retval; 242 int i, ret;
166 243
167 retval = wait_event_interruptible(client->wait, 244 ret = wait_event_interruptible(client->wait,
168 !list_empty(&client->event_list) || 245 !list_empty(&client->event_list) ||
169 fw_device_is_shutdown(client->device)); 246 fw_device_is_shutdown(client->device));
170 if (retval < 0) 247 if (ret < 0)
171 return retval; 248 return ret;
172 249
173 if (list_empty(&client->event_list) && 250 if (list_empty(&client->event_list) &&
174 fw_device_is_shutdown(client->device)) 251 fw_device_is_shutdown(client->device))
175 return -ENODEV; 252 return -ENODEV;
176 253
177 spin_lock_irqsave(&client->lock, flags); 254 spin_lock_irq(&client->lock);
178 event = container_of(client->event_list.next, struct event, link); 255 event = list_first_entry(&client->event_list, struct event, link);
179 list_del(&event->link); 256 list_del(&event->link);
180 spin_unlock_irqrestore(&client->lock, flags); 257 spin_unlock_irq(&client->lock);
181 258
182 total = 0; 259 total = 0;
183 for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { 260 for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
184 size = min(event->v[i].size, count - total); 261 size = min(event->v[i].size, count - total);
185 if (copy_to_user(buffer + total, event->v[i].data, size)) { 262 if (copy_to_user(buffer + total, event->v[i].data, size)) {
186 retval = -EFAULT; 263 ret = -EFAULT;
187 goto out; 264 goto out;
188 } 265 }
189 total += size; 266 total += size;
190 } 267 }
191 retval = total; 268 ret = total;
192 269
193 out: 270 out:
194 kfree(event); 271 kfree(event);
195 272
196 return retval; 273 return ret;
197} 274}
198 275
199static ssize_t 276static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
200fw_device_op_read(struct file *file, 277 size_t count, loff_t *offset)
201 char __user *buffer, size_t count, loff_t *offset)
202{ 278{
203 struct client *client = file->private_data; 279 struct client *client = file->private_data;
204 280
205 return dequeue_event(client, buffer, count); 281 return dequeue_event(client, buffer, count);
206} 282}
207 283
208/* caller must hold card->lock so that node pointers can be dereferenced here */ 284static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
209static void 285 struct client *client)
210fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
211 struct client *client)
212{ 286{
213 struct fw_card *card = client->device->card; 287 struct fw_card *card = client->device->card;
214 288
289 spin_lock_irq(&card->lock);
290
215 event->closure = client->bus_reset_closure; 291 event->closure = client->bus_reset_closure;
216 event->type = FW_CDEV_EVENT_BUS_RESET; 292 event->type = FW_CDEV_EVENT_BUS_RESET;
217 event->generation = client->device->generation; 293 event->generation = client->device->generation;
@@ -220,39 +296,49 @@ fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
220 event->bm_node_id = 0; /* FIXME: We don't track the BM. */ 296 event->bm_node_id = 0; /* FIXME: We don't track the BM. */
221 event->irm_node_id = card->irm_node->node_id; 297 event->irm_node_id = card->irm_node->node_id;
222 event->root_node_id = card->root_node->node_id; 298 event->root_node_id = card->root_node->node_id;
299
300 spin_unlock_irq(&card->lock);
223} 301}
224 302
225static void 303static void for_each_client(struct fw_device *device,
226for_each_client(struct fw_device *device, 304 void (*callback)(struct client *client))
227 void (*callback)(struct client *client))
228{ 305{
229 struct fw_card *card = device->card;
230 struct client *c; 306 struct client *c;
231 unsigned long flags;
232
233 spin_lock_irqsave(&card->lock, flags);
234 307
308 mutex_lock(&device->client_list_mutex);
235 list_for_each_entry(c, &device->client_list, link) 309 list_for_each_entry(c, &device->client_list, link)
236 callback(c); 310 callback(c);
311 mutex_unlock(&device->client_list_mutex);
312}
313
314static int schedule_reallocations(int id, void *p, void *data)
315{
316 struct client_resource *r = p;
237 317
238 spin_unlock_irqrestore(&card->lock, flags); 318 if (r->release == release_iso_resource)
319 schedule_iso_resource(container_of(r,
320 struct iso_resource, resource));
321 return 0;
239} 322}
240 323
241static void 324static void queue_bus_reset_event(struct client *client)
242queue_bus_reset_event(struct client *client)
243{ 325{
244 struct bus_reset *bus_reset; 326 struct bus_reset_event *e;
245 327
246 bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC); 328 e = kzalloc(sizeof(*e), GFP_KERNEL);
247 if (bus_reset == NULL) { 329 if (e == NULL) {
248 fw_notify("Out of memory when allocating bus reset event\n"); 330 fw_notify("Out of memory when allocating bus reset event\n");
249 return; 331 return;
250 } 332 }
251 333
252 fill_bus_reset_event(&bus_reset->reset, client); 334 fill_bus_reset_event(&e->reset, client);
335
336 queue_event(client, &e->event,
337 &e->reset, sizeof(e->reset), NULL, 0);
253 338
254 queue_event(client, &bus_reset->event, 339 spin_lock_irq(&client->lock);
255 &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0); 340 idr_for_each(&client->resource_idr, schedule_reallocations, client);
341 spin_unlock_irq(&client->lock);
256} 342}
257 343
258void fw_device_cdev_update(struct fw_device *device) 344void fw_device_cdev_update(struct fw_device *device)
@@ -274,11 +360,11 @@ static int ioctl_get_info(struct client *client, void *buffer)
274{ 360{
275 struct fw_cdev_get_info *get_info = buffer; 361 struct fw_cdev_get_info *get_info = buffer;
276 struct fw_cdev_event_bus_reset bus_reset; 362 struct fw_cdev_event_bus_reset bus_reset;
277 struct fw_card *card = client->device->card;
278 unsigned long ret = 0; 363 unsigned long ret = 0;
279 364
280 client->version = get_info->version; 365 client->version = get_info->version;
281 get_info->version = FW_CDEV_VERSION; 366 get_info->version = FW_CDEV_VERSION;
367 get_info->card = client->device->card->index;
282 368
283 down_read(&fw_device_rwsem); 369 down_read(&fw_device_rwsem);
284 370
@@ -300,49 +386,61 @@ static int ioctl_get_info(struct client *client, void *buffer)
300 client->bus_reset_closure = get_info->bus_reset_closure; 386 client->bus_reset_closure = get_info->bus_reset_closure;
301 if (get_info->bus_reset != 0) { 387 if (get_info->bus_reset != 0) {
302 void __user *uptr = u64_to_uptr(get_info->bus_reset); 388 void __user *uptr = u64_to_uptr(get_info->bus_reset);
303 unsigned long flags;
304 389
305 spin_lock_irqsave(&card->lock, flags);
306 fill_bus_reset_event(&bus_reset, client); 390 fill_bus_reset_event(&bus_reset, client);
307 spin_unlock_irqrestore(&card->lock, flags);
308
309 if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset))) 391 if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
310 return -EFAULT; 392 return -EFAULT;
311 } 393 }
312 394
313 get_info->card = card->index;
314
315 return 0; 395 return 0;
316} 396}
317 397
318static void 398static int add_client_resource(struct client *client,
319add_client_resource(struct client *client, struct client_resource *resource) 399 struct client_resource *resource, gfp_t gfp_mask)
320{ 400{
321 unsigned long flags; 401 unsigned long flags;
402 int ret;
403
404 retry:
405 if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
406 return -ENOMEM;
322 407
323 spin_lock_irqsave(&client->lock, flags); 408 spin_lock_irqsave(&client->lock, flags);
324 list_add_tail(&resource->link, &client->resource_list); 409 if (client->in_shutdown)
325 resource->handle = client->resource_handle++; 410 ret = -ECANCELED;
411 else
412 ret = idr_get_new(&client->resource_idr, resource,
413 &resource->handle);
414 if (ret >= 0) {
415 client_get(client);
416 if (resource->release == release_iso_resource)
417 schedule_iso_resource(container_of(resource,
418 struct iso_resource, resource));
419 }
326 spin_unlock_irqrestore(&client->lock, flags); 420 spin_unlock_irqrestore(&client->lock, flags);
421
422 if (ret == -EAGAIN)
423 goto retry;
424
425 return ret < 0 ? ret : 0;
327} 426}
328 427
329static int 428static int release_client_resource(struct client *client, u32 handle,
330release_client_resource(struct client *client, u32 handle, 429 client_resource_release_fn_t release,
331 struct client_resource **resource) 430 struct client_resource **resource)
332{ 431{
333 struct client_resource *r; 432 struct client_resource *r;
334 unsigned long flags;
335 433
336 spin_lock_irqsave(&client->lock, flags); 434 spin_lock_irq(&client->lock);
337 list_for_each_entry(r, &client->resource_list, link) { 435 if (client->in_shutdown)
338 if (r->handle == handle) { 436 r = NULL;
339 list_del(&r->link); 437 else
340 break; 438 r = idr_find(&client->resource_idr, handle);
341 } 439 if (r && r->release == release)
342 } 440 idr_remove(&client->resource_idr, handle);
343 spin_unlock_irqrestore(&client->lock, flags); 441 spin_unlock_irq(&client->lock);
344 442
345 if (&r->link == &client->resource_list) 443 if (!(r && r->release == release))
346 return -EINVAL; 444 return -EINVAL;
347 445
348 if (resource) 446 if (resource)
@@ -350,203 +448,239 @@ release_client_resource(struct client *client, u32 handle,
350 else 448 else
351 r->release(client, r); 449 r->release(client, r);
352 450
451 client_put(client);
452
353 return 0; 453 return 0;
354} 454}
355 455
356static void 456static void release_transaction(struct client *client,
357release_transaction(struct client *client, struct client_resource *resource) 457 struct client_resource *resource)
358{ 458{
359 struct response *response = 459 struct outbound_transaction_resource *r = container_of(resource,
360 container_of(resource, struct response, resource); 460 struct outbound_transaction_resource, resource);
361 461
362 fw_cancel_transaction(client->device->card, &response->transaction); 462 fw_cancel_transaction(client->device->card, &r->transaction);
363} 463}
364 464
365static void 465static void complete_transaction(struct fw_card *card, int rcode,
366complete_transaction(struct fw_card *card, int rcode, 466 void *payload, size_t length, void *data)
367 void *payload, size_t length, void *data)
368{ 467{
369 struct response *response = data; 468 struct outbound_transaction_event *e = data;
370 struct client *client = response->client; 469 struct fw_cdev_event_response *rsp = &e->response;
470 struct client *client = e->client;
371 unsigned long flags; 471 unsigned long flags;
372 struct fw_cdev_event_response *r = &response->response;
373 472
374 if (length < r->length) 473 if (length < rsp->length)
375 r->length = length; 474 rsp->length = length;
376 if (rcode == RCODE_COMPLETE) 475 if (rcode == RCODE_COMPLETE)
377 memcpy(r->data, payload, r->length); 476 memcpy(rsp->data, payload, rsp->length);
378 477
379 spin_lock_irqsave(&client->lock, flags); 478 spin_lock_irqsave(&client->lock, flags);
380 list_del(&response->resource.link); 479 /*
480 * 1. If called while in shutdown, the idr tree must be left untouched.
481 * The idr handle will be removed and the client reference will be
482 * dropped later.
483 * 2. If the call chain was release_client_resource ->
484 * release_transaction -> complete_transaction (instead of a normal
485 * conclusion of the transaction), i.e. if this resource was already
486 * unregistered from the idr, the client reference will be dropped
487 * by release_client_resource and we must not drop it here.
488 */
489 if (!client->in_shutdown &&
490 idr_find(&client->resource_idr, e->r.resource.handle)) {
491 idr_remove(&client->resource_idr, e->r.resource.handle);
492 /* Drop the idr's reference */
493 client_put(client);
494 }
381 spin_unlock_irqrestore(&client->lock, flags); 495 spin_unlock_irqrestore(&client->lock, flags);
382 496
383 r->type = FW_CDEV_EVENT_RESPONSE; 497 rsp->type = FW_CDEV_EVENT_RESPONSE;
384 r->rcode = rcode; 498 rsp->rcode = rcode;
385 499
386 /* 500 /*
387 * In the case that sizeof(*r) doesn't align with the position of the 501 * In the case that sizeof(*rsp) doesn't align with the position of the
388 * data, and the read is short, preserve an extra copy of the data 502 * data, and the read is short, preserve an extra copy of the data
389 * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless 503 * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
390 * for short reads and some apps depended on it, this is both safe 504 * for short reads and some apps depended on it, this is both safe
391 * and prudent for compatibility. 505 * and prudent for compatibility.
392 */ 506 */
393 if (r->length <= sizeof(*r) - offsetof(typeof(*r), data)) 507 if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
394 queue_event(client, &response->event, r, sizeof(*r), 508 queue_event(client, &e->event, rsp, sizeof(*rsp),
395 r->data, r->length); 509 rsp->data, rsp->length);
396 else 510 else
397 queue_event(client, &response->event, r, sizeof(*r) + r->length, 511 queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
398 NULL, 0); 512 NULL, 0);
513
514 /* Drop the transaction callback's reference */
515 client_put(client);
399} 516}
400 517
401static int ioctl_send_request(struct client *client, void *buffer) 518static int init_request(struct client *client,
519 struct fw_cdev_send_request *request,
520 int destination_id, int speed)
402{ 521{
403 struct fw_device *device = client->device; 522 struct outbound_transaction_event *e;
404 struct fw_cdev_send_request *request = buffer; 523 int ret;
405 struct response *response;
406 524
407 /* What is the biggest size we'll accept, really? */ 525 if (request->tcode != TCODE_STREAM_DATA &&
408 if (request->length > 4096) 526 (request->length > 4096 || request->length > 512 << speed))
409 return -EINVAL; 527 return -EIO;
410 528
411 response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL); 529 e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
412 if (response == NULL) 530 if (e == NULL)
413 return -ENOMEM; 531 return -ENOMEM;
414 532
415 response->client = client; 533 e->client = client;
416 response->response.length = request->length; 534 e->response.length = request->length;
417 response->response.closure = request->closure; 535 e->response.closure = request->closure;
418 536
419 if (request->data && 537 if (request->data &&
420 copy_from_user(response->response.data, 538 copy_from_user(e->response.data,
421 u64_to_uptr(request->data), request->length)) { 539 u64_to_uptr(request->data), request->length)) {
422 kfree(response); 540 ret = -EFAULT;
423 return -EFAULT; 541 goto failed;
424 } 542 }
425 543
426 response->resource.release = release_transaction; 544 e->r.resource.release = release_transaction;
427 add_client_resource(client, &response->resource); 545 ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
546 if (ret < 0)
547 goto failed;
428 548
429 fw_send_request(device->card, &response->transaction, 549 /* Get a reference for the transaction callback */
430 request->tcode & 0x1f, 550 client_get(client);
431 device->node->node_id,
432 request->generation,
433 device->max_speed,
434 request->offset,
435 response->response.data, request->length,
436 complete_transaction, response);
437 551
438 if (request->data) 552 fw_send_request(client->device->card, &e->r.transaction,
439 return sizeof(request) + request->length; 553 request->tcode, destination_id, request->generation,
440 else 554 speed, request->offset, e->response.data,
441 return sizeof(request); 555 request->length, complete_transaction, e);
556 return 0;
557
558 failed:
559 kfree(e);
560
561 return ret;
442} 562}
443 563
444struct address_handler { 564static int ioctl_send_request(struct client *client, void *buffer)
445 struct fw_address_handler handler; 565{
446 __u64 closure; 566 struct fw_cdev_send_request *request = buffer;
447 struct client *client;
448 struct client_resource resource;
449};
450 567
451struct request { 568 switch (request->tcode) {
452 struct fw_request *request; 569 case TCODE_WRITE_QUADLET_REQUEST:
453 void *data; 570 case TCODE_WRITE_BLOCK_REQUEST:
454 size_t length; 571 case TCODE_READ_QUADLET_REQUEST:
455 struct client_resource resource; 572 case TCODE_READ_BLOCK_REQUEST:
456}; 573 case TCODE_LOCK_MASK_SWAP:
574 case TCODE_LOCK_COMPARE_SWAP:
575 case TCODE_LOCK_FETCH_ADD:
576 case TCODE_LOCK_LITTLE_ADD:
577 case TCODE_LOCK_BOUNDED_ADD:
578 case TCODE_LOCK_WRAP_ADD:
579 case TCODE_LOCK_VENDOR_DEPENDENT:
580 break;
581 default:
582 return -EINVAL;
583 }
457 584
458struct request_event { 585 return init_request(client, request, client->device->node_id,
459 struct event event; 586 client->device->max_speed);
460 struct fw_cdev_event_request request; 587}
461};
462 588
463static void 589static void release_request(struct client *client,
464release_request(struct client *client, struct client_resource *resource) 590 struct client_resource *resource)
465{ 591{
466 struct request *request = 592 struct inbound_transaction_resource *r = container_of(resource,
467 container_of(resource, struct request, resource); 593 struct inbound_transaction_resource, resource);
468 594
469 fw_send_response(client->device->card, request->request, 595 fw_send_response(client->device->card, r->request,
470 RCODE_CONFLICT_ERROR); 596 RCODE_CONFLICT_ERROR);
471 kfree(request); 597 kfree(r);
472} 598}
473 599
474static void 600static void handle_request(struct fw_card *card, struct fw_request *request,
475handle_request(struct fw_card *card, struct fw_request *r, 601 int tcode, int destination, int source,
476 int tcode, int destination, int source, 602 int generation, int speed,
477 int generation, int speed, 603 unsigned long long offset,
478 unsigned long long offset, 604 void *payload, size_t length, void *callback_data)
479 void *payload, size_t length, void *callback_data)
480{ 605{
481 struct address_handler *handler = callback_data; 606 struct address_handler_resource *handler = callback_data;
482 struct request *request; 607 struct inbound_transaction_resource *r;
483 struct request_event *e; 608 struct inbound_transaction_event *e;
484 struct client *client = handler->client; 609 int ret;
485 610
486 request = kmalloc(sizeof(*request), GFP_ATOMIC); 611 r = kmalloc(sizeof(*r), GFP_ATOMIC);
487 e = kmalloc(sizeof(*e), GFP_ATOMIC); 612 e = kmalloc(sizeof(*e), GFP_ATOMIC);
488 if (request == NULL || e == NULL) { 613 if (r == NULL || e == NULL)
489 kfree(request); 614 goto failed;
490 kfree(e);
491 fw_send_response(card, r, RCODE_CONFLICT_ERROR);
492 return;
493 }
494 615
495 request->request = r; 616 r->request = request;
496 request->data = payload; 617 r->data = payload;
497 request->length = length; 618 r->length = length;
498 619
499 request->resource.release = release_request; 620 r->resource.release = release_request;
500 add_client_resource(client, &request->resource); 621 ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
622 if (ret < 0)
623 goto failed;
501 624
502 e->request.type = FW_CDEV_EVENT_REQUEST; 625 e->request.type = FW_CDEV_EVENT_REQUEST;
503 e->request.tcode = tcode; 626 e->request.tcode = tcode;
504 e->request.offset = offset; 627 e->request.offset = offset;
505 e->request.length = length; 628 e->request.length = length;
506 e->request.handle = request->resource.handle; 629 e->request.handle = r->resource.handle;
507 e->request.closure = handler->closure; 630 e->request.closure = handler->closure;
508 631
509 queue_event(client, &e->event, 632 queue_event(handler->client, &e->event,
510 &e->request, sizeof(e->request), payload, length); 633 &e->request, sizeof(e->request), payload, length);
634 return;
635
636 failed:
637 kfree(r);
638 kfree(e);
639 fw_send_response(card, request, RCODE_CONFLICT_ERROR);
511} 640}
512 641
513static void 642static void release_address_handler(struct client *client,
514release_address_handler(struct client *client, 643 struct client_resource *resource)
515 struct client_resource *resource)
516{ 644{
517 struct address_handler *handler = 645 struct address_handler_resource *r =
518 container_of(resource, struct address_handler, resource); 646 container_of(resource, struct address_handler_resource, resource);
519 647
520 fw_core_remove_address_handler(&handler->handler); 648 fw_core_remove_address_handler(&r->handler);
521 kfree(handler); 649 kfree(r);
522} 650}
523 651
524static int ioctl_allocate(struct client *client, void *buffer) 652static int ioctl_allocate(struct client *client, void *buffer)
525{ 653{
526 struct fw_cdev_allocate *request = buffer; 654 struct fw_cdev_allocate *request = buffer;
527 struct address_handler *handler; 655 struct address_handler_resource *r;
528 struct fw_address_region region; 656 struct fw_address_region region;
657 int ret;
529 658
530 handler = kmalloc(sizeof(*handler), GFP_KERNEL); 659 r = kmalloc(sizeof(*r), GFP_KERNEL);
531 if (handler == NULL) 660 if (r == NULL)
532 return -ENOMEM; 661 return -ENOMEM;
533 662
534 region.start = request->offset; 663 region.start = request->offset;
535 region.end = request->offset + request->length; 664 region.end = request->offset + request->length;
536 handler->handler.length = request->length; 665 r->handler.length = request->length;
537 handler->handler.address_callback = handle_request; 666 r->handler.address_callback = handle_request;
538 handler->handler.callback_data = handler; 667 r->handler.callback_data = r;
539 handler->closure = request->closure; 668 r->closure = request->closure;
540 handler->client = client; 669 r->client = client;
541 670
542 if (fw_core_add_address_handler(&handler->handler, &region) < 0) { 671 ret = fw_core_add_address_handler(&r->handler, &region);
543 kfree(handler); 672 if (ret < 0) {
544 return -EBUSY; 673 kfree(r);
674 return ret;
545 } 675 }
546 676
547 handler->resource.release = release_address_handler; 677 r->resource.release = release_address_handler;
548 add_client_resource(client, &handler->resource); 678 ret = add_client_resource(client, &r->resource, GFP_KERNEL);
549 request->handle = handler->resource.handle; 679 if (ret < 0) {
680 release_address_handler(client, &r->resource);
681 return ret;
682 }
683 request->handle = r->resource.handle;
550 684
551 return 0; 685 return 0;
552} 686}
@@ -555,18 +689,22 @@ static int ioctl_deallocate(struct client *client, void *buffer)
555{ 689{
556 struct fw_cdev_deallocate *request = buffer; 690 struct fw_cdev_deallocate *request = buffer;
557 691
558 return release_client_resource(client, request->handle, NULL); 692 return release_client_resource(client, request->handle,
693 release_address_handler, NULL);
559} 694}
560 695
561static int ioctl_send_response(struct client *client, void *buffer) 696static int ioctl_send_response(struct client *client, void *buffer)
562{ 697{
563 struct fw_cdev_send_response *request = buffer; 698 struct fw_cdev_send_response *request = buffer;
564 struct client_resource *resource; 699 struct client_resource *resource;
565 struct request *r; 700 struct inbound_transaction_resource *r;
566 701
567 if (release_client_resource(client, request->handle, &resource) < 0) 702 if (release_client_resource(client, request->handle,
703 release_request, &resource) < 0)
568 return -EINVAL; 704 return -EINVAL;
569 r = container_of(resource, struct request, resource); 705
706 r = container_of(resource, struct inbound_transaction_resource,
707 resource);
570 if (request->length < r->length) 708 if (request->length < r->length)
571 r->length = request->length; 709 r->length = request->length;
572 if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) 710 if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
@@ -588,85 +726,92 @@ static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
588 return fw_core_initiate_bus_reset(client->device->card, short_reset); 726 return fw_core_initiate_bus_reset(client->device->card, short_reset);
589} 727}
590 728
591struct descriptor {
592 struct fw_descriptor d;
593 struct client_resource resource;
594 u32 data[0];
595};
596
597static void release_descriptor(struct client *client, 729static void release_descriptor(struct client *client,
598 struct client_resource *resource) 730 struct client_resource *resource)
599{ 731{
600 struct descriptor *descriptor = 732 struct descriptor_resource *r =
601 container_of(resource, struct descriptor, resource); 733 container_of(resource, struct descriptor_resource, resource);
602 734
603 fw_core_remove_descriptor(&descriptor->d); 735 fw_core_remove_descriptor(&r->descriptor);
604 kfree(descriptor); 736 kfree(r);
605} 737}
606 738
607static int ioctl_add_descriptor(struct client *client, void *buffer) 739static int ioctl_add_descriptor(struct client *client, void *buffer)
608{ 740{
609 struct fw_cdev_add_descriptor *request = buffer; 741 struct fw_cdev_add_descriptor *request = buffer;
610 struct descriptor *descriptor; 742 struct fw_card *card = client->device->card;
611 int retval; 743 struct descriptor_resource *r;
744 int ret;
745
746 /* Access policy: Allow this ioctl only on local nodes' device files. */
747 spin_lock_irq(&card->lock);
748 ret = client->device->node_id != card->local_node->node_id;
749 spin_unlock_irq(&card->lock);
750 if (ret)
751 return -ENOSYS;
612 752
613 if (request->length > 256) 753 if (request->length > 256)
614 return -EINVAL; 754 return -EINVAL;
615 755
616 descriptor = 756 r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
617 kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL); 757 if (r == NULL)
618 if (descriptor == NULL)
619 return -ENOMEM; 758 return -ENOMEM;
620 759
621 if (copy_from_user(descriptor->data, 760 if (copy_from_user(r->data,
622 u64_to_uptr(request->data), request->length * 4)) { 761 u64_to_uptr(request->data), request->length * 4)) {
623 kfree(descriptor); 762 ret = -EFAULT;
624 return -EFAULT; 763 goto failed;
625 } 764 }
626 765
627 descriptor->d.length = request->length; 766 r->descriptor.length = request->length;
628 descriptor->d.immediate = request->immediate; 767 r->descriptor.immediate = request->immediate;
629 descriptor->d.key = request->key; 768 r->descriptor.key = request->key;
630 descriptor->d.data = descriptor->data; 769 r->descriptor.data = r->data;
631 770
632 retval = fw_core_add_descriptor(&descriptor->d); 771 ret = fw_core_add_descriptor(&r->descriptor);
633 if (retval < 0) { 772 if (ret < 0)
634 kfree(descriptor); 773 goto failed;
635 return retval;
636 }
637 774
638 descriptor->resource.release = release_descriptor; 775 r->resource.release = release_descriptor;
639 add_client_resource(client, &descriptor->resource); 776 ret = add_client_resource(client, &r->resource, GFP_KERNEL);
640 request->handle = descriptor->resource.handle; 777 if (ret < 0) {
778 fw_core_remove_descriptor(&r->descriptor);
779 goto failed;
780 }
781 request->handle = r->resource.handle;
641 782
642 return 0; 783 return 0;
784 failed:
785 kfree(r);
786
787 return ret;
643} 788}
644 789
645static int ioctl_remove_descriptor(struct client *client, void *buffer) 790static int ioctl_remove_descriptor(struct client *client, void *buffer)
646{ 791{
647 struct fw_cdev_remove_descriptor *request = buffer; 792 struct fw_cdev_remove_descriptor *request = buffer;
648 793
649 return release_client_resource(client, request->handle, NULL); 794 return release_client_resource(client, request->handle,
795 release_descriptor, NULL);
650} 796}
651 797
652static void 798static void iso_callback(struct fw_iso_context *context, u32 cycle,
653iso_callback(struct fw_iso_context *context, u32 cycle, 799 size_t header_length, void *header, void *data)
654 size_t header_length, void *header, void *data)
655{ 800{
656 struct client *client = data; 801 struct client *client = data;
657 struct iso_interrupt *irq; 802 struct iso_interrupt_event *e;
658 803
659 irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC); 804 e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
660 if (irq == NULL) 805 if (e == NULL)
661 return; 806 return;
662 807
663 irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; 808 e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
664 irq->interrupt.closure = client->iso_closure; 809 e->interrupt.closure = client->iso_closure;
665 irq->interrupt.cycle = cycle; 810 e->interrupt.cycle = cycle;
666 irq->interrupt.header_length = header_length; 811 e->interrupt.header_length = header_length;
667 memcpy(irq->interrupt.header, header, header_length); 812 memcpy(e->interrupt.header, header, header_length);
668 queue_event(client, &irq->event, &irq->interrupt, 813 queue_event(client, &e->event, &e->interrupt,
669 sizeof(irq->interrupt) + header_length, NULL, 0); 814 sizeof(e->interrupt) + header_length, NULL, 0);
670} 815}
671 816
672static int ioctl_create_iso_context(struct client *client, void *buffer) 817static int ioctl_create_iso_context(struct client *client, void *buffer)
@@ -871,6 +1016,261 @@ static int ioctl_get_cycle_timer(struct client *client, void *buffer)
871 return 0; 1016 return 0;
872} 1017}
873 1018
1019static void iso_resource_work(struct work_struct *work)
1020{
1021 struct iso_resource_event *e;
1022 struct iso_resource *r =
1023 container_of(work, struct iso_resource, work.work);
1024 struct client *client = r->client;
1025 int generation, channel, bandwidth, todo;
1026 bool skip, free, success;
1027
1028 spin_lock_irq(&client->lock);
1029 generation = client->device->generation;
1030 todo = r->todo;
1031 /* Allow 1000ms grace period for other reallocations. */
1032 if (todo == ISO_RES_ALLOC &&
1033 time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
1034 if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
1035 client_get(client);
1036 skip = true;
1037 } else {
1038 /* We could be called twice within the same generation. */
1039 skip = todo == ISO_RES_REALLOC &&
1040 r->generation == generation;
1041 }
1042 free = todo == ISO_RES_DEALLOC ||
1043 todo == ISO_RES_ALLOC_ONCE ||
1044 todo == ISO_RES_DEALLOC_ONCE;
1045 r->generation = generation;
1046 spin_unlock_irq(&client->lock);
1047
1048 if (skip)
1049 goto out;
1050
1051 bandwidth = r->bandwidth;
1052
1053 fw_iso_resource_manage(client->device->card, generation,
1054 r->channels, &channel, &bandwidth,
1055 todo == ISO_RES_ALLOC ||
1056 todo == ISO_RES_REALLOC ||
1057 todo == ISO_RES_ALLOC_ONCE);
1058 /*
1059 * Is this generation outdated already? As long as this resource sticks
1060 * in the idr, it will be scheduled again for a newer generation or at
1061 * shutdown.
1062 */
1063 if (channel == -EAGAIN &&
1064 (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
1065 goto out;
1066
1067 success = channel >= 0 || bandwidth > 0;
1068
1069 spin_lock_irq(&client->lock);
1070 /*
1071 * Transit from allocation to reallocation, except if the client
1072 * requested deallocation in the meantime.
1073 */
1074 if (r->todo == ISO_RES_ALLOC)
1075 r->todo = ISO_RES_REALLOC;
1076 /*
1077 * Allocation or reallocation failure? Pull this resource out of the
1078 * idr and prepare for deletion, unless the client is shutting down.
1079 */
1080 if (r->todo == ISO_RES_REALLOC && !success &&
1081 !client->in_shutdown &&
1082 idr_find(&client->resource_idr, r->resource.handle)) {
1083 idr_remove(&client->resource_idr, r->resource.handle);
1084 client_put(client);
1085 free = true;
1086 }
1087 spin_unlock_irq(&client->lock);
1088
1089 if (todo == ISO_RES_ALLOC && channel >= 0)
1090 r->channels = 1ULL << channel;
1091
1092 if (todo == ISO_RES_REALLOC && success)
1093 goto out;
1094
1095 if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
1096 e = r->e_alloc;
1097 r->e_alloc = NULL;
1098 } else {
1099 e = r->e_dealloc;
1100 r->e_dealloc = NULL;
1101 }
1102 e->resource.handle = r->resource.handle;
1103 e->resource.channel = channel;
1104 e->resource.bandwidth = bandwidth;
1105
1106 queue_event(client, &e->event,
1107 &e->resource, sizeof(e->resource), NULL, 0);
1108
1109 if (free) {
1110 cancel_delayed_work(&r->work);
1111 kfree(r->e_alloc);
1112 kfree(r->e_dealloc);
1113 kfree(r);
1114 }
1115 out:
1116 client_put(client);
1117}
1118
1119static void schedule_iso_resource(struct iso_resource *r)
1120{
1121 client_get(r->client);
1122 if (!schedule_delayed_work(&r->work, 0))
1123 client_put(r->client);
1124}
1125
1126static void release_iso_resource(struct client *client,
1127 struct client_resource *resource)
1128{
1129 struct iso_resource *r =
1130 container_of(resource, struct iso_resource, resource);
1131
1132 spin_lock_irq(&client->lock);
1133 r->todo = ISO_RES_DEALLOC;
1134 schedule_iso_resource(r);
1135 spin_unlock_irq(&client->lock);
1136}
1137
1138static int init_iso_resource(struct client *client,
1139 struct fw_cdev_allocate_iso_resource *request, int todo)
1140{
1141 struct iso_resource_event *e1, *e2;
1142 struct iso_resource *r;
1143 int ret;
1144
1145 if ((request->channels == 0 && request->bandwidth == 0) ||
1146 request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
1147 request->bandwidth < 0)
1148 return -EINVAL;
1149
1150 r = kmalloc(sizeof(*r), GFP_KERNEL);
1151 e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
1152 e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
1153 if (r == NULL || e1 == NULL || e2 == NULL) {
1154 ret = -ENOMEM;
1155 goto fail;
1156 }
1157
1158 INIT_DELAYED_WORK(&r->work, iso_resource_work);
1159 r->client = client;
1160 r->todo = todo;
1161 r->generation = -1;
1162 r->channels = request->channels;
1163 r->bandwidth = request->bandwidth;
1164 r->e_alloc = e1;
1165 r->e_dealloc = e2;
1166
1167 e1->resource.closure = request->closure;
1168 e1->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
1169 e2->resource.closure = request->closure;
1170 e2->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
1171
1172 if (todo == ISO_RES_ALLOC) {
1173 r->resource.release = release_iso_resource;
1174 ret = add_client_resource(client, &r->resource, GFP_KERNEL);
1175 if (ret < 0)
1176 goto fail;
1177 } else {
1178 r->resource.release = NULL;
1179 r->resource.handle = -1;
1180 schedule_iso_resource(r);
1181 }
1182 request->handle = r->resource.handle;
1183
1184 return 0;
1185 fail:
1186 kfree(r);
1187 kfree(e1);
1188 kfree(e2);
1189
1190 return ret;
1191}
1192
1193static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
1194{
1195 struct fw_cdev_allocate_iso_resource *request = buffer;
1196
1197 return init_iso_resource(client, request, ISO_RES_ALLOC);
1198}
1199
1200static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
1201{
1202 struct fw_cdev_deallocate *request = buffer;
1203
1204 return release_client_resource(client, request->handle,
1205 release_iso_resource, NULL);
1206}
1207
1208static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
1209{
1210 struct fw_cdev_allocate_iso_resource *request = buffer;
1211
1212 return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
1213}
1214
1215static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
1216{
1217 struct fw_cdev_allocate_iso_resource *request = buffer;
1218
1219 return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
1220}
1221
1222/*
1223 * Returns a speed code: Maximum speed to or from this device,
1224 * limited by the device's link speed, the local node's link speed,
1225 * and all PHY port speeds between the two links.
1226 */
1227static int ioctl_get_speed(struct client *client, void *buffer)
1228{
1229 return client->device->max_speed;
1230}
1231
1232static int ioctl_send_broadcast_request(struct client *client, void *buffer)
1233{
1234 struct fw_cdev_send_request *request = buffer;
1235
1236 switch (request->tcode) {
1237 case TCODE_WRITE_QUADLET_REQUEST:
1238 case TCODE_WRITE_BLOCK_REQUEST:
1239 break;
1240 default:
1241 return -EINVAL;
1242 }
1243
1244 /* Security policy: Only allow accesses to Units Space. */
1245 if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
1246 return -EACCES;
1247
1248 return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
1249}
1250
1251static int ioctl_send_stream_packet(struct client *client, void *buffer)
1252{
1253 struct fw_cdev_send_stream_packet *p = buffer;
1254 struct fw_cdev_send_request request;
1255 int dest;
1256
1257 if (p->speed > client->device->card->link_speed ||
1258 p->length > 1024 << p->speed)
1259 return -EIO;
1260
1261 if (p->tag > 3 || p->channel > 63 || p->sy > 15)
1262 return -EINVAL;
1263
1264 dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
1265 request.tcode = TCODE_STREAM_DATA;
1266 request.length = p->length;
1267 request.closure = p->closure;
1268 request.data = p->data;
1269 request.generation = p->generation;
1270
1271 return init_request(client, &request, dest, p->speed);
1272}
1273
874static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { 1274static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
875 ioctl_get_info, 1275 ioctl_get_info,
876 ioctl_send_request, 1276 ioctl_send_request,
@@ -885,13 +1285,20 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
885 ioctl_start_iso, 1285 ioctl_start_iso,
886 ioctl_stop_iso, 1286 ioctl_stop_iso,
887 ioctl_get_cycle_timer, 1287 ioctl_get_cycle_timer,
1288 ioctl_allocate_iso_resource,
1289 ioctl_deallocate_iso_resource,
1290 ioctl_allocate_iso_resource_once,
1291 ioctl_deallocate_iso_resource_once,
1292 ioctl_get_speed,
1293 ioctl_send_broadcast_request,
1294 ioctl_send_stream_packet,
888}; 1295};
889 1296
890static int 1297static int dispatch_ioctl(struct client *client,
891dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) 1298 unsigned int cmd, void __user *arg)
892{ 1299{
893 char buffer[256]; 1300 char buffer[256];
894 int retval; 1301 int ret;
895 1302
896 if (_IOC_TYPE(cmd) != '#' || 1303 if (_IOC_TYPE(cmd) != '#' ||
897 _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) 1304 _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
@@ -903,9 +1310,9 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
903 return -EFAULT; 1310 return -EFAULT;
904 } 1311 }
905 1312
906 retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer); 1313 ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
907 if (retval < 0) 1314 if (ret < 0)
908 return retval; 1315 return ret;
909 1316
910 if (_IOC_DIR(cmd) & _IOC_READ) { 1317 if (_IOC_DIR(cmd) & _IOC_READ) {
911 if (_IOC_SIZE(cmd) > sizeof(buffer) || 1318 if (_IOC_SIZE(cmd) > sizeof(buffer) ||
@@ -913,12 +1320,11 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
913 return -EFAULT; 1320 return -EFAULT;
914 } 1321 }
915 1322
916 return retval; 1323 return ret;
917} 1324}
918 1325
919static long 1326static long fw_device_op_ioctl(struct file *file,
920fw_device_op_ioctl(struct file *file, 1327 unsigned int cmd, unsigned long arg)
921 unsigned int cmd, unsigned long arg)
922{ 1328{
923 struct client *client = file->private_data; 1329 struct client *client = file->private_data;
924 1330
@@ -929,9 +1335,8 @@ fw_device_op_ioctl(struct file *file,
929} 1335}
930 1336
931#ifdef CONFIG_COMPAT 1337#ifdef CONFIG_COMPAT
932static long 1338static long fw_device_op_compat_ioctl(struct file *file,
933fw_device_op_compat_ioctl(struct file *file, 1339 unsigned int cmd, unsigned long arg)
934 unsigned int cmd, unsigned long arg)
935{ 1340{
936 struct client *client = file->private_data; 1341 struct client *client = file->private_data;
937 1342
@@ -947,7 +1352,7 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
947 struct client *client = file->private_data; 1352 struct client *client = file->private_data;
948 enum dma_data_direction direction; 1353 enum dma_data_direction direction;
949 unsigned long size; 1354 unsigned long size;
950 int page_count, retval; 1355 int page_count, ret;
951 1356
952 if (fw_device_is_shutdown(client->device)) 1357 if (fw_device_is_shutdown(client->device))
953 return -ENODEV; 1358 return -ENODEV;
@@ -973,48 +1378,57 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
973 else 1378 else
974 direction = DMA_FROM_DEVICE; 1379 direction = DMA_FROM_DEVICE;
975 1380
976 retval = fw_iso_buffer_init(&client->buffer, client->device->card, 1381 ret = fw_iso_buffer_init(&client->buffer, client->device->card,
977 page_count, direction); 1382 page_count, direction);
978 if (retval < 0) 1383 if (ret < 0)
979 return retval; 1384 return ret;
980 1385
981 retval = fw_iso_buffer_map(&client->buffer, vma); 1386 ret = fw_iso_buffer_map(&client->buffer, vma);
982 if (retval < 0) 1387 if (ret < 0)
983 fw_iso_buffer_destroy(&client->buffer, client->device->card); 1388 fw_iso_buffer_destroy(&client->buffer, client->device->card);
984 1389
985 return retval; 1390 return ret;
1391}
1392
1393static int shutdown_resource(int id, void *p, void *data)
1394{
1395 struct client_resource *r = p;
1396 struct client *client = data;
1397
1398 r->release(client, r);
1399 client_put(client);
1400
1401 return 0;
986} 1402}
987 1403
988static int fw_device_op_release(struct inode *inode, struct file *file) 1404static int fw_device_op_release(struct inode *inode, struct file *file)
989{ 1405{
990 struct client *client = file->private_data; 1406 struct client *client = file->private_data;
991 struct event *e, *next_e; 1407 struct event *e, *next_e;
992 struct client_resource *r, *next_r;
993 unsigned long flags;
994 1408
995 if (client->buffer.pages) 1409 mutex_lock(&client->device->client_list_mutex);
996 fw_iso_buffer_destroy(&client->buffer, client->device->card); 1410 list_del(&client->link);
1411 mutex_unlock(&client->device->client_list_mutex);
997 1412
998 if (client->iso_context) 1413 if (client->iso_context)
999 fw_iso_context_destroy(client->iso_context); 1414 fw_iso_context_destroy(client->iso_context);
1000 1415
1001 list_for_each_entry_safe(r, next_r, &client->resource_list, link) 1416 if (client->buffer.pages)
1002 r->release(client, r); 1417 fw_iso_buffer_destroy(&client->buffer, client->device->card);
1003 1418
1004 /* 1419 /* Freeze client->resource_idr and client->event_list */
1005 * FIXME: We should wait for the async tasklets to stop 1420 spin_lock_irq(&client->lock);
1006 * running before freeing the memory. 1421 client->in_shutdown = true;
1007 */ 1422 spin_unlock_irq(&client->lock);
1423
1424 idr_for_each(&client->resource_idr, shutdown_resource, client);
1425 idr_remove_all(&client->resource_idr);
1426 idr_destroy(&client->resource_idr);
1008 1427
1009 list_for_each_entry_safe(e, next_e, &client->event_list, link) 1428 list_for_each_entry_safe(e, next_e, &client->event_list, link)
1010 kfree(e); 1429 kfree(e);
1011 1430
1012 spin_lock_irqsave(&client->device->card->lock, flags); 1431 client_put(client);
1013 list_del(&client->link);
1014 spin_unlock_irqrestore(&client->device->card->lock, flags);
1015
1016 fw_device_put(client->device);
1017 kfree(client);
1018 1432
1019 return 0; 1433 return 0;
1020} 1434}
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index bf53acb45652..a47e2129d83d 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -18,22 +18,26 @@
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */ 19 */
20 20
21#include <linux/module.h> 21#include <linux/ctype.h>
22#include <linux/wait.h>
23#include <linux/errno.h>
24#include <linux/kthread.h>
25#include <linux/device.h>
26#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/device.h>
24#include <linux/errno.h>
27#include <linux/idr.h> 25#include <linux/idr.h>
28#include <linux/jiffies.h> 26#include <linux/jiffies.h>
29#include <linux/string.h> 27#include <linux/kobject.h>
28#include <linux/list.h>
29#include <linux/mutex.h>
30#include <linux/rwsem.h> 30#include <linux/rwsem.h>
31#include <linux/semaphore.h> 31#include <linux/semaphore.h>
32#include <linux/spinlock.h>
33#include <linux/string.h>
34#include <linux/workqueue.h>
35
32#include <asm/system.h> 36#include <asm/system.h>
33#include <linux/ctype.h> 37
34#include "fw-transaction.h"
35#include "fw-topology.h"
36#include "fw-device.h" 38#include "fw-device.h"
39#include "fw-topology.h"
40#include "fw-transaction.h"
37 41
38void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p) 42void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
39{ 43{
@@ -132,8 +136,7 @@ static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
132 vendor, model, specifier_id, version); 136 vendor, model, specifier_id, version);
133} 137}
134 138
135static int 139static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
136fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
137{ 140{
138 struct fw_unit *unit = fw_unit(dev); 141 struct fw_unit *unit = fw_unit(dev);
139 char modalias[64]; 142 char modalias[64];
@@ -152,27 +155,6 @@ struct bus_type fw_bus_type = {
152}; 155};
153EXPORT_SYMBOL(fw_bus_type); 156EXPORT_SYMBOL(fw_bus_type);
154 157
155static void fw_device_release(struct device *dev)
156{
157 struct fw_device *device = fw_device(dev);
158 struct fw_card *card = device->card;
159 unsigned long flags;
160
161 /*
162 * Take the card lock so we don't set this to NULL while a
163 * FW_NODE_UPDATED callback is being handled or while the
164 * bus manager work looks at this node.
165 */
166 spin_lock_irqsave(&card->lock, flags);
167 device->node->data = NULL;
168 spin_unlock_irqrestore(&card->lock, flags);
169
170 fw_node_put(device->node);
171 kfree(device->config_rom);
172 kfree(device);
173 fw_card_put(card);
174}
175
176int fw_device_enable_phys_dma(struct fw_device *device) 158int fw_device_enable_phys_dma(struct fw_device *device)
177{ 159{
178 int generation = device->generation; 160 int generation = device->generation;
@@ -191,8 +173,8 @@ struct config_rom_attribute {
191 u32 key; 173 u32 key;
192}; 174};
193 175
194static ssize_t 176static ssize_t show_immediate(struct device *dev,
195show_immediate(struct device *dev, struct device_attribute *dattr, char *buf) 177 struct device_attribute *dattr, char *buf)
196{ 178{
197 struct config_rom_attribute *attr = 179 struct config_rom_attribute *attr =
198 container_of(dattr, struct config_rom_attribute, attr); 180 container_of(dattr, struct config_rom_attribute, attr);
@@ -223,8 +205,8 @@ show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
223#define IMMEDIATE_ATTR(name, key) \ 205#define IMMEDIATE_ATTR(name, key) \
224 { __ATTR(name, S_IRUGO, show_immediate, NULL), key } 206 { __ATTR(name, S_IRUGO, show_immediate, NULL), key }
225 207
226static ssize_t 208static ssize_t show_text_leaf(struct device *dev,
227show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf) 209 struct device_attribute *dattr, char *buf)
228{ 210{
229 struct config_rom_attribute *attr = 211 struct config_rom_attribute *attr =
230 container_of(dattr, struct config_rom_attribute, attr); 212 container_of(dattr, struct config_rom_attribute, attr);
@@ -293,10 +275,9 @@ static struct config_rom_attribute config_rom_attributes[] = {
293 TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION), 275 TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
294}; 276};
295 277
296static void 278static void init_fw_attribute_group(struct device *dev,
297init_fw_attribute_group(struct device *dev, 279 struct device_attribute *attrs,
298 struct device_attribute *attrs, 280 struct fw_attribute_group *group)
299 struct fw_attribute_group *group)
300{ 281{
301 struct device_attribute *attr; 282 struct device_attribute *attr;
302 int i, j; 283 int i, j;
@@ -319,9 +300,8 @@ init_fw_attribute_group(struct device *dev,
319 dev->groups = group->groups; 300 dev->groups = group->groups;
320} 301}
321 302
322static ssize_t 303static ssize_t modalias_show(struct device *dev,
323modalias_show(struct device *dev, 304 struct device_attribute *attr, char *buf)
324 struct device_attribute *attr, char *buf)
325{ 305{
326 struct fw_unit *unit = fw_unit(dev); 306 struct fw_unit *unit = fw_unit(dev);
327 int length; 307 int length;
@@ -332,9 +312,8 @@ modalias_show(struct device *dev,
332 return length + 1; 312 return length + 1;
333} 313}
334 314
335static ssize_t 315static ssize_t rom_index_show(struct device *dev,
336rom_index_show(struct device *dev, 316 struct device_attribute *attr, char *buf)
337 struct device_attribute *attr, char *buf)
338{ 317{
339 struct fw_device *device = fw_device(dev->parent); 318 struct fw_device *device = fw_device(dev->parent);
340 struct fw_unit *unit = fw_unit(dev); 319 struct fw_unit *unit = fw_unit(dev);
@@ -349,8 +328,8 @@ static struct device_attribute fw_unit_attributes[] = {
349 __ATTR_NULL, 328 __ATTR_NULL,
350}; 329};
351 330
352static ssize_t 331static ssize_t config_rom_show(struct device *dev,
353config_rom_show(struct device *dev, struct device_attribute *attr, char *buf) 332 struct device_attribute *attr, char *buf)
354{ 333{
355 struct fw_device *device = fw_device(dev); 334 struct fw_device *device = fw_device(dev);
356 size_t length; 335 size_t length;
@@ -363,8 +342,8 @@ config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
363 return length; 342 return length;
364} 343}
365 344
366static ssize_t 345static ssize_t guid_show(struct device *dev,
367guid_show(struct device *dev, struct device_attribute *attr, char *buf) 346 struct device_attribute *attr, char *buf)
368{ 347{
369 struct fw_device *device = fw_device(dev); 348 struct fw_device *device = fw_device(dev);
370 int ret; 349 int ret;
@@ -383,8 +362,8 @@ static struct device_attribute fw_device_attributes[] = {
383 __ATTR_NULL, 362 __ATTR_NULL,
384}; 363};
385 364
386static int 365static int read_rom(struct fw_device *device,
387read_rom(struct fw_device *device, int generation, int index, u32 *data) 366 int generation, int index, u32 *data)
388{ 367{
389 int rcode; 368 int rcode;
390 369
@@ -539,7 +518,7 @@ static int read_bus_info_block(struct fw_device *device, int generation)
539 518
540 kfree(old_rom); 519 kfree(old_rom);
541 ret = 0; 520 ret = 0;
542 device->cmc = rom[2] & 1 << 30; 521 device->cmc = rom[2] >> 30 & 1;
543 out: 522 out:
544 kfree(rom); 523 kfree(rom);
545 524
@@ -679,11 +658,53 @@ static void fw_device_shutdown(struct work_struct *work)
679 fw_device_put(device); 658 fw_device_put(device);
680} 659}
681 660
661static void fw_device_release(struct device *dev)
662{
663 struct fw_device *device = fw_device(dev);
664 struct fw_card *card = device->card;
665 unsigned long flags;
666
667 /*
668 * Take the card lock so we don't set this to NULL while a
669 * FW_NODE_UPDATED callback is being handled or while the
670 * bus manager work looks at this node.
671 */
672 spin_lock_irqsave(&card->lock, flags);
673 device->node->data = NULL;
674 spin_unlock_irqrestore(&card->lock, flags);
675
676 fw_node_put(device->node);
677 kfree(device->config_rom);
678 kfree(device);
679 fw_card_put(card);
680}
681
682static struct device_type fw_device_type = { 682static struct device_type fw_device_type = {
683 .release = fw_device_release, 683 .release = fw_device_release,
684}; 684};
685 685
686static void fw_device_update(struct work_struct *work); 686static int update_unit(struct device *dev, void *data)
687{
688 struct fw_unit *unit = fw_unit(dev);
689 struct fw_driver *driver = (struct fw_driver *)dev->driver;
690
691 if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
692 down(&dev->sem);
693 driver->update(unit);
694 up(&dev->sem);
695 }
696
697 return 0;
698}
699
700static void fw_device_update(struct work_struct *work)
701{
702 struct fw_device *device =
703 container_of(work, struct fw_device, work.work);
704
705 fw_device_cdev_update(device);
706 device_for_each_child(&device->device, NULL, update_unit);
707}
687 708
688/* 709/*
689 * If a device was pending for deletion because its node went away but its 710 * If a device was pending for deletion because its node went away but its
@@ -735,12 +756,50 @@ static int lookup_existing_device(struct device *dev, void *data)
735 return match; 756 return match;
736} 757}
737 758
759enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
760
761void fw_device_set_broadcast_channel(struct fw_device *device, int generation)
762{
763 struct fw_card *card = device->card;
764 __be32 data;
765 int rcode;
766
767 if (!card->broadcast_channel_allocated)
768 return;
769
770 if (device->bc_implemented == BC_UNKNOWN) {
771 rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
772 device->node_id, generation, device->max_speed,
773 CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
774 &data, 4);
775 switch (rcode) {
776 case RCODE_COMPLETE:
777 if (data & cpu_to_be32(1 << 31)) {
778 device->bc_implemented = BC_IMPLEMENTED;
779 break;
780 }
781 /* else fall through to case address error */
782 case RCODE_ADDRESS_ERROR:
783 device->bc_implemented = BC_UNIMPLEMENTED;
784 }
785 }
786
787 if (device->bc_implemented == BC_IMPLEMENTED) {
788 data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
789 BROADCAST_CHANNEL_VALID);
790 fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
791 device->node_id, generation, device->max_speed,
792 CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
793 &data, 4);
794 }
795}
796
738static void fw_device_init(struct work_struct *work) 797static void fw_device_init(struct work_struct *work)
739{ 798{
740 struct fw_device *device = 799 struct fw_device *device =
741 container_of(work, struct fw_device, work.work); 800 container_of(work, struct fw_device, work.work);
742 struct device *revived_dev; 801 struct device *revived_dev;
743 int minor, err; 802 int minor, ret;
744 803
745 /* 804 /*
746 * All failure paths here set node->data to NULL, so that we 805 * All failure paths here set node->data to NULL, so that we
@@ -776,12 +835,12 @@ static void fw_device_init(struct work_struct *work)
776 835
777 fw_device_get(device); 836 fw_device_get(device);
778 down_write(&fw_device_rwsem); 837 down_write(&fw_device_rwsem);
779 err = idr_pre_get(&fw_device_idr, GFP_KERNEL) ? 838 ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
780 idr_get_new(&fw_device_idr, device, &minor) : 839 idr_get_new(&fw_device_idr, device, &minor) :
781 -ENOMEM; 840 -ENOMEM;
782 up_write(&fw_device_rwsem); 841 up_write(&fw_device_rwsem);
783 842
784 if (err < 0) 843 if (ret < 0)
785 goto error; 844 goto error;
786 845
787 device->device.bus = &fw_bus_type; 846 device->device.bus = &fw_bus_type;
@@ -828,6 +887,8 @@ static void fw_device_init(struct work_struct *work)
828 device->config_rom[3], device->config_rom[4], 887 device->config_rom[3], device->config_rom[4],
829 1 << device->max_speed); 888 1 << device->max_speed);
830 device->config_rom_retries = 0; 889 device->config_rom_retries = 0;
890
891 fw_device_set_broadcast_channel(device, device->generation);
831 } 892 }
832 893
833 /* 894 /*
@@ -851,29 +912,6 @@ static void fw_device_init(struct work_struct *work)
851 put_device(&device->device); /* our reference */ 912 put_device(&device->device); /* our reference */
852} 913}
853 914
854static int update_unit(struct device *dev, void *data)
855{
856 struct fw_unit *unit = fw_unit(dev);
857 struct fw_driver *driver = (struct fw_driver *)dev->driver;
858
859 if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
860 down(&dev->sem);
861 driver->update(unit);
862 up(&dev->sem);
863 }
864
865 return 0;
866}
867
868static void fw_device_update(struct work_struct *work)
869{
870 struct fw_device *device =
871 container_of(work, struct fw_device, work.work);
872
873 fw_device_cdev_update(device);
874 device_for_each_child(&device->device, NULL, update_unit);
875}
876
877enum { 915enum {
878 REREAD_BIB_ERROR, 916 REREAD_BIB_ERROR,
879 REREAD_BIB_GONE, 917 REREAD_BIB_GONE,
@@ -894,7 +932,7 @@ static int reread_bus_info_block(struct fw_device *device, int generation)
894 if (i == 0 && q == 0) 932 if (i == 0 && q == 0)
895 return REREAD_BIB_GONE; 933 return REREAD_BIB_GONE;
896 934
897 if (i > device->config_rom_length || q != device->config_rom[i]) 935 if (q != device->config_rom[i])
898 return REREAD_BIB_CHANGED; 936 return REREAD_BIB_CHANGED;
899 } 937 }
900 938
@@ -1004,6 +1042,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
1004 device->node = fw_node_get(node); 1042 device->node = fw_node_get(node);
1005 device->node_id = node->node_id; 1043 device->node_id = node->node_id;
1006 device->generation = card->generation; 1044 device->generation = card->generation;
1045 mutex_init(&device->client_list_mutex);
1007 INIT_LIST_HEAD(&device->client_list); 1046 INIT_LIST_HEAD(&device->client_list);
1008 1047
1009 /* 1048 /*
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index 8ef6ec2ca21c..97588937c018 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -19,10 +19,17 @@
19#ifndef __fw_device_h 19#ifndef __fw_device_h
20#define __fw_device_h 20#define __fw_device_h
21 21
22#include <linux/device.h>
22#include <linux/fs.h> 23#include <linux/fs.h>
23#include <linux/cdev.h>
24#include <linux/idr.h> 24#include <linux/idr.h>
25#include <linux/kernel.h>
26#include <linux/list.h>
27#include <linux/mutex.h>
25#include <linux/rwsem.h> 28#include <linux/rwsem.h>
29#include <linux/sysfs.h>
30#include <linux/types.h>
31#include <linux/workqueue.h>
32
26#include <asm/atomic.h> 33#include <asm/atomic.h>
27 34
28enum fw_device_state { 35enum fw_device_state {
@@ -38,6 +45,9 @@ struct fw_attribute_group {
38 struct attribute *attrs[11]; 45 struct attribute *attrs[11];
39}; 46};
40 47
48struct fw_node;
49struct fw_card;
50
41/* 51/*
42 * Note, fw_device.generation always has to be read before fw_device.node_id. 52 * Note, fw_device.generation always has to be read before fw_device.node_id.
43 * Use SMP memory barriers to ensure this. Otherwise requests will be sent 53 * Use SMP memory barriers to ensure this. Otherwise requests will be sent
@@ -61,13 +71,18 @@ struct fw_device {
61 int node_id; 71 int node_id;
62 int generation; 72 int generation;
63 unsigned max_speed; 73 unsigned max_speed;
64 bool cmc;
65 struct fw_card *card; 74 struct fw_card *card;
66 struct device device; 75 struct device device;
76
77 struct mutex client_list_mutex;
67 struct list_head client_list; 78 struct list_head client_list;
79
68 u32 *config_rom; 80 u32 *config_rom;
69 size_t config_rom_length; 81 size_t config_rom_length;
70 int config_rom_retries; 82 int config_rom_retries;
83 unsigned cmc:1;
84 unsigned bc_implemented:2;
85
71 struct delayed_work work; 86 struct delayed_work work;
72 struct fw_attribute_group attribute_group; 87 struct fw_attribute_group attribute_group;
73}; 88};
@@ -96,6 +111,7 @@ static inline void fw_device_put(struct fw_device *device)
96 111
97struct fw_device *fw_device_get_by_devt(dev_t devt); 112struct fw_device *fw_device_get_by_devt(dev_t devt);
98int fw_device_enable_phys_dma(struct fw_device *device); 113int fw_device_enable_phys_dma(struct fw_device *device);
114void fw_device_set_broadcast_channel(struct fw_device *device, int generation);
99 115
100void fw_device_cdev_update(struct fw_device *device); 116void fw_device_cdev_update(struct fw_device *device);
101void fw_device_cdev_remove(struct fw_device *device); 117void fw_device_cdev_remove(struct fw_device *device);
@@ -176,8 +192,7 @@ struct fw_driver {
176 const struct fw_device_id *id_table; 192 const struct fw_device_id *id_table;
177}; 193};
178 194
179static inline struct fw_driver * 195static inline struct fw_driver *fw_driver(struct device_driver *drv)
180fw_driver(struct device_driver *drv)
181{ 196{
182 return container_of(drv, struct fw_driver, driver); 197 return container_of(drv, struct fw_driver, driver);
183} 198}
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index e14c03dc0065..2baf1007253e 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * Isochronous IO functionality 2 * Isochronous I/O functionality:
3 * - Isochronous DMA context management
4 * - Isochronous bus resource management (channels, bandwidth), client side
3 * 5 *
4 * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net> 6 * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
5 * 7 *
@@ -18,21 +20,25 @@
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 20 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */ 21 */
20 22
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/dma-mapping.h> 23#include <linux/dma-mapping.h>
24#include <linux/vmalloc.h> 24#include <linux/errno.h>
25#include <linux/firewire-constants.h>
26#include <linux/kernel.h>
25#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/spinlock.h>
29#include <linux/vmalloc.h>
26 30
27#include "fw-transaction.h"
28#include "fw-topology.h" 31#include "fw-topology.h"
29#include "fw-device.h" 32#include "fw-transaction.h"
30 33
31int 34/*
32fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, 35 * Isochronous DMA context management
33 int page_count, enum dma_data_direction direction) 36 */
37
38int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
39 int page_count, enum dma_data_direction direction)
34{ 40{
35 int i, j, retval = -ENOMEM; 41 int i, j;
36 dma_addr_t address; 42 dma_addr_t address;
37 43
38 buffer->page_count = page_count; 44 buffer->page_count = page_count;
@@ -69,19 +75,21 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
69 kfree(buffer->pages); 75 kfree(buffer->pages);
70 out: 76 out:
71 buffer->pages = NULL; 77 buffer->pages = NULL;
72 return retval; 78
79 return -ENOMEM;
73} 80}
74 81
75int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma) 82int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
76{ 83{
77 unsigned long uaddr; 84 unsigned long uaddr;
78 int i, retval; 85 int i, err;
79 86
80 uaddr = vma->vm_start; 87 uaddr = vma->vm_start;
81 for (i = 0; i < buffer->page_count; i++) { 88 for (i = 0; i < buffer->page_count; i++) {
82 retval = vm_insert_page(vma, uaddr, buffer->pages[i]); 89 err = vm_insert_page(vma, uaddr, buffer->pages[i]);
83 if (retval) 90 if (err)
84 return retval; 91 return err;
92
85 uaddr += PAGE_SIZE; 93 uaddr += PAGE_SIZE;
86 } 94 }
87 95
@@ -105,14 +113,14 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
105 buffer->pages = NULL; 113 buffer->pages = NULL;
106} 114}
107 115
108struct fw_iso_context * 116struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
109fw_iso_context_create(struct fw_card *card, int type, 117 int type, int channel, int speed, size_t header_size,
110 int channel, int speed, size_t header_size, 118 fw_iso_callback_t callback, void *callback_data)
111 fw_iso_callback_t callback, void *callback_data)
112{ 119{
113 struct fw_iso_context *ctx; 120 struct fw_iso_context *ctx;
114 121
115 ctx = card->driver->allocate_iso_context(card, type, header_size); 122 ctx = card->driver->allocate_iso_context(card,
123 type, channel, header_size);
116 if (IS_ERR(ctx)) 124 if (IS_ERR(ctx))
117 return ctx; 125 return ctx;
118 126
@@ -134,25 +142,186 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx)
134 card->driver->free_iso_context(ctx); 142 card->driver->free_iso_context(ctx);
135} 143}
136 144
137int 145int fw_iso_context_start(struct fw_iso_context *ctx,
138fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags) 146 int cycle, int sync, int tags)
139{ 147{
140 return ctx->card->driver->start_iso(ctx, cycle, sync, tags); 148 return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
141} 149}
142 150
143int 151int fw_iso_context_queue(struct fw_iso_context *ctx,
144fw_iso_context_queue(struct fw_iso_context *ctx, 152 struct fw_iso_packet *packet,
145 struct fw_iso_packet *packet, 153 struct fw_iso_buffer *buffer,
146 struct fw_iso_buffer *buffer, 154 unsigned long payload)
147 unsigned long payload)
148{ 155{
149 struct fw_card *card = ctx->card; 156 struct fw_card *card = ctx->card;
150 157
151 return card->driver->queue_iso(ctx, packet, buffer, payload); 158 return card->driver->queue_iso(ctx, packet, buffer, payload);
152} 159}
153 160
154int 161int fw_iso_context_stop(struct fw_iso_context *ctx)
155fw_iso_context_stop(struct fw_iso_context *ctx)
156{ 162{
157 return ctx->card->driver->stop_iso(ctx); 163 return ctx->card->driver->stop_iso(ctx);
158} 164}
165
166/*
167 * Isochronous bus resource management (channels, bandwidth), client side
168 */
169
170static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
171 int bandwidth, bool allocate)
172{
173 __be32 data[2];
174 int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
175
176 /*
177 * On a 1394a IRM with low contention, try < 1 is enough.
178 * On a 1394-1995 IRM, we need at least try < 2.
179 * Let's just do try < 5.
180 */
181 for (try = 0; try < 5; try++) {
182 new = allocate ? old - bandwidth : old + bandwidth;
183 if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
184 break;
185
186 data[0] = cpu_to_be32(old);
187 data[1] = cpu_to_be32(new);
188 switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
189 irm_id, generation, SCODE_100,
190 CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
191 data, sizeof(data))) {
192 case RCODE_GENERATION:
193 /* A generation change frees all bandwidth. */
194 return allocate ? -EAGAIN : bandwidth;
195
196 case RCODE_COMPLETE:
197 if (be32_to_cpup(data) == old)
198 return bandwidth;
199
200 old = be32_to_cpup(data);
201 /* Fall through. */
202 }
203 }
204
205 return -EIO;
206}
207
208static int manage_channel(struct fw_card *card, int irm_id, int generation,
209 u32 channels_mask, u64 offset, bool allocate)
210{
211 __be32 data[2], c, all, old;
212 int i, retry = 5;
213
214 old = all = allocate ? cpu_to_be32(~0) : 0;
215
216 for (i = 0; i < 32; i++) {
217 if (!(channels_mask & 1 << i))
218 continue;
219
220 c = cpu_to_be32(1 << (31 - i));
221 if ((old & c) != (all & c))
222 continue;
223
224 data[0] = old;
225 data[1] = old ^ c;
226 switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
227 irm_id, generation, SCODE_100,
228 offset, data, sizeof(data))) {
229 case RCODE_GENERATION:
230 /* A generation change frees all channels. */
231 return allocate ? -EAGAIN : i;
232
233 case RCODE_COMPLETE:
234 if (data[0] == old)
235 return i;
236
237 old = data[0];
238
239 /* Is the IRM 1394a-2000 compliant? */
240 if ((data[0] & c) == (data[1] & c))
241 continue;
242
243 /* 1394-1995 IRM, fall through to retry. */
244 default:
245 if (retry--)
246 i--;
247 }
248 }
249
250 return -EIO;
251}
252
253static void deallocate_channel(struct fw_card *card, int irm_id,
254 int generation, int channel)
255{
256 u32 mask;
257 u64 offset;
258
259 mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
260 offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
261 CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
262
263 manage_channel(card, irm_id, generation, mask, offset, false);
264}
265
266/**
267 * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
268 *
269 * In parameters: card, generation, channels_mask, bandwidth, allocate
270 * Out parameters: channel, bandwidth
271 * This function blocks (sleeps) during communication with the IRM.
272 *
273 * Allocates or deallocates at most one channel out of channels_mask.
274 * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
275 * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
276 * channel 0 and LSB for channel 63.)
277 * Allocates or deallocates as many bandwidth allocation units as specified.
278 *
279 * Returns channel < 0 if no channel was allocated or deallocated.
280 * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
281 *
282 * If generation is stale, deallocations succeed but allocations fail with
283 * channel = -EAGAIN.
284 *
285 * If channel allocation fails, no bandwidth will be allocated either.
286 * If bandwidth allocation fails, no channel will be allocated either.
287 * But deallocations of channel and bandwidth are tried independently
288 * of each other's success.
289 */
290void fw_iso_resource_manage(struct fw_card *card, int generation,
291 u64 channels_mask, int *channel, int *bandwidth,
292 bool allocate)
293{
294 u32 channels_hi = channels_mask; /* channels 31...0 */
295 u32 channels_lo = channels_mask >> 32; /* channels 63...32 */
296 int irm_id, ret, c = -EINVAL;
297
298 spin_lock_irq(&card->lock);
299 irm_id = card->irm_node->node_id;
300 spin_unlock_irq(&card->lock);
301
302 if (channels_hi)
303 c = manage_channel(card, irm_id, generation, channels_hi,
304 CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
305 if (channels_lo && c < 0) {
306 c = manage_channel(card, irm_id, generation, channels_lo,
307 CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
308 if (c >= 0)
309 c += 32;
310 }
311 *channel = c;
312
313 if (allocate && channels_mask != 0 && c < 0)
314 *bandwidth = 0;
315
316 if (*bandwidth == 0)
317 return;
318
319 ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
320 if (ret < 0)
321 *bandwidth = 0;
322
323 if (allocate && ret < 0 && c >= 0) {
324 deallocate_channel(card, irm_id, generation, c);
325 *channel = ret;
326 }
327}
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 6d19828a93a5..1180d0be0bb4 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -205,6 +205,7 @@ struct fw_ohci {
205 205
206 u32 it_context_mask; 206 u32 it_context_mask;
207 struct iso_context *it_context_list; 207 struct iso_context *it_context_list;
208 u64 ir_context_channels;
208 u32 ir_context_mask; 209 u32 ir_context_mask;
209 struct iso_context *ir_context_list; 210 struct iso_context *ir_context_list;
210}; 211};
@@ -441,9 +442,8 @@ static inline void flush_writes(const struct fw_ohci *ohci)
441 reg_read(ohci, OHCI1394_Version); 442 reg_read(ohci, OHCI1394_Version);
442} 443}
443 444
444static int 445static int ohci_update_phy_reg(struct fw_card *card, int addr,
445ohci_update_phy_reg(struct fw_card *card, int addr, 446 int clear_bits, int set_bits)
446 int clear_bits, int set_bits)
447{ 447{
448 struct fw_ohci *ohci = fw_ohci(card); 448 struct fw_ohci *ohci = fw_ohci(card);
449 u32 val, old; 449 u32 val, old;
@@ -658,8 +658,8 @@ static void ar_context_tasklet(unsigned long data)
658 } 658 }
659} 659}
660 660
661static int 661static int ar_context_init(struct ar_context *ctx,
662ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs) 662 struct fw_ohci *ohci, u32 regs)
663{ 663{
664 struct ar_buffer ab; 664 struct ar_buffer ab;
665 665
@@ -690,8 +690,7 @@ static void ar_context_run(struct ar_context *ctx)
690 flush_writes(ctx->ohci); 690 flush_writes(ctx->ohci);
691} 691}
692 692
693static struct descriptor * 693static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
694find_branch_descriptor(struct descriptor *d, int z)
695{ 694{
696 int b, key; 695 int b, key;
697 696
@@ -751,8 +750,7 @@ static void context_tasklet(unsigned long data)
751 * Allocate a new buffer and add it to the list of free buffers for this 750 * Allocate a new buffer and add it to the list of free buffers for this
752 * context. Must be called with ohci->lock held. 751 * context. Must be called with ohci->lock held.
753 */ 752 */
754static int 753static int context_add_buffer(struct context *ctx)
755context_add_buffer(struct context *ctx)
756{ 754{
757 struct descriptor_buffer *desc; 755 struct descriptor_buffer *desc;
758 dma_addr_t uninitialized_var(bus_addr); 756 dma_addr_t uninitialized_var(bus_addr);
@@ -781,9 +779,8 @@ context_add_buffer(struct context *ctx)
781 return 0; 779 return 0;
782} 780}
783 781
784static int 782static int context_init(struct context *ctx, struct fw_ohci *ohci,
785context_init(struct context *ctx, struct fw_ohci *ohci, 783 u32 regs, descriptor_callback_t callback)
786 u32 regs, descriptor_callback_t callback)
787{ 784{
788 ctx->ohci = ohci; 785 ctx->ohci = ohci;
789 ctx->regs = regs; 786 ctx->regs = regs;
@@ -814,8 +811,7 @@ context_init(struct context *ctx, struct fw_ohci *ohci,
814 return 0; 811 return 0;
815} 812}
816 813
817static void 814static void context_release(struct context *ctx)
818context_release(struct context *ctx)
819{ 815{
820 struct fw_card *card = &ctx->ohci->card; 816 struct fw_card *card = &ctx->ohci->card;
821 struct descriptor_buffer *desc, *tmp; 817 struct descriptor_buffer *desc, *tmp;
@@ -827,8 +823,8 @@ context_release(struct context *ctx)
827} 823}
828 824
829/* Must be called with ohci->lock held */ 825/* Must be called with ohci->lock held */
830static struct descriptor * 826static struct descriptor *context_get_descriptors(struct context *ctx,
831context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus) 827 int z, dma_addr_t *d_bus)
832{ 828{
833 struct descriptor *d = NULL; 829 struct descriptor *d = NULL;
834 struct descriptor_buffer *desc = ctx->buffer_tail; 830 struct descriptor_buffer *desc = ctx->buffer_tail;
@@ -912,8 +908,8 @@ struct driver_data {
912 * Must always be called with the ochi->lock held to ensure proper 908 * Must always be called with the ochi->lock held to ensure proper
913 * generation handling and locking around packet queue manipulation. 909 * generation handling and locking around packet queue manipulation.
914 */ 910 */
915static int 911static int at_context_queue_packet(struct context *ctx,
916at_context_queue_packet(struct context *ctx, struct fw_packet *packet) 912 struct fw_packet *packet)
917{ 913{
918 struct fw_ohci *ohci = ctx->ohci; 914 struct fw_ohci *ohci = ctx->ohci;
919 dma_addr_t d_bus, uninitialized_var(payload_bus); 915 dma_addr_t d_bus, uninitialized_var(payload_bus);
@@ -940,7 +936,9 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
940 */ 936 */
941 937
942 header = (__le32 *) &d[1]; 938 header = (__le32 *) &d[1];
943 if (packet->header_length > 8) { 939 switch (packet->header_length) {
940 case 16:
941 case 12:
944 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | 942 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
945 (packet->speed << 16)); 943 (packet->speed << 16));
946 header[1] = cpu_to_le32((packet->header[1] & 0xffff) | 944 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
@@ -954,12 +952,27 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
954 header[3] = (__force __le32) packet->header[3]; 952 header[3] = (__force __le32) packet->header[3];
955 953
956 d[0].req_count = cpu_to_le16(packet->header_length); 954 d[0].req_count = cpu_to_le16(packet->header_length);
957 } else { 955 break;
956
957 case 8:
958 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | 958 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
959 (packet->speed << 16)); 959 (packet->speed << 16));
960 header[1] = cpu_to_le32(packet->header[0]); 960 header[1] = cpu_to_le32(packet->header[0]);
961 header[2] = cpu_to_le32(packet->header[1]); 961 header[2] = cpu_to_le32(packet->header[1]);
962 d[0].req_count = cpu_to_le16(12); 962 d[0].req_count = cpu_to_le16(12);
963 break;
964
965 case 4:
966 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
967 (packet->speed << 16));
968 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
969 d[0].req_count = cpu_to_le16(8);
970 break;
971
972 default:
973 /* BUG(); */
974 packet->ack = RCODE_SEND_ERROR;
975 return -1;
963 } 976 }
964 977
965 driver_data = (struct driver_data *) &d[3]; 978 driver_data = (struct driver_data *) &d[3];
@@ -1095,8 +1108,8 @@ static int handle_at_packet(struct context *context,
1095#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) 1108#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
1096#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) 1109#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
1097 1110
1098static void 1111static void handle_local_rom(struct fw_ohci *ohci,
1099handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) 1112 struct fw_packet *packet, u32 csr)
1100{ 1113{
1101 struct fw_packet response; 1114 struct fw_packet response;
1102 int tcode, length, i; 1115 int tcode, length, i;
@@ -1122,8 +1135,8 @@ handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
1122 fw_core_handle_response(&ohci->card, &response); 1135 fw_core_handle_response(&ohci->card, &response);
1123} 1136}
1124 1137
1125static void 1138static void handle_local_lock(struct fw_ohci *ohci,
1126handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) 1139 struct fw_packet *packet, u32 csr)
1127{ 1140{
1128 struct fw_packet response; 1141 struct fw_packet response;
1129 int tcode, length, ext_tcode, sel; 1142 int tcode, length, ext_tcode, sel;
@@ -1164,8 +1177,7 @@ handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
1164 fw_core_handle_response(&ohci->card, &response); 1177 fw_core_handle_response(&ohci->card, &response);
1165} 1178}
1166 1179
1167static void 1180static void handle_local_request(struct context *ctx, struct fw_packet *packet)
1168handle_local_request(struct context *ctx, struct fw_packet *packet)
1169{ 1181{
1170 u64 offset; 1182 u64 offset;
1171 u32 csr; 1183 u32 csr;
@@ -1205,11 +1217,10 @@ handle_local_request(struct context *ctx, struct fw_packet *packet)
1205 } 1217 }
1206} 1218}
1207 1219
1208static void 1220static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
1209at_context_transmit(struct context *ctx, struct fw_packet *packet)
1210{ 1221{
1211 unsigned long flags; 1222 unsigned long flags;
1212 int retval; 1223 int ret;
1213 1224
1214 spin_lock_irqsave(&ctx->ohci->lock, flags); 1225 spin_lock_irqsave(&ctx->ohci->lock, flags);
1215 1226
@@ -1220,10 +1231,10 @@ at_context_transmit(struct context *ctx, struct fw_packet *packet)
1220 return; 1231 return;
1221 } 1232 }
1222 1233
1223 retval = at_context_queue_packet(ctx, packet); 1234 ret = at_context_queue_packet(ctx, packet);
1224 spin_unlock_irqrestore(&ctx->ohci->lock, flags); 1235 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1225 1236
1226 if (retval < 0) 1237 if (ret < 0)
1227 packet->callback(packet, &ctx->ohci->card, packet->ack); 1238 packet->callback(packet, &ctx->ohci->card, packet->ack);
1228 1239
1229} 1240}
@@ -1590,12 +1601,12 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
1590 return 0; 1601 return 0;
1591} 1602}
1592 1603
1593static int 1604static int ohci_set_config_rom(struct fw_card *card,
1594ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) 1605 u32 *config_rom, size_t length)
1595{ 1606{
1596 struct fw_ohci *ohci; 1607 struct fw_ohci *ohci;
1597 unsigned long flags; 1608 unsigned long flags;
1598 int retval = -EBUSY; 1609 int ret = -EBUSY;
1599 __be32 *next_config_rom; 1610 __be32 *next_config_rom;
1600 dma_addr_t uninitialized_var(next_config_rom_bus); 1611 dma_addr_t uninitialized_var(next_config_rom_bus);
1601 1612
@@ -1649,7 +1660,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
1649 1660
1650 reg_write(ohci, OHCI1394_ConfigROMmap, 1661 reg_write(ohci, OHCI1394_ConfigROMmap,
1651 ohci->next_config_rom_bus); 1662 ohci->next_config_rom_bus);
1652 retval = 0; 1663 ret = 0;
1653 } 1664 }
1654 1665
1655 spin_unlock_irqrestore(&ohci->lock, flags); 1666 spin_unlock_irqrestore(&ohci->lock, flags);
@@ -1661,13 +1672,13 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
1661 * controller could need to access it before the bus reset 1672 * controller could need to access it before the bus reset
1662 * takes effect. 1673 * takes effect.
1663 */ 1674 */
1664 if (retval == 0) 1675 if (ret == 0)
1665 fw_core_initiate_bus_reset(&ohci->card, 1); 1676 fw_core_initiate_bus_reset(&ohci->card, 1);
1666 else 1677 else
1667 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1678 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1668 next_config_rom, next_config_rom_bus); 1679 next_config_rom, next_config_rom_bus);
1669 1680
1670 return retval; 1681 return ret;
1671} 1682}
1672 1683
1673static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) 1684static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
@@ -1689,7 +1700,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1689 struct fw_ohci *ohci = fw_ohci(card); 1700 struct fw_ohci *ohci = fw_ohci(card);
1690 struct context *ctx = &ohci->at_request_ctx; 1701 struct context *ctx = &ohci->at_request_ctx;
1691 struct driver_data *driver_data = packet->driver_data; 1702 struct driver_data *driver_data = packet->driver_data;
1692 int retval = -ENOENT; 1703 int ret = -ENOENT;
1693 1704
1694 tasklet_disable(&ctx->tasklet); 1705 tasklet_disable(&ctx->tasklet);
1695 1706
@@ -1704,23 +1715,22 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1704 driver_data->packet = NULL; 1715 driver_data->packet = NULL;
1705 packet->ack = RCODE_CANCELLED; 1716 packet->ack = RCODE_CANCELLED;
1706 packet->callback(packet, &ohci->card, packet->ack); 1717 packet->callback(packet, &ohci->card, packet->ack);
1707 retval = 0; 1718 ret = 0;
1708
1709 out: 1719 out:
1710 tasklet_enable(&ctx->tasklet); 1720 tasklet_enable(&ctx->tasklet);
1711 1721
1712 return retval; 1722 return ret;
1713} 1723}
1714 1724
1715static int 1725static int ohci_enable_phys_dma(struct fw_card *card,
1716ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) 1726 int node_id, int generation)
1717{ 1727{
1718#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA 1728#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1719 return 0; 1729 return 0;
1720#else 1730#else
1721 struct fw_ohci *ohci = fw_ohci(card); 1731 struct fw_ohci *ohci = fw_ohci(card);
1722 unsigned long flags; 1732 unsigned long flags;
1723 int n, retval = 0; 1733 int n, ret = 0;
1724 1734
1725 /* 1735 /*
1726 * FIXME: Make sure this bitmask is cleared when we clear the busReset 1736 * FIXME: Make sure this bitmask is cleared when we clear the busReset
@@ -1730,7 +1740,7 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
1730 spin_lock_irqsave(&ohci->lock, flags); 1740 spin_lock_irqsave(&ohci->lock, flags);
1731 1741
1732 if (ohci->generation != generation) { 1742 if (ohci->generation != generation) {
1733 retval = -ESTALE; 1743 ret = -ESTALE;
1734 goto out; 1744 goto out;
1735 } 1745 }
1736 1746
@@ -1748,12 +1758,12 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
1748 flush_writes(ohci); 1758 flush_writes(ohci);
1749 out: 1759 out:
1750 spin_unlock_irqrestore(&ohci->lock, flags); 1760 spin_unlock_irqrestore(&ohci->lock, flags);
1751 return retval; 1761
1762 return ret;
1752#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ 1763#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
1753} 1764}
1754 1765
1755static u64 1766static u64 ohci_get_bus_time(struct fw_card *card)
1756ohci_get_bus_time(struct fw_card *card)
1757{ 1767{
1758 struct fw_ohci *ohci = fw_ohci(card); 1768 struct fw_ohci *ohci = fw_ohci(card);
1759 u32 cycle_time; 1769 u32 cycle_time;
@@ -1765,6 +1775,28 @@ ohci_get_bus_time(struct fw_card *card)
1765 return bus_time; 1775 return bus_time;
1766} 1776}
1767 1777
1778static void copy_iso_headers(struct iso_context *ctx, void *p)
1779{
1780 int i = ctx->header_length;
1781
1782 if (i + ctx->base.header_size > PAGE_SIZE)
1783 return;
1784
1785 /*
1786 * The iso header is byteswapped to little endian by
1787 * the controller, but the remaining header quadlets
1788 * are big endian. We want to present all the headers
1789 * as big endian, so we have to swap the first quadlet.
1790 */
1791 if (ctx->base.header_size > 0)
1792 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1793 if (ctx->base.header_size > 4)
1794 *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
1795 if (ctx->base.header_size > 8)
1796 memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
1797 ctx->header_length += ctx->base.header_size;
1798}
1799
1768static int handle_ir_dualbuffer_packet(struct context *context, 1800static int handle_ir_dualbuffer_packet(struct context *context,
1769 struct descriptor *d, 1801 struct descriptor *d,
1770 struct descriptor *last) 1802 struct descriptor *last)
@@ -1775,7 +1807,6 @@ static int handle_ir_dualbuffer_packet(struct context *context,
1775 __le32 *ir_header; 1807 __le32 *ir_header;
1776 size_t header_length; 1808 size_t header_length;
1777 void *p, *end; 1809 void *p, *end;
1778 int i;
1779 1810
1780 if (db->first_res_count != 0 && db->second_res_count != 0) { 1811 if (db->first_res_count != 0 && db->second_res_count != 0) {
1781 if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) { 1812 if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
@@ -1788,25 +1819,14 @@ static int handle_ir_dualbuffer_packet(struct context *context,
1788 header_length = le16_to_cpu(db->first_req_count) - 1819 header_length = le16_to_cpu(db->first_req_count) -
1789 le16_to_cpu(db->first_res_count); 1820 le16_to_cpu(db->first_res_count);
1790 1821
1791 i = ctx->header_length;
1792 p = db + 1; 1822 p = db + 1;
1793 end = p + header_length; 1823 end = p + header_length;
1794 while (p < end && i + ctx->base.header_size <= PAGE_SIZE) { 1824 while (p < end) {
1795 /* 1825 copy_iso_headers(ctx, p);
1796 * The iso header is byteswapped to little endian by
1797 * the controller, but the remaining header quadlets
1798 * are big endian. We want to present all the headers
1799 * as big endian, so we have to swap the first
1800 * quadlet.
1801 */
1802 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1803 memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
1804 i += ctx->base.header_size;
1805 ctx->excess_bytes += 1826 ctx->excess_bytes +=
1806 (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff; 1827 (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
1807 p += ctx->base.header_size + 4; 1828 p += max(ctx->base.header_size, (size_t)8);
1808 } 1829 }
1809 ctx->header_length = i;
1810 1830
1811 ctx->excess_bytes -= le16_to_cpu(db->second_req_count) - 1831 ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
1812 le16_to_cpu(db->second_res_count); 1832 le16_to_cpu(db->second_res_count);
@@ -1832,7 +1852,6 @@ static int handle_ir_packet_per_buffer(struct context *context,
1832 struct descriptor *pd; 1852 struct descriptor *pd;
1833 __le32 *ir_header; 1853 __le32 *ir_header;
1834 void *p; 1854 void *p;
1835 int i;
1836 1855
1837 for (pd = d; pd <= last; pd++) { 1856 for (pd = d; pd <= last; pd++) {
1838 if (pd->transfer_status) 1857 if (pd->transfer_status)
@@ -1842,21 +1861,8 @@ static int handle_ir_packet_per_buffer(struct context *context,
1842 /* Descriptor(s) not done yet, stop iteration */ 1861 /* Descriptor(s) not done yet, stop iteration */
1843 return 0; 1862 return 0;
1844 1863
1845 i = ctx->header_length; 1864 p = last + 1;
1846 p = last + 1; 1865 copy_iso_headers(ctx, p);
1847
1848 if (ctx->base.header_size > 0 &&
1849 i + ctx->base.header_size <= PAGE_SIZE) {
1850 /*
1851 * The iso header is byteswapped to little endian by
1852 * the controller, but the remaining header quadlets
1853 * are big endian. We want to present all the headers
1854 * as big endian, so we have to swap the first quadlet.
1855 */
1856 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1857 memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
1858 ctx->header_length += ctx->base.header_size;
1859 }
1860 1866
1861 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { 1867 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
1862 ir_header = (__le32 *) p; 1868 ir_header = (__le32 *) p;
@@ -1888,21 +1894,24 @@ static int handle_it_packet(struct context *context,
1888 return 1; 1894 return 1;
1889} 1895}
1890 1896
1891static struct fw_iso_context * 1897static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
1892ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) 1898 int type, int channel, size_t header_size)
1893{ 1899{
1894 struct fw_ohci *ohci = fw_ohci(card); 1900 struct fw_ohci *ohci = fw_ohci(card);
1895 struct iso_context *ctx, *list; 1901 struct iso_context *ctx, *list;
1896 descriptor_callback_t callback; 1902 descriptor_callback_t callback;
1903 u64 *channels, dont_care = ~0ULL;
1897 u32 *mask, regs; 1904 u32 *mask, regs;
1898 unsigned long flags; 1905 unsigned long flags;
1899 int index, retval = -ENOMEM; 1906 int index, ret = -ENOMEM;
1900 1907
1901 if (type == FW_ISO_CONTEXT_TRANSMIT) { 1908 if (type == FW_ISO_CONTEXT_TRANSMIT) {
1909 channels = &dont_care;
1902 mask = &ohci->it_context_mask; 1910 mask = &ohci->it_context_mask;
1903 list = ohci->it_context_list; 1911 list = ohci->it_context_list;
1904 callback = handle_it_packet; 1912 callback = handle_it_packet;
1905 } else { 1913 } else {
1914 channels = &ohci->ir_context_channels;
1906 mask = &ohci->ir_context_mask; 1915 mask = &ohci->ir_context_mask;
1907 list = ohci->ir_context_list; 1916 list = ohci->ir_context_list;
1908 if (ohci->use_dualbuffer) 1917 if (ohci->use_dualbuffer)
@@ -1912,9 +1921,11 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
1912 } 1921 }
1913 1922
1914 spin_lock_irqsave(&ohci->lock, flags); 1923 spin_lock_irqsave(&ohci->lock, flags);
1915 index = ffs(*mask) - 1; 1924 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
1916 if (index >= 0) 1925 if (index >= 0) {
1926 *channels &= ~(1ULL << channel);
1917 *mask &= ~(1 << index); 1927 *mask &= ~(1 << index);
1928 }
1918 spin_unlock_irqrestore(&ohci->lock, flags); 1929 spin_unlock_irqrestore(&ohci->lock, flags);
1919 1930
1920 if (index < 0) 1931 if (index < 0)
@@ -1932,8 +1943,8 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
1932 if (ctx->header == NULL) 1943 if (ctx->header == NULL)
1933 goto out; 1944 goto out;
1934 1945
1935 retval = context_init(&ctx->context, ohci, regs, callback); 1946 ret = context_init(&ctx->context, ohci, regs, callback);
1936 if (retval < 0) 1947 if (ret < 0)
1937 goto out_with_header; 1948 goto out_with_header;
1938 1949
1939 return &ctx->base; 1950 return &ctx->base;
@@ -1945,7 +1956,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
1945 *mask |= 1 << index; 1956 *mask |= 1 << index;
1946 spin_unlock_irqrestore(&ohci->lock, flags); 1957 spin_unlock_irqrestore(&ohci->lock, flags);
1947 1958
1948 return ERR_PTR(retval); 1959 return ERR_PTR(ret);
1949} 1960}
1950 1961
1951static int ohci_start_iso(struct fw_iso_context *base, 1962static int ohci_start_iso(struct fw_iso_context *base,
@@ -2024,16 +2035,16 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
2024 } else { 2035 } else {
2025 index = ctx - ohci->ir_context_list; 2036 index = ctx - ohci->ir_context_list;
2026 ohci->ir_context_mask |= 1 << index; 2037 ohci->ir_context_mask |= 1 << index;
2038 ohci->ir_context_channels |= 1ULL << base->channel;
2027 } 2039 }
2028 2040
2029 spin_unlock_irqrestore(&ohci->lock, flags); 2041 spin_unlock_irqrestore(&ohci->lock, flags);
2030} 2042}
2031 2043
2032static int 2044static int ohci_queue_iso_transmit(struct fw_iso_context *base,
2033ohci_queue_iso_transmit(struct fw_iso_context *base, 2045 struct fw_iso_packet *packet,
2034 struct fw_iso_packet *packet, 2046 struct fw_iso_buffer *buffer,
2035 struct fw_iso_buffer *buffer, 2047 unsigned long payload)
2036 unsigned long payload)
2037{ 2048{
2038 struct iso_context *ctx = container_of(base, struct iso_context, base); 2049 struct iso_context *ctx = container_of(base, struct iso_context, base);
2039 struct descriptor *d, *last, *pd; 2050 struct descriptor *d, *last, *pd;
@@ -2128,11 +2139,10 @@ ohci_queue_iso_transmit(struct fw_iso_context *base,
2128 return 0; 2139 return 0;
2129} 2140}
2130 2141
2131static int 2142static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2132ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, 2143 struct fw_iso_packet *packet,
2133 struct fw_iso_packet *packet, 2144 struct fw_iso_buffer *buffer,
2134 struct fw_iso_buffer *buffer, 2145 unsigned long payload)
2135 unsigned long payload)
2136{ 2146{
2137 struct iso_context *ctx = container_of(base, struct iso_context, base); 2147 struct iso_context *ctx = container_of(base, struct iso_context, base);
2138 struct db_descriptor *db = NULL; 2148 struct db_descriptor *db = NULL;
@@ -2151,11 +2161,11 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2151 z = 2; 2161 z = 2;
2152 2162
2153 /* 2163 /*
2154 * The OHCI controller puts the status word in the header 2164 * The OHCI controller puts the isochronous header and trailer in the
2155 * buffer too, so we need 4 extra bytes per packet. 2165 * buffer, so we need at least 8 bytes.
2156 */ 2166 */
2157 packet_count = p->header_length / ctx->base.header_size; 2167 packet_count = p->header_length / ctx->base.header_size;
2158 header_size = packet_count * (ctx->base.header_size + 4); 2168 header_size = packet_count * max(ctx->base.header_size, (size_t)8);
2159 2169
2160 /* Get header size in number of descriptors. */ 2170 /* Get header size in number of descriptors. */
2161 header_z = DIV_ROUND_UP(header_size, sizeof(*d)); 2171 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
@@ -2173,7 +2183,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2173 db = (struct db_descriptor *) d; 2183 db = (struct db_descriptor *) d;
2174 db->control = cpu_to_le16(DESCRIPTOR_STATUS | 2184 db->control = cpu_to_le16(DESCRIPTOR_STATUS |
2175 DESCRIPTOR_BRANCH_ALWAYS); 2185 DESCRIPTOR_BRANCH_ALWAYS);
2176 db->first_size = cpu_to_le16(ctx->base.header_size + 4); 2186 db->first_size =
2187 cpu_to_le16(max(ctx->base.header_size, (size_t)8));
2177 if (p->skip && rest == p->payload_length) { 2188 if (p->skip && rest == p->payload_length) {
2178 db->control |= cpu_to_le16(DESCRIPTOR_WAIT); 2189 db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
2179 db->first_req_count = db->first_size; 2190 db->first_req_count = db->first_size;
@@ -2208,11 +2219,10 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2208 return 0; 2219 return 0;
2209} 2220}
2210 2221
2211static int 2222static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
2212ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, 2223 struct fw_iso_packet *packet,
2213 struct fw_iso_packet *packet, 2224 struct fw_iso_buffer *buffer,
2214 struct fw_iso_buffer *buffer, 2225 unsigned long payload)
2215 unsigned long payload)
2216{ 2226{
2217 struct iso_context *ctx = container_of(base, struct iso_context, base); 2227 struct iso_context *ctx = container_of(base, struct iso_context, base);
2218 struct descriptor *d = NULL, *pd = NULL; 2228 struct descriptor *d = NULL, *pd = NULL;
@@ -2223,11 +2233,11 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
2223 int page, offset, packet_count, header_size, payload_per_buffer; 2233 int page, offset, packet_count, header_size, payload_per_buffer;
2224 2234
2225 /* 2235 /*
2226 * The OHCI controller puts the status word in the 2236 * The OHCI controller puts the isochronous header and trailer in the
2227 * buffer too, so we need 4 extra bytes per packet. 2237 * buffer, so we need at least 8 bytes.
2228 */ 2238 */
2229 packet_count = p->header_length / ctx->base.header_size; 2239 packet_count = p->header_length / ctx->base.header_size;
2230 header_size = ctx->base.header_size + 4; 2240 header_size = max(ctx->base.header_size, (size_t)8);
2231 2241
2232 /* Get header size in number of descriptors. */ 2242 /* Get header size in number of descriptors. */
2233 header_z = DIV_ROUND_UP(header_size, sizeof(*d)); 2243 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
@@ -2286,29 +2296,27 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
2286 return 0; 2296 return 0;
2287} 2297}
2288 2298
2289static int 2299static int ohci_queue_iso(struct fw_iso_context *base,
2290ohci_queue_iso(struct fw_iso_context *base, 2300 struct fw_iso_packet *packet,
2291 struct fw_iso_packet *packet, 2301 struct fw_iso_buffer *buffer,
2292 struct fw_iso_buffer *buffer, 2302 unsigned long payload)
2293 unsigned long payload)
2294{ 2303{
2295 struct iso_context *ctx = container_of(base, struct iso_context, base); 2304 struct iso_context *ctx = container_of(base, struct iso_context, base);
2296 unsigned long flags; 2305 unsigned long flags;
2297 int retval; 2306 int ret;
2298 2307
2299 spin_lock_irqsave(&ctx->context.ohci->lock, flags); 2308 spin_lock_irqsave(&ctx->context.ohci->lock, flags);
2300 if (base->type == FW_ISO_CONTEXT_TRANSMIT) 2309 if (base->type == FW_ISO_CONTEXT_TRANSMIT)
2301 retval = ohci_queue_iso_transmit(base, packet, buffer, payload); 2310 ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
2302 else if (ctx->context.ohci->use_dualbuffer) 2311 else if (ctx->context.ohci->use_dualbuffer)
2303 retval = ohci_queue_iso_receive_dualbuffer(base, packet, 2312 ret = ohci_queue_iso_receive_dualbuffer(base, packet,
2304 buffer, payload); 2313 buffer, payload);
2305 else 2314 else
2306 retval = ohci_queue_iso_receive_packet_per_buffer(base, packet, 2315 ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
2307 buffer, 2316 buffer, payload);
2308 payload);
2309 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); 2317 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
2310 2318
2311 return retval; 2319 return ret;
2312} 2320}
2313 2321
2314static const struct fw_card_driver ohci_driver = { 2322static const struct fw_card_driver ohci_driver = {
@@ -2357,8 +2365,8 @@ static void ohci_pmac_off(struct pci_dev *dev)
2357#define ohci_pmac_off(dev) 2365#define ohci_pmac_off(dev)
2358#endif /* CONFIG_PPC_PMAC */ 2366#endif /* CONFIG_PPC_PMAC */
2359 2367
2360static int __devinit 2368static int __devinit pci_probe(struct pci_dev *dev,
2361pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) 2369 const struct pci_device_id *ent)
2362{ 2370{
2363 struct fw_ohci *ohci; 2371 struct fw_ohci *ohci;
2364 u32 bus_options, max_receive, link_speed, version; 2372 u32 bus_options, max_receive, link_speed, version;
@@ -2440,6 +2448,7 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2440 ohci->it_context_list = kzalloc(size, GFP_KERNEL); 2448 ohci->it_context_list = kzalloc(size, GFP_KERNEL);
2441 2449
2442 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); 2450 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
2451 ohci->ir_context_channels = ~0ULL;
2443 ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); 2452 ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
2444 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); 2453 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
2445 size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask); 2454 size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
@@ -2467,11 +2476,12 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2467 reg_read(ohci, OHCI1394_GUIDLo); 2476 reg_read(ohci, OHCI1394_GUIDLo);
2468 2477
2469 err = fw_card_add(&ohci->card, max_receive, link_speed, guid); 2478 err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
2470 if (err < 0) 2479 if (err)
2471 goto fail_self_id; 2480 goto fail_self_id;
2472 2481
2473 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n", 2482 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
2474 dev_name(&dev->dev), version >> 16, version & 0xff); 2483 dev_name(&dev->dev), version >> 16, version & 0xff);
2484
2475 return 0; 2485 return 0;
2476 2486
2477 fail_self_id: 2487 fail_self_id:
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index c71c4419d9e8..2bcf51557c72 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -392,20 +392,18 @@ static const struct {
392 } 392 }
393}; 393};
394 394
395static void 395static void free_orb(struct kref *kref)
396free_orb(struct kref *kref)
397{ 396{
398 struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref); 397 struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref);
399 398
400 kfree(orb); 399 kfree(orb);
401} 400}
402 401
403static void 402static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
404sbp2_status_write(struct fw_card *card, struct fw_request *request, 403 int tcode, int destination, int source,
405 int tcode, int destination, int source, 404 int generation, int speed,
406 int generation, int speed, 405 unsigned long long offset,
407 unsigned long long offset, 406 void *payload, size_t length, void *callback_data)
408 void *payload, size_t length, void *callback_data)
409{ 407{
410 struct sbp2_logical_unit *lu = callback_data; 408 struct sbp2_logical_unit *lu = callback_data;
411 struct sbp2_orb *orb; 409 struct sbp2_orb *orb;
@@ -451,9 +449,8 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request,
451 fw_send_response(card, request, RCODE_COMPLETE); 449 fw_send_response(card, request, RCODE_COMPLETE);
452} 450}
453 451
454static void 452static void complete_transaction(struct fw_card *card, int rcode,
455complete_transaction(struct fw_card *card, int rcode, 453 void *payload, size_t length, void *data)
456 void *payload, size_t length, void *data)
457{ 454{
458 struct sbp2_orb *orb = data; 455 struct sbp2_orb *orb = data;
459 unsigned long flags; 456 unsigned long flags;
@@ -482,9 +479,8 @@ complete_transaction(struct fw_card *card, int rcode,
482 kref_put(&orb->kref, free_orb); 479 kref_put(&orb->kref, free_orb);
483} 480}
484 481
485static void 482static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
486sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, 483 int node_id, int generation, u64 offset)
487 int node_id, int generation, u64 offset)
488{ 484{
489 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 485 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
490 unsigned long flags; 486 unsigned long flags;
@@ -531,8 +527,8 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
531 return retval; 527 return retval;
532} 528}
533 529
534static void 530static void complete_management_orb(struct sbp2_orb *base_orb,
535complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) 531 struct sbp2_status *status)
536{ 532{
537 struct sbp2_management_orb *orb = 533 struct sbp2_management_orb *orb =
538 container_of(base_orb, struct sbp2_management_orb, base); 534 container_of(base_orb, struct sbp2_management_orb, base);
@@ -542,10 +538,9 @@ complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
542 complete(&orb->done); 538 complete(&orb->done);
543} 539}
544 540
545static int 541static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
546sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, 542 int generation, int function,
547 int generation, int function, int lun_or_login_id, 543 int lun_or_login_id, void *response)
548 void *response)
549{ 544{
550 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 545 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
551 struct sbp2_management_orb *orb; 546 struct sbp2_management_orb *orb;
@@ -652,9 +647,8 @@ static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
652 &d, sizeof(d)); 647 &d, sizeof(d));
653} 648}
654 649
655static void 650static void complete_agent_reset_write_no_wait(struct fw_card *card,
656complete_agent_reset_write_no_wait(struct fw_card *card, int rcode, 651 int rcode, void *payload, size_t length, void *data)
657 void *payload, size_t length, void *data)
658{ 652{
659 kfree(data); 653 kfree(data);
660} 654}
@@ -1299,8 +1293,7 @@ static void sbp2_unmap_scatterlist(struct device *card_device,
1299 sizeof(orb->page_table), DMA_TO_DEVICE); 1293 sizeof(orb->page_table), DMA_TO_DEVICE);
1300} 1294}
1301 1295
1302static unsigned int 1296static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
1303sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
1304{ 1297{
1305 int sam_status; 1298 int sam_status;
1306 1299
@@ -1337,8 +1330,8 @@ sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
1337 } 1330 }
1338} 1331}
1339 1332
1340static void 1333static void complete_command_orb(struct sbp2_orb *base_orb,
1341complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) 1334 struct sbp2_status *status)
1342{ 1335{
1343 struct sbp2_command_orb *orb = 1336 struct sbp2_command_orb *orb =
1344 container_of(base_orb, struct sbp2_command_orb, base); 1337 container_of(base_orb, struct sbp2_command_orb, base);
@@ -1384,9 +1377,8 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
1384 orb->done(orb->cmd); 1377 orb->done(orb->cmd);
1385} 1378}
1386 1379
1387static int 1380static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
1388sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device, 1381 struct fw_device *device, struct sbp2_logical_unit *lu)
1389 struct sbp2_logical_unit *lu)
1390{ 1382{
1391 struct scatterlist *sg = scsi_sglist(orb->cmd); 1383 struct scatterlist *sg = scsi_sglist(orb->cmd);
1392 int i, n; 1384 int i, n;
@@ -1584,9 +1576,8 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
1584 * This is the concatenation of target port identifier and logical unit 1576 * This is the concatenation of target port identifier and logical unit
1585 * identifier as per SAM-2...SAM-4 annex A. 1577 * identifier as per SAM-2...SAM-4 annex A.
1586 */ 1578 */
1587static ssize_t 1579static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
1588sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr, 1580 struct device_attribute *attr, char *buf)
1589 char *buf)
1590{ 1581{
1591 struct scsi_device *sdev = to_scsi_device(dev); 1582 struct scsi_device *sdev = to_scsi_device(dev);
1592 struct sbp2_logical_unit *lu; 1583 struct sbp2_logical_unit *lu;
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index 8dd6703b55cd..d0deecc4de93 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -314,9 +314,8 @@ typedef void (*fw_node_callback_t)(struct fw_card * card,
314 struct fw_node * node, 314 struct fw_node * node,
315 struct fw_node * parent); 315 struct fw_node * parent);
316 316
317static void 317static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
318for_each_fw_node(struct fw_card *card, struct fw_node *root, 318 fw_node_callback_t callback)
319 fw_node_callback_t callback)
320{ 319{
321 struct list_head list; 320 struct list_head list;
322 struct fw_node *node, *next, *child, *parent; 321 struct fw_node *node, *next, *child, *parent;
@@ -349,9 +348,8 @@ for_each_fw_node(struct fw_card *card, struct fw_node *root,
349 fw_node_put(node); 348 fw_node_put(node);
350} 349}
351 350
352static void 351static void report_lost_node(struct fw_card *card,
353report_lost_node(struct fw_card *card, 352 struct fw_node *node, struct fw_node *parent)
354 struct fw_node *node, struct fw_node *parent)
355{ 353{
356 fw_node_event(card, node, FW_NODE_DESTROYED); 354 fw_node_event(card, node, FW_NODE_DESTROYED);
357 fw_node_put(node); 355 fw_node_put(node);
@@ -360,9 +358,8 @@ report_lost_node(struct fw_card *card,
360 card->bm_retries = 0; 358 card->bm_retries = 0;
361} 359}
362 360
363static void 361static void report_found_node(struct fw_card *card,
364report_found_node(struct fw_card *card, 362 struct fw_node *node, struct fw_node *parent)
365 struct fw_node *node, struct fw_node *parent)
366{ 363{
367 int b_path = (node->phy_speed == SCODE_BETA); 364 int b_path = (node->phy_speed == SCODE_BETA);
368 365
@@ -415,8 +412,7 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
415 * found, lost or updated. Update the nodes in the card topology tree 412 * found, lost or updated. Update the nodes in the card topology tree
416 * as we go. 413 * as we go.
417 */ 414 */
418static void 415static void update_tree(struct fw_card *card, struct fw_node *root)
419update_tree(struct fw_card *card, struct fw_node *root)
420{ 416{
421 struct list_head list0, list1; 417 struct list_head list0, list1;
422 struct fw_node *node0, *node1, *next1; 418 struct fw_node *node0, *node1, *next1;
@@ -497,8 +493,8 @@ update_tree(struct fw_card *card, struct fw_node *root)
497 } 493 }
498} 494}
499 495
500static void 496static void update_topology_map(struct fw_card *card,
501update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count) 497 u32 *self_ids, int self_id_count)
502{ 498{
503 int node_count; 499 int node_count;
504 500
@@ -510,10 +506,8 @@ update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count)
510 fw_compute_block_crc(card->topology_map); 506 fw_compute_block_crc(card->topology_map);
511} 507}
512 508
513void 509void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
514fw_core_handle_bus_reset(struct fw_card *card, 510 int self_id_count, u32 *self_ids)
515 int node_id, int generation,
516 int self_id_count, u32 * self_ids)
517{ 511{
518 struct fw_node *local_node; 512 struct fw_node *local_node;
519 unsigned long flags; 513 unsigned long flags;
@@ -532,6 +526,7 @@ fw_core_handle_bus_reset(struct fw_card *card,
532 526
533 spin_lock_irqsave(&card->lock, flags); 527 spin_lock_irqsave(&card->lock, flags);
534 528
529 card->broadcast_channel_allocated = false;
535 card->node_id = node_id; 530 card->node_id = node_id;
536 /* 531 /*
537 * Update node_id before generation to prevent anybody from using 532 * Update node_id before generation to prevent anybody from using
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
index addb9f8ea776..3c497bb4fae4 100644
--- a/drivers/firewire/fw-topology.h
+++ b/drivers/firewire/fw-topology.h
@@ -19,6 +19,11 @@
19#ifndef __fw_topology_h 19#ifndef __fw_topology_h
20#define __fw_topology_h 20#define __fw_topology_h
21 21
22#include <linux/list.h>
23#include <linux/slab.h>
24
25#include <asm/atomic.h>
26
22enum { 27enum {
23 FW_NODE_CREATED, 28 FW_NODE_CREATED,
24 FW_NODE_UPDATED, 29 FW_NODE_UPDATED,
@@ -51,26 +56,22 @@ struct fw_node {
51 struct fw_node *ports[0]; 56 struct fw_node *ports[0];
52}; 57};
53 58
54static inline struct fw_node * 59static inline struct fw_node *fw_node_get(struct fw_node *node)
55fw_node_get(struct fw_node *node)
56{ 60{
57 atomic_inc(&node->ref_count); 61 atomic_inc(&node->ref_count);
58 62
59 return node; 63 return node;
60} 64}
61 65
62static inline void 66static inline void fw_node_put(struct fw_node *node)
63fw_node_put(struct fw_node *node)
64{ 67{
65 if (atomic_dec_and_test(&node->ref_count)) 68 if (atomic_dec_and_test(&node->ref_count))
66 kfree(node); 69 kfree(node);
67} 70}
68 71
69void 72struct fw_card;
70fw_destroy_nodes(struct fw_card *card); 73void fw_destroy_nodes(struct fw_card *card);
71
72int
73fw_compute_block_crc(u32 *block);
74 74
75int fw_compute_block_crc(u32 *block);
75 76
76#endif /* __fw_topology_h */ 77#endif /* __fw_topology_h */
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 699ac041f39a..283dac6d327d 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -64,10 +64,8 @@
64#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) 64#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
65#define PHY_IDENTIFIER(id) ((id) << 30) 65#define PHY_IDENTIFIER(id) ((id) << 30)
66 66
67static int 67static int close_transaction(struct fw_transaction *transaction,
68close_transaction(struct fw_transaction *transaction, 68 struct fw_card *card, int rcode)
69 struct fw_card *card, int rcode,
70 u32 *payload, size_t length)
71{ 69{
72 struct fw_transaction *t; 70 struct fw_transaction *t;
73 unsigned long flags; 71 unsigned long flags;
@@ -83,7 +81,7 @@ close_transaction(struct fw_transaction *transaction,
83 spin_unlock_irqrestore(&card->lock, flags); 81 spin_unlock_irqrestore(&card->lock, flags);
84 82
85 if (&t->link != &card->transaction_list) { 83 if (&t->link != &card->transaction_list) {
86 t->callback(card, rcode, payload, length, t->callback_data); 84 t->callback(card, rcode, NULL, 0, t->callback_data);
87 return 0; 85 return 0;
88 } 86 }
89 87
@@ -94,9 +92,8 @@ close_transaction(struct fw_transaction *transaction,
94 * Only valid for transactions that are potentially pending (ie have 92 * Only valid for transactions that are potentially pending (ie have
95 * been sent). 93 * been sent).
96 */ 94 */
97int 95int fw_cancel_transaction(struct fw_card *card,
98fw_cancel_transaction(struct fw_card *card, 96 struct fw_transaction *transaction)
99 struct fw_transaction *transaction)
100{ 97{
101 /* 98 /*
102 * Cancel the packet transmission if it's still queued. That 99 * Cancel the packet transmission if it's still queued. That
@@ -112,20 +109,19 @@ fw_cancel_transaction(struct fw_card *card,
112 * if the transaction is still pending and remove it in that case. 109 * if the transaction is still pending and remove it in that case.
113 */ 110 */
114 111
115 return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0); 112 return close_transaction(transaction, card, RCODE_CANCELLED);
116} 113}
117EXPORT_SYMBOL(fw_cancel_transaction); 114EXPORT_SYMBOL(fw_cancel_transaction);
118 115
119static void 116static void transmit_complete_callback(struct fw_packet *packet,
120transmit_complete_callback(struct fw_packet *packet, 117 struct fw_card *card, int status)
121 struct fw_card *card, int status)
122{ 118{
123 struct fw_transaction *t = 119 struct fw_transaction *t =
124 container_of(packet, struct fw_transaction, packet); 120 container_of(packet, struct fw_transaction, packet);
125 121
126 switch (status) { 122 switch (status) {
127 case ACK_COMPLETE: 123 case ACK_COMPLETE:
128 close_transaction(t, card, RCODE_COMPLETE, NULL, 0); 124 close_transaction(t, card, RCODE_COMPLETE);
129 break; 125 break;
130 case ACK_PENDING: 126 case ACK_PENDING:
131 t->timestamp = packet->timestamp; 127 t->timestamp = packet->timestamp;
@@ -133,31 +129,42 @@ transmit_complete_callback(struct fw_packet *packet,
133 case ACK_BUSY_X: 129 case ACK_BUSY_X:
134 case ACK_BUSY_A: 130 case ACK_BUSY_A:
135 case ACK_BUSY_B: 131 case ACK_BUSY_B:
136 close_transaction(t, card, RCODE_BUSY, NULL, 0); 132 close_transaction(t, card, RCODE_BUSY);
137 break; 133 break;
138 case ACK_DATA_ERROR: 134 case ACK_DATA_ERROR:
139 close_transaction(t, card, RCODE_DATA_ERROR, NULL, 0); 135 close_transaction(t, card, RCODE_DATA_ERROR);
140 break; 136 break;
141 case ACK_TYPE_ERROR: 137 case ACK_TYPE_ERROR:
142 close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0); 138 close_transaction(t, card, RCODE_TYPE_ERROR);
143 break; 139 break;
144 default: 140 default:
145 /* 141 /*
146 * In this case the ack is really a juju specific 142 * In this case the ack is really a juju specific
147 * rcode, so just forward that to the callback. 143 * rcode, so just forward that to the callback.
148 */ 144 */
149 close_transaction(t, card, status, NULL, 0); 145 close_transaction(t, card, status);
150 break; 146 break;
151 } 147 }
152} 148}
153 149
154static void 150static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
155fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
156 int destination_id, int source_id, int generation, int speed, 151 int destination_id, int source_id, int generation, int speed,
157 unsigned long long offset, void *payload, size_t length) 152 unsigned long long offset, void *payload, size_t length)
158{ 153{
159 int ext_tcode; 154 int ext_tcode;
160 155
156 if (tcode == TCODE_STREAM_DATA) {
157 packet->header[0] =
158 HEADER_DATA_LENGTH(length) |
159 destination_id |
160 HEADER_TCODE(TCODE_STREAM_DATA);
161 packet->header_length = 4;
162 packet->payload = payload;
163 packet->payload_length = length;
164
165 goto common;
166 }
167
161 if (tcode > 0x10) { 168 if (tcode > 0x10) {
162 ext_tcode = tcode & ~0x10; 169 ext_tcode = tcode & ~0x10;
163 tcode = TCODE_LOCK_REQUEST; 170 tcode = TCODE_LOCK_REQUEST;
@@ -204,7 +211,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
204 packet->payload_length = 0; 211 packet->payload_length = 0;
205 break; 212 break;
206 } 213 }
207 214 common:
208 packet->speed = speed; 215 packet->speed = speed;
209 packet->generation = generation; 216 packet->generation = generation;
210 packet->ack = 0; 217 packet->ack = 0;
@@ -246,13 +253,14 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
246 * @param callback function to be called when the transaction is completed 253 * @param callback function to be called when the transaction is completed
247 * @param callback_data pointer to arbitrary data, which will be 254 * @param callback_data pointer to arbitrary data, which will be
248 * passed to the callback 255 * passed to the callback
256 *
257 * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller
258 * needs to synthesize @destination_id with fw_stream_packet_destination_id().
249 */ 259 */
250void 260void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
251fw_send_request(struct fw_card *card, struct fw_transaction *t, 261 int destination_id, int generation, int speed,
252 int tcode, int destination_id, int generation, int speed, 262 unsigned long long offset, void *payload, size_t length,
253 unsigned long long offset, 263 fw_transaction_callback_t callback, void *callback_data)
254 void *payload, size_t length,
255 fw_transaction_callback_t callback, void *callback_data)
256{ 264{
257 unsigned long flags; 265 unsigned long flags;
258 int tlabel; 266 int tlabel;
@@ -322,16 +330,16 @@ static void transaction_callback(struct fw_card *card, int rcode,
322 * Returns the RCODE. 330 * Returns the RCODE.
323 */ 331 */
324int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, 332int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
325 int generation, int speed, unsigned long long offset, 333 int generation, int speed, unsigned long long offset,
326 void *data, size_t length) 334 void *payload, size_t length)
327{ 335{
328 struct transaction_callback_data d; 336 struct transaction_callback_data d;
329 struct fw_transaction t; 337 struct fw_transaction t;
330 338
331 init_completion(&d.done); 339 init_completion(&d.done);
332 d.payload = data; 340 d.payload = payload;
333 fw_send_request(card, &t, tcode, destination_id, generation, speed, 341 fw_send_request(card, &t, tcode, destination_id, generation, speed,
334 offset, data, length, transaction_callback, &d); 342 offset, payload, length, transaction_callback, &d);
335 wait_for_completion(&d.done); 343 wait_for_completion(&d.done);
336 344
337 return d.rcode; 345 return d.rcode;
@@ -399,9 +407,8 @@ void fw_flush_transactions(struct fw_card *card)
399 } 407 }
400} 408}
401 409
402static struct fw_address_handler * 410static struct fw_address_handler *lookup_overlapping_address_handler(
403lookup_overlapping_address_handler(struct list_head *list, 411 struct list_head *list, unsigned long long offset, size_t length)
404 unsigned long long offset, size_t length)
405{ 412{
406 struct fw_address_handler *handler; 413 struct fw_address_handler *handler;
407 414
@@ -414,9 +421,8 @@ lookup_overlapping_address_handler(struct list_head *list,
414 return NULL; 421 return NULL;
415} 422}
416 423
417static struct fw_address_handler * 424static struct fw_address_handler *lookup_enclosing_address_handler(
418lookup_enclosing_address_handler(struct list_head *list, 425 struct list_head *list, unsigned long long offset, size_t length)
419 unsigned long long offset, size_t length)
420{ 426{
421 struct fw_address_handler *handler; 427 struct fw_address_handler *handler;
422 428
@@ -449,36 +455,44 @@ const struct fw_address_region fw_unit_space_region =
449#endif /* 0 */ 455#endif /* 0 */
450 456
451/** 457/**
452 * Allocate a range of addresses in the node space of the OHCI 458 * fw_core_add_address_handler - register for incoming requests
453 * controller. When a request is received that falls within the 459 * @handler: callback
454 * specified address range, the specified callback is invoked. The 460 * @region: region in the IEEE 1212 node space address range
455 * parameters passed to the callback give the details of the 461 *
456 * particular request. 462 * region->start, ->end, and handler->length have to be quadlet-aligned.
463 *
464 * When a request is received that falls within the specified address range,
465 * the specified callback is invoked. The parameters passed to the callback
466 * give the details of the particular request.
457 * 467 *
458 * Return value: 0 on success, non-zero otherwise. 468 * Return value: 0 on success, non-zero otherwise.
459 * The start offset of the handler's address region is determined by 469 * The start offset of the handler's address region is determined by
460 * fw_core_add_address_handler() and is returned in handler->offset. 470 * fw_core_add_address_handler() and is returned in handler->offset.
461 * The offset is quadlet-aligned.
462 */ 471 */
463int 472int fw_core_add_address_handler(struct fw_address_handler *handler,
464fw_core_add_address_handler(struct fw_address_handler *handler, 473 const struct fw_address_region *region)
465 const struct fw_address_region *region)
466{ 474{
467 struct fw_address_handler *other; 475 struct fw_address_handler *other;
468 unsigned long flags; 476 unsigned long flags;
469 int ret = -EBUSY; 477 int ret = -EBUSY;
470 478
479 if (region->start & 0xffff000000000003ULL ||
480 region->end & 0xffff000000000003ULL ||
481 region->start >= region->end ||
482 handler->length & 3 ||
483 handler->length == 0)
484 return -EINVAL;
485
471 spin_lock_irqsave(&address_handler_lock, flags); 486 spin_lock_irqsave(&address_handler_lock, flags);
472 487
473 handler->offset = roundup(region->start, 4); 488 handler->offset = region->start;
474 while (handler->offset + handler->length <= region->end) { 489 while (handler->offset + handler->length <= region->end) {
475 other = 490 other =
476 lookup_overlapping_address_handler(&address_handler_list, 491 lookup_overlapping_address_handler(&address_handler_list,
477 handler->offset, 492 handler->offset,
478 handler->length); 493 handler->length);
479 if (other != NULL) { 494 if (other != NULL) {
480 handler->offset = 495 handler->offset += other->length;
481 roundup(other->offset + other->length, 4);
482 } else { 496 } else {
483 list_add_tail(&handler->link, &address_handler_list); 497 list_add_tail(&handler->link, &address_handler_list);
484 ret = 0; 498 ret = 0;
@@ -493,12 +507,7 @@ fw_core_add_address_handler(struct fw_address_handler *handler,
493EXPORT_SYMBOL(fw_core_add_address_handler); 507EXPORT_SYMBOL(fw_core_add_address_handler);
494 508
495/** 509/**
496 * Deallocate a range of addresses allocated with fw_allocate. This 510 * fw_core_remove_address_handler - unregister an address handler
497 * will call the associated callback one last time with a the special
498 * tcode TCODE_DEALLOCATE, to let the client destroy the registered
499 * callback data. For convenience, the callback parameters offset and
500 * length are set to the start and the length respectively for the
501 * deallocated region, payload is set to NULL.
502 */ 511 */
503void fw_core_remove_address_handler(struct fw_address_handler *handler) 512void fw_core_remove_address_handler(struct fw_address_handler *handler)
504{ 513{
@@ -518,9 +527,8 @@ struct fw_request {
518 u32 data[0]; 527 u32 data[0];
519}; 528};
520 529
521static void 530static void free_response_callback(struct fw_packet *packet,
522free_response_callback(struct fw_packet *packet, 531 struct fw_card *card, int status)
523 struct fw_card *card, int status)
524{ 532{
525 struct fw_request *request; 533 struct fw_request *request;
526 534
@@ -528,9 +536,8 @@ free_response_callback(struct fw_packet *packet,
528 kfree(request); 536 kfree(request);
529} 537}
530 538
531void 539void fw_fill_response(struct fw_packet *response, u32 *request_header,
532fw_fill_response(struct fw_packet *response, u32 *request_header, 540 int rcode, void *payload, size_t length)
533 int rcode, void *payload, size_t length)
534{ 541{
535 int tcode, tlabel, extended_tcode, source, destination; 542 int tcode, tlabel, extended_tcode, source, destination;
536 543
@@ -588,8 +595,7 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
588} 595}
589EXPORT_SYMBOL(fw_fill_response); 596EXPORT_SYMBOL(fw_fill_response);
590 597
591static struct fw_request * 598static struct fw_request *allocate_request(struct fw_packet *p)
592allocate_request(struct fw_packet *p)
593{ 599{
594 struct fw_request *request; 600 struct fw_request *request;
595 u32 *data, length; 601 u32 *data, length;
@@ -649,8 +655,8 @@ allocate_request(struct fw_packet *p)
649 return request; 655 return request;
650} 656}
651 657
652void 658void fw_send_response(struct fw_card *card,
653fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) 659 struct fw_request *request, int rcode)
654{ 660{
655 /* unified transaction or broadcast transaction: don't respond */ 661 /* unified transaction or broadcast transaction: don't respond */
656 if (request->ack != ACK_PENDING || 662 if (request->ack != ACK_PENDING ||
@@ -670,8 +676,7 @@ fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
670} 676}
671EXPORT_SYMBOL(fw_send_response); 677EXPORT_SYMBOL(fw_send_response);
672 678
673void 679void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
674fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
675{ 680{
676 struct fw_address_handler *handler; 681 struct fw_address_handler *handler;
677 struct fw_request *request; 682 struct fw_request *request;
@@ -719,8 +724,7 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
719} 724}
720EXPORT_SYMBOL(fw_core_handle_request); 725EXPORT_SYMBOL(fw_core_handle_request);
721 726
722void 727void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
723fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
724{ 728{
725 struct fw_transaction *t; 729 struct fw_transaction *t;
726 unsigned long flags; 730 unsigned long flags;
@@ -793,12 +797,10 @@ static const struct fw_address_region topology_map_region =
793 { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP, 797 { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
794 .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, }; 798 .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
795 799
796static void 800static void handle_topology_map(struct fw_card *card, struct fw_request *request,
797handle_topology_map(struct fw_card *card, struct fw_request *request, 801 int tcode, int destination, int source, int generation,
798 int tcode, int destination, int source, 802 int speed, unsigned long long offset,
799 int generation, int speed, 803 void *payload, size_t length, void *callback_data)
800 unsigned long long offset,
801 void *payload, size_t length, void *callback_data)
802{ 804{
803 int i, start, end; 805 int i, start, end;
804 __be32 *map; 806 __be32 *map;
@@ -832,12 +834,10 @@ static const struct fw_address_region registers_region =
832 { .start = CSR_REGISTER_BASE, 834 { .start = CSR_REGISTER_BASE,
833 .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; 835 .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
834 836
835static void 837static void handle_registers(struct fw_card *card, struct fw_request *request,
836handle_registers(struct fw_card *card, struct fw_request *request, 838 int tcode, int destination, int source, int generation,
837 int tcode, int destination, int source, 839 int speed, unsigned long long offset,
838 int generation, int speed, 840 void *payload, size_t length, void *callback_data)
839 unsigned long long offset,
840 void *payload, size_t length, void *callback_data)
841{ 841{
842 int reg = offset & ~CSR_REGISTER_BASE; 842 int reg = offset & ~CSR_REGISTER_BASE;
843 unsigned long long bus_time; 843 unsigned long long bus_time;
@@ -939,11 +939,11 @@ static struct fw_descriptor model_id_descriptor = {
939 939
940static int __init fw_core_init(void) 940static int __init fw_core_init(void)
941{ 941{
942 int retval; 942 int ret;
943 943
944 retval = bus_register(&fw_bus_type); 944 ret = bus_register(&fw_bus_type);
945 if (retval < 0) 945 if (ret < 0)
946 return retval; 946 return ret;
947 947
948 fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops); 948 fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
949 if (fw_cdev_major < 0) { 949 if (fw_cdev_major < 0) {
@@ -951,19 +951,10 @@ static int __init fw_core_init(void)
951 return fw_cdev_major; 951 return fw_cdev_major;
952 } 952 }
953 953
954 retval = fw_core_add_address_handler(&topology_map, 954 fw_core_add_address_handler(&topology_map, &topology_map_region);
955 &topology_map_region); 955 fw_core_add_address_handler(&registers, &registers_region);
956 BUG_ON(retval < 0); 956 fw_core_add_descriptor(&vendor_id_descriptor);
957 957 fw_core_add_descriptor(&model_id_descriptor);
958 retval = fw_core_add_address_handler(&registers,
959 &registers_region);
960 BUG_ON(retval < 0);
961
962 /* Add the vendor textual descriptor. */
963 retval = fw_core_add_descriptor(&vendor_id_descriptor);
964 BUG_ON(retval < 0);
965 retval = fw_core_add_descriptor(&model_id_descriptor);
966 BUG_ON(retval < 0);
967 958
968 return 0; 959 return 0;
969} 960}
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index 1d78e9cc5940..dfa799068f89 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -82,14 +82,14 @@
82#define CSR_SPEED_MAP 0x2000 82#define CSR_SPEED_MAP 0x2000
83#define CSR_SPEED_MAP_END 0x3000 83#define CSR_SPEED_MAP_END 0x3000
84 84
85#define BANDWIDTH_AVAILABLE_INITIAL 4915
85#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) 86#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
86#define BROADCAST_CHANNEL_VALID (1 << 30) 87#define BROADCAST_CHANNEL_VALID (1 << 30)
87 88
88#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args) 89#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
89#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args) 90#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
90 91
91static inline void 92static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
92fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
93{ 93{
94 u32 *dst = _dst; 94 u32 *dst = _dst;
95 __be32 *src = _src; 95 __be32 *src = _src;
@@ -99,8 +99,7 @@ fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
99 dst[i] = be32_to_cpu(src[i]); 99 dst[i] = be32_to_cpu(src[i]);
100} 100}
101 101
102static inline void 102static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
103fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
104{ 103{
105 fw_memcpy_from_be32(_dst, _src, size); 104 fw_memcpy_from_be32(_dst, _src, size);
106} 105}
@@ -125,8 +124,7 @@ typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
125 struct fw_card *card, int status); 124 struct fw_card *card, int status);
126 125
127typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, 126typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
128 void *data, 127 void *data, size_t length,
129 size_t length,
130 void *callback_data); 128 void *callback_data);
131 129
132/* 130/*
@@ -141,12 +139,6 @@ typedef void (*fw_address_callback_t)(struct fw_card *card,
141 void *data, size_t length, 139 void *data, size_t length,
142 void *callback_data); 140 void *callback_data);
143 141
144typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle,
145 int node_id, int generation,
146 u32 *self_ids,
147 int self_id_count,
148 void *callback_data);
149
150struct fw_packet { 142struct fw_packet {
151 int speed; 143 int speed;
152 int generation; 144 int generation;
@@ -187,12 +179,6 @@ struct fw_transaction {
187 void *callback_data; 179 void *callback_data;
188}; 180};
189 181
190static inline struct fw_packet *
191fw_packet(struct list_head *l)
192{
193 return list_entry(l, struct fw_packet, link);
194}
195
196struct fw_address_handler { 182struct fw_address_handler {
197 u64 offset; 183 u64 offset;
198 size_t length; 184 size_t length;
@@ -201,7 +187,6 @@ struct fw_address_handler {
201 struct list_head link; 187 struct list_head link;
202}; 188};
203 189
204
205struct fw_address_region { 190struct fw_address_region {
206 u64 start; 191 u64 start;
207 u64 end; 192 u64 end;
@@ -255,6 +240,7 @@ struct fw_card {
255 int bm_retries; 240 int bm_retries;
256 int bm_generation; 241 int bm_generation;
257 242
243 bool broadcast_channel_allocated;
258 u32 broadcast_channel; 244 u32 broadcast_channel;
259 u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; 245 u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
260}; 246};
@@ -315,10 +301,8 @@ struct fw_iso_packet {
315struct fw_iso_context; 301struct fw_iso_context;
316 302
317typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, 303typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
318 u32 cycle, 304 u32 cycle, size_t header_length,
319 size_t header_length, 305 void *header, void *data);
320 void *header,
321 void *data);
322 306
323/* 307/*
324 * An iso buffer is just a set of pages mapped for DMA in the 308 * An iso buffer is just a set of pages mapped for DMA in the
@@ -344,36 +328,25 @@ struct fw_iso_context {
344 void *callback_data; 328 void *callback_data;
345}; 329};
346 330
347int 331int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
348fw_iso_buffer_init(struct fw_iso_buffer *buffer, 332 int page_count, enum dma_data_direction direction);
349 struct fw_card *card, 333int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
350 int page_count, 334void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
351 enum dma_data_direction direction); 335
352int 336struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
353fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); 337 int type, int channel, int speed, size_t header_size,
354void 338 fw_iso_callback_t callback, void *callback_data);
355fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); 339int fw_iso_context_queue(struct fw_iso_context *ctx,
356 340 struct fw_iso_packet *packet,
357struct fw_iso_context * 341 struct fw_iso_buffer *buffer,
358fw_iso_context_create(struct fw_card *card, int type, 342 unsigned long payload);
359 int channel, int speed, size_t header_size, 343int fw_iso_context_start(struct fw_iso_context *ctx,
360 fw_iso_callback_t callback, void *callback_data); 344 int cycle, int sync, int tags);
361 345int fw_iso_context_stop(struct fw_iso_context *ctx);
362void 346void fw_iso_context_destroy(struct fw_iso_context *ctx);
363fw_iso_context_destroy(struct fw_iso_context *ctx); 347
364 348void fw_iso_resource_manage(struct fw_card *card, int generation,
365int 349 u64 channels_mask, int *channel, int *bandwidth, bool allocate);
366fw_iso_context_queue(struct fw_iso_context *ctx,
367 struct fw_iso_packet *packet,
368 struct fw_iso_buffer *buffer,
369 unsigned long payload);
370
371int
372fw_iso_context_start(struct fw_iso_context *ctx,
373 int cycle, int sync, int tags);
374
375int
376fw_iso_context_stop(struct fw_iso_context *ctx);
377 350
378struct fw_card_driver { 351struct fw_card_driver {
379 /* 352 /*
@@ -415,7 +388,7 @@ struct fw_card_driver {
415 388
416 struct fw_iso_context * 389 struct fw_iso_context *
417 (*allocate_iso_context)(struct fw_card *card, 390 (*allocate_iso_context)(struct fw_card *card,
418 int type, size_t header_size); 391 int type, int channel, size_t header_size);
419 void (*free_iso_context)(struct fw_iso_context *ctx); 392 void (*free_iso_context)(struct fw_iso_context *ctx);
420 393
421 int (*start_iso)(struct fw_iso_context *ctx, 394 int (*start_iso)(struct fw_iso_context *ctx,
@@ -429,54 +402,45 @@ struct fw_card_driver {
429 int (*stop_iso)(struct fw_iso_context *ctx); 402 int (*stop_iso)(struct fw_iso_context *ctx);
430}; 403};
431 404
432int 405int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
433fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
434 406
435void 407void fw_send_request(struct fw_card *card, struct fw_transaction *t,
436fw_send_request(struct fw_card *card, struct fw_transaction *t,
437 int tcode, int destination_id, int generation, int speed, 408 int tcode, int destination_id, int generation, int speed,
438 unsigned long long offset, void *data, size_t length, 409 unsigned long long offset, void *payload, size_t length,
439 fw_transaction_callback_t callback, void *callback_data); 410 fw_transaction_callback_t callback, void *callback_data);
440
441int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
442 int generation, int speed, unsigned long long offset,
443 void *data, size_t length);
444
445int fw_cancel_transaction(struct fw_card *card, 411int fw_cancel_transaction(struct fw_card *card,
446 struct fw_transaction *transaction); 412 struct fw_transaction *transaction);
447
448void fw_flush_transactions(struct fw_card *card); 413void fw_flush_transactions(struct fw_card *card);
449 414int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
415 int generation, int speed, unsigned long long offset,
416 void *payload, size_t length);
450void fw_send_phy_config(struct fw_card *card, 417void fw_send_phy_config(struct fw_card *card,
451 int node_id, int generation, int gap_count); 418 int node_id, int generation, int gap_count);
452 419
420static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
421{
422 return tag << 14 | channel << 8 | sy;
423}
424
453/* 425/*
454 * Called by the topology code to inform the device code of node 426 * Called by the topology code to inform the device code of node
455 * activity; found, lost, or updated nodes. 427 * activity; found, lost, or updated nodes.
456 */ 428 */
457void 429void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
458fw_node_event(struct fw_card *card, struct fw_node *node, int event);
459 430
460/* API used by card level drivers */ 431/* API used by card level drivers */
461 432
462void 433void fw_card_initialize(struct fw_card *card,
463fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, 434 const struct fw_card_driver *driver, struct device *device);
464 struct device *device); 435int fw_card_add(struct fw_card *card,
465int 436 u32 max_receive, u32 link_speed, u64 guid);
466fw_card_add(struct fw_card *card, 437void fw_core_remove_card(struct fw_card *card);
467 u32 max_receive, u32 link_speed, u64 guid); 438void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
468 439 int generation, int self_id_count, u32 *self_ids);
469void 440void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
470fw_core_remove_card(struct fw_card *card); 441void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
471 442
472void 443extern int fw_irm_set_broadcast_channel_register(struct device *dev,
473fw_core_handle_bus_reset(struct fw_card *card, 444 void *data);
474 int node_id, int generation,
475 int self_id_count, u32 *self_ids);
476void
477fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
478
479void
480fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
481 445
482#endif /* __fw_transaction_h */ 446#endif /* __fw_transaction_h */
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 30022c4a5c12..4ec5061fa584 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -10,7 +10,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
11 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ 11 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
12 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \ 12 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
13 drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o 13 drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \
14 drm_info.o drm_debugfs.o
14 15
15drm-$(CONFIG_COMPAT) += drm_ioc32.o 16drm-$(CONFIG_COMPAT) += drm_ioc32.o
16 17
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
new file mode 100644
index 000000000000..c77c6c6d9d2c
--- /dev/null
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -0,0 +1,235 @@
1/**
2 * \file drm_debugfs.c
3 * debugfs support for DRM
4 *
5 * \author Ben Gamari <bgamari@gmail.com>
6 */
7
8/*
9 * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com
10 *
11 * Copyright 2008 Ben Gamari <bgamari@gmail.com>
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a
14 * copy of this software and associated documentation files (the "Software"),
15 * to deal in the Software without restriction, including without limitation
16 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17 * and/or sell copies of the Software, and to permit persons to whom the
18 * Software is furnished to do so, subject to the following conditions:
19 *
20 * The above copyright notice and this permission notice (including the next
21 * paragraph) shall be included in all copies or substantial portions of the
22 * Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
27 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
28 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
29 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
30 * OTHER DEALINGS IN THE SOFTWARE.
31 */
32
33#include <linux/debugfs.h>
34#include <linux/seq_file.h>
35#include "drmP.h"
36
37#if defined(CONFIG_DEBUG_FS)
38
39/***************************************************
40 * Initialization, etc.
41 **************************************************/
42
43static struct drm_info_list drm_debugfs_list[] = {
44 {"name", drm_name_info, 0},
45 {"vm", drm_vm_info, 0},
46 {"clients", drm_clients_info, 0},
47 {"queues", drm_queues_info, 0},
48 {"bufs", drm_bufs_info, 0},
49 {"gem_names", drm_gem_name_info, DRIVER_GEM},
50 {"gem_objects", drm_gem_object_info, DRIVER_GEM},
51#if DRM_DEBUG_CODE
52 {"vma", drm_vma_info, 0},
53#endif
54};
55#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
56
57
58static int drm_debugfs_open(struct inode *inode, struct file *file)
59{
60 struct drm_info_node *node = inode->i_private;
61
62 return single_open(file, node->info_ent->show, node);
63}
64
65
66static const struct file_operations drm_debugfs_fops = {
67 .owner = THIS_MODULE,
68 .open = drm_debugfs_open,
69 .read = seq_read,
70 .llseek = seq_lseek,
71 .release = single_release,
72};
73
74
75/**
76 * Initialize a given set of debugfs files for a device
77 *
78 * \param files The array of files to create
79 * \param count The number of files given
80 * \param root DRI debugfs dir entry.
81 * \param minor device minor number
82 * \return Zero on success, non-zero on failure
83 *
84 * Create a given set of debugfs files represented by an array of
85 * gdm_debugfs_lists in the given root directory.
86 */
87int drm_debugfs_create_files(struct drm_info_list *files, int count,
88 struct dentry *root, struct drm_minor *minor)
89{
90 struct drm_device *dev = minor->dev;
91 struct dentry *ent;
92 struct drm_info_node *tmp;
93 char name[64];
94 int i, ret;
95
96 for (i = 0; i < count; i++) {
97 u32 features = files[i].driver_features;
98
99 if (features != 0 &&
100 (dev->driver->driver_features & features) != features)
101 continue;
102
103 tmp = drm_alloc(sizeof(struct drm_info_node),
104 _DRM_DRIVER);
105 ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
106 root, tmp, &drm_debugfs_fops);
107 if (!ent) {
108 DRM_ERROR("Cannot create /debugfs/dri/%s/%s\n",
109 name, files[i].name);
110 drm_free(tmp, sizeof(struct drm_info_node),
111 _DRM_DRIVER);
112 ret = -1;
113 goto fail;
114 }
115
116 tmp->minor = minor;
117 tmp->dent = ent;
118 tmp->info_ent = &files[i];
119 list_add(&(tmp->list), &(minor->debugfs_nodes.list));
120 }
121 return 0;
122
123fail:
124 drm_debugfs_remove_files(files, count, minor);
125 return ret;
126}
127EXPORT_SYMBOL(drm_debugfs_create_files);
128
129/**
130 * Initialize the DRI debugfs filesystem for a device
131 *
132 * \param dev DRM device
133 * \param minor device minor number
134 * \param root DRI debugfs dir entry.
135 *
136 * Create the DRI debugfs root entry "/debugfs/dri", the device debugfs root entry
137 * "/debugfs/dri/%minor%/", and each entry in debugfs_list as
138 * "/debugfs/dri/%minor%/%name%".
139 */
140int drm_debugfs_init(struct drm_minor *minor, int minor_id,
141 struct dentry *root)
142{
143 struct drm_device *dev = minor->dev;
144 char name[64];
145 int ret;
146
147 INIT_LIST_HEAD(&minor->debugfs_nodes.list);
148 sprintf(name, "%d", minor_id);
149 minor->debugfs_root = debugfs_create_dir(name, root);
150 if (!minor->debugfs_root) {
151 DRM_ERROR("Cannot create /debugfs/dri/%s\n", name);
152 return -1;
153 }
154
155 ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
156 minor->debugfs_root, minor);
157 if (ret) {
158 debugfs_remove(minor->debugfs_root);
159 minor->debugfs_root = NULL;
160 DRM_ERROR("Failed to create core drm debugfs files\n");
161 return ret;
162 }
163
164 if (dev->driver->debugfs_init) {
165 ret = dev->driver->debugfs_init(minor);
166 if (ret) {
167 DRM_ERROR("DRM: Driver failed to initialize "
168 "/debugfs/dri.\n");
169 return ret;
170 }
171 }
172 return 0;
173}
174
175
176/**
177 * Remove a list of debugfs files
178 *
179 * \param files The list of files
180 * \param count The number of files
181 * \param minor The minor of which we should remove the files
182 * \return always zero.
183 *
184 * Remove all debugfs entries created by debugfs_init().
185 */
186int drm_debugfs_remove_files(struct drm_info_list *files, int count,
187 struct drm_minor *minor)
188{
189 struct list_head *pos, *q;
190 struct drm_info_node *tmp;
191 int i;
192
193 for (i = 0; i < count; i++) {
194 list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
195 tmp = list_entry(pos, struct drm_info_node, list);
196 if (tmp->info_ent == &files[i]) {
197 debugfs_remove(tmp->dent);
198 list_del(pos);
199 drm_free(tmp, sizeof(struct drm_info_node),
200 _DRM_DRIVER);
201 }
202 }
203 }
204 return 0;
205}
206EXPORT_SYMBOL(drm_debugfs_remove_files);
207
208/**
209 * Cleanup the debugfs filesystem resources.
210 *
211 * \param minor device minor number.
212 * \return always zero.
213 *
214 * Remove all debugfs entries created by debugfs_init().
215 */
216int drm_debugfs_cleanup(struct drm_minor *minor)
217{
218 struct drm_device *dev = minor->dev;
219
220 if (!minor->debugfs_root)
221 return 0;
222
223 if (dev->driver->debugfs_cleanup)
224 dev->driver->debugfs_cleanup(minor);
225
226 drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
227
228 debugfs_remove(minor->debugfs_root);
229 minor->debugfs_root = NULL;
230
231 return 0;
232}
233
234#endif /* CONFIG_DEBUG_FS */
235
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 14c7a23dc157..ed32edb17166 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -46,9 +46,11 @@
46 * OTHER DEALINGS IN THE SOFTWARE. 46 * OTHER DEALINGS IN THE SOFTWARE.
47 */ 47 */
48 48
49#include <linux/debugfs.h>
49#include "drmP.h" 50#include "drmP.h"
50#include "drm_core.h" 51#include "drm_core.h"
51 52
53
52static int drm_version(struct drm_device *dev, void *data, 54static int drm_version(struct drm_device *dev, void *data,
53 struct drm_file *file_priv); 55 struct drm_file *file_priv);
54 56
@@ -178,7 +180,7 @@ int drm_lastclose(struct drm_device * dev)
178 180
179 /* Clear AGP information */ 181 /* Clear AGP information */
180 if (drm_core_has_AGP(dev) && dev->agp && 182 if (drm_core_has_AGP(dev) && dev->agp &&
181 !drm_core_check_feature(dev, DRIVER_MODESET)) { 183 !drm_core_check_feature(dev, DRIVER_MODESET)) {
182 struct drm_agp_mem *entry, *tempe; 184 struct drm_agp_mem *entry, *tempe;
183 185
184 /* Remove AGP resources, but leave dev->agp 186 /* Remove AGP resources, but leave dev->agp
@@ -382,6 +384,13 @@ static int __init drm_core_init(void)
382 goto err_p3; 384 goto err_p3;
383 } 385 }
384 386
387 drm_debugfs_root = debugfs_create_dir("dri", NULL);
388 if (!drm_debugfs_root) {
389 DRM_ERROR("Cannot create /debugfs/dri\n");
390 ret = -1;
391 goto err_p3;
392 }
393
385 drm_mem_init(); 394 drm_mem_init();
386 395
387 DRM_INFO("Initialized %s %d.%d.%d %s\n", 396 DRM_INFO("Initialized %s %d.%d.%d %s\n",
@@ -400,6 +409,7 @@ err_p1:
400static void __exit drm_core_exit(void) 409static void __exit drm_core_exit(void)
401{ 410{
402 remove_proc_entry("dri", NULL); 411 remove_proc_entry("dri", NULL);
412 debugfs_remove(drm_debugfs_root);
403 drm_sysfs_destroy(); 413 drm_sysfs_destroy();
404 414
405 unregister_chrdev(DRM_MAJOR, "drm"); 415 unregister_chrdev(DRM_MAJOR, "drm");
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
new file mode 100644
index 000000000000..1b699768ccfb
--- /dev/null
+++ b/drivers/gpu/drm/drm_info.c
@@ -0,0 +1,328 @@
1/**
2 * \file drm_info.c
3 * DRM info file implementations
4 *
5 * \author Ben Gamari <bgamari@gmail.com>
6 */
7
8/*
9 * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
10 *
11 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
12 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
13 * Copyright 2008 Ben Gamari <bgamari@gmail.com>
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include <linux/seq_file.h>
37#include "drmP.h"
38
39/**
40 * Called when "/proc/dri/.../name" is read.
41 *
42 * Prints the device name together with the bus id if available.
43 */
44int drm_name_info(struct seq_file *m, void *data)
45{
46 struct drm_info_node *node = (struct drm_info_node *) m->private;
47 struct drm_minor *minor = node->minor;
48 struct drm_device *dev = minor->dev;
49 struct drm_master *master = minor->master;
50
51 if (!master)
52 return 0;
53
54 if (master->unique) {
55 seq_printf(m, "%s %s %s\n",
56 dev->driver->pci_driver.name,
57 pci_name(dev->pdev), master->unique);
58 } else {
59 seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
60 pci_name(dev->pdev));
61 }
62
63 return 0;
64}
65
66/**
67 * Called when "/proc/dri/.../vm" is read.
68 *
69 * Prints information about all mappings in drm_device::maplist.
70 */
71int drm_vm_info(struct seq_file *m, void *data)
72{
73 struct drm_info_node *node = (struct drm_info_node *) m->private;
74 struct drm_device *dev = node->minor->dev;
75 struct drm_map *map;
76 struct drm_map_list *r_list;
77
78 /* Hardcoded from _DRM_FRAME_BUFFER,
79 _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
80 _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
81 const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
82 const char *type;
83 int i;
84
85 mutex_lock(&dev->struct_mutex);
86 seq_printf(m, "slot offset size type flags address mtrr\n\n");
87 i = 0;
88 list_for_each_entry(r_list, &dev->maplist, head) {
89 map = r_list->map;
90 if (!map)
91 continue;
92 if (map->type < 0 || map->type > 5)
93 type = "??";
94 else
95 type = types[map->type];
96
97 seq_printf(m, "%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
98 i,
99 map->offset,
100 map->size, type, map->flags,
101 (unsigned long) r_list->user_token);
102 if (map->mtrr < 0)
103 seq_printf(m, "none\n");
104 else
105 seq_printf(m, "%4d\n", map->mtrr);
106 i++;
107 }
108 mutex_unlock(&dev->struct_mutex);
109 return 0;
110}
111
112/**
113 * Called when "/proc/dri/.../queues" is read.
114 */
115int drm_queues_info(struct seq_file *m, void *data)
116{
117 struct drm_info_node *node = (struct drm_info_node *) m->private;
118 struct drm_device *dev = node->minor->dev;
119 int i;
120 struct drm_queue *q;
121
122 mutex_lock(&dev->struct_mutex);
123 seq_printf(m, " ctx/flags use fin"
124 " blk/rw/rwf wait flushed queued"
125 " locks\n\n");
126 for (i = 0; i < dev->queue_count; i++) {
127 q = dev->queuelist[i];
128 atomic_inc(&q->use_count);
129 seq_printf(m, "%5d/0x%03x %5d %5d"
130 " %5d/%c%c/%c%c%c %5Zd\n",
131 i,
132 q->flags,
133 atomic_read(&q->use_count),
134 atomic_read(&q->finalization),
135 atomic_read(&q->block_count),
136 atomic_read(&q->block_read) ? 'r' : '-',
137 atomic_read(&q->block_write) ? 'w' : '-',
138 waitqueue_active(&q->read_queue) ? 'r' : '-',
139 waitqueue_active(&q->write_queue) ? 'w' : '-',
140 waitqueue_active(&q->flush_queue) ? 'f' : '-',
141 DRM_BUFCOUNT(&q->waitlist));
142 atomic_dec(&q->use_count);
143 }
144 mutex_unlock(&dev->struct_mutex);
145 return 0;
146}
147
148/**
149 * Called when "/proc/dri/.../bufs" is read.
150 */
151int drm_bufs_info(struct seq_file *m, void *data)
152{
153 struct drm_info_node *node = (struct drm_info_node *) m->private;
154 struct drm_device *dev = node->minor->dev;
155 struct drm_device_dma *dma;
156 int i, seg_pages;
157
158 mutex_lock(&dev->struct_mutex);
159 dma = dev->dma;
160 if (!dma) {
161 mutex_unlock(&dev->struct_mutex);
162 return 0;
163 }
164
165 seq_printf(m, " o size count free segs pages kB\n\n");
166 for (i = 0; i <= DRM_MAX_ORDER; i++) {
167 if (dma->bufs[i].buf_count) {
168 seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
169 seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
170 i,
171 dma->bufs[i].buf_size,
172 dma->bufs[i].buf_count,
173 atomic_read(&dma->bufs[i].freelist.count),
174 dma->bufs[i].seg_count,
175 seg_pages,
176 seg_pages * PAGE_SIZE / 1024);
177 }
178 }
179 seq_printf(m, "\n");
180 for (i = 0; i < dma->buf_count; i++) {
181 if (i && !(i % 32))
182 seq_printf(m, "\n");
183 seq_printf(m, " %d", dma->buflist[i]->list);
184 }
185 seq_printf(m, "\n");
186 mutex_unlock(&dev->struct_mutex);
187 return 0;
188}
189
190/**
191 * Called when "/proc/dri/.../vblank" is read.
192 */
193int drm_vblank_info(struct seq_file *m, void *data)
194{
195 struct drm_info_node *node = (struct drm_info_node *) m->private;
196 struct drm_device *dev = node->minor->dev;
197 int crtc;
198
199 mutex_lock(&dev->struct_mutex);
200 for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
201 seq_printf(m, "CRTC %d enable: %d\n",
202 crtc, atomic_read(&dev->vblank_refcount[crtc]));
203 seq_printf(m, "CRTC %d counter: %d\n",
204 crtc, drm_vblank_count(dev, crtc));
205 seq_printf(m, "CRTC %d last wait: %d\n",
206 crtc, dev->last_vblank_wait[crtc]);
207 seq_printf(m, "CRTC %d in modeset: %d\n",
208 crtc, dev->vblank_inmodeset[crtc]);
209 }
210 mutex_unlock(&dev->struct_mutex);
211 return 0;
212}
213
214/**
215 * Called when "/proc/dri/.../clients" is read.
216 *
217 */
218int drm_clients_info(struct seq_file *m, void *data)
219{
220 struct drm_info_node *node = (struct drm_info_node *) m->private;
221 struct drm_device *dev = node->minor->dev;
222 struct drm_file *priv;
223
224 mutex_lock(&dev->struct_mutex);
225 seq_printf(m, "a dev pid uid magic ioctls\n\n");
226 list_for_each_entry(priv, &dev->filelist, lhead) {
227 seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
228 priv->authenticated ? 'y' : 'n',
229 priv->minor->index,
230 priv->pid,
231 priv->uid, priv->magic, priv->ioctl_count);
232 }
233 mutex_unlock(&dev->struct_mutex);
234 return 0;
235}
236
237
238int drm_gem_one_name_info(int id, void *ptr, void *data)
239{
240 struct drm_gem_object *obj = ptr;
241 struct seq_file *m = data;
242
243 seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
244
245 seq_printf(m, "%6d %8zd %7d %8d\n",
246 obj->name, obj->size,
247 atomic_read(&obj->handlecount.refcount),
248 atomic_read(&obj->refcount.refcount));
249 return 0;
250}
251
252int drm_gem_name_info(struct seq_file *m, void *data)
253{
254 struct drm_info_node *node = (struct drm_info_node *) m->private;
255 struct drm_device *dev = node->minor->dev;
256
257 seq_printf(m, " name size handles refcount\n");
258 idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
259 return 0;
260}
261
262int drm_gem_object_info(struct seq_file *m, void* data)
263{
264 struct drm_info_node *node = (struct drm_info_node *) m->private;
265 struct drm_device *dev = node->minor->dev;
266
267 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
268 seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
269 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
270 seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
271 seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
272 seq_printf(m, "%d gtt total\n", dev->gtt_total);
273 return 0;
274}
275
276#if DRM_DEBUG_CODE
277
278int drm_vma_info(struct seq_file *m, void *data)
279{
280 struct drm_info_node *node = (struct drm_info_node *) m->private;
281 struct drm_device *dev = node->minor->dev;
282 struct drm_vma_entry *pt;
283 struct vm_area_struct *vma;
284#if defined(__i386__)
285 unsigned int pgprot;
286#endif
287
288 mutex_lock(&dev->struct_mutex);
289 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
290 atomic_read(&dev->vma_count),
291 high_memory, (u64)virt_to_phys(high_memory));
292
293 list_for_each_entry(pt, &dev->vmalist, head) {
294 vma = pt->vma;
295 if (!vma)
296 continue;
297 seq_printf(m,
298 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
299 pt->pid, vma->vm_start, vma->vm_end,
300 vma->vm_flags & VM_READ ? 'r' : '-',
301 vma->vm_flags & VM_WRITE ? 'w' : '-',
302 vma->vm_flags & VM_EXEC ? 'x' : '-',
303 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
304 vma->vm_flags & VM_LOCKED ? 'l' : '-',
305 vma->vm_flags & VM_IO ? 'i' : '-',
306 vma->vm_pgoff);
307
308#if defined(__i386__)
309 pgprot = pgprot_val(vma->vm_page_prot);
310 seq_printf(m, " %c%c%c%c%c%c%c%c%c",
311 pgprot & _PAGE_PRESENT ? 'p' : '-',
312 pgprot & _PAGE_RW ? 'w' : 'r',
313 pgprot & _PAGE_USER ? 'u' : 's',
314 pgprot & _PAGE_PWT ? 't' : 'b',
315 pgprot & _PAGE_PCD ? 'u' : 'c',
316 pgprot & _PAGE_ACCESSED ? 'a' : '-',
317 pgprot & _PAGE_DIRTY ? 'd' : '-',
318 pgprot & _PAGE_PSE ? 'm' : 'k',
319 pgprot & _PAGE_GLOBAL ? 'g' : 'l');
320#endif
321 seq_printf(m, "\n");
322 }
323 mutex_unlock(&dev->struct_mutex);
324 return 0;
325}
326
327#endif
328
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index b756f043a5f4..9b3c5af61e98 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -37,697 +37,196 @@
37 * OTHER DEALINGS IN THE SOFTWARE. 37 * OTHER DEALINGS IN THE SOFTWARE.
38 */ 38 */
39 39
40#include <linux/seq_file.h>
40#include "drmP.h" 41#include "drmP.h"
41 42
42static int drm_name_info(char *buf, char **start, off_t offset, 43
43 int request, int *eof, void *data); 44/***************************************************
44static int drm_vm_info(char *buf, char **start, off_t offset, 45 * Initialization, etc.
45 int request, int *eof, void *data); 46 **************************************************/
46static int drm_clients_info(char *buf, char **start, off_t offset,
47 int request, int *eof, void *data);
48static int drm_queues_info(char *buf, char **start, off_t offset,
49 int request, int *eof, void *data);
50static int drm_bufs_info(char *buf, char **start, off_t offset,
51 int request, int *eof, void *data);
52static int drm_vblank_info(char *buf, char **start, off_t offset,
53 int request, int *eof, void *data);
54static int drm_gem_name_info(char *buf, char **start, off_t offset,
55 int request, int *eof, void *data);
56static int drm_gem_object_info(char *buf, char **start, off_t offset,
57 int request, int *eof, void *data);
58#if DRM_DEBUG_CODE
59static int drm_vma_info(char *buf, char **start, off_t offset,
60 int request, int *eof, void *data);
61#endif
62 47
63/** 48/**
64 * Proc file list. 49 * Proc file list.
65 */ 50 */
66static struct drm_proc_list { 51static struct drm_info_list drm_proc_list[] = {
67 const char *name; /**< file name */
68 int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
69 u32 driver_features; /**< Required driver features for this entry */
70} drm_proc_list[] = {
71 {"name", drm_name_info, 0}, 52 {"name", drm_name_info, 0},
72 {"mem", drm_mem_info, 0},
73 {"vm", drm_vm_info, 0}, 53 {"vm", drm_vm_info, 0},
74 {"clients", drm_clients_info, 0}, 54 {"clients", drm_clients_info, 0},
75 {"queues", drm_queues_info, 0}, 55 {"queues", drm_queues_info, 0},
76 {"bufs", drm_bufs_info, 0}, 56 {"bufs", drm_bufs_info, 0},
77 {"vblank", drm_vblank_info, 0},
78 {"gem_names", drm_gem_name_info, DRIVER_GEM}, 57 {"gem_names", drm_gem_name_info, DRIVER_GEM},
79 {"gem_objects", drm_gem_object_info, DRIVER_GEM}, 58 {"gem_objects", drm_gem_object_info, DRIVER_GEM},
80#if DRM_DEBUG_CODE 59#if DRM_DEBUG_CODE
81 {"vma", drm_vma_info}, 60 {"vma", drm_vma_info, 0},
82#endif 61#endif
83}; 62};
84
85#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list) 63#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
86 64
65static int drm_proc_open(struct inode *inode, struct file *file)
66{
67 struct drm_info_node* node = PDE(inode)->data;
68
69 return single_open(file, node->info_ent->show, node);
70}
71
72static const struct file_operations drm_proc_fops = {
73 .owner = THIS_MODULE,
74 .open = drm_proc_open,
75 .read = seq_read,
76 .llseek = seq_lseek,
77 .release = single_release,
78};
79
80
87/** 81/**
88 * Initialize the DRI proc filesystem for a device. 82 * Initialize a given set of proc files for a device
89 * 83 *
90 * \param dev DRM device. 84 * \param files The array of files to create
91 * \param minor device minor number. 85 * \param count The number of files given
92 * \param root DRI proc dir entry. 86 * \param root DRI proc dir entry.
93 * \param dev_root resulting DRI device proc dir entry. 87 * \param minor device minor number
94 * \return root entry pointer on success, or NULL on failure. 88 * \return Zero on success, non-zero on failure
95 * 89 *
96 * Create the DRI proc root entry "/proc/dri", the device proc root entry 90 * Create a given set of proc files represented by an array of
97 * "/proc/dri/%minor%/", and each entry in proc_list as 91 * gdm_proc_lists in the given root directory.
98 * "/proc/dri/%minor%/%name%".
99 */ 92 */
100int drm_proc_init(struct drm_minor *minor, int minor_id, 93int drm_proc_create_files(struct drm_info_list *files, int count,
101 struct proc_dir_entry *root) 94 struct proc_dir_entry *root, struct drm_minor *minor)
102{ 95{
103 struct drm_device *dev = minor->dev; 96 struct drm_device *dev = minor->dev;
104 struct proc_dir_entry *ent; 97 struct proc_dir_entry *ent;
105 int i, j, ret; 98 struct drm_info_node *tmp;
106 char name[64]; 99 char name[64];
100 int i, ret;
107 101
108 sprintf(name, "%d", minor_id); 102 for (i = 0; i < count; i++) {
109 minor->dev_root = proc_mkdir(name, root); 103 u32 features = files[i].driver_features;
110 if (!minor->dev_root) {
111 DRM_ERROR("Cannot create /proc/dri/%s\n", name);
112 return -1;
113 }
114
115 for (i = 0; i < DRM_PROC_ENTRIES; i++) {
116 u32 features = drm_proc_list[i].driver_features;
117 104
118 if (features != 0 && 105 if (features != 0 &&
119 (dev->driver->driver_features & features) != features) 106 (dev->driver->driver_features & features) != features)
120 continue; 107 continue;
121 108
122 ent = create_proc_entry(drm_proc_list[i].name, 109 tmp = drm_alloc(sizeof(struct drm_info_node), _DRM_DRIVER);
123 S_IFREG | S_IRUGO, minor->dev_root); 110 ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root);
124 if (!ent) { 111 if (!ent) {
125 DRM_ERROR("Cannot create /proc/dri/%s/%s\n", 112 DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
126 name, drm_proc_list[i].name); 113 name, files[i].name);
114 drm_free(tmp, sizeof(struct drm_info_node),
115 _DRM_DRIVER);
127 ret = -1; 116 ret = -1;
128 goto fail; 117 goto fail;
129 } 118 }
130 ent->read_proc = drm_proc_list[i].f;
131 ent->data = minor;
132 }
133 119
134 if (dev->driver->proc_init) { 120 ent->proc_fops = &drm_proc_fops;
135 ret = dev->driver->proc_init(minor); 121 ent->data = tmp;
136 if (ret) { 122 tmp->minor = minor;
137 DRM_ERROR("DRM: Driver failed to initialize " 123 tmp->info_ent = &files[i];
138 "/proc/dri.\n"); 124 list_add(&(tmp->list), &(minor->proc_nodes.list));
139 goto fail;
140 }
141 } 125 }
142
143 return 0; 126 return 0;
144 fail:
145 127
146 for (j = 0; j < i; j++) 128fail:
147 remove_proc_entry(drm_proc_list[i].name, 129 for (i = 0; i < count; i++)
148 minor->dev_root); 130 remove_proc_entry(drm_proc_list[i].name, minor->proc_root);
149 remove_proc_entry(name, root);
150 minor->dev_root = NULL;
151 return ret; 131 return ret;
152} 132}
153 133
154/** 134/**
155 * Cleanup the proc filesystem resources. 135 * Initialize the DRI proc filesystem for a device
156 * 136 *
157 * \param minor device minor number. 137 * \param dev DRM device
138 * \param minor device minor number
158 * \param root DRI proc dir entry. 139 * \param root DRI proc dir entry.
159 * \param dev_root DRI device proc dir entry. 140 * \param dev_root resulting DRI device proc dir entry.
160 * \return always zero. 141 * \return root entry pointer on success, or NULL on failure.
161 * 142 *
162 * Remove all proc entries created by proc_init(). 143 * Create the DRI proc root entry "/proc/dri", the device proc root entry
144 * "/proc/dri/%minor%/", and each entry in proc_list as
145 * "/proc/dri/%minor%/%name%".
163 */ 146 */
164int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) 147int drm_proc_init(struct drm_minor *minor, int minor_id,
148 struct proc_dir_entry *root)
165{ 149{
166 struct drm_device *dev = minor->dev; 150 struct drm_device *dev = minor->dev;
167 int i;
168 char name[64]; 151 char name[64];
152 int ret;
169 153
170 if (!root || !minor->dev_root) 154 INIT_LIST_HEAD(&minor->proc_nodes.list);
171 return 0; 155 sprintf(name, "%d", minor_id);
172 156 minor->proc_root = proc_mkdir(name, root);
173 if (dev->driver->proc_cleanup) 157 if (!minor->proc_root) {
174 dev->driver->proc_cleanup(minor); 158 DRM_ERROR("Cannot create /proc/dri/%s\n", name);
175 159 return -1;
176 for (i = 0; i < DRM_PROC_ENTRIES; i++)
177 remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
178 sprintf(name, "%d", minor->index);
179 remove_proc_entry(name, root);
180
181 return 0;
182}
183
184/**
185 * Called when "/proc/dri/.../name" is read.
186 *
187 * \param buf output buffer.
188 * \param start start of output data.
189 * \param offset requested start offset.
190 * \param request requested number of bytes.
191 * \param eof whether there is no more data to return.
192 * \param data private data.
193 * \return number of written bytes.
194 *
195 * Prints the device name together with the bus id if available.
196 */
197static int drm_name_info(char *buf, char **start, off_t offset, int request,
198 int *eof, void *data)
199{
200 struct drm_minor *minor = (struct drm_minor *) data;
201 struct drm_master *master = minor->master;
202 struct drm_device *dev = minor->dev;
203 int len = 0;
204
205 if (offset > DRM_PROC_LIMIT) {
206 *eof = 1;
207 return 0;
208 } 160 }
209 161
210 if (!master) 162 ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
211 return 0; 163 minor->proc_root, minor);
212 164 if (ret) {
213 *start = &buf[offset]; 165 remove_proc_entry(name, root);
214 *eof = 0; 166 minor->proc_root = NULL;
215 167 DRM_ERROR("Failed to create core drm proc files\n");
216 if (master->unique) { 168 return ret;
217 DRM_PROC_PRINT("%s %s %s\n",
218 dev->driver->pci_driver.name,
219 pci_name(dev->pdev), master->unique);
220 } else {
221 DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
222 pci_name(dev->pdev));
223 } 169 }
224 170
225 if (len > request + offset) 171 if (dev->driver->proc_init) {
226 return request; 172 ret = dev->driver->proc_init(minor);
227 *eof = 1; 173 if (ret) {
228 return len - offset; 174 DRM_ERROR("DRM: Driver failed to initialize "
229} 175 "/proc/dri.\n");
230 176 return ret;
231/**
232 * Called when "/proc/dri/.../vm" is read.
233 *
234 * \param buf output buffer.
235 * \param start start of output data.
236 * \param offset requested start offset.
237 * \param request requested number of bytes.
238 * \param eof whether there is no more data to return.
239 * \param data private data.
240 * \return number of written bytes.
241 *
242 * Prints information about all mappings in drm_device::maplist.
243 */
244static int drm__vm_info(char *buf, char **start, off_t offset, int request,
245 int *eof, void *data)
246{
247 struct drm_minor *minor = (struct drm_minor *) data;
248 struct drm_device *dev = minor->dev;
249 int len = 0;
250 struct drm_map *map;
251 struct drm_map_list *r_list;
252
253 /* Hardcoded from _DRM_FRAME_BUFFER,
254 _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
255 _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
256 const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
257 const char *type;
258 int i;
259
260 if (offset > DRM_PROC_LIMIT) {
261 *eof = 1;
262 return 0;
263 }
264
265 *start = &buf[offset];
266 *eof = 0;
267
268 DRM_PROC_PRINT("slot offset size type flags "
269 "address mtrr\n\n");
270 i = 0;
271 list_for_each_entry(r_list, &dev->maplist, head) {
272 map = r_list->map;
273 if (!map)
274 continue;
275 if (map->type < 0 || map->type > 5)
276 type = "??";
277 else
278 type = types[map->type];
279 DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
280 i,
281 map->offset,
282 map->size, type, map->flags,
283 (unsigned long) r_list->user_token);
284 if (map->mtrr < 0) {
285 DRM_PROC_PRINT("none\n");
286 } else {
287 DRM_PROC_PRINT("%4d\n", map->mtrr);
288 } 177 }
289 i++;
290 }
291
292 if (len > request + offset)
293 return request;
294 *eof = 1;
295 return len - offset;
296}
297
298/**
299 * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
300 */
301static int drm_vm_info(char *buf, char **start, off_t offset, int request,
302 int *eof, void *data)
303{
304 struct drm_minor *minor = (struct drm_minor *) data;
305 struct drm_device *dev = minor->dev;
306 int ret;
307
308 mutex_lock(&dev->struct_mutex);
309 ret = drm__vm_info(buf, start, offset, request, eof, data);
310 mutex_unlock(&dev->struct_mutex);
311 return ret;
312}
313
314/**
315 * Called when "/proc/dri/.../queues" is read.
316 *
317 * \param buf output buffer.
318 * \param start start of output data.
319 * \param offset requested start offset.
320 * \param request requested number of bytes.
321 * \param eof whether there is no more data to return.
322 * \param data private data.
323 * \return number of written bytes.
324 */
325static int drm__queues_info(char *buf, char **start, off_t offset,
326 int request, int *eof, void *data)
327{
328 struct drm_minor *minor = (struct drm_minor *) data;
329 struct drm_device *dev = minor->dev;
330 int len = 0;
331 int i;
332 struct drm_queue *q;
333
334 if (offset > DRM_PROC_LIMIT) {
335 *eof = 1;
336 return 0;
337 } 178 }
338 179 return 0;
339 *start = &buf[offset];
340 *eof = 0;
341
342 DRM_PROC_PRINT(" ctx/flags use fin"
343 " blk/rw/rwf wait flushed queued"
344 " locks\n\n");
345 for (i = 0; i < dev->queue_count; i++) {
346 q = dev->queuelist[i];
347 atomic_inc(&q->use_count);
348 DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
349 "%5d/0x%03x %5d %5d"
350 " %5d/%c%c/%c%c%c %5Zd\n",
351 i,
352 q->flags,
353 atomic_read(&q->use_count),
354 atomic_read(&q->finalization),
355 atomic_read(&q->block_count),
356 atomic_read(&q->block_read) ? 'r' : '-',
357 atomic_read(&q->block_write) ? 'w' : '-',
358 waitqueue_active(&q->read_queue) ? 'r' : '-',
359 waitqueue_active(&q->
360 write_queue) ? 'w' : '-',
361 waitqueue_active(&q->
362 flush_queue) ? 'f' : '-',
363 DRM_BUFCOUNT(&q->waitlist));
364 atomic_dec(&q->use_count);
365 }
366
367 if (len > request + offset)
368 return request;
369 *eof = 1;
370 return len - offset;
371}
372
373/**
374 * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
375 */
376static int drm_queues_info(char *buf, char **start, off_t offset, int request,
377 int *eof, void *data)
378{
379 struct drm_minor *minor = (struct drm_minor *) data;
380 struct drm_device *dev = minor->dev;
381 int ret;
382
383 mutex_lock(&dev->struct_mutex);
384 ret = drm__queues_info(buf, start, offset, request, eof, data);
385 mutex_unlock(&dev->struct_mutex);
386 return ret;
387} 180}
388 181
389/** 182int drm_proc_remove_files(struct drm_info_list *files, int count,
390 * Called when "/proc/dri/.../bufs" is read. 183 struct drm_minor *minor)
391 *
392 * \param buf output buffer.
393 * \param start start of output data.
394 * \param offset requested start offset.
395 * \param request requested number of bytes.
396 * \param eof whether there is no more data to return.
397 * \param data private data.
398 * \return number of written bytes.
399 */
400static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
401 int *eof, void *data)
402{ 184{
403 struct drm_minor *minor = (struct drm_minor *) data; 185 struct list_head *pos, *q;
404 struct drm_device *dev = minor->dev; 186 struct drm_info_node *tmp;
405 int len = 0;
406 struct drm_device_dma *dma = dev->dma;
407 int i; 187 int i;
408 188
409 if (!dma || offset > DRM_PROC_LIMIT) { 189 for (i = 0; i < count; i++) {
410 *eof = 1; 190 list_for_each_safe(pos, q, &minor->proc_nodes.list) {
411 return 0; 191 tmp = list_entry(pos, struct drm_info_node, list);
412 } 192 if (tmp->info_ent == &files[i]) {
413 193 remove_proc_entry(files[i].name,
414 *start = &buf[offset]; 194 minor->proc_root);
415 *eof = 0; 195 list_del(pos);
416 196 drm_free(tmp, sizeof(struct drm_info_node),
417 DRM_PROC_PRINT(" o size count free segs pages kB\n\n"); 197 _DRM_DRIVER);
418 for (i = 0; i <= DRM_MAX_ORDER; i++) { 198 }
419 if (dma->bufs[i].buf_count) 199 }
420 DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
421 i,
422 dma->bufs[i].buf_size,
423 dma->bufs[i].buf_count,
424 atomic_read(&dma->bufs[i]
425 .freelist.count),
426 dma->bufs[i].seg_count,
427 dma->bufs[i].seg_count
428 * (1 << dma->bufs[i].page_order),
429 (dma->bufs[i].seg_count
430 * (1 << dma->bufs[i].page_order))
431 * PAGE_SIZE / 1024);
432 }
433 DRM_PROC_PRINT("\n");
434 for (i = 0; i < dma->buf_count; i++) {
435 if (i && !(i % 32))
436 DRM_PROC_PRINT("\n");
437 DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
438 } 200 }
439 DRM_PROC_PRINT("\n"); 201 return 0;
440
441 if (len > request + offset)
442 return request;
443 *eof = 1;
444 return len - offset;
445}
446
447/**
448 * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
449 */
450static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
451 int *eof, void *data)
452{
453 struct drm_minor *minor = (struct drm_minor *) data;
454 struct drm_device *dev = minor->dev;
455 int ret;
456
457 mutex_lock(&dev->struct_mutex);
458 ret = drm__bufs_info(buf, start, offset, request, eof, data);
459 mutex_unlock(&dev->struct_mutex);
460 return ret;
461} 202}
462 203
463/** 204/**
464 * Called when "/proc/dri/.../vblank" is read. 205 * Cleanup the proc filesystem resources.
465 * 206 *
466 * \param buf output buffer. 207 * \param minor device minor number.
467 * \param start start of output data. 208 * \param root DRI proc dir entry.
468 * \param offset requested start offset. 209 * \param dev_root DRI device proc dir entry.
469 * \param request requested number of bytes. 210 * \return always zero.
470 * \param eof whether there is no more data to return.
471 * \param data private data.
472 * \return number of written bytes.
473 */
474static int drm__vblank_info(char *buf, char **start, off_t offset, int request,
475 int *eof, void *data)
476{
477 struct drm_minor *minor = (struct drm_minor *) data;
478 struct drm_device *dev = minor->dev;
479 int len = 0;
480 int crtc;
481
482 if (offset > DRM_PROC_LIMIT) {
483 *eof = 1;
484 return 0;
485 }
486
487 *start = &buf[offset];
488 *eof = 0;
489
490 for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
491 DRM_PROC_PRINT("CRTC %d enable: %d\n",
492 crtc, atomic_read(&dev->vblank_refcount[crtc]));
493 DRM_PROC_PRINT("CRTC %d counter: %d\n",
494 crtc, drm_vblank_count(dev, crtc));
495 DRM_PROC_PRINT("CRTC %d last wait: %d\n",
496 crtc, dev->last_vblank_wait[crtc]);
497 DRM_PROC_PRINT("CRTC %d in modeset: %d\n",
498 crtc, dev->vblank_inmodeset[crtc]);
499 }
500
501 if (len > request + offset)
502 return request;
503 *eof = 1;
504 return len - offset;
505}
506
507/**
508 * Simply calls _vblank_info() while holding the drm_device::struct_mutex lock.
509 */
510static int drm_vblank_info(char *buf, char **start, off_t offset, int request,
511 int *eof, void *data)
512{
513 struct drm_minor *minor = (struct drm_minor *) data;
514 struct drm_device *dev = minor->dev;
515 int ret;
516
517 mutex_lock(&dev->struct_mutex);
518 ret = drm__vblank_info(buf, start, offset, request, eof, data);
519 mutex_unlock(&dev->struct_mutex);
520 return ret;
521}
522
523/**
524 * Called when "/proc/dri/.../clients" is read.
525 * 211 *
526 * \param buf output buffer. 212 * Remove all proc entries created by proc_init().
527 * \param start start of output data.
528 * \param offset requested start offset.
529 * \param request requested number of bytes.
530 * \param eof whether there is no more data to return.
531 * \param data private data.
532 * \return number of written bytes.
533 */ 213 */
534static int drm__clients_info(char *buf, char **start, off_t offset, 214int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
535 int request, int *eof, void *data)
536{ 215{
537 struct drm_minor *minor = (struct drm_minor *) data;
538 struct drm_device *dev = minor->dev; 216 struct drm_device *dev = minor->dev;
539 int len = 0; 217 char name[64];
540 struct drm_file *priv;
541 218
542 if (offset > DRM_PROC_LIMIT) { 219 if (!root || !minor->proc_root)
543 *eof = 1;
544 return 0; 220 return 0;
545 }
546
547 *start = &buf[offset];
548 *eof = 0;
549
550 DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
551 list_for_each_entry(priv, &dev->filelist, lhead) {
552 DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
553 priv->authenticated ? 'y' : 'n',
554 priv->minor->index,
555 priv->pid,
556 priv->uid, priv->magic, priv->ioctl_count);
557 }
558 221
559 if (len > request + offset) 222 if (dev->driver->proc_cleanup)
560 return request; 223 dev->driver->proc_cleanup(minor);
561 *eof = 1;
562 return len - offset;
563}
564
565/**
566 * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
567 */
568static int drm_clients_info(char *buf, char **start, off_t offset,
569 int request, int *eof, void *data)
570{
571 struct drm_minor *minor = (struct drm_minor *) data;
572 struct drm_device *dev = minor->dev;
573 int ret;
574
575 mutex_lock(&dev->struct_mutex);
576 ret = drm__clients_info(buf, start, offset, request, eof, data);
577 mutex_unlock(&dev->struct_mutex);
578 return ret;
579}
580
581struct drm_gem_name_info_data {
582 int len;
583 char *buf;
584 int eof;
585};
586 224
587static int drm_gem_one_name_info(int id, void *ptr, void *data) 225 drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
588{
589 struct drm_gem_object *obj = ptr;
590 struct drm_gem_name_info_data *nid = data;
591 226
592 DRM_INFO("name %d size %zd\n", obj->name, obj->size); 227 sprintf(name, "%d", minor->index);
593 if (nid->eof) 228 remove_proc_entry(name, root);
594 return 0;
595 229
596 nid->len += sprintf(&nid->buf[nid->len],
597 "%6d %8zd %7d %8d\n",
598 obj->name, obj->size,
599 atomic_read(&obj->handlecount.refcount),
600 atomic_read(&obj->refcount.refcount));
601 if (nid->len > DRM_PROC_LIMIT) {
602 nid->eof = 1;
603 return 0;
604 }
605 return 0; 230 return 0;
606} 231}
607 232
608static int drm_gem_name_info(char *buf, char **start, off_t offset,
609 int request, int *eof, void *data)
610{
611 struct drm_minor *minor = (struct drm_minor *) data;
612 struct drm_device *dev = minor->dev;
613 struct drm_gem_name_info_data nid;
614
615 if (offset > DRM_PROC_LIMIT) {
616 *eof = 1;
617 return 0;
618 }
619
620 nid.len = sprintf(buf, " name size handles refcount\n");
621 nid.buf = buf;
622 nid.eof = 0;
623 idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
624
625 *start = &buf[offset];
626 *eof = 0;
627 if (nid.len > request + offset)
628 return request;
629 *eof = 1;
630 return nid.len - offset;
631}
632
633static int drm_gem_object_info(char *buf, char **start, off_t offset,
634 int request, int *eof, void *data)
635{
636 struct drm_minor *minor = (struct drm_minor *) data;
637 struct drm_device *dev = minor->dev;
638 int len = 0;
639
640 if (offset > DRM_PROC_LIMIT) {
641 *eof = 1;
642 return 0;
643 }
644
645 *start = &buf[offset];
646 *eof = 0;
647 DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
648 DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
649 DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
650 DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
651 DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
652 DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
653 if (len > request + offset)
654 return request;
655 *eof = 1;
656 return len - offset;
657}
658
659#if DRM_DEBUG_CODE
660
661static int drm__vma_info(char *buf, char **start, off_t offset, int request,
662 int *eof, void *data)
663{
664 struct drm_minor *minor = (struct drm_minor *) data;
665 struct drm_device *dev = minor->dev;
666 int len = 0;
667 struct drm_vma_entry *pt;
668 struct vm_area_struct *vma;
669#if defined(__i386__)
670 unsigned int pgprot;
671#endif
672
673 if (offset > DRM_PROC_LIMIT) {
674 *eof = 1;
675 return 0;
676 }
677
678 *start = &buf[offset];
679 *eof = 0;
680
681 DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%llx\n",
682 atomic_read(&dev->vma_count),
683 high_memory, (u64)virt_to_phys(high_memory));
684 list_for_each_entry(pt, &dev->vmalist, head) {
685 if (!(vma = pt->vma))
686 continue;
687 DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
688 pt->pid,
689 vma->vm_start,
690 vma->vm_end,
691 vma->vm_flags & VM_READ ? 'r' : '-',
692 vma->vm_flags & VM_WRITE ? 'w' : '-',
693 vma->vm_flags & VM_EXEC ? 'x' : '-',
694 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
695 vma->vm_flags & VM_LOCKED ? 'l' : '-',
696 vma->vm_flags & VM_IO ? 'i' : '-',
697 vma->vm_pgoff);
698
699#if defined(__i386__)
700 pgprot = pgprot_val(vma->vm_page_prot);
701 DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
702 pgprot & _PAGE_PRESENT ? 'p' : '-',
703 pgprot & _PAGE_RW ? 'w' : 'r',
704 pgprot & _PAGE_USER ? 'u' : 's',
705 pgprot & _PAGE_PWT ? 't' : 'b',
706 pgprot & _PAGE_PCD ? 'u' : 'c',
707 pgprot & _PAGE_ACCESSED ? 'a' : '-',
708 pgprot & _PAGE_DIRTY ? 'd' : '-',
709 pgprot & _PAGE_PSE ? 'm' : 'k',
710 pgprot & _PAGE_GLOBAL ? 'g' : 'l');
711#endif
712 DRM_PROC_PRINT("\n");
713 }
714
715 if (len > request + offset)
716 return request;
717 *eof = 1;
718 return len - offset;
719}
720
721static int drm_vma_info(char *buf, char **start, off_t offset, int request,
722 int *eof, void *data)
723{
724 struct drm_minor *minor = (struct drm_minor *) data;
725 struct drm_device *dev = minor->dev;
726 int ret;
727
728 mutex_lock(&dev->struct_mutex);
729 ret = drm__vma_info(buf, start, offset, request, eof, data);
730 mutex_unlock(&dev->struct_mutex);
731 return ret;
732}
733#endif
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 7c8b15b22bf2..48f33be8fd0f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -50,6 +50,7 @@ struct idr drm_minors_idr;
50 50
51struct class *drm_class; 51struct class *drm_class;
52struct proc_dir_entry *drm_proc_root; 52struct proc_dir_entry *drm_proc_root;
53struct dentry *drm_debugfs_root;
53 54
54static int drm_minor_get_id(struct drm_device *dev, int type) 55static int drm_minor_get_id(struct drm_device *dev, int type)
55{ 56{
@@ -313,7 +314,15 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
313 goto err_mem; 314 goto err_mem;
314 } 315 }
315 } else 316 } else
316 new_minor->dev_root = NULL; 317 new_minor->proc_root = NULL;
318
319#if defined(CONFIG_DEBUG_FS)
320 ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
321 if (ret) {
322 DRM_ERROR("DRM: Failed to initialize /debugfs/dri.\n");
323 goto err_g2;
324 }
325#endif
317 326
318 ret = drm_sysfs_device_add(new_minor); 327 ret = drm_sysfs_device_add(new_minor);
319 if (ret) { 328 if (ret) {
@@ -451,6 +460,10 @@ int drm_put_minor(struct drm_minor **minor_p)
451 460
452 if (minor->type == DRM_MINOR_LEGACY) 461 if (minor->type == DRM_MINOR_LEGACY)
453 drm_proc_cleanup(minor, drm_proc_root); 462 drm_proc_cleanup(minor, drm_proc_root);
463#if defined(CONFIG_DEBUG_FS)
464 drm_debugfs_cleanup(minor);
465#endif
466
454 drm_sysfs_device_remove(minor); 467 drm_sysfs_device_remove(minor);
455 468
456 idr_remove(&drm_minors_idr, minor->index); 469 idr_remove(&drm_minors_idr, minor->index);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 793cba39d832..51c5a050aa73 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -7,7 +7,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
7 i915_suspend.o \ 7 i915_suspend.o \
8 i915_gem.o \ 8 i915_gem.o \
9 i915_gem_debug.o \ 9 i915_gem_debug.o \
10 i915_gem_proc.o \ 10 i915_gem_debugfs.o \
11 i915_gem_tiling.o \ 11 i915_gem_tiling.o \
12 intel_display.o \ 12 intel_display.o \
13 intel_crt.o \ 13 intel_crt.o \
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 6d21b9e48b89..a818b377e1f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -41,7 +41,6 @@
41int i915_wait_ring(struct drm_device * dev, int n, const char *caller) 41int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
42{ 42{
43 drm_i915_private_t *dev_priv = dev->dev_private; 43 drm_i915_private_t *dev_priv = dev->dev_private;
44 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
45 drm_i915_ring_buffer_t *ring = &(dev_priv->ring); 44 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
46 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; 45 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
47 u32 last_acthd = I915_READ(acthd_reg); 46 u32 last_acthd = I915_READ(acthd_reg);
@@ -58,8 +57,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
58 if (ring->space >= n) 57 if (ring->space >= n)
59 return 0; 58 return 0;
60 59
61 if (master_priv->sarea_priv) 60 if (dev->primary->master) {
62 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 61 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
62 if (master_priv->sarea_priv)
63 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
64 }
65
63 66
64 if (ring->head != last_head) 67 if (ring->head != last_head)
65 i = 0; 68 i = 0;
@@ -356,7 +359,7 @@ static int validate_cmd(int cmd)
356 return ret; 359 return ret;
357} 360}
358 361
359static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords) 362static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
360{ 363{
361 drm_i915_private_t *dev_priv = dev->dev_private; 364 drm_i915_private_t *dev_priv = dev->dev_private;
362 int i; 365 int i;
@@ -370,8 +373,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
370 for (i = 0; i < dwords;) { 373 for (i = 0; i < dwords;) {
371 int cmd, sz; 374 int cmd, sz;
372 375
373 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) 376 cmd = buffer[i];
374 return -EINVAL;
375 377
376 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) 378 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
377 return -EINVAL; 379 return -EINVAL;
@@ -379,11 +381,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
379 OUT_RING(cmd); 381 OUT_RING(cmd);
380 382
381 while (++i, --sz) { 383 while (++i, --sz) {
382 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], 384 OUT_RING(buffer[i]);
383 sizeof(cmd))) {
384 return -EINVAL;
385 }
386 OUT_RING(cmd);
387 } 385 }
388 } 386 }
389 387
@@ -397,17 +395,13 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
397 395
398int 396int
399i915_emit_box(struct drm_device *dev, 397i915_emit_box(struct drm_device *dev,
400 struct drm_clip_rect __user *boxes, 398 struct drm_clip_rect *boxes,
401 int i, int DR1, int DR4) 399 int i, int DR1, int DR4)
402{ 400{
403 drm_i915_private_t *dev_priv = dev->dev_private; 401 drm_i915_private_t *dev_priv = dev->dev_private;
404 struct drm_clip_rect box; 402 struct drm_clip_rect box = boxes[i];
405 RING_LOCALS; 403 RING_LOCALS;
406 404
407 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
408 return -EFAULT;
409 }
410
411 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { 405 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
412 DRM_ERROR("Bad box %d,%d..%d,%d\n", 406 DRM_ERROR("Bad box %d,%d..%d,%d\n",
413 box.x1, box.y1, box.x2, box.y2); 407 box.x1, box.y1, box.x2, box.y2);
@@ -460,7 +454,9 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
460} 454}
461 455
462static int i915_dispatch_cmdbuffer(struct drm_device * dev, 456static int i915_dispatch_cmdbuffer(struct drm_device * dev,
463 drm_i915_cmdbuffer_t * cmd) 457 drm_i915_cmdbuffer_t *cmd,
458 struct drm_clip_rect *cliprects,
459 void *cmdbuf)
464{ 460{
465 int nbox = cmd->num_cliprects; 461 int nbox = cmd->num_cliprects;
466 int i = 0, count, ret; 462 int i = 0, count, ret;
@@ -476,13 +472,13 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
476 472
477 for (i = 0; i < count; i++) { 473 for (i = 0; i < count; i++) {
478 if (i < nbox) { 474 if (i < nbox) {
479 ret = i915_emit_box(dev, cmd->cliprects, i, 475 ret = i915_emit_box(dev, cliprects, i,
480 cmd->DR1, cmd->DR4); 476 cmd->DR1, cmd->DR4);
481 if (ret) 477 if (ret)
482 return ret; 478 return ret;
483 } 479 }
484 480
485 ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4); 481 ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
486 if (ret) 482 if (ret)
487 return ret; 483 return ret;
488 } 484 }
@@ -492,10 +488,10 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
492} 488}
493 489
494static int i915_dispatch_batchbuffer(struct drm_device * dev, 490static int i915_dispatch_batchbuffer(struct drm_device * dev,
495 drm_i915_batchbuffer_t * batch) 491 drm_i915_batchbuffer_t * batch,
492 struct drm_clip_rect *cliprects)
496{ 493{
497 drm_i915_private_t *dev_priv = dev->dev_private; 494 drm_i915_private_t *dev_priv = dev->dev_private;
498 struct drm_clip_rect __user *boxes = batch->cliprects;
499 int nbox = batch->num_cliprects; 495 int nbox = batch->num_cliprects;
500 int i = 0, count; 496 int i = 0, count;
501 RING_LOCALS; 497 RING_LOCALS;
@@ -511,7 +507,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
511 507
512 for (i = 0; i < count; i++) { 508 for (i = 0; i < count; i++) {
513 if (i < nbox) { 509 if (i < nbox) {
514 int ret = i915_emit_box(dev, boxes, i, 510 int ret = i915_emit_box(dev, cliprects, i,
515 batch->DR1, batch->DR4); 511 batch->DR1, batch->DR4);
516 if (ret) 512 if (ret)
517 return ret; 513 return ret;
@@ -626,6 +622,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
626 master_priv->sarea_priv; 622 master_priv->sarea_priv;
627 drm_i915_batchbuffer_t *batch = data; 623 drm_i915_batchbuffer_t *batch = data;
628 int ret; 624 int ret;
625 struct drm_clip_rect *cliprects = NULL;
629 626
630 if (!dev_priv->allow_batchbuffer) { 627 if (!dev_priv->allow_batchbuffer) {
631 DRM_ERROR("Batchbuffer ioctl disabled\n"); 628 DRM_ERROR("Batchbuffer ioctl disabled\n");
@@ -637,17 +634,35 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
637 634
638 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 635 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
639 636
640 if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, 637 if (batch->num_cliprects < 0)
641 batch->num_cliprects * 638 return -EINVAL;
642 sizeof(struct drm_clip_rect))) 639
643 return -EFAULT; 640 if (batch->num_cliprects) {
641 cliprects = drm_calloc(batch->num_cliprects,
642 sizeof(struct drm_clip_rect),
643 DRM_MEM_DRIVER);
644 if (cliprects == NULL)
645 return -ENOMEM;
646
647 ret = copy_from_user(cliprects, batch->cliprects,
648 batch->num_cliprects *
649 sizeof(struct drm_clip_rect));
650 if (ret != 0)
651 goto fail_free;
652 }
644 653
645 mutex_lock(&dev->struct_mutex); 654 mutex_lock(&dev->struct_mutex);
646 ret = i915_dispatch_batchbuffer(dev, batch); 655 ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
647 mutex_unlock(&dev->struct_mutex); 656 mutex_unlock(&dev->struct_mutex);
648 657
649 if (sarea_priv) 658 if (sarea_priv)
650 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 659 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
660
661fail_free:
662 drm_free(cliprects,
663 batch->num_cliprects * sizeof(struct drm_clip_rect),
664 DRM_MEM_DRIVER);
665
651 return ret; 666 return ret;
652} 667}
653 668
@@ -659,6 +674,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
659 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 674 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
660 master_priv->sarea_priv; 675 master_priv->sarea_priv;
661 drm_i915_cmdbuffer_t *cmdbuf = data; 676 drm_i915_cmdbuffer_t *cmdbuf = data;
677 struct drm_clip_rect *cliprects = NULL;
678 void *batch_data;
662 int ret; 679 int ret;
663 680
664 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 681 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
@@ -666,25 +683,50 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
666 683
667 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 684 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
668 685
669 if (cmdbuf->num_cliprects && 686 if (cmdbuf->num_cliprects < 0)
670 DRM_VERIFYAREA_READ(cmdbuf->cliprects, 687 return -EINVAL;
671 cmdbuf->num_cliprects * 688
672 sizeof(struct drm_clip_rect))) { 689 batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER);
673 DRM_ERROR("Fault accessing cliprects\n"); 690 if (batch_data == NULL)
674 return -EFAULT; 691 return -ENOMEM;
692
693 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
694 if (ret != 0)
695 goto fail_batch_free;
696
697 if (cmdbuf->num_cliprects) {
698 cliprects = drm_calloc(cmdbuf->num_cliprects,
699 sizeof(struct drm_clip_rect),
700 DRM_MEM_DRIVER);
701 if (cliprects == NULL)
702 goto fail_batch_free;
703
704 ret = copy_from_user(cliprects, cmdbuf->cliprects,
705 cmdbuf->num_cliprects *
706 sizeof(struct drm_clip_rect));
707 if (ret != 0)
708 goto fail_clip_free;
675 } 709 }
676 710
677 mutex_lock(&dev->struct_mutex); 711 mutex_lock(&dev->struct_mutex);
678 ret = i915_dispatch_cmdbuffer(dev, cmdbuf); 712 ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
679 mutex_unlock(&dev->struct_mutex); 713 mutex_unlock(&dev->struct_mutex);
680 if (ret) { 714 if (ret) {
681 DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); 715 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
682 return ret; 716 goto fail_batch_free;
683 } 717 }
684 718
685 if (sarea_priv) 719 if (sarea_priv)
686 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 720 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
687 return 0; 721
722fail_batch_free:
723 drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
724fail_clip_free:
725 drm_free(cliprects,
726 cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
727 DRM_MEM_DRIVER);
728
729 return ret;
688} 730}
689 731
690static int i915_flip_bufs(struct drm_device *dev, void *data, 732static int i915_flip_bufs(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b293ef0bae71..dcb91f5df6e3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -150,8 +150,10 @@ static struct drm_driver driver = {
150 .get_reg_ofs = drm_core_get_reg_ofs, 150 .get_reg_ofs = drm_core_get_reg_ofs,
151 .master_create = i915_master_create, 151 .master_create = i915_master_create,
152 .master_destroy = i915_master_destroy, 152 .master_destroy = i915_master_destroy,
153 .proc_init = i915_gem_proc_init, 153#if defined(CONFIG_DEBUG_FS)
154 .proc_cleanup = i915_gem_proc_cleanup, 154 .debugfs_init = i915_gem_debugfs_init,
155 .debugfs_cleanup = i915_gem_debugfs_cleanup,
156#endif
155 .gem_init_object = i915_gem_init_object, 157 .gem_init_object = i915_gem_init_object,
156 .gem_free_object = i915_gem_free_object, 158 .gem_free_object = i915_gem_free_object,
157 .gem_vm_ops = &i915_gem_vm_ops, 159 .gem_vm_ops = &i915_gem_vm_ops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d6cc9861e0a1..c1685d0c704f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -404,7 +404,8 @@ struct drm_i915_gem_object {
404 /** AGP memory structure for our GTT binding. */ 404 /** AGP memory structure for our GTT binding. */
405 DRM_AGP_MEM *agp_mem; 405 DRM_AGP_MEM *agp_mem;
406 406
407 struct page **page_list; 407 struct page **pages;
408 int pages_refcount;
408 409
409 /** 410 /**
410 * Current offset of the object in GTT space. 411 * Current offset of the object in GTT space.
@@ -519,7 +520,7 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
519extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 520extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
520 unsigned long arg); 521 unsigned long arg);
521extern int i915_emit_box(struct drm_device *dev, 522extern int i915_emit_box(struct drm_device *dev,
522 struct drm_clip_rect __user *boxes, 523 struct drm_clip_rect *boxes,
523 int i, int DR1, int DR4); 524 int i, int DR1, int DR4);
524 525
525/* i915_irq.c */ 526/* i915_irq.c */
@@ -604,8 +605,6 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
604int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 605int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
605 struct drm_file *file_priv); 606 struct drm_file *file_priv);
606void i915_gem_load(struct drm_device *dev); 607void i915_gem_load(struct drm_device *dev);
607int i915_gem_proc_init(struct drm_minor *minor);
608void i915_gem_proc_cleanup(struct drm_minor *minor);
609int i915_gem_init_object(struct drm_gem_object *obj); 608int i915_gem_init_object(struct drm_gem_object *obj);
610void i915_gem_free_object(struct drm_gem_object *obj); 609void i915_gem_free_object(struct drm_gem_object *obj);
611int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); 610int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
@@ -649,6 +648,10 @@ void i915_gem_dump_object(struct drm_gem_object *obj, int len,
649 const char *where, uint32_t mark); 648 const char *where, uint32_t mark);
650void i915_dump_lru(struct drm_device *dev, const char *where); 649void i915_dump_lru(struct drm_device *dev, const char *where);
651 650
651/* i915_debugfs.c */
652int i915_gem_debugfs_init(struct drm_minor *minor);
653void i915_gem_debugfs_cleanup(struct drm_minor *minor);
654
652/* i915_suspend.c */ 655/* i915_suspend.c */
653extern int i915_save_state(struct drm_device *dev); 656extern int i915_save_state(struct drm_device *dev);
654extern int i915_restore_state(struct drm_device *dev); 657extern int i915_restore_state(struct drm_device *dev);
@@ -784,15 +787,21 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
784 (dev)->pci_device == 0x2E22 || \ 787 (dev)->pci_device == 0x2E22 || \
785 IS_GM45(dev)) 788 IS_GM45(dev))
786 789
790#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
791#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
792#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
793
787#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ 794#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
788 (dev)->pci_device == 0x29B2 || \ 795 (dev)->pci_device == 0x29B2 || \
789 (dev)->pci_device == 0x29D2) 796 (dev)->pci_device == 0x29D2 || \
797 (IS_IGD(dev)))
790 798
791#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ 799#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
792 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) 800 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
793 801
794#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ 802#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
795 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) 803 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
804 IS_IGD(dev))
796 805
797#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) 806#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
798/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 807/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 37427e4016cb..b52cba0f16d2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,8 +43,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
43 uint64_t offset, 43 uint64_t offset,
44 uint64_t size); 44 uint64_t size);
45static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); 45static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
46static int i915_gem_object_get_page_list(struct drm_gem_object *obj); 46static int i915_gem_object_get_pages(struct drm_gem_object *obj);
47static void i915_gem_object_free_page_list(struct drm_gem_object *obj); 47static void i915_gem_object_put_pages(struct drm_gem_object *obj);
48static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 48static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
49static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 49static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
50 unsigned alignment); 50 unsigned alignment);
@@ -136,6 +136,224 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
136 return 0; 136 return 0;
137} 137}
138 138
139static inline int
140fast_shmem_read(struct page **pages,
141 loff_t page_base, int page_offset,
142 char __user *data,
143 int length)
144{
145 char __iomem *vaddr;
146 int ret;
147
148 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
149 if (vaddr == NULL)
150 return -ENOMEM;
151 ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
152 kunmap_atomic(vaddr, KM_USER0);
153
154 return ret;
155}
156
157static inline int
158slow_shmem_copy(struct page *dst_page,
159 int dst_offset,
160 struct page *src_page,
161 int src_offset,
162 int length)
163{
164 char *dst_vaddr, *src_vaddr;
165
166 dst_vaddr = kmap_atomic(dst_page, KM_USER0);
167 if (dst_vaddr == NULL)
168 return -ENOMEM;
169
170 src_vaddr = kmap_atomic(src_page, KM_USER1);
171 if (src_vaddr == NULL) {
172 kunmap_atomic(dst_vaddr, KM_USER0);
173 return -ENOMEM;
174 }
175
176 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
177
178 kunmap_atomic(src_vaddr, KM_USER1);
179 kunmap_atomic(dst_vaddr, KM_USER0);
180
181 return 0;
182}
183
184/**
185 * This is the fast shmem pread path, which attempts to copy_from_user directly
186 * from the backing pages of the object to the user's address space. On a
187 * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
188 */
189static int
190i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
191 struct drm_i915_gem_pread *args,
192 struct drm_file *file_priv)
193{
194 struct drm_i915_gem_object *obj_priv = obj->driver_private;
195 ssize_t remain;
196 loff_t offset, page_base;
197 char __user *user_data;
198 int page_offset, page_length;
199 int ret;
200
201 user_data = (char __user *) (uintptr_t) args->data_ptr;
202 remain = args->size;
203
204 mutex_lock(&dev->struct_mutex);
205
206 ret = i915_gem_object_get_pages(obj);
207 if (ret != 0)
208 goto fail_unlock;
209
210 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
211 args->size);
212 if (ret != 0)
213 goto fail_put_pages;
214
215 obj_priv = obj->driver_private;
216 offset = args->offset;
217
218 while (remain > 0) {
219 /* Operation in this page
220 *
221 * page_base = page offset within aperture
222 * page_offset = offset within page
223 * page_length = bytes to copy for this page
224 */
225 page_base = (offset & ~(PAGE_SIZE-1));
226 page_offset = offset & (PAGE_SIZE-1);
227 page_length = remain;
228 if ((page_offset + remain) > PAGE_SIZE)
229 page_length = PAGE_SIZE - page_offset;
230
231 ret = fast_shmem_read(obj_priv->pages,
232 page_base, page_offset,
233 user_data, page_length);
234 if (ret)
235 goto fail_put_pages;
236
237 remain -= page_length;
238 user_data += page_length;
239 offset += page_length;
240 }
241
242fail_put_pages:
243 i915_gem_object_put_pages(obj);
244fail_unlock:
245 mutex_unlock(&dev->struct_mutex);
246
247 return ret;
248}
249
250/**
251 * This is the fallback shmem pread path, which allocates temporary storage
252 * in kernel space to copy_to_user into outside of the struct_mutex, so we
253 * can copy out of the object's backing pages while holding the struct mutex
254 * and not take page faults.
255 */
256static int
257i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
258 struct drm_i915_gem_pread *args,
259 struct drm_file *file_priv)
260{
261 struct drm_i915_gem_object *obj_priv = obj->driver_private;
262 struct mm_struct *mm = current->mm;
263 struct page **user_pages;
264 ssize_t remain;
265 loff_t offset, pinned_pages, i;
266 loff_t first_data_page, last_data_page, num_pages;
267 int shmem_page_index, shmem_page_offset;
268 int data_page_index, data_page_offset;
269 int page_length;
270 int ret;
271 uint64_t data_ptr = args->data_ptr;
272
273 remain = args->size;
274
275 /* Pin the user pages containing the data. We can't fault while
276 * holding the struct mutex, yet we want to hold it while
277 * dereferencing the user data.
278 */
279 first_data_page = data_ptr / PAGE_SIZE;
280 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
281 num_pages = last_data_page - first_data_page + 1;
282
283 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
284 if (user_pages == NULL)
285 return -ENOMEM;
286
287 down_read(&mm->mmap_sem);
288 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
289 num_pages, 0, 0, user_pages, NULL);
290 up_read(&mm->mmap_sem);
291 if (pinned_pages < num_pages) {
292 ret = -EFAULT;
293 goto fail_put_user_pages;
294 }
295
296 mutex_lock(&dev->struct_mutex);
297
298 ret = i915_gem_object_get_pages(obj);
299 if (ret != 0)
300 goto fail_unlock;
301
302 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
303 args->size);
304 if (ret != 0)
305 goto fail_put_pages;
306
307 obj_priv = obj->driver_private;
308 offset = args->offset;
309
310 while (remain > 0) {
311 /* Operation in this page
312 *
313 * shmem_page_index = page number within shmem file
314 * shmem_page_offset = offset within page in shmem file
315 * data_page_index = page number in get_user_pages return
316 * data_page_offset = offset with data_page_index page.
317 * page_length = bytes to copy for this page
318 */
319 shmem_page_index = offset / PAGE_SIZE;
320 shmem_page_offset = offset & ~PAGE_MASK;
321 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
322 data_page_offset = data_ptr & ~PAGE_MASK;
323
324 page_length = remain;
325 if ((shmem_page_offset + page_length) > PAGE_SIZE)
326 page_length = PAGE_SIZE - shmem_page_offset;
327 if ((data_page_offset + page_length) > PAGE_SIZE)
328 page_length = PAGE_SIZE - data_page_offset;
329
330 ret = slow_shmem_copy(user_pages[data_page_index],
331 data_page_offset,
332 obj_priv->pages[shmem_page_index],
333 shmem_page_offset,
334 page_length);
335 if (ret)
336 goto fail_put_pages;
337
338 remain -= page_length;
339 data_ptr += page_length;
340 offset += page_length;
341 }
342
343fail_put_pages:
344 i915_gem_object_put_pages(obj);
345fail_unlock:
346 mutex_unlock(&dev->struct_mutex);
347fail_put_user_pages:
348 for (i = 0; i < pinned_pages; i++) {
349 SetPageDirty(user_pages[i]);
350 page_cache_release(user_pages[i]);
351 }
352 kfree(user_pages);
353
354 return ret;
355}
356
139/** 357/**
140 * Reads data from the object referenced by handle. 358 * Reads data from the object referenced by handle.
141 * 359 *
@@ -148,8 +366,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
148 struct drm_i915_gem_pread *args = data; 366 struct drm_i915_gem_pread *args = data;
149 struct drm_gem_object *obj; 367 struct drm_gem_object *obj;
150 struct drm_i915_gem_object *obj_priv; 368 struct drm_i915_gem_object *obj_priv;
151 ssize_t read;
152 loff_t offset;
153 int ret; 369 int ret;
154 370
155 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 371 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
@@ -167,33 +383,13 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
167 return -EINVAL; 383 return -EINVAL;
168 } 384 }
169 385
170 mutex_lock(&dev->struct_mutex); 386 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
171 387 if (ret != 0)
172 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, 388 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
173 args->size);
174 if (ret != 0) {
175 drm_gem_object_unreference(obj);
176 mutex_unlock(&dev->struct_mutex);
177 return ret;
178 }
179
180 offset = args->offset;
181
182 read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
183 args->size, &offset);
184 if (read != args->size) {
185 drm_gem_object_unreference(obj);
186 mutex_unlock(&dev->struct_mutex);
187 if (read < 0)
188 return read;
189 else
190 return -EINVAL;
191 }
192 389
193 drm_gem_object_unreference(obj); 390 drm_gem_object_unreference(obj);
194 mutex_unlock(&dev->struct_mutex);
195 391
196 return 0; 392 return ret;
197} 393}
198 394
199/* This is the fast write path which cannot handle 395/* This is the fast write path which cannot handle
@@ -223,29 +419,51 @@ fast_user_write(struct io_mapping *mapping,
223 */ 419 */
224 420
225static inline int 421static inline int
226slow_user_write(struct io_mapping *mapping, 422slow_kernel_write(struct io_mapping *mapping,
227 loff_t page_base, int page_offset, 423 loff_t gtt_base, int gtt_offset,
228 char __user *user_data, 424 struct page *user_page, int user_offset,
229 int length) 425 int length)
230{ 426{
231 char __iomem *vaddr; 427 char *src_vaddr, *dst_vaddr;
232 unsigned long unwritten; 428 unsigned long unwritten;
233 429
234 vaddr = io_mapping_map_wc(mapping, page_base); 430 dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
235 if (vaddr == NULL) 431 src_vaddr = kmap_atomic(user_page, KM_USER1);
236 return -EFAULT; 432 unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
237 unwritten = __copy_from_user(vaddr + page_offset, 433 src_vaddr + user_offset,
238 user_data, length); 434 length);
239 io_mapping_unmap(vaddr); 435 kunmap_atomic(src_vaddr, KM_USER1);
436 io_mapping_unmap_atomic(dst_vaddr);
240 if (unwritten) 437 if (unwritten)
241 return -EFAULT; 438 return -EFAULT;
242 return 0; 439 return 0;
243} 440}
244 441
442static inline int
443fast_shmem_write(struct page **pages,
444 loff_t page_base, int page_offset,
445 char __user *data,
446 int length)
447{
448 char __iomem *vaddr;
449
450 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
451 if (vaddr == NULL)
452 return -ENOMEM;
453 __copy_from_user_inatomic(vaddr + page_offset, data, length);
454 kunmap_atomic(vaddr, KM_USER0);
455
456 return 0;
457}
458
459/**
460 * This is the fast pwrite path, where we copy the data directly from the
461 * user into the GTT, uncached.
462 */
245static int 463static int
246i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 464i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
247 struct drm_i915_gem_pwrite *args, 465 struct drm_i915_gem_pwrite *args,
248 struct drm_file *file_priv) 466 struct drm_file *file_priv)
249{ 467{
250 struct drm_i915_gem_object *obj_priv = obj->driver_private; 468 struct drm_i915_gem_object *obj_priv = obj->driver_private;
251 drm_i915_private_t *dev_priv = dev->dev_private; 469 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -273,7 +491,6 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
273 491
274 obj_priv = obj->driver_private; 492 obj_priv = obj->driver_private;
275 offset = obj_priv->gtt_offset + args->offset; 493 offset = obj_priv->gtt_offset + args->offset;
276 obj_priv->dirty = 1;
277 494
278 while (remain > 0) { 495 while (remain > 0) {
279 /* Operation in this page 496 /* Operation in this page
@@ -292,16 +509,11 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
292 page_offset, user_data, page_length); 509 page_offset, user_data, page_length);
293 510
294 /* If we get a fault while copying data, then (presumably) our 511 /* If we get a fault while copying data, then (presumably) our
295 * source page isn't available. In this case, use the 512 * source page isn't available. Return the error and we'll
296 * non-atomic function 513 * retry in the slow path.
297 */ 514 */
298 if (ret) { 515 if (ret)
299 ret = slow_user_write (dev_priv->mm.gtt_mapping, 516 goto fail;
300 page_base, page_offset,
301 user_data, page_length);
302 if (ret)
303 goto fail;
304 }
305 517
306 remain -= page_length; 518 remain -= page_length;
307 user_data += page_length; 519 user_data += page_length;
@@ -315,39 +527,284 @@ fail:
315 return ret; 527 return ret;
316} 528}
317 529
530/**
531 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
532 * the memory and maps it using kmap_atomic for copying.
533 *
534 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
535 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
536 */
318static int 537static int
319i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 538i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
320 struct drm_i915_gem_pwrite *args, 539 struct drm_i915_gem_pwrite *args,
321 struct drm_file *file_priv) 540 struct drm_file *file_priv)
322{ 541{
542 struct drm_i915_gem_object *obj_priv = obj->driver_private;
543 drm_i915_private_t *dev_priv = dev->dev_private;
544 ssize_t remain;
545 loff_t gtt_page_base, offset;
546 loff_t first_data_page, last_data_page, num_pages;
547 loff_t pinned_pages, i;
548 struct page **user_pages;
549 struct mm_struct *mm = current->mm;
550 int gtt_page_offset, data_page_offset, data_page_index, page_length;
323 int ret; 551 int ret;
324 loff_t offset; 552 uint64_t data_ptr = args->data_ptr;
325 ssize_t written; 553
554 remain = args->size;
555
556 /* Pin the user pages containing the data. We can't fault while
557 * holding the struct mutex, and all of the pwrite implementations
558 * want to hold it while dereferencing the user data.
559 */
560 first_data_page = data_ptr / PAGE_SIZE;
561 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
562 num_pages = last_data_page - first_data_page + 1;
563
564 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
565 if (user_pages == NULL)
566 return -ENOMEM;
567
568 down_read(&mm->mmap_sem);
569 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
570 num_pages, 0, 0, user_pages, NULL);
571 up_read(&mm->mmap_sem);
572 if (pinned_pages < num_pages) {
573 ret = -EFAULT;
574 goto out_unpin_pages;
575 }
326 576
327 mutex_lock(&dev->struct_mutex); 577 mutex_lock(&dev->struct_mutex);
578 ret = i915_gem_object_pin(obj, 0);
579 if (ret)
580 goto out_unlock;
581
582 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
583 if (ret)
584 goto out_unpin_object;
585
586 obj_priv = obj->driver_private;
587 offset = obj_priv->gtt_offset + args->offset;
588
589 while (remain > 0) {
590 /* Operation in this page
591 *
592 * gtt_page_base = page offset within aperture
593 * gtt_page_offset = offset within page in aperture
594 * data_page_index = page number in get_user_pages return
595 * data_page_offset = offset with data_page_index page.
596 * page_length = bytes to copy for this page
597 */
598 gtt_page_base = offset & PAGE_MASK;
599 gtt_page_offset = offset & ~PAGE_MASK;
600 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
601 data_page_offset = data_ptr & ~PAGE_MASK;
602
603 page_length = remain;
604 if ((gtt_page_offset + page_length) > PAGE_SIZE)
605 page_length = PAGE_SIZE - gtt_page_offset;
606 if ((data_page_offset + page_length) > PAGE_SIZE)
607 page_length = PAGE_SIZE - data_page_offset;
608
609 ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
610 gtt_page_base, gtt_page_offset,
611 user_pages[data_page_index],
612 data_page_offset,
613 page_length);
614
615 /* If we get a fault while copying data, then (presumably) our
616 * source page isn't available. Return the error and we'll
617 * retry in the slow path.
618 */
619 if (ret)
620 goto out_unpin_object;
621
622 remain -= page_length;
623 offset += page_length;
624 data_ptr += page_length;
625 }
626
627out_unpin_object:
628 i915_gem_object_unpin(obj);
629out_unlock:
630 mutex_unlock(&dev->struct_mutex);
631out_unpin_pages:
632 for (i = 0; i < pinned_pages; i++)
633 page_cache_release(user_pages[i]);
634 kfree(user_pages);
635
636 return ret;
637}
638
639/**
640 * This is the fast shmem pwrite path, which attempts to directly
641 * copy_from_user into the kmapped pages backing the object.
642 */
643static int
644i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
645 struct drm_i915_gem_pwrite *args,
646 struct drm_file *file_priv)
647{
648 struct drm_i915_gem_object *obj_priv = obj->driver_private;
649 ssize_t remain;
650 loff_t offset, page_base;
651 char __user *user_data;
652 int page_offset, page_length;
653 int ret;
654
655 user_data = (char __user *) (uintptr_t) args->data_ptr;
656 remain = args->size;
657
658 mutex_lock(&dev->struct_mutex);
659
660 ret = i915_gem_object_get_pages(obj);
661 if (ret != 0)
662 goto fail_unlock;
328 663
329 ret = i915_gem_object_set_to_cpu_domain(obj, 1); 664 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
330 if (ret) { 665 if (ret != 0)
331 mutex_unlock(&dev->struct_mutex); 666 goto fail_put_pages;
332 return ret; 667
668 obj_priv = obj->driver_private;
669 offset = args->offset;
670 obj_priv->dirty = 1;
671
672 while (remain > 0) {
673 /* Operation in this page
674 *
675 * page_base = page offset within aperture
676 * page_offset = offset within page
677 * page_length = bytes to copy for this page
678 */
679 page_base = (offset & ~(PAGE_SIZE-1));
680 page_offset = offset & (PAGE_SIZE-1);
681 page_length = remain;
682 if ((page_offset + remain) > PAGE_SIZE)
683 page_length = PAGE_SIZE - page_offset;
684
685 ret = fast_shmem_write(obj_priv->pages,
686 page_base, page_offset,
687 user_data, page_length);
688 if (ret)
689 goto fail_put_pages;
690
691 remain -= page_length;
692 user_data += page_length;
693 offset += page_length;
333 } 694 }
334 695
696fail_put_pages:
697 i915_gem_object_put_pages(obj);
698fail_unlock:
699 mutex_unlock(&dev->struct_mutex);
700
701 return ret;
702}
703
704/**
705 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
706 * the memory and maps it using kmap_atomic for copying.
707 *
708 * This avoids taking mmap_sem for faulting on the user's address while the
709 * struct_mutex is held.
710 */
711static int
712i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
713 struct drm_i915_gem_pwrite *args,
714 struct drm_file *file_priv)
715{
716 struct drm_i915_gem_object *obj_priv = obj->driver_private;
717 struct mm_struct *mm = current->mm;
718 struct page **user_pages;
719 ssize_t remain;
720 loff_t offset, pinned_pages, i;
721 loff_t first_data_page, last_data_page, num_pages;
722 int shmem_page_index, shmem_page_offset;
723 int data_page_index, data_page_offset;
724 int page_length;
725 int ret;
726 uint64_t data_ptr = args->data_ptr;
727
728 remain = args->size;
729
730 /* Pin the user pages containing the data. We can't fault while
731 * holding the struct mutex, and all of the pwrite implementations
732 * want to hold it while dereferencing the user data.
733 */
734 first_data_page = data_ptr / PAGE_SIZE;
735 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
736 num_pages = last_data_page - first_data_page + 1;
737
738 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
739 if (user_pages == NULL)
740 return -ENOMEM;
741
742 down_read(&mm->mmap_sem);
743 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
744 num_pages, 0, 0, user_pages, NULL);
745 up_read(&mm->mmap_sem);
746 if (pinned_pages < num_pages) {
747 ret = -EFAULT;
748 goto fail_put_user_pages;
749 }
750
751 mutex_lock(&dev->struct_mutex);
752
753 ret = i915_gem_object_get_pages(obj);
754 if (ret != 0)
755 goto fail_unlock;
756
757 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
758 if (ret != 0)
759 goto fail_put_pages;
760
761 obj_priv = obj->driver_private;
335 offset = args->offset; 762 offset = args->offset;
763 obj_priv->dirty = 1;
336 764
337 written = vfs_write(obj->filp, 765 while (remain > 0) {
338 (char __user *)(uintptr_t) args->data_ptr, 766 /* Operation in this page
339 args->size, &offset); 767 *
340 if (written != args->size) { 768 * shmem_page_index = page number within shmem file
341 mutex_unlock(&dev->struct_mutex); 769 * shmem_page_offset = offset within page in shmem file
342 if (written < 0) 770 * data_page_index = page number in get_user_pages return
343 return written; 771 * data_page_offset = offset with data_page_index page.
344 else 772 * page_length = bytes to copy for this page
345 return -EINVAL; 773 */
774 shmem_page_index = offset / PAGE_SIZE;
775 shmem_page_offset = offset & ~PAGE_MASK;
776 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
777 data_page_offset = data_ptr & ~PAGE_MASK;
778
779 page_length = remain;
780 if ((shmem_page_offset + page_length) > PAGE_SIZE)
781 page_length = PAGE_SIZE - shmem_page_offset;
782 if ((data_page_offset + page_length) > PAGE_SIZE)
783 page_length = PAGE_SIZE - data_page_offset;
784
785 ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
786 shmem_page_offset,
787 user_pages[data_page_index],
788 data_page_offset,
789 page_length);
790 if (ret)
791 goto fail_put_pages;
792
793 remain -= page_length;
794 data_ptr += page_length;
795 offset += page_length;
346 } 796 }
347 797
798fail_put_pages:
799 i915_gem_object_put_pages(obj);
800fail_unlock:
348 mutex_unlock(&dev->struct_mutex); 801 mutex_unlock(&dev->struct_mutex);
802fail_put_user_pages:
803 for (i = 0; i < pinned_pages; i++)
804 page_cache_release(user_pages[i]);
805 kfree(user_pages);
349 806
350 return 0; 807 return ret;
351} 808}
352 809
353/** 810/**
@@ -388,10 +845,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
388 if (obj_priv->phys_obj) 845 if (obj_priv->phys_obj)
389 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); 846 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
390 else if (obj_priv->tiling_mode == I915_TILING_NONE && 847 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
391 dev->gtt_total != 0) 848 dev->gtt_total != 0) {
392 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); 849 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
393 else 850 if (ret == -EFAULT) {
394 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); 851 ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
852 file_priv);
853 }
854 } else {
855 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
856 if (ret == -EFAULT) {
857 ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
858 file_priv);
859 }
860 }
395 861
396#if WATCH_PWRITE 862#if WATCH_PWRITE
397 if (ret) 863 if (ret)
@@ -816,29 +1282,30 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
816} 1282}
817 1283
818static void 1284static void
819i915_gem_object_free_page_list(struct drm_gem_object *obj) 1285i915_gem_object_put_pages(struct drm_gem_object *obj)
820{ 1286{
821 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1287 struct drm_i915_gem_object *obj_priv = obj->driver_private;
822 int page_count = obj->size / PAGE_SIZE; 1288 int page_count = obj->size / PAGE_SIZE;
823 int i; 1289 int i;
824 1290
825 if (obj_priv->page_list == NULL) 1291 BUG_ON(obj_priv->pages_refcount == 0);
826 return;
827 1292
1293 if (--obj_priv->pages_refcount != 0)
1294 return;
828 1295
829 for (i = 0; i < page_count; i++) 1296 for (i = 0; i < page_count; i++)
830 if (obj_priv->page_list[i] != NULL) { 1297 if (obj_priv->pages[i] != NULL) {
831 if (obj_priv->dirty) 1298 if (obj_priv->dirty)
832 set_page_dirty(obj_priv->page_list[i]); 1299 set_page_dirty(obj_priv->pages[i]);
833 mark_page_accessed(obj_priv->page_list[i]); 1300 mark_page_accessed(obj_priv->pages[i]);
834 page_cache_release(obj_priv->page_list[i]); 1301 page_cache_release(obj_priv->pages[i]);
835 } 1302 }
836 obj_priv->dirty = 0; 1303 obj_priv->dirty = 0;
837 1304
838 drm_free(obj_priv->page_list, 1305 drm_free(obj_priv->pages,
839 page_count * sizeof(struct page *), 1306 page_count * sizeof(struct page *),
840 DRM_MEM_DRIVER); 1307 DRM_MEM_DRIVER);
841 obj_priv->page_list = NULL; 1308 obj_priv->pages = NULL;
842} 1309}
843 1310
844static void 1311static void
@@ -1290,7 +1757,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1290 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 1757 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1291 i915_gem_clear_fence_reg(obj); 1758 i915_gem_clear_fence_reg(obj);
1292 1759
1293 i915_gem_object_free_page_list(obj); 1760 i915_gem_object_put_pages(obj);
1294 1761
1295 if (obj_priv->gtt_space) { 1762 if (obj_priv->gtt_space) {
1296 atomic_dec(&dev->gtt_count); 1763 atomic_dec(&dev->gtt_count);
@@ -1409,7 +1876,7 @@ i915_gem_evict_everything(struct drm_device *dev)
1409} 1876}
1410 1877
1411static int 1878static int
1412i915_gem_object_get_page_list(struct drm_gem_object *obj) 1879i915_gem_object_get_pages(struct drm_gem_object *obj)
1413{ 1880{
1414 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1881 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1415 int page_count, i; 1882 int page_count, i;
@@ -1418,18 +1885,19 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
1418 struct page *page; 1885 struct page *page;
1419 int ret; 1886 int ret;
1420 1887
1421 if (obj_priv->page_list) 1888 if (obj_priv->pages_refcount++ != 0)
1422 return 0; 1889 return 0;
1423 1890
1424 /* Get the list of pages out of our struct file. They'll be pinned 1891 /* Get the list of pages out of our struct file. They'll be pinned
1425 * at this point until we release them. 1892 * at this point until we release them.
1426 */ 1893 */
1427 page_count = obj->size / PAGE_SIZE; 1894 page_count = obj->size / PAGE_SIZE;
1428 BUG_ON(obj_priv->page_list != NULL); 1895 BUG_ON(obj_priv->pages != NULL);
1429 obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *), 1896 obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
1430 DRM_MEM_DRIVER); 1897 DRM_MEM_DRIVER);
1431 if (obj_priv->page_list == NULL) { 1898 if (obj_priv->pages == NULL) {
1432 DRM_ERROR("Faled to allocate page list\n"); 1899 DRM_ERROR("Faled to allocate page list\n");
1900 obj_priv->pages_refcount--;
1433 return -ENOMEM; 1901 return -ENOMEM;
1434 } 1902 }
1435 1903
@@ -1440,10 +1908,10 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
1440 if (IS_ERR(page)) { 1908 if (IS_ERR(page)) {
1441 ret = PTR_ERR(page); 1909 ret = PTR_ERR(page);
1442 DRM_ERROR("read_mapping_page failed: %d\n", ret); 1910 DRM_ERROR("read_mapping_page failed: %d\n", ret);
1443 i915_gem_object_free_page_list(obj); 1911 i915_gem_object_put_pages(obj);
1444 return ret; 1912 return ret;
1445 } 1913 }
1446 obj_priv->page_list[i] = page; 1914 obj_priv->pages[i] = page;
1447 } 1915 }
1448 return 0; 1916 return 0;
1449} 1917}
@@ -1766,7 +2234,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1766 DRM_INFO("Binding object of size %d at 0x%08x\n", 2234 DRM_INFO("Binding object of size %d at 0x%08x\n",
1767 obj->size, obj_priv->gtt_offset); 2235 obj->size, obj_priv->gtt_offset);
1768#endif 2236#endif
1769 ret = i915_gem_object_get_page_list(obj); 2237 ret = i915_gem_object_get_pages(obj);
1770 if (ret) { 2238 if (ret) {
1771 drm_mm_put_block(obj_priv->gtt_space); 2239 drm_mm_put_block(obj_priv->gtt_space);
1772 obj_priv->gtt_space = NULL; 2240 obj_priv->gtt_space = NULL;
@@ -1778,12 +2246,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1778 * into the GTT. 2246 * into the GTT.
1779 */ 2247 */
1780 obj_priv->agp_mem = drm_agp_bind_pages(dev, 2248 obj_priv->agp_mem = drm_agp_bind_pages(dev,
1781 obj_priv->page_list, 2249 obj_priv->pages,
1782 page_count, 2250 page_count,
1783 obj_priv->gtt_offset, 2251 obj_priv->gtt_offset,
1784 obj_priv->agp_type); 2252 obj_priv->agp_type);
1785 if (obj_priv->agp_mem == NULL) { 2253 if (obj_priv->agp_mem == NULL) {
1786 i915_gem_object_free_page_list(obj); 2254 i915_gem_object_put_pages(obj);
1787 drm_mm_put_block(obj_priv->gtt_space); 2255 drm_mm_put_block(obj_priv->gtt_space);
1788 obj_priv->gtt_space = NULL; 2256 obj_priv->gtt_space = NULL;
1789 return -ENOMEM; 2257 return -ENOMEM;
@@ -1810,10 +2278,10 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
1810 * to GPU, and we can ignore the cache flush because it'll happen 2278 * to GPU, and we can ignore the cache flush because it'll happen
1811 * again at bind time. 2279 * again at bind time.
1812 */ 2280 */
1813 if (obj_priv->page_list == NULL) 2281 if (obj_priv->pages == NULL)
1814 return; 2282 return;
1815 2283
1816 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); 2284 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
1817} 2285}
1818 2286
1819/** Flushes any GPU write domain for the object if it's dirty. */ 2287/** Flushes any GPU write domain for the object if it's dirty. */
@@ -1913,7 +2381,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
1913static int 2381static int
1914i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) 2382i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
1915{ 2383{
1916 struct drm_device *dev = obj->dev;
1917 int ret; 2384 int ret;
1918 2385
1919 i915_gem_object_flush_gpu_write_domain(obj); 2386 i915_gem_object_flush_gpu_write_domain(obj);
@@ -1932,7 +2399,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
1932 /* Flush the CPU cache if it's still invalid. */ 2399 /* Flush the CPU cache if it's still invalid. */
1933 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { 2400 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
1934 i915_gem_clflush_object(obj); 2401 i915_gem_clflush_object(obj);
1935 drm_agp_chipset_flush(dev);
1936 2402
1937 obj->read_domains |= I915_GEM_DOMAIN_CPU; 2403 obj->read_domains |= I915_GEM_DOMAIN_CPU;
1938 } 2404 }
@@ -2144,7 +2610,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2144static void 2610static void
2145i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) 2611i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2146{ 2612{
2147 struct drm_device *dev = obj->dev;
2148 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2613 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2149 2614
2150 if (!obj_priv->page_cpu_valid) 2615 if (!obj_priv->page_cpu_valid)
@@ -2158,9 +2623,8 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2158 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { 2623 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
2159 if (obj_priv->page_cpu_valid[i]) 2624 if (obj_priv->page_cpu_valid[i])
2160 continue; 2625 continue;
2161 drm_clflush_pages(obj_priv->page_list + i, 1); 2626 drm_clflush_pages(obj_priv->pages + i, 1);
2162 } 2627 }
2163 drm_agp_chipset_flush(dev);
2164 } 2628 }
2165 2629
2166 /* Free the page_cpu_valid mappings which are now stale, whether 2630 /* Free the page_cpu_valid mappings which are now stale, whether
@@ -2224,7 +2688,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2224 if (obj_priv->page_cpu_valid[i]) 2688 if (obj_priv->page_cpu_valid[i])
2225 continue; 2689 continue;
2226 2690
2227 drm_clflush_pages(obj_priv->page_list + i, 1); 2691 drm_clflush_pages(obj_priv->pages + i, 1);
2228 2692
2229 obj_priv->page_cpu_valid[i] = 1; 2693 obj_priv->page_cpu_valid[i] = 1;
2230 } 2694 }
@@ -2245,12 +2709,11 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2245static int 2709static int
2246i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, 2710i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2247 struct drm_file *file_priv, 2711 struct drm_file *file_priv,
2248 struct drm_i915_gem_exec_object *entry) 2712 struct drm_i915_gem_exec_object *entry,
2713 struct drm_i915_gem_relocation_entry *relocs)
2249{ 2714{
2250 struct drm_device *dev = obj->dev; 2715 struct drm_device *dev = obj->dev;
2251 drm_i915_private_t *dev_priv = dev->dev_private; 2716 drm_i915_private_t *dev_priv = dev->dev_private;
2252 struct drm_i915_gem_relocation_entry reloc;
2253 struct drm_i915_gem_relocation_entry __user *relocs;
2254 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2717 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2255 int i, ret; 2718 int i, ret;
2256 void __iomem *reloc_page; 2719 void __iomem *reloc_page;
@@ -2262,25 +2725,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2262 2725
2263 entry->offset = obj_priv->gtt_offset; 2726 entry->offset = obj_priv->gtt_offset;
2264 2727
2265 relocs = (struct drm_i915_gem_relocation_entry __user *)
2266 (uintptr_t) entry->relocs_ptr;
2267 /* Apply the relocations, using the GTT aperture to avoid cache 2728 /* Apply the relocations, using the GTT aperture to avoid cache
2268 * flushing requirements. 2729 * flushing requirements.
2269 */ 2730 */
2270 for (i = 0; i < entry->relocation_count; i++) { 2731 for (i = 0; i < entry->relocation_count; i++) {
2732 struct drm_i915_gem_relocation_entry *reloc= &relocs[i];
2271 struct drm_gem_object *target_obj; 2733 struct drm_gem_object *target_obj;
2272 struct drm_i915_gem_object *target_obj_priv; 2734 struct drm_i915_gem_object *target_obj_priv;
2273 uint32_t reloc_val, reloc_offset; 2735 uint32_t reloc_val, reloc_offset;
2274 uint32_t __iomem *reloc_entry; 2736 uint32_t __iomem *reloc_entry;
2275 2737
2276 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
2277 if (ret != 0) {
2278 i915_gem_object_unpin(obj);
2279 return ret;
2280 }
2281
2282 target_obj = drm_gem_object_lookup(obj->dev, file_priv, 2738 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
2283 reloc.target_handle); 2739 reloc->target_handle);
2284 if (target_obj == NULL) { 2740 if (target_obj == NULL) {
2285 i915_gem_object_unpin(obj); 2741 i915_gem_object_unpin(obj);
2286 return -EBADF; 2742 return -EBADF;
@@ -2292,53 +2748,53 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2292 */ 2748 */
2293 if (target_obj_priv->gtt_space == NULL) { 2749 if (target_obj_priv->gtt_space == NULL) {
2294 DRM_ERROR("No GTT space found for object %d\n", 2750 DRM_ERROR("No GTT space found for object %d\n",
2295 reloc.target_handle); 2751 reloc->target_handle);
2296 drm_gem_object_unreference(target_obj); 2752 drm_gem_object_unreference(target_obj);
2297 i915_gem_object_unpin(obj); 2753 i915_gem_object_unpin(obj);
2298 return -EINVAL; 2754 return -EINVAL;
2299 } 2755 }
2300 2756
2301 if (reloc.offset > obj->size - 4) { 2757 if (reloc->offset > obj->size - 4) {
2302 DRM_ERROR("Relocation beyond object bounds: " 2758 DRM_ERROR("Relocation beyond object bounds: "
2303 "obj %p target %d offset %d size %d.\n", 2759 "obj %p target %d offset %d size %d.\n",
2304 obj, reloc.target_handle, 2760 obj, reloc->target_handle,
2305 (int) reloc.offset, (int) obj->size); 2761 (int) reloc->offset, (int) obj->size);
2306 drm_gem_object_unreference(target_obj); 2762 drm_gem_object_unreference(target_obj);
2307 i915_gem_object_unpin(obj); 2763 i915_gem_object_unpin(obj);
2308 return -EINVAL; 2764 return -EINVAL;
2309 } 2765 }
2310 if (reloc.offset & 3) { 2766 if (reloc->offset & 3) {
2311 DRM_ERROR("Relocation not 4-byte aligned: " 2767 DRM_ERROR("Relocation not 4-byte aligned: "
2312 "obj %p target %d offset %d.\n", 2768 "obj %p target %d offset %d.\n",
2313 obj, reloc.target_handle, 2769 obj, reloc->target_handle,
2314 (int) reloc.offset); 2770 (int) reloc->offset);
2315 drm_gem_object_unreference(target_obj); 2771 drm_gem_object_unreference(target_obj);
2316 i915_gem_object_unpin(obj); 2772 i915_gem_object_unpin(obj);
2317 return -EINVAL; 2773 return -EINVAL;
2318 } 2774 }
2319 2775
2320 if (reloc.write_domain & I915_GEM_DOMAIN_CPU || 2776 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
2321 reloc.read_domains & I915_GEM_DOMAIN_CPU) { 2777 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
2322 DRM_ERROR("reloc with read/write CPU domains: " 2778 DRM_ERROR("reloc with read/write CPU domains: "
2323 "obj %p target %d offset %d " 2779 "obj %p target %d offset %d "
2324 "read %08x write %08x", 2780 "read %08x write %08x",
2325 obj, reloc.target_handle, 2781 obj, reloc->target_handle,
2326 (int) reloc.offset, 2782 (int) reloc->offset,
2327 reloc.read_domains, 2783 reloc->read_domains,
2328 reloc.write_domain); 2784 reloc->write_domain);
2329 drm_gem_object_unreference(target_obj); 2785 drm_gem_object_unreference(target_obj);
2330 i915_gem_object_unpin(obj); 2786 i915_gem_object_unpin(obj);
2331 return -EINVAL; 2787 return -EINVAL;
2332 } 2788 }
2333 2789
2334 if (reloc.write_domain && target_obj->pending_write_domain && 2790 if (reloc->write_domain && target_obj->pending_write_domain &&
2335 reloc.write_domain != target_obj->pending_write_domain) { 2791 reloc->write_domain != target_obj->pending_write_domain) {
2336 DRM_ERROR("Write domain conflict: " 2792 DRM_ERROR("Write domain conflict: "
2337 "obj %p target %d offset %d " 2793 "obj %p target %d offset %d "
2338 "new %08x old %08x\n", 2794 "new %08x old %08x\n",
2339 obj, reloc.target_handle, 2795 obj, reloc->target_handle,
2340 (int) reloc.offset, 2796 (int) reloc->offset,
2341 reloc.write_domain, 2797 reloc->write_domain,
2342 target_obj->pending_write_domain); 2798 target_obj->pending_write_domain);
2343 drm_gem_object_unreference(target_obj); 2799 drm_gem_object_unreference(target_obj);
2344 i915_gem_object_unpin(obj); 2800 i915_gem_object_unpin(obj);
@@ -2351,22 +2807,22 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2351 "presumed %08x delta %08x\n", 2807 "presumed %08x delta %08x\n",
2352 __func__, 2808 __func__,
2353 obj, 2809 obj,
2354 (int) reloc.offset, 2810 (int) reloc->offset,
2355 (int) reloc.target_handle, 2811 (int) reloc->target_handle,
2356 (int) reloc.read_domains, 2812 (int) reloc->read_domains,
2357 (int) reloc.write_domain, 2813 (int) reloc->write_domain,
2358 (int) target_obj_priv->gtt_offset, 2814 (int) target_obj_priv->gtt_offset,
2359 (int) reloc.presumed_offset, 2815 (int) reloc->presumed_offset,
2360 reloc.delta); 2816 reloc->delta);
2361#endif 2817#endif
2362 2818
2363 target_obj->pending_read_domains |= reloc.read_domains; 2819 target_obj->pending_read_domains |= reloc->read_domains;
2364 target_obj->pending_write_domain |= reloc.write_domain; 2820 target_obj->pending_write_domain |= reloc->write_domain;
2365 2821
2366 /* If the relocation already has the right value in it, no 2822 /* If the relocation already has the right value in it, no
2367 * more work needs to be done. 2823 * more work needs to be done.
2368 */ 2824 */
2369 if (target_obj_priv->gtt_offset == reloc.presumed_offset) { 2825 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
2370 drm_gem_object_unreference(target_obj); 2826 drm_gem_object_unreference(target_obj);
2371 continue; 2827 continue;
2372 } 2828 }
@@ -2381,32 +2837,26 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2381 /* Map the page containing the relocation we're going to 2837 /* Map the page containing the relocation we're going to
2382 * perform. 2838 * perform.
2383 */ 2839 */
2384 reloc_offset = obj_priv->gtt_offset + reloc.offset; 2840 reloc_offset = obj_priv->gtt_offset + reloc->offset;
2385 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 2841 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
2386 (reloc_offset & 2842 (reloc_offset &
2387 ~(PAGE_SIZE - 1))); 2843 ~(PAGE_SIZE - 1)));
2388 reloc_entry = (uint32_t __iomem *)(reloc_page + 2844 reloc_entry = (uint32_t __iomem *)(reloc_page +
2389 (reloc_offset & (PAGE_SIZE - 1))); 2845 (reloc_offset & (PAGE_SIZE - 1)));
2390 reloc_val = target_obj_priv->gtt_offset + reloc.delta; 2846 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
2391 2847
2392#if WATCH_BUF 2848#if WATCH_BUF
2393 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", 2849 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
2394 obj, (unsigned int) reloc.offset, 2850 obj, (unsigned int) reloc->offset,
2395 readl(reloc_entry), reloc_val); 2851 readl(reloc_entry), reloc_val);
2396#endif 2852#endif
2397 writel(reloc_val, reloc_entry); 2853 writel(reloc_val, reloc_entry);
2398 io_mapping_unmap_atomic(reloc_page); 2854 io_mapping_unmap_atomic(reloc_page);
2399 2855
2400 /* Write the updated presumed offset for this entry back out 2856 /* The updated presumed offset for this entry will be
2401 * to the user. 2857 * copied back out to the user.
2402 */ 2858 */
2403 reloc.presumed_offset = target_obj_priv->gtt_offset; 2859 reloc->presumed_offset = target_obj_priv->gtt_offset;
2404 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
2405 if (ret != 0) {
2406 drm_gem_object_unreference(target_obj);
2407 i915_gem_object_unpin(obj);
2408 return ret;
2409 }
2410 2860
2411 drm_gem_object_unreference(target_obj); 2861 drm_gem_object_unreference(target_obj);
2412 } 2862 }
@@ -2423,11 +2873,10 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2423static int 2873static int
2424i915_dispatch_gem_execbuffer(struct drm_device *dev, 2874i915_dispatch_gem_execbuffer(struct drm_device *dev,
2425 struct drm_i915_gem_execbuffer *exec, 2875 struct drm_i915_gem_execbuffer *exec,
2876 struct drm_clip_rect *cliprects,
2426 uint64_t exec_offset) 2877 uint64_t exec_offset)
2427{ 2878{
2428 drm_i915_private_t *dev_priv = dev->dev_private; 2879 drm_i915_private_t *dev_priv = dev->dev_private;
2429 struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
2430 (uintptr_t) exec->cliprects_ptr;
2431 int nbox = exec->num_cliprects; 2880 int nbox = exec->num_cliprects;
2432 int i = 0, count; 2881 int i = 0, count;
2433 uint32_t exec_start, exec_len; 2882 uint32_t exec_start, exec_len;
@@ -2448,7 +2897,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
2448 2897
2449 for (i = 0; i < count; i++) { 2898 for (i = 0; i < count; i++) {
2450 if (i < nbox) { 2899 if (i < nbox) {
2451 int ret = i915_emit_box(dev, boxes, i, 2900 int ret = i915_emit_box(dev, cliprects, i,
2452 exec->DR1, exec->DR4); 2901 exec->DR1, exec->DR4);
2453 if (ret) 2902 if (ret)
2454 return ret; 2903 return ret;
@@ -2504,6 +2953,75 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
2504 return ret; 2953 return ret;
2505} 2954}
2506 2955
2956static int
2957i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
2958 uint32_t buffer_count,
2959 struct drm_i915_gem_relocation_entry **relocs)
2960{
2961 uint32_t reloc_count = 0, reloc_index = 0, i;
2962 int ret;
2963
2964 *relocs = NULL;
2965 for (i = 0; i < buffer_count; i++) {
2966 if (reloc_count + exec_list[i].relocation_count < reloc_count)
2967 return -EINVAL;
2968 reloc_count += exec_list[i].relocation_count;
2969 }
2970
2971 *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER);
2972 if (*relocs == NULL)
2973 return -ENOMEM;
2974
2975 for (i = 0; i < buffer_count; i++) {
2976 struct drm_i915_gem_relocation_entry __user *user_relocs;
2977
2978 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
2979
2980 ret = copy_from_user(&(*relocs)[reloc_index],
2981 user_relocs,
2982 exec_list[i].relocation_count *
2983 sizeof(**relocs));
2984 if (ret != 0) {
2985 drm_free(*relocs, reloc_count * sizeof(**relocs),
2986 DRM_MEM_DRIVER);
2987 *relocs = NULL;
2988 return ret;
2989 }
2990
2991 reloc_index += exec_list[i].relocation_count;
2992 }
2993
2994 return ret;
2995}
2996
2997static int
2998i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
2999 uint32_t buffer_count,
3000 struct drm_i915_gem_relocation_entry *relocs)
3001{
3002 uint32_t reloc_count = 0, i;
3003 int ret;
3004
3005 for (i = 0; i < buffer_count; i++) {
3006 struct drm_i915_gem_relocation_entry __user *user_relocs;
3007
3008 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3009
3010 if (ret == 0) {
3011 ret = copy_to_user(user_relocs,
3012 &relocs[reloc_count],
3013 exec_list[i].relocation_count *
3014 sizeof(*relocs));
3015 }
3016
3017 reloc_count += exec_list[i].relocation_count;
3018 }
3019
3020 drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
3021
3022 return ret;
3023}
3024
2507int 3025int
2508i915_gem_execbuffer(struct drm_device *dev, void *data, 3026i915_gem_execbuffer(struct drm_device *dev, void *data,
2509 struct drm_file *file_priv) 3027 struct drm_file *file_priv)
@@ -2515,9 +3033,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2515 struct drm_gem_object **object_list = NULL; 3033 struct drm_gem_object **object_list = NULL;
2516 struct drm_gem_object *batch_obj; 3034 struct drm_gem_object *batch_obj;
2517 struct drm_i915_gem_object *obj_priv; 3035 struct drm_i915_gem_object *obj_priv;
2518 int ret, i, pinned = 0; 3036 struct drm_clip_rect *cliprects = NULL;
3037 struct drm_i915_gem_relocation_entry *relocs;
3038 int ret, ret2, i, pinned = 0;
2519 uint64_t exec_offset; 3039 uint64_t exec_offset;
2520 uint32_t seqno, flush_domains; 3040 uint32_t seqno, flush_domains, reloc_index;
2521 int pin_tries; 3041 int pin_tries;
2522 3042
2523#if WATCH_EXEC 3043#if WATCH_EXEC
@@ -2551,6 +3071,28 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2551 goto pre_mutex_err; 3071 goto pre_mutex_err;
2552 } 3072 }
2553 3073
3074 if (args->num_cliprects != 0) {
3075 cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
3076 DRM_MEM_DRIVER);
3077 if (cliprects == NULL)
3078 goto pre_mutex_err;
3079
3080 ret = copy_from_user(cliprects,
3081 (struct drm_clip_rect __user *)
3082 (uintptr_t) args->cliprects_ptr,
3083 sizeof(*cliprects) * args->num_cliprects);
3084 if (ret != 0) {
3085 DRM_ERROR("copy %d cliprects failed: %d\n",
3086 args->num_cliprects, ret);
3087 goto pre_mutex_err;
3088 }
3089 }
3090
3091 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3092 &relocs);
3093 if (ret != 0)
3094 goto pre_mutex_err;
3095
2554 mutex_lock(&dev->struct_mutex); 3096 mutex_lock(&dev->struct_mutex);
2555 3097
2556 i915_verify_inactive(dev, __FILE__, __LINE__); 3098 i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2593,15 +3135,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2593 /* Pin and relocate */ 3135 /* Pin and relocate */
2594 for (pin_tries = 0; ; pin_tries++) { 3136 for (pin_tries = 0; ; pin_tries++) {
2595 ret = 0; 3137 ret = 0;
3138 reloc_index = 0;
3139
2596 for (i = 0; i < args->buffer_count; i++) { 3140 for (i = 0; i < args->buffer_count; i++) {
2597 object_list[i]->pending_read_domains = 0; 3141 object_list[i]->pending_read_domains = 0;
2598 object_list[i]->pending_write_domain = 0; 3142 object_list[i]->pending_write_domain = 0;
2599 ret = i915_gem_object_pin_and_relocate(object_list[i], 3143 ret = i915_gem_object_pin_and_relocate(object_list[i],
2600 file_priv, 3144 file_priv,
2601 &exec_list[i]); 3145 &exec_list[i],
3146 &relocs[reloc_index]);
2602 if (ret) 3147 if (ret)
2603 break; 3148 break;
2604 pinned = i + 1; 3149 pinned = i + 1;
3150 reloc_index += exec_list[i].relocation_count;
2605 } 3151 }
2606 /* success */ 3152 /* success */
2607 if (ret == 0) 3153 if (ret == 0)
@@ -2687,7 +3233,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2687#endif 3233#endif
2688 3234
2689 /* Exec the batchbuffer */ 3235 /* Exec the batchbuffer */
2690 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); 3236 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
2691 if (ret) { 3237 if (ret) {
2692 DRM_ERROR("dispatch failed %d\n", ret); 3238 DRM_ERROR("dispatch failed %d\n", ret);
2693 goto err; 3239 goto err;
@@ -2751,11 +3297,27 @@ err:
2751 args->buffer_count, ret); 3297 args->buffer_count, ret);
2752 } 3298 }
2753 3299
3300 /* Copy the updated relocations out regardless of current error
3301 * state. Failure to update the relocs would mean that the next
3302 * time userland calls execbuf, it would do so with presumed offset
3303 * state that didn't match the actual object state.
3304 */
3305 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3306 relocs);
3307 if (ret2 != 0) {
3308 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3309
3310 if (ret == 0)
3311 ret = ret2;
3312 }
3313
2754pre_mutex_err: 3314pre_mutex_err:
2755 drm_free(object_list, sizeof(*object_list) * args->buffer_count, 3315 drm_free(object_list, sizeof(*object_list) * args->buffer_count,
2756 DRM_MEM_DRIVER); 3316 DRM_MEM_DRIVER);
2757 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, 3317 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
2758 DRM_MEM_DRIVER); 3318 DRM_MEM_DRIVER);
3319 drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
3320 DRM_MEM_DRIVER);
2759 3321
2760 return ret; 3322 return ret;
2761} 3323}
@@ -3192,7 +3754,7 @@ i915_gem_init_hws(struct drm_device *dev)
3192 3754
3193 dev_priv->status_gfx_addr = obj_priv->gtt_offset; 3755 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
3194 3756
3195 dev_priv->hw_status_page = kmap(obj_priv->page_list[0]); 3757 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
3196 if (dev_priv->hw_status_page == NULL) { 3758 if (dev_priv->hw_status_page == NULL) {
3197 DRM_ERROR("Failed to map status page.\n"); 3759 DRM_ERROR("Failed to map status page.\n");
3198 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 3760 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
@@ -3222,7 +3784,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
3222 obj = dev_priv->hws_obj; 3784 obj = dev_priv->hws_obj;
3223 obj_priv = obj->driver_private; 3785 obj_priv = obj->driver_private;
3224 3786
3225 kunmap(obj_priv->page_list[0]); 3787 kunmap(obj_priv->pages[0]);
3226 i915_gem_object_unpin(obj); 3788 i915_gem_object_unpin(obj);
3227 drm_gem_object_unreference(obj); 3789 drm_gem_object_unreference(obj);
3228 dev_priv->hws_obj = NULL; 3790 dev_priv->hws_obj = NULL;
@@ -3525,20 +4087,20 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
3525 if (!obj_priv->phys_obj) 4087 if (!obj_priv->phys_obj)
3526 return; 4088 return;
3527 4089
3528 ret = i915_gem_object_get_page_list(obj); 4090 ret = i915_gem_object_get_pages(obj);
3529 if (ret) 4091 if (ret)
3530 goto out; 4092 goto out;
3531 4093
3532 page_count = obj->size / PAGE_SIZE; 4094 page_count = obj->size / PAGE_SIZE;
3533 4095
3534 for (i = 0; i < page_count; i++) { 4096 for (i = 0; i < page_count; i++) {
3535 char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0); 4097 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
3536 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); 4098 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3537 4099
3538 memcpy(dst, src, PAGE_SIZE); 4100 memcpy(dst, src, PAGE_SIZE);
3539 kunmap_atomic(dst, KM_USER0); 4101 kunmap_atomic(dst, KM_USER0);
3540 } 4102 }
3541 drm_clflush_pages(obj_priv->page_list, page_count); 4103 drm_clflush_pages(obj_priv->pages, page_count);
3542 drm_agp_chipset_flush(dev); 4104 drm_agp_chipset_flush(dev);
3543out: 4105out:
3544 obj_priv->phys_obj->cur_obj = NULL; 4106 obj_priv->phys_obj->cur_obj = NULL;
@@ -3581,7 +4143,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
3581 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; 4143 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
3582 obj_priv->phys_obj->cur_obj = obj; 4144 obj_priv->phys_obj->cur_obj = obj;
3583 4145
3584 ret = i915_gem_object_get_page_list(obj); 4146 ret = i915_gem_object_get_pages(obj);
3585 if (ret) { 4147 if (ret) {
3586 DRM_ERROR("failed to get page list\n"); 4148 DRM_ERROR("failed to get page list\n");
3587 goto out; 4149 goto out;
@@ -3590,7 +4152,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
3590 page_count = obj->size / PAGE_SIZE; 4152 page_count = obj->size / PAGE_SIZE;
3591 4153
3592 for (i = 0; i < page_count; i++) { 4154 for (i = 0; i < page_count; i++) {
3593 char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0); 4155 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
3594 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); 4156 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3595 4157
3596 memcpy(dst, src, PAGE_SIZE); 4158 memcpy(dst, src, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
new file mode 100644
index 000000000000..455ec970b385
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -0,0 +1,257 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
29#include <linux/seq_file.h>
30#include "drmP.h"
31#include "drm.h"
32#include "i915_drm.h"
33#include "i915_drv.h"
34
35#define DRM_I915_RING_DEBUG 1
36
37
38#if defined(CONFIG_DEBUG_FS)
39
40#define ACTIVE_LIST 1
41#define FLUSHING_LIST 2
42#define INACTIVE_LIST 3
43
44static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
45{
46 if (obj_priv->user_pin_count > 0)
47 return "P";
48 else if (obj_priv->pin_count > 0)
49 return "p";
50 else
51 return " ";
52}
53
54static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
55{
56 switch (obj_priv->tiling_mode) {
57 default:
58 case I915_TILING_NONE: return " ";
59 case I915_TILING_X: return "X";
60 case I915_TILING_Y: return "Y";
61 }
62}
63
64static int i915_gem_object_list_info(struct seq_file *m, void *data)
65{
66 struct drm_info_node *node = (struct drm_info_node *) m->private;
67 uintptr_t list = (uintptr_t) node->info_ent->data;
68 struct list_head *head;
69 struct drm_device *dev = node->minor->dev;
70 drm_i915_private_t *dev_priv = dev->dev_private;
71 struct drm_i915_gem_object *obj_priv;
72
73 switch (list) {
74 case ACTIVE_LIST:
75 seq_printf(m, "Active:\n");
76 head = &dev_priv->mm.active_list;
77 break;
78 case INACTIVE_LIST:
79 seq_printf(m, "Inctive:\n");
80 head = &dev_priv->mm.inactive_list;
81 break;
82 case FLUSHING_LIST:
83 seq_printf(m, "Flushing:\n");
84 head = &dev_priv->mm.flushing_list;
85 break;
86 default:
87 DRM_INFO("Ooops, unexpected list\n");
88 return 0;
89 }
90
91 list_for_each_entry(obj_priv, head, list)
92 {
93 struct drm_gem_object *obj = obj_priv->obj;
94
95 seq_printf(m, " %p: %s %08x %08x %d",
96 obj,
97 get_pin_flag(obj_priv),
98 obj->read_domains, obj->write_domain,
99 obj_priv->last_rendering_seqno);
100
101 if (obj->name)
102 seq_printf(m, " (name: %d)", obj->name);
103 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
104 seq_printf(m, " (fence: %d\n", obj_priv->fence_reg);
105 seq_printf(m, "\n");
106 }
107 return 0;
108}
109
110static int i915_gem_request_info(struct seq_file *m, void *data)
111{
112 struct drm_info_node *node = (struct drm_info_node *) m->private;
113 struct drm_device *dev = node->minor->dev;
114 drm_i915_private_t *dev_priv = dev->dev_private;
115 struct drm_i915_gem_request *gem_request;
116
117 seq_printf(m, "Request:\n");
118 list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
119 seq_printf(m, " %d @ %d\n",
120 gem_request->seqno,
121 (int) (jiffies - gem_request->emitted_jiffies));
122 }
123 return 0;
124}
125
126static int i915_gem_seqno_info(struct seq_file *m, void *data)
127{
128 struct drm_info_node *node = (struct drm_info_node *) m->private;
129 struct drm_device *dev = node->minor->dev;
130 drm_i915_private_t *dev_priv = dev->dev_private;
131
132 if (dev_priv->hw_status_page != NULL) {
133 seq_printf(m, "Current sequence: %d\n",
134 i915_get_gem_seqno(dev));
135 } else {
136 seq_printf(m, "Current sequence: hws uninitialized\n");
137 }
138 seq_printf(m, "Waiter sequence: %d\n",
139 dev_priv->mm.waiting_gem_seqno);
140 seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
141 return 0;
142}
143
144
145static int i915_interrupt_info(struct seq_file *m, void *data)
146{
147 struct drm_info_node *node = (struct drm_info_node *) m->private;
148 struct drm_device *dev = node->minor->dev;
149 drm_i915_private_t *dev_priv = dev->dev_private;
150
151 seq_printf(m, "Interrupt enable: %08x\n",
152 I915_READ(IER));
153 seq_printf(m, "Interrupt identity: %08x\n",
154 I915_READ(IIR));
155 seq_printf(m, "Interrupt mask: %08x\n",
156 I915_READ(IMR));
157 seq_printf(m, "Pipe A stat: %08x\n",
158 I915_READ(PIPEASTAT));
159 seq_printf(m, "Pipe B stat: %08x\n",
160 I915_READ(PIPEBSTAT));
161 seq_printf(m, "Interrupts received: %d\n",
162 atomic_read(&dev_priv->irq_received));
163 if (dev_priv->hw_status_page != NULL) {
164 seq_printf(m, "Current sequence: %d\n",
165 i915_get_gem_seqno(dev));
166 } else {
167 seq_printf(m, "Current sequence: hws uninitialized\n");
168 }
169 seq_printf(m, "Waiter sequence: %d\n",
170 dev_priv->mm.waiting_gem_seqno);
171 seq_printf(m, "IRQ sequence: %d\n",
172 dev_priv->mm.irq_gem_seqno);
173 return 0;
174}
175
176static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
177{
178 struct drm_info_node *node = (struct drm_info_node *) m->private;
179 struct drm_device *dev = node->minor->dev;
180 drm_i915_private_t *dev_priv = dev->dev_private;
181 int i;
182
183 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
184 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
185 for (i = 0; i < dev_priv->num_fence_regs; i++) {
186 struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
187
188 if (obj == NULL) {
189 seq_printf(m, "Fenced object[%2d] = unused\n", i);
190 } else {
191 struct drm_i915_gem_object *obj_priv;
192
193 obj_priv = obj->driver_private;
194 seq_printf(m, "Fenced object[%2d] = %p: %s "
195 "%08x %08zx %08x %s %08x %08x %d",
196 i, obj, get_pin_flag(obj_priv),
197 obj_priv->gtt_offset,
198 obj->size, obj_priv->stride,
199 get_tiling_flag(obj_priv),
200 obj->read_domains, obj->write_domain,
201 obj_priv->last_rendering_seqno);
202 if (obj->name)
203 seq_printf(m, " (name: %d)", obj->name);
204 seq_printf(m, "\n");
205 }
206 }
207
208 return 0;
209}
210
211static int i915_hws_info(struct seq_file *m, void *data)
212{
213 struct drm_info_node *node = (struct drm_info_node *) m->private;
214 struct drm_device *dev = node->minor->dev;
215 drm_i915_private_t *dev_priv = dev->dev_private;
216 int i;
217 volatile u32 *hws;
218
219 hws = (volatile u32 *)dev_priv->hw_status_page;
220 if (hws == NULL)
221 return 0;
222
223 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
224 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
225 i * 4,
226 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
227 }
228 return 0;
229}
230
231static struct drm_info_list i915_gem_debugfs_list[] = {
232 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
233 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
234 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
235 {"i915_gem_request", i915_gem_request_info, 0},
236 {"i915_gem_seqno", i915_gem_seqno_info, 0},
237 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
238 {"i915_gem_interrupt", i915_interrupt_info, 0},
239 {"i915_gem_hws", i915_hws_info, 0},
240};
241#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
242
243int i915_gem_debugfs_init(struct drm_minor *minor)
244{
245 return drm_debugfs_create_files(i915_gem_debugfs_list,
246 I915_GEM_DEBUGFS_ENTRIES,
247 minor->debugfs_root, minor);
248}
249
250void i915_gem_debugfs_cleanup(struct drm_minor *minor)
251{
252 drm_debugfs_remove_files(i915_gem_debugfs_list,
253 I915_GEM_DEBUGFS_ENTRIES, minor);
254}
255
256#endif /* CONFIG_DEBUG_FS */
257
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
deleted file mode 100644
index 4d1b9de0cd8b..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ /dev/null
@@ -1,334 +0,0 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33
34static int i915_gem_active_info(char *buf, char **start, off_t offset,
35 int request, int *eof, void *data)
36{
37 struct drm_minor *minor = (struct drm_minor *) data;
38 struct drm_device *dev = minor->dev;
39 drm_i915_private_t *dev_priv = dev->dev_private;
40 struct drm_i915_gem_object *obj_priv;
41 int len = 0;
42
43 if (offset > DRM_PROC_LIMIT) {
44 *eof = 1;
45 return 0;
46 }
47
48 *start = &buf[offset];
49 *eof = 0;
50 DRM_PROC_PRINT("Active:\n");
51 list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
52 list)
53 {
54 struct drm_gem_object *obj = obj_priv->obj;
55 if (obj->name) {
56 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
57 obj, obj->name,
58 obj->read_domains, obj->write_domain,
59 obj_priv->last_rendering_seqno);
60 } else {
61 DRM_PROC_PRINT(" %p: %08x %08x %d\n",
62 obj,
63 obj->read_domains, obj->write_domain,
64 obj_priv->last_rendering_seqno);
65 }
66 }
67 if (len > request + offset)
68 return request;
69 *eof = 1;
70 return len - offset;
71}
72
73static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
74 int request, int *eof, void *data)
75{
76 struct drm_minor *minor = (struct drm_minor *) data;
77 struct drm_device *dev = minor->dev;
78 drm_i915_private_t *dev_priv = dev->dev_private;
79 struct drm_i915_gem_object *obj_priv;
80 int len = 0;
81
82 if (offset > DRM_PROC_LIMIT) {
83 *eof = 1;
84 return 0;
85 }
86
87 *start = &buf[offset];
88 *eof = 0;
89 DRM_PROC_PRINT("Flushing:\n");
90 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
91 list)
92 {
93 struct drm_gem_object *obj = obj_priv->obj;
94 if (obj->name) {
95 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
96 obj, obj->name,
97 obj->read_domains, obj->write_domain,
98 obj_priv->last_rendering_seqno);
99 } else {
100 DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
101 obj->read_domains, obj->write_domain,
102 obj_priv->last_rendering_seqno);
103 }
104 }
105 if (len > request + offset)
106 return request;
107 *eof = 1;
108 return len - offset;
109}
110
111static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
112 int request, int *eof, void *data)
113{
114 struct drm_minor *minor = (struct drm_minor *) data;
115 struct drm_device *dev = minor->dev;
116 drm_i915_private_t *dev_priv = dev->dev_private;
117 struct drm_i915_gem_object *obj_priv;
118 int len = 0;
119
120 if (offset > DRM_PROC_LIMIT) {
121 *eof = 1;
122 return 0;
123 }
124
125 *start = &buf[offset];
126 *eof = 0;
127 DRM_PROC_PRINT("Inactive:\n");
128 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
129 list)
130 {
131 struct drm_gem_object *obj = obj_priv->obj;
132 if (obj->name) {
133 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
134 obj, obj->name,
135 obj->read_domains, obj->write_domain,
136 obj_priv->last_rendering_seqno);
137 } else {
138 DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
139 obj->read_domains, obj->write_domain,
140 obj_priv->last_rendering_seqno);
141 }
142 }
143 if (len > request + offset)
144 return request;
145 *eof = 1;
146 return len - offset;
147}
148
149static int i915_gem_request_info(char *buf, char **start, off_t offset,
150 int request, int *eof, void *data)
151{
152 struct drm_minor *minor = (struct drm_minor *) data;
153 struct drm_device *dev = minor->dev;
154 drm_i915_private_t *dev_priv = dev->dev_private;
155 struct drm_i915_gem_request *gem_request;
156 int len = 0;
157
158 if (offset > DRM_PROC_LIMIT) {
159 *eof = 1;
160 return 0;
161 }
162
163 *start = &buf[offset];
164 *eof = 0;
165 DRM_PROC_PRINT("Request:\n");
166 list_for_each_entry(gem_request, &dev_priv->mm.request_list,
167 list)
168 {
169 DRM_PROC_PRINT(" %d @ %d\n",
170 gem_request->seqno,
171 (int) (jiffies - gem_request->emitted_jiffies));
172 }
173 if (len > request + offset)
174 return request;
175 *eof = 1;
176 return len - offset;
177}
178
179static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
180 int request, int *eof, void *data)
181{
182 struct drm_minor *minor = (struct drm_minor *) data;
183 struct drm_device *dev = minor->dev;
184 drm_i915_private_t *dev_priv = dev->dev_private;
185 int len = 0;
186
187 if (offset > DRM_PROC_LIMIT) {
188 *eof = 1;
189 return 0;
190 }
191
192 *start = &buf[offset];
193 *eof = 0;
194 if (dev_priv->hw_status_page != NULL) {
195 DRM_PROC_PRINT("Current sequence: %d\n",
196 i915_get_gem_seqno(dev));
197 } else {
198 DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
199 }
200 DRM_PROC_PRINT("Waiter sequence: %d\n",
201 dev_priv->mm.waiting_gem_seqno);
202 DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
203 if (len > request + offset)
204 return request;
205 *eof = 1;
206 return len - offset;
207}
208
209
210static int i915_interrupt_info(char *buf, char **start, off_t offset,
211 int request, int *eof, void *data)
212{
213 struct drm_minor *minor = (struct drm_minor *) data;
214 struct drm_device *dev = minor->dev;
215 drm_i915_private_t *dev_priv = dev->dev_private;
216 int len = 0;
217
218 if (offset > DRM_PROC_LIMIT) {
219 *eof = 1;
220 return 0;
221 }
222
223 *start = &buf[offset];
224 *eof = 0;
225 DRM_PROC_PRINT("Interrupt enable: %08x\n",
226 I915_READ(IER));
227 DRM_PROC_PRINT("Interrupt identity: %08x\n",
228 I915_READ(IIR));
229 DRM_PROC_PRINT("Interrupt mask: %08x\n",
230 I915_READ(IMR));
231 DRM_PROC_PRINT("Pipe A stat: %08x\n",
232 I915_READ(PIPEASTAT));
233 DRM_PROC_PRINT("Pipe B stat: %08x\n",
234 I915_READ(PIPEBSTAT));
235 DRM_PROC_PRINT("Interrupts received: %d\n",
236 atomic_read(&dev_priv->irq_received));
237 if (dev_priv->hw_status_page != NULL) {
238 DRM_PROC_PRINT("Current sequence: %d\n",
239 i915_get_gem_seqno(dev));
240 } else {
241 DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
242 }
243 DRM_PROC_PRINT("Waiter sequence: %d\n",
244 dev_priv->mm.waiting_gem_seqno);
245 DRM_PROC_PRINT("IRQ sequence: %d\n",
246 dev_priv->mm.irq_gem_seqno);
247 if (len > request + offset)
248 return request;
249 *eof = 1;
250 return len - offset;
251}
252
253static int i915_hws_info(char *buf, char **start, off_t offset,
254 int request, int *eof, void *data)
255{
256 struct drm_minor *minor = (struct drm_minor *) data;
257 struct drm_device *dev = minor->dev;
258 drm_i915_private_t *dev_priv = dev->dev_private;
259 int len = 0, i;
260 volatile u32 *hws;
261
262 if (offset > DRM_PROC_LIMIT) {
263 *eof = 1;
264 return 0;
265 }
266
267 hws = (volatile u32 *)dev_priv->hw_status_page;
268 if (hws == NULL) {
269 *eof = 1;
270 return 0;
271 }
272
273 *start = &buf[offset];
274 *eof = 0;
275 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
276 DRM_PROC_PRINT("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
277 i * 4,
278 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
279 }
280 if (len > request + offset)
281 return request;
282 *eof = 1;
283 return len - offset;
284}
285
286static struct drm_proc_list {
287 /** file name */
288 const char *name;
289 /** proc callback*/
290 int (*f) (char *, char **, off_t, int, int *, void *);
291} i915_gem_proc_list[] = {
292 {"i915_gem_active", i915_gem_active_info},
293 {"i915_gem_flushing", i915_gem_flushing_info},
294 {"i915_gem_inactive", i915_gem_inactive_info},
295 {"i915_gem_request", i915_gem_request_info},
296 {"i915_gem_seqno", i915_gem_seqno_info},
297 {"i915_gem_interrupt", i915_interrupt_info},
298 {"i915_gem_hws", i915_hws_info},
299};
300
301#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
302
303int i915_gem_proc_init(struct drm_minor *minor)
304{
305 struct proc_dir_entry *ent;
306 int i, j;
307
308 for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
309 ent = create_proc_entry(i915_gem_proc_list[i].name,
310 S_IFREG | S_IRUGO, minor->dev_root);
311 if (!ent) {
312 DRM_ERROR("Cannot create /proc/dri/.../%s\n",
313 i915_gem_proc_list[i].name);
314 for (j = 0; j < i; j++)
315 remove_proc_entry(i915_gem_proc_list[i].name,
316 minor->dev_root);
317 return -1;
318 }
319 ent->read_proc = i915_gem_proc_list[i].f;
320 ent->data = minor;
321 }
322 return 0;
323}
324
325void i915_gem_proc_cleanup(struct drm_minor *minor)
326{
327 int i;
328
329 if (!minor->dev_root)
330 return;
331
332 for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
333 remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
334}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7fb4191ef934..4cce1aef438e 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -96,16 +96,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
96 */ 96 */
97 swizzle_x = I915_BIT_6_SWIZZLE_NONE; 97 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
98 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 98 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
99 } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) || 99 } else if (IS_MOBILE(dev)) {
100 IS_GM45(dev)) {
101 uint32_t dcc; 100 uint32_t dcc;
102 101
103 /* On 915-945 and GM965, channel interleave by the CPU is 102 /* On mobile 9xx chipsets, channel interleave by the CPU is
104 * determined by DCC. The CPU will alternate based on bit 6 103 * determined by DCC. For single-channel, neither the CPU
105 * in interleaved mode, and the GPU will then also alternate 104 * nor the GPU do swizzling. For dual channel interleaved,
106 * on bit 6, 9, and 10 for X, but the CPU may also optionally 105 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
107 * alternate based on bit 17 (XOR not disabled and XOR 106 * 9 for Y tiled. The CPU's interleave is independent, and
108 * bit == 17). 107 * can be based on either bit 11 (haven't seen this yet) or
108 * bit 17 (common).
109 */ 109 */
110 dcc = I915_READ(DCC); 110 dcc = I915_READ(DCC);
111 switch (dcc & DCC_ADDRESSING_MODE_MASK) { 111 switch (dcc & DCC_ADDRESSING_MODE_MASK) {
@@ -115,19 +115,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
115 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 115 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
116 break; 116 break;
117 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: 117 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
118 if (IS_I915G(dev) || IS_I915GM(dev) || 118 if (dcc & DCC_CHANNEL_XOR_DISABLE) {
119 dcc & DCC_CHANNEL_XOR_DISABLE) { 119 /* This is the base swizzling by the GPU for
120 * tiled buffers.
121 */
120 swizzle_x = I915_BIT_6_SWIZZLE_9_10; 122 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
121 swizzle_y = I915_BIT_6_SWIZZLE_9; 123 swizzle_y = I915_BIT_6_SWIZZLE_9;
122 } else if ((IS_I965GM(dev) || IS_GM45(dev)) && 124 } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
123 (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { 125 /* Bit 11 swizzling by the CPU in addition. */
124 /* GM965/GM45 does either bit 11 or bit 17
125 * swizzling.
126 */
127 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; 126 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
128 swizzle_y = I915_BIT_6_SWIZZLE_9_11; 127 swizzle_y = I915_BIT_6_SWIZZLE_9_11;
129 } else { 128 } else {
130 /* Bit 17 or perhaps other swizzling */ 129 /* Bit 17 swizzling by the CPU in addition. */
131 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 130 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
132 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 131 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
133 } 132 }
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 90600d899413..377cc588f5e9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -359,6 +359,7 @@
359#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ 359#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
360#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ 360#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
361#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ 361#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
362#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */
362 363
363#define I915_FIFO_UNDERRUN_STATUS (1UL<<31) 364#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
364#define I915_CRC_ERROR_ENABLE (1UL<<29) 365#define I915_CRC_ERROR_ENABLE (1UL<<29)
@@ -435,6 +436,7 @@
435 */ 436 */
436#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 437#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
437#define DPLL_FPA01_P1_POST_DIV_SHIFT 16 438#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
439#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
438/* i830, required in DVO non-gang */ 440/* i830, required in DVO non-gang */
439#define PLL_P2_DIVIDE_BY_4 (1 << 23) 441#define PLL_P2_DIVIDE_BY_4 (1 << 23)
440#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ 442#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
@@ -501,10 +503,12 @@
501#define FPB0 0x06048 503#define FPB0 0x06048
502#define FPB1 0x0604c 504#define FPB1 0x0604c
503#define FP_N_DIV_MASK 0x003f0000 505#define FP_N_DIV_MASK 0x003f0000
506#define FP_N_IGD_DIV_MASK 0x00ff0000
504#define FP_N_DIV_SHIFT 16 507#define FP_N_DIV_SHIFT 16
505#define FP_M1_DIV_MASK 0x00003f00 508#define FP_M1_DIV_MASK 0x00003f00
506#define FP_M1_DIV_SHIFT 8 509#define FP_M1_DIV_SHIFT 8
507#define FP_M2_DIV_MASK 0x0000003f 510#define FP_M2_DIV_MASK 0x0000003f
511#define FP_M2_IGD_DIV_MASK 0x000000ff
508#define FP_M2_DIV_SHIFT 0 512#define FP_M2_DIV_SHIFT 0
509#define DPLL_TEST 0x606c 513#define DPLL_TEST 0x606c
510#define DPLLB_TEST_SDVO_DIV_1 (0 << 22) 514#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
@@ -629,6 +633,22 @@
629#define TV_HOTPLUG_INT_EN (1 << 18) 633#define TV_HOTPLUG_INT_EN (1 << 18)
630#define CRT_HOTPLUG_INT_EN (1 << 9) 634#define CRT_HOTPLUG_INT_EN (1 << 9)
631#define CRT_HOTPLUG_FORCE_DETECT (1 << 3) 635#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
636#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
637/* must use period 64 on GM45 according to docs */
638#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
639#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
640#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
641#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
642#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
643#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
644#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
645#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
646#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
647#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
648#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
649#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
650#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
651
632 652
633#define PORT_HOTPLUG_STAT 0x61114 653#define PORT_HOTPLUG_STAT 0x61114
634#define HDMIB_HOTPLUG_INT_STATUS (1 << 29) 654#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -856,7 +876,7 @@
856 */ 876 */
857# define TV_ENC_C0_FIX (1 << 10) 877# define TV_ENC_C0_FIX (1 << 10)
858/** Bits that must be preserved by software */ 878/** Bits that must be preserved by software */
859# define TV_CTL_SAVE ((3 << 8) | (3 << 6)) 879# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
860# define TV_FUSE_STATE_MASK (3 << 4) 880# define TV_FUSE_STATE_MASK (3 << 4)
861/** Read-only state that reports all features enabled */ 881/** Read-only state that reports all features enabled */
862# define TV_FUSE_STATE_ENABLED (0 << 4) 882# define TV_FUSE_STATE_ENABLED (0 << 4)
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 5ea715ace3a0..de621aad85b5 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -162,13 +162,13 @@ struct bdb_lvds_options {
162 u8 panel_type; 162 u8 panel_type;
163 u8 rsvd1; 163 u8 rsvd1;
164 /* LVDS capabilities, stored in a dword */ 164 /* LVDS capabilities, stored in a dword */
165 u8 rsvd2:1;
166 u8 lvds_edid:1;
167 u8 pixel_dither:1;
168 u8 pfit_ratio_auto:1;
169 u8 pfit_gfx_mode_enhanced:1;
170 u8 pfit_text_mode_enhanced:1;
171 u8 pfit_mode:2; 165 u8 pfit_mode:2;
166 u8 pfit_text_mode_enhanced:1;
167 u8 pfit_gfx_mode_enhanced:1;
168 u8 pfit_ratio_auto:1;
169 u8 pixel_dither:1;
170 u8 lvds_edid:1;
171 u8 rsvd2:1;
172 u8 rsvd4; 172 u8 rsvd4;
173} __attribute__((packed)); 173} __attribute__((packed));
174 174
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index dcaed3466e83..2b6d44381c31 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -64,11 +64,21 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
64static int intel_crt_mode_valid(struct drm_connector *connector, 64static int intel_crt_mode_valid(struct drm_connector *connector,
65 struct drm_display_mode *mode) 65 struct drm_display_mode *mode)
66{ 66{
67 struct drm_device *dev = connector->dev;
68
69 int max_clock = 0;
67 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 70 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
68 return MODE_NO_DBLESCAN; 71 return MODE_NO_DBLESCAN;
69 72
70 if (mode->clock > 400000 || mode->clock < 25000) 73 if (mode->clock < 25000)
71 return MODE_CLOCK_RANGE; 74 return MODE_CLOCK_LOW;
75
76 if (!IS_I9XX(dev))
77 max_clock = 350000;
78 else
79 max_clock = 400000;
80 if (mode->clock > max_clock)
81 return MODE_CLOCK_HIGH;
72 82
73 return MODE_OK; 83 return MODE_OK;
74} 84}
@@ -113,10 +123,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
113 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 123 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
114 adpa |= ADPA_VSYNC_ACTIVE_HIGH; 124 adpa |= ADPA_VSYNC_ACTIVE_HIGH;
115 125
116 if (intel_crtc->pipe == 0) 126 if (intel_crtc->pipe == 0) {
117 adpa |= ADPA_PIPE_A_SELECT; 127 adpa |= ADPA_PIPE_A_SELECT;
118 else 128 I915_WRITE(BCLRPAT_A, 0);
129 } else {
119 adpa |= ADPA_PIPE_B_SELECT; 130 adpa |= ADPA_PIPE_B_SELECT;
131 I915_WRITE(BCLRPAT_B, 0);
132 }
120 133
121 I915_WRITE(ADPA, adpa); 134 I915_WRITE(ADPA, adpa);
122} 135}
@@ -133,20 +146,39 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
133{ 146{
134 struct drm_device *dev = connector->dev; 147 struct drm_device *dev = connector->dev;
135 struct drm_i915_private *dev_priv = dev->dev_private; 148 struct drm_i915_private *dev_priv = dev->dev_private;
136 u32 temp; 149 u32 hotplug_en;
137 150 int i, tries = 0;
138 unsigned long timeout = jiffies + msecs_to_jiffies(1000); 151 /*
139 152 * On 4 series desktop, CRT detect sequence need to be done twice
140 temp = I915_READ(PORT_HOTPLUG_EN); 153 * to get a reliable result.
141 154 */
142 I915_WRITE(PORT_HOTPLUG_EN,
143 temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
144 155
145 do { 156 if (IS_G4X(dev) && !IS_GM45(dev))
146 if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT)) 157 tries = 2;
147 break; 158 else
148 msleep(1); 159 tries = 1;
149 } while (time_after(timeout, jiffies)); 160 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
161 hotplug_en &= ~(CRT_HOTPLUG_MASK);
162 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
163
164 if (IS_GM45(dev))
165 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
166
167 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
168
169 for (i = 0; i < tries ; i++) {
170 unsigned long timeout;
171 /* turn on the FORCE_DETECT */
172 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
173 timeout = jiffies + msecs_to_jiffies(1000);
174 /* wait for FORCE_DETECT to go off */
175 do {
176 if (!(I915_READ(PORT_HOTPLUG_EN) &
177 CRT_HOTPLUG_FORCE_DETECT))
178 break;
179 msleep(1);
180 } while (time_after(timeout, jiffies));
181 }
150 182
151 if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) == 183 if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
152 CRT_HOTPLUG_MONITOR_COLOR) 184 CRT_HOTPLUG_MONITOR_COLOR)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a2834276cb38..d9c50ff94d76 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -56,11 +56,13 @@ typedef struct {
56} intel_p2_t; 56} intel_p2_t;
57 57
58#define INTEL_P2_NUM 2 58#define INTEL_P2_NUM 2
59 59typedef struct intel_limit intel_limit_t;
60typedef struct { 60struct intel_limit {
61 intel_range_t dot, vco, n, m, m1, m2, p, p1; 61 intel_range_t dot, vco, n, m, m1, m2, p, p1;
62 intel_p2_t p2; 62 intel_p2_t p2;
63} intel_limit_t; 63 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
64 int, int, intel_clock_t *);
65};
64 66
65#define I8XX_DOT_MIN 25000 67#define I8XX_DOT_MIN 25000
66#define I8XX_DOT_MAX 350000 68#define I8XX_DOT_MAX 350000
@@ -90,18 +92,32 @@ typedef struct {
90#define I9XX_DOT_MAX 400000 92#define I9XX_DOT_MAX 400000
91#define I9XX_VCO_MIN 1400000 93#define I9XX_VCO_MIN 1400000
92#define I9XX_VCO_MAX 2800000 94#define I9XX_VCO_MAX 2800000
95#define IGD_VCO_MIN 1700000
96#define IGD_VCO_MAX 3500000
93#define I9XX_N_MIN 1 97#define I9XX_N_MIN 1
94#define I9XX_N_MAX 6 98#define I9XX_N_MAX 6
99/* IGD's Ncounter is a ring counter */
100#define IGD_N_MIN 3
101#define IGD_N_MAX 6
95#define I9XX_M_MIN 70 102#define I9XX_M_MIN 70
96#define I9XX_M_MAX 120 103#define I9XX_M_MAX 120
104#define IGD_M_MIN 2
105#define IGD_M_MAX 256
97#define I9XX_M1_MIN 10 106#define I9XX_M1_MIN 10
98#define I9XX_M1_MAX 22 107#define I9XX_M1_MAX 22
99#define I9XX_M2_MIN 5 108#define I9XX_M2_MIN 5
100#define I9XX_M2_MAX 9 109#define I9XX_M2_MAX 9
110/* IGD M1 is reserved, and must be 0 */
111#define IGD_M1_MIN 0
112#define IGD_M1_MAX 0
113#define IGD_M2_MIN 0
114#define IGD_M2_MAX 254
101#define I9XX_P_SDVO_DAC_MIN 5 115#define I9XX_P_SDVO_DAC_MIN 5
102#define I9XX_P_SDVO_DAC_MAX 80 116#define I9XX_P_SDVO_DAC_MAX 80
103#define I9XX_P_LVDS_MIN 7 117#define I9XX_P_LVDS_MIN 7
104#define I9XX_P_LVDS_MAX 98 118#define I9XX_P_LVDS_MAX 98
119#define IGD_P_LVDS_MIN 7
120#define IGD_P_LVDS_MAX 112
105#define I9XX_P1_MIN 1 121#define I9XX_P1_MIN 1
106#define I9XX_P1_MAX 8 122#define I9XX_P1_MAX 8
107#define I9XX_P2_SDVO_DAC_SLOW 10 123#define I9XX_P2_SDVO_DAC_SLOW 10
@@ -115,6 +131,97 @@ typedef struct {
115#define INTEL_LIMIT_I8XX_LVDS 1 131#define INTEL_LIMIT_I8XX_LVDS 1
116#define INTEL_LIMIT_I9XX_SDVO_DAC 2 132#define INTEL_LIMIT_I9XX_SDVO_DAC 2
117#define INTEL_LIMIT_I9XX_LVDS 3 133#define INTEL_LIMIT_I9XX_LVDS 3
134#define INTEL_LIMIT_G4X_SDVO 4
135#define INTEL_LIMIT_G4X_HDMI_DAC 5
136#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6
137#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
138#define INTEL_LIMIT_IGD_SDVO_DAC 8
139#define INTEL_LIMIT_IGD_LVDS 9
140
141/*The parameter is for SDVO on G4x platform*/
142#define G4X_DOT_SDVO_MIN 25000
143#define G4X_DOT_SDVO_MAX 270000
144#define G4X_VCO_MIN 1750000
145#define G4X_VCO_MAX 3500000
146#define G4X_N_SDVO_MIN 1
147#define G4X_N_SDVO_MAX 4
148#define G4X_M_SDVO_MIN 104
149#define G4X_M_SDVO_MAX 138
150#define G4X_M1_SDVO_MIN 17
151#define G4X_M1_SDVO_MAX 23
152#define G4X_M2_SDVO_MIN 5
153#define G4X_M2_SDVO_MAX 11
154#define G4X_P_SDVO_MIN 10
155#define G4X_P_SDVO_MAX 30
156#define G4X_P1_SDVO_MIN 1
157#define G4X_P1_SDVO_MAX 3
158#define G4X_P2_SDVO_SLOW 10
159#define G4X_P2_SDVO_FAST 10
160#define G4X_P2_SDVO_LIMIT 270000
161
162/*The parameter is for HDMI_DAC on G4x platform*/
163#define G4X_DOT_HDMI_DAC_MIN 22000
164#define G4X_DOT_HDMI_DAC_MAX 400000
165#define G4X_N_HDMI_DAC_MIN 1
166#define G4X_N_HDMI_DAC_MAX 4
167#define G4X_M_HDMI_DAC_MIN 104
168#define G4X_M_HDMI_DAC_MAX 138
169#define G4X_M1_HDMI_DAC_MIN 16
170#define G4X_M1_HDMI_DAC_MAX 23
171#define G4X_M2_HDMI_DAC_MIN 5
172#define G4X_M2_HDMI_DAC_MAX 11
173#define G4X_P_HDMI_DAC_MIN 5
174#define G4X_P_HDMI_DAC_MAX 80
175#define G4X_P1_HDMI_DAC_MIN 1
176#define G4X_P1_HDMI_DAC_MAX 8
177#define G4X_P2_HDMI_DAC_SLOW 10
178#define G4X_P2_HDMI_DAC_FAST 5
179#define G4X_P2_HDMI_DAC_LIMIT 165000
180
181/*The parameter is for SINGLE_CHANNEL_LVDS on G4x platform*/
182#define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000
183#define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000
184#define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1
185#define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3
186#define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104
187#define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138
188#define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17
189#define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23
190#define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5
191#define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11
192#define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28
193#define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112
194#define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2
195#define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8
196#define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14
197#define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14
198#define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0
199
200/*The parameter is for DUAL_CHANNEL_LVDS on G4x platform*/
201#define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000
202#define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000
203#define G4X_N_DUAL_CHANNEL_LVDS_MIN 1
204#define G4X_N_DUAL_CHANNEL_LVDS_MAX 3
205#define G4X_M_DUAL_CHANNEL_LVDS_MIN 104
206#define G4X_M_DUAL_CHANNEL_LVDS_MAX 138
207#define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17
208#define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23
209#define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5
210#define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11
211#define G4X_P_DUAL_CHANNEL_LVDS_MIN 14
212#define G4X_P_DUAL_CHANNEL_LVDS_MAX 42
213#define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2
214#define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6
215#define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7
216#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
217#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
218
219static bool
220intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
221 int target, int refclk, intel_clock_t *best_clock);
222static bool
223intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
224 int target, int refclk, intel_clock_t *best_clock);
118 225
119static const intel_limit_t intel_limits[] = { 226static const intel_limit_t intel_limits[] = {
120 { /* INTEL_LIMIT_I8XX_DVO_DAC */ 227 { /* INTEL_LIMIT_I8XX_DVO_DAC */
@@ -128,6 +235,7 @@ static const intel_limit_t intel_limits[] = {
128 .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, 235 .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX },
129 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 236 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
130 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, 237 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
238 .find_pll = intel_find_best_PLL,
131 }, 239 },
132 { /* INTEL_LIMIT_I8XX_LVDS */ 240 { /* INTEL_LIMIT_I8XX_LVDS */
133 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 241 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
@@ -140,6 +248,7 @@ static const intel_limit_t intel_limits[] = {
140 .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, 248 .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX },
141 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 249 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
142 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, 250 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
251 .find_pll = intel_find_best_PLL,
143 }, 252 },
144 { /* INTEL_LIMIT_I9XX_SDVO_DAC */ 253 { /* INTEL_LIMIT_I9XX_SDVO_DAC */
145 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 254 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -152,6 +261,7 @@ static const intel_limit_t intel_limits[] = {
152 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 261 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
153 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 262 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
154 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 263 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
264 .find_pll = intel_find_best_PLL,
155 }, 265 },
156 { /* INTEL_LIMIT_I9XX_LVDS */ 266 { /* INTEL_LIMIT_I9XX_LVDS */
157 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 267 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -167,19 +277,157 @@ static const intel_limit_t intel_limits[] = {
167 */ 277 */
168 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 278 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
169 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, 279 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
280 .find_pll = intel_find_best_PLL,
281 },
282 /* below parameter and function is for G4X Chipset Family*/
283 { /* INTEL_LIMIT_G4X_SDVO */
284 .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX },
285 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
286 .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX },
287 .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX },
288 .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX },
289 .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX },
290 .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX },
291 .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX},
292 .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT,
293 .p2_slow = G4X_P2_SDVO_SLOW,
294 .p2_fast = G4X_P2_SDVO_FAST
295 },
296 .find_pll = intel_g4x_find_best_PLL,
297 },
298 { /* INTEL_LIMIT_G4X_HDMI_DAC */
299 .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX },
300 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
301 .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX },
302 .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX },
303 .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX },
304 .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX },
305 .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX },
306 .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX},
307 .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT,
308 .p2_slow = G4X_P2_HDMI_DAC_SLOW,
309 .p2_fast = G4X_P2_HDMI_DAC_FAST
310 },
311 .find_pll = intel_g4x_find_best_PLL,
312 },
313 { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */
314 .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
315 .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
316 .vco = { .min = G4X_VCO_MIN,
317 .max = G4X_VCO_MAX },
318 .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN,
319 .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX },
320 .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN,
321 .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX },
322 .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN,
323 .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX },
324 .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN,
325 .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX },
326 .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN,
327 .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX },
328 .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN,
329 .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX },
330 .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT,
331 .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW,
332 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
333 },
334 .find_pll = intel_g4x_find_best_PLL,
335 },
336 { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */
337 .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
338 .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
339 .vco = { .min = G4X_VCO_MIN,
340 .max = G4X_VCO_MAX },
341 .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN,
342 .max = G4X_N_DUAL_CHANNEL_LVDS_MAX },
343 .m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN,
344 .max = G4X_M_DUAL_CHANNEL_LVDS_MAX },
345 .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN,
346 .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX },
347 .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN,
348 .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX },
349 .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN,
350 .max = G4X_P_DUAL_CHANNEL_LVDS_MAX },
351 .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN,
352 .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX },
353 .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT,
354 .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW,
355 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
356 },
357 .find_pll = intel_g4x_find_best_PLL,
358 },
359 { /* INTEL_LIMIT_IGD_SDVO */
360 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
361 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
362 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
363 .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
364 .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
365 .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
366 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
367 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
368 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
369 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
170 }, 370 },
371 { /* INTEL_LIMIT_IGD_LVDS */
372 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
373 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
374 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
375 .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
376 .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
377 .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
378 .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX },
379 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
380 /* IGD only supports single-channel mode. */
381 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
382 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
383 },
384
171}; 385};
172 386
387static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
388{
389 struct drm_device *dev = crtc->dev;
390 struct drm_i915_private *dev_priv = dev->dev_private;
391 const intel_limit_t *limit;
392
393 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
394 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
395 LVDS_CLKB_POWER_UP)
396 /* LVDS with dual channel */
397 limit = &intel_limits
398 [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS];
399 else
400 /* LVDS with dual channel */
401 limit = &intel_limits
402 [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS];
403 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
404 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
405 limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC];
406 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
407 limit = &intel_limits[INTEL_LIMIT_G4X_SDVO];
408 } else /* The option is for other outputs */
409 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
410
411 return limit;
412}
413
173static const intel_limit_t *intel_limit(struct drm_crtc *crtc) 414static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
174{ 415{
175 struct drm_device *dev = crtc->dev; 416 struct drm_device *dev = crtc->dev;
176 const intel_limit_t *limit; 417 const intel_limit_t *limit;
177 418
178 if (IS_I9XX(dev)) { 419 if (IS_G4X(dev)) {
420 limit = intel_g4x_limit(crtc);
421 } else if (IS_I9XX(dev) && !IS_IGD(dev)) {
179 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 422 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
180 limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; 423 limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
181 else 424 else
182 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; 425 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
426 } else if (IS_IGD(dev)) {
427 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
428 limit = &intel_limits[INTEL_LIMIT_IGD_LVDS];
429 else
430 limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC];
183 } else { 431 } else {
184 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 432 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
185 limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; 433 limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
@@ -189,8 +437,21 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
189 return limit; 437 return limit;
190} 438}
191 439
192static void intel_clock(int refclk, intel_clock_t *clock) 440/* m1 is reserved as 0 in IGD, n is a ring counter */
441static void igd_clock(int refclk, intel_clock_t *clock)
193{ 442{
443 clock->m = clock->m2 + 2;
444 clock->p = clock->p1 * clock->p2;
445 clock->vco = refclk * clock->m / clock->n;
446 clock->dot = clock->vco / clock->p;
447}
448
449static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
450{
451 if (IS_IGD(dev)) {
452 igd_clock(refclk, clock);
453 return;
454 }
194 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 455 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
195 clock->p = clock->p1 * clock->p2; 456 clock->p = clock->p1 * clock->p2;
196 clock->vco = refclk * clock->m / (clock->n + 2); 457 clock->vco = refclk * clock->m / (clock->n + 2);
@@ -226,6 +487,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
226static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) 487static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
227{ 488{
228 const intel_limit_t *limit = intel_limit (crtc); 489 const intel_limit_t *limit = intel_limit (crtc);
490 struct drm_device *dev = crtc->dev;
229 491
230 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 492 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
231 INTELPllInvalid ("p1 out of range\n"); 493 INTELPllInvalid ("p1 out of range\n");
@@ -235,7 +497,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
235 INTELPllInvalid ("m2 out of range\n"); 497 INTELPllInvalid ("m2 out of range\n");
236 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 498 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
237 INTELPllInvalid ("m1 out of range\n"); 499 INTELPllInvalid ("m1 out of range\n");
238 if (clock->m1 <= clock->m2) 500 if (clock->m1 <= clock->m2 && !IS_IGD(dev))
239 INTELPllInvalid ("m1 <= m2\n"); 501 INTELPllInvalid ("m1 <= m2\n");
240 if (clock->m < limit->m.min || limit->m.max < clock->m) 502 if (clock->m < limit->m.min || limit->m.max < clock->m)
241 INTELPllInvalid ("m out of range\n"); 503 INTELPllInvalid ("m out of range\n");
@@ -252,18 +514,14 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
252 return true; 514 return true;
253} 515}
254 516
255/** 517static bool
256 * Returns a set of divisors for the desired target clock with the given 518intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
257 * refclk, or FALSE. The returned values represent the clock equation: 519 int target, int refclk, intel_clock_t *best_clock)
258 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 520
259 */
260static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
261 int refclk, intel_clock_t *best_clock)
262{ 521{
263 struct drm_device *dev = crtc->dev; 522 struct drm_device *dev = crtc->dev;
264 struct drm_i915_private *dev_priv = dev->dev_private; 523 struct drm_i915_private *dev_priv = dev->dev_private;
265 intel_clock_t clock; 524 intel_clock_t clock;
266 const intel_limit_t *limit = intel_limit(crtc);
267 int err = target; 525 int err = target;
268 526
269 if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 527 if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
@@ -289,15 +547,17 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
289 memset (best_clock, 0, sizeof (*best_clock)); 547 memset (best_clock, 0, sizeof (*best_clock));
290 548
291 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 549 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
292 for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 && 550 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
293 clock.m2 <= limit->m2.max; clock.m2++) { 551 /* m1 is always 0 in IGD */
552 if (clock.m2 >= clock.m1 && !IS_IGD(dev))
553 break;
294 for (clock.n = limit->n.min; clock.n <= limit->n.max; 554 for (clock.n = limit->n.min; clock.n <= limit->n.max;
295 clock.n++) { 555 clock.n++) {
296 for (clock.p1 = limit->p1.min; 556 for (clock.p1 = limit->p1.min;
297 clock.p1 <= limit->p1.max; clock.p1++) { 557 clock.p1 <= limit->p1.max; clock.p1++) {
298 int this_err; 558 int this_err;
299 559
300 intel_clock(refclk, &clock); 560 intel_clock(dev, refclk, &clock);
301 561
302 if (!intel_PLL_is_valid(crtc, &clock)) 562 if (!intel_PLL_is_valid(crtc, &clock))
303 continue; 563 continue;
@@ -315,6 +575,63 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
315 return (err != target); 575 return (err != target);
316} 576}
317 577
578static bool
579intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
580 int target, int refclk, intel_clock_t *best_clock)
581{
582 struct drm_device *dev = crtc->dev;
583 struct drm_i915_private *dev_priv = dev->dev_private;
584 intel_clock_t clock;
585 int max_n;
586 bool found;
587 /* approximately equals target * 0.00488 */
588 int err_most = (target >> 8) + (target >> 10);
589 found = false;
590
591 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
592 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
593 LVDS_CLKB_POWER_UP)
594 clock.p2 = limit->p2.p2_fast;
595 else
596 clock.p2 = limit->p2.p2_slow;
597 } else {
598 if (target < limit->p2.dot_limit)
599 clock.p2 = limit->p2.p2_slow;
600 else
601 clock.p2 = limit->p2.p2_fast;
602 }
603
604 memset(best_clock, 0, sizeof(*best_clock));
605 max_n = limit->n.max;
606 /* based on hardware requriment prefer smaller n to precision */
607 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
608 /* based on hardware requirment prefere larger m1,m2, p1 */
609 for (clock.m1 = limit->m1.max;
610 clock.m1 >= limit->m1.min; clock.m1--) {
611 for (clock.m2 = limit->m2.max;
612 clock.m2 >= limit->m2.min; clock.m2--) {
613 for (clock.p1 = limit->p1.max;
614 clock.p1 >= limit->p1.min; clock.p1--) {
615 int this_err;
616
617 intel_clock(dev, refclk, &clock);
618 if (!intel_PLL_is_valid(crtc, &clock))
619 continue;
620 this_err = abs(clock.dot - target) ;
621 if (this_err < err_most) {
622 *best_clock = clock;
623 err_most = this_err;
624 max_n = clock.n;
625 found = true;
626 }
627 }
628 }
629 }
630 }
631
632 return found;
633}
634
318void 635void
319intel_wait_for_vblank(struct drm_device *dev) 636intel_wait_for_vblank(struct drm_device *dev)
320{ 637{
@@ -634,7 +951,7 @@ static int intel_get_core_clock_speed(struct drm_device *dev)
634 return 400000; 951 return 400000;
635 else if (IS_I915G(dev)) 952 else if (IS_I915G(dev))
636 return 333000; 953 return 333000;
637 else if (IS_I945GM(dev) || IS_845G(dev)) 954 else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
638 return 200000; 955 return 200000;
639 else if (IS_I915GM(dev)) { 956 else if (IS_I915GM(dev)) {
640 u16 gcfgc = 0; 957 u16 gcfgc = 0;
@@ -733,6 +1050,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
733 bool is_crt = false, is_lvds = false, is_tv = false; 1050 bool is_crt = false, is_lvds = false, is_tv = false;
734 struct drm_mode_config *mode_config = &dev->mode_config; 1051 struct drm_mode_config *mode_config = &dev->mode_config;
735 struct drm_connector *connector; 1052 struct drm_connector *connector;
1053 const intel_limit_t *limit;
736 int ret; 1054 int ret;
737 1055
738 drm_vblank_pre_modeset(dev, pipe); 1056 drm_vblank_pre_modeset(dev, pipe);
@@ -776,13 +1094,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
776 refclk = 48000; 1094 refclk = 48000;
777 } 1095 }
778 1096
779 ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); 1097 /*
1098 * Returns a set of divisors for the desired target clock with the given
1099 * refclk, or FALSE. The returned values represent the clock equation:
1100 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
1101 */
1102 limit = intel_limit(crtc);
1103 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
780 if (!ok) { 1104 if (!ok) {
781 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 1105 DRM_ERROR("Couldn't find PLL settings for mode!\n");
782 return -EINVAL; 1106 return -EINVAL;
783 } 1107 }
784 1108
785 fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 1109 if (IS_IGD(dev))
1110 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
1111 else
1112 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
786 1113
787 dpll = DPLL_VGA_MODE_DIS; 1114 dpll = DPLL_VGA_MODE_DIS;
788 if (IS_I9XX(dev)) { 1115 if (IS_I9XX(dev)) {
@@ -799,7 +1126,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
799 } 1126 }
800 1127
801 /* compute bitmask from p1 value */ 1128 /* compute bitmask from p1 value */
802 dpll |= (1 << (clock.p1 - 1)) << 16; 1129 if (IS_IGD(dev))
1130 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
1131 else
1132 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
803 switch (clock.p2) { 1133 switch (clock.p2) {
804 case 5: 1134 case 5:
805 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 1135 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
@@ -1279,10 +1609,20 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1279 fp = I915_READ((pipe == 0) ? FPA1 : FPB1); 1609 fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
1280 1610
1281 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 1611 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
1282 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 1612 if (IS_IGD(dev)) {
1283 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 1613 clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
1614 clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
1615 } else {
1616 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
1617 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
1618 }
1619
1284 if (IS_I9XX(dev)) { 1620 if (IS_I9XX(dev)) {
1285 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 1621 if (IS_IGD(dev))
1622 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
1623 DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
1624 else
1625 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
1286 DPLL_FPA01_P1_POST_DIV_SHIFT); 1626 DPLL_FPA01_P1_POST_DIV_SHIFT);
1287 1627
1288 switch (dpll & DPLL_MODE_MASK) { 1628 switch (dpll & DPLL_MODE_MASK) {
@@ -1301,7 +1641,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1301 } 1641 }
1302 1642
1303 /* XXX: Handle the 100Mhz refclk */ 1643 /* XXX: Handle the 100Mhz refclk */
1304 intel_clock(96000, &clock); 1644 intel_clock(dev, 96000, &clock);
1305 } else { 1645 } else {
1306 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); 1646 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
1307 1647
@@ -1313,9 +1653,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1313 if ((dpll & PLL_REF_INPUT_MASK) == 1653 if ((dpll & PLL_REF_INPUT_MASK) ==
1314 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 1654 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
1315 /* XXX: might not be 66MHz */ 1655 /* XXX: might not be 66MHz */
1316 intel_clock(66000, &clock); 1656 intel_clock(dev, 66000, &clock);
1317 } else 1657 } else
1318 intel_clock(48000, &clock); 1658 intel_clock(dev, 48000, &clock);
1319 } else { 1659 } else {
1320 if (dpll & PLL_P1_DIVIDE_BY_TWO) 1660 if (dpll & PLL_P1_DIVIDE_BY_TWO)
1321 clock.p1 = 2; 1661 clock.p1 = 2;
@@ -1328,7 +1668,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1328 else 1668 else
1329 clock.p2 = 2; 1669 clock.p2 = 2;
1330 1670
1331 intel_clock(48000, &clock); 1671 intel_clock(dev, 48000, &clock);
1332 } 1672 }
1333 } 1673 }
1334 1674
@@ -1474,13 +1814,21 @@ static void intel_setup_outputs(struct drm_device *dev)
1474 1814
1475 if (IS_I9XX(dev)) { 1815 if (IS_I9XX(dev)) {
1476 int found; 1816 int found;
1817 u32 reg;
1477 1818
1478 if (I915_READ(SDVOB) & SDVO_DETECTED) { 1819 if (I915_READ(SDVOB) & SDVO_DETECTED) {
1479 found = intel_sdvo_init(dev, SDVOB); 1820 found = intel_sdvo_init(dev, SDVOB);
1480 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 1821 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
1481 intel_hdmi_init(dev, SDVOB); 1822 intel_hdmi_init(dev, SDVOB);
1482 } 1823 }
1483 if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) { 1824
1825 /* Before G4X SDVOC doesn't have its own detect register */
1826 if (IS_G4X(dev))
1827 reg = SDVOC;
1828 else
1829 reg = SDVOB;
1830
1831 if (I915_READ(reg) & SDVO_DETECTED) {
1484 found = intel_sdvo_init(dev, SDVOC); 1832 found = intel_sdvo_init(dev, SDVOC);
1485 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 1833 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
1486 intel_hdmi_init(dev, SDVOC); 1834 intel_hdmi_init(dev, SDVOC);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0d211af98854..6619f26e46a5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -265,7 +265,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
265 pfit_control = 0; 265 pfit_control = 0;
266 266
267 if (!IS_I965G(dev)) { 267 if (!IS_I965G(dev)) {
268 if (dev_priv->panel_wants_dither) 268 if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
269 pfit_control |= PANEL_8TO6_DITHER_ENABLE; 269 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
270 } 270 }
271 else 271 else
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 56485d67369b..ceca9471a75a 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -217,8 +217,8 @@ static const u32 filter_table[] = {
217 */ 217 */
218static const struct color_conversion ntsc_m_csc_composite = { 218static const struct color_conversion ntsc_m_csc_composite = {
219 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, 219 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
220 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, 220 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
221 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, 221 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
222}; 222};
223 223
224static const struct video_levels ntsc_m_levels_composite = { 224static const struct video_levels ntsc_m_levels_composite = {
@@ -226,9 +226,9 @@ static const struct video_levels ntsc_m_levels_composite = {
226}; 226};
227 227
228static const struct color_conversion ntsc_m_csc_svideo = { 228static const struct color_conversion ntsc_m_csc_svideo = {
229 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, 229 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
230 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, 230 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
231 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, 231 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
232}; 232};
233 233
234static const struct video_levels ntsc_m_levels_svideo = { 234static const struct video_levels ntsc_m_levels_svideo = {
@@ -237,8 +237,8 @@ static const struct video_levels ntsc_m_levels_svideo = {
237 237
238static const struct color_conversion ntsc_j_csc_composite = { 238static const struct color_conversion ntsc_j_csc_composite = {
239 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119, 239 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
240 .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00, 240 .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200,
241 .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00, 241 .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
242}; 242};
243 243
244static const struct video_levels ntsc_j_levels_composite = { 244static const struct video_levels ntsc_j_levels_composite = {
@@ -247,8 +247,8 @@ static const struct video_levels ntsc_j_levels_composite = {
247 247
248static const struct color_conversion ntsc_j_csc_svideo = { 248static const struct color_conversion ntsc_j_csc_svideo = {
249 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c, 249 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
250 .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00, 250 .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200,
251 .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00, 251 .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
252}; 252};
253 253
254static const struct video_levels ntsc_j_levels_svideo = { 254static const struct video_levels ntsc_j_levels_svideo = {
@@ -257,8 +257,8 @@ static const struct video_levels ntsc_j_levels_svideo = {
257 257
258static const struct color_conversion pal_csc_composite = { 258static const struct color_conversion pal_csc_composite = {
259 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113, 259 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
260 .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00, 260 .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200,
261 .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00, 261 .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
262}; 262};
263 263
264static const struct video_levels pal_levels_composite = { 264static const struct video_levels pal_levels_composite = {
@@ -267,8 +267,8 @@ static const struct video_levels pal_levels_composite = {
267 267
268static const struct color_conversion pal_csc_svideo = { 268static const struct color_conversion pal_csc_svideo = {
269 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, 269 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
270 .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00, 270 .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200,
271 .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00, 271 .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
272}; 272};
273 273
274static const struct video_levels pal_levels_svideo = { 274static const struct video_levels pal_levels_svideo = {
@@ -277,8 +277,8 @@ static const struct video_levels pal_levels_svideo = {
277 277
278static const struct color_conversion pal_m_csc_composite = { 278static const struct color_conversion pal_m_csc_composite = {
279 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, 279 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
280 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, 280 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
281 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, 281 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
282}; 282};
283 283
284static const struct video_levels pal_m_levels_composite = { 284static const struct video_levels pal_m_levels_composite = {
@@ -286,9 +286,9 @@ static const struct video_levels pal_m_levels_composite = {
286}; 286};
287 287
288static const struct color_conversion pal_m_csc_svideo = { 288static const struct color_conversion pal_m_csc_svideo = {
289 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, 289 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
290 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, 290 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
291 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, 291 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
292}; 292};
293 293
294static const struct video_levels pal_m_levels_svideo = { 294static const struct video_levels pal_m_levels_svideo = {
@@ -297,8 +297,8 @@ static const struct video_levels pal_m_levels_svideo = {
297 297
298static const struct color_conversion pal_n_csc_composite = { 298static const struct color_conversion pal_n_csc_composite = {
299 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, 299 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
300 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, 300 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
301 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, 301 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
302}; 302};
303 303
304static const struct video_levels pal_n_levels_composite = { 304static const struct video_levels pal_n_levels_composite = {
@@ -306,9 +306,9 @@ static const struct video_levels pal_n_levels_composite = {
306}; 306};
307 307
308static const struct color_conversion pal_n_csc_svideo = { 308static const struct color_conversion pal_n_csc_svideo = {
309 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, 309 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
310 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, 310 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
311 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, 311 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
312}; 312};
313 313
314static const struct video_levels pal_n_levels_svideo = { 314static const struct video_levels pal_n_levels_svideo = {
@@ -319,9 +319,9 @@ static const struct video_levels pal_n_levels_svideo = {
319 * Component connections 319 * Component connections
320 */ 320 */
321static const struct color_conversion sdtv_csc_yprpb = { 321static const struct color_conversion sdtv_csc_yprpb = {
322 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146, 322 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
323 .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00, 323 .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200,
324 .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00, 324 .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
325}; 325};
326 326
327static const struct color_conversion sdtv_csc_rgb = { 327static const struct color_conversion sdtv_csc_rgb = {
@@ -331,9 +331,9 @@ static const struct color_conversion sdtv_csc_rgb = {
331}; 331};
332 332
333static const struct color_conversion hdtv_csc_yprpb = { 333static const struct color_conversion hdtv_csc_yprpb = {
334 .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146, 334 .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
335 .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00, 335 .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
336 .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00, 336 .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
337}; 337};
338 338
339static const struct color_conversion hdtv_csc_rgb = { 339static const struct color_conversion hdtv_csc_rgb = {
@@ -414,7 +414,7 @@ struct tv_mode {
414static const struct tv_mode tv_modes[] = { 414static const struct tv_mode tv_modes[] = {
415 { 415 {
416 .name = "NTSC-M", 416 .name = "NTSC-M",
417 .clock = 107520, 417 .clock = 108000,
418 .refresh = 29970, 418 .refresh = 29970,
419 .oversample = TV_OVERSAMPLE_8X, 419 .oversample = TV_OVERSAMPLE_8X,
420 .component_only = 0, 420 .component_only = 0,
@@ -442,8 +442,8 @@ static const struct tv_mode tv_modes[] = {
442 .vburst_start_f4 = 10, .vburst_end_f4 = 240, 442 .vburst_start_f4 = 10, .vburst_end_f4 = 240,
443 443
444 /* desired 3.5800000 actual 3.5800000 clock 107.52 */ 444 /* desired 3.5800000 actual 3.5800000 clock 107.52 */
445 .dda1_inc = 136, 445 .dda1_inc = 135,
446 .dda2_inc = 7624, .dda2_size = 20013, 446 .dda2_inc = 20800, .dda2_size = 27456,
447 .dda3_inc = 0, .dda3_size = 0, 447 .dda3_inc = 0, .dda3_size = 0,
448 .sc_reset = TV_SC_RESET_EVERY_4, 448 .sc_reset = TV_SC_RESET_EVERY_4,
449 .pal_burst = false, 449 .pal_burst = false,
@@ -457,7 +457,7 @@ static const struct tv_mode tv_modes[] = {
457 }, 457 },
458 { 458 {
459 .name = "NTSC-443", 459 .name = "NTSC-443",
460 .clock = 107520, 460 .clock = 108000,
461 .refresh = 29970, 461 .refresh = 29970,
462 .oversample = TV_OVERSAMPLE_8X, 462 .oversample = TV_OVERSAMPLE_8X,
463 .component_only = 0, 463 .component_only = 0,
@@ -485,10 +485,10 @@ static const struct tv_mode tv_modes[] = {
485 485
486 /* desired 4.4336180 actual 4.4336180 clock 107.52 */ 486 /* desired 4.4336180 actual 4.4336180 clock 107.52 */
487 .dda1_inc = 168, 487 .dda1_inc = 168,
488 .dda2_inc = 18557, .dda2_size = 20625, 488 .dda2_inc = 4093, .dda2_size = 27456,
489 .dda3_inc = 0, .dda3_size = 0, 489 .dda3_inc = 310, .dda3_size = 525,
490 .sc_reset = TV_SC_RESET_EVERY_8, 490 .sc_reset = TV_SC_RESET_NEVER,
491 .pal_burst = true, 491 .pal_burst = false,
492 492
493 .composite_levels = &ntsc_m_levels_composite, 493 .composite_levels = &ntsc_m_levels_composite,
494 .composite_color = &ntsc_m_csc_composite, 494 .composite_color = &ntsc_m_csc_composite,
@@ -499,7 +499,7 @@ static const struct tv_mode tv_modes[] = {
499 }, 499 },
500 { 500 {
501 .name = "NTSC-J", 501 .name = "NTSC-J",
502 .clock = 107520, 502 .clock = 108000,
503 .refresh = 29970, 503 .refresh = 29970,
504 .oversample = TV_OVERSAMPLE_8X, 504 .oversample = TV_OVERSAMPLE_8X,
505 .component_only = 0, 505 .component_only = 0,
@@ -527,8 +527,8 @@ static const struct tv_mode tv_modes[] = {
527 .vburst_start_f4 = 10, .vburst_end_f4 = 240, 527 .vburst_start_f4 = 10, .vburst_end_f4 = 240,
528 528
529 /* desired 3.5800000 actual 3.5800000 clock 107.52 */ 529 /* desired 3.5800000 actual 3.5800000 clock 107.52 */
530 .dda1_inc = 136, 530 .dda1_inc = 135,
531 .dda2_inc = 7624, .dda2_size = 20013, 531 .dda2_inc = 20800, .dda2_size = 27456,
532 .dda3_inc = 0, .dda3_size = 0, 532 .dda3_inc = 0, .dda3_size = 0,
533 .sc_reset = TV_SC_RESET_EVERY_4, 533 .sc_reset = TV_SC_RESET_EVERY_4,
534 .pal_burst = false, 534 .pal_burst = false,
@@ -542,7 +542,7 @@ static const struct tv_mode tv_modes[] = {
542 }, 542 },
543 { 543 {
544 .name = "PAL-M", 544 .name = "PAL-M",
545 .clock = 107520, 545 .clock = 108000,
546 .refresh = 29970, 546 .refresh = 29970,
547 .oversample = TV_OVERSAMPLE_8X, 547 .oversample = TV_OVERSAMPLE_8X,
548 .component_only = 0, 548 .component_only = 0,
@@ -570,11 +570,11 @@ static const struct tv_mode tv_modes[] = {
570 .vburst_start_f4 = 10, .vburst_end_f4 = 240, 570 .vburst_start_f4 = 10, .vburst_end_f4 = 240,
571 571
572 /* desired 3.5800000 actual 3.5800000 clock 107.52 */ 572 /* desired 3.5800000 actual 3.5800000 clock 107.52 */
573 .dda1_inc = 136, 573 .dda1_inc = 135,
574 .dda2_inc = 7624, .dda2_size = 20013, 574 .dda2_inc = 16704, .dda2_size = 27456,
575 .dda3_inc = 0, .dda3_size = 0, 575 .dda3_inc = 0, .dda3_size = 0,
576 .sc_reset = TV_SC_RESET_EVERY_4, 576 .sc_reset = TV_SC_RESET_EVERY_8,
577 .pal_burst = false, 577 .pal_burst = true,
578 578
579 .composite_levels = &pal_m_levels_composite, 579 .composite_levels = &pal_m_levels_composite,
580 .composite_color = &pal_m_csc_composite, 580 .composite_color = &pal_m_csc_composite,
@@ -586,7 +586,7 @@ static const struct tv_mode tv_modes[] = {
586 { 586 {
587 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ 587 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
588 .name = "PAL-N", 588 .name = "PAL-N",
589 .clock = 107520, 589 .clock = 108000,
590 .refresh = 25000, 590 .refresh = 25000,
591 .oversample = TV_OVERSAMPLE_8X, 591 .oversample = TV_OVERSAMPLE_8X,
592 .component_only = 0, 592 .component_only = 0,
@@ -615,9 +615,9 @@ static const struct tv_mode tv_modes[] = {
615 615
616 616
617 /* desired 4.4336180 actual 4.4336180 clock 107.52 */ 617 /* desired 4.4336180 actual 4.4336180 clock 107.52 */
618 .dda1_inc = 168, 618 .dda1_inc = 135,
619 .dda2_inc = 18557, .dda2_size = 20625, 619 .dda2_inc = 23578, .dda2_size = 27648,
620 .dda3_inc = 0, .dda3_size = 0, 620 .dda3_inc = 134, .dda3_size = 625,
621 .sc_reset = TV_SC_RESET_EVERY_8, 621 .sc_reset = TV_SC_RESET_EVERY_8,
622 .pal_burst = true, 622 .pal_burst = true,
623 623
@@ -631,12 +631,12 @@ static const struct tv_mode tv_modes[] = {
631 { 631 {
632 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ 632 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
633 .name = "PAL", 633 .name = "PAL",
634 .clock = 107520, 634 .clock = 108000,
635 .refresh = 25000, 635 .refresh = 25000,
636 .oversample = TV_OVERSAMPLE_8X, 636 .oversample = TV_OVERSAMPLE_8X,
637 .component_only = 0, 637 .component_only = 0,
638 638
639 .hsync_end = 64, .hblank_end = 128, 639 .hsync_end = 64, .hblank_end = 142,
640 .hblank_start = 844, .htotal = 863, 640 .hblank_start = 844, .htotal = 863,
641 641
642 .progressive = false, .trilevel_sync = false, 642 .progressive = false, .trilevel_sync = false,
@@ -659,8 +659,8 @@ static const struct tv_mode tv_modes[] = {
659 659
660 /* desired 4.4336180 actual 4.4336180 clock 107.52 */ 660 /* desired 4.4336180 actual 4.4336180 clock 107.52 */
661 .dda1_inc = 168, 661 .dda1_inc = 168,
662 .dda2_inc = 18557, .dda2_size = 20625, 662 .dda2_inc = 4122, .dda2_size = 27648,
663 .dda3_inc = 0, .dda3_size = 0, 663 .dda3_inc = 67, .dda3_size = 625,
664 .sc_reset = TV_SC_RESET_EVERY_8, 664 .sc_reset = TV_SC_RESET_EVERY_8,
665 .pal_burst = true, 665 .pal_burst = true,
666 666
@@ -689,7 +689,7 @@ static const struct tv_mode tv_modes[] = {
689 .veq_ena = false, 689 .veq_ena = false,
690 690
691 .vi_end_f1 = 44, .vi_end_f2 = 44, 691 .vi_end_f1 = 44, .vi_end_f2 = 44,
692 .nbr_end = 496, 692 .nbr_end = 479,
693 693
694 .burst_ena = false, 694 .burst_ena = false,
695 695
@@ -713,7 +713,7 @@ static const struct tv_mode tv_modes[] = {
713 .veq_ena = false, 713 .veq_ena = false,
714 714
715 .vi_end_f1 = 44, .vi_end_f2 = 44, 715 .vi_end_f1 = 44, .vi_end_f2 = 44,
716 .nbr_end = 496, 716 .nbr_end = 479,
717 717
718 .burst_ena = false, 718 .burst_ena = false,
719 719
@@ -876,7 +876,7 @@ static const struct tv_mode tv_modes[] = {
876 .component_only = 1, 876 .component_only = 1,
877 877
878 .hsync_end = 88, .hblank_end = 235, 878 .hsync_end = 88, .hblank_end = 235,
879 .hblank_start = 2155, .htotal = 2200, 879 .hblank_start = 2155, .htotal = 2201,
880 880
881 .progressive = false, .trilevel_sync = true, 881 .progressive = false, .trilevel_sync = true,
882 882
@@ -1082,7 +1082,7 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo
1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
1083 1083
1084 /* Ensure TV refresh is close to desired refresh */ 1084 /* Ensure TV refresh is close to desired refresh */
1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 1) 1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10)
1086 return MODE_OK; 1086 return MODE_OK;
1087 return MODE_CLOCK_RANGE; 1087 return MODE_CLOCK_RANGE;
1088} 1088}
@@ -1135,7 +1135,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1135 if (!tv_mode) 1135 if (!tv_mode)
1136 return; /* can't happen (mode_prepare prevents this) */ 1136 return; /* can't happen (mode_prepare prevents this) */
1137 1137
1138 tv_ctl = 0; 1138 tv_ctl = I915_READ(TV_CTL);
1139 tv_ctl &= TV_CTL_SAVE;
1139 1140
1140 switch (tv_priv->type) { 1141 switch (tv_priv->type) {
1141 default: 1142 default:
@@ -1215,7 +1216,6 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1215 /* dda1 implies valid video levels */ 1216 /* dda1 implies valid video levels */
1216 if (tv_mode->dda1_inc) { 1217 if (tv_mode->dda1_inc) {
1217 scctl1 |= TV_SC_DDA1_EN; 1218 scctl1 |= TV_SC_DDA1_EN;
1218 scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
1219 } 1219 }
1220 1220
1221 if (tv_mode->dda2_inc) 1221 if (tv_mode->dda2_inc)
@@ -1225,6 +1225,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1225 scctl1 |= TV_SC_DDA3_EN; 1225 scctl1 |= TV_SC_DDA3_EN;
1226 1226
1227 scctl1 |= tv_mode->sc_reset; 1227 scctl1 |= tv_mode->sc_reset;
1228 scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
1228 scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; 1229 scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
1229 1230
1230 scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | 1231 scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1266,7 +1267,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1266 color_conversion->av); 1267 color_conversion->av);
1267 } 1268 }
1268 1269
1269 I915_WRITE(TV_CLR_KNOBS, 0x00606000); 1270 if (IS_I965G(dev))
1271 I915_WRITE(TV_CLR_KNOBS, 0x00404000);
1272 else
1273 I915_WRITE(TV_CLR_KNOBS, 0x00606000);
1274
1270 if (video_levels) 1275 if (video_levels)
1271 I915_WRITE(TV_CLR_LEVEL, 1276 I915_WRITE(TV_CLR_LEVEL,
1272 ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | 1277 ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
@@ -1401,6 +1406,7 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
1401 tv_dac = I915_READ(TV_DAC); 1406 tv_dac = I915_READ(TV_DAC);
1402 I915_WRITE(TV_DAC, save_tv_dac); 1407 I915_WRITE(TV_DAC, save_tv_dac);
1403 I915_WRITE(TV_CTL, save_tv_ctl); 1408 I915_WRITE(TV_CTL, save_tv_ctl);
1409 intel_wait_for_vblank(dev);
1404 } 1410 }
1405 /* 1411 /*
1406 * A B C 1412 * A B C
@@ -1451,7 +1457,7 @@ intel_tv_detect(struct drm_connector *connector)
1451 mode = reported_modes[0]; 1457 mode = reported_modes[0];
1452 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); 1458 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
1453 1459
1454 if (encoder->crtc) { 1460 if (encoder->crtc && encoder->crtc->enabled) {
1455 type = intel_tv_detect_type(encoder->crtc, intel_output); 1461 type = intel_tv_detect_type(encoder->crtc, intel_output);
1456 } else { 1462 } else {
1457 crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); 1463 crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode);
@@ -1462,6 +1468,8 @@ intel_tv_detect(struct drm_connector *connector)
1462 type = -1; 1468 type = -1;
1463 } 1469 }
1464 1470
1471 tv_priv->type = type;
1472
1465 if (type < 0) 1473 if (type < 0)
1466 return connector_status_disconnected; 1474 return connector_status_disconnected;
1467 1475
@@ -1495,7 +1503,8 @@ intel_tv_get_modes(struct drm_connector *connector)
1495 struct drm_display_mode *mode_ptr; 1503 struct drm_display_mode *mode_ptr;
1496 struct intel_output *intel_output = to_intel_output(connector); 1504 struct intel_output *intel_output = to_intel_output(connector);
1497 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1505 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
1498 int j; 1506 int j, count = 0;
1507 u64 tmp;
1499 1508
1500 for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]); 1509 for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]);
1501 j++) { 1510 j++) {
@@ -1510,8 +1519,9 @@ intel_tv_get_modes(struct drm_connector *connector)
1510 && !tv_mode->component_only)) 1519 && !tv_mode->component_only))
1511 continue; 1520 continue;
1512 1521
1513 mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode), 1522 mode_ptr = drm_mode_create(connector->dev);
1514 DRM_MEM_DRIVER); 1523 if (!mode_ptr)
1524 continue;
1515 strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); 1525 strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
1516 1526
1517 mode_ptr->hdisplay = hactive_s; 1527 mode_ptr->hdisplay = hactive_s;
@@ -1528,15 +1538,17 @@ intel_tv_get_modes(struct drm_connector *connector)
1528 mode_ptr->vsync_end = mode_ptr->vsync_start + 1; 1538 mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
1529 mode_ptr->vtotal = vactive_s + 33; 1539 mode_ptr->vtotal = vactive_s + 33;
1530 1540
1531 mode_ptr->clock = (int) (tv_mode->refresh * 1541 tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
1532 mode_ptr->vtotal * 1542 tmp *= mode_ptr->htotal;
1533 mode_ptr->htotal / 1000) / 1000; 1543 tmp = div_u64(tmp, 1000000);
1544 mode_ptr->clock = (int) tmp;
1534 1545
1535 mode_ptr->type = DRM_MODE_TYPE_DRIVER; 1546 mode_ptr->type = DRM_MODE_TYPE_DRIVER;
1536 drm_mode_probed_add(connector, mode_ptr); 1547 drm_mode_probed_add(connector, mode_ptr);
1548 count++;
1537 } 1549 }
1538 1550
1539 return 0; 1551 return count;
1540} 1552}
1541 1553
1542static void 1554static void
diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c
index 31400c8ae051..d696f69ebce5 100644
--- a/drivers/ieee1394/csr.c
+++ b/drivers/ieee1394/csr.c
@@ -68,22 +68,22 @@ static struct hpsb_highlevel csr_highlevel = {
68 .host_reset = host_reset, 68 .host_reset = host_reset,
69}; 69};
70 70
71const static struct hpsb_address_ops map_ops = { 71static const struct hpsb_address_ops map_ops = {
72 .read = read_maps, 72 .read = read_maps,
73}; 73};
74 74
75const static struct hpsb_address_ops fcp_ops = { 75static const struct hpsb_address_ops fcp_ops = {
76 .write = write_fcp, 76 .write = write_fcp,
77}; 77};
78 78
79const static struct hpsb_address_ops reg_ops = { 79static const struct hpsb_address_ops reg_ops = {
80 .read = read_regs, 80 .read = read_regs,
81 .write = write_regs, 81 .write = write_regs,
82 .lock = lock_regs, 82 .lock = lock_regs,
83 .lock64 = lock64_regs, 83 .lock64 = lock64_regs,
84}; 84};
85 85
86const static struct hpsb_address_ops config_rom_ops = { 86static const struct hpsb_address_ops config_rom_ops = {
87 .read = read_config_rom, 87 .read = read_config_rom,
88}; 88};
89 89
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index cb15bfa38d70..823a6297a1af 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -2171,7 +2171,7 @@ static const struct file_operations dv1394_fops=
2171 * Export information about protocols/devices supported by this driver. 2171 * Export information about protocols/devices supported by this driver.
2172 */ 2172 */
2173#ifdef MODULE 2173#ifdef MODULE
2174static struct ieee1394_device_id dv1394_id_table[] = { 2174static const struct ieee1394_device_id dv1394_id_table[] = {
2175 { 2175 {
2176 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, 2176 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2177 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, 2177 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 1a919df809f8..4ca103577c0a 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -181,7 +181,7 @@ static void ether1394_remove_host(struct hpsb_host *host);
181static void ether1394_host_reset(struct hpsb_host *host); 181static void ether1394_host_reset(struct hpsb_host *host);
182 182
183/* Function for incoming 1394 packets */ 183/* Function for incoming 1394 packets */
184const static struct hpsb_address_ops addr_ops = { 184static const struct hpsb_address_ops addr_ops = {
185 .write = ether1394_write, 185 .write = ether1394_write,
186}; 186};
187 187
@@ -438,7 +438,7 @@ static int eth1394_update(struct unit_directory *ud)
438 return eth1394_new_node(hi, ud); 438 return eth1394_new_node(hi, ud);
439} 439}
440 440
441static struct ieee1394_device_id eth1394_id_table[] = { 441static const struct ieee1394_device_id eth1394_id_table[] = {
442 { 442 {
443 .match_flags = (IEEE1394_MATCH_SPECIFIER_ID | 443 .match_flags = (IEEE1394_MATCH_SPECIFIER_ID |
444 IEEE1394_MATCH_VERSION), 444 IEEE1394_MATCH_VERSION),
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index 600e391c8fe7..4bc443546e04 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -478,7 +478,7 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
478 return retval; 478 return retval;
479} 479}
480 480
481const static struct hpsb_address_ops dummy_ops; 481static const struct hpsb_address_ops dummy_ops;
482 482
483/* dummy address spaces as lower and upper bounds of the host's a.s. list */ 483/* dummy address spaces as lower and upper bounds of the host's a.s. list */
484static void init_hpsb_highlevel(struct hpsb_host *host) 484static void init_hpsb_highlevel(struct hpsb_host *host)
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 53aada5bbe1e..a6d55bebe61a 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -484,7 +484,7 @@ static struct device_attribute *const fw_host_attrs[] = {
484static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf) 484static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf)
485{ 485{
486 struct hpsb_protocol_driver *driver; 486 struct hpsb_protocol_driver *driver;
487 struct ieee1394_device_id *id; 487 const struct ieee1394_device_id *id;
488 int length = 0; 488 int length = 0;
489 char *scratch = buf; 489 char *scratch = buf;
490 490
@@ -658,7 +658,7 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
658{ 658{
659 struct hpsb_protocol_driver *driver; 659 struct hpsb_protocol_driver *driver;
660 struct unit_directory *ud; 660 struct unit_directory *ud;
661 struct ieee1394_device_id *id; 661 const struct ieee1394_device_id *id;
662 662
663 /* We only match unit directories */ 663 /* We only match unit directories */
664 if (dev->platform_data != &nodemgr_ud_platform_data) 664 if (dev->platform_data != &nodemgr_ud_platform_data)
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index ee5acdbd114a..749b271d3107 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -125,7 +125,7 @@ struct hpsb_protocol_driver {
125 * probe function below can implement further protocol 125 * probe function below can implement further protocol
126 * dependent or vendor dependent checking. 126 * dependent or vendor dependent checking.
127 */ 127 */
128 struct ieee1394_device_id *id_table; 128 const struct ieee1394_device_id *id_table;
129 129
130 /* 130 /*
131 * The update function is called when the node has just 131 * The update function is called when the node has just
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index bad66c65b0d6..da5f8829b503 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -90,7 +90,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
90static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, 90static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
91 u64 addr, octlet_t data, octlet_t arg, int ext_tcode, 91 u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
92 u16 flags); 92 u16 flags);
93const static struct hpsb_address_ops arm_ops = { 93static const struct hpsb_address_ops arm_ops = {
94 .read = arm_read, 94 .read = arm_read,
95 .write = arm_write, 95 .write = arm_write,
96 .lock = arm_lock, 96 .lock = arm_lock,
@@ -369,6 +369,7 @@ static const char __user *raw1394_compat_write(const char __user *buf)
369{ 369{
370 struct compat_raw1394_req __user *cr = (typeof(cr)) buf; 370 struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
371 struct raw1394_request __user *r; 371 struct raw1394_request __user *r;
372
372 r = compat_alloc_user_space(sizeof(struct raw1394_request)); 373 r = compat_alloc_user_space(sizeof(struct raw1394_request));
373 374
374#define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x)) 375#define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x))
@@ -378,7 +379,8 @@ static const char __user *raw1394_compat_write(const char __user *buf)
378 C(tag) || 379 C(tag) ||
379 C(sendb) || 380 C(sendb) ||
380 C(recvb)) 381 C(recvb))
381 return ERR_PTR(-EFAULT); 382 return (__force const char __user *)ERR_PTR(-EFAULT);
383
382 return (const char __user *)r; 384 return (const char __user *)r;
383} 385}
384#undef C 386#undef C
@@ -389,6 +391,7 @@ static int
389raw1394_compat_read(const char __user *buf, struct raw1394_request *r) 391raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
390{ 392{
391 struct compat_raw1394_req __user *cr = (typeof(cr)) buf; 393 struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
394
392 if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) || 395 if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) ||
393 P(type) || 396 P(type) ||
394 P(error) || 397 P(error) ||
@@ -400,6 +403,7 @@ raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
400 P(sendb) || 403 P(sendb) ||
401 P(recvb)) 404 P(recvb))
402 return -EFAULT; 405 return -EFAULT;
406
403 return sizeof(struct compat_raw1394_req); 407 return sizeof(struct compat_raw1394_req);
404} 408}
405#undef P 409#undef P
@@ -2249,8 +2253,8 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer,
2249 sizeof(struct compat_raw1394_req) != 2253 sizeof(struct compat_raw1394_req) !=
2250 sizeof(struct raw1394_request)) { 2254 sizeof(struct raw1394_request)) {
2251 buffer = raw1394_compat_write(buffer); 2255 buffer = raw1394_compat_write(buffer);
2252 if (IS_ERR(buffer)) 2256 if (IS_ERR((__force void *)buffer))
2253 return PTR_ERR(buffer); 2257 return PTR_ERR((__force void *)buffer);
2254 } else 2258 } else
2255#endif 2259#endif
2256 if (count != sizeof(struct raw1394_request)) { 2260 if (count != sizeof(struct raw1394_request)) {
@@ -2978,7 +2982,7 @@ static int raw1394_release(struct inode *inode, struct file *file)
2978 * Export information about protocols/devices supported by this driver. 2982 * Export information about protocols/devices supported by this driver.
2979 */ 2983 */
2980#ifdef MODULE 2984#ifdef MODULE
2981static struct ieee1394_device_id raw1394_id_table[] = { 2985static const struct ieee1394_device_id raw1394_id_table[] = {
2982 { 2986 {
2983 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, 2987 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2984 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, 2988 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f3fd8657ce4b..a51ab233342d 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -265,7 +265,7 @@ static struct hpsb_highlevel sbp2_highlevel = {
265 .host_reset = sbp2_host_reset, 265 .host_reset = sbp2_host_reset,
266}; 266};
267 267
268const static struct hpsb_address_ops sbp2_ops = { 268static const struct hpsb_address_ops sbp2_ops = {
269 .write = sbp2_handle_status_write 269 .write = sbp2_handle_status_write
270}; 270};
271 271
@@ -275,7 +275,7 @@ static int sbp2_handle_physdma_write(struct hpsb_host *, int, int, quadlet_t *,
275static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64, 275static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64,
276 size_t, u16); 276 size_t, u16);
277 277
278const static struct hpsb_address_ops sbp2_physdma_ops = { 278static const struct hpsb_address_ops sbp2_physdma_ops = {
279 .read = sbp2_handle_physdma_read, 279 .read = sbp2_handle_physdma_read,
280 .write = sbp2_handle_physdma_write, 280 .write = sbp2_handle_physdma_write,
281}; 281};
@@ -285,7 +285,7 @@ const static struct hpsb_address_ops sbp2_physdma_ops = {
285/* 285/*
286 * Interface to driver core and IEEE 1394 core 286 * Interface to driver core and IEEE 1394 core
287 */ 287 */
288static struct ieee1394_device_id sbp2_id_table[] = { 288static const struct ieee1394_device_id sbp2_id_table[] = {
289 { 289 {
290 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, 290 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
291 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff, 291 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
@@ -1413,8 +1413,7 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
1413 "(firmware_revision 0x%06x, vendor_id 0x%06x," 1413 "(firmware_revision 0x%06x, vendor_id 0x%06x,"
1414 " model_id 0x%06x)", 1414 " model_id 0x%06x)",
1415 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), 1415 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
1416 workarounds, firmware_revision, 1416 workarounds, firmware_revision, ud->vendor_id,
1417 ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
1418 model); 1417 model);
1419 1418
1420 /* We would need one SCSI host template for each target to adjust 1419 /* We would need one SCSI host template for each target to adjust
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 679a918a5cc7..d287ba79821d 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1294,7 +1294,7 @@ static const struct file_operations video1394_fops=
1294 * Export information about protocols/devices supported by this driver. 1294 * Export information about protocols/devices supported by this driver.
1295 */ 1295 */
1296#ifdef MODULE 1296#ifdef MODULE
1297static struct ieee1394_device_id video1394_id_table[] = { 1297static const struct ieee1394_device_id video1394_id_table[] = {
1298 { 1298 {
1299 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, 1299 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
1300 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, 1300 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 8d3e4c6f237e..ecb1f6fd6276 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1602,8 +1602,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1602 netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128); 1602 netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128);
1603 nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n"); 1603 nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
1604 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1604 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1605 netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
1606 netdev->features |= NETIF_F_LLTX;
1607 1605
1608 /* Fill in the port structure */ 1606 /* Fill in the port structure */
1609 nesvnic->netdev = netdev; 1607 nesvnic->netdev = netdev;
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index b55d9ccaf33e..32526f103b59 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -115,7 +115,7 @@ static const char *debug_fcp_ctype(unsigned int ctype)
115} 115}
116 116
117static const char *debug_fcp_opcode(unsigned int opcode, 117static const char *debug_fcp_opcode(unsigned int opcode,
118 const u8 *data, size_t length) 118 const u8 *data, int length)
119{ 119{
120 switch (opcode) { 120 switch (opcode) {
121 case AVC_OPCODE_VENDOR: break; 121 case AVC_OPCODE_VENDOR: break;
@@ -135,13 +135,14 @@ static const char *debug_fcp_opcode(unsigned int opcode,
135 case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL: return "RegisterRC"; 135 case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL: return "RegisterRC";
136 case SFE_VENDOR_OPCODE_LNB_CONTROL: return "LNBControl"; 136 case SFE_VENDOR_OPCODE_LNB_CONTROL: return "LNBControl";
137 case SFE_VENDOR_OPCODE_TUNE_QPSK: return "TuneQPSK"; 137 case SFE_VENDOR_OPCODE_TUNE_QPSK: return "TuneQPSK";
138 case SFE_VENDOR_OPCODE_TUNE_QPSK2: return "TuneQPSK2";
138 case SFE_VENDOR_OPCODE_HOST2CA: return "Host2CA"; 139 case SFE_VENDOR_OPCODE_HOST2CA: return "Host2CA";
139 case SFE_VENDOR_OPCODE_CA2HOST: return "CA2Host"; 140 case SFE_VENDOR_OPCODE_CA2HOST: return "CA2Host";
140 } 141 }
141 return "Vendor"; 142 return "Vendor";
142} 143}
143 144
144static void debug_fcp(const u8 *data, size_t length) 145static void debug_fcp(const u8 *data, int length)
145{ 146{
146 unsigned int subunit_type, subunit_id, op; 147 unsigned int subunit_type, subunit_id, op;
147 const char *prefix = data[0] > 7 ? "FCP <- " : "FCP -> "; 148 const char *prefix = data[0] > 7 ? "FCP <- " : "FCP -> ";
@@ -266,7 +267,10 @@ static void avc_tuner_tuneqpsk(struct firedtv *fdtv,
266 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; 267 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
267 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; 268 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
268 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; 269 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
269 c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK; 270 if (fdtv->type == FIREDTV_DVB_S2)
271 c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK2;
272 else
273 c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK;
270 274
271 c->operand[4] = (params->frequency >> 24) & 0xff; 275 c->operand[4] = (params->frequency >> 24) & 0xff;
272 c->operand[5] = (params->frequency >> 16) & 0xff; 276 c->operand[5] = (params->frequency >> 16) & 0xff;
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 00d46e137b2a..92285d0089c2 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -81,13 +81,16 @@ static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags,
81 81
82 /* go */ 82 /* go */
83 sb->s_flags |= MS_ACTIVE; 83 sb->s_flags |= MS_ACTIVE;
84 return simple_set_mnt(mnt, sb); 84 simple_set_mnt(mnt, sb);
85
86 return 0;
85 87
86 /* new mountpoint for an already mounted superblock */ 88 /* new mountpoint for an already mounted superblock */
87already_mounted: 89already_mounted:
88 DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n", 90 DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n",
89 mtd->index, mtd->name); 91 mtd->index, mtd->name);
90 ret = simple_set_mnt(mnt, sb); 92 simple_set_mnt(mnt, sb);
93 ret = 0;
91 goto out_put; 94 goto out_put;
92 95
93out_error: 96out_error:
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 5b91a85fe107..4f08bd995836 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -353,9 +353,6 @@ el2_probe1(struct net_device *dev, int ioaddr)
353 353
354 dev->netdev_ops = &el2_netdev_ops; 354 dev->netdev_ops = &el2_netdev_ops;
355 dev->ethtool_ops = &netdev_ethtool_ops; 355 dev->ethtool_ops = &netdev_ethtool_ops;
356#ifdef CONFIG_NET_POLL_CONTROLLER
357 dev->poll_controller = eip_poll;
358#endif
359 356
360 retval = register_netdev(dev); 357 retval = register_netdev(dev);
361 if (retval) 358 if (retval)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e5ffc1c606c1..f062b424704e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -972,6 +972,14 @@ config ENC28J60_WRITEVERIFY
972 Enable the verify after the buffer write useful for debugging purpose. 972 Enable the verify after the buffer write useful for debugging purpose.
973 If unsure, say N. 973 If unsure, say N.
974 974
975config ETHOC
976 tristate "OpenCores 10/100 Mbps Ethernet MAC support"
977 depends on NET_ETHERNET
978 select MII
979 select PHYLIB
980 help
981 Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
982
975config SMC911X 983config SMC911X
976 tristate "SMSC LAN911[5678] support" 984 tristate "SMSC LAN911[5678] support"
977 select CRC32 985 select CRC32
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 758ecdf4c820..98409c9dd445 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -230,6 +230,7 @@ obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
230pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o 230pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
231obj-$(CONFIG_MLX4_CORE) += mlx4/ 231obj-$(CONFIG_MLX4_CORE) += mlx4/
232obj-$(CONFIG_ENC28J60) += enc28j60.o 232obj-$(CONFIG_ENC28J60) += enc28j60.o
233obj-$(CONFIG_ETHOC) += ethoc.o
233 234
234obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o 235obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
235 236
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 071a851a2ea1..eac73382c087 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -143,6 +143,22 @@ out:
143} 143}
144#endif 144#endif
145 145
146static const struct net_device_ops ac_netdev_ops = {
147 .ndo_open = ac_open,
148 .ndo_stop = ac_close_card,
149
150 .ndo_start_xmit = ei_start_xmit,
151 .ndo_tx_timeout = ei_tx_timeout,
152 .ndo_get_stats = ei_get_stats,
153 .ndo_set_multicast_list = ei_set_multicast_list,
154 .ndo_validate_addr = eth_validate_addr,
155 .ndo_set_mac_address = eth_mac_addr,
156 .ndo_change_mtu = eth_change_mtu,
157#ifdef CONFIG_NET_POLL_CONTROLLER
158 .ndo_poll_controller = ei_poll,
159#endif
160};
161
146static int __init ac_probe1(int ioaddr, struct net_device *dev) 162static int __init ac_probe1(int ioaddr, struct net_device *dev)
147{ 163{
148 int i, retval; 164 int i, retval;
@@ -253,11 +269,7 @@ static int __init ac_probe1(int ioaddr, struct net_device *dev)
253 ei_status.block_output = &ac_block_output; 269 ei_status.block_output = &ac_block_output;
254 ei_status.get_8390_hdr = &ac_get_8390_hdr; 270 ei_status.get_8390_hdr = &ac_get_8390_hdr;
255 271
256 dev->open = &ac_open; 272 dev->netdev_ops = &ac_netdev_ops;
257 dev->stop = &ac_close_card;
258#ifdef CONFIG_NET_POLL_CONTROLLER
259 dev->poll_controller = ei_poll;
260#endif
261 NS8390_init(dev, 0); 273 NS8390_init(dev, 0);
262 274
263 retval = register_netdev(dev); 275 retval = register_netdev(dev);
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 54819a34ba0a..7f8325419803 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -171,7 +171,6 @@ static unsigned int cops_debug = COPS_DEBUG;
171 171
172struct cops_local 172struct cops_local
173{ 173{
174 struct net_device_stats stats;
175 int board; /* Holds what board type is. */ 174 int board; /* Holds what board type is. */
176 int nodeid; /* Set to 1 once have nodeid. */ 175 int nodeid; /* Set to 1 once have nodeid. */
177 unsigned char node_acquire; /* Node ID when acquired. */ 176 unsigned char node_acquire; /* Node ID when acquired. */
@@ -197,7 +196,6 @@ static int cops_send_packet (struct sk_buff *skb, struct net_device *dev);
197static void set_multicast_list (struct net_device *dev); 196static void set_multicast_list (struct net_device *dev);
198static int cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); 197static int cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
199static int cops_close (struct net_device *dev); 198static int cops_close (struct net_device *dev);
200static struct net_device_stats *cops_get_stats (struct net_device *dev);
201 199
202static void cleanup_card(struct net_device *dev) 200static void cleanup_card(struct net_device *dev)
203{ 201{
@@ -260,6 +258,15 @@ out:
260 return ERR_PTR(err); 258 return ERR_PTR(err);
261} 259}
262 260
261static const struct net_device_ops cops_netdev_ops = {
262 .ndo_open = cops_open,
263 .ndo_stop = cops_close,
264 .ndo_start_xmit = cops_send_packet,
265 .ndo_tx_timeout = cops_timeout,
266 .ndo_do_ioctl = cops_ioctl,
267 .ndo_set_multicast_list = set_multicast_list,
268};
269
263/* 270/*
264 * This is the real probe routine. Linux has a history of friendly device 271 * This is the real probe routine. Linux has a history of friendly device
265 * probes on the ISA bus. A good device probes avoids doing writes, and 272 * probes on the ISA bus. A good device probes avoids doing writes, and
@@ -333,16 +340,9 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
333 /* Copy local board variable to lp struct. */ 340 /* Copy local board variable to lp struct. */
334 lp->board = board; 341 lp->board = board;
335 342
336 dev->hard_start_xmit = cops_send_packet; 343 dev->netdev_ops = &cops_netdev_ops;
337 dev->tx_timeout = cops_timeout;
338 dev->watchdog_timeo = HZ * 2; 344 dev->watchdog_timeo = HZ * 2;
339 345
340 dev->get_stats = cops_get_stats;
341 dev->open = cops_open;
342 dev->stop = cops_close;
343 dev->do_ioctl = cops_ioctl;
344 dev->set_multicast_list = set_multicast_list;
345 dev->mc_list = NULL;
346 346
347 /* Tell the user where the card is and what mode we're in. */ 347 /* Tell the user where the card is and what mode we're in. */
348 if(board==DAYNA) 348 if(board==DAYNA)
@@ -797,7 +797,7 @@ static void cops_rx(struct net_device *dev)
797 { 797 {
798 printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", 798 printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n",
799 dev->name); 799 dev->name);
800 lp->stats.rx_dropped++; 800 dev->stats.rx_dropped++;
801 while(pkt_len--) /* Discard packet */ 801 while(pkt_len--) /* Discard packet */
802 inb(ioaddr); 802 inb(ioaddr);
803 spin_unlock_irqrestore(&lp->lock, flags); 803 spin_unlock_irqrestore(&lp->lock, flags);
@@ -819,7 +819,7 @@ static void cops_rx(struct net_device *dev)
819 { 819 {
820 printk(KERN_WARNING "%s: Bad packet length of %d bytes.\n", 820 printk(KERN_WARNING "%s: Bad packet length of %d bytes.\n",
821 dev->name, pkt_len); 821 dev->name, pkt_len);
822 lp->stats.tx_errors++; 822 dev->stats.tx_errors++;
823 dev_kfree_skb_any(skb); 823 dev_kfree_skb_any(skb);
824 return; 824 return;
825 } 825 }
@@ -836,7 +836,7 @@ static void cops_rx(struct net_device *dev)
836 if(rsp_type != LAP_RESPONSE) 836 if(rsp_type != LAP_RESPONSE)
837 { 837 {
838 printk(KERN_WARNING "%s: Bad packet type %d.\n", dev->name, rsp_type); 838 printk(KERN_WARNING "%s: Bad packet type %d.\n", dev->name, rsp_type);
839 lp->stats.tx_errors++; 839 dev->stats.tx_errors++;
840 dev_kfree_skb_any(skb); 840 dev_kfree_skb_any(skb);
841 return; 841 return;
842 } 842 }
@@ -846,8 +846,8 @@ static void cops_rx(struct net_device *dev)
846 skb_reset_transport_header(skb); /* Point to data (Skip header). */ 846 skb_reset_transport_header(skb); /* Point to data (Skip header). */
847 847
848 /* Update the counters. */ 848 /* Update the counters. */
849 lp->stats.rx_packets++; 849 dev->stats.rx_packets++;
850 lp->stats.rx_bytes += skb->len; 850 dev->stats.rx_bytes += skb->len;
851 851
852 /* Send packet to a higher place. */ 852 /* Send packet to a higher place. */
853 netif_rx(skb); 853 netif_rx(skb);
@@ -858,7 +858,7 @@ static void cops_timeout(struct net_device *dev)
858 struct cops_local *lp = netdev_priv(dev); 858 struct cops_local *lp = netdev_priv(dev);
859 int ioaddr = dev->base_addr; 859 int ioaddr = dev->base_addr;
860 860
861 lp->stats.tx_errors++; 861 dev->stats.tx_errors++;
862 if(lp->board==TANGENT) 862 if(lp->board==TANGENT)
863 { 863 {
864 if((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0) 864 if((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
@@ -916,8 +916,8 @@ static int cops_send_packet(struct sk_buff *skb, struct net_device *dev)
916 spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */ 916 spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */
917 917
918 /* Done sending packet, update counters and cleanup. */ 918 /* Done sending packet, update counters and cleanup. */
919 lp->stats.tx_packets++; 919 dev->stats.tx_packets++;
920 lp->stats.tx_bytes += skb->len; 920 dev->stats.tx_bytes += skb->len;
921 dev->trans_start = jiffies; 921 dev->trans_start = jiffies;
922 dev_kfree_skb (skb); 922 dev_kfree_skb (skb);
923 return 0; 923 return 0;
@@ -986,15 +986,6 @@ static int cops_close(struct net_device *dev)
986 return 0; 986 return 0;
987} 987}
988 988
989/*
990 * Get the current statistics.
991 * This may be called with the card open or closed.
992 */
993static struct net_device_stats *cops_get_stats(struct net_device *dev)
994{
995 struct cops_local *lp = netdev_priv(dev);
996 return &lp->stats;
997}
998 989
999#ifdef MODULE 990#ifdef MODULE
1000static struct net_device *cops_dev; 991static struct net_device *cops_dev;
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index dc4d49605603..78cc71469136 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -261,7 +261,6 @@ static unsigned char *ltdmacbuf;
261 261
262struct ltpc_private 262struct ltpc_private
263{ 263{
264 struct net_device_stats stats;
265 struct atalk_addr my_addr; 264 struct atalk_addr my_addr;
266}; 265};
267 266
@@ -699,7 +698,6 @@ static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
699static struct timer_list ltpc_timer; 698static struct timer_list ltpc_timer;
700 699
701static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev); 700static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev);
702static struct net_device_stats *ltpc_get_stats(struct net_device *dev);
703 701
704static int read_30 ( struct net_device *dev) 702static int read_30 ( struct net_device *dev)
705{ 703{
@@ -726,8 +724,6 @@ static int sendup_buffer (struct net_device *dev)
726 int dnode, snode, llaptype, len; 724 int dnode, snode, llaptype, len;
727 int sklen; 725 int sklen;
728 struct sk_buff *skb; 726 struct sk_buff *skb;
729 struct ltpc_private *ltpc_priv = netdev_priv(dev);
730 struct net_device_stats *stats = &ltpc_priv->stats;
731 struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf; 727 struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;
732 728
733 if (ltc->command != LT_RCVLAP) { 729 if (ltc->command != LT_RCVLAP) {
@@ -779,8 +775,8 @@ static int sendup_buffer (struct net_device *dev)
779 775
780 skb_reset_transport_header(skb); 776 skb_reset_transport_header(skb);
781 777
782 stats->rx_packets++; 778 dev->stats.rx_packets++;
783 stats->rx_bytes+=skb->len; 779 dev->stats.rx_bytes += skb->len;
784 780
785 /* toss it onwards */ 781 /* toss it onwards */
786 netif_rx(skb); 782 netif_rx(skb);
@@ -904,10 +900,6 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
904 /* in kernel 1.3.xx, on entry skb->data points to ddp header, 900 /* in kernel 1.3.xx, on entry skb->data points to ddp header,
905 * and skb->len is the length of the ddp data + ddp header 901 * and skb->len is the length of the ddp data + ddp header
906 */ 902 */
907
908 struct ltpc_private *ltpc_priv = netdev_priv(dev);
909 struct net_device_stats *stats = &ltpc_priv->stats;
910
911 int i; 903 int i;
912 struct lt_sendlap cbuf; 904 struct lt_sendlap cbuf;
913 unsigned char *hdr; 905 unsigned char *hdr;
@@ -936,20 +928,13 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
936 printk("\n"); 928 printk("\n");
937 } 929 }
938 930
939 stats->tx_packets++; 931 dev->stats.tx_packets++;
940 stats->tx_bytes+=skb->len; 932 dev->stats.tx_bytes += skb->len;
941 933
942 dev_kfree_skb(skb); 934 dev_kfree_skb(skb);
943 return 0; 935 return 0;
944} 936}
945 937
946static struct net_device_stats *ltpc_get_stats(struct net_device *dev)
947{
948 struct ltpc_private *ltpc_priv = netdev_priv(dev);
949 struct net_device_stats *stats = &ltpc_priv->stats;
950 return stats;
951}
952
953/* initialization stuff */ 938/* initialization stuff */
954 939
955static int __init ltpc_probe_dma(int base, int dma) 940static int __init ltpc_probe_dma(int base, int dma)
@@ -1027,6 +1012,12 @@ static int __init ltpc_probe_dma(int base, int dma)
1027 return (want & 2) ? 3 : 1; 1012 return (want & 2) ? 3 : 1;
1028} 1013}
1029 1014
1015static const struct net_device_ops ltpc_netdev = {
1016 .ndo_start_xmit = ltpc_xmit,
1017 .ndo_do_ioctl = ltpc_ioctl,
1018 .ndo_set_multicast_list = set_multicast_list,
1019};
1020
1030struct net_device * __init ltpc_probe(void) 1021struct net_device * __init ltpc_probe(void)
1031{ 1022{
1032 struct net_device *dev; 1023 struct net_device *dev;
@@ -1133,14 +1124,7 @@ struct net_device * __init ltpc_probe(void)
1133 else 1124 else
1134 printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma); 1125 printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma);
1135 1126
1136 /* Fill in the fields of the device structure with ethernet-generic values. */ 1127 dev->netdev_ops = &ltpc_netdev;
1137 dev->hard_start_xmit = ltpc_xmit;
1138 dev->get_stats = ltpc_get_stats;
1139
1140 /* add the ltpc-specific things */
1141 dev->do_ioctl = &ltpc_ioctl;
1142
1143 dev->set_multicast_list = &set_multicast_list;
1144 dev->mc_list = NULL; 1128 dev->mc_list = NULL;
1145 dev->base_addr = io; 1129 dev->base_addr = io;
1146 dev->irq = irq; 1130 dev->irq = irq;
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index ced70799b898..18b566ad4fd1 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -249,6 +249,17 @@ out:
249 return ERR_PTR(err); 249 return ERR_PTR(err);
250} 250}
251 251
252static const struct net_device_ops at1700_netdev_ops = {
253 .ndo_open = net_open,
254 .ndo_stop = net_close,
255 .ndo_start_xmit = net_send_packet,
256 .ndo_set_multicast_list = set_rx_mode,
257 .ndo_tx_timeout = net_tx_timeout,
258 .ndo_change_mtu = eth_change_mtu,
259 .ndo_set_mac_address = eth_mac_addr,
260 .ndo_validate_addr = eth_validate_addr,
261};
262
252/* The Fujitsu datasheet suggests that the NIC be probed for by checking its 263/* The Fujitsu datasheet suggests that the NIC be probed for by checking its
253 "signature", the default bit pattern after a reset. This *doesn't* work -- 264 "signature", the default bit pattern after a reset. This *doesn't* work --
254 there is no way to reset the bus interface without a complete power-cycle! 265 there is no way to reset the bus interface without a complete power-cycle!
@@ -448,13 +459,7 @@ found:
448 if (net_debug) 459 if (net_debug)
449 printk(version); 460 printk(version);
450 461
451 memset(lp, 0, sizeof(struct net_local)); 462 dev->netdev_ops = &at1700_netdev_ops;
452
453 dev->open = net_open;
454 dev->stop = net_close;
455 dev->hard_start_xmit = net_send_packet;
456 dev->set_multicast_list = &set_rx_mode;
457 dev->tx_timeout = net_tx_timeout;
458 dev->watchdog_timeo = TX_TIMEOUT; 463 dev->watchdog_timeo = TX_TIMEOUT;
459 464
460 spin_lock_init(&lp->lock); 465 spin_lock_init(&lp->lock);
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index f901fee79a20..9b75aa630062 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include "be.h" 18#include "be.h"
19#include <asm/div64.h>
19 20
20MODULE_VERSION(DRV_VER); 21MODULE_VERSION(DRV_VER);
21MODULE_DEVICE_TABLE(pci, be_dev_ids); 22MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -290,6 +291,17 @@ static struct net_device_stats *be_get_stats(struct net_device *dev)
290 return &adapter->stats.net_stats; 291 return &adapter->stats.net_stats;
291} 292}
292 293
294static u32 be_calc_rate(u64 bytes, unsigned long ticks)
295{
296 u64 rate = bytes;
297
298 do_div(rate, ticks / HZ);
299 rate <<= 3; /* bytes/sec -> bits/sec */
300 do_div(rate, 1000000ul); /* MB/Sec */
301
302 return rate;
303}
304
293static void be_tx_rate_update(struct be_adapter *adapter) 305static void be_tx_rate_update(struct be_adapter *adapter)
294{ 306{
295 struct be_drvr_stats *stats = drvr_stats(adapter); 307 struct be_drvr_stats *stats = drvr_stats(adapter);
@@ -303,11 +315,9 @@ static void be_tx_rate_update(struct be_adapter *adapter)
303 315
304 /* Update tx rate once in two seconds */ 316 /* Update tx rate once in two seconds */
305 if ((now - stats->be_tx_jiffies) > 2 * HZ) { 317 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
306 u32 r; 318 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
307 r = (stats->be_tx_bytes - stats->be_tx_bytes_prev) / 319 - stats->be_tx_bytes_prev,
308 ((now - stats->be_tx_jiffies) / HZ); 320 now - stats->be_tx_jiffies);
309 r = r / 1000000; /* M bytes/s */
310 stats->be_tx_rate = r * 8; /* M bits/s */
311 stats->be_tx_jiffies = now; 321 stats->be_tx_jiffies = now;
312 stats->be_tx_bytes_prev = stats->be_tx_bytes; 322 stats->be_tx_bytes_prev = stats->be_tx_bytes;
313 } 323 }
@@ -599,7 +609,6 @@ static void be_rx_rate_update(struct be_adapter *adapter)
599{ 609{
600 struct be_drvr_stats *stats = drvr_stats(adapter); 610 struct be_drvr_stats *stats = drvr_stats(adapter);
601 ulong now = jiffies; 611 ulong now = jiffies;
602 u32 rate;
603 612
604 /* Wrapped around */ 613 /* Wrapped around */
605 if (time_before(now, stats->be_rx_jiffies)) { 614 if (time_before(now, stats->be_rx_jiffies)) {
@@ -611,10 +620,9 @@ static void be_rx_rate_update(struct be_adapter *adapter)
611 if ((now - stats->be_rx_jiffies) < 2 * HZ) 620 if ((now - stats->be_rx_jiffies) < 2 * HZ)
612 return; 621 return;
613 622
614 rate = (stats->be_rx_bytes - stats->be_rx_bytes_prev) / 623 stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
615 ((now - stats->be_rx_jiffies) / HZ); 624 - stats->be_rx_bytes_prev,
616 rate = rate / 1000000; /* MB/Sec */ 625 now - stats->be_rx_jiffies);
617 stats->be_rx_rate = rate * 8; /* Mega Bits/Sec */
618 stats->be_rx_jiffies = now; 626 stats->be_rx_jiffies = now;
619 stats->be_rx_bytes_prev = stats->be_rx_bytes; 627 stats->be_rx_bytes_prev = stats->be_rx_bytes;
620} 628}
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index ff6497658a45..7433b88eed7e 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -501,6 +501,21 @@ static void net_poll_controller(struct net_device *dev)
501} 501}
502#endif 502#endif
503 503
504static const struct net_device_ops net_ops = {
505 .ndo_open = net_open,
506 .ndo_stop = net_close,
507 .ndo_tx_timeout = net_timeout,
508 .ndo_start_xmit = net_send_packet,
509 .ndo_get_stats = net_get_stats,
510 .ndo_set_multicast_list = set_multicast_list,
511 .ndo_set_mac_address = set_mac_address,
512#ifdef CONFIG_NET_POLL_CONTROLLER
513 .ndo_poll_controller = net_poll_controller,
514#endif
515 .ndo_change_mtu = eth_change_mtu,
516 .ndo_validate_addr = eth_validate_addr,
517};
518
504/* This is the real probe routine. Linux has a history of friendly device 519/* This is the real probe routine. Linux has a history of friendly device
505 probes on the ISA bus. A good device probes avoids doing writes, and 520 probes on the ISA bus. A good device probes avoids doing writes, and
506 verifies that the correct device exists and functions. 521 verifies that the correct device exists and functions.
@@ -843,17 +858,8 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
843 /* print the ethernet address. */ 858 /* print the ethernet address. */
844 printk(", MAC %pM", dev->dev_addr); 859 printk(", MAC %pM", dev->dev_addr);
845 860
846 dev->open = net_open; 861 dev->netdev_ops = &net_ops;
847 dev->stop = net_close; 862 dev->watchdog_timeo = HZ;
848 dev->tx_timeout = net_timeout;
849 dev->watchdog_timeo = HZ;
850 dev->hard_start_xmit = net_send_packet;
851 dev->get_stats = net_get_stats;
852 dev->set_multicast_list = set_multicast_list;
853 dev->set_mac_address = set_mac_address;
854#ifdef CONFIG_NET_POLL_CONTROLLER
855 dev->poll_controller = net_poll_controller;
856#endif
857 863
858 printk("\n"); 864 printk("\n");
859 if (net_debug) 865 if (net_debug)
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 71eaa431371d..714df2b675e6 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -85,6 +85,8 @@ struct fl_pg_chunk {
85 struct page *page; 85 struct page *page;
86 void *va; 86 void *va;
87 unsigned int offset; 87 unsigned int offset;
88 u64 *p_cnt;
89 DECLARE_PCI_UNMAP_ADDR(mapping);
88}; 90};
89 91
90struct rx_desc; 92struct rx_desc;
@@ -101,6 +103,7 @@ struct sge_fl { /* SGE per free-buffer list state */
101 struct fl_pg_chunk pg_chunk;/* page chunk cache */ 103 struct fl_pg_chunk pg_chunk;/* page chunk cache */
102 unsigned int use_pages; /* whether FL uses pages or sk_buffs */ 104 unsigned int use_pages; /* whether FL uses pages or sk_buffs */
103 unsigned int order; /* order of page allocations */ 105 unsigned int order; /* order of page allocations */
106 unsigned int alloc_size; /* size of allocated buffer */
104 struct rx_desc *desc; /* address of HW Rx descriptor ring */ 107 struct rx_desc *desc; /* address of HW Rx descriptor ring */
105 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ 108 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
106 dma_addr_t phys_addr; /* physical address of HW ring start */ 109 dma_addr_t phys_addr; /* physical address of HW ring start */
@@ -291,6 +294,7 @@ void t3_os_link_fault_handler(struct adapter *adapter, int port_id);
291 294
292void t3_sge_start(struct adapter *adap); 295void t3_sge_start(struct adapter *adap);
293void t3_sge_stop(struct adapter *adap); 296void t3_sge_stop(struct adapter *adap);
297void t3_start_sge_timers(struct adapter *adap);
294void t3_stop_sge_timers(struct adapter *adap); 298void t3_stop_sge_timers(struct adapter *adap);
295void t3_free_sge_resources(struct adapter *adap); 299void t3_free_sge_resources(struct adapter *adap);
296void t3_sge_err_intr_handler(struct adapter *adapter); 300void t3_sge_err_intr_handler(struct adapter *adapter);
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 9ee021e750c8..e508dc32f3ec 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -191,7 +191,8 @@ struct mdio_ops {
191}; 191};
192 192
193struct adapter_info { 193struct adapter_info {
194 unsigned char nports; /* # of ports */ 194 unsigned char nports0; /* # of ports on channel 0 */
195 unsigned char nports1; /* # of ports on channel 1 */
195 unsigned char phy_base_addr; /* MDIO PHY base address */ 196 unsigned char phy_base_addr; /* MDIO PHY base address */
196 unsigned int gpio_out; /* GPIO output settings */ 197 unsigned int gpio_out; /* GPIO output settings */
197 unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */ 198 unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */
@@ -422,6 +423,7 @@ struct adapter_params {
422 unsigned short b_wnd[NCCTRL_WIN]; 423 unsigned short b_wnd[NCCTRL_WIN];
423 424
424 unsigned int nports; /* # of ethernet ports */ 425 unsigned int nports; /* # of ethernet ports */
426 unsigned int chan_map; /* bitmap of in-use Tx channels */
425 unsigned int stats_update_period; /* MAC stats accumulation period */ 427 unsigned int stats_update_period; /* MAC stats accumulation period */
426 unsigned int linkpoll_period; /* link poll period in 0.1s */ 428 unsigned int linkpoll_period; /* link poll period in 0.1s */
427 unsigned int rev; /* chip revision */ 429 unsigned int rev; /* chip revision */
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index d8be89621bf7..2c2aaa741450 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -602,7 +602,6 @@ static int setup_sge_qsets(struct adapter *adap)
602 &adap->params.sge.qset[qset_idx], ntxq, dev, 602 &adap->params.sge.qset[qset_idx], ntxq, dev,
603 netdev_get_tx_queue(dev, j)); 603 netdev_get_tx_queue(dev, j));
604 if (err) { 604 if (err) {
605 t3_stop_sge_timers(adap);
606 t3_free_sge_resources(adap); 605 t3_free_sge_resources(adap);
607 return err; 606 return err;
608 } 607 }
@@ -1046,6 +1045,8 @@ static int cxgb_up(struct adapter *adap)
1046 setup_rss(adap); 1045 setup_rss(adap);
1047 if (!(adap->flags & NAPI_INIT)) 1046 if (!(adap->flags & NAPI_INIT))
1048 init_napi(adap); 1047 init_napi(adap);
1048
1049 t3_start_sge_timers(adap);
1049 adap->flags |= FULL_INIT_DONE; 1050 adap->flags |= FULL_INIT_DONE;
1050 } 1051 }
1051 1052
@@ -2870,6 +2871,9 @@ static void t3_io_resume(struct pci_dev *pdev)
2870{ 2871{
2871 struct adapter *adapter = pci_get_drvdata(pdev); 2872 struct adapter *adapter = pci_get_drvdata(pdev);
2872 2873
2874 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
2875 t3_read_reg(adapter, A_PCIE_PEX_ERR));
2876
2873 t3_resume_ports(adapter); 2877 t3_resume_ports(adapter);
2874} 2878}
2875 2879
@@ -3002,7 +3006,7 @@ static int __devinit init_one(struct pci_dev *pdev,
3002 static int version_printed; 3006 static int version_printed;
3003 3007
3004 int i, err, pci_using_dac = 0; 3008 int i, err, pci_using_dac = 0;
3005 unsigned long mmio_start, mmio_len; 3009 resource_size_t mmio_start, mmio_len;
3006 const struct adapter_info *ai; 3010 const struct adapter_info *ai;
3007 struct adapter *adapter = NULL; 3011 struct adapter *adapter = NULL;
3008 struct port_info *pi; 3012 struct port_info *pi;
@@ -3082,7 +3086,7 @@ static int __devinit init_one(struct pci_dev *pdev,
3082 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task); 3086 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3083 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task); 3087 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3084 3088
3085 for (i = 0; i < ai->nports; ++i) { 3089 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3086 struct net_device *netdev; 3090 struct net_device *netdev;
3087 3091
3088 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS); 3092 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
@@ -3172,7 +3176,7 @@ static int __devinit init_one(struct pci_dev *pdev,
3172 3176
3173out_free_dev: 3177out_free_dev:
3174 iounmap(adapter->regs); 3178 iounmap(adapter->regs);
3175 for (i = ai->nports - 1; i >= 0; --i) 3179 for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3176 if (adapter->port[i]) 3180 if (adapter->port[i])
3177 free_netdev(adapter->port[i]); 3181 free_netdev(adapter->port[i]);
3178 3182
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a7555cb3fa4a..26d3587f3399 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -50,6 +50,7 @@
50#define SGE_RX_COPY_THRES 256 50#define SGE_RX_COPY_THRES 256
51#define SGE_RX_PULL_LEN 128 51#define SGE_RX_PULL_LEN 128
52 52
53#define SGE_PG_RSVD SMP_CACHE_BYTES
53/* 54/*
54 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks. 55 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
55 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs 56 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
@@ -57,8 +58,10 @@
57 */ 58 */
58#define FL0_PG_CHUNK_SIZE 2048 59#define FL0_PG_CHUNK_SIZE 2048
59#define FL0_PG_ORDER 0 60#define FL0_PG_ORDER 0
61#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
60#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192) 62#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
61#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1) 63#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
64#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
62 65
63#define SGE_RX_DROP_THRES 16 66#define SGE_RX_DROP_THRES 16
64#define RX_RECLAIM_PERIOD (HZ/4) 67#define RX_RECLAIM_PERIOD (HZ/4)
@@ -345,13 +348,21 @@ static inline int should_restart_tx(const struct sge_txq *q)
345 return q->in_use - r < (q->size >> 1); 348 return q->in_use - r < (q->size >> 1);
346} 349}
347 350
348static void clear_rx_desc(const struct sge_fl *q, struct rx_sw_desc *d) 351static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
352 struct rx_sw_desc *d)
349{ 353{
350 if (q->use_pages) { 354 if (q->use_pages && d->pg_chunk.page) {
351 if (d->pg_chunk.page) 355 (*d->pg_chunk.p_cnt)--;
352 put_page(d->pg_chunk.page); 356 if (!*d->pg_chunk.p_cnt)
357 pci_unmap_page(pdev,
358 pci_unmap_addr(&d->pg_chunk, mapping),
359 q->alloc_size, PCI_DMA_FROMDEVICE);
360
361 put_page(d->pg_chunk.page);
353 d->pg_chunk.page = NULL; 362 d->pg_chunk.page = NULL;
354 } else { 363 } else {
364 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
365 q->buf_size, PCI_DMA_FROMDEVICE);
355 kfree_skb(d->skb); 366 kfree_skb(d->skb);
356 d->skb = NULL; 367 d->skb = NULL;
357 } 368 }
@@ -372,9 +383,8 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
372 while (q->credits--) { 383 while (q->credits--) {
373 struct rx_sw_desc *d = &q->sdesc[cidx]; 384 struct rx_sw_desc *d = &q->sdesc[cidx];
374 385
375 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr), 386
376 q->buf_size, PCI_DMA_FROMDEVICE); 387 clear_rx_desc(pdev, q, d);
377 clear_rx_desc(q, d);
378 if (++cidx == q->size) 388 if (++cidx == q->size)
379 cidx = 0; 389 cidx = 0;
380 } 390 }
@@ -417,18 +427,39 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
417 return 0; 427 return 0;
418} 428}
419 429
420static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp, 430static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
431 unsigned int gen)
432{
433 d->addr_lo = cpu_to_be32(mapping);
434 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
435 wmb();
436 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
437 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
438 return 0;
439}
440
441static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
442 struct rx_sw_desc *sd, gfp_t gfp,
421 unsigned int order) 443 unsigned int order)
422{ 444{
423 if (!q->pg_chunk.page) { 445 if (!q->pg_chunk.page) {
446 dma_addr_t mapping;
447
424 q->pg_chunk.page = alloc_pages(gfp, order); 448 q->pg_chunk.page = alloc_pages(gfp, order);
425 if (unlikely(!q->pg_chunk.page)) 449 if (unlikely(!q->pg_chunk.page))
426 return -ENOMEM; 450 return -ENOMEM;
427 q->pg_chunk.va = page_address(q->pg_chunk.page); 451 q->pg_chunk.va = page_address(q->pg_chunk.page);
452 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
453 SGE_PG_RSVD;
428 q->pg_chunk.offset = 0; 454 q->pg_chunk.offset = 0;
455 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
456 0, q->alloc_size, PCI_DMA_FROMDEVICE);
457 pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
429 } 458 }
430 sd->pg_chunk = q->pg_chunk; 459 sd->pg_chunk = q->pg_chunk;
431 460
461 prefetch(sd->pg_chunk.p_cnt);
462
432 q->pg_chunk.offset += q->buf_size; 463 q->pg_chunk.offset += q->buf_size;
433 if (q->pg_chunk.offset == (PAGE_SIZE << order)) 464 if (q->pg_chunk.offset == (PAGE_SIZE << order))
434 q->pg_chunk.page = NULL; 465 q->pg_chunk.page = NULL;
@@ -436,6 +467,12 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
436 q->pg_chunk.va += q->buf_size; 467 q->pg_chunk.va += q->buf_size;
437 get_page(q->pg_chunk.page); 468 get_page(q->pg_chunk.page);
438 } 469 }
470
471 if (sd->pg_chunk.offset == 0)
472 *sd->pg_chunk.p_cnt = 1;
473 else
474 *sd->pg_chunk.p_cnt += 1;
475
439 return 0; 476 return 0;
440} 477}
441 478
@@ -460,35 +497,43 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
460 */ 497 */
461static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) 498static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
462{ 499{
463 void *buf_start;
464 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; 500 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
465 struct rx_desc *d = &q->desc[q->pidx]; 501 struct rx_desc *d = &q->desc[q->pidx];
466 unsigned int count = 0; 502 unsigned int count = 0;
467 503
468 while (n--) { 504 while (n--) {
505 dma_addr_t mapping;
469 int err; 506 int err;
470 507
471 if (q->use_pages) { 508 if (q->use_pages) {
472 if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) { 509 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
510 q->order))) {
473nomem: q->alloc_failed++; 511nomem: q->alloc_failed++;
474 break; 512 break;
475 } 513 }
476 buf_start = sd->pg_chunk.va; 514 mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
515 sd->pg_chunk.offset;
516 pci_unmap_addr_set(sd, dma_addr, mapping);
517
518 add_one_rx_chunk(mapping, d, q->gen);
519 pci_dma_sync_single_for_device(adap->pdev, mapping,
520 q->buf_size - SGE_PG_RSVD,
521 PCI_DMA_FROMDEVICE);
477 } else { 522 } else {
478 struct sk_buff *skb = alloc_skb(q->buf_size, gfp); 523 void *buf_start;
479 524
525 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
480 if (!skb) 526 if (!skb)
481 goto nomem; 527 goto nomem;
482 528
483 sd->skb = skb; 529 sd->skb = skb;
484 buf_start = skb->data; 530 buf_start = skb->data;
485 } 531 err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
486 532 q->gen, adap->pdev);
487 err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen, 533 if (unlikely(err)) {
488 adap->pdev); 534 clear_rx_desc(adap->pdev, q, sd);
489 if (unlikely(err)) { 535 break;
490 clear_rx_desc(q, sd); 536 }
491 break;
492 } 537 }
493 538
494 d++; 539 d++;
@@ -795,19 +840,19 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
795 struct sk_buff *newskb, *skb; 840 struct sk_buff *newskb, *skb;
796 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 841 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
797 842
798 newskb = skb = q->pg_skb; 843 dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
799 844
845 newskb = skb = q->pg_skb;
800 if (!skb && (len <= SGE_RX_COPY_THRES)) { 846 if (!skb && (len <= SGE_RX_COPY_THRES)) {
801 newskb = alloc_skb(len, GFP_ATOMIC); 847 newskb = alloc_skb(len, GFP_ATOMIC);
802 if (likely(newskb != NULL)) { 848 if (likely(newskb != NULL)) {
803 __skb_put(newskb, len); 849 __skb_put(newskb, len);
804 pci_dma_sync_single_for_cpu(adap->pdev, 850 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
805 pci_unmap_addr(sd, dma_addr), len,
806 PCI_DMA_FROMDEVICE); 851 PCI_DMA_FROMDEVICE);
807 memcpy(newskb->data, sd->pg_chunk.va, len); 852 memcpy(newskb->data, sd->pg_chunk.va, len);
808 pci_dma_sync_single_for_device(adap->pdev, 853 pci_dma_sync_single_for_device(adap->pdev, dma_addr,
809 pci_unmap_addr(sd, dma_addr), len, 854 len,
810 PCI_DMA_FROMDEVICE); 855 PCI_DMA_FROMDEVICE);
811 } else if (!drop_thres) 856 } else if (!drop_thres)
812 return NULL; 857 return NULL;
813recycle: 858recycle:
@@ -820,16 +865,25 @@ recycle:
820 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres))) 865 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
821 goto recycle; 866 goto recycle;
822 867
868 prefetch(sd->pg_chunk.p_cnt);
869
823 if (!skb) 870 if (!skb)
824 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC); 871 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
872
825 if (unlikely(!newskb)) { 873 if (unlikely(!newskb)) {
826 if (!drop_thres) 874 if (!drop_thres)
827 return NULL; 875 return NULL;
828 goto recycle; 876 goto recycle;
829 } 877 }
830 878
831 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr), 879 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
832 fl->buf_size, PCI_DMA_FROMDEVICE); 880 PCI_DMA_FROMDEVICE);
881 (*sd->pg_chunk.p_cnt)--;
882 if (!*sd->pg_chunk.p_cnt)
883 pci_unmap_page(adap->pdev,
884 pci_unmap_addr(&sd->pg_chunk, mapping),
885 fl->alloc_size,
886 PCI_DMA_FROMDEVICE);
833 if (!skb) { 887 if (!skb) {
834 __skb_put(newskb, SGE_RX_PULL_LEN); 888 __skb_put(newskb, SGE_RX_PULL_LEN);
835 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN); 889 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
@@ -1089,7 +1143,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1089 struct tx_desc *d = &q->desc[pidx]; 1143 struct tx_desc *d = &q->desc[pidx];
1090 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d; 1144 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1091 1145
1092 cpl->len = htonl(skb->len | 0x80000000); 1146 cpl->len = htonl(skb->len);
1093 cntrl = V_TXPKT_INTF(pi->port_id); 1147 cntrl = V_TXPKT_INTF(pi->port_id);
1094 1148
1095 if (vlan_tx_tag_present(skb) && pi->vlan_grp) 1149 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
@@ -1958,8 +2012,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1958 skb_pull(skb, sizeof(*p) + pad); 2012 skb_pull(skb, sizeof(*p) + pad);
1959 skb->protocol = eth_type_trans(skb, adap->port[p->iff]); 2013 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
1960 pi = netdev_priv(skb->dev); 2014 pi = netdev_priv(skb->dev);
1961 if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && p->csum == htons(0xffff) && 2015 if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid &&
1962 !p->fragment) { 2016 p->csum == htons(0xffff) && !p->fragment) {
1963 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; 2017 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1964 skb->ip_summed = CHECKSUM_UNNECESSARY; 2018 skb->ip_summed = CHECKSUM_UNNECESSARY;
1965 } else 2019 } else
@@ -2034,10 +2088,19 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2034 fl->credits--; 2088 fl->credits--;
2035 2089
2036 len -= offset; 2090 len -= offset;
2037 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr), 2091 pci_dma_sync_single_for_cpu(adap->pdev,
2038 fl->buf_size, PCI_DMA_FROMDEVICE); 2092 pci_unmap_addr(sd, dma_addr),
2093 fl->buf_size - SGE_PG_RSVD,
2094 PCI_DMA_FROMDEVICE);
2039 2095
2040 prefetch(&qs->lro_frag_tbl); 2096 (*sd->pg_chunk.p_cnt)--;
2097 if (!*sd->pg_chunk.p_cnt)
2098 pci_unmap_page(adap->pdev,
2099 pci_unmap_addr(&sd->pg_chunk, mapping),
2100 fl->alloc_size,
2101 PCI_DMA_FROMDEVICE);
2102
2103 prefetch(qs->lro_va);
2041 2104
2042 rx_frag += nr_frags; 2105 rx_frag += nr_frags;
2043 rx_frag->page = sd->pg_chunk.page; 2106 rx_frag->page = sd->pg_chunk.page;
@@ -2047,6 +2110,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2047 qs->lro_frag_tbl.nr_frags++; 2110 qs->lro_frag_tbl.nr_frags++;
2048 qs->lro_frag_tbl.len = frag_len; 2111 qs->lro_frag_tbl.len = frag_len;
2049 2112
2113
2050 if (!complete) 2114 if (!complete)
2051 return; 2115 return;
2052 2116
@@ -2236,6 +2300,8 @@ no_mem:
2236 if (fl->use_pages) { 2300 if (fl->use_pages) {
2237 void *addr = fl->sdesc[fl->cidx].pg_chunk.va; 2301 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2238 2302
2303 prefetch(&qs->lro_frag_tbl);
2304
2239 prefetch(addr); 2305 prefetch(addr);
2240#if L1_CACHE_BYTES < 128 2306#if L1_CACHE_BYTES < 128
2241 prefetch(addr + L1_CACHE_BYTES); 2307 prefetch(addr + L1_CACHE_BYTES);
@@ -2972,21 +3038,23 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2972 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; 3038 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
2973 q->fl[0].order = FL0_PG_ORDER; 3039 q->fl[0].order = FL0_PG_ORDER;
2974 q->fl[1].order = FL1_PG_ORDER; 3040 q->fl[1].order = FL1_PG_ORDER;
3041 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3042 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
2975 3043
2976 spin_lock_irq(&adapter->sge.reg_lock); 3044 spin_lock_irq(&adapter->sge.reg_lock);
2977 3045
2978 /* FL threshold comparison uses < */ 3046 /* FL threshold comparison uses < */
2979 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, 3047 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2980 q->rspq.phys_addr, q->rspq.size, 3048 q->rspq.phys_addr, q->rspq.size,
2981 q->fl[0].buf_size, 1, 0); 3049 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
2982 if (ret) 3050 if (ret)
2983 goto err_unlock; 3051 goto err_unlock;
2984 3052
2985 for (i = 0; i < SGE_RXQ_PER_SET; ++i) { 3053 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2986 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, 3054 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2987 q->fl[i].phys_addr, q->fl[i].size, 3055 q->fl[i].phys_addr, q->fl[i].size,
2988 q->fl[i].buf_size, p->cong_thres, 1, 3056 q->fl[i].buf_size - SGE_PG_RSVD,
2989 0); 3057 p->cong_thres, 1, 0);
2990 if (ret) 3058 if (ret)
2991 goto err_unlock; 3059 goto err_unlock;
2992 } 3060 }
@@ -3044,9 +3112,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
3044 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | 3112 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
3045 V_NEWTIMER(q->rspq.holdoff_tmr)); 3113 V_NEWTIMER(q->rspq.holdoff_tmr));
3046 3114
3047 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
3048 mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3049
3050 return 0; 3115 return 0;
3051 3116
3052err_unlock: 3117err_unlock:
@@ -3057,6 +3122,27 @@ err:
3057} 3122}
3058 3123
3059/** 3124/**
3125 * t3_start_sge_timers - start SGE timer call backs
3126 * @adap: the adapter
3127 *
3128 * Starts each SGE queue set's timer call back
3129 */
3130void t3_start_sge_timers(struct adapter *adap)
3131{
3132 int i;
3133
3134 for (i = 0; i < SGE_QSETS; ++i) {
3135 struct sge_qset *q = &adap->sge.qs[i];
3136
3137 if (q->tx_reclaim_timer.function)
3138 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
3139
3140 if (q->rx_reclaim_timer.function)
3141 mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3142 }
3143}
3144
3145/**
3060 * t3_stop_sge_timers - stop SGE timer call backs 3146 * t3_stop_sge_timers - stop SGE timer call backs
3061 * @adap: the adapter 3147 * @adap: the adapter
3062 * 3148 *
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index ff262a04ded0..31ed31a3428b 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -493,20 +493,20 @@ int t3_phy_lasi_intr_handler(struct cphy *phy)
493} 493}
494 494
495static const struct adapter_info t3_adap_info[] = { 495static const struct adapter_info t3_adap_info[] = {
496 {2, 0, 496 {1, 1, 0,
497 F_GPIO2_OEN | F_GPIO4_OEN | 497 F_GPIO2_OEN | F_GPIO4_OEN |
498 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0, 498 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
499 &mi1_mdio_ops, "Chelsio PE9000"}, 499 &mi1_mdio_ops, "Chelsio PE9000"},
500 {2, 0, 500 {1, 1, 0,
501 F_GPIO2_OEN | F_GPIO4_OEN | 501 F_GPIO2_OEN | F_GPIO4_OEN |
502 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0, 502 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
503 &mi1_mdio_ops, "Chelsio T302"}, 503 &mi1_mdio_ops, "Chelsio T302"},
504 {1, 0, 504 {1, 0, 0,
505 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN | 505 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
506 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 506 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
507 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, 507 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
508 &mi1_mdio_ext_ops, "Chelsio T310"}, 508 &mi1_mdio_ext_ops, "Chelsio T310"},
509 {2, 0, 509 {1, 1, 0,
510 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN | 510 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
511 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL | 511 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
512 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 512 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
@@ -514,7 +514,7 @@ static const struct adapter_info t3_adap_info[] = {
514 &mi1_mdio_ext_ops, "Chelsio T320"}, 514 &mi1_mdio_ext_ops, "Chelsio T320"},
515 {}, 515 {},
516 {}, 516 {},
517 {1, 0, 517 {1, 0, 0,
518 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN | 518 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
519 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 519 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
520 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, 520 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
@@ -2128,16 +2128,40 @@ void t3_port_intr_clear(struct adapter *adapter, int idx)
2128static int t3_sge_write_context(struct adapter *adapter, unsigned int id, 2128static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
2129 unsigned int type) 2129 unsigned int type)
2130{ 2130{
2131 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff); 2131 if (type == F_RESPONSEQ) {
2132 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff); 2132 /*
2133 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff); 2133 * Can't write the Response Queue Context bits for
2134 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff); 2134 * Interrupt Armed or the Reserve bits after the chip
2135 * has been initialized out of reset. Writing to these
2136 * bits can confuse the hardware.
2137 */
2138 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2139 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2140 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2141 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2142 } else {
2143 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2144 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2145 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2146 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2147 }
2135 t3_write_reg(adapter, A_SG_CONTEXT_CMD, 2148 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2136 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id)); 2149 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2137 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 2150 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2138 0, SG_CONTEXT_CMD_ATTEMPTS, 1); 2151 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2139} 2152}
2140 2153
2154/**
2155 * clear_sge_ctxt - completely clear an SGE context
2156 * @adapter: the adapter
2157 * @id: the context id
2158 * @type: the context type
2159 *
2160 * Completely clear an SGE context. Used predominantly at post-reset
2161 * initialization. Note in particular that we don't skip writing to any
2162 * "sensitive bits" in the contexts the way that t3_sge_write_context()
2163 * does ...
2164 */
2141static int clear_sge_ctxt(struct adapter *adap, unsigned int id, 2165static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2142 unsigned int type) 2166 unsigned int type)
2143{ 2167{
@@ -2145,7 +2169,14 @@ static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2145 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0); 2169 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2146 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0); 2170 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2147 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0); 2171 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2148 return t3_sge_write_context(adap, id, type); 2172 t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2173 t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2174 t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2175 t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2176 t3_write_reg(adap, A_SG_CONTEXT_CMD,
2177 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2178 return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2179 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2149} 2180}
2150 2181
2151/** 2182/**
@@ -2729,10 +2760,10 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
2729 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64)); 2760 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2730 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) | 2761 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2731 F_MTUENABLE | V_WINDOWSCALEMODE(1) | 2762 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2732 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1)); 2763 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2733 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) | 2764 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2734 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) | 2765 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2735 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) | 2766 V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2736 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1)); 2767 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2737 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO, 2768 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2738 F_IPV6ENABLE | F_NICMODE); 2769 F_IPV6ENABLE | F_NICMODE);
@@ -3196,20 +3227,22 @@ int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3196} 3227}
3197 3228
3198/* 3229/*
3199 * Perform the bits of HW initialization that are dependent on the number 3230 * Perform the bits of HW initialization that are dependent on the Tx
3200 * of available ports. 3231 * channels being used.
3201 */ 3232 */
3202static void init_hw_for_avail_ports(struct adapter *adap, int nports) 3233static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
3203{ 3234{
3204 int i; 3235 int i;
3205 3236
3206 if (nports == 1) { 3237 if (chan_map != 3) { /* one channel */
3207 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0); 3238 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3208 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0); 3239 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3209 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN | 3240 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3210 F_PORT0ACTIVE | F_ENFORCEPKT); 3241 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3211 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff); 3242 F_TPTXPORT1EN | F_PORT1ACTIVE));
3212 } else { 3243 t3_write_reg(adap, A_PM1_TX_CFG,
3244 chan_map == 1 ? 0xffffffff : 0);
3245 } else { /* two channels */
3213 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN); 3246 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3214 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB); 3247 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3215 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT, 3248 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
@@ -3517,7 +3550,7 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
3517 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff); 3550 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3518 t3_write_reg(adapter, A_PM1_RX_MODE, 0); 3551 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3519 t3_write_reg(adapter, A_PM1_TX_MODE, 0); 3552 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3520 init_hw_for_avail_ports(adapter, adapter->params.nports); 3553 chan_init_hw(adapter, adapter->params.chan_map);
3521 t3_sge_init(adapter, &adapter->params.sge); 3554 t3_sge_init(adapter, &adapter->params.sge);
3522 3555
3523 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter)); 3556 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
@@ -3754,7 +3787,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3754 get_pci_mode(adapter, &adapter->params.pci); 3787 get_pci_mode(adapter, &adapter->params.pci);
3755 3788
3756 adapter->params.info = ai; 3789 adapter->params.info = ai;
3757 adapter->params.nports = ai->nports; 3790 adapter->params.nports = ai->nports0 + ai->nports1;
3791 adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
3758 adapter->params.rev = t3_read_reg(adapter, A_PL_REV); 3792 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3759 /* 3793 /*
3760 * We used to only run the "adapter check task" once a second if 3794 * We used to only run the "adapter check task" once a second if
@@ -3785,7 +3819,7 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3785 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX"); 3819 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3786 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM"); 3820 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3787 3821
3788 p->nchan = ai->nports; 3822 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3789 p->pmrx_size = t3_mc7_size(&adapter->pmrx); 3823 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3790 p->pmtx_size = t3_mc7_size(&adapter->pmtx); 3824 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3791 p->cm_size = t3_mc7_size(&adapter->cm); 3825 p->cm_size = t3_mc7_size(&adapter->cm);
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 55625dbbae5a..357f565851ed 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -566,6 +566,18 @@ MODULE_LICENSE("GPL");
566 outw(CSR0, DEPCA_ADDR);\ 566 outw(CSR0, DEPCA_ADDR);\
567 outw(STOP, DEPCA_DATA) 567 outw(STOP, DEPCA_DATA)
568 568
569static const struct net_device_ops depca_netdev_ops = {
570 .ndo_open = depca_open,
571 .ndo_start_xmit = depca_start_xmit,
572 .ndo_stop = depca_close,
573 .ndo_set_multicast_list = set_multicast_list,
574 .ndo_do_ioctl = depca_ioctl,
575 .ndo_tx_timeout = depca_tx_timeout,
576 .ndo_change_mtu = eth_change_mtu,
577 .ndo_set_mac_address = eth_mac_addr,
578 .ndo_validate_addr = eth_validate_addr,
579};
580
569static int __init depca_hw_init (struct net_device *dev, struct device *device) 581static int __init depca_hw_init (struct net_device *dev, struct device *device)
570{ 582{
571 struct depca_private *lp; 583 struct depca_private *lp;
@@ -793,12 +805,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
793 } 805 }
794 806
795 /* The DEPCA-specific entries in the device structure. */ 807 /* The DEPCA-specific entries in the device structure. */
796 dev->open = &depca_open; 808 dev->netdev_ops = &depca_netdev_ops;
797 dev->hard_start_xmit = &depca_start_xmit;
798 dev->stop = &depca_close;
799 dev->set_multicast_list = &set_multicast_list;
800 dev->do_ioctl = &depca_ioctl;
801 dev->tx_timeout = depca_tx_timeout;
802 dev->watchdog_timeo = TX_TIMEOUT; 809 dev->watchdog_timeo = TX_TIMEOUT;
803 810
804 dev->mem_start = 0; 811 dev->mem_start = 0;
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index e187c88ae145..cc2ab6412c73 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -739,6 +739,17 @@ static void __init eepro_print_info (struct net_device *dev)
739 739
740static const struct ethtool_ops eepro_ethtool_ops; 740static const struct ethtool_ops eepro_ethtool_ops;
741 741
742static const struct net_device_ops eepro_netdev_ops = {
743 .ndo_open = eepro_open,
744 .ndo_stop = eepro_close,
745 .ndo_start_xmit = eepro_send_packet,
746 .ndo_set_multicast_list = set_multicast_list,
747 .ndo_tx_timeout = eepro_tx_timeout,
748 .ndo_change_mtu = eth_change_mtu,
749 .ndo_set_mac_address = eth_mac_addr,
750 .ndo_validate_addr = eth_validate_addr,
751};
752
742/* This is the real probe routine. Linux has a history of friendly device 753/* This is the real probe routine. Linux has a history of friendly device
743 probes on the ISA bus. A good device probe avoids doing writes, and 754 probes on the ISA bus. A good device probe avoids doing writes, and
744 verifies that the correct device exists and functions. */ 755 verifies that the correct device exists and functions. */
@@ -851,11 +862,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
851 } 862 }
852 } 863 }
853 864
854 dev->open = eepro_open; 865 dev->netdev_ops = &eepro_netdev_ops;
855 dev->stop = eepro_close;
856 dev->hard_start_xmit = eepro_send_packet;
857 dev->set_multicast_list = &set_multicast_list;
858 dev->tx_timeout = eepro_tx_timeout;
859 dev->watchdog_timeo = TX_TIMEOUT; 866 dev->watchdog_timeo = TX_TIMEOUT;
860 dev->ethtool_ops = &eepro_ethtool_ops; 867 dev->ethtool_ops = &eepro_ethtool_ops;
861 868
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 9ff3f2f5e382..1686dca28748 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1043,6 +1043,17 @@ static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
1043 lp->last_tx = jiffies; 1043 lp->last_tx = jiffies;
1044} 1044}
1045 1045
1046static const struct net_device_ops eexp_netdev_ops = {
1047 .ndo_open = eexp_open,
1048 .ndo_stop = eexp_close,
1049 .ndo_start_xmit = eexp_xmit,
1050 .ndo_set_multicast_list = eexp_set_multicast,
1051 .ndo_tx_timeout = eexp_timeout,
1052 .ndo_change_mtu = eth_change_mtu,
1053 .ndo_set_mac_address = eth_mac_addr,
1054 .ndo_validate_addr = eth_validate_addr,
1055};
1056
1046/* 1057/*
1047 * Sanity check the suspected EtherExpress card 1058 * Sanity check the suspected EtherExpress card
1048 * Read hardware address, reset card, size memory and initialize buffer 1059 * Read hardware address, reset card, size memory and initialize buffer
@@ -1163,11 +1174,7 @@ static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr)
1163 lp->rx_buf_start = TX_BUF_START + (lp->num_tx_bufs*TX_BUF_SIZE); 1174 lp->rx_buf_start = TX_BUF_START + (lp->num_tx_bufs*TX_BUF_SIZE);
1164 lp->width = buswidth; 1175 lp->width = buswidth;
1165 1176
1166 dev->open = eexp_open; 1177 dev->netdev_ops = &eexp_netdev_ops;
1167 dev->stop = eexp_close;
1168 dev->hard_start_xmit = eexp_xmit;
1169 dev->set_multicast_list = &eexp_set_multicast;
1170 dev->tx_timeout = eexp_timeout;
1171 dev->watchdog_timeo = 2*HZ; 1178 dev->watchdog_timeo = 2*HZ;
1172 1179
1173 return register_netdev(dev); 1180 return register_netdev(dev);
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 5c048f2fd74f..0d8b6da046f2 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -475,6 +475,17 @@ out:
475} 475}
476#endif 476#endif
477 477
478static const struct net_device_ops eth16i_netdev_ops = {
479 .ndo_open = eth16i_open,
480 .ndo_stop = eth16i_close,
481 .ndo_start_xmit = eth16i_tx,
482 .ndo_set_multicast_list = eth16i_multicast,
483 .ndo_tx_timeout = eth16i_timeout,
484 .ndo_change_mtu = eth_change_mtu,
485 .ndo_set_mac_address = eth_mac_addr,
486 .ndo_validate_addr = eth_validate_addr,
487};
488
478static int __init eth16i_probe1(struct net_device *dev, int ioaddr) 489static int __init eth16i_probe1(struct net_device *dev, int ioaddr)
479{ 490{
480 struct eth16i_local *lp = netdev_priv(dev); 491 struct eth16i_local *lp = netdev_priv(dev);
@@ -549,12 +560,7 @@ static int __init eth16i_probe1(struct net_device *dev, int ioaddr)
549 BITCLR(ioaddr + CONFIG_REG_1, POWERUP); 560 BITCLR(ioaddr + CONFIG_REG_1, POWERUP);
550 561
551 /* Initialize the device structure */ 562 /* Initialize the device structure */
552 memset(lp, 0, sizeof(struct eth16i_local)); 563 dev->netdev_ops = &eth16i_netdev_ops;
553 dev->open = eth16i_open;
554 dev->stop = eth16i_close;
555 dev->hard_start_xmit = eth16i_tx;
556 dev->set_multicast_list = eth16i_multicast;
557 dev->tx_timeout = eth16i_timeout;
558 dev->watchdog_timeo = TX_TIMEOUT; 564 dev->watchdog_timeo = TX_TIMEOUT;
559 spin_lock_init(&lp->lock); 565 spin_lock_init(&lp->lock);
560 566
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
new file mode 100644
index 000000000000..91a9b1a33764
--- /dev/null
+++ b/drivers/net/ethoc.c
@@ -0,0 +1,1112 @@
1/*
2 * linux/drivers/net/ethoc.c
3 *
4 * Copyright (C) 2007-2008 Avionic Design Development GmbH
5 * Copyright (C) 2008-2009 Avionic Design GmbH
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Written by Thierry Reding <thierry.reding@avionic-design.de>
12 */
13
14#include <linux/etherdevice.h>
15#include <linux/crc32.h>
16#include <linux/io.h>
17#include <linux/mii.h>
18#include <linux/phy.h>
19#include <linux/platform_device.h>
20#include <net/ethoc.h>
21
22/* register offsets */
23#define MODER 0x00
24#define INT_SOURCE 0x04
25#define INT_MASK 0x08
26#define IPGT 0x0c
27#define IPGR1 0x10
28#define IPGR2 0x14
29#define PACKETLEN 0x18
30#define COLLCONF 0x1c
31#define TX_BD_NUM 0x20
32#define CTRLMODER 0x24
33#define MIIMODER 0x28
34#define MIICOMMAND 0x2c
35#define MIIADDRESS 0x30
36#define MIITX_DATA 0x34
37#define MIIRX_DATA 0x38
38#define MIISTATUS 0x3c
39#define MAC_ADDR0 0x40
40#define MAC_ADDR1 0x44
41#define ETH_HASH0 0x48
42#define ETH_HASH1 0x4c
43#define ETH_TXCTRL 0x50
44
45/* mode register */
46#define MODER_RXEN (1 << 0) /* receive enable */
47#define MODER_TXEN (1 << 1) /* transmit enable */
48#define MODER_NOPRE (1 << 2) /* no preamble */
49#define MODER_BRO (1 << 3) /* broadcast address */
50#define MODER_IAM (1 << 4) /* individual address mode */
51#define MODER_PRO (1 << 5) /* promiscuous mode */
52#define MODER_IFG (1 << 6) /* interframe gap for incoming frames */
53#define MODER_LOOP (1 << 7) /* loopback */
54#define MODER_NBO (1 << 8) /* no back-off */
55#define MODER_EDE (1 << 9) /* excess defer enable */
56#define MODER_FULLD (1 << 10) /* full duplex */
57#define MODER_RESET (1 << 11) /* FIXME: reset (undocumented) */
58#define MODER_DCRC (1 << 12) /* delayed CRC enable */
59#define MODER_CRC (1 << 13) /* CRC enable */
60#define MODER_HUGE (1 << 14) /* huge packets enable */
61#define MODER_PAD (1 << 15) /* padding enabled */
62#define MODER_RSM (1 << 16) /* receive small packets */
63
64/* interrupt source and mask registers */
65#define INT_MASK_TXF (1 << 0) /* transmit frame */
66#define INT_MASK_TXE (1 << 1) /* transmit error */
67#define INT_MASK_RXF (1 << 2) /* receive frame */
68#define INT_MASK_RXE (1 << 3) /* receive error */
69#define INT_MASK_BUSY (1 << 4)
70#define INT_MASK_TXC (1 << 5) /* transmit control frame */
71#define INT_MASK_RXC (1 << 6) /* receive control frame */
72
73#define INT_MASK_TX (INT_MASK_TXF | INT_MASK_TXE)
74#define INT_MASK_RX (INT_MASK_RXF | INT_MASK_RXE)
75
76#define INT_MASK_ALL ( \
77 INT_MASK_TXF | INT_MASK_TXE | \
78 INT_MASK_RXF | INT_MASK_RXE | \
79 INT_MASK_TXC | INT_MASK_RXC | \
80 INT_MASK_BUSY \
81 )
82
83/* packet length register */
84#define PACKETLEN_MIN(min) (((min) & 0xffff) << 16)
85#define PACKETLEN_MAX(max) (((max) & 0xffff) << 0)
86#define PACKETLEN_MIN_MAX(min, max) (PACKETLEN_MIN(min) | \
87 PACKETLEN_MAX(max))
88
89/* transmit buffer number register */
90#define TX_BD_NUM_VAL(x) (((x) <= 0x80) ? (x) : 0x80)
91
92/* control module mode register */
93#define CTRLMODER_PASSALL (1 << 0) /* pass all receive frames */
94#define CTRLMODER_RXFLOW (1 << 1) /* receive control flow */
95#define CTRLMODER_TXFLOW (1 << 2) /* transmit control flow */
96
97/* MII mode register */
98#define MIIMODER_CLKDIV(x) ((x) & 0xfe) /* needs to be an even number */
99#define MIIMODER_NOPRE (1 << 8) /* no preamble */
100
101/* MII command register */
102#define MIICOMMAND_SCAN (1 << 0) /* scan status */
103#define MIICOMMAND_READ (1 << 1) /* read status */
104#define MIICOMMAND_WRITE (1 << 2) /* write control data */
105
106/* MII address register */
107#define MIIADDRESS_FIAD(x) (((x) & 0x1f) << 0)
108#define MIIADDRESS_RGAD(x) (((x) & 0x1f) << 8)
109#define MIIADDRESS_ADDR(phy, reg) (MIIADDRESS_FIAD(phy) | \
110 MIIADDRESS_RGAD(reg))
111
112/* MII transmit data register */
113#define MIITX_DATA_VAL(x) ((x) & 0xffff)
114
115/* MII receive data register */
116#define MIIRX_DATA_VAL(x) ((x) & 0xffff)
117
118/* MII status register */
119#define MIISTATUS_LINKFAIL (1 << 0)
120#define MIISTATUS_BUSY (1 << 1)
121#define MIISTATUS_INVALID (1 << 2)
122
123/* TX buffer descriptor */
124#define TX_BD_CS (1 << 0) /* carrier sense lost */
125#define TX_BD_DF (1 << 1) /* defer indication */
126#define TX_BD_LC (1 << 2) /* late collision */
127#define TX_BD_RL (1 << 3) /* retransmission limit */
128#define TX_BD_RETRY_MASK (0x00f0)
129#define TX_BD_RETRY(x) (((x) & 0x00f0) >> 4)
130#define TX_BD_UR (1 << 8) /* transmitter underrun */
131#define TX_BD_CRC (1 << 11) /* TX CRC enable */
132#define TX_BD_PAD (1 << 12) /* pad enable for short packets */
133#define TX_BD_WRAP (1 << 13)
134#define TX_BD_IRQ (1 << 14) /* interrupt request enable */
135#define TX_BD_READY (1 << 15) /* TX buffer ready */
136#define TX_BD_LEN(x) (((x) & 0xffff) << 16)
137#define TX_BD_LEN_MASK (0xffff << 16)
138
139#define TX_BD_STATS (TX_BD_CS | TX_BD_DF | TX_BD_LC | \
140 TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)
141
142/* RX buffer descriptor */
143#define RX_BD_LC (1 << 0) /* late collision */
144#define RX_BD_CRC (1 << 1) /* RX CRC error */
145#define RX_BD_SF (1 << 2) /* short frame */
146#define RX_BD_TL (1 << 3) /* too long */
147#define RX_BD_DN (1 << 4) /* dribble nibble */
148#define RX_BD_IS (1 << 5) /* invalid symbol */
149#define RX_BD_OR (1 << 6) /* receiver overrun */
150#define RX_BD_MISS (1 << 7)
151#define RX_BD_CF (1 << 8) /* control frame */
152#define RX_BD_WRAP (1 << 13)
153#define RX_BD_IRQ (1 << 14) /* interrupt request enable */
154#define RX_BD_EMPTY (1 << 15)
155#define RX_BD_LEN(x) (((x) & 0xffff) << 16)
156
157#define RX_BD_STATS (RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
158 RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)
159
160#define ETHOC_BUFSIZ 1536
161#define ETHOC_ZLEN 64
162#define ETHOC_BD_BASE 0x400
163#define ETHOC_TIMEOUT (HZ / 2)
164#define ETHOC_MII_TIMEOUT (1 + (HZ / 5))
165
/**
 * struct ethoc - driver-private device structure
 * @iobase:	pointer to I/O memory region (MAC registers)
 * @membase:	pointer to buffer memory region (packet data)
 * @num_tx:	number of send buffers
 * @cur_tx:	last send buffer written
 * @dty_tx:	last buffer actually sent
 * @num_rx:	number of receive buffers
 * @cur_rx:	current receive buffer
 * @netdev:	pointer to network device structure
 * @napi:	NAPI structure
 * @stats:	network device statistics
 * @msg_enable:	device state flags
 * @rx_lock:	receive lock (NOTE(review): initialized in probe but never
 *		taken anywhere in this file — confirm whether RX needs
 *		locking or NAPI serialization is considered sufficient)
 * @lock:	device lock, protects the TX descriptor indices
 * @phy:	attached PHY
 * @mdio:	MDIO bus for PHY access
 * @phy_id:	address of attached PHY (-1 means autoselect, see
 *		ethoc_mdio_probe())
 */
struct ethoc {
	void __iomem *iobase;
	void __iomem *membase;

	unsigned int num_tx;
	unsigned int cur_tx;
	unsigned int dty_tx;

	unsigned int num_rx;
	unsigned int cur_rx;

	struct net_device *netdev;
	struct napi_struct napi;
	struct net_device_stats stats;
	u32 msg_enable;

	spinlock_t rx_lock;
	spinlock_t lock;

	struct phy_device *phy;
	struct mii_bus *mdio;
	s8 phy_id;
};
208
/**
 * struct ethoc_bd - buffer descriptor
 * @stat: buffer statistics; low 16 bits are control/status flags, high
 *        16 bits carry the frame length (see TX_BD_LEN()/RX_BD_LEN())
 * @addr: physical memory address of the packet buffer
 */
struct ethoc_bd {
	u32 stat;
	u32 addr;
};
218
219static u32 ethoc_read(struct ethoc *dev, loff_t offset)
220{
221 return ioread32(dev->iobase + offset);
222}
223
224static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
225{
226 iowrite32(data, dev->iobase + offset);
227}
228
229static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd)
230{
231 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
232 bd->stat = ethoc_read(dev, offset + 0);
233 bd->addr = ethoc_read(dev, offset + 4);
234}
235
236static void ethoc_write_bd(struct ethoc *dev, int index,
237 const struct ethoc_bd *bd)
238{
239 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
240 ethoc_write(dev, offset + 0, bd->stat);
241 ethoc_write(dev, offset + 4, bd->addr);
242}
243
244static void ethoc_enable_irq(struct ethoc *dev, u32 mask)
245{
246 u32 imask = ethoc_read(dev, INT_MASK);
247 imask |= mask;
248 ethoc_write(dev, INT_MASK, imask);
249}
250
251static void ethoc_disable_irq(struct ethoc *dev, u32 mask)
252{
253 u32 imask = ethoc_read(dev, INT_MASK);
254 imask &= ~mask;
255 ethoc_write(dev, INT_MASK, imask);
256}
257
/* Acknowledge pending interrupts by writing the @mask bits back to
 * INT_SOURCE. */
static void ethoc_ack_irq(struct ethoc *dev, u32 mask)
{
	ethoc_write(dev, INT_SOURCE, mask);
}
262
263static void ethoc_enable_rx_and_tx(struct ethoc *dev)
264{
265 u32 mode = ethoc_read(dev, MODER);
266 mode |= MODER_RXEN | MODER_TXEN;
267 ethoc_write(dev, MODER, mode);
268}
269
270static void ethoc_disable_rx_and_tx(struct ethoc *dev)
271{
272 u32 mode = ethoc_read(dev, MODER);
273 mode &= ~(MODER_RXEN | MODER_TXEN);
274 ethoc_write(dev, MODER, mode);
275}
276
/*
 * ethoc_init_ring() - initialize the TX and RX descriptor rings
 *
 * The descriptor table lives in the controller at ETHOC_BD_BASE: num_tx
 * TX descriptors first, immediately followed by num_rx RX descriptors.
 * Each descriptor is pointed at a fixed ETHOC_BUFSIZ slice of the
 * external buffer memory, in order, starting at offset 0; the last
 * descriptor of each ring gets the WRAP bit so the MAC cycles back to
 * the start.  Always returns 0.
 */
static int ethoc_init_ring(struct ethoc *dev)
{
	struct ethoc_bd bd;
	int i;

	/* reset the software ring indices */
	dev->cur_tx = 0;
	dev->dty_tx = 0;
	dev->cur_rx = 0;

	/* setup transmission buffers */
	bd.addr = 0;
	bd.stat = TX_BD_IRQ | TX_BD_CRC;

	for (i = 0; i < dev->num_tx; i++) {
		if (i == dev->num_tx - 1)
			bd.stat |= TX_BD_WRAP;

		ethoc_write_bd(dev, i, &bd);
		bd.addr += ETHOC_BUFSIZ;
	}

	/* RX buffers start right after the TX region in buffer memory */
	bd.addr = dev->num_tx * ETHOC_BUFSIZ;
	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

	for (i = 0; i < dev->num_rx; i++) {
		if (i == dev->num_rx - 1)
			bd.stat |= RX_BD_WRAP;

		ethoc_write_bd(dev, dev->num_tx + i, &bd);
		bd.addr += ETHOC_BUFSIZ;
	}

	return 0;
}
311
/*
 * ethoc_reset() - bring the MAC into a known operating state
 *
 * Disables RX/TX, enables CRC generation, automatic padding and
 * full-duplex mode, programs the inter-packet gap, then acknowledges
 * and unmasks all interrupts before re-enabling RX/TX.  Always
 * returns 0.
 */
static int ethoc_reset(struct ethoc *dev)
{
	u32 mode;

	/* TODO: reset controller? */

	ethoc_disable_rx_and_tx(dev);

	/* TODO: setup registers */

	/* enable FCS generation and automatic padding */
	mode = ethoc_read(dev, MODER);
	mode |= MODER_CRC | MODER_PAD;
	ethoc_write(dev, MODER, mode);

	/* set full-duplex mode */
	mode = ethoc_read(dev, MODER);
	mode |= MODER_FULLD;
	ethoc_write(dev, MODER, mode);
	/* NOTE(review): 0x15 appears to be the full-duplex inter-packet
	 * gap value — confirm against the OpenCores ethmac spec */
	ethoc_write(dev, IPGT, 0x15);

	/* clear anything stale, then enable everything and go */
	ethoc_ack_irq(dev, INT_MASK_ALL);
	ethoc_enable_irq(dev, INT_MASK_ALL);
	ethoc_enable_rx_and_tx(dev);
	return 0;
}
338
339static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
340 struct ethoc_bd *bd)
341{
342 struct net_device *netdev = dev->netdev;
343 unsigned int ret = 0;
344
345 if (bd->stat & RX_BD_TL) {
346 dev_err(&netdev->dev, "RX: frame too long\n");
347 dev->stats.rx_length_errors++;
348 ret++;
349 }
350
351 if (bd->stat & RX_BD_SF) {
352 dev_err(&netdev->dev, "RX: frame too short\n");
353 dev->stats.rx_length_errors++;
354 ret++;
355 }
356
357 if (bd->stat & RX_BD_DN) {
358 dev_err(&netdev->dev, "RX: dribble nibble\n");
359 dev->stats.rx_frame_errors++;
360 }
361
362 if (bd->stat & RX_BD_CRC) {
363 dev_err(&netdev->dev, "RX: wrong CRC\n");
364 dev->stats.rx_crc_errors++;
365 ret++;
366 }
367
368 if (bd->stat & RX_BD_OR) {
369 dev_err(&netdev->dev, "RX: overrun\n");
370 dev->stats.rx_over_errors++;
371 ret++;
372 }
373
374 if (bd->stat & RX_BD_MISS)
375 dev->stats.rx_missed_errors++;
376
377 if (bd->stat & RX_BD_LC) {
378 dev_err(&netdev->dev, "RX: late collision\n");
379 dev->stats.collisions++;
380 ret++;
381 }
382
383 return ret;
384}
385
/*
 * ethoc_rx() - NAPI receive path, called from ethoc_poll()
 * @dev:	network device
 * @limit:	NAPI budget; at most this many descriptors are processed
 *
 * Walks the RX ring starting at cur_rx, copies each completed frame out
 * of the on-chip buffer memory into a freshly allocated skb and hands it
 * to the stack.  Frames with fatal error bits (per
 * ethoc_update_rx_stats()) are skipped.  Returns the number of
 * descriptors processed.
 */
static int ethoc_rx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		/* RX descriptors follow the TX descriptors in the BD area */
		entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY)
			break;	/* hardware still owns this descriptor */

		if (ethoc_update_rx_stats(priv, &bd) == 0) {
			int size = bd.stat >> 16;	/* RX_BD_LEN field */
			struct sk_buff *skb = netdev_alloc_skb(dev, size);
			if (likely(skb)) {
				void *src = priv->membase + bd.addr;
				memcpy_fromio(skb_put(skb, size), src, size);
				skb->protocol = eth_type_trans(skb, dev);
				dev->last_rx = jiffies;
				priv->stats.rx_packets++;
				priv->stats.rx_bytes += size;
				netif_receive_skb(skb);
			} else {
				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - "
							"packet dropped\n");

				/* leave the descriptor full; retry on the
				 * next poll */
				priv->stats.rx_dropped++;
				break;
			}
		}

		/* clear the buffer descriptor so it can be reused */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |= RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
		priv->cur_rx++;
	}

	return count;
}
430
431static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
432{
433 struct net_device *netdev = dev->netdev;
434
435 if (bd->stat & TX_BD_LC) {
436 dev_err(&netdev->dev, "TX: late collision\n");
437 dev->stats.tx_window_errors++;
438 }
439
440 if (bd->stat & TX_BD_RL) {
441 dev_err(&netdev->dev, "TX: retransmit limit\n");
442 dev->stats.tx_aborted_errors++;
443 }
444
445 if (bd->stat & TX_BD_UR) {
446 dev_err(&netdev->dev, "TX: underrun\n");
447 dev->stats.tx_fifo_errors++;
448 }
449
450 if (bd->stat & TX_BD_CS) {
451 dev_err(&netdev->dev, "TX: carrier sense lost\n");
452 dev->stats.tx_carrier_errors++;
453 }
454
455 if (bd->stat & TX_BD_STATS)
456 dev->stats.tx_errors++;
457
458 dev->stats.collisions += (bd->stat >> 4) & 0xf;
459 dev->stats.tx_bytes += bd->stat >> 16;
460 dev->stats.tx_packets++;
461 return 0;
462}
463
/*
 * ethoc_tx() - reclaim transmitted descriptors (interrupt context)
 * @dev:	network device
 *
 * Walks the ring from dty_tx towards cur_tx, stopping at the first
 * descriptor the hardware still owns (TX_BD_READY set).  Statistics are
 * updated for each completed descriptor, and the queue is woken once at
 * least half of the TX ring is free again.
 */
static void ethoc_tx(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);

	spin_lock(&priv->lock);

	while (priv->dty_tx != priv->cur_tx) {
		unsigned int entry = priv->dty_tx % priv->num_tx;
		struct ethoc_bd bd;

		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & TX_BD_READY)
			break;	/* MAC has not sent this one yet */

		entry = (++priv->dty_tx) % priv->num_tx;
		(void)ethoc_update_tx_stats(priv, &bd);
	}

	if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
		netif_wake_queue(dev);

	ethoc_ack_irq(priv, INT_MASK_TX);
	spin_unlock(&priv->lock);
}
488
/*
 * ethoc_interrupt() - shared interrupt handler
 *
 * Masks all MAC interrupts while handling, acknowledges the pending
 * ones, then:
 *  - BUSY: a frame was dropped for lack of a free RX descriptor;
 *  - RX:   defers receive processing to NAPI (RX stays masked until
 *          ethoc_poll() re-enables it);
 *  - TX:   reclaims completed descriptors immediately via ethoc_tx().
 * Finally re-enables everything except RX, which NAPI owns.
 */
static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct ethoc *priv = netdev_priv(dev);
	u32 pending;

	ethoc_disable_irq(priv, INT_MASK_ALL);
	pending = ethoc_read(priv, INT_SOURCE);
	if (unlikely(pending == 0)) {
		/* not ours (shared IRQ line) */
		ethoc_enable_irq(priv, INT_MASK_ALL);
		return IRQ_NONE;
	}

	ethoc_ack_irq(priv, INT_MASK_ALL);

	if (pending & INT_MASK_BUSY) {
		dev_err(&dev->dev, "packet dropped\n");
		priv->stats.rx_dropped++;
	}

	if (pending & INT_MASK_RX) {
		if (napi_schedule_prep(&priv->napi))
			__napi_schedule(&priv->napi);
	} else {
		ethoc_enable_irq(priv, INT_MASK_RX);
	}

	if (pending & INT_MASK_TX)
		ethoc_tx(dev);

	ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX);
	return IRQ_HANDLED;
}
522
523static int ethoc_get_mac_address(struct net_device *dev, void *addr)
524{
525 struct ethoc *priv = netdev_priv(dev);
526 u8 *mac = (u8 *)addr;
527 u32 reg;
528
529 reg = ethoc_read(priv, MAC_ADDR0);
530 mac[2] = (reg >> 24) & 0xff;
531 mac[3] = (reg >> 16) & 0xff;
532 mac[4] = (reg >> 8) & 0xff;
533 mac[5] = (reg >> 0) & 0xff;
534
535 reg = ethoc_read(priv, MAC_ADDR1);
536 mac[0] = (reg >> 8) & 0xff;
537 mac[1] = (reg >> 0) & 0xff;
538
539 return 0;
540}
541
542static int ethoc_poll(struct napi_struct *napi, int budget)
543{
544 struct ethoc *priv = container_of(napi, struct ethoc, napi);
545 int work_done = 0;
546
547 work_done = ethoc_rx(priv->netdev, budget);
548 if (work_done < budget) {
549 ethoc_enable_irq(priv, INT_MASK_RX);
550 napi_complete(napi);
551 }
552
553 return work_done;
554}
555
/*
 * ethoc_mdio_read() - MII bus read callback
 * @bus:	MDIO bus
 * @phy:	PHY address
 * @reg:	register number
 *
 * Starts a read on the MII management interface and waits up to
 * ETHOC_MII_TIMEOUT jiffies for MIISTATUS_BUSY to clear, yielding the
 * CPU via schedule() between polls.  Returns the 16-bit register value
 * or -EBUSY on timeout.
 *
 * NOTE(review): schedule() without setting a task state is just a
 * yield, so this still busy-polls in process context — confirm this is
 * intended rather than msleep()/usleep_range().
 */
static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
{
	unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
	struct ethoc *priv = bus->priv;

	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
	ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);

	while (time_before(jiffies, timeout)) {
		u32 status = ethoc_read(priv, MIISTATUS);
		if (!(status & MIISTATUS_BUSY)) {
			u32 data = ethoc_read(priv, MIIRX_DATA);
			/* reset MII command register */
			ethoc_write(priv, MIICOMMAND, 0);
			return data;
		}

		schedule();
	}

	return -EBUSY;
}
578
/*
 * ethoc_mdio_write() - MII bus write callback
 *
 * Writes @val to PHY register @reg and waits (yielding via schedule())
 * up to ETHOC_MII_TIMEOUT jiffies for the transaction to finish.
 * Returns 0 on success or -EBUSY on timeout.  Unlike the read path,
 * MIICOMMAND is not cleared afterwards — presumably harmless, but
 * verify against the read path for symmetry.
 */
static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
	unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
	struct ethoc *priv = bus->priv;

	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
	ethoc_write(priv, MIITX_DATA, val);
	ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);

	while (time_before(jiffies, timeout)) {
		u32 stat = ethoc_read(priv, MIISTATUS);
		if (!(stat & MIISTATUS_BUSY))
			return 0;

		schedule();
	}

	return -EBUSY;
}
598
/* MII bus reset callback: nothing to do for this controller. */
static int ethoc_mdio_reset(struct mii_bus *bus)
{
	return 0;
}
603
/* PHY state-change callback passed to phy_connect(); intentionally a
 * no-op — link changes are not acted upon by this driver. */
static void ethoc_mdio_poll(struct net_device *dev)
{
}
607
608static int ethoc_mdio_probe(struct net_device *dev)
609{
610 struct ethoc *priv = netdev_priv(dev);
611 struct phy_device *phy;
612 int i;
613
614 for (i = 0; i < PHY_MAX_ADDR; i++) {
615 phy = priv->mdio->phy_map[i];
616 if (phy) {
617 if (priv->phy_id != -1) {
618 /* attach to specified PHY */
619 if (priv->phy_id == phy->addr)
620 break;
621 } else {
622 /* autoselect PHY if none was specified */
623 if (phy->addr != 0)
624 break;
625 }
626 }
627 }
628
629 if (!phy) {
630 dev_err(&dev->dev, "no PHY found\n");
631 return -ENXIO;
632 }
633
634 phy = phy_connect(dev, dev_name(&phy->dev), &ethoc_mdio_poll, 0,
635 PHY_INTERFACE_MODE_GMII);
636 if (IS_ERR(phy)) {
637 dev_err(&dev->dev, "could not attach to PHY\n");
638 return PTR_ERR(phy);
639 }
640
641 priv->phy = phy;
642 return 0;
643}
644
645static int ethoc_open(struct net_device *dev)
646{
647 struct ethoc *priv = netdev_priv(dev);
648 unsigned int min_tx = 2;
649 unsigned int num_bd;
650 int ret;
651
652 ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
653 dev->name, dev);
654 if (ret)
655 return ret;
656
657 /* calculate the number of TX/RX buffers */
658 num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ;
659 priv->num_tx = min(min_tx, num_bd / 4);
660 priv->num_rx = num_bd - priv->num_tx;
661 ethoc_write(priv, TX_BD_NUM, priv->num_tx);
662
663 ethoc_init_ring(priv);
664 ethoc_reset(priv);
665
666 if (netif_queue_stopped(dev)) {
667 dev_dbg(&dev->dev, " resuming queue\n");
668 netif_wake_queue(dev);
669 } else {
670 dev_dbg(&dev->dev, " starting queue\n");
671 netif_start_queue(dev);
672 }
673
674 phy_start(priv->phy);
675 napi_enable(&priv->napi);
676
677 if (netif_msg_ifup(priv)) {
678 dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
679 dev->base_addr, dev->mem_start, dev->mem_end);
680 }
681
682 return 0;
683}
684
/*
 * ethoc_stop() - ndo_stop callback
 *
 * Quiesces in this order: NAPI first (so no poll runs concurrently),
 * then the PHY, then the MAC's RX/TX engines, then the IRQ, and finally
 * the TX queue.  Always returns 0.
 */
static int ethoc_stop(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);

	napi_disable(&priv->napi);

	if (priv->phy)
		phy_stop(priv->phy);

	ethoc_disable_rx_and_tx(priv);
	free_irq(dev->irq, dev);

	if (!netif_queue_stopped(dev))
		netif_stop_queue(dev);

	return 0;
}
702
703static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
704{
705 struct ethoc *priv = netdev_priv(dev);
706 struct mii_ioctl_data *mdio = if_mii(ifr);
707 struct phy_device *phy = NULL;
708
709 if (!netif_running(dev))
710 return -EINVAL;
711
712 if (cmd != SIOCGMIIPHY) {
713 if (mdio->phy_id >= PHY_MAX_ADDR)
714 return -ERANGE;
715
716 phy = priv->mdio->phy_map[mdio->phy_id];
717 if (!phy)
718 return -ENODEV;
719 } else {
720 phy = priv->phy;
721 }
722
723 return phy_mii_ioctl(phy, mdio, cmd);
724}
725
/* ndo_set_config callback: interface map changes are not supported. */
static int ethoc_config(struct net_device *dev, struct ifmap *map)
{
	return -ENOSYS;
}
730
/*
 * Program a station address into MAC_ADDR0/MAC_ADDR1.  @addr is treated
 * as a raw 6-byte buffer (this is how ethoc_probe() calls it).
 *
 * NOTE(review): this function is also wired up as .ndo_set_mac_address,
 * where the networking core passes a struct sockaddr * (address in
 * sa_data) and expects dev->dev_addr to be updated — as written, the
 * register write would consume the sockaddr header bytes and the cached
 * dev_addr would go stale.  Verify against the ndo contract.
 */
static int ethoc_set_mac_address(struct net_device *dev, void *addr)
{
	struct ethoc *priv = netdev_priv(dev);
	u8 *mac = (u8 *)addr;

	ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
			(mac[4] << 8) | (mac[5] << 0));
	ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0));

	return 0;
}
742
/*
 * ethoc_set_multicast_list() - ndo_set_multicast_list callback
 *
 * Translates dev->flags into MODER bits (loopback, broadcast,
 * promiscuous) and programs the 64-bit multicast hash filter: the top
 * six bits of the Ethernet CRC of each multicast address select one bit
 * in ETH_HASH0/ETH_HASH1.
 */
static void ethoc_set_multicast_list(struct net_device *dev)
{
	struct ethoc *priv = netdev_priv(dev);
	u32 mode = ethoc_read(priv, MODER);
	struct dev_mc_list *mc = NULL;
	u32 hash[2] = { 0, 0 };

	/* set loopback mode if requested */
	if (dev->flags & IFF_LOOPBACK)
		mode |= MODER_LOOP;
	else
		mode &= ~MODER_LOOP;

	/* receive broadcast frames if requested (MODER_BRO set = reject) */
	if (dev->flags & IFF_BROADCAST)
		mode &= ~MODER_BRO;
	else
		mode |= MODER_BRO;

	/* enable promiscuous mode if requested */
	if (dev->flags & IFF_PROMISC)
		mode |= MODER_PRO;
	else
		mode &= ~MODER_PRO;

	ethoc_write(priv, MODER, mode);

	/* receive multicast frames */
	if (dev->flags & IFF_ALLMULTI) {
		/* all-multicast: open the hash filter completely */
		hash[0] = 0xffffffff;
		hash[1] = 0xffffffff;
	} else {
		for (mc = dev->mc_list; mc; mc = mc->next) {
			u32 crc = ether_crc(mc->dmi_addrlen, mc->dmi_addr);
			int bit = (crc >> 26) & 0x3f;
			hash[bit >> 5] |= 1 << (bit & 0x1f);
		}
	}

	ethoc_write(priv, ETH_HASH0, hash[0]);
	ethoc_write(priv, ETH_HASH1, hash[1]);
}
785
/* ndo_change_mtu callback: MTU changes are not supported (buffer slots
 * are fixed at ETHOC_BUFSIZ). */
static int ethoc_change_mtu(struct net_device *dev, int new_mtu)
{
	return -ENOSYS;
}
790
791static void ethoc_tx_timeout(struct net_device *dev)
792{
793 struct ethoc *priv = netdev_priv(dev);
794 u32 pending = ethoc_read(priv, INT_SOURCE);
795 if (likely(pending))
796 ethoc_interrupt(dev->irq, dev);
797}
798
799static struct net_device_stats *ethoc_stats(struct net_device *dev)
800{
801 struct ethoc *priv = netdev_priv(dev);
802 return &priv->stats;
803}
804
805static int ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
806{
807 struct ethoc *priv = netdev_priv(dev);
808 struct ethoc_bd bd;
809 unsigned int entry;
810 void *dest;
811
812 if (unlikely(skb->len > ETHOC_BUFSIZ)) {
813 priv->stats.tx_errors++;
814 return -EMSGSIZE;
815 }
816
817 entry = priv->cur_tx % priv->num_tx;
818 spin_lock_irq(&priv->lock);
819 priv->cur_tx++;
820
821 ethoc_read_bd(priv, entry, &bd);
822 if (unlikely(skb->len < ETHOC_ZLEN))
823 bd.stat |= TX_BD_PAD;
824 else
825 bd.stat &= ~TX_BD_PAD;
826
827 dest = priv->membase + bd.addr;
828 memcpy_toio(dest, skb->data, skb->len);
829
830 bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
831 bd.stat |= TX_BD_LEN(skb->len);
832 ethoc_write_bd(priv, entry, &bd);
833
834 bd.stat |= TX_BD_READY;
835 ethoc_write_bd(priv, entry, &bd);
836
837 if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) {
838 dev_dbg(&dev->dev, "stopping queue\n");
839 netif_stop_queue(dev);
840 }
841
842 dev->trans_start = jiffies;
843 dev_kfree_skb(skb);
844
845 spin_unlock_irq(&priv->lock);
846 return NETDEV_TX_OK;
847}
848
/* net_device_ops dispatch table; installed in ethoc_probe(). */
static const struct net_device_ops ethoc_netdev_ops = {
	.ndo_open = ethoc_open,
	.ndo_stop = ethoc_stop,
	.ndo_do_ioctl = ethoc_ioctl,
	.ndo_set_config = ethoc_config,
	.ndo_set_mac_address = ethoc_set_mac_address,
	.ndo_set_multicast_list = ethoc_set_multicast_list,
	.ndo_change_mtu = ethoc_change_mtu,
	.ndo_tx_timeout = ethoc_tx_timeout,
	.ndo_get_stats = ethoc_stats,
	.ndo_start_xmit = ethoc_start_xmit,
};
861
862/**
863 * ethoc_probe() - initialize OpenCores ethernet MAC
864 * pdev: platform device
865 */
866static int ethoc_probe(struct platform_device *pdev)
867{
868 struct net_device *netdev = NULL;
869 struct resource *res = NULL;
870 struct resource *mmio = NULL;
871 struct resource *mem = NULL;
872 struct ethoc *priv = NULL;
873 unsigned int phy;
874 int ret = 0;
875
876 /* allocate networking device */
877 netdev = alloc_etherdev(sizeof(struct ethoc));
878 if (!netdev) {
879 dev_err(&pdev->dev, "cannot allocate network device\n");
880 ret = -ENOMEM;
881 goto out;
882 }
883
884 SET_NETDEV_DEV(netdev, &pdev->dev);
885 platform_set_drvdata(pdev, netdev);
886
887 /* obtain I/O memory space */
888 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
889 if (!res) {
890 dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
891 ret = -ENXIO;
892 goto free;
893 }
894
895 mmio = devm_request_mem_region(&pdev->dev, res->start,
896 res->end - res->start + 1, res->name);
897 if (!res) {
898 dev_err(&pdev->dev, "cannot request I/O memory space\n");
899 ret = -ENXIO;
900 goto free;
901 }
902
903 netdev->base_addr = mmio->start;
904
905 /* obtain buffer memory space */
906 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
907 if (!res) {
908 dev_err(&pdev->dev, "cannot obtain memory space\n");
909 ret = -ENXIO;
910 goto free;
911 }
912
913 mem = devm_request_mem_region(&pdev->dev, res->start,
914 res->end - res->start + 1, res->name);
915 if (!mem) {
916 dev_err(&pdev->dev, "cannot request memory space\n");
917 ret = -ENXIO;
918 goto free;
919 }
920
921 netdev->mem_start = mem->start;
922 netdev->mem_end = mem->end;
923
924 /* obtain device IRQ number */
925 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
926 if (!res) {
927 dev_err(&pdev->dev, "cannot obtain IRQ\n");
928 ret = -ENXIO;
929 goto free;
930 }
931
932 netdev->irq = res->start;
933
934 /* setup driver-private data */
935 priv = netdev_priv(netdev);
936 priv->netdev = netdev;
937
938 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
939 mmio->end - mmio->start + 1);
940 if (!priv->iobase) {
941 dev_err(&pdev->dev, "cannot remap I/O memory space\n");
942 ret = -ENXIO;
943 goto error;
944 }
945
946 priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start,
947 mem->end - mem->start + 1);
948 if (!priv->membase) {
949 dev_err(&pdev->dev, "cannot remap memory space\n");
950 ret = -ENXIO;
951 goto error;
952 }
953
954 /* Allow the platform setup code to pass in a MAC address. */
955 if (pdev->dev.platform_data) {
956 struct ethoc_platform_data *pdata =
957 (struct ethoc_platform_data *)pdev->dev.platform_data;
958 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
959 priv->phy_id = pdata->phy_id;
960 }
961
962 /* Check that the given MAC address is valid. If it isn't, read the
963 * current MAC from the controller. */
964 if (!is_valid_ether_addr(netdev->dev_addr))
965 ethoc_get_mac_address(netdev, netdev->dev_addr);
966
967 /* Check the MAC again for validity, if it still isn't choose and
968 * program a random one. */
969 if (!is_valid_ether_addr(netdev->dev_addr))
970 random_ether_addr(netdev->dev_addr);
971
972 ethoc_set_mac_address(netdev, netdev->dev_addr);
973
974 /* register MII bus */
975 priv->mdio = mdiobus_alloc();
976 if (!priv->mdio) {
977 ret = -ENOMEM;
978 goto free;
979 }
980
981 priv->mdio->name = "ethoc-mdio";
982 snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d",
983 priv->mdio->name, pdev->id);
984 priv->mdio->read = ethoc_mdio_read;
985 priv->mdio->write = ethoc_mdio_write;
986 priv->mdio->reset = ethoc_mdio_reset;
987 priv->mdio->priv = priv;
988
989 priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
990 if (!priv->mdio->irq) {
991 ret = -ENOMEM;
992 goto free_mdio;
993 }
994
995 for (phy = 0; phy < PHY_MAX_ADDR; phy++)
996 priv->mdio->irq[phy] = PHY_POLL;
997
998 ret = mdiobus_register(priv->mdio);
999 if (ret) {
1000 dev_err(&netdev->dev, "failed to register MDIO bus\n");
1001 goto free_mdio;
1002 }
1003
1004 ret = ethoc_mdio_probe(netdev);
1005 if (ret) {
1006 dev_err(&netdev->dev, "failed to probe MDIO bus\n");
1007 goto error;
1008 }
1009
1010 ether_setup(netdev);
1011
1012 /* setup the net_device structure */
1013 netdev->netdev_ops = &ethoc_netdev_ops;
1014 netdev->watchdog_timeo = ETHOC_TIMEOUT;
1015 netdev->features |= 0;
1016
1017 /* setup NAPI */
1018 memset(&priv->napi, 0, sizeof(priv->napi));
1019 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
1020
1021 spin_lock_init(&priv->rx_lock);
1022 spin_lock_init(&priv->lock);
1023
1024 ret = register_netdev(netdev);
1025 if (ret < 0) {
1026 dev_err(&netdev->dev, "failed to register interface\n");
1027 goto error;
1028 }
1029
1030 goto out;
1031
1032error:
1033 mdiobus_unregister(priv->mdio);
1034free_mdio:
1035 kfree(priv->mdio->irq);
1036 mdiobus_free(priv->mdio);
1037free:
1038 free_netdev(netdev);
1039out:
1040 return ret;
1041}
1042
1043/**
1044 * ethoc_remove() - shutdown OpenCores ethernet MAC
1045 * @pdev: platform device
1046 */
1047static int ethoc_remove(struct platform_device *pdev)
1048{
1049 struct net_device *netdev = platform_get_drvdata(pdev);
1050 struct ethoc *priv = netdev_priv(netdev);
1051
1052 platform_set_drvdata(pdev, NULL);
1053
1054 if (netdev) {
1055 phy_disconnect(priv->phy);
1056 priv->phy = NULL;
1057
1058 if (priv->mdio) {
1059 mdiobus_unregister(priv->mdio);
1060 kfree(priv->mdio->irq);
1061 mdiobus_free(priv->mdio);
1062 }
1063
1064 unregister_netdev(netdev);
1065 free_netdev(netdev);
1066 }
1067
1068 return 0;
1069}
1070
#ifdef CONFIG_PM
/* Power management is not implemented yet; fail suspend attempts loudly
 * rather than silently losing hardware state. */
static int ethoc_suspend(struct platform_device *pdev, pm_message_t state)
{
	return -ENOSYS;
}

static int ethoc_resume(struct platform_device *pdev)
{
	return -ENOSYS;
}
#else
/* without CONFIG_PM the driver advertises no PM callbacks at all */
# define ethoc_suspend NULL
# define ethoc_resume NULL
#endif
1085
/* Platform driver glue; matched by name against the "ethoc" device. */
static struct platform_driver ethoc_driver = {
	.probe   = ethoc_probe,
	.remove  = ethoc_remove,
	.suspend = ethoc_suspend,
	.resume  = ethoc_resume,
	.driver  = {
		.name = "ethoc",
	},
};
1095
/* Module entry point: register the platform driver. */
static int __init ethoc_init(void)
{
	return platform_driver_register(&ethoc_driver);
}
1100
/* Module exit point: unregister the platform driver. */
static void __exit ethoc_exit(void)
{
	platform_driver_unregister(&ethoc_driver);
}
1105
1106module_init(ethoc_init);
1107module_exit(ethoc_exit);
1108
1109MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
1110MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
1111MODULE_LICENSE("GPL v2");
1112
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index b852303c9362..1a685a04d4b2 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -388,6 +388,18 @@ static int __init ewrk3_probe1(struct net_device *dev, u_long iobase, int irq)
388 return err; 388 return err;
389} 389}
390 390
391static const struct net_device_ops ewrk3_netdev_ops = {
392 .ndo_open = ewrk3_open,
393 .ndo_start_xmit = ewrk3_queue_pkt,
394 .ndo_stop = ewrk3_close,
395 .ndo_set_multicast_list = set_multicast_list,
396 .ndo_do_ioctl = ewrk3_ioctl,
397 .ndo_tx_timeout = ewrk3_timeout,
398 .ndo_change_mtu = eth_change_mtu,
399 .ndo_set_mac_address = eth_mac_addr,
400 .ndo_validate_addr = eth_validate_addr,
401};
402
391static int __init 403static int __init
392ewrk3_hw_init(struct net_device *dev, u_long iobase) 404ewrk3_hw_init(struct net_device *dev, u_long iobase)
393{ 405{
@@ -603,16 +615,11 @@ ewrk3_hw_init(struct net_device *dev, u_long iobase)
603 printk(version); 615 printk(version);
604 } 616 }
605 /* The EWRK3-specific entries in the device structure. */ 617 /* The EWRK3-specific entries in the device structure. */
606 dev->open = ewrk3_open; 618 dev->netdev_ops = &ewrk3_netdev_ops;
607 dev->hard_start_xmit = ewrk3_queue_pkt;
608 dev->stop = ewrk3_close;
609 dev->set_multicast_list = set_multicast_list;
610 dev->do_ioctl = ewrk3_ioctl;
611 if (lp->adapter_name[4] == '3') 619 if (lp->adapter_name[4] == '3')
612 SET_ETHTOOL_OPS(dev, &ethtool_ops_203); 620 SET_ETHTOOL_OPS(dev, &ethtool_ops_203);
613 else 621 else
614 SET_ETHTOOL_OPS(dev, &ethtool_ops); 622 SET_ETHTOOL_OPS(dev, &ethtool_ops);
615 dev->tx_timeout = ewrk3_timeout;
616 dev->watchdog_timeo = QUEUE_PKT_TIMEOUT; 623 dev->watchdog_timeo = QUEUE_PKT_TIMEOUT;
617 624
618 dev->mem_start = 0; 625 dev->mem_start = 0;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 9d81e7a48dba..6a38800be3f1 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1239,19 +1239,9 @@ static int gfar_enet_open(struct net_device *dev)
1239 return err; 1239 return err;
1240} 1240}
1241 1241
1242static inline struct txfcb *gfar_add_fcb(struct sk_buff **skbp) 1242static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1243{ 1243{
1244 struct txfcb *fcb; 1244 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
1245 struct sk_buff *skb = *skbp;
1246
1247 if (unlikely(skb_headroom(skb) < GMAC_FCB_LEN)) {
1248 struct sk_buff *old_skb = skb;
1249 skb = skb_realloc_headroom(old_skb, GMAC_FCB_LEN);
1250 if (!skb)
1251 return NULL;
1252 dev_kfree_skb_any(old_skb);
1253 }
1254 fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
1255 cacheable_memzero(fcb, GMAC_FCB_LEN); 1245 cacheable_memzero(fcb, GMAC_FCB_LEN);
1256 1246
1257 return fcb; 1247 return fcb;
@@ -1320,6 +1310,22 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1320 1310
1321 base = priv->tx_bd_base; 1311 base = priv->tx_bd_base;
1322 1312
1313 /* make space for additional header when fcb is needed */
1314 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
1315 (priv->vlgrp && vlan_tx_tag_present(skb))) &&
1316 (skb_headroom(skb) < GMAC_FCB_LEN)) {
1317 struct sk_buff *skb_new;
1318
1319 skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
1320 if (!skb_new) {
1321 dev->stats.tx_errors++;
1322 kfree_skb(skb);
1323 return NETDEV_TX_OK;
1324 }
1325 kfree_skb(skb);
1326 skb = skb_new;
1327 }
1328
1323 /* total number of fragments in the SKB */ 1329 /* total number of fragments in the SKB */
1324 nr_frags = skb_shinfo(skb)->nr_frags; 1330 nr_frags = skb_shinfo(skb)->nr_frags;
1325 1331
@@ -1372,20 +1378,18 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1372 1378
1373 /* Set up checksumming */ 1379 /* Set up checksumming */
1374 if (CHECKSUM_PARTIAL == skb->ip_summed) { 1380 if (CHECKSUM_PARTIAL == skb->ip_summed) {
1375 fcb = gfar_add_fcb(&skb); 1381 fcb = gfar_add_fcb(skb);
1376 if (likely(fcb != NULL)) { 1382 lstatus |= BD_LFLAG(TXBD_TOE);
1377 lstatus |= BD_LFLAG(TXBD_TOE); 1383 gfar_tx_checksum(skb, fcb);
1378 gfar_tx_checksum(skb, fcb);
1379 }
1380 } 1384 }
1381 1385
1382 if (priv->vlgrp && vlan_tx_tag_present(skb)) { 1386 if (priv->vlgrp && vlan_tx_tag_present(skb)) {
1383 if (unlikely(NULL == fcb)) 1387 if (unlikely(NULL == fcb)) {
1384 fcb = gfar_add_fcb(&skb); 1388 fcb = gfar_add_fcb(skb);
1385 if (likely(fcb != NULL)) {
1386 lstatus |= BD_LFLAG(TXBD_TOE); 1389 lstatus |= BD_LFLAG(TXBD_TOE);
1387 gfar_tx_vlan(skb, fcb);
1388 } 1390 }
1391
1392 gfar_tx_vlan(skb, fcb);
1389 } 1393 }
1390 1394
1391 /* setup the TxBD length and buffer pointer for the first BD */ 1395 /* setup the TxBD length and buffer pointer for the first BD */
@@ -1433,7 +1437,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1433 /* Unlock priv */ 1437 /* Unlock priv */
1434 spin_unlock_irqrestore(&priv->txlock, flags); 1438 spin_unlock_irqrestore(&priv->txlock, flags);
1435 1439
1436 return 0; 1440 return NETDEV_TX_OK;
1437} 1441}
1438 1442
1439/* Stops the kernel queue, and halts the controller */ 1443/* Stops the kernel queue, and halts the controller */
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 5b5bf9f9861a..c25bc0bc0b25 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -905,6 +905,17 @@ static char *ibmlana_adapter_names[] __devinitdata = {
905 NULL 905 NULL
906}; 906};
907 907
908
909static const struct net_device_ops ibmlana_netdev_ops = {
910 .ndo_open = ibmlana_open,
911 .ndo_stop = ibmlana_close,
912 .ndo_start_xmit = ibmlana_tx,
913 .ndo_set_multicast_list = ibmlana_set_multicast_list,
914 .ndo_change_mtu = eth_change_mtu,
915 .ndo_set_mac_address = eth_mac_addr,
916 .ndo_validate_addr = eth_validate_addr,
917};
918
908static int __devinit ibmlana_init_one(struct device *kdev) 919static int __devinit ibmlana_init_one(struct device *kdev)
909{ 920{
910 struct mca_device *mdev = to_mca_device(kdev); 921 struct mca_device *mdev = to_mca_device(kdev);
@@ -973,11 +984,7 @@ static int __devinit ibmlana_init_one(struct device *kdev)
973 mca_device_set_claim(mdev, 1); 984 mca_device_set_claim(mdev, 1);
974 985
975 /* set methods */ 986 /* set methods */
976 987 dev->netdev_ops = &ibmlana_netdev_ops;
977 dev->open = ibmlana_open;
978 dev->stop = ibmlana_close;
979 dev->hard_start_xmit = ibmlana_tx;
980 dev->set_multicast_list = ibmlana_set_multicast_list;
981 dev->flags |= IFF_MULTICAST; 988 dev->flags |= IFF_MULTICAST;
982 989
983 /* copy out MAC address */ 990 /* copy out MAC address */
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 6f3e7f71658d..6b6548b9fda0 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1524,6 +1524,13 @@ toshoboe_close (struct pci_dev *pci_dev)
1524 free_netdev(self->netdev); 1524 free_netdev(self->netdev);
1525} 1525}
1526 1526
1527static const struct net_device_ops toshoboe_netdev_ops = {
1528 .ndo_open = toshoboe_net_open,
1529 .ndo_stop = toshoboe_net_close,
1530 .ndo_start_xmit = toshoboe_hard_xmit,
1531 .ndo_do_ioctl = toshoboe_net_ioctl,
1532};
1533
1527static int 1534static int
1528toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid) 1535toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1529{ 1536{
@@ -1657,10 +1664,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1657#endif 1664#endif
1658 1665
1659 SET_NETDEV_DEV(dev, &pci_dev->dev); 1666 SET_NETDEV_DEV(dev, &pci_dev->dev);
1660 dev->hard_start_xmit = toshoboe_hard_xmit; 1667 dev->netdev_ops = &toshoboe_netdev_ops;
1661 dev->open = toshoboe_net_open;
1662 dev->stop = toshoboe_net_close;
1663 dev->do_ioctl = toshoboe_net_ioctl;
1664 1668
1665 err = register_netdev(dev); 1669 err = register_netdev(dev);
1666 if (err) 1670 if (err)
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index d83d4010656d..633808d447be 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -454,6 +454,18 @@ out:
454} 454}
455#endif 455#endif
456 456
457static const struct net_device_ops lance_netdev_ops = {
458 .ndo_open = lance_open,
459 .ndo_start_xmit = lance_start_xmit,
460 .ndo_stop = lance_close,
461 .ndo_get_stats = lance_get_stats,
462 .ndo_set_multicast_list = set_multicast_list,
463 .ndo_tx_timeout = lance_tx_timeout,
464 .ndo_change_mtu = eth_change_mtu,
465 .ndo_set_mac_address = eth_mac_addr,
466 .ndo_validate_addr = eth_validate_addr,
467};
468
457static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options) 469static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
458{ 470{
459 struct lance_private *lp; 471 struct lance_private *lp;
@@ -714,12 +726,7 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
714 printk(version); 726 printk(version);
715 727
716 /* The LANCE-specific entries in the device structure. */ 728 /* The LANCE-specific entries in the device structure. */
717 dev->open = lance_open; 729 dev->netdev_ops = &lance_netdev_ops;
718 dev->hard_start_xmit = lance_start_xmit;
719 dev->stop = lance_close;
720 dev->get_stats = lance_get_stats;
721 dev->set_multicast_list = set_multicast_list;
722 dev->tx_timeout = lance_tx_timeout;
723 dev->watchdog_timeo = TX_TIMEOUT; 730 dev->watchdog_timeo = TX_TIMEOUT;
724 731
725 err = register_netdev(dev); 732 err = register_netdev(dev);
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 4d1a059921c6..d44bddbee373 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -952,6 +952,17 @@ static void print_eth(char *add)
952 (unsigned char) add[12], (unsigned char) add[13]); 952 (unsigned char) add[12], (unsigned char) add[13]);
953} 953}
954 954
955static const struct net_device_ops i596_netdev_ops = {
956 .ndo_open = i596_open,
957 .ndo_stop = i596_close,
958 .ndo_start_xmit = i596_start_xmit,
959 .ndo_set_multicast_list = set_multicast_list,
960 .ndo_tx_timeout = i596_tx_timeout,
961 .ndo_change_mtu = eth_change_mtu,
962 .ndo_set_mac_address = eth_mac_addr,
963 .ndo_validate_addr = eth_validate_addr,
964};
965
955static int __init lp486e_probe(struct net_device *dev) { 966static int __init lp486e_probe(struct net_device *dev) {
956 struct i596_private *lp; 967 struct i596_private *lp;
957 unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 }; 968 unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 };
@@ -1014,12 +1025,8 @@ static int __init lp486e_probe(struct net_device *dev) {
1014 printk("\n"); 1025 printk("\n");
1015 1026
1016 /* The LP486E-specific entries in the device structure. */ 1027 /* The LP486E-specific entries in the device structure. */
1017 dev->open = &i596_open; 1028 dev->netdev_ops = &i596_netdev_ops;
1018 dev->stop = &i596_close;
1019 dev->hard_start_xmit = &i596_start_xmit;
1020 dev->set_multicast_list = &set_multicast_list;
1021 dev->watchdog_timeo = 5*HZ; 1029 dev->watchdog_timeo = 5*HZ;
1022 dev->tx_timeout = i596_tx_timeout;
1023 1030
1024#if 0 1031#if 0
1025 /* selftest reports 0x320925ae - don't know what that means */ 1032 /* selftest reports 0x320925ae - don't know what that means */
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index a8bcc00c3302..77d44a061703 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -441,6 +441,18 @@ out:
441 return ERR_PTR(err); 441 return ERR_PTR(err);
442} 442}
443 443
444static const struct net_device_ops ni52_netdev_ops = {
445 .ndo_open = ni52_open,
446 .ndo_stop = ni52_close,
447 .ndo_get_stats = ni52_get_stats,
448 .ndo_tx_timeout = ni52_timeout,
449 .ndo_start_xmit = ni52_send_packet,
450 .ndo_set_multicast_list = set_multicast_list,
451 .ndo_change_mtu = eth_change_mtu,
452 .ndo_set_mac_address = eth_mac_addr,
453 .ndo_validate_addr = eth_validate_addr,
454};
455
444static int __init ni52_probe1(struct net_device *dev, int ioaddr) 456static int __init ni52_probe1(struct net_device *dev, int ioaddr)
445{ 457{
446 int i, size, retval; 458 int i, size, retval;
@@ -561,15 +573,8 @@ static int __init ni52_probe1(struct net_device *dev, int ioaddr)
561 printk("IRQ %d (assigned and not checked!).\n", dev->irq); 573 printk("IRQ %d (assigned and not checked!).\n", dev->irq);
562 } 574 }
563 575
564 dev->open = ni52_open; 576 dev->netdev_ops = &ni52_netdev_ops;
565 dev->stop = ni52_close;
566 dev->get_stats = ni52_get_stats;
567 dev->tx_timeout = ni52_timeout;
568 dev->watchdog_timeo = HZ/20; 577 dev->watchdog_timeo = HZ/20;
569 dev->hard_start_xmit = ni52_send_packet;
570 dev->set_multicast_list = set_multicast_list;
571
572 dev->if_port = 0;
573 578
574 return 0; 579 return 0;
575out: 580out:
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index df5f869e8d8f..6474f02bf783 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -237,7 +237,7 @@ struct priv
237 void *tmdbounce[TMDNUM]; 237 void *tmdbounce[TMDNUM];
238 int tmdbouncenum; 238 int tmdbouncenum;
239 int lock,xmit_queued; 239 int lock,xmit_queued;
240 struct net_device_stats stats; 240
241 void *self; 241 void *self;
242 int cmdr_addr; 242 int cmdr_addr;
243 int cardno; 243 int cardno;
@@ -257,7 +257,6 @@ static void ni65_timeout(struct net_device *dev);
257static int ni65_close(struct net_device *dev); 257static int ni65_close(struct net_device *dev);
258static int ni65_alloc_buffer(struct net_device *dev); 258static int ni65_alloc_buffer(struct net_device *dev);
259static void ni65_free_buffer(struct priv *p); 259static void ni65_free_buffer(struct priv *p);
260static struct net_device_stats *ni65_get_stats(struct net_device *);
261static void set_multicast_list(struct net_device *dev); 260static void set_multicast_list(struct net_device *dev);
262 261
263static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */ 262static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
@@ -401,6 +400,17 @@ out:
401 return ERR_PTR(err); 400 return ERR_PTR(err);
402} 401}
403 402
403static const struct net_device_ops ni65_netdev_ops = {
404 .ndo_open = ni65_open,
405 .ndo_stop = ni65_close,
406 .ndo_start_xmit = ni65_send_packet,
407 .ndo_tx_timeout = ni65_timeout,
408 .ndo_set_multicast_list = set_multicast_list,
409 .ndo_change_mtu = eth_change_mtu,
410 .ndo_set_mac_address = eth_mac_addr,
411 .ndo_validate_addr = eth_validate_addr,
412};
413
404/* 414/*
405 * this is the real card probe .. 415 * this is the real card probe ..
406 */ 416 */
@@ -549,13 +559,9 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
549 } 559 }
550 560
551 dev->base_addr = ioaddr; 561 dev->base_addr = ioaddr;
552 dev->open = ni65_open; 562 dev->netdev_ops = &ni65_netdev_ops;
553 dev->stop = ni65_close;
554 dev->hard_start_xmit = ni65_send_packet;
555 dev->tx_timeout = ni65_timeout;
556 dev->watchdog_timeo = HZ/2; 563 dev->watchdog_timeo = HZ/2;
557 dev->get_stats = ni65_get_stats; 564
558 dev->set_multicast_list = set_multicast_list;
559 return 0; /* everything is OK */ 565 return 0; /* everything is OK */
560} 566}
561 567
@@ -901,13 +907,13 @@ static irqreturn_t ni65_interrupt(int irq, void * dev_id)
901 if(debuglevel > 1) 907 if(debuglevel > 1)
902 printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0); 908 printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
903 if(csr0 & CSR0_BABL) 909 if(csr0 & CSR0_BABL)
904 p->stats.tx_errors++; 910 dev->stats.tx_errors++;
905 if(csr0 & CSR0_MISS) { 911 if(csr0 & CSR0_MISS) {
906 int i; 912 int i;
907 for(i=0;i<RMDNUM;i++) 913 for(i=0;i<RMDNUM;i++)
908 printk("%02x ",p->rmdhead[i].u.s.status); 914 printk("%02x ",p->rmdhead[i].u.s.status);
909 printk("\n"); 915 printk("\n");
910 p->stats.rx_errors++; 916 dev->stats.rx_errors++;
911 } 917 }
912 if(csr0 & CSR0_MERR) { 918 if(csr0 & CSR0_MERR) {
913 if(debuglevel > 1) 919 if(debuglevel > 1)
@@ -997,12 +1003,12 @@ static void ni65_xmit_intr(struct net_device *dev,int csr0)
997#endif 1003#endif
998 /* checking some errors */ 1004 /* checking some errors */
999 if(tmdp->status2 & XMIT_RTRY) 1005 if(tmdp->status2 & XMIT_RTRY)
1000 p->stats.tx_aborted_errors++; 1006 dev->stats.tx_aborted_errors++;
1001 if(tmdp->status2 & XMIT_LCAR) 1007 if(tmdp->status2 & XMIT_LCAR)
1002 p->stats.tx_carrier_errors++; 1008 dev->stats.tx_carrier_errors++;
1003 if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) { 1009 if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
1004 /* this stops the xmitter */ 1010 /* this stops the xmitter */
1005 p->stats.tx_fifo_errors++; 1011 dev->stats.tx_fifo_errors++;
1006 if(debuglevel > 0) 1012 if(debuglevel > 0)
1007 printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name); 1013 printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
1008 if(p->features & INIT_RING_BEFORE_START) { 1014 if(p->features & INIT_RING_BEFORE_START) {
@@ -1016,12 +1022,12 @@ static void ni65_xmit_intr(struct net_device *dev,int csr0)
1016 if(debuglevel > 2) 1022 if(debuglevel > 2)
1017 printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2); 1023 printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
1018 if(!(csr0 & CSR0_BABL)) /* don't count errors twice */ 1024 if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
1019 p->stats.tx_errors++; 1025 dev->stats.tx_errors++;
1020 tmdp->status2 = 0; 1026 tmdp->status2 = 0;
1021 } 1027 }
1022 else { 1028 else {
1023 p->stats.tx_bytes -= (short)(tmdp->blen); 1029 dev->stats.tx_bytes -= (short)(tmdp->blen);
1024 p->stats.tx_packets++; 1030 dev->stats.tx_packets++;
1025 } 1031 }
1026 1032
1027#ifdef XMT_VIA_SKB 1033#ifdef XMT_VIA_SKB
@@ -1057,7 +1063,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
1057 if(!(rmdstat & RCV_ERR)) { 1063 if(!(rmdstat & RCV_ERR)) {
1058 if(rmdstat & RCV_START) 1064 if(rmdstat & RCV_START)
1059 { 1065 {
1060 p->stats.rx_length_errors++; 1066 dev->stats.rx_length_errors++;
1061 printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff); 1067 printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
1062 } 1068 }
1063 } 1069 }
@@ -1066,16 +1072,16 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
1066 printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n", 1072 printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
1067 dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) ); 1073 dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
1068 if(rmdstat & RCV_FRAM) 1074 if(rmdstat & RCV_FRAM)
1069 p->stats.rx_frame_errors++; 1075 dev->stats.rx_frame_errors++;
1070 if(rmdstat & RCV_OFLO) 1076 if(rmdstat & RCV_OFLO)
1071 p->stats.rx_over_errors++; 1077 dev->stats.rx_over_errors++;
1072 if(rmdstat & RCV_CRC) 1078 if(rmdstat & RCV_CRC)
1073 p->stats.rx_crc_errors++; 1079 dev->stats.rx_crc_errors++;
1074 if(rmdstat & RCV_BUF_ERR) 1080 if(rmdstat & RCV_BUF_ERR)
1075 p->stats.rx_fifo_errors++; 1081 dev->stats.rx_fifo_errors++;
1076 } 1082 }
1077 if(!(csr0 & CSR0_MISS)) /* don't count errors twice */ 1083 if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
1078 p->stats.rx_errors++; 1084 dev->stats.rx_errors++;
1079 } 1085 }
1080 else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60) 1086 else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
1081 { 1087 {
@@ -1106,20 +1112,20 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
1106 skb_put(skb,len); 1112 skb_put(skb,len);
1107 skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len); 1113 skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
1108#endif 1114#endif
1109 p->stats.rx_packets++; 1115 dev->stats.rx_packets++;
1110 p->stats.rx_bytes += len; 1116 dev->stats.rx_bytes += len;
1111 skb->protocol=eth_type_trans(skb,dev); 1117 skb->protocol=eth_type_trans(skb,dev);
1112 netif_rx(skb); 1118 netif_rx(skb);
1113 } 1119 }
1114 else 1120 else
1115 { 1121 {
1116 printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name); 1122 printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
1117 p->stats.rx_dropped++; 1123 dev->stats.rx_dropped++;
1118 } 1124 }
1119 } 1125 }
1120 else { 1126 else {
1121 printk(KERN_INFO "%s: received runt packet\n",dev->name); 1127 printk(KERN_INFO "%s: received runt packet\n",dev->name);
1122 p->stats.rx_errors++; 1128 dev->stats.rx_errors++;
1123 } 1129 }
1124 rmdp->blen = -(R_BUF_SIZE-8); 1130 rmdp->blen = -(R_BUF_SIZE-8);
1125 rmdp->mlen = 0; 1131 rmdp->mlen = 0;
@@ -1213,23 +1219,6 @@ static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
1213 return 0; 1219 return 0;
1214} 1220}
1215 1221
1216static struct net_device_stats *ni65_get_stats(struct net_device *dev)
1217{
1218
1219#if 0
1220 int i;
1221 struct priv *p = dev->ml_priv;
1222 for(i=0;i<RMDNUM;i++)
1223 {
1224 struct rmd *rmdp = p->rmdhead + ((p->rmdnum + i) & (RMDNUM-1));
1225 printk("%02x ",rmdp->u.s.status);
1226 }
1227 printk("\n");
1228#endif
1229
1230 return &((struct priv *)dev->ml_priv)->stats;
1231}
1232
1233static void set_multicast_list(struct net_device *dev) 1222static void set_multicast_list(struct net_device *dev)
1234{ 1223{
1235 if(!ni65_lance_reinit(dev)) 1224 if(!ni65_lance_reinit(dev))
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index 12a8ffffeb03..ebbbe09725fe 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -143,6 +143,17 @@ out:
143 return ERR_PTR(err); 143 return ERR_PTR(err);
144} 144}
145 145
146static const struct net_device_ops seeq8005_netdev_ops = {
147 .ndo_open = seeq8005_open,
148 .ndo_stop = seeq8005_close,
149 .ndo_start_xmit = seeq8005_send_packet,
150 .ndo_tx_timeout = seeq8005_timeout,
151 .ndo_set_multicast_list = set_multicast_list,
152 .ndo_change_mtu = eth_change_mtu,
153 .ndo_set_mac_address = eth_mac_addr,
154 .ndo_validate_addr = eth_validate_addr,
155};
156
146/* This is the real probe routine. Linux has a history of friendly device 157/* This is the real probe routine. Linux has a history of friendly device
147 probes on the ISA bus. A good device probes avoids doing writes, and 158 probes on the ISA bus. A good device probes avoids doing writes, and
148 verifies that the correct device exists and functions. */ 159 verifies that the correct device exists and functions. */
@@ -332,12 +343,8 @@ static int __init seeq8005_probe1(struct net_device *dev, int ioaddr)
332 } 343 }
333 } 344 }
334#endif 345#endif
335 dev->open = seeq8005_open; 346 dev->netdev_ops = &seeq8005_netdev_ops;
336 dev->stop = seeq8005_close;
337 dev->hard_start_xmit = seeq8005_send_packet;
338 dev->tx_timeout = seeq8005_timeout;
339 dev->watchdog_timeo = HZ/20; 347 dev->watchdog_timeo = HZ/20;
340 dev->set_multicast_list = set_multicast_list;
341 dev->flags &= ~IFF_MULTICAST; 348 dev->flags &= ~IFF_MULTICAST;
342 349
343 return 0; 350 return 0;
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index 2033fee3143a..0291ea098a06 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -142,9 +142,6 @@ static int __init do_ultra_probe(struct net_device *dev)
142 int base_addr = dev->base_addr; 142 int base_addr = dev->base_addr;
143 int irq = dev->irq; 143 int irq = dev->irq;
144 144
145#ifdef CONFIG_NET_POLL_CONTROLLER
146 dev->poll_controller = &ultra_poll;
147#endif
148 if (base_addr > 0x1ff) /* Check a single specified location. */ 145 if (base_addr > 0x1ff) /* Check a single specified location. */
149 return ultra_probe1(dev, base_addr); 146 return ultra_probe1(dev, base_addr);
150 else if (base_addr != 0) /* Don't probe at all. */ 147 else if (base_addr != 0) /* Don't probe at all. */
@@ -199,7 +196,7 @@ static const struct net_device_ops ultra_netdev_ops = {
199 .ndo_set_mac_address = eth_mac_addr, 196 .ndo_set_mac_address = eth_mac_addr,
200 .ndo_change_mtu = eth_change_mtu, 197 .ndo_change_mtu = eth_change_mtu,
201#ifdef CONFIG_NET_POLL_CONTROLLER 198#ifdef CONFIG_NET_POLL_CONTROLLER
202 .ndo_poll_controller = ei_poll, 199 .ndo_poll_controller = ultra_poll,
203#endif 200#endif
204}; 201};
205 202
diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c
index cb6c097a2e0a..7a554adc70fb 100644
--- a/drivers/net/smc-ultra32.c
+++ b/drivers/net/smc-ultra32.c
@@ -153,6 +153,22 @@ out:
153 return ERR_PTR(err); 153 return ERR_PTR(err);
154} 154}
155 155
156
157static const struct net_device_ops ultra32_netdev_ops = {
158 .ndo_open = ultra32_open,
159 .ndo_stop = ultra32_close,
160 .ndo_start_xmit = ei_start_xmit,
161 .ndo_tx_timeout = ei_tx_timeout,
162 .ndo_get_stats = ei_get_stats,
163 .ndo_set_multicast_list = ei_set_multicast_list,
164 .ndo_validate_addr = eth_validate_addr,
165 .ndo_set_mac_address = eth_mac_addr,
166 .ndo_change_mtu = eth_change_mtu,
167#ifdef CONFIG_NET_POLL_CONTROLLER
168 .ndo_poll_controller = ei_poll,
169#endif
170};
171
156static int __init ultra32_probe1(struct net_device *dev, int ioaddr) 172static int __init ultra32_probe1(struct net_device *dev, int ioaddr)
157{ 173{
158 int i, edge, media, retval; 174 int i, edge, media, retval;
@@ -273,11 +289,8 @@ static int __init ultra32_probe1(struct net_device *dev, int ioaddr)
273 ei_status.block_output = &ultra32_block_output; 289 ei_status.block_output = &ultra32_block_output;
274 ei_status.get_8390_hdr = &ultra32_get_8390_hdr; 290 ei_status.get_8390_hdr = &ultra32_get_8390_hdr;
275 ei_status.reset_8390 = &ultra32_reset_8390; 291 ei_status.reset_8390 = &ultra32_reset_8390;
276 dev->open = &ultra32_open; 292
277 dev->stop = &ultra32_close; 293 dev->netdev_ops = &ultra32_netdev_ops;
278#ifdef CONFIG_NET_POLL_CONTROLLER
279 dev->poll_controller = ei_poll;
280#endif
281 NS8390_init(dev, 0); 294 NS8390_init(dev, 0);
282 295
283 return 0; 296 return 0;
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 18d653bbd4e0..9a7973a54116 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -831,6 +831,17 @@ static int __init smc_findirq(int ioaddr)
831#endif 831#endif
832} 832}
833 833
834static const struct net_device_ops smc_netdev_ops = {
835 .ndo_open = smc_open,
836 .ndo_stop = smc_close,
837 .ndo_start_xmit = smc_wait_to_send_packet,
838 .ndo_tx_timeout = smc_timeout,
839 .ndo_set_multicast_list = smc_set_multicast_list,
840 .ndo_change_mtu = eth_change_mtu,
841 .ndo_set_mac_address = eth_mac_addr,
842 .ndo_validate_addr = eth_validate_addr,
843};
844
834/*---------------------------------------------------------------------- 845/*----------------------------------------------------------------------
835 . Function: smc_probe( int ioaddr ) 846 . Function: smc_probe( int ioaddr )
836 . 847 .
@@ -1044,12 +1055,8 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
1044 goto err_out; 1055 goto err_out;
1045 } 1056 }
1046 1057
1047 dev->open = smc_open; 1058 dev->netdev_ops = &smc_netdev_ops;
1048 dev->stop = smc_close;
1049 dev->hard_start_xmit = smc_wait_to_send_packet;
1050 dev->tx_timeout = smc_timeout;
1051 dev->watchdog_timeo = HZ/20; 1059 dev->watchdog_timeo = HZ/20;
1052 dev->set_multicast_list = smc_set_multicast_list;
1053 1060
1054 return 0; 1061 return 0;
1055 1062
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index ad3cbc91a8fa..af8f60ca0f57 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1680,6 +1680,7 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
1680 u8 address, u8 data) 1680 u8 address, u8 data)
1681{ 1681{
1682 u32 op = E2P_CMD_EPC_CMD_ERASE_ | address; 1682 u32 op = E2P_CMD_EPC_CMD_ERASE_ | address;
1683 u32 temp;
1683 int ret; 1684 int ret;
1684 1685
1685 SMSC_TRACE(DRV, "address 0x%x, data 0x%x", address, data); 1686 SMSC_TRACE(DRV, "address 0x%x, data 0x%x", address, data);
@@ -1688,6 +1689,10 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
1688 if (!ret) { 1689 if (!ret) {
1689 op = E2P_CMD_EPC_CMD_WRITE_ | address; 1690 op = E2P_CMD_EPC_CMD_WRITE_ | address;
1690 smsc911x_reg_write(pdata, E2P_DATA, (u32)data); 1691 smsc911x_reg_write(pdata, E2P_DATA, (u32)data);
1692
1693 /* Workaround for hardware read-after-write restriction */
1694 temp = smsc911x_reg_read(pdata, BYTE_TEST);
1695
1691 ret = smsc911x_eeprom_send_cmd(pdata, op); 1696 ret = smsc911x_eeprom_send_cmd(pdata, op);
1692 } 1697 }
1693 1698
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 193308118f95..456f8bff40be 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -142,7 +142,7 @@ static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsign
142 return; 142 return;
143} 143}
144 144
145 145static struct net_device_ops madgemc_netdev_ops __read_mostly;
146 146
147static int __devinit madgemc_probe(struct device *device) 147static int __devinit madgemc_probe(struct device *device)
148{ 148{
@@ -168,7 +168,7 @@ static int __devinit madgemc_probe(struct device *device)
168 goto getout; 168 goto getout;
169 } 169 }
170 170
171 dev->dma = 0; 171 dev->netdev_ops = &madgemc_netdev_ops;
172 172
173 card = kmalloc(sizeof(struct card_info), GFP_KERNEL); 173 card = kmalloc(sizeof(struct card_info), GFP_KERNEL);
174 if (card==NULL) { 174 if (card==NULL) {
@@ -348,9 +348,6 @@ static int __devinit madgemc_probe(struct device *device)
348 348
349 memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1); 349 memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1);
350 350
351 dev->open = madgemc_open;
352 dev->stop = madgemc_close;
353
354 tp->tmspriv = card; 351 tp->tmspriv = card;
355 dev_set_drvdata(device, dev); 352 dev_set_drvdata(device, dev);
356 353
@@ -758,6 +755,10 @@ static struct mca_driver madgemc_driver = {
758 755
759static int __init madgemc_init (void) 756static int __init madgemc_init (void)
760{ 757{
758 madgemc_netdev_ops = tms380tr_netdev_ops;
759 madgemc_netdev_ops.ndo_open = madgemc_open;
760 madgemc_netdev_ops.ndo_stop = madgemc_close;
761
761 return mca_register_driver (&madgemc_driver); 762 return mca_register_driver (&madgemc_driver);
762} 763}
763 764
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
index b8c955f6d31a..16e8783ee9cd 100644
--- a/drivers/net/tokenring/proteon.c
+++ b/drivers/net/tokenring/proteon.c
@@ -116,6 +116,8 @@ nodev:
116 return -ENODEV; 116 return -ENODEV;
117} 117}
118 118
119static struct net_device_ops proteon_netdev_ops __read_mostly;
120
119static int __init setup_card(struct net_device *dev, struct device *pdev) 121static int __init setup_card(struct net_device *dev, struct device *pdev)
120{ 122{
121 struct net_local *tp; 123 struct net_local *tp;
@@ -167,8 +169,7 @@ static int __init setup_card(struct net_device *dev, struct device *pdev)
167 169
168 tp->tmspriv = NULL; 170 tp->tmspriv = NULL;
169 171
170 dev->open = proteon_open; 172 dev->netdev_ops = &proteon_netdev_ops;
171 dev->stop = tms380tr_close;
172 173
173 if (dev->irq == 0) 174 if (dev->irq == 0)
174 { 175 {
@@ -352,6 +353,10 @@ static int __init proteon_init(void)
352 struct platform_device *pdev; 353 struct platform_device *pdev;
353 int i, num = 0, err = 0; 354 int i, num = 0, err = 0;
354 355
356 proteon_netdev_ops = tms380tr_netdev_ops;
357 proteon_netdev_ops.ndo_open = proteon_open;
358 proteon_netdev_ops.ndo_stop = tms380tr_close;
359
355 err = platform_driver_register(&proteon_driver); 360 err = platform_driver_register(&proteon_driver);
356 if (err) 361 if (err)
357 return err; 362 return err;
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
index c0f58f08782c..46db5c5395b2 100644
--- a/drivers/net/tokenring/skisa.c
+++ b/drivers/net/tokenring/skisa.c
@@ -133,6 +133,8 @@ static int __init sk_isa_probe1(struct net_device *dev, int ioaddr)
133 return 0; 133 return 0;
134} 134}
135 135
136static struct net_device_ops sk_isa_netdev_ops __read_mostly;
137
136static int __init setup_card(struct net_device *dev, struct device *pdev) 138static int __init setup_card(struct net_device *dev, struct device *pdev)
137{ 139{
138 struct net_local *tp; 140 struct net_local *tp;
@@ -184,8 +186,7 @@ static int __init setup_card(struct net_device *dev, struct device *pdev)
184 186
185 tp->tmspriv = NULL; 187 tp->tmspriv = NULL;
186 188
187 dev->open = sk_isa_open; 189 dev->netdev_ops = &sk_isa_netdev_ops;
188 dev->stop = tms380tr_close;
189 190
190 if (dev->irq == 0) 191 if (dev->irq == 0)
191 { 192 {
@@ -362,6 +363,10 @@ static int __init sk_isa_init(void)
362 struct platform_device *pdev; 363 struct platform_device *pdev;
363 int i, num = 0, err = 0; 364 int i, num = 0, err = 0;
364 365
366 sk_isa_netdev_ops = tms380tr_netdev_ops;
367 sk_isa_netdev_ops.ndo_open = sk_isa_open;
368 sk_isa_netdev_ops.ndo_stop = tms380tr_close;
369
365 err = platform_driver_register(&sk_isa_driver); 370 err = platform_driver_register(&sk_isa_driver);
366 if (err) 371 if (err)
367 return err; 372 return err;
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index 9d7db2c8d661..a91d9c55d78e 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -124,7 +124,6 @@ static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev);
124static int smctr_get_physical_drop_number(struct net_device *dev); 124static int smctr_get_physical_drop_number(struct net_device *dev);
125static __u8 *smctr_get_rx_pointer(struct net_device *dev, short queue); 125static __u8 *smctr_get_rx_pointer(struct net_device *dev, short queue);
126static int smctr_get_station_id(struct net_device *dev); 126static int smctr_get_station_id(struct net_device *dev);
127static struct net_device_stats *smctr_get_stats(struct net_device *dev);
128static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue, 127static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
129 __u16 bytes_count); 128 __u16 bytes_count);
130static int smctr_get_upstream_neighbor_addr(struct net_device *dev); 129static int smctr_get_upstream_neighbor_addr(struct net_device *dev);
@@ -3633,6 +3632,14 @@ out:
3633 return ERR_PTR(err); 3632 return ERR_PTR(err);
3634} 3633}
3635 3634
3635static const struct net_device_ops smctr_netdev_ops = {
3636 .ndo_open = smctr_open,
3637 .ndo_stop = smctr_close,
3638 .ndo_start_xmit = smctr_send_packet,
3639 .ndo_tx_timeout = smctr_timeout,
3640 .ndo_get_stats = smctr_get_stats,
3641 .ndo_set_multicast_list = smctr_set_multicast_list,
3642};
3636 3643
3637static int __init smctr_probe1(struct net_device *dev, int ioaddr) 3644static int __init smctr_probe1(struct net_device *dev, int ioaddr)
3638{ 3645{
@@ -3683,13 +3690,8 @@ static int __init smctr_probe1(struct net_device *dev, int ioaddr)
3683 (unsigned int)dev->base_addr, 3690 (unsigned int)dev->base_addr,
3684 dev->irq, tp->rom_base, tp->ram_base); 3691 dev->irq, tp->rom_base, tp->ram_base);
3685 3692
3686 dev->open = smctr_open; 3693 dev->netdev_ops = &smctr_netdev_ops;
3687 dev->stop = smctr_close;
3688 dev->hard_start_xmit = smctr_send_packet;
3689 dev->tx_timeout = smctr_timeout;
3690 dev->watchdog_timeo = HZ; 3694 dev->watchdog_timeo = HZ;
3691 dev->get_stats = smctr_get_stats;
3692 dev->set_multicast_list = &smctr_set_multicast_list;
3693 return (0); 3695 return (0);
3694 3696
3695out: 3697out:
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index a110326dce6f..86a479f61c0c 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2009,6 +2009,9 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2009 /* Disable Rx and Tx */ 2009 /* Disable Rx and Tx */
2010 clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); 2010 clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2011 2011
2012 phy_disconnect(ugeth->phydev);
2013 ugeth->phydev = NULL;
2014
2012 ucc_geth_memclean(ugeth); 2015 ucc_geth_memclean(ugeth);
2013} 2016}
2014 2017
@@ -3345,6 +3348,14 @@ static int ucc_geth_open(struct net_device *dev)
3345 return -EINVAL; 3348 return -EINVAL;
3346 } 3349 }
3347 3350
3351 err = init_phy(dev);
3352 if (err) {
3353 if (netif_msg_ifup(ugeth))
3354 ugeth_err("%s: Cannot initialize PHY, aborting.",
3355 dev->name);
3356 return err;
3357 }
3358
3348 err = ucc_struct_init(ugeth); 3359 err = ucc_struct_init(ugeth);
3349 if (err) { 3360 if (err) {
3350 if (netif_msg_ifup(ugeth)) 3361 if (netif_msg_ifup(ugeth))
@@ -3381,13 +3392,6 @@ static int ucc_geth_open(struct net_device *dev)
3381 &ugeth->ug_regs->macstnaddr1, 3392 &ugeth->ug_regs->macstnaddr1,
3382 &ugeth->ug_regs->macstnaddr2); 3393 &ugeth->ug_regs->macstnaddr2);
3383 3394
3384 err = init_phy(dev);
3385 if (err) {
3386 if (netif_msg_ifup(ugeth))
3387 ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
3388 goto out_err;
3389 }
3390
3391 phy_start(ugeth->phydev); 3395 phy_start(ugeth->phydev);
3392 3396
3393 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); 3397 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
@@ -3430,9 +3434,6 @@ static int ucc_geth_close(struct net_device *dev)
3430 3434
3431 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev); 3435 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
3432 3436
3433 phy_disconnect(ugeth->phydev);
3434 ugeth->phydev = NULL;
3435
3436 netif_stop_queue(dev); 3437 netif_stop_queue(dev);
3437 3438
3438 return 0; 3439 return 0;
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 6a07ba9371db..1d637f407a0c 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -714,19 +714,19 @@ static int sdla_transmit(struct sk_buff *skb, struct net_device *dev)
714 switch (ret) 714 switch (ret)
715 { 715 {
716 case SDLA_RET_OK: 716 case SDLA_RET_OK:
717 flp->stats.tx_packets++; 717 dev->stats.tx_packets++;
718 ret = DLCI_RET_OK; 718 ret = DLCI_RET_OK;
719 break; 719 break;
720 720
721 case SDLA_RET_CIR_OVERFLOW: 721 case SDLA_RET_CIR_OVERFLOW:
722 case SDLA_RET_BUF_OVERSIZE: 722 case SDLA_RET_BUF_OVERSIZE:
723 case SDLA_RET_NO_BUFS: 723 case SDLA_RET_NO_BUFS:
724 flp->stats.tx_dropped++; 724 dev->stats.tx_dropped++;
725 ret = DLCI_RET_DROP; 725 ret = DLCI_RET_DROP;
726 break; 726 break;
727 727
728 default: 728 default:
729 flp->stats.tx_errors++; 729 dev->stats.tx_errors++;
730 ret = DLCI_RET_ERR; 730 ret = DLCI_RET_ERR;
731 break; 731 break;
732 } 732 }
@@ -807,7 +807,7 @@ static void sdla_receive(struct net_device *dev)
807 if (i == CONFIG_DLCI_MAX) 807 if (i == CONFIG_DLCI_MAX)
808 { 808 {
809 printk(KERN_NOTICE "%s: Received packet from invalid DLCI %i, ignoring.", dev->name, dlci); 809 printk(KERN_NOTICE "%s: Received packet from invalid DLCI %i, ignoring.", dev->name, dlci);
810 flp->stats.rx_errors++; 810 dev->stats.rx_errors++;
811 success = 0; 811 success = 0;
812 } 812 }
813 } 813 }
@@ -819,7 +819,7 @@ static void sdla_receive(struct net_device *dev)
819 if (skb == NULL) 819 if (skb == NULL)
820 { 820 {
821 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); 821 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
822 flp->stats.rx_dropped++; 822 dev->stats.rx_dropped++;
823 success = 0; 823 success = 0;
824 } 824 }
825 else 825 else
@@ -859,7 +859,7 @@ static void sdla_receive(struct net_device *dev)
859 859
860 if (success) 860 if (success)
861 { 861 {
862 flp->stats.rx_packets++; 862 dev->stats.rx_packets++;
863 dlp = netdev_priv(master); 863 dlp = netdev_priv(master);
864 (*dlp->receive)(skb, master); 864 (*dlp->receive)(skb, master);
865 } 865 }
@@ -1590,13 +1590,14 @@ fail:
1590 return err; 1590 return err;
1591} 1591}
1592 1592
1593static struct net_device_stats *sdla_stats(struct net_device *dev) 1593static const struct net_device_ops sdla_netdev_ops = {
1594{ 1594 .ndo_open = sdla_open,
1595 struct frad_local *flp; 1595 .ndo_stop = sdla_close,
1596 flp = netdev_priv(dev); 1596 .ndo_do_ioctl = sdla_ioctl,
1597 1597 .ndo_set_config = sdla_set_config,
1598 return(&flp->stats); 1598 .ndo_start_xmit = sdla_transmit,
1599} 1599 .ndo_change_mtu = sdla_change_mtu,
1600};
1600 1601
1601static void setup_sdla(struct net_device *dev) 1602static void setup_sdla(struct net_device *dev)
1602{ 1603{
@@ -1604,20 +1605,13 @@ static void setup_sdla(struct net_device *dev)
1604 1605
1605 netdev_boot_setup_check(dev); 1606 netdev_boot_setup_check(dev);
1606 1607
1608 dev->netdev_ops = &sdla_netdev_ops;
1607 dev->flags = 0; 1609 dev->flags = 0;
1608 dev->type = 0xFFFF; 1610 dev->type = 0xFFFF;
1609 dev->hard_header_len = 0; 1611 dev->hard_header_len = 0;
1610 dev->addr_len = 0; 1612 dev->addr_len = 0;
1611 dev->mtu = SDLA_MAX_MTU; 1613 dev->mtu = SDLA_MAX_MTU;
1612 1614
1613 dev->open = sdla_open;
1614 dev->stop = sdla_close;
1615 dev->do_ioctl = sdla_ioctl;
1616 dev->set_config = sdla_set_config;
1617 dev->get_stats = sdla_stats;
1618 dev->hard_start_xmit = sdla_transmit;
1619 dev->change_mtu = sdla_change_mtu;
1620
1621 flp->activate = sdla_activate; 1615 flp->activate = sdla_activate;
1622 flp->deactivate = sdla_deactivate; 1616 flp->deactivate = sdla_deactivate;
1623 flp->assoc = sdla_assoc; 1617 flp->assoc = sdla_assoc;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 612fffe100a6..8a0823588c51 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -485,6 +485,7 @@ config MWL8K
485source "drivers/net/wireless/p54/Kconfig" 485source "drivers/net/wireless/p54/Kconfig"
486source "drivers/net/wireless/ath5k/Kconfig" 486source "drivers/net/wireless/ath5k/Kconfig"
487source "drivers/net/wireless/ath9k/Kconfig" 487source "drivers/net/wireless/ath9k/Kconfig"
488source "drivers/net/wireless/ar9170/Kconfig"
488source "drivers/net/wireless/ipw2x00/Kconfig" 489source "drivers/net/wireless/ipw2x00/Kconfig"
489source "drivers/net/wireless/iwlwifi/Kconfig" 490source "drivers/net/wireless/iwlwifi/Kconfig"
490source "drivers/net/wireless/hostap/Kconfig" 491source "drivers/net/wireless/hostap/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index d780487c420f..50e7fba7f0ea 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -57,5 +57,6 @@ obj-$(CONFIG_P54_COMMON) += p54/
57 57
58obj-$(CONFIG_ATH5K) += ath5k/ 58obj-$(CONFIG_ATH5K) += ath5k/
59obj-$(CONFIG_ATH9K) += ath9k/ 59obj-$(CONFIG_ATH9K) += ath9k/
60obj-$(CONFIG_AR9170_USB) += ar9170/
60 61
61obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o 62obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
diff --git a/drivers/net/wireless/ar9170/Kconfig b/drivers/net/wireless/ar9170/Kconfig
new file mode 100644
index 000000000000..de4281fda129
--- /dev/null
+++ b/drivers/net/wireless/ar9170/Kconfig
@@ -0,0 +1,17 @@
1config AR9170_USB
2 tristate "Atheros AR9170 802.11n USB support"
3 depends on USB && MAC80211 && WLAN_80211 && EXPERIMENTAL
4 select FW_LOADER
5 help
6 This is a driver for the Atheros "otus" 802.11n USB devices.
7
8 These devices require additional firmware (2 files).
9 For now, these files can be downloaded from here:
10 http://wireless.kernel.org/en/users/Drivers/ar9170
11
12 If you choose to build a module, it'll be called ar9170usb.
13
14config AR9170_LEDS
15 bool
16 depends on AR9170_USB && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = AR9170_USB)
17 default y
diff --git a/drivers/net/wireless/ar9170/Makefile b/drivers/net/wireless/ar9170/Makefile
new file mode 100644
index 000000000000..8d91c7ee3215
--- /dev/null
+++ b/drivers/net/wireless/ar9170/Makefile
@@ -0,0 +1,3 @@
1ar9170usb-objs := usb.o main.o cmd.o mac.o phy.o led.o
2
3obj-$(CONFIG_AR9170_USB) += ar9170usb.o
diff --git a/drivers/net/wireless/ar9170/ar9170.h b/drivers/net/wireless/ar9170/ar9170.h
new file mode 100644
index 000000000000..f4fb2e94aea0
--- /dev/null
+++ b/drivers/net/wireless/ar9170/ar9170.h
@@ -0,0 +1,209 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * Driver specific definitions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#ifndef __AR9170_H
39#define __AR9170_H
40
41#include <linux/completion.h>
42#include <linux/spinlock.h>
43#include <net/wireless.h>
44#include <net/mac80211.h>
45#ifdef CONFIG_AR9170_LEDS
46#include <linux/leds.h>
47#endif /* CONFIG_AR9170_LEDS */
48#include "eeprom.h"
49#include "hw.h"
50
51#define PAYLOAD_MAX (AR9170_MAX_CMD_LEN/4 - 1)
52
53enum ar9170_bw {
54 AR9170_BW_20,
55 AR9170_BW_40_BELOW,
56 AR9170_BW_40_ABOVE,
57
58 __AR9170_NUM_BW,
59};
60
61enum ar9170_rf_init_mode {
62 AR9170_RFI_NONE,
63 AR9170_RFI_WARM,
64 AR9170_RFI_COLD,
65};
66
67#define AR9170_MAX_RX_BUFFER_SIZE 8192
68
69#ifdef CONFIG_AR9170_LEDS
70struct ar9170;
71
72struct ar9170_led {
73 struct ar9170 *ar;
74 struct led_classdev l;
75 char name[32];
76 unsigned int toggled;
77 bool registered;
78};
79
80#endif /* CONFIG_AR9170_LEDS */
81
82enum ar9170_device_state {
83 AR9170_UNKNOWN_STATE,
84 AR9170_STOPPED,
85 AR9170_IDLE,
86 AR9170_STARTED,
87 AR9170_ASSOCIATED,
88};
89
90struct ar9170 {
91 struct ieee80211_hw *hw;
92 struct mutex mutex;
93 enum ar9170_device_state state;
94
95 int (*open)(struct ar9170 *);
96 void (*stop)(struct ar9170 *);
97 int (*tx)(struct ar9170 *, struct sk_buff *, bool, unsigned int);
98 int (*exec_cmd)(struct ar9170 *, enum ar9170_cmd, u32 ,
99 void *, u32 , void *);
100 void (*callback_cmd)(struct ar9170 *, u32 , void *);
101
102 /* interface mode settings */
103 struct ieee80211_vif *vif;
104 u8 mac_addr[ETH_ALEN];
105 u8 bssid[ETH_ALEN];
106
107 /* beaconing */
108 struct sk_buff *beacon;
109 struct work_struct beacon_work;
110
111 /* cryptographic engine */
112 u64 usedkeys;
113 bool rx_software_decryption;
114 bool disable_offload;
115
116 /* filter settings */
117 struct work_struct filter_config_work;
118 u64 cur_mc_hash, want_mc_hash;
119 u32 cur_filter, want_filter;
120 unsigned int filter_changed;
121 bool sniffer_enabled;
122
123 /* PHY */
124 struct ieee80211_channel *channel;
125 int noise[4];
126
127 /* power calibration data */
128 u8 power_5G_leg[4];
129 u8 power_2G_cck[4];
130 u8 power_2G_ofdm[4];
131 u8 power_5G_ht20[8];
132 u8 power_5G_ht40[8];
133 u8 power_2G_ht20[8];
134 u8 power_2G_ht40[8];
135
136#ifdef CONFIG_AR9170_LEDS
137 struct delayed_work led_work;
138 struct ar9170_led leds[AR9170_NUM_LEDS];
139#endif /* CONFIG_AR9170_LEDS */
140
141 /* qos queue settings */
142 spinlock_t tx_stats_lock;
143 struct ieee80211_tx_queue_stats tx_stats[5];
144 struct ieee80211_tx_queue_params edcf[5];
145
146 spinlock_t cmdlock;
147 __le32 cmdbuf[PAYLOAD_MAX + 1];
148
149 /* MAC statistics */
150 struct ieee80211_low_level_stats stats;
151
152 /* EEPROM */
153 struct ar9170_eeprom eeprom;
154
155 /* global tx status for unregistered Stations. */
156 struct sk_buff_head global_tx_status;
157 struct sk_buff_head global_tx_status_waste;
158 struct delayed_work tx_status_janitor;
159};
160
161struct ar9170_sta_info {
162 struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
163};
164
165#define IS_STARTED(a) (a->state >= AR9170_STARTED)
166#define IS_ACCEPTING_CMD(a) (a->state >= AR9170_IDLE)
167
168#define AR9170_FILTER_CHANGED_PROMISC BIT(0)
169#define AR9170_FILTER_CHANGED_MULTICAST BIT(1)
170#define AR9170_FILTER_CHANGED_FRAMEFILTER BIT(2)
171
172/* exported interface */
173void *ar9170_alloc(size_t priv_size);
174int ar9170_register(struct ar9170 *ar, struct device *pdev);
175void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb);
176void ar9170_unregister(struct ar9170 *ar);
177void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
178 bool update_statistics, u16 tx_status);
179
180/* MAC */
181int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
182int ar9170_init_mac(struct ar9170 *ar);
183int ar9170_set_qos(struct ar9170 *ar);
184int ar9170_update_multicast(struct ar9170 *ar);
185int ar9170_update_frame_filter(struct ar9170 *ar);
186int ar9170_set_operating_mode(struct ar9170 *ar);
187int ar9170_set_beacon_timers(struct ar9170 *ar);
188int ar9170_set_hwretry_limit(struct ar9170 *ar, u32 max_retry);
189int ar9170_update_beacon(struct ar9170 *ar);
190void ar9170_new_beacon(struct work_struct *work);
191int ar9170_upload_key(struct ar9170 *ar, u8 id, const u8 *mac, u8 ktype,
192 u8 keyidx, u8 *keydata, int keylen);
193int ar9170_disable_key(struct ar9170 *ar, u8 id);
194
195/* LEDs */
196#ifdef CONFIG_AR9170_LEDS
197int ar9170_register_leds(struct ar9170 *ar);
198void ar9170_unregister_leds(struct ar9170 *ar);
199#endif /* CONFIG_AR9170_LEDS */
200int ar9170_init_leds(struct ar9170 *ar);
201int ar9170_set_leds_state(struct ar9170 *ar, u32 led_state);
202
203/* PHY / RF */
204int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band);
205int ar9170_init_rf(struct ar9170 *ar);
206int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
207 enum ar9170_rf_init_mode rfi, enum ar9170_bw bw);
208
209#endif /* __AR9170_H */
diff --git a/drivers/net/wireless/ar9170/cmd.c b/drivers/net/wireless/ar9170/cmd.c
new file mode 100644
index 000000000000..f57a6200167b
--- /dev/null
+++ b/drivers/net/wireless/ar9170/cmd.c
@@ -0,0 +1,129 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * Basic HW register/memory/command access functions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#include "ar9170.h"
40#include "cmd.h"
41
42int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len)
43{
44 int err;
45
46 if (unlikely(!IS_ACCEPTING_CMD(ar)))
47 return 0;
48
49 err = ar->exec_cmd(ar, AR9170_CMD_WMEM, len, (u8 *) data, 0, NULL);
50 if (err)
51 printk(KERN_DEBUG "%s: writing memory failed\n",
52 wiphy_name(ar->hw->wiphy));
53 return err;
54}
55
56int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
57{
58 __le32 buf[2] = {
59 cpu_to_le32(reg),
60 cpu_to_le32(val),
61 };
62 int err;
63
64 if (unlikely(!IS_ACCEPTING_CMD(ar)))
65 return 0;
66
67 err = ar->exec_cmd(ar, AR9170_CMD_WREG, sizeof(buf),
68 (u8 *) buf, 0, NULL);
69 if (err)
70 printk(KERN_DEBUG "%s: writing reg %#x (val %#x) failed\n",
71 wiphy_name(ar->hw->wiphy), reg, val);
72 return err;
73}
74
75static int ar9170_read_mreg(struct ar9170 *ar, int nregs,
76 const u32 *regs, u32 *out)
77{
78 int i, err;
79 __le32 *offs, *res;
80
81 if (unlikely(!IS_ACCEPTING_CMD(ar)))
82 return 0;
83
84 /* abuse "out" for the register offsets, must be same length */
85 offs = (__le32 *)out;
86 for (i = 0; i < nregs; i++)
87 offs[i] = cpu_to_le32(regs[i]);
88
89 /* also use the same buffer for the input */
90 res = (__le32 *)out;
91
92 err = ar->exec_cmd(ar, AR9170_CMD_RREG,
93 4 * nregs, (u8 *)offs,
94 4 * nregs, (u8 *)res);
95 if (err)
96 return err;
97
98 /* convert result to cpu endian */
99 for (i = 0; i < nregs; i++)
100 out[i] = le32_to_cpu(res[i]);
101
102 return 0;
103}
104
105int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val)
106{
107 return ar9170_read_mreg(ar, 1, &reg, val);
108}
109
110int ar9170_echo_test(struct ar9170 *ar, u32 v)
111{
112 __le32 echobuf = cpu_to_le32(v);
113 __le32 echores;
114 int err;
115
116 if (unlikely(!IS_ACCEPTING_CMD(ar)))
117 return -ENODEV;
118
119 err = ar->exec_cmd(ar, AR9170_CMD_ECHO,
120 4, (u8 *)&echobuf,
121 4, (u8 *)&echores);
122 if (err)
123 return err;
124
125 if (echobuf != echores)
126 return -EINVAL;
127
128 return 0;
129}
diff --git a/drivers/net/wireless/ar9170/cmd.h b/drivers/net/wireless/ar9170/cmd.h
new file mode 100644
index 000000000000..a4f0e50e52b4
--- /dev/null
+++ b/drivers/net/wireless/ar9170/cmd.h
@@ -0,0 +1,91 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * Basic HW register/memory/command access functions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#ifndef __CMD_H
39#define __CMD_H
40
41#include "ar9170.h"
42
43/* basic HW access */
44int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len);
45int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val);
46int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val);
47int ar9170_echo_test(struct ar9170 *ar, u32 v);
48
49/*
50 * Macros to facilitate writing multiple registers in a single
51 * write-combining USB command. Note that when the first group
52 * fails the whole thing will fail without any others attempted,
53 * but you won't know which write in the group failed.
54 */
55#define ar9170_regwrite_begin(ar) \
56do { \
57 int __nreg = 0, __err = 0; \
58 struct ar9170 *__ar = ar;
59
60#define ar9170_regwrite(r, v) do { \
61 __ar->cmdbuf[2 * __nreg + 1] = cpu_to_le32(r); \
62 __ar->cmdbuf[2 * __nreg + 2] = cpu_to_le32(v); \
63 __nreg++; \
64 if ((__nreg >= PAYLOAD_MAX/2)) { \
65 if (IS_ACCEPTING_CMD(__ar)) \
66 __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
67 8 * __nreg, \
68 (u8 *) &__ar->cmdbuf[1], \
69 0, NULL); \
70 __nreg = 0; \
71 if (__err) \
72 goto __regwrite_out; \
73 } \
74} while (0)
75
76#define ar9170_regwrite_finish() \
77__regwrite_out : \
78 if (__nreg) { \
79 if (IS_ACCEPTING_CMD(__ar)) \
80 __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
81 8 * __nreg, \
82 (u8 *) &__ar->cmdbuf[1], \
83 0, NULL); \
84 __nreg = 0; \
85 }
86
87#define ar9170_regwrite_result() \
88 __err; \
89} while (0);
90
91#endif /* __CMD_H */
diff --git a/drivers/net/wireless/ar9170/eeprom.h b/drivers/net/wireless/ar9170/eeprom.h
new file mode 100644
index 000000000000..d2c8cc83f1dd
--- /dev/null
+++ b/drivers/net/wireless/ar9170/eeprom.h
@@ -0,0 +1,179 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * EEPROM layout
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#ifndef __AR9170_EEPROM_H
39#define __AR9170_EEPROM_H
40
41#define AR5416_MAX_CHAINS 2
42#define AR5416_MODAL_SPURS 5
43
44struct ar9170_eeprom_modal {
45 __le32 antCtrlChain[AR5416_MAX_CHAINS];
46 __le32 antCtrlCommon;
47 s8 antennaGainCh[AR5416_MAX_CHAINS];
48 u8 switchSettling;
49 u8 txRxAttenCh[AR5416_MAX_CHAINS];
50 u8 rxTxMarginCh[AR5416_MAX_CHAINS];
51 s8 adcDesiredSize;
52 s8 pgaDesiredSize;
53 u8 xlnaGainCh[AR5416_MAX_CHAINS];
54 u8 txEndToXpaOff;
55 u8 txEndToRxOn;
56 u8 txFrameToXpaOn;
57 u8 thresh62;
58 s8 noiseFloorThreshCh[AR5416_MAX_CHAINS];
59 u8 xpdGain;
60 u8 xpd;
61 s8 iqCalICh[AR5416_MAX_CHAINS];
62 s8 iqCalQCh[AR5416_MAX_CHAINS];
63 u8 pdGainOverlap;
64 u8 ob;
65 u8 db;
66 u8 xpaBiasLvl;
67 u8 pwrDecreaseFor2Chain;
68 u8 pwrDecreaseFor3Chain;
69 u8 txFrameToDataStart;
70 u8 txFrameToPaOn;
71 u8 ht40PowerIncForPdadc;
72 u8 bswAtten[AR5416_MAX_CHAINS];
73 u8 bswMargin[AR5416_MAX_CHAINS];
74 u8 swSettleHt40;
75 u8 reserved[22];
76 struct spur_channel {
77 __le16 spurChan;
78 u8 spurRangeLow;
79 u8 spurRangeHigh;
80 } __packed spur_channels[AR5416_MODAL_SPURS];
81} __packed;
82
83#define AR5416_NUM_PD_GAINS 4
84#define AR5416_PD_GAIN_ICEPTS 5
85
86struct ar9170_calibration_data_per_freq {
87 u8 pwr_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
88 u8 vpd_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
89} __packed;
90
91#define AR5416_NUM_5G_CAL_PIERS 8
92#define AR5416_NUM_2G_CAL_PIERS 4
93
94#define AR5416_NUM_5G_TARGET_PWRS 8
95#define AR5416_NUM_2G_CCK_TARGET_PWRS 3
96#define AR5416_NUM_2G_OFDM_TARGET_PWRS 4
97#define AR5416_MAX_NUM_TGT_PWRS 8
98
99struct ar9170_calibration_target_power_legacy {
100 u8 freq;
101 u8 power[4];
102} __packed;
103
104struct ar9170_calibration_target_power_ht {
105 u8 freq;
106 u8 power[8];
107} __packed;
108
109#define AR5416_NUM_CTLS 24
110
111struct ar9170_calctl_edges {
112 u8 channel;
113#define AR9170_CALCTL_EDGE_FLAGS 0xC0
114 u8 power_flags;
115} __packed;
116
117#define AR5416_NUM_BAND_EDGES 8
118
119struct ar9170_calctl_data {
120 struct ar9170_calctl_edges
121 control_edges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES];
122} __packed;
123
124
125struct ar9170_eeprom {
126 __le16 length;
127 __le16 checksum;
128 __le16 version;
129 u8 operating_flags;
130#define AR9170_OPFLAG_5GHZ 1
131#define AR9170_OPFLAG_2GHZ 2
132 u8 misc;
133 __le16 reg_domain[2];
134 u8 mac_address[6];
135 u8 rx_mask;
136 u8 tx_mask;
137 __le16 rf_silent;
138 __le16 bluetooth_options;
139 __le16 device_capabilities;
140 __le32 build_number;
141 u8 deviceType;
142 u8 reserved[33];
143
144 u8 customer_data[64];
145
146 struct ar9170_eeprom_modal
147 modal_header[2];
148
149 u8 cal_freq_pier_5G[AR5416_NUM_5G_CAL_PIERS];
150 u8 cal_freq_pier_2G[AR5416_NUM_2G_CAL_PIERS];
151
152 struct ar9170_calibration_data_per_freq
153 cal_pier_data_5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS],
154 cal_pier_data_2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS];
155
156 /* power calibration data */
157 struct ar9170_calibration_target_power_legacy
158 cal_tgt_pwr_5G[AR5416_NUM_5G_TARGET_PWRS];
159 struct ar9170_calibration_target_power_ht
160 cal_tgt_pwr_5G_ht20[AR5416_NUM_5G_TARGET_PWRS],
161 cal_tgt_pwr_5G_ht40[AR5416_NUM_5G_TARGET_PWRS];
162
163 struct ar9170_calibration_target_power_legacy
164 cal_tgt_pwr_2G_cck[AR5416_NUM_2G_CCK_TARGET_PWRS],
165 cal_tgt_pwr_2G_ofdm[AR5416_NUM_2G_OFDM_TARGET_PWRS];
166 struct ar9170_calibration_target_power_ht
167 cal_tgt_pwr_2G_ht20[AR5416_NUM_2G_OFDM_TARGET_PWRS],
168 cal_tgt_pwr_2G_ht40[AR5416_NUM_2G_OFDM_TARGET_PWRS];
169
170 /* conformance testing limits */
171 u8 ctl_index[AR5416_NUM_CTLS];
172 struct ar9170_calctl_data
173 ctl_data[AR5416_NUM_CTLS];
174
175 u8 pad;
176 __le16 subsystem_id;
177} __packed;
178
179#endif /* __AR9170_EEPROM_H */
diff --git a/drivers/net/wireless/ar9170/hw.h b/drivers/net/wireless/ar9170/hw.h
new file mode 100644
index 000000000000..13091bd9d815
--- /dev/null
+++ b/drivers/net/wireless/ar9170/hw.h
@@ -0,0 +1,417 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * Hardware-specific definitions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#ifndef __AR9170_HW_H
39#define __AR9170_HW_H
40
41#define AR9170_MAX_CMD_LEN 64
42
/* Firmware command opcodes sent over the AR9170_EP_CMD endpoint. */
enum ar9170_cmd {
	AR9170_CMD_RREG		= 0x00,	/* read register(s) */
	AR9170_CMD_WREG		= 0x01,	/* write register(s) */
	AR9170_CMD_RMEM		= 0x02,	/* read device memory */
	AR9170_CMD_WMEM		= 0x03,	/* write device memory */
	AR9170_CMD_BITAND	= 0x04,
	AR9170_CMD_BITOR	= 0x05,
	AR9170_CMD_EKEY		= 0x28,	/* install/enable crypto key */
	AR9170_CMD_DKEY		= 0x29,	/* disable crypto key */
	AR9170_CMD_FREQUENCY	= 0x30,
	AR9170_CMD_RF_INIT	= 0x31,
	AR9170_CMD_SYNTH	= 0x32,
	AR9170_CMD_FREQ_START	= 0x33,
	AR9170_CMD_ECHO		= 0x80,
	AR9170_CMD_TALLY	= 0x81,
	AR9170_CMD_TALLY_APD	= 0x82,
	AR9170_CMD_CONFIG	= 0x83,
	AR9170_CMD_RESET	= 0x90,
	AR9170_CMD_DKRESET	= 0x91,
	AR9170_CMD_DKTX_STATUS	= 0x92,
	AR9170_CMD_FDC		= 0xA0,
	/* note: WREEPROM and WFLASH deliberately share opcode 0xB0 */
	AR9170_CMD_WREEPROM	= 0xB0,
	AR9170_CMD_WFLASH	= 0xB0,
	AR9170_CMD_FLASH_ERASE	= 0xB1,
	AR9170_CMD_FLASH_PROG	= 0xB2,
	AR9170_CMD_FLASH_CHKSUM	= 0xB3,
	AR9170_CMD_FLASH_READ	= 0xB4,
	AR9170_CMD_FW_DL_INIT	= 0xB5,
	AR9170_CMD_MEM_WREEPROM	= 0xBB,
};
73
74/* endpoints */
75#define AR9170_EP_TX 1
76#define AR9170_EP_RX 2
77#define AR9170_EP_IRQ 3
78#define AR9170_EP_CMD 4
79
80#define AR9170_EEPROM_START 0x1600
81
82#define AR9170_GPIO_REG_BASE 0x1d0100
83#define AR9170_GPIO_REG_PORT_TYPE AR9170_GPIO_REG_BASE
84#define AR9170_GPIO_REG_DATA (AR9170_GPIO_REG_BASE + 4)
85#define AR9170_NUM_LEDS 2
86
87
88#define AR9170_USB_REG_BASE 0x1e1000
89#define AR9170_USB_REG_DMA_CTL (AR9170_USB_REG_BASE + 0x108)
90#define AR9170_DMA_CTL_ENABLE_TO_DEVICE 0x1
91#define AR9170_DMA_CTL_ENABLE_FROM_DEVICE 0x2
92#define AR9170_DMA_CTL_HIGH_SPEED 0x4
93#define AR9170_DMA_CTL_PACKET_MODE 0x8
94
95#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110)
96#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114)
97
98
99
100#define AR9170_MAC_REG_BASE 0x1c3000
101
102#define AR9170_MAC_REG_TSF_L (AR9170_MAC_REG_BASE + 0x514)
103#define AR9170_MAC_REG_TSF_H (AR9170_MAC_REG_BASE + 0x518)
104
105#define AR9170_MAC_REG_ATIM_WINDOW (AR9170_MAC_REG_BASE + 0x51C)
106#define AR9170_MAC_REG_BCN_PERIOD (AR9170_MAC_REG_BASE + 0x520)
107#define AR9170_MAC_REG_PRETBTT (AR9170_MAC_REG_BASE + 0x524)
108
109#define AR9170_MAC_REG_MAC_ADDR_L (AR9170_MAC_REG_BASE + 0x610)
110#define AR9170_MAC_REG_MAC_ADDR_H (AR9170_MAC_REG_BASE + 0x614)
111#define AR9170_MAC_REG_BSSID_L (AR9170_MAC_REG_BASE + 0x618)
112#define AR9170_MAC_REG_BSSID_H (AR9170_MAC_REG_BASE + 0x61c)
113
114#define AR9170_MAC_REG_GROUP_HASH_TBL_L (AR9170_MAC_REG_BASE + 0x624)
115#define AR9170_MAC_REG_GROUP_HASH_TBL_H (AR9170_MAC_REG_BASE + 0x628)
116
117#define AR9170_MAC_REG_RX_TIMEOUT (AR9170_MAC_REG_BASE + 0x62C)
118
119#define AR9170_MAC_REG_BASIC_RATE (AR9170_MAC_REG_BASE + 0x630)
120#define AR9170_MAC_REG_MANDATORY_RATE (AR9170_MAC_REG_BASE + 0x634)
121#define AR9170_MAC_REG_RTS_CTS_RATE (AR9170_MAC_REG_BASE + 0x638)
122#define AR9170_MAC_REG_BACKOFF_PROTECT (AR9170_MAC_REG_BASE + 0x63c)
123#define AR9170_MAC_REG_RX_THRESHOLD (AR9170_MAC_REG_BASE + 0x640)
124#define AR9170_MAC_REG_RX_PE_DELAY (AR9170_MAC_REG_BASE + 0x64C)
125
126#define AR9170_MAC_REG_DYNAMIC_SIFS_ACK (AR9170_MAC_REG_BASE + 0x658)
127#define AR9170_MAC_REG_SNIFFER (AR9170_MAC_REG_BASE + 0x674)
128#define AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC BIT(0)
129#define AR9170_MAC_REG_SNIFFER_DEFAULTS 0x02000000
130#define AR9170_MAC_REG_ENCRYPTION (AR9170_MAC_REG_BASE + 0x678)
131#define AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE BIT(3)
132#define AR9170_MAC_REG_ENCRYPTION_DEFAULTS 0x70
133
134#define AR9170_MAC_REG_MISC_680 (AR9170_MAC_REG_BASE + 0x680)
135#define AR9170_MAC_REG_TX_UNDERRUN (AR9170_MAC_REG_BASE + 0x688)
136
137#define AR9170_MAC_REG_FRAMETYPE_FILTER (AR9170_MAC_REG_BASE + 0x68c)
138#define AR9170_MAC_REG_FTF_ASSOC_REQ BIT(0)
139#define AR9170_MAC_REG_FTF_ASSOC_RESP BIT(1)
140#define AR9170_MAC_REG_FTF_REASSOC_REQ BIT(2)
141#define AR9170_MAC_REG_FTF_REASSOC_RESP BIT(3)
142#define AR9170_MAC_REG_FTF_PRB_REQ BIT(4)
143#define AR9170_MAC_REG_FTF_PRB_RESP BIT(5)
144#define AR9170_MAC_REG_FTF_BIT6 BIT(6)
145#define AR9170_MAC_REG_FTF_BIT7 BIT(7)
146#define AR9170_MAC_REG_FTF_BEACON BIT(8)
147#define AR9170_MAC_REG_FTF_ATIM BIT(9)
148#define AR9170_MAC_REG_FTF_DEASSOC BIT(10)
149#define AR9170_MAC_REG_FTF_AUTH BIT(11)
150#define AR9170_MAC_REG_FTF_DEAUTH BIT(12)
151#define AR9170_MAC_REG_FTF_BIT13 BIT(13)
152#define AR9170_MAC_REG_FTF_BIT14 BIT(14)
153#define AR9170_MAC_REG_FTF_BIT15 BIT(15)
154#define AR9170_MAC_REG_FTF_BAR BIT(24)
155#define AR9170_MAC_REG_FTF_BIT25 BIT(25)
156#define AR9170_MAC_REG_FTF_PSPOLL BIT(26)
157#define AR9170_MAC_REG_FTF_RTS BIT(27)
158#define AR9170_MAC_REG_FTF_CTS BIT(28)
159#define AR9170_MAC_REG_FTF_ACK BIT(29)
160#define AR9170_MAC_REG_FTF_CFE BIT(30)
161#define AR9170_MAC_REG_FTF_CFE_ACK BIT(31)
162#define AR9170_MAC_REG_FTF_DEFAULTS 0x0500ffff
163#define AR9170_MAC_REG_FTF_MONITOR 0xfd00ffff
164
165#define AR9170_MAC_REG_RX_TOTAL (AR9170_MAC_REG_BASE + 0x6A0)
166#define AR9170_MAC_REG_RX_CRC32 (AR9170_MAC_REG_BASE + 0x6A4)
167#define AR9170_MAC_REG_RX_CRC16 (AR9170_MAC_REG_BASE + 0x6A8)
168#define AR9170_MAC_REG_RX_ERR_DECRYPTION_UNI (AR9170_MAC_REG_BASE + 0x6AC)
169#define AR9170_MAC_REG_RX_OVERRUN (AR9170_MAC_REG_BASE + 0x6B0)
170#define AR9170_MAC_REG_RX_ERR_DECRYPTION_MUL (AR9170_MAC_REG_BASE + 0x6BC)
171#define AR9170_MAC_REG_TX_RETRY (AR9170_MAC_REG_BASE + 0x6CC)
172#define AR9170_MAC_REG_TX_TOTAL (AR9170_MAC_REG_BASE + 0x6F4)
173
174
175#define AR9170_MAC_REG_ACK_EXTENSION (AR9170_MAC_REG_BASE + 0x690)
176#define AR9170_MAC_REG_EIFS_AND_SIFS (AR9170_MAC_REG_BASE + 0x698)
177
178#define AR9170_MAC_REG_SLOT_TIME (AR9170_MAC_REG_BASE + 0x6F0)
179
180#define AR9170_MAC_REG_POWERMANAGEMENT (AR9170_MAC_REG_BASE + 0x700)
181#define AR9170_MAC_REG_POWERMGT_IBSS 0xe0
182#define AR9170_MAC_REG_POWERMGT_AP 0xa1
183#define AR9170_MAC_REG_POWERMGT_STA 0x2
184#define AR9170_MAC_REG_POWERMGT_AP_WDS 0x3
185#define AR9170_MAC_REG_POWERMGT_DEFAULTS (0xf << 24)
186
187#define AR9170_MAC_REG_ROLL_CALL_TBL_L (AR9170_MAC_REG_BASE + 0x704)
188#define AR9170_MAC_REG_ROLL_CALL_TBL_H (AR9170_MAC_REG_BASE + 0x708)
189
190#define AR9170_MAC_REG_AC0_CW (AR9170_MAC_REG_BASE + 0xB00)
191#define AR9170_MAC_REG_AC1_CW (AR9170_MAC_REG_BASE + 0xB04)
192#define AR9170_MAC_REG_AC2_CW (AR9170_MAC_REG_BASE + 0xB08)
193#define AR9170_MAC_REG_AC3_CW (AR9170_MAC_REG_BASE + 0xB0C)
194#define AR9170_MAC_REG_AC4_CW (AR9170_MAC_REG_BASE + 0xB10)
195#define AR9170_MAC_REG_AC1_AC0_AIFS (AR9170_MAC_REG_BASE + 0xB14)
196#define AR9170_MAC_REG_AC3_AC2_AIFS (AR9170_MAC_REG_BASE + 0xB18)
197
198#define AR9170_MAC_REG_RETRY_MAX (AR9170_MAC_REG_BASE + 0xB28)
199
200#define AR9170_MAC_REG_FCS_SELECT (AR9170_MAC_REG_BASE + 0xBB0)
201#define AR9170_MAC_FCS_SWFCS 0x1
202#define AR9170_MAC_FCS_FIFO_PROT 0x4
203
204
205#define AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND (AR9170_MAC_REG_BASE + 0xB30)
206
207#define AR9170_MAC_REG_AC1_AC0_TXOP (AR9170_MAC_REG_BASE + 0xB44)
208#define AR9170_MAC_REG_AC3_AC2_TXOP (AR9170_MAC_REG_BASE + 0xB48)
209
210#define AR9170_MAC_REG_ACK_TABLE (AR9170_MAC_REG_BASE + 0xC00)
211#define AR9170_MAC_REG_AMPDU_RX_THRESH (AR9170_MAC_REG_BASE + 0xC50)
212
213#define AR9170_MAC_REG_TXRX_MPI (AR9170_MAC_REG_BASE + 0xD7C)
214#define AR9170_MAC_TXRX_MPI_TX_MPI_MASK 0x0000000f
215#define AR9170_MAC_TXRX_MPI_TX_TO_MASK 0x0000fff0
216#define AR9170_MAC_TXRX_MPI_RX_MPI_MASK 0x000f0000
217#define AR9170_MAC_TXRX_MPI_RX_TO_MASK 0xfff00000
218
219#define AR9170_MAC_REG_BCN_ADDR (AR9170_MAC_REG_BASE + 0xD84)
220#define AR9170_MAC_REG_BCN_LENGTH (AR9170_MAC_REG_BASE + 0xD88)
221#define AR9170_MAC_REG_BCN_PLCP (AR9170_MAC_REG_BASE + 0xD90)
222#define AR9170_MAC_REG_BCN_CTRL (AR9170_MAC_REG_BASE + 0xD94)
223#define AR9170_MAC_REG_BCN_HT1 (AR9170_MAC_REG_BASE + 0xDA0)
224#define AR9170_MAC_REG_BCN_HT2 (AR9170_MAC_REG_BASE + 0xDA4)
225
226
227#define AR9170_PWR_REG_BASE 0x1D4000
228
229#define AR9170_PWR_REG_CLOCK_SEL (AR9170_PWR_REG_BASE + 0x008)
230#define AR9170_PWR_CLK_AHB_40MHZ 0
231#define AR9170_PWR_CLK_AHB_20_22MHZ 1
232#define AR9170_PWR_CLK_AHB_40_44MHZ 2
233#define AR9170_PWR_CLK_AHB_80_88MHZ 3
234#define AR9170_PWR_CLK_DAC_160_INV_DLY 0x70
235
236
237/* put beacon here in memory */
238#define AR9170_BEACON_BUFFER_ADDRESS 0x117900
239
240
/*
 * Per-frame TX descriptor; the 802.11 frame itself follows directly in
 * frame_data[] (trailing variable-length member).
 */
struct ar9170_tx_control {
	__le16 length;		/* total length */
	__le16 mac_control;	/* AR9170_TX_MAC_* flags */
	__le32 phy_control;	/* AR9170_TX_PHY_* flags */
	u8 frame_data[0];
} __packed;
247
248/* these are either-or */
249#define AR9170_TX_MAC_PROT_RTS 0x0001
250#define AR9170_TX_MAC_PROT_CTS 0x0002
251
252#define AR9170_TX_MAC_NO_ACK 0x0004
253/* if unset, MAC will only do SIFS space before frame */
254#define AR9170_TX_MAC_BACKOFF 0x0008
255#define AR9170_TX_MAC_BURST 0x0010
256#define AR9170_TX_MAC_AGGR 0x0020
257
258/* encryption is a two-bit field */
259#define AR9170_TX_MAC_ENCR_NONE 0x0000
260#define AR9170_TX_MAC_ENCR_RC4 0x0040
261#define AR9170_TX_MAC_ENCR_CENC 0x0080
262#define AR9170_TX_MAC_ENCR_AES 0x00c0
263
264#define AR9170_TX_MAC_MMIC 0x0100
265#define AR9170_TX_MAC_HW_DURATION 0x0200
266#define AR9170_TX_MAC_QOS_SHIFT 10
267#define AR9170_TX_MAC_QOS_MASK (3 << AR9170_TX_MAC_QOS_SHIFT)
268#define AR9170_TX_MAC_AGGR_QOS_BIT1 0x0400
269#define AR9170_TX_MAC_AGGR_QOS_BIT2 0x0800
270#define AR9170_TX_MAC_DISABLE_TXOP 0x1000
271#define AR9170_TX_MAC_TXOP_RIFS 0x2000
272#define AR9170_TX_MAC_IMM_AMPDU 0x4000
273#define AR9170_TX_MAC_RATE_PROBE 0x8000
274
275/* either-or */
276#define AR9170_TX_PHY_MOD_CCK 0x00000000
277#define AR9170_TX_PHY_MOD_OFDM 0x00000001
278#define AR9170_TX_PHY_MOD_HT 0x00000002
279
280/* depends on modulation */
281#define AR9170_TX_PHY_SHORT_PREAMBLE 0x00000004
282#define AR9170_TX_PHY_GREENFIELD 0x00000004
283
284#define AR9170_TX_PHY_BW_SHIFT 3
285#define AR9170_TX_PHY_BW_MASK (3 << AR9170_TX_PHY_BW_SHIFT)
286#define AR9170_TX_PHY_BW_20MHZ 0
287#define AR9170_TX_PHY_BW_40MHZ 2
288#define AR9170_TX_PHY_BW_40MHZ_DUP 3
289
290#define AR9170_TX_PHY_TX_HEAVY_CLIP_SHIFT 6
291#define AR9170_TX_PHY_TX_HEAVY_CLIP_MASK (7 << AR9170_TX_PHY_TX_HEAVY_CLIP_SHIFT)
292
293#define AR9170_TX_PHY_TX_PWR_SHIFT 9
294#define AR9170_TX_PHY_TX_PWR_MASK (0x3f << AR9170_TX_PHY_TX_PWR_SHIFT)
295
296/* not part of the hw-spec */
297#define AR9170_TX_PHY_QOS_SHIFT 25
298#define AR9170_TX_PHY_QOS_MASK (3 << AR9170_TX_PHY_QOS_SHIFT)
299
300#define AR9170_TX_PHY_TXCHAIN_SHIFT 15
301#define AR9170_TX_PHY_TXCHAIN_MASK (7 << AR9170_TX_PHY_TXCHAIN_SHIFT)
302#define AR9170_TX_PHY_TXCHAIN_1 1
303/* use for cck, ofdm 6/9/12/18/24 and HT if capable */
304#define AR9170_TX_PHY_TXCHAIN_2 5
305
306#define AR9170_TX_PHY_MCS_SHIFT 18
307#define AR9170_TX_PHY_MCS_MASK (0x7f << AR9170_TX_PHY_MCS_SHIFT)
308
309#define AR9170_TX_PHY_SHORT_GI 0x80000000
310
/* Leading RX descriptor: the raw 12-byte PLCP header. */
struct ar9170_rx_head {
	u8 plcp[12];
};
314
/*
 * Trailing RX status block.  The anonymous struct/union lets the
 * per-antenna RSSI values be addressed by name or via the rssi[] array.
 */
struct ar9170_rx_tail {
	union {
		struct {
			u8 rssi_ant0, rssi_ant1, rssi_ant2,
			   rssi_ant0x, rssi_ant1x, rssi_ant2x,
			   rssi_combined;
		};
		u8 rssi[7];
	};

	u8 evm_stream0[6], evm_stream1[6];	/* per-stream EVM samples */
	u8 phy_err;
	/* top two bits of each index hold the cipher type, see
	 * ar9170_get_decrypt_type() */
	u8 SAidx, DAidx;
	u8 error;	/* AR9170_RX_ERROR_* bits */
	u8 status;	/* AR9170_RX_STATUS_* bits */
};
331
332#define AR9170_ENC_ALG_NONE 0x0
333#define AR9170_ENC_ALG_WEP64 0x1
334#define AR9170_ENC_ALG_TKIP 0x2
335#define AR9170_ENC_ALG_AESCCMP 0x4
336#define AR9170_ENC_ALG_WEP128 0x5
337#define AR9170_ENC_ALG_WEP256 0x6
338#define AR9170_ENC_ALG_CENC 0x7
339
340#define AR9170_RX_ENC_SOFTWARE 0x8
341
342static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_tail *t)
343{
344 return (t->SAidx & 0xc0) >> 4 |
345 (t->DAidx & 0xc0) >> 6;
346}
347
348#define AR9170_RX_STATUS_MODULATION_MASK 0x03
349#define AR9170_RX_STATUS_MODULATION_CCK 0x00
350#define AR9170_RX_STATUS_MODULATION_OFDM 0x01
351#define AR9170_RX_STATUS_MODULATION_HT 0x02
352#define AR9170_RX_STATUS_MODULATION_DUPOFDM 0x03
353
354/* depends on modulation */
355#define AR9170_RX_STATUS_SHORT_PREAMBLE 0x08
356#define AR9170_RX_STATUS_GREENFIELD 0x08
357
358#define AR9170_RX_STATUS_MPDU_MASK 0x30
359#define AR9170_RX_STATUS_MPDU_SINGLE 0x00
360#define AR9170_RX_STATUS_MPDU_FIRST 0x10
361#define AR9170_RX_STATUS_MPDU_MIDDLE 0x20
362#define AR9170_RX_STATUS_MPDU_LAST 0x30
363
364
365#define AR9170_RX_ERROR_RXTO 0x01
366#define AR9170_RX_ERROR_OVERRUN 0x02
367#define AR9170_RX_ERROR_DECRYPT 0x04
368#define AR9170_RX_ERROR_FCS 0x08
369#define AR9170_RX_ERROR_WRONG_RA 0x10
370#define AR9170_RX_ERROR_PLCP 0x20
371#define AR9170_RX_ERROR_MMIC 0x40
372
/* Firmware TX-status report. */
struct ar9170_cmd_tx_status {
	__le16 unkn;		/* unknown/undocumented */
	u8 dst[ETH_ALEN];	/* destination MAC of the reported frame */
	__le32 rate;
	__le16 status;		/* AR9170_TX_STATUS_* */
} __packed;
379
380#define AR9170_TX_STATUS_COMPLETE 0x00
381#define AR9170_TX_STATUS_RETRY 0x01
382#define AR9170_TX_STATUS_FAILED 0x02
383
/* Firmware report of failed block-ack attempts at a given rate. */
struct ar9170_cmd_ba_failed_count {
	__le16 failed;
	__le16 rate;
} __packed;

/*
 * Generic firmware response/event: a two-byte header followed by a
 * type-specific payload (the anonymous union).
 */
struct ar9170_cmd_response {
	u8 flag;
	u8 type;

	union {
		struct ar9170_cmd_tx_status tx_status;
		struct ar9170_cmd_ba_failed_count ba_fail_cnt;
		u8 data[0];	/* raw payload view */
	};
} __packed;
399
400/* QoS */
401
/* mac80211 queue index -> HW/FW queue number */
static const u8 ar9170_qos_hwmap[4] = { 3, 2, 0, 1 };

/* HW/FW queue number -> mac80211 queue index (inverse of the above) */
static const u8 ar9170_qos_mac80211map[4] = { 2, 3, 1, 0 };

/* hardware/firmware TX queue numbering */
enum ar9170_txq {
	AR9170_TXQ_BE,		/* best effort */
	AR9170_TXQ_BK,		/* background */
	AR9170_TXQ_VI,		/* video */
	AR9170_TXQ_VO,		/* voice */

	__AR9170_NUM_TXQ,
};
416
417#endif /* __AR9170_HW_H */
diff --git a/drivers/net/wireless/ar9170/led.c b/drivers/net/wireless/ar9170/led.c
new file mode 100644
index 000000000000..341cead7f606
--- /dev/null
+++ b/drivers/net/wireless/ar9170/led.c
@@ -0,0 +1,171 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * LED handling
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#include "ar9170.h"
40#include "cmd.h"
41
/* Drive the LED GPIO output register; bit i controls LED i. */
int ar9170_set_leds_state(struct ar9170 *ar, u32 led_state)
{
	return ar9170_write_reg(ar, AR9170_GPIO_REG_DATA, led_state);
}
46
47int ar9170_init_leds(struct ar9170 *ar)
48{
49 int err;
50
51 /* disable LEDs */
52 /* GPIO [0/1 mode: output, 2/3: input] */
53 err = ar9170_write_reg(ar, AR9170_GPIO_REG_PORT_TYPE, 3);
54 if (err)
55 goto out;
56
57 /* GPIO 0/1 value: off */
58 err = ar9170_set_leds_state(ar, 0);
59
60out:
61 return err;
62}
63
64#ifdef CONFIG_AR9170_LEDS
/*
 * Delayed work that blinks the LEDs.
 *
 * Every LED with a non-zero ->toggled counter is lit for this cycle and
 * the blink period shrinks as the counter grows (70 + 200/toggled ms,
 * i.e. more activity blinks faster).  The work re-queues itself as long
 * as any LED was active.
 */
static void ar9170_update_leds(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170, led_work.work);
	int i, tmp, blink_delay = 1000;
	u32 led_val = 0;
	bool rerun = false;

	/* device no longer accepts commands -- stop blinking */
	if (unlikely(!IS_ACCEPTING_CMD(ar)))
		return ;

	mutex_lock(&ar->mutex);
	for (i = 0; i < AR9170_NUM_LEDS; i++)
		if (ar->leds[i].toggled) {
			led_val |= 1 << i;	/* LED i = GPIO data bit i */

			/* shortest per-LED delay wins for the next run */
			tmp = 70 + 200 / (ar->leds[i].toggled);
			if (tmp < blink_delay)
				blink_delay = tmp;

			/* consume the activity count (a value of 1 keeps
			 * the LED lit until more activity arrives) */
			if (ar->leds[i].toggled > 1)
				ar->leds[i].toggled = 0;

			rerun = true;
		}

	ar9170_set_leds_state(ar, led_val);
	mutex_unlock(&ar->mutex);

	if (rerun)
		queue_delayed_work(ar->hw->workqueue, &ar->led_work,
				   msecs_to_jiffies(blink_delay));
}
97
98static void ar9170_led_brightness_set(struct led_classdev *led,
99 enum led_brightness brightness)
100{
101 struct ar9170_led *arl = container_of(led, struct ar9170_led, l);
102 struct ar9170 *ar = arl->ar;
103
104 arl->toggled++;
105
106 if (likely(IS_ACCEPTING_CMD(ar) && brightness))
107 queue_delayed_work(ar->hw->workqueue, &ar->led_work, HZ/10);
108}
109
/*
 * Register one LED class device for slot i, named
 * "ar9170-<phy>::<name>", with 'trigger' as its default LED trigger.
 * Returns 0 on success or the led_classdev_register() error.
 */
static int ar9170_register_led(struct ar9170 *ar, int i, char *name,
			       char *trigger)
{
	int err;

	snprintf(ar->leds[i].name, sizeof(ar->leds[i].name),
		 "ar9170-%s::%s", wiphy_name(ar->hw->wiphy), name);

	ar->leds[i].ar = ar;
	ar->leds[i].l.name = ar->leds[i].name;
	ar->leds[i].l.brightness_set = ar9170_led_brightness_set;
	ar->leds[i].l.brightness = 0;
	ar->leds[i].l.default_trigger = trigger;

	err = led_classdev_register(wiphy_dev(ar->hw->wiphy),
				    &ar->leds[i].l);
	if (err)
		printk(KERN_ERR "%s: failed to register %s LED (%d).\n",
		       wiphy_name(ar->hw->wiphy), ar->leds[i].name, err);
	else
		ar->leds[i].registered = true;	/* remembered for teardown */

	return err;
}
134
135void ar9170_unregister_leds(struct ar9170 *ar)
136{
137 int i;
138
139 cancel_delayed_work_sync(&ar->led_work);
140
141 for (i = 0; i < AR9170_NUM_LEDS; i++)
142 if (ar->leds[i].registered) {
143 led_classdev_unregister(&ar->leds[i].l);
144 ar->leds[i].registered = false;
145 }
146}
147
/*
 * Create the "tx" and "assoc" LEDs wired to mac80211's LED triggers.
 * On failure, any LED registered so far is unregistered again.
 */
int ar9170_register_leds(struct ar9170 *ar)
{
	int err;

	INIT_DELAYED_WORK(&ar->led_work, ar9170_update_leds);

	err = ar9170_register_led(ar, 0, "tx",
				  ieee80211_get_tx_led_name(ar->hw));
	if (err)
		goto fail;

	err = ar9170_register_led(ar, 1, "assoc",
				  ieee80211_get_assoc_led_name(ar->hw));
	if (err)
		goto fail;

	return 0;

fail:
	ar9170_unregister_leds(ar);
	return err;
}
170
171#endif /* CONFIG_AR9170_LEDS */
diff --git a/drivers/net/wireless/ar9170/mac.c b/drivers/net/wireless/ar9170/mac.c
new file mode 100644
index 000000000000..c8fa3073169f
--- /dev/null
+++ b/drivers/net/wireless/ar9170/mac.c
@@ -0,0 +1,452 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * MAC programming
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#include "ar9170.h"
39#include "cmd.h"
40
41int ar9170_set_qos(struct ar9170 *ar)
42{
43 ar9170_regwrite_begin(ar);
44
45 ar9170_regwrite(AR9170_MAC_REG_AC0_CW, ar->edcf[0].cw_min |
46 (ar->edcf[0].cw_max << 16));
47 ar9170_regwrite(AR9170_MAC_REG_AC1_CW, ar->edcf[1].cw_min |
48 (ar->edcf[1].cw_max << 16));
49 ar9170_regwrite(AR9170_MAC_REG_AC2_CW, ar->edcf[2].cw_min |
50 (ar->edcf[2].cw_max << 16));
51 ar9170_regwrite(AR9170_MAC_REG_AC3_CW, ar->edcf[3].cw_min |
52 (ar->edcf[3].cw_max << 16));
53 ar9170_regwrite(AR9170_MAC_REG_AC4_CW, ar->edcf[4].cw_min |
54 (ar->edcf[4].cw_max << 16));
55
56 ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_AIFS,
57 ((ar->edcf[0].aifs * 9 + 10)) |
58 ((ar->edcf[1].aifs * 9 + 10) << 12) |
59 ((ar->edcf[2].aifs * 9 + 10) << 24));
60 ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_AIFS,
61 ((ar->edcf[2].aifs * 9 + 10) >> 8) |
62 ((ar->edcf[3].aifs * 9 + 10) << 4) |
63 ((ar->edcf[4].aifs * 9 + 10) << 16));
64
65 ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP,
66 ar->edcf[0].txop | ar->edcf[1].txop << 16);
67 ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP,
68 ar->edcf[1].txop | ar->edcf[3].txop << 16);
69
70 ar9170_regwrite_finish();
71
72 return ar9170_regwrite_result();
73}
74
/*
 * One-time MAC initialization: rate sets, timing, frame filter and DMA
 * defaults.  Most of the magic values are inherited from the vendor
 * (otus) driver and are not publicly documented.
 */
int ar9170_init_mac(struct ar9170 *ar)
{
	ar9170_regwrite_begin(ar);

	ar9170_regwrite(AR9170_MAC_REG_ACK_EXTENSION, 0x40);

	/* hardware retries off; see ar9170_set_hwretry_limit() which
	 * writes this same register later */
	ar9170_regwrite(AR9170_MAC_REG_RETRY_MAX, 0);

	/* enable MMIC */
	ar9170_regwrite(AR9170_MAC_REG_SNIFFER,
			AR9170_MAC_REG_SNIFFER_DEFAULTS);

	ar9170_regwrite(AR9170_MAC_REG_RX_THRESHOLD, 0xc1f80);

	ar9170_regwrite(AR9170_MAC_REG_RX_PE_DELAY, 0x70);
	ar9170_regwrite(AR9170_MAC_REG_EIFS_AND_SIFS, 0xa144000);
	ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, 9 << 10);

	/* CF-END mode */
	ar9170_regwrite(0x1c3b2c, 0x19000000);

	/* NAV protects ACK only (in TXOP) */
	ar9170_regwrite(0x1c3b38, 0x201);

	/* Set Beacon PHY CTRL's TPC to 0x7, TA1=1 */
	/* OTUS set AM to 0x1 */
	ar9170_regwrite(AR9170_MAC_REG_BCN_HT1, 0x8000170);

	ar9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105);

	/* AGG test code*/
	/* Aggregation MAX number and timeout */
	ar9170_regwrite(0x1c3b9c, 0x10000a);

	ar9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
			AR9170_MAC_REG_FTF_DEFAULTS);

	/* Enable deaggregator, response in sniffer mode */
	ar9170_regwrite(0x1c3c40, 0x1 | 1<<30);

	/* rate sets */
	ar9170_regwrite(AR9170_MAC_REG_BASIC_RATE, 0x150f);
	ar9170_regwrite(AR9170_MAC_REG_MANDATORY_RATE, 0x150f);
	ar9170_regwrite(AR9170_MAC_REG_RTS_CTS_RATE, 0x10b01bb);

	/* MIMO response control */
	ar9170_regwrite(0x1c3694, 0x4003C1E);/* bit 26~28 otus-AM */

	/* switch MAC to OTUS interface */
	ar9170_regwrite(0x1c3600, 0x3);

	ar9170_regwrite(AR9170_MAC_REG_AMPDU_RX_THRESH, 0xffff);

	/* set PHY register read timeout (??) */
	ar9170_regwrite(AR9170_MAC_REG_MISC_680, 0xf00008);

	/* Disable Rx TimeOut, workaround for BB. */
	ar9170_regwrite(AR9170_MAC_REG_RX_TIMEOUT, 0x0);

	/* Set CPU clock frequency to 88/80MHz */
	ar9170_regwrite(AR9170_PWR_REG_CLOCK_SEL,
			AR9170_PWR_CLK_AHB_80_88MHZ |
			AR9170_PWR_CLK_DAC_160_INV_DLY);

	/* Set WLAN DMA interrupt mode: generate int per packet */
	ar9170_regwrite(AR9170_MAC_REG_TXRX_MPI, 0x110011);

	ar9170_regwrite(AR9170_MAC_REG_FCS_SELECT,
			AR9170_MAC_FCS_FIFO_PROT);

	/* Disables the CF_END frame, undocumented register */
	ar9170_regwrite(AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND,
			0x141E0F48);

	ar9170_regwrite_finish();

	return ar9170_regwrite_result();
}
153
154static int ar9170_set_mac_reg(struct ar9170 *ar, const u32 reg, const u8 *mac)
155{
156 static const u8 zero[ETH_ALEN] = { 0 };
157
158 if (!mac)
159 mac = zero;
160
161 ar9170_regwrite_begin(ar);
162
163 ar9170_regwrite(reg,
164 (mac[3] << 24) | (mac[2] << 16) |
165 (mac[1] << 8) | mac[0]);
166
167 ar9170_regwrite(reg + 4, (mac[5] << 8) | mac[4]);
168
169 ar9170_regwrite_finish();
170
171 return ar9170_regwrite_result();
172}
173
174int ar9170_update_multicast(struct ar9170 *ar)
175{
176 int err;
177
178 ar9170_regwrite_begin(ar);
179 ar9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_H,
180 ar->want_mc_hash >> 32);
181 ar9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_L,
182 ar->want_mc_hash);
183
184 ar9170_regwrite_finish();
185 err = ar9170_regwrite_result();
186
187 if (err)
188 return err;
189
190 ar->cur_mc_hash = ar->want_mc_hash;
191
192 return 0;
193}
194
195int ar9170_update_frame_filter(struct ar9170 *ar)
196{
197 int err;
198
199 err = ar9170_write_reg(ar, AR9170_MAC_REG_FRAMETYPE_FILTER,
200 ar->want_filter);
201
202 if (err)
203 return err;
204
205 ar->cur_filter = ar->want_filter;
206
207 return 0;
208}
209
/*
 * Sync the hardware sniffer (promiscuous) and RX-encryption registers
 * with ar->sniffer_enabled / ar->rx_software_decryption.
 * NOTE(review): the name carries a typo ("promiscouous"); kept as-is
 * since the caller references it.
 */
static int ar9170_set_promiscouous(struct ar9170 *ar)
{
	u32 encr_mode, sniffer;
	int err;

	/* read-modify-write both registers */
	err = ar9170_read_reg(ar, AR9170_MAC_REG_SNIFFER, &sniffer);
	if (err)
		return err;

	err = ar9170_read_reg(ar, AR9170_MAC_REG_ENCRYPTION, &encr_mode);
	if (err)
		return err;

	if (ar->sniffer_enabled) {
		sniffer |= AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC;

		/*
		 * Rx decryption works in place.
		 *
		 * If we don't disable it, the hardware will render all
		 * encrypted frames which are encrypted with an unknown
		 * key useless.
		 */

		encr_mode |= AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE;
		/* NOTE(review): redundant -- already true in this branch */
		ar->sniffer_enabled = true;
	} else {
		sniffer &= ~AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC;

		if (ar->rx_software_decryption)
			encr_mode |= AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE;
		else
			encr_mode &= ~AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE;
	}

	ar9170_regwrite_begin(ar);
	ar9170_regwrite(AR9170_MAC_REG_ENCRYPTION, encr_mode);
	ar9170_regwrite(AR9170_MAC_REG_SNIFFER, sniffer);
	ar9170_regwrite_finish();

	return ar9170_regwrite_result();
}
252
/*
 * Program MAC address, BSSID, sniffer/encryption mode and the power
 * management register for the current interface type; with no interface
 * up, the address registers are cleared and PM defaults are used.
 */
int ar9170_set_operating_mode(struct ar9170 *ar)
{
	u32 pm_mode = AR9170_MAC_REG_POWERMGT_DEFAULTS;
	u8 *mac_addr, *bssid;
	int err;

	if (ar->vif) {
		mac_addr = ar->mac_addr;
		bssid = ar->bssid;

		switch (ar->vif->type) {
		case NL80211_IFTYPE_MESH_POINT:
		case NL80211_IFTYPE_ADHOC:
			pm_mode |= AR9170_MAC_REG_POWERMGT_IBSS;
			break;
/* AP mode deliberately disabled: AP interfaces fall through to the
 * STA default below.
		case NL80211_IFTYPE_AP:
			pm_mode |= AR9170_MAC_REG_POWERMGT_AP;
			break;*/
		case NL80211_IFTYPE_WDS:
			pm_mode |= AR9170_MAC_REG_POWERMGT_AP_WDS;
			break;
		case NL80211_IFTYPE_MONITOR:
			/* monitor: pass everything up, decrypt in software */
			ar->sniffer_enabled = true;
			ar->rx_software_decryption = true;
			break;
		default:
			pm_mode |= AR9170_MAC_REG_POWERMGT_STA;
			break;
		}
	} else {
		/* no interface: clear MAC address and BSSID registers */
		mac_addr = NULL;
		bssid = NULL;
	}

	err = ar9170_set_mac_reg(ar, AR9170_MAC_REG_MAC_ADDR_L, mac_addr);
	if (err)
		return err;

	err = ar9170_set_mac_reg(ar, AR9170_MAC_REG_BSSID_L, bssid);
	if (err)
		return err;

	err = ar9170_set_promiscouous(ar);
	if (err)
		return err;

	ar9170_regwrite_begin(ar);

	ar9170_regwrite(AR9170_MAC_REG_POWERMANAGEMENT, pm_mode);
	ar9170_regwrite_finish();

	return ar9170_regwrite_result();
}
306
/*
 * Set the hardware retry limit.  0x11111 replicates max_retry into five
 * 4-bit fields of the register (presumably one per TX queue -- TODO
 * confirm), clamped to 3 retries each via the 0x33333 ceiling.
 */
int ar9170_set_hwretry_limit(struct ar9170 *ar, unsigned int max_retry)
{
	u32 tmp = min_t(u32, 0x33333, max_retry * 0x11111);

	return ar9170_write_reg(ar, AR9170_MAC_REG_RETRY_MAX, tmp);
}
313
/*
 * Program the pre-TBTT and beacon-period registers.  The period
 * register packs the beacon interval in the low 16 bits and the DTIM
 * period in the upper bits; bits 24/25 appear to enable AP respectively
 * IBSS beaconing (undocumented, inferred from usage -- confirm).
 */
int ar9170_set_beacon_timers(struct ar9170 *ar)
{
	u32 v = 0;
	u32 pretbtt = 0;

	v |= ar->hw->conf.beacon_int;

	if (ar->vif) {
		switch (ar->vif->type) {
		case NL80211_IFTYPE_MESH_POINT:
		case NL80211_IFTYPE_ADHOC:
			v |= BIT(25);
			break;
		case NL80211_IFTYPE_AP:
			v |= BIT(24);
			/* pre-TBTT fires 6 units before the beacon --
			 * TODO confirm the unit (TU?) */
			pretbtt = (ar->hw->conf.beacon_int - 6) << 16;
			break;
		default:
			break;
		}

		v |= ar->vif->bss_conf.dtim_period << 16;
	}

	ar9170_regwrite_begin(ar);

	ar9170_regwrite(AR9170_MAC_REG_PRETBTT, pretbtt);
	ar9170_regwrite(AR9170_MAC_REG_BCN_PERIOD, v);
	ar9170_regwrite_finish();
	return ar9170_regwrite_result();
}
345
346int ar9170_update_beacon(struct ar9170 *ar)
347{
348 struct sk_buff *skb;
349 __le32 *data, *old = NULL;
350 u32 word;
351 int i;
352
353 skb = ieee80211_beacon_get(ar->hw, ar->vif);
354 if (!skb)
355 return -ENOMEM;
356
357 data = (__le32 *)skb->data;
358 if (ar->beacon)
359 old = (__le32 *)ar->beacon->data;
360
361 ar9170_regwrite_begin(ar);
362 for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
363 /*
364 * XXX: This accesses beyond skb data for up
365 * to the last 3 bytes!!
366 */
367
368 if (old && (data[i] == old[i]))
369 continue;
370
371 word = le32_to_cpu(data[i]);
372 ar9170_regwrite(AR9170_BEACON_BUFFER_ADDRESS + 4 * i, word);
373 }
374
375 /* XXX: use skb->cb info */
376 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ)
377 ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP,
378 ((skb->len + 4) << (3+16)) + 0x0400);
379 else
380 ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP,
381 ((skb->len + 4) << (3+16)) + 0x0400);
382
383 ar9170_regwrite(AR9170_MAC_REG_BCN_LENGTH, skb->len + 4);
384 ar9170_regwrite(AR9170_MAC_REG_BCN_ADDR, AR9170_BEACON_BUFFER_ADDRESS);
385 ar9170_regwrite(AR9170_MAC_REG_BCN_CTRL, 1);
386
387 ar9170_regwrite_finish();
388
389 dev_kfree_skb(ar->beacon);
390 ar->beacon = skb;
391
392 return ar9170_regwrite_result();
393}
394
/*
 * Work item: upload the next beacon to the device and transmit any
 * frames mac80211 buffered for after-beacon broadcast delivery.
 */
void ar9170_new_beacon(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 beacon_work);
	struct sk_buff *skb;

	if (unlikely(!IS_STARTED(ar)))
		return ;

	mutex_lock(&ar->mutex);

	if (!ar->vif)
		goto out;

	ar9170_update_beacon(ar);

	/* drain mac80211's buffered broadcast/multicast queue */
	rcu_read_lock();
	while ((skb = ieee80211_get_buffered_bc(ar->hw, ar->vif)))
		ar9170_op_tx(ar->hw, skb);

	rcu_read_unlock();

 out:
	mutex_unlock(&ar->mutex);
}
420
421int ar9170_upload_key(struct ar9170 *ar, u8 id, const u8 *mac, u8 ktype,
422 u8 keyidx, u8 *keydata, int keylen)
423{
424 __le32 vals[7];
425 static const u8 bcast[ETH_ALEN] =
426 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
427 u8 dummy;
428
429 mac = mac ? : bcast;
430
431 vals[0] = cpu_to_le32((keyidx << 16) + id);
432 vals[1] = cpu_to_le32(mac[1] << 24 | mac[0] << 16 | ktype);
433 vals[2] = cpu_to_le32(mac[5] << 24 | mac[4] << 16 |
434 mac[3] << 8 | mac[2]);
435 memset(&vals[3], 0, 16);
436 if (keydata)
437 memcpy(&vals[3], keydata, keylen);
438
439 return ar->exec_cmd(ar, AR9170_CMD_EKEY,
440 sizeof(vals), (u8 *)vals,
441 1, &dummy);
442}
443
444int ar9170_disable_key(struct ar9170 *ar, u8 id)
445{
446 __le32 val = cpu_to_le32(id);
447 u8 dummy;
448
449 return ar->exec_cmd(ar, AR9170_CMD_EKEY,
450 sizeof(val), (u8 *)&val,
451 1, &dummy);
452}
diff --git a/drivers/net/wireless/ar9170/main.c b/drivers/net/wireless/ar9170/main.c
new file mode 100644
index 000000000000..5996ff9f7f47
--- /dev/null
+++ b/drivers/net/wireless/ar9170/main.c
@@ -0,0 +1,1671 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, Christian Lamparter <chunkeey@web.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/init.h>
41#include <linux/module.h>
42#include <linux/etherdevice.h>
43#include <net/mac80211.h>
44#include "ar9170.h"
45#include "hw.h"
46#include "cmd.h"
47
48static int modparam_nohwcrypt;
49module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
50MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
51
#define RATE(_bitrate, _hw_rate, _txpidx, _flags) {	\
	.bitrate	= (_bitrate),			\
	.flags		= (_flags),			\
	.hw_value	= (_hw_rate) | (_txpidx) << 4,	\
}

/*
 * Combined legacy rate table: four CCK rates first (2 GHz only),
 * then the eight OFDM rates shared by both bands.  The low nibble of
 * hw_value is the PLCP rate code; bits 4-5 carry the tx-power index
 * (see the "rate->hw_value & 0x30" lookup in ar9170_op_tx).
 */
static struct ieee80211_rate __ar9170_ratetable[] = {
	RATE(10, 0, 0, 0),
	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0xb, 0, 0),
	RATE(90, 0xf, 0, 0),
	RATE(120, 0xa, 0, 0),
	RATE(180, 0xe, 0, 0),
	RATE(240, 0x9, 0, 0),
	RATE(360, 0xd, 1, 0),
	RATE(480, 0x8, 2, 0),
	RATE(540, 0xc, 3, 0),
};
#undef RATE

/* 2 GHz uses all 12 entries; 5 GHz skips the four CCK rates. */
#define ar9170_g_ratetable	(__ar9170_ratetable + 0)
#define ar9170_g_ratetable_size	12
#define ar9170_a_ratetable	(__ar9170_ratetable + 4)
#define ar9170_a_ratetable_size	8
78
/*
 * NB: The hw_value is used as an index into the ar9170_phy_freq_params
 * array in phy.c so that we don't have to do frequency lookups!
 */
#define CHAN(_freq, _idx) {		\
	.center_freq	= (_freq),	\
	.hw_value	= (_idx),	\
	.max_power	= 18, /* XXX */	\
}

/* 2.4 GHz channels 1-14 */
static struct ieee80211_channel ar9170_2ghz_chantable[] = {
	CHAN(2412, 0),
	CHAN(2417, 1),
	CHAN(2422, 2),
	CHAN(2427, 3),
	CHAN(2432, 4),
	CHAN(2437, 5),
	CHAN(2442, 6),
	CHAN(2447, 7),
	CHAN(2452, 8),
	CHAN(2457, 9),
	CHAN(2462, 10),
	CHAN(2467, 11),
	CHAN(2472, 12),
	CHAN(2484, 13),
};

/* 5 GHz (and 4.9 GHz) channels; hw_value continues after the 2 GHz set */
static struct ieee80211_channel ar9170_5ghz_chantable[] = {
	CHAN(4920, 14),
	CHAN(4940, 15),
	CHAN(4960, 16),
	CHAN(4980, 17),
	CHAN(5040, 18),
	CHAN(5060, 19),
	CHAN(5080, 20),
	CHAN(5180, 21),
	CHAN(5200, 22),
	CHAN(5220, 23),
	CHAN(5240, 24),
	CHAN(5260, 25),
	CHAN(5280, 26),
	CHAN(5300, 27),
	CHAN(5320, 28),
	CHAN(5500, 29),
	CHAN(5520, 30),
	CHAN(5540, 31),
	CHAN(5560, 32),
	CHAN(5580, 33),
	CHAN(5600, 34),
	CHAN(5620, 35),
	CHAN(5640, 36),
	CHAN(5660, 37),
	CHAN(5680, 38),
	CHAN(5700, 39),
	CHAN(5745, 40),
	CHAN(5765, 41),
	CHAN(5785, 42),
	CHAN(5805, 43),
	CHAN(5825, 44),
	CHAN(5170, 45),
	CHAN(5190, 46),
	CHAN(5210, 47),
	CHAN(5230, 48),
};
#undef CHAN
144
/* 2.4 GHz band descriptor registered with mac80211 (CCK + OFDM rates). */
static struct ieee80211_supported_band ar9170_band_2GHz = {
	.channels	= ar9170_2ghz_chantable,
	.n_channels	= ARRAY_SIZE(ar9170_2ghz_chantable),
	.bitrates	= ar9170_g_ratetable,
	.n_bitrates	= ar9170_g_ratetable_size,
};
151
152#ifdef AR9170_QUEUE_DEBUG
/*
 * If somebody wants to work with the AR9170's crazy tx_status queueing
 * techniques, this rather useful probing function dumps one queued
 * frame header.
 *
 * NOTE: the caller must hold the queue's spinlock!
 */
159
/* Dump the ar9170 tx descriptor and addressing info of one queued skb. */
static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ar9170_tx_control *txc = (void *) skb->data;
	struct ieee80211_hdr *hdr = (void *)txc->frame_data;

	printk(KERN_DEBUG "%s: => FRAME [skb:%p, queue:%d, DA:[%pM] "
			  "mac_control:%04x, phy_control:%08x]\n",
	       wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
	       ieee80211_get_DA(hdr), le16_to_cpu(txc->mac_control),
	       le32_to_cpu(txc->phy_control));
}
171
172static void ar9170_dump_station_tx_status_queue(struct ar9170 *ar,
173 struct sk_buff_head *queue)
174{
175 struct sk_buff *skb;
176 int i = 0;
177
178 printk(KERN_DEBUG "---[ cut here ]---\n");
179 printk(KERN_DEBUG "%s: %d entries in tx_status queue.\n",
180 wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
181
182 skb_queue_walk(queue, skb) {
183 struct ar9170_tx_control *txc = (void *) skb->data;
184 struct ieee80211_hdr *hdr = (void *)txc->frame_data;
185
186 printk(KERN_DEBUG "index:%d => \n", i);
187 ar9170_print_txheader(ar, skb);
188 }
189 printk(KERN_DEBUG "---[ end ]---\n");
190}
191#endif /* AR9170_QUEUE_DEBUG */
192
/* 5 GHz band descriptor registered with mac80211 (OFDM rates only). */
static struct ieee80211_supported_band ar9170_band_5GHz = {
	.channels	= ar9170_5ghz_chantable,
	.n_channels	= ARRAY_SIZE(ar9170_5ghz_chantable),
	.bitrates	= ar9170_a_ratetable,
	.n_bitrates	= ar9170_a_ratetable_size,
};
199
/*
 * Complete a transmitted frame back to mac80211.
 *
 * Updates the per-queue accounting (waking the queue if it had been
 * stopped), translates the firmware status code into tx_info flags and
 * a retry count, strips the ar9170 tx descriptor and reports via
 * ieee80211_tx_status_irqsafe().
 *
 * @valid_status: false when the report was synthesized (e.g. by the
 *	janitor worker) and the retry count must not be trusted.
 */
void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
			     bool valid_status, u16 tx_status)
{
	struct ieee80211_tx_info *txinfo;
	unsigned int retries = 0, queue = skb_get_queue_mapping(skb);
	unsigned long flags;

	spin_lock_irqsave(&ar->tx_stats_lock, flags);
	ar->tx_stats[queue].len--;
	if (ieee80211_queue_stopped(ar->hw, queue))
		ieee80211_wake_queue(ar->hw, queue);
	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);

	txinfo = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(txinfo);

	switch (tx_status) {
	case AR9170_TX_STATUS_RETRY:
		retries = 2;
		/* fall through - a retried frame was still ACKed */
	case AR9170_TX_STATUS_COMPLETE:
		txinfo->flags |= IEEE80211_TX_STAT_ACK;
		break;

	case AR9170_TX_STATUS_FAILED:
		retries = ar->hw->conf.long_frame_max_tx_count;
		break;

	default:
		printk(KERN_ERR "%s: invalid tx_status response (%x).\n",
		       wiphy_name(ar->hw->wiphy), tx_status);
		break;
	}

	if (valid_status)
		txinfo->status.rates[0].count = retries + 1;

	skb_pull(skb, sizeof(struct ar9170_tx_control));
	ieee80211_tx_status_irqsafe(ar->hw, skb);
}
239
240static struct sk_buff *ar9170_find_skb_in_queue(struct ar9170 *ar,
241 const u8 *mac,
242 const u32 queue,
243 struct sk_buff_head *q)
244{
245 unsigned long flags;
246 struct sk_buff *skb;
247
248 spin_lock_irqsave(&q->lock, flags);
249 skb_queue_walk(q, skb) {
250 struct ar9170_tx_control *txc = (void *) skb->data;
251 struct ieee80211_hdr *hdr = (void *) txc->frame_data;
252 u32 txc_queue = (le32_to_cpu(txc->phy_control) &
253 AR9170_TX_PHY_QOS_MASK) >>
254 AR9170_TX_PHY_QOS_SHIFT;
255
256 if ((queue != txc_queue) ||
257 (compare_ether_addr(ieee80211_get_DA(hdr), mac)))
258 continue;
259
260 __skb_unlink(skb, q);
261 spin_unlock_irqrestore(&q->lock, flags);
262 return skb;
263 }
264 spin_unlock_irqrestore(&q->lock, flags);
265 return NULL;
266}
267
/*
 * Locate the queued tx skb that a firmware tx_status report (which
 * only carries the destination MAC and hardware queue) most likely
 * refers to.  Lookup order: the destination station's private status
 * queue, then the "waste" queue, then the global status queue.
 * Returns NULL if nothing matches.
 */
static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac,
					      const u32 queue)
{
	struct ieee80211_sta *sta;
	struct sk_buff *skb;

	/*
	 * Unfortunately, the firmware does not tell to which (queued) frame
	 * this transmission status report belongs to.
	 *
	 * So we have to make risky guesses - with the scarce information
	 * the firmware provided (-> destination MAC, and phy_control) -
	 * and hope that we picked the right one...
	 */
	rcu_read_lock();
	sta = ieee80211_find_sta(ar->hw, mac);

	if (likely(sta)) {
		struct ar9170_sta_info *sta_priv = (void *) sta->drv_priv;
		skb = skb_dequeue(&sta_priv->tx_status[queue]);
		rcu_read_unlock();
		if (likely(skb))
			return skb;
	} else
		rcu_read_unlock();

	/* scan the waste queue for candidates */
	skb = ar9170_find_skb_in_queue(ar, mac, queue,
				       &ar->global_tx_status_waste);
	if (!skb) {
		/* so it still _must_ be in the global list. */
		skb = ar9170_find_skb_in_queue(ar, mac, queue,
					       &ar->global_tx_status);
	}

#ifdef AR9170_QUEUE_DEBUG
	if (unlikely((!skb) && net_ratelimit())) {
		printk(KERN_ERR "%s: ESS:[%pM] does not have any "
			"outstanding frames in this queue (%d).\n",
			wiphy_name(ar->hw->wiphy), mac, queue);
	}
#endif /* AR9170_QUEUE_DEBUG */
	return skb;
}
312
313/*
314 * This worker tries to keep the global tx_status queue empty.
315 * So we can guarantee that incoming tx_status reports for
316 * unregistered stations are always synced with the actual
317 * frame - which we think - belongs to.
318 */
319
/*
 * Delayed worker draining the global tx_status queues.
 *
 * Frames that have already sat a full janitor period on the waste queue
 * are given up on and completed as FAILED; the remaining frames from the
 * global queue are aged into the waste queue for the next pass.
 */
static void ar9170_tx_status_janitor(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 tx_status_janitor.work);
	struct sk_buff *skb;

	if (unlikely(!IS_STARTED(ar)))
		return ;

	mutex_lock(&ar->mutex);
	/* recycle the garbage back to mac80211... one by one. */
	while ((skb = skb_dequeue(&ar->global_tx_status_waste))) {
#ifdef AR9170_QUEUE_DEBUG
		printk(KERN_DEBUG "%s: dispose queued frame =>\n",
		       wiphy_name(ar->hw->wiphy));
		ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
		/* no firmware report ever arrived - report as FAILED
		 * with an untrusted (valid_status = false) retry count */
		ar9170_handle_tx_status(ar, skb, false,
					AR9170_TX_STATUS_FAILED);
	}

	/* age the remaining unclaimed frames into the waste queue */
	while ((skb = skb_dequeue(&ar->global_tx_status))) {
#ifdef AR9170_QUEUE_DEBUG
		printk(KERN_DEBUG "%s: moving frame into waste queue =>\n",
		       wiphy_name(ar->hw->wiphy));

		ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
		skb_queue_tail(&ar->global_tx_status_waste, skb);
	}

	/* recall the janitor in 100ms - if there's garbage in the can. */
	if (skb_queue_len(&ar->global_tx_status_waste) > 0)
		queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
				   msecs_to_jiffies(100));

	mutex_unlock(&ar->mutex);
}
358
/*
 * Dispatch a firmware response/event buffer.
 *
 * Buffers whose type does not have both top bits set are replies to a
 * pending host command and are forwarded to the command completion
 * callback; types 0xc0 and up are unsolicited hardware events handled
 * inline here.
 */
static void ar9170_handle_command_response(struct ar9170 *ar,
					   void *buf, u32 len)
{
	struct ar9170_cmd_response *cmd = (void *) buf;

	if ((cmd->type & 0xc0) != 0xc0) {
		ar->callback_cmd(ar, len, buf);
		return;
	}

	/* hardware event handlers */
	switch (cmd->type) {
	case 0xc1: {
		/*
		 * TX status notification:
		 * bytes: 0c c1 XX YY M1 M2 M3 M4 M5 M6 R4 R3 R2 R1 S2 S1
		 *
		 * XX always 81
		 * YY always 00
		 * M1-M6 is the MAC address
		 * R1-R4 is the transmit rate
		 * S1-S2 is the transmit status
		 */

		struct sk_buff *skb;
		u32 queue = (le32_to_cpu(cmd->tx_status.rate) &
			    AR9170_TX_PHY_QOS_MASK) >> AR9170_TX_PHY_QOS_SHIFT;

		/* match the report against a queued frame (best effort) */
		skb = ar9170_find_queued_skb(ar, cmd->tx_status.dst, queue);
		if (unlikely(!skb))
			return ;

		ar9170_handle_tx_status(ar, skb, true,
					le16_to_cpu(cmd->tx_status.status));
		break;
	}

	case 0xc0:
		/*
		 * pre-TBTT event
		 */
		if (ar->vif && ar->vif->type == NL80211_IFTYPE_AP)
			queue_work(ar->hw->workqueue, &ar->beacon_work);
		break;

	case 0xc2:
		/*
		 * (IBSS) beacon send notification
		 * bytes: 04 c2 XX YY B4 B3 B2 B1
		 *
		 * XX always 80
		 * YY always 00
		 * B1-B4 "should" be the number of send out beacons.
		 */
		break;

	case 0xc3:
		/* End of Atim Window */
		break;

	case 0xc4:
	case 0xc5:
		/* BlockACK events */
		break;

	case 0xc6:
		/* Watchdog Interrupt */
		break;

	case 0xc9:
		/* retransmission issue / SIFS/EIFS collision ?! */
		break;

	default:
		printk(KERN_INFO "received unhandled event %x\n", cmd->type);
		print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
		break;
	}
}
438
439/*
440 * If the frame alignment is right (or the kernel has
441 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
442 * is only a single MPDU in the USB frame, then we can
443 * submit to mac80211 the SKB directly. However, since
444 * there may be multiple packets in one SKB in stream
445 * mode, and we need to observe the proper ordering,
446 * this is non-trivial.
447 */
/*
 * Process a single received MPDU from a USB transfer.
 *
 * @buf points at an ar9170_rx_head (12 bytes), followed by the 802.11
 * frame, followed by an ar9170_rx_tail (24 bytes) carrying RSSI, error
 * and decryption information.  Builds an ieee80211_rx_status from the
 * PLCP/tail data, drops errored frames, copies the payload into a
 * freshly allocated skb and hands it to mac80211.
 */
static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
{
	struct sk_buff *skb;
	struct ar9170_rx_head *head = (void *)buf;
	struct ar9170_rx_tail *tail;
	struct ieee80211_rx_status status;
	int mpdu_len, i;
	u8 error, antennas = 0, decrypt;
	__le16 fc;
	int reserved;

	if (unlikely(!IS_STARTED(ar)))
		return ;

	/* Received MPDU */
	mpdu_len = len;
	mpdu_len -= sizeof(struct ar9170_rx_head);
	mpdu_len -= sizeof(struct ar9170_rx_tail);
	BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12);
	BUILD_BUG_ON(sizeof(struct ar9170_rx_tail) != 24);

	if (mpdu_len <= FCS_LEN)
		return;

	tail = (void *)(buf + sizeof(struct ar9170_rx_head) + mpdu_len);

	/* an RSSI byte of 0x80 marks an unused antenna chain */
	for (i = 0; i < 3; i++)
		if (tail->rssi[i] != 0x80)
			antennas |= BIT(i);

	/* post-process RSSI */
	/* NOTE(review): this loop touches 7 rssi bytes although only 3
	 * chains are probed above - presumably it also covers the
	 * ext-channel/combined values; confirm against the rx_tail
	 * layout in hw.h. */
	for (i = 0; i < 7; i++)
		if (tail->rssi[i] & 0x80)
			tail->rssi[i] = ((tail->rssi[i] & 0x7f) + 1) & 0x7f;

	memset(&status, 0, sizeof(status));

	status.band = ar->channel->band;
	status.freq = ar->channel->center_freq;
	status.signal = ar->noise[0] + tail->rssi_combined;
	status.noise = ar->noise[0];
	status.antenna = antennas;

	switch (tail->status & AR9170_RX_STATUS_MODULATION_MASK) {
	case AR9170_RX_STATUS_MODULATION_CCK:
		if (tail->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
			status.flag |= RX_FLAG_SHORTPRE;
		/* map the CCK PLCP signal byte to a rate index */
		switch (head->plcp[0]) {
		case 0x0a:
			status.rate_idx = 0;
			break;
		case 0x14:
			status.rate_idx = 1;
			break;
		case 0x37:
			status.rate_idx = 2;
			break;
		case 0x6e:
			status.rate_idx = 3;
			break;
		default:
			if ((!ar->sniffer_enabled) && (net_ratelimit()))
				printk(KERN_ERR "%s: invalid plcp cck rate "
				       "(%x).\n", wiphy_name(ar->hw->wiphy),
				       head->plcp[0]);
			return;
		}
		break;
	case AR9170_RX_STATUS_MODULATION_OFDM:
		/* the OFDM rate code lives in the low nibble of plcp[0] */
		switch (head->plcp[0] & 0xF) {
		case 0xB:
			status.rate_idx = 0;
			break;
		case 0xF:
			status.rate_idx = 1;
			break;
		case 0xA:
			status.rate_idx = 2;
			break;
		case 0xE:
			status.rate_idx = 3;
			break;
		case 0x9:
			status.rate_idx = 4;
			break;
		case 0xD:
			status.rate_idx = 5;
			break;
		case 0x8:
			status.rate_idx = 6;
			break;
		case 0xC:
			status.rate_idx = 7;
			break;
		default:
			if ((!ar->sniffer_enabled) && (net_ratelimit()))
				printk(KERN_ERR "%s: invalid plcp ofdm rate "
				       "(%x).\n", wiphy_name(ar->hw->wiphy),
				       head->plcp[0]);
			return;
		}
		/* on 2 GHz the OFDM rates start after the 4 CCK entries */
		if (status.band == IEEE80211_BAND_2GHZ)
			status.rate_idx += 4;
		break;
	case AR9170_RX_STATUS_MODULATION_HT:
	case AR9170_RX_STATUS_MODULATION_DUPOFDM:
		/* XXX */

		if (net_ratelimit())
			printk(KERN_ERR "%s: invalid modulation\n",
			       wiphy_name(ar->hw->wiphy));
		return;
	}

	error = tail->error;

	/* translate hardware error bits into mac80211 rx flags */
	if (error & AR9170_RX_ERROR_MMIC) {
		status.flag |= RX_FLAG_MMIC_ERROR;
		error &= ~AR9170_RX_ERROR_MMIC;
	}

	if (error & AR9170_RX_ERROR_PLCP) {
		status.flag |= RX_FLAG_FAILED_PLCP_CRC;
		error &= ~AR9170_RX_ERROR_PLCP;
	}

	if (error & AR9170_RX_ERROR_FCS) {
		status.flag |= RX_FLAG_FAILED_FCS_CRC;
		error &= ~AR9170_RX_ERROR_FCS;
	}

	decrypt = ar9170_get_decrypt_type(tail);
	if (!(decrypt & AR9170_RX_ENC_SOFTWARE) &&
	    decrypt != AR9170_ENC_ALG_NONE)
		status.flag |= RX_FLAG_DECRYPTED;

	/* ignore wrong RA errors */
	error &= ~AR9170_RX_ERROR_WRONG_RA;

	if (error & AR9170_RX_ERROR_DECRYPT) {
		error &= ~AR9170_RX_ERROR_DECRYPT;

		/*
		 * Rx decryption is done in place,
		 * the original data is lost anyway.
		 */
		return ;
	}

	/* drop any other error frames */
	if ((error) && (net_ratelimit())) {
		printk(KERN_DEBUG "%s: errors: %#x\n",
		       wiphy_name(ar->hw->wiphy), error);
		return;
	}

	buf += sizeof(struct ar9170_rx_head);
	fc = *(__le16 *)buf;

	/* extra 2 bytes of headroom when exactly one of QoS/A4 is present
	 * - presumably to keep the payload 4-byte aligned; TODO confirm */
	if (ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc))
		reserved = 32 + 2;
	else
		reserved = 32;

	skb = dev_alloc_skb(mpdu_len + reserved);
	if (!skb)
		return;

	skb_reserve(skb, reserved);
	memcpy(skb_put(skb, mpdu_len), buf, mpdu_len);
	ieee80211_rx_irqsafe(ar->hw, skb, &status);
}
620
/*
 * Split one USB bulk-in buffer into its embedded chunks.
 *
 * Each chunk carries a 4-byte header: a little-endian payload length
 * followed by the tag bytes 00 4e; payloads are padded up to a 4-byte
 * boundary.  A payload prefixed with six 0xffff words is a firmware
 * command response/event, everything else is an MPDU.
 */
void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
{
	unsigned int i, tlen, resplen;
	u8 *tbuf, *respbuf;

	tbuf = skb->data;
	tlen = skb->len;

	while (tlen >= 4) {
		int clen = tbuf[1] << 8 | tbuf[0];
		int wlen = (clen + 3) & ~3;	/* padded chunk length */

		/*
		 * parse stream (if any)
		 */
		if (tbuf[2] != 0 || tbuf[3] != 0x4e) {
			printk(KERN_ERR "%s: missing tag!\n",
			       wiphy_name(ar->hw->wiphy));
			return ;
		}
		if (wlen > tlen - 4) {
			printk(KERN_ERR "%s: invalid RX (%d, %d, %d)\n",
			       wiphy_name(ar->hw->wiphy), clen, wlen, tlen);
			print_hex_dump(KERN_DEBUG, "data: ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, tbuf, tlen, true);
			return ;
		}
		resplen = clen;
		respbuf = tbuf + 4;
		tbuf += wlen + 4;
		tlen -= wlen + 4;

		i = 0;

		/* weird thing, but this is the same in the original driver */
		while (resplen > 2 && i < 12 &&
		       respbuf[0] == 0xff && respbuf[1] == 0xff) {
			i += 2;
			resplen -= 2;
			respbuf += 2;
		}

		if (resplen < 4)
			continue;

		/* found the 6 * 0xffff marker? */
		if (i == 12)
			ar9170_handle_command_response(ar, respbuf, resplen);
		else
			ar9170_handle_mpdu(ar, respbuf, resplen);
	}

	if (tlen)
		printk(KERN_ERR "%s: buffer remains!\n",
		       wiphy_name(ar->hw->wiphy));
}
678
/* Initialize one EDCA parameter set (AIFS, CWmin, CWmax, TXOP). */
#define AR9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)	\
do {								\
	queue.aifs	= ai_fs;				\
	queue.cw_min	= cwmin;				\
	queue.cw_max	= cwmax;				\
	queue.txop	= _txop;				\
} while (0)
686
/*
 * mac80211 start callback: bring the device up.
 *
 * Resets the per-queue tx accounting and QoS defaults, opens the
 * transport, programs MAC/QoS/PHY/RF defaults and finally starts DMA.
 * Returns 0 on success or a negative error code.
 */
static int ar9170_op_start(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;
	int err, i;

	mutex_lock(&ar->mutex);

	/* reinitialize queues statistics */
	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
	for (i = 0; i < ARRAY_SIZE(ar->tx_stats); i++)
		ar->tx_stats[i].limit = 8;

	/* reset QoS defaults */
	AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023,  0); /* BEST EFFORT*/
	AR9170_FILL_QUEUE(ar->edcf[1], 7, 15, 1023,  0); /* BACKGROUND */
	AR9170_FILL_QUEUE(ar->edcf[2], 2, 7,    15, 94); /* VIDEO */
	AR9170_FILL_QUEUE(ar->edcf[3], 2, 3,     7, 47); /* VOICE */
	AR9170_FILL_QUEUE(ar->edcf[4], 2, 3,     7,  0); /* SPECIAL */

	err = ar->open(ar);
	if (err)
		goto out;

	err = ar9170_init_mac(ar);
	if (err)
		goto out;

	err = ar9170_set_qos(ar);
	if (err)
		goto out;

	err = ar9170_init_phy(ar, IEEE80211_BAND_2GHZ);
	if (err)
		goto out;

	err = ar9170_init_rf(ar);
	if (err)
		goto out;

	/* start DMA */
	err = ar9170_write_reg(ar, 0x1c3d30, 0x100);
	if (err)
		goto out;

	ar->state = AR9170_STARTED;

out:
	mutex_unlock(&ar->mutex);
	return err;
}
737
/*
 * mac80211 stop callback: flush workers and status queues, stop DMA
 * and close the transport.
 *
 * NOTE(review): ar->state is flipped to IDLE before ar->mutex is
 * taken - presumably to make IS_STARTED() fail early in the workers
 * being cancelled below; verify this is race-free.
 */
static void ar9170_op_stop(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;

	if (IS_STARTED(ar))
		ar->state = AR9170_IDLE;

	mutex_lock(&ar->mutex);

	cancel_delayed_work_sync(&ar->tx_status_janitor);
	cancel_work_sync(&ar->filter_config_work);
	cancel_work_sync(&ar->beacon_work);
	skb_queue_purge(&ar->global_tx_status_waste);
	skb_queue_purge(&ar->global_tx_status);

	if (IS_ACCEPTING_CMD(ar)) {
		ar9170_set_leds_state(ar, 0);

		/* stop DMA */
		ar9170_write_reg(ar, 0x1c3d30, 0);
		ar->stop(ar);
	}

	mutex_unlock(&ar->mutex);
}
763
764int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
765{
766 struct ar9170 *ar = hw->priv;
767 struct ieee80211_hdr *hdr;
768 struct ar9170_tx_control *txc;
769 struct ieee80211_tx_info *info;
770 struct ieee80211_rate *rate = NULL;
771 struct ieee80211_tx_rate *txrate;
772 unsigned int queue = skb_get_queue_mapping(skb);
773 unsigned long flags = 0;
774 struct ar9170_sta_info *sta_info = NULL;
775 u32 power, chains;
776 u16 keytype = 0;
777 u16 len, icv = 0;
778 int err;
779 bool tx_status;
780
781 if (unlikely(!IS_STARTED(ar)))
782 goto err_free;
783
784 hdr = (void *)skb->data;
785 info = IEEE80211_SKB_CB(skb);
786 len = skb->len;
787
788 spin_lock_irqsave(&ar->tx_stats_lock, flags);
789 if (ar->tx_stats[queue].limit < ar->tx_stats[queue].len) {
790 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
791 return NETDEV_TX_OK;
792 }
793
794 ar->tx_stats[queue].len++;
795 ar->tx_stats[queue].count++;
796 if (ar->tx_stats[queue].limit == ar->tx_stats[queue].len)
797 ieee80211_stop_queue(hw, queue);
798
799 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
800
801 txc = (void *)skb_push(skb, sizeof(*txc));
802
803 tx_status = (((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) != 0) ||
804 ((info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) != 0));
805
806 if (info->control.hw_key) {
807 icv = info->control.hw_key->icv_len;
808
809 switch (info->control.hw_key->alg) {
810 case ALG_WEP:
811 keytype = AR9170_TX_MAC_ENCR_RC4;
812 break;
813 case ALG_TKIP:
814 keytype = AR9170_TX_MAC_ENCR_RC4;
815 break;
816 case ALG_CCMP:
817 keytype = AR9170_TX_MAC_ENCR_AES;
818 break;
819 default:
820 WARN_ON(1);
821 goto err_dequeue;
822 }
823 }
824
825 /* Length */
826 txc->length = cpu_to_le16(len + icv + 4);
827
828 txc->mac_control = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
829 AR9170_TX_MAC_BACKOFF);
830 txc->mac_control |= cpu_to_le16(ar9170_qos_hwmap[queue] <<
831 AR9170_TX_MAC_QOS_SHIFT);
832 txc->mac_control |= cpu_to_le16(keytype);
833 txc->phy_control = cpu_to_le32(0);
834
835 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
836 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
837
838 if (info->flags & IEEE80211_TX_CTL_AMPDU)
839 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
840
841 txrate = &info->control.rates[0];
842
843 if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
844 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
845 else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
846 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
847
848 if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
849 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
850
851 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
852 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
853
854 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
855 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ);
856 /* this works because 40 MHz is 2 and dup is 3 */
857 if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
858 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP);
859
860 if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
861 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);
862
863 if (txrate->flags & IEEE80211_TX_RC_MCS) {
864 u32 r = txrate->idx;
865 u8 *txpower;
866
867 r <<= AR9170_TX_PHY_MCS_SHIFT;
868 if (WARN_ON(r & ~AR9170_TX_PHY_MCS_MASK))
869 goto err_dequeue;
870 txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK);
871 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
872
873 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
874 if (info->band == IEEE80211_BAND_5GHZ)
875 txpower = ar->power_5G_ht40;
876 else
877 txpower = ar->power_2G_ht40;
878 } else {
879 if (info->band == IEEE80211_BAND_5GHZ)
880 txpower = ar->power_5G_ht20;
881 else
882 txpower = ar->power_2G_ht20;
883 }
884
885 power = txpower[(txrate->idx) & 7];
886 } else {
887 u8 *txpower;
888 u32 mod;
889 u32 phyrate;
890 u8 idx = txrate->idx;
891
892 if (info->band != IEEE80211_BAND_2GHZ) {
893 idx += 4;
894 txpower = ar->power_5G_leg;
895 mod = AR9170_TX_PHY_MOD_OFDM;
896 } else {
897 if (idx < 4) {
898 txpower = ar->power_2G_cck;
899 mod = AR9170_TX_PHY_MOD_CCK;
900 } else {
901 mod = AR9170_TX_PHY_MOD_OFDM;
902 txpower = ar->power_2G_ofdm;
903 }
904 }
905
906 rate = &__ar9170_ratetable[idx];
907
908 phyrate = rate->hw_value & 0xF;
909 power = txpower[(rate->hw_value & 0x30) >> 4];
910 phyrate <<= AR9170_TX_PHY_MCS_SHIFT;
911
912 txc->phy_control |= cpu_to_le32(mod);
913 txc->phy_control |= cpu_to_le32(phyrate);
914 }
915
916 power <<= AR9170_TX_PHY_TX_PWR_SHIFT;
917 power &= AR9170_TX_PHY_TX_PWR_MASK;
918 txc->phy_control |= cpu_to_le32(power);
919
920 /* set TX chains */
921 if (ar->eeprom.tx_mask == 1) {
922 chains = AR9170_TX_PHY_TXCHAIN_1;
923 } else {
924 chains = AR9170_TX_PHY_TXCHAIN_2;
925
926 /* >= 36M legacy OFDM - use only one chain */
927 if (rate && rate->bitrate >= 360)
928 chains = AR9170_TX_PHY_TXCHAIN_1;
929 }
930 txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
931
932 if (tx_status) {
933 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
934 /*
935 * WARNING:
936 * Putting the QoS queue bits into an unexplored territory is
937 * certainly not elegant.
938 *
939 * In my defense: This idea provides a reasonable way to
940 * smuggle valuable information to the tx_status callback.
941 * Also, the idea behind this bit-abuse came straight from
942 * the original driver code.
943 */
944
945 txc->phy_control |=
946 cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
947
948 if (info->control.sta) {
949 sta_info = (void *) info->control.sta->drv_priv;
950 skb_queue_tail(&sta_info->tx_status[queue], skb);
951 } else {
952 skb_queue_tail(&ar->global_tx_status, skb);
953
954 queue_delayed_work(ar->hw->workqueue,
955 &ar->tx_status_janitor,
956 msecs_to_jiffies(100));
957 }
958 }
959
960 err = ar->tx(ar, skb, tx_status, 0);
961 if (unlikely(tx_status && err)) {
962 if (info->control.sta)
963 skb_unlink(skb, &sta_info->tx_status[queue]);
964 else
965 skb_unlink(skb, &ar->global_tx_status);
966 }
967
968 return NETDEV_TX_OK;
969
970err_dequeue:
971 spin_lock_irqsave(&ar->tx_stats_lock, flags);
972 ar->tx_stats[queue].len--;
973 ar->tx_stats[queue].count--;
974 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
975
976err_free:
977 dev_kfree_skb(skb);
978 return NETDEV_TX_OK;
979}
980
981static int ar9170_op_add_interface(struct ieee80211_hw *hw,
982 struct ieee80211_if_init_conf *conf)
983{
984 struct ar9170 *ar = hw->priv;
985 int err = 0;
986
987 mutex_lock(&ar->mutex);
988
989 if (ar->vif) {
990 err = -EBUSY;
991 goto unlock;
992 }
993
994 ar->vif = conf->vif;
995 memcpy(ar->mac_addr, conf->mac_addr, ETH_ALEN);
996
997 if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
998 ar->rx_software_decryption = true;
999 ar->disable_offload = true;
1000 }
1001
1002 ar->cur_filter = 0;
1003 ar->want_filter = AR9170_MAC_REG_FTF_DEFAULTS;
1004 err = ar9170_update_frame_filter(ar);
1005 if (err)
1006 goto unlock;
1007
1008 err = ar9170_set_operating_mode(ar);
1009
1010unlock:
1011 mutex_unlock(&ar->mutex);
1012 return err;
1013}
1014
/*
 * mac80211 remove_interface callback: clear the single interface,
 * drop all frame filters, stop beaconing and reset crypto/sniffer
 * state back to defaults.
 */
static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_if_init_conf *conf)
{
	struct ar9170 *ar = hw->priv;

	mutex_lock(&ar->mutex);
	ar->vif = NULL;
	ar->want_filter = 0;
	ar9170_update_frame_filter(ar);
	ar9170_set_beacon_timers(ar);
	dev_kfree_skb(ar->beacon);
	ar->beacon = NULL;
	ar->sniffer_enabled = false;
	ar->rx_software_decryption = false;
	ar9170_set_operating_mode(ar);
	mutex_unlock(&ar->mutex);
}
1032
/*
 * mac80211 config callback: apply hardware configuration changes
 * (retry limits, beacon interval, channel).  Several change classes
 * are still unimplemented (radio enable, listen interval, PS, power).
 */
static int ar9170_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ar9170 *ar = hw->priv;
	int err = 0;

	mutex_lock(&ar->mutex);

	if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_PS) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
		/*
		 * is it long_frame_max_tx_count or short_frame_max_tx_count?
		 */

		err = ar9170_set_hwretry_limit(ar,
					ar->hw->conf.long_frame_max_tx_count);
		if (err)
			goto out;
	}

	if (changed & IEEE80211_CONF_CHANGE_BEACON_INTERVAL) {
		err = ar9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		err = ar9170_set_channel(ar, hw->conf.channel,
					 AR9170_RFI_NONE, AR9170_BW_20);
		if (err)
			goto out;
		/* adjust slot time for 5 GHz */
		if (hw->conf.channel->band == IEEE80211_BAND_5GHZ)
			err = ar9170_write_reg(ar, AR9170_MAC_REG_SLOT_TIME,
					       9 << 10);
	}

out:
	mutex_unlock(&ar->mutex);
	return err;
}
1092
1093static int ar9170_op_config_interface(struct ieee80211_hw *hw,
1094 struct ieee80211_vif *vif,
1095 struct ieee80211_if_conf *conf)
1096{
1097 struct ar9170 *ar = hw->priv;
1098 int err = 0;
1099
1100 mutex_lock(&ar->mutex);
1101
1102 if (conf->changed & IEEE80211_IFCC_BSSID) {
1103 memcpy(ar->bssid, conf->bssid, ETH_ALEN);
1104 err = ar9170_set_operating_mode(ar);
1105 }
1106
1107 if (conf->changed & IEEE80211_IFCC_BEACON) {
1108 err = ar9170_update_beacon(ar);
1109
1110 if (err)
1111 goto out;
1112 err = ar9170_set_beacon_timers(ar);
1113 }
1114
1115out:
1116 mutex_unlock(&ar->mutex);
1117 return err;
1118}
1119
/*
 * Worker applying filter changes requested by ar9170_op_configure_filter
 * (which runs in atomic context and therefore cannot talk to the
 * device itself).
 *
 * NOTE(review): ar->filter_changed bits are never cleared here -
 * presumably harmless because re-applying an update is idempotent;
 * confirm there is no missed-update race with the setter.
 */
static void ar9170_set_filters(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 filter_config_work);
	int err;

	mutex_lock(&ar->mutex);
	if (unlikely(!IS_STARTED(ar)))
		goto unlock;

	if (ar->filter_changed & AR9170_FILTER_CHANGED_PROMISC) {
		err = ar9170_set_operating_mode(ar);
		if (err)
			goto unlock;
	}

	if (ar->filter_changed & AR9170_FILTER_CHANGED_MULTICAST) {
		err = ar9170_update_multicast(ar);
		if (err)
			goto unlock;
	}

	if (ar->filter_changed & AR9170_FILTER_CHANGED_FRAMEFILTER)
		err = ar9170_update_frame_filter(ar);

unlock:
	mutex_unlock(&ar->mutex);
}
1148
/*
 * mac80211 configure_filter callback (atomic context): record the
 * wanted multicast hash / frame filter / sniffer state and kick the
 * filter_config_work worker to program the hardware.
 */
static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
				       unsigned int changed_flags,
				       unsigned int *new_flags,
				       int mc_count, struct dev_mc_list *mclist)
{
	struct ar9170 *ar = hw->priv;

	/* mask supported flags */
	*new_flags &= FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC |
		      FIF_PROMISC_IN_BSS;

	/*
	 * We can support more by setting the sniffer bit and
	 * then checking the error flags, later.
	 */

	if (changed_flags & FIF_ALLMULTI) {
		if (*new_flags & FIF_ALLMULTI) {
			ar->want_mc_hash = ~0ULL;
		} else {
			u64 mchash;
			int i;

			/* always get broadcast frames */
			mchash = 1ULL << (0xff>>2);

			/* hash each address on the top 6 bits of its
			 * last octet */
			for (i = 0; i < mc_count; i++) {
				if (WARN_ON(!mclist))
					break;
				mchash |= 1ULL << (mclist->dmi_addr[5] >> 2);
				mclist = mclist->next;
			}
		ar->want_mc_hash = mchash;
		}
		ar->filter_changed |= AR9170_FILTER_CHANGED_MULTICAST;
	}

	if (changed_flags & FIF_CONTROL) {
		u32 filter = AR9170_MAC_REG_FTF_PSPOLL |
			     AR9170_MAC_REG_FTF_RTS |
			     AR9170_MAC_REG_FTF_CTS |
			     AR9170_MAC_REG_FTF_ACK |
			     AR9170_MAC_REG_FTF_CFE |
			     AR9170_MAC_REG_FTF_CFE_ACK;

		if (*new_flags & FIF_CONTROL)
			ar->want_filter = ar->cur_filter | filter;
		else
			ar->want_filter = ar->cur_filter & ~filter;

		ar->filter_changed |= AR9170_FILTER_CHANGED_FRAMEFILTER;
	}

	if (changed_flags & FIF_PROMISC_IN_BSS) {
		ar->sniffer_enabled = ((*new_flags) & FIF_PROMISC_IN_BSS) != 0;
		ar->filter_changed |= AR9170_FILTER_CHANGED_PROMISC;
	}

	if (likely(IS_STARTED(ar)))
		queue_work(ar->hw->workqueue, &ar->filter_config_work);
}
1210
/*
 * mac80211 bss_info_changed callback: program association state, ERP
 * slot time and basic rates into the MAC.  All register writes are
 * batched between ar9170_regwrite_begin() and ar9170_regwrite_finish();
 * the accumulated result is fetched but cannot be propagated since the
 * callback returns void.
 */
static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct ar9170 *ar = hw->priv;
	int err = 0;

	mutex_lock(&ar->mutex);

	ar9170_regwrite_begin(ar);

	if (changed & BSS_CHANGED_ASSOC) {
		/* only ever advances to ASSOCIATED; disassoc keeps the state */
		ar->state = bss_conf->assoc ? AR9170_ASSOCIATED : ar->state;

#ifndef CONFIG_AR9170_LEDS
		/* enable assoc LED. */
		err = ar9170_set_leds_state(ar, bss_conf->assoc ? 2 : 0);
#endif /* CONFIG_AR9170_LEDS */
	}

	if (changed & BSS_CHANGED_HT) {
		/* TODO */
		err = 0;
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		/* 20 us standard slot, 9 us when short slots are negotiated */
		u32 slottime = 20;

		if (bss_conf->use_short_slot)
			slottime = 9;

		/* slot time appears to occupy bits above 10 — TODO confirm */
		ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, slottime << 10);
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		u32 cck, ofdm;

		if (hw->conf.channel->band == IEEE80211_BAND_5GHZ) {
			/* 5 GHz band has no CCK rates */
			ofdm = bss_conf->basic_rates;
			cck = 0;
		} else {
			/* four cck rates */
			cck = bss_conf->basic_rates & 0xf;
			ofdm = bss_conf->basic_rates >> 4;
		}
		ar9170_regwrite(AR9170_MAC_REG_BASIC_RATE,
				ofdm << 8 | cck);
	}

	ar9170_regwrite_finish();
	err = ar9170_regwrite_result();
	mutex_unlock(&ar->mutex);
}
1265
1266static u64 ar9170_op_get_tsf(struct ieee80211_hw *hw)
1267{
1268 struct ar9170 *ar = hw->priv;
1269 int err;
1270 u32 tsf_low;
1271 u32 tsf_high;
1272 u64 tsf;
1273
1274 mutex_lock(&ar->mutex);
1275 err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_L, &tsf_low);
1276 if (!err)
1277 err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_H, &tsf_high);
1278 mutex_unlock(&ar->mutex);
1279
1280 if (WARN_ON(err))
1281 return 0;
1282
1283 tsf = tsf_high;
1284 tsf = (tsf << 32) | tsf_low;
1285 return tsf;
1286}
1287
/*
 * mac80211 set_key callback: install or remove a hardware crypto key.
 *
 * Key slot layout (as used below): slots 0..63 hold pairwise keys and
 * are tracked in the ar->usedkeys bitmap; slots 64+keyidx hold group
 * keys.  TKIP keys need a second upload for the 16-byte MIC key.  The
 * usedkeys bitmap is mirrored into the ROLL_CALL_TBL registers after
 * every change.
 *
 * Returns 0 on success, -EOPNOTSUPP when hardware offload cannot be
 * used (caller then falls back to software crypto), -ENOSPC when all
 * pairwise slots are taken, or a device I/O error.
 */
static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ieee80211_key_conf *key)
{
	struct ar9170 *ar = hw->priv;
	int err = 0, i;
	u8 ktype;

	/* no interface yet, or offload disabled: use software crypto */
	if ((!ar->vif) || (ar->disable_offload))
		return -EOPNOTSUPP;

	/* map the mac80211 cipher to the hardware key type */
	switch (key->alg) {
	case ALG_WEP:
		if (key->keylen == LEN_WEP40)
			ktype = AR9170_ENC_ALG_WEP64;
		else
			ktype = AR9170_ENC_ALG_WEP128;
		break;
	case ALG_TKIP:
		ktype = AR9170_ENC_ALG_TKIP;
		break;
	case ALG_CCMP:
		ktype = AR9170_ENC_ALG_AESCCMP;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mutex_lock(&ar->mutex);
	if (cmd == SET_KEY) {
		if (unlikely(!IS_STARTED(ar))) {
			err = -EOPNOTSUPP;
			goto out;
		}

		/* group keys need all-zeroes address */
		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
			sta = NULL;

		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
			/* find a free pairwise slot in the 64-bit bitmap */
			for (i = 0; i < 64; i++)
				if (!(ar->usedkeys & BIT(i)))
					break;
			if (i == 64) {
				/* no slots left: fall back to software RX */
				ar->rx_software_decryption = true;
				ar9170_set_operating_mode(ar);
				err = -ENOSPC;
				goto out;
			}
		} else {
			/* group keys use fixed slots past the bitmap */
			i = 64 + key->keyidx;
		}

		key->hw_key_idx = i;

		err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL, ktype, 0,
					key->key, min_t(u8, 16, key->keylen));
		if (err)
			goto out;

		if (key->alg == ALG_TKIP) {
			/* second upload: the TKIP MIC key (bytes 16..31) */
			err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL,
						ktype, 1, key->key + 16, 16);
			if (err)
				goto out;

			/*
			 * hardware is not capable generating the MMIC
			 * for fragmented frames!
			 */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		}

		if (i < 64)
			ar->usedkeys |= BIT(i);

		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	} else {
		if (unlikely(!IS_STARTED(ar))) {
			/* The device is gone... together with the key ;-) */
			err = 0;
			goto out;
		}

		err = ar9170_disable_key(ar, key->hw_key_idx);
		if (err)
			goto out;

		if (key->hw_key_idx < 64) {
			/* pairwise slot: just clear the bitmap bit */
			ar->usedkeys &= ~BIT(key->hw_key_idx);
		} else {
			/* group slot: overwrite with an empty "none" key */
			err = ar9170_upload_key(ar, key->hw_key_idx, NULL,
						AR9170_ENC_ALG_NONE, 0,
						NULL, 0);
			if (err)
				goto out;

			if (key->alg == ALG_TKIP) {
				/* also clear the MIC half of the slot */
				err = ar9170_upload_key(ar, key->hw_key_idx,
							NULL,
							AR9170_ENC_ALG_NONE, 1,
							NULL, 0);
				if (err)
					goto out;
			}

		}
	}

	/* mirror the key-slot bitmap into the hardware roll-call table */
	ar9170_regwrite_begin(ar);
	ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_L, ar->usedkeys);
	ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_H, ar->usedkeys >> 32);
	ar9170_regwrite_finish();
	err = ar9170_regwrite_result();

out:
	mutex_unlock(&ar->mutex);

	return err;
}
1408
/*
 * mac80211 sta_notify callback: manage the per-station tx-status
 * queues.  On station add, initialize one skb queue per hardware
 * queue; on remove, move any frames still awaiting a tx-status report
 * to the global queue and kick the janitor worker to deal with them.
 */
static void ar9170_sta_notify(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      enum sta_notify_cmd cmd,
			      struct ieee80211_sta *sta)
{
	struct ar9170 *ar = hw->priv;
	struct ar9170_sta_info *info = (void *) sta->drv_priv;
	struct sk_buff *skb;
	unsigned int i;

	switch (cmd) {
	case STA_NOTIFY_ADD:
		for (i = 0; i < ar->hw->queues; i++)
			skb_queue_head_init(&info->tx_status[i]);
		break;

	case STA_NOTIFY_REMOVE:

		/*
		 * transfer all outstanding frames that need a tx_status
		 * reports to the global tx_status queue
		 */

		for (i = 0; i < ar->hw->queues; i++) {
			while ((skb = skb_dequeue(&info->tx_status[i]))) {
#ifdef AR9170_QUEUE_DEBUG
				printk(KERN_DEBUG "%s: queueing frame in "
					"global tx_status queue =>\n",
				       wiphy_name(ar->hw->wiphy));

				ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
				skb_queue_tail(&ar->global_tx_status, skb);
			}
		}
		/* let the janitor reap the transferred frames shortly */
		queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
				   msecs_to_jiffies(100));
		break;

	default:
		break;
	}
}
1452
1453static int ar9170_get_stats(struct ieee80211_hw *hw,
1454 struct ieee80211_low_level_stats *stats)
1455{
1456 struct ar9170 *ar = hw->priv;
1457 u32 val;
1458 int err;
1459
1460 mutex_lock(&ar->mutex);
1461 err = ar9170_read_reg(ar, AR9170_MAC_REG_TX_RETRY, &val);
1462 ar->stats.dot11ACKFailureCount += val;
1463
1464 memcpy(stats, &ar->stats, sizeof(*stats));
1465 mutex_unlock(&ar->mutex);
1466
1467 return 0;
1468}
1469
1470static int ar9170_get_tx_stats(struct ieee80211_hw *hw,
1471 struct ieee80211_tx_queue_stats *tx_stats)
1472{
1473 struct ar9170 *ar = hw->priv;
1474
1475 spin_lock_bh(&ar->tx_stats_lock);
1476 memcpy(tx_stats, ar->tx_stats, sizeof(tx_stats[0]) * hw->queues);
1477 spin_unlock_bh(&ar->tx_stats_lock);
1478
1479 return 0;
1480}
1481
1482static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
1483 const struct ieee80211_tx_queue_params *param)
1484{
1485 struct ar9170 *ar = hw->priv;
1486 int ret;
1487
1488 mutex_lock(&ar->mutex);
1489 if ((param) && !(queue > ar->hw->queues)) {
1490 memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
1491 param, sizeof(*param));
1492
1493 ret = ar9170_set_qos(ar);
1494 } else
1495 ret = -EINVAL;
1496
1497 mutex_unlock(&ar->mutex);
1498 return ret;
1499}
1500
/* mac80211 callback table shared by all AR9170 bus glue (e.g. USB) */
static const struct ieee80211_ops ar9170_ops = {
	.start			= ar9170_op_start,
	.stop			= ar9170_op_stop,
	.tx			= ar9170_op_tx,
	.add_interface		= ar9170_op_add_interface,
	.remove_interface	= ar9170_op_remove_interface,
	.config			= ar9170_op_config,
	.config_interface	= ar9170_op_config_interface,
	.configure_filter	= ar9170_op_configure_filter,
	.conf_tx		= ar9170_conf_tx,
	.bss_info_changed	= ar9170_op_bss_info_changed,
	.get_tsf		= ar9170_op_get_tsf,
	.set_key		= ar9170_set_key,
	.sta_notify		= ar9170_sta_notify,
	.get_stats		= ar9170_get_stats,
	.get_tx_stats		= ar9170_get_tx_stats,
};
1518
/*
 * Allocate and pre-initialize the shared ar9170 device state.
 *
 * @priv_size: size of the bus-specific private area the caller needs
 *             behind the ieee80211_hw (passed to ieee80211_alloc_hw()).
 *
 * Sets up locks, skb queues, work items and the static parts of the
 * wiphy configuration; the EEPROM-dependent parts are completed later
 * in ar9170_read_eeprom() via ar9170_register().
 *
 * Returns the ar9170 pointer (== hw->priv) or ERR_PTR(-ENOMEM).
 */
void *ar9170_alloc(size_t priv_size)
{
	struct ieee80211_hw *hw;
	struct ar9170 *ar;
	int i;

	hw = ieee80211_alloc_hw(priv_size, &ar9170_ops);
	if (!hw)
		return ERR_PTR(-ENOMEM);

	ar = hw->priv;
	ar->hw = hw;

	mutex_init(&ar->mutex);
	spin_lock_init(&ar->cmdlock);
	spin_lock_init(&ar->tx_stats_lock);
	skb_queue_head_init(&ar->global_tx_status);
	skb_queue_head_init(&ar->global_tx_status_waste);
	INIT_WORK(&ar->filter_config_work, ar9170_set_filters);
	INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
	INIT_DELAYED_WORK(&ar->tx_status_janitor, ar9170_tx_status_janitor);

	/* all hw supports 2.4 GHz, so set channel to 1 by default */
	ar->channel = &ar9170_2ghz_chantable[0];

	/* first part of wiphy init */
	ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
					 BIT(NL80211_IFTYPE_WDS) |
					 BIT(NL80211_IFTYPE_ADHOC);
	ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
			 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			 IEEE80211_HW_SIGNAL_DBM |
			 IEEE80211_HW_NOISE_DBM;

	ar->hw->queues = __AR9170_NUM_TXQ;
	ar->hw->extra_tx_headroom = 8;
	ar->hw->sta_data_size = sizeof(struct ar9170_sta_info);

	ar->hw->max_rates = 1;
	ar->hw->max_rate_tries = 3;

	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */

	return ar;
}
1565
1566static int ar9170_read_eeprom(struct ar9170 *ar)
1567{
1568#define RW 8 /* number of words to read at once */
1569#define RB (sizeof(u32) * RW)
1570 DECLARE_MAC_BUF(mbuf);
1571 u8 *eeprom = (void *)&ar->eeprom;
1572 u8 *addr = ar->eeprom.mac_address;
1573 __le32 offsets[RW];
1574 int i, j, err, bands = 0;
1575
1576 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1577
1578 BUILD_BUG_ON(RB > AR9170_MAX_CMD_LEN - 4);
1579#ifndef __CHECKER__
1580 /* don't want to handle trailing remains */
1581 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1582#endif
1583
1584 for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
1585 for (j = 0; j < RW; j++)
1586 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1587 RB * i + 4 * j);
1588
1589 err = ar->exec_cmd(ar, AR9170_CMD_RREG,
1590 RB, (u8 *) &offsets,
1591 RB, eeprom + RB * i);
1592 if (err)
1593 return err;
1594 }
1595
1596#undef RW
1597#undef RB
1598
1599 if (ar->eeprom.length == cpu_to_le16(0xFFFF))
1600 return -ENODATA;
1601
1602 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1603 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar9170_band_2GHz;
1604 bands++;
1605 }
1606 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1607 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &ar9170_band_5GHz;
1608 bands++;
1609 }
1610 /*
1611 * I measured this, a bandswitch takes roughly
1612 * 135 ms and a frequency switch about 80.
1613 *
1614 * FIXME: measure these values again once EEPROM settings
1615 * are used, that will influence them!
1616 */
1617 if (bands == 2)
1618 ar->hw->channel_change_time = 135 * 1000;
1619 else
1620 ar->hw->channel_change_time = 80 * 1000;
1621
1622 /* second part of wiphy init */
1623 SET_IEEE80211_PERM_ADDR(ar->hw, addr);
1624
1625 return bands ? 0 : -EINVAL;
1626}
1627
/*
 * Final registration step called by the bus glue once the device is
 * reachable: read the EEPROM (MAC address, bands), register with
 * mac80211 and set up the LEDs.  Uses goto-based unwinding so a LED
 * failure also unregisters the already-registered hw.
 *
 * Returns 0 on success or a negative error code.
 */
int ar9170_register(struct ar9170 *ar, struct device *pdev)
{
	int err;

	/* try to read EEPROM, init MAC addr */
	err = ar9170_read_eeprom(ar);
	if (err)
		goto err_out;

	err = ieee80211_register_hw(ar->hw);
	if (err)
		goto err_out;

	err = ar9170_init_leds(ar);
	if (err)
		goto err_unreg;

#ifdef CONFIG_AR9170_LEDS
	err = ar9170_register_leds(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_AR9170_LEDS */

	dev_info(pdev, "Atheros AR9170 is registered as '%s'\n",
		 wiphy_name(ar->hw->wiphy));

	return err;

err_unreg:
	ieee80211_unregister_hw(ar->hw);

err_out:
	return err;
}
1662
/*
 * Counterpart of ar9170_register(): tear down the LEDs (if built in),
 * unregister from mac80211 and destroy the device mutex.  The hw
 * itself is freed by the bus glue.
 */
void ar9170_unregister(struct ar9170 *ar)
{
#ifdef CONFIG_AR9170_LEDS
	ar9170_unregister_leds(ar);
#endif /* CONFIG_AR9170_LEDS */

	ieee80211_unregister_hw(ar->hw);
	mutex_destroy(&ar->mutex);
}
diff --git a/drivers/net/wireless/ar9170/phy.c b/drivers/net/wireless/ar9170/phy.c
new file mode 100644
index 000000000000..6ce20754b8e7
--- /dev/null
+++ b/drivers/net/wireless/ar9170/phy.c
@@ -0,0 +1,1240 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * PHY and RF code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#include <linux/bitrev.h>
40#include "ar9170.h"
41#include "cmd.h"
42
/*
 * Initialize the PHY power calibration registers (PHY block base
 * 0x1bc000) in one batched register-write transaction.  The repeated
 * 0x3f3f3f3f values presumably set the per-rate power entries to
 * their maximum — TODO confirm against the AR5416 register map.
 *
 * Returns the accumulated result of the register-write batch.
 */
static int ar9170_init_power_cal(struct ar9170 *ar)
{
	ar9170_regwrite_begin(ar);

	ar9170_regwrite(0x1bc000 + 0x993c, 0x7f);
	ar9170_regwrite(0x1bc000 + 0x9934, 0x3f3f3f3f);
	ar9170_regwrite(0x1bc000 + 0x9938, 0x3f3f3f3f);
	ar9170_regwrite(0x1bc000 + 0xa234, 0x3f3f3f3f);
	ar9170_regwrite(0x1bc000 + 0xa238, 0x3f3f3f3f);
	ar9170_regwrite(0x1bc000 + 0xa38c, 0x3f3f3f3f);
	ar9170_regwrite(0x1bc000 + 0xa390, 0x3f3f3f3f);
	ar9170_regwrite(0x1bc000 + 0xa3cc, 0x3f3f3f3f);
	ar9170_regwrite(0x1bc000 + 0xa3d0, 0x3f3f3f3f);
	ar9170_regwrite(0x1bc000 + 0xa3d4, 0x3f3f3f3f);

	ar9170_regwrite_finish();
	return ar9170_regwrite_result();
}
61
/*
 * One PHY initialization entry: a register address plus the value to
 * program for each of the four band/bandwidth combinations
 * (5 GHz/20 MHz, 5 GHz/40 MHz, 2.4 GHz/40 MHz, 2.4 GHz/20 MHz).
 */
struct ar9170_phy_init {
	u32 reg, _5ghz_20, _5ghz_40, _2ghz_40, _2ghz_20;
};
65
66static struct ar9170_phy_init ar5416_phy_init[] = {
67 { 0x1c5800, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
68 { 0x1c5804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, },
69 { 0x1c5808, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
70 { 0x1c580c, 0xad848e19, 0xad848e19, 0xad848e19, 0xad848e19, },
71 { 0x1c5810, 0x7d14e000, 0x7d14e000, 0x7d14e000, 0x7d14e000, },
72 { 0x1c5814, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, },
73 { 0x1c5818, 0x00000090, 0x00000090, 0x00000090, 0x00000090, },
74 { 0x1c581c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
75 { 0x1c5820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, },
76 { 0x1c5824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, },
77 { 0x1c5828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, },
78 { 0x1c582c, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, },
79 { 0x1c5830, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
80 { 0x1c5834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, },
81 { 0x1c5838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
82 { 0x1c583c, 0x00200400, 0x00200400, 0x00200400, 0x00200400, },
83 { 0x1c5840, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e, },
84 { 0x1c5844, 0x1372161e, 0x13721c1e, 0x13721c24, 0x137216a4, },
85 { 0x1c5848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, },
86 { 0x1c584c, 0x1284233c, 0x1284233c, 0x1284233c, 0x1284233c, },
87 { 0x1c5850, 0x6c48b4e4, 0x6c48b4e4, 0x6c48b0e4, 0x6c48b0e4, },
88 { 0x1c5854, 0x00000859, 0x00000859, 0x00000859, 0x00000859, },
89 { 0x1c5858, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, },
90 { 0x1c585c, 0x31395c5e, 0x31395c5e, 0x31395c5e, 0x31395c5e, },
91 { 0x1c5860, 0x0004dd10, 0x0004dd10, 0x0004dd20, 0x0004dd20, },
92 { 0x1c5868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, },
93 { 0x1c586c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, },
94 { 0x1c5900, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
95 { 0x1c5904, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
96 { 0x1c5908, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
97 { 0x1c590c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
98 { 0x1c5914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, },
99 { 0x1c5918, 0x00000118, 0x00000230, 0x00000268, 0x00000134, },
100 { 0x1c591c, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, },
101 { 0x1c5920, 0x0510081c, 0x0510081c, 0x0510001c, 0x0510001c, },
102 { 0x1c5924, 0xd0058a15, 0xd0058a15, 0xd0058a15, 0xd0058a15, },
103 { 0x1c5928, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
104 { 0x1c592c, 0x00000004, 0x00000004, 0x00000004, 0x00000004, },
105 { 0x1c5934, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
106 { 0x1c5938, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
107 { 0x1c593c, 0x0000007f, 0x0000007f, 0x0000007f, 0x0000007f, },
108 { 0x1c5944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, },
109 { 0x1c5948, 0x9280b212, 0x9280b212, 0x9280b212, 0x9280b212, },
110 { 0x1c594c, 0x00020028, 0x00020028, 0x00020028, 0x00020028, },
111 { 0x1c5954, 0x5d50e188, 0x5d50e188, 0x5d50e188, 0x5d50e188, },
112 { 0x1c5958, 0x00081fff, 0x00081fff, 0x00081fff, 0x00081fff, },
113 { 0x1c5960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
114 { 0x1c5964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, },
115 { 0x1c5970, 0x190fb515, 0x190fb515, 0x190fb515, 0x190fb515, },
116 { 0x1c5974, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
117 { 0x1c5978, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
118 { 0x1c597c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
119 { 0x1c5980, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
120 { 0x1c5984, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
121 { 0x1c5988, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
122 { 0x1c598c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
123 { 0x1c5990, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
124 { 0x1c5994, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
125 { 0x1c5998, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
126 { 0x1c599c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
127 { 0x1c59a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
128 { 0x1c59a4, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
129 { 0x1c59a8, 0x001fff00, 0x001fff00, 0x001fff00, 0x001fff00, },
130 { 0x1c59ac, 0x006f00c4, 0x006f00c4, 0x006f00c4, 0x006f00c4, },
131 { 0x1c59b0, 0x03051000, 0x03051000, 0x03051000, 0x03051000, },
132 { 0x1c59b4, 0x00000820, 0x00000820, 0x00000820, 0x00000820, },
133 { 0x1c59c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, },
134 { 0x1c59c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, },
135 { 0x1c59c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, },
136 { 0x1c59cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, },
137 { 0x1c59d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, },
138 { 0x1c59d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
139 { 0x1c59d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
140 { 0x1c59dc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
141 { 0x1c59e0, 0x00000200, 0x00000200, 0x00000200, 0x00000200, },
142 { 0x1c59e4, 0x64646464, 0x64646464, 0x64646464, 0x64646464, },
143 { 0x1c59e8, 0x3c787878, 0x3c787878, 0x3c787878, 0x3c787878, },
144 { 0x1c59ec, 0x000000aa, 0x000000aa, 0x000000aa, 0x000000aa, },
145 { 0x1c59f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
146 { 0x1c59fc, 0x00001042, 0x00001042, 0x00001042, 0x00001042, },
147 { 0x1c5a00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
148 { 0x1c5a04, 0x00000040, 0x00000040, 0x00000040, 0x00000040, },
149 { 0x1c5a08, 0x00000080, 0x00000080, 0x00000080, 0x00000080, },
150 { 0x1c5a0c, 0x000001a1, 0x000001a1, 0x00000141, 0x00000141, },
151 { 0x1c5a10, 0x000001e1, 0x000001e1, 0x00000181, 0x00000181, },
152 { 0x1c5a14, 0x00000021, 0x00000021, 0x000001c1, 0x000001c1, },
153 { 0x1c5a18, 0x00000061, 0x00000061, 0x00000001, 0x00000001, },
154 { 0x1c5a1c, 0x00000168, 0x00000168, 0x00000041, 0x00000041, },
155 { 0x1c5a20, 0x000001a8, 0x000001a8, 0x000001a8, 0x000001a8, },
156 { 0x1c5a24, 0x000001e8, 0x000001e8, 0x000001e8, 0x000001e8, },
157 { 0x1c5a28, 0x00000028, 0x00000028, 0x00000028, 0x00000028, },
158 { 0x1c5a2c, 0x00000068, 0x00000068, 0x00000068, 0x00000068, },
159 { 0x1c5a30, 0x00000189, 0x00000189, 0x000000a8, 0x000000a8, },
160 { 0x1c5a34, 0x000001c9, 0x000001c9, 0x00000169, 0x00000169, },
161 { 0x1c5a38, 0x00000009, 0x00000009, 0x000001a9, 0x000001a9, },
162 { 0x1c5a3c, 0x00000049, 0x00000049, 0x000001e9, 0x000001e9, },
163 { 0x1c5a40, 0x00000089, 0x00000089, 0x00000029, 0x00000029, },
164 { 0x1c5a44, 0x00000170, 0x00000170, 0x00000069, 0x00000069, },
165 { 0x1c5a48, 0x000001b0, 0x000001b0, 0x00000190, 0x00000190, },
166 { 0x1c5a4c, 0x000001f0, 0x000001f0, 0x000001d0, 0x000001d0, },
167 { 0x1c5a50, 0x00000030, 0x00000030, 0x00000010, 0x00000010, },
168 { 0x1c5a54, 0x00000070, 0x00000070, 0x00000050, 0x00000050, },
169 { 0x1c5a58, 0x00000191, 0x00000191, 0x00000090, 0x00000090, },
170 { 0x1c5a5c, 0x000001d1, 0x000001d1, 0x00000151, 0x00000151, },
171 { 0x1c5a60, 0x00000011, 0x00000011, 0x00000191, 0x00000191, },
172 { 0x1c5a64, 0x00000051, 0x00000051, 0x000001d1, 0x000001d1, },
173 { 0x1c5a68, 0x00000091, 0x00000091, 0x00000011, 0x00000011, },
174 { 0x1c5a6c, 0x000001b8, 0x000001b8, 0x00000051, 0x00000051, },
175 { 0x1c5a70, 0x000001f8, 0x000001f8, 0x00000198, 0x00000198, },
176 { 0x1c5a74, 0x00000038, 0x00000038, 0x000001d8, 0x000001d8, },
177 { 0x1c5a78, 0x00000078, 0x00000078, 0x00000018, 0x00000018, },
178 { 0x1c5a7c, 0x00000199, 0x00000199, 0x00000058, 0x00000058, },
179 { 0x1c5a80, 0x000001d9, 0x000001d9, 0x00000098, 0x00000098, },
180 { 0x1c5a84, 0x00000019, 0x00000019, 0x00000159, 0x00000159, },
181 { 0x1c5a88, 0x00000059, 0x00000059, 0x00000199, 0x00000199, },
182 { 0x1c5a8c, 0x00000099, 0x00000099, 0x000001d9, 0x000001d9, },
183 { 0x1c5a90, 0x000000d9, 0x000000d9, 0x00000019, 0x00000019, },
184 { 0x1c5a94, 0x000000f9, 0x000000f9, 0x00000059, 0x00000059, },
185 { 0x1c5a98, 0x000000f9, 0x000000f9, 0x00000099, 0x00000099, },
186 { 0x1c5a9c, 0x000000f9, 0x000000f9, 0x000000d9, 0x000000d9, },
187 { 0x1c5aa0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
188 { 0x1c5aa4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
189 { 0x1c5aa8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
190 { 0x1c5aac, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
191 { 0x1c5ab0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
192 { 0x1c5ab4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
193 { 0x1c5ab8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
194 { 0x1c5abc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
195 { 0x1c5ac0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
196 { 0x1c5ac4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
197 { 0x1c5ac8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
198 { 0x1c5acc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
199 { 0x1c5ad0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
200 { 0x1c5ad4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
201 { 0x1c5ad8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
202 { 0x1c5adc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
203 { 0x1c5ae0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
204 { 0x1c5ae4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
205 { 0x1c5ae8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
206 { 0x1c5aec, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
207 { 0x1c5af0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
208 { 0x1c5af4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
209 { 0x1c5af8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
210 { 0x1c5afc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
211 { 0x1c5b00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
212 { 0x1c5b04, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
213 { 0x1c5b08, 0x00000002, 0x00000002, 0x00000002, 0x00000002, },
214 { 0x1c5b0c, 0x00000003, 0x00000003, 0x00000003, 0x00000003, },
215 { 0x1c5b10, 0x00000004, 0x00000004, 0x00000004, 0x00000004, },
216 { 0x1c5b14, 0x00000005, 0x00000005, 0x00000005, 0x00000005, },
217 { 0x1c5b18, 0x00000008, 0x00000008, 0x00000008, 0x00000008, },
218 { 0x1c5b1c, 0x00000009, 0x00000009, 0x00000009, 0x00000009, },
219 { 0x1c5b20, 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, },
220 { 0x1c5b24, 0x0000000b, 0x0000000b, 0x0000000b, 0x0000000b, },
221 { 0x1c5b28, 0x0000000c, 0x0000000c, 0x0000000c, 0x0000000c, },
222 { 0x1c5b2c, 0x0000000d, 0x0000000d, 0x0000000d, 0x0000000d, },
223 { 0x1c5b30, 0x00000010, 0x00000010, 0x00000010, 0x00000010, },
224 { 0x1c5b34, 0x00000011, 0x00000011, 0x00000011, 0x00000011, },
225 { 0x1c5b38, 0x00000012, 0x00000012, 0x00000012, 0x00000012, },
226 { 0x1c5b3c, 0x00000013, 0x00000013, 0x00000013, 0x00000013, },
227 { 0x1c5b40, 0x00000014, 0x00000014, 0x00000014, 0x00000014, },
228 { 0x1c5b44, 0x00000015, 0x00000015, 0x00000015, 0x00000015, },
229 { 0x1c5b48, 0x00000018, 0x00000018, 0x00000018, 0x00000018, },
230 { 0x1c5b4c, 0x00000019, 0x00000019, 0x00000019, 0x00000019, },
231 { 0x1c5b50, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, },
232 { 0x1c5b54, 0x0000001b, 0x0000001b, 0x0000001b, 0x0000001b, },
233 { 0x1c5b58, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, },
234 { 0x1c5b5c, 0x0000001d, 0x0000001d, 0x0000001d, 0x0000001d, },
235 { 0x1c5b60, 0x00000020, 0x00000020, 0x00000020, 0x00000020, },
236 { 0x1c5b64, 0x00000021, 0x00000021, 0x00000021, 0x00000021, },
237 { 0x1c5b68, 0x00000022, 0x00000022, 0x00000022, 0x00000022, },
238 { 0x1c5b6c, 0x00000023, 0x00000023, 0x00000023, 0x00000023, },
239 { 0x1c5b70, 0x00000024, 0x00000024, 0x00000024, 0x00000024, },
240 { 0x1c5b74, 0x00000025, 0x00000025, 0x00000025, 0x00000025, },
241 { 0x1c5b78, 0x00000028, 0x00000028, 0x00000028, 0x00000028, },
242 { 0x1c5b7c, 0x00000029, 0x00000029, 0x00000029, 0x00000029, },
243 { 0x1c5b80, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a, },
244 { 0x1c5b84, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, },
245 { 0x1c5b88, 0x0000002c, 0x0000002c, 0x0000002c, 0x0000002c, },
246 { 0x1c5b8c, 0x0000002d, 0x0000002d, 0x0000002d, 0x0000002d, },
247 { 0x1c5b90, 0x00000030, 0x00000030, 0x00000030, 0x00000030, },
248 { 0x1c5b94, 0x00000031, 0x00000031, 0x00000031, 0x00000031, },
249 { 0x1c5b98, 0x00000032, 0x00000032, 0x00000032, 0x00000032, },
250 { 0x1c5b9c, 0x00000033, 0x00000033, 0x00000033, 0x00000033, },
251 { 0x1c5ba0, 0x00000034, 0x00000034, 0x00000034, 0x00000034, },
252 { 0x1c5ba4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
253 { 0x1c5ba8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
254 { 0x1c5bac, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
255 { 0x1c5bb0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
256 { 0x1c5bb4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
257 { 0x1c5bb8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
258 { 0x1c5bbc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
259 { 0x1c5bc0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
260 { 0x1c5bc4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
261 { 0x1c5bc8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
262 { 0x1c5bcc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
263 { 0x1c5bd0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
264 { 0x1c5bd4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
265 { 0x1c5bd8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
266 { 0x1c5bdc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
267 { 0x1c5be0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
268 { 0x1c5be4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
269 { 0x1c5be8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
270 { 0x1c5bec, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
271 { 0x1c5bf0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
272 { 0x1c5bf4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
273 { 0x1c5bf8, 0x00000010, 0x00000010, 0x00000010, 0x00000010, },
274 { 0x1c5bfc, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, },
275 { 0x1c5c00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
276 { 0x1c5c0c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
277 { 0x1c5c10, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
278 { 0x1c5c14, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
279 { 0x1c5c18, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
280 { 0x1c5c1c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
281 { 0x1c5c20, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
282 { 0x1c5c24, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
283 { 0x1c5c28, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
284 { 0x1c5c2c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
285 { 0x1c5c30, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
286 { 0x1c5c34, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
287 { 0x1c5c38, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
288 { 0x1c5c3c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
289 { 0x1c5cf0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
290 { 0x1c5cf4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
291 { 0x1c5cf8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
292 { 0x1c5cfc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
293 { 0x1c6200, 0x00000008, 0x00000008, 0x0000000e, 0x0000000e, },
294 { 0x1c6204, 0x00000440, 0x00000440, 0x00000440, 0x00000440, },
295 { 0x1c6208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, },
296 { 0x1c620c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
297 { 0x1c6210, 0x40806333, 0x40806333, 0x40806333, 0x40806333, },
298 { 0x1c6214, 0x00106c10, 0x00106c10, 0x00106c10, 0x00106c10, },
299 { 0x1c6218, 0x009c4060, 0x009c4060, 0x009c4060, 0x009c4060, },
300 { 0x1c621c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, },
301 { 0x1c6220, 0x018830c6, 0x018830c6, 0x018830c6, 0x018830c6, },
302 { 0x1c6224, 0x00000400, 0x00000400, 0x00000400, 0x00000400, },
303 { 0x1c6228, 0x000009b5, 0x000009b5, 0x000009b5, 0x000009b5, },
304 { 0x1c622c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
305 { 0x1c6230, 0x00000108, 0x00000210, 0x00000210, 0x00000108, },
306 { 0x1c6234, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
307 { 0x1c6238, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
308 { 0x1c623c, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, },
309 { 0x1c6240, 0x38490a20, 0x38490a20, 0x38490a20, 0x38490a20, },
310 { 0x1c6244, 0x00007bb6, 0x00007bb6, 0x00007bb6, 0x00007bb6, },
311 { 0x1c6248, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, },
312 { 0x1c624c, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
313 { 0x1c6250, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, },
314 { 0x1c6254, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
315 { 0x1c6258, 0x0cc75380, 0x0cc75380, 0x0cc75380, 0x0cc75380, },
316 { 0x1c625c, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, },
317 { 0x1c6260, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, },
318 { 0x1c6264, 0x00418a11, 0x00418a11, 0x00418a11, 0x00418a11, },
319 { 0x1c6268, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
320 { 0x1c626c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
321 { 0x1c6274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, },
322 { 0x1c6278, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
323 { 0x1c627c, 0x051701ce, 0x051701ce, 0x051701ce, 0x051701ce, },
324 { 0x1c6300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, },
325 { 0x1c6304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, },
326 { 0x1c6308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, },
327 { 0x1c630c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, },
328 { 0x1c6310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, },
329 { 0x1c6314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, },
330 { 0x1c6318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, },
331 { 0x1c631c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, },
332 { 0x1c6320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, },
333 { 0x1c6324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, },
334 { 0x1c6328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, },
335 { 0x1c632c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
336 { 0x1c6330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
337 { 0x1c6334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
338 { 0x1c6338, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
339 { 0x1c633c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
340 { 0x1c6340, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
341 { 0x1c6344, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
342 { 0x1c6348, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
343 { 0x1c634c, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
344 { 0x1c6350, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
345 { 0x1c6354, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, },
346 { 0x1c6358, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, },
347 { 0x1c6388, 0x08000000, 0x08000000, 0x08000000, 0x08000000, },
348 { 0x1c638c, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
349 { 0x1c6390, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
350 { 0x1c6394, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
351 { 0x1c6398, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce, },
352 { 0x1c639c, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
353 { 0x1c63a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
354 { 0x1c63a4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
355 { 0x1c63a8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
356 { 0x1c63ac, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
357 { 0x1c63b0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
358 { 0x1c63b4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
359 { 0x1c63b8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
360 { 0x1c63bc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
361 { 0x1c63c0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
362 { 0x1c63c4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
363 { 0x1c63c8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
364 { 0x1c63cc, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
365 { 0x1c63d0, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
366 { 0x1c63d4, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
367 { 0x1c63d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
368 { 0x1c63dc, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
369 { 0x1c63e0, 0x000000c0, 0x000000c0, 0x000000c0, 0x000000c0, },
370 { 0x1c6848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, },
371 { 0x1c6920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, },
372 { 0x1c6960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
373 { 0x1c720c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
374 { 0x1c726c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
375 { 0x1c7848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, },
376 { 0x1c7920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, },
377 { 0x1c7960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
378 { 0x1c820c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
379 { 0x1c826c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
380/* { 0x1c8864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, }, */
381 { 0x1c8864, 0x0001c600, 0x0001c600, 0x0001c600, 0x0001c600, },
382 { 0x1c895c, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, },
383 { 0x1c8968, 0x000003ce, 0x000003ce, 0x000003ce, 0x000003ce, },
384 { 0x1c89bc, 0x00181400, 0x00181400, 0x00181400, 0x00181400, },
385 { 0x1c9270, 0x00820820, 0x00820820, 0x00820820, 0x00820820, },
386 { 0x1c935c, 0x066c420f, 0x066c420f, 0x066c420f, 0x066c420f, },
387 { 0x1c9360, 0x0f282207, 0x0f282207, 0x0f282207, 0x0f282207, },
388 { 0x1c9364, 0x17601685, 0x17601685, 0x17601685, 0x17601685, },
389 { 0x1c9368, 0x1f801104, 0x1f801104, 0x1f801104, 0x1f801104, },
390 { 0x1c936c, 0x37a00c03, 0x37a00c03, 0x37a00c03, 0x37a00c03, },
391 { 0x1c9370, 0x3fc40883, 0x3fc40883, 0x3fc40883, 0x3fc40883, },
392 { 0x1c9374, 0x57c00803, 0x57c00803, 0x57c00803, 0x57c00803, },
393 { 0x1c9378, 0x5fd80682, 0x5fd80682, 0x5fd80682, 0x5fd80682, },
394 { 0x1c937c, 0x7fe00482, 0x7fe00482, 0x7fe00482, 0x7fe00482, },
395 { 0x1c9380, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, },
396 { 0x1c9384, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, }
397};
398
399int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band)
400{
401 int i, err;
402 u32 val;
403 bool is_2ghz = band == IEEE80211_BAND_2GHZ;
404 bool is_40mhz = false; /* XXX: for now */
405
406 ar9170_regwrite_begin(ar);
407
408 for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) {
409 if (is_40mhz) {
410 if (is_2ghz)
411 val = ar5416_phy_init[i]._2ghz_40;
412 else
413 val = ar5416_phy_init[i]._5ghz_40;
414 } else {
415 if (is_2ghz)
416 val = ar5416_phy_init[i]._2ghz_20;
417 else
418 val = ar5416_phy_init[i]._5ghz_20;
419 }
420
421 ar9170_regwrite(ar5416_phy_init[i].reg, val);
422 }
423
424 ar9170_regwrite_finish();
425 err = ar9170_regwrite_result();
426 if (err)
427 return err;
428
429 /* XXX: use EEPROM data here! */
430
431 err = ar9170_init_power_cal(ar);
432 if (err)
433 return err;
434
435 /* XXX: remove magic! */
436 if (is_2ghz)
437 err = ar9170_write_reg(ar, 0x1d4014, 0x5163);
438 else
439 err = ar9170_write_reg(ar, 0x1d4014, 0x5143);
440
441 return err;
442}
443
/* One RF register initval: value columns for the 5 GHz and 2.4 GHz band. */
struct ar9170_rf_init {
	u32 reg, _5ghz, _2ghz;
};

/*
 * RF bank 0-7 initialisation values.  The table is only ever read
 * (by ar9170_init_rf_banks_0_7()), hence const so it lands in rodata.
 */
static const struct ar9170_rf_init ar9170_rf_init[] = {
	/* bank 0 */
	{ 0x1c58b0, 0x1e5795e5, 0x1e5795e5},
	{ 0x1c58e0, 0x02008020, 0x02008020},
	/* bank 1 */
	{ 0x1c58b0, 0x02108421, 0x02108421},
	{ 0x1c58ec, 0x00000008, 0x00000008},
	/* bank 2 */
	{ 0x1c58b0, 0x0e73ff17, 0x0e73ff17},
	{ 0x1c58e0, 0x00000420, 0x00000420},
	/* bank 3 */
	{ 0x1c58f0, 0x01400018, 0x01c00018},
	/* bank 4 */
	{ 0x1c58b0, 0x000001a1, 0x000001a1},
	{ 0x1c58e8, 0x00000001, 0x00000001},
	/* bank 5 */
	{ 0x1c58b0, 0x00000013, 0x00000013},
	{ 0x1c58e4, 0x00000002, 0x00000002},
	/* bank 6 */
	{ 0x1c58b0, 0x00000000, 0x00000000},
	{ 0x1c58b0, 0x00000000, 0x00000000},
	{ 0x1c58b0, 0x00000000, 0x00000000},
	{ 0x1c58b0, 0x00000000, 0x00000000},
	{ 0x1c58b0, 0x00000000, 0x00000000},
	{ 0x1c58b0, 0x00004000, 0x00004000},
	{ 0x1c58b0, 0x00006c00, 0x00006c00},
	{ 0x1c58b0, 0x00002c00, 0x00002c00},
	{ 0x1c58b0, 0x00004800, 0x00004800},
	{ 0x1c58b0, 0x00004000, 0x00004000},
	{ 0x1c58b0, 0x00006000, 0x00006000},
	{ 0x1c58b0, 0x00001000, 0x00001000},
	{ 0x1c58b0, 0x00004000, 0x00004000},
	{ 0x1c58b0, 0x00007c00, 0x00007c00},
	{ 0x1c58b0, 0x00007c00, 0x00007c00},
	{ 0x1c58b0, 0x00007c00, 0x00007c00},
	{ 0x1c58b0, 0x00007c00, 0x00007c00},
	{ 0x1c58b0, 0x00007c00, 0x00007c00},
	{ 0x1c58b0, 0x00087c00, 0x00087c00},
	{ 0x1c58b0, 0x00007c00, 0x00007c00},
	{ 0x1c58b0, 0x00005400, 0x00005400},
	{ 0x1c58b0, 0x00000c00, 0x00000c00},
	{ 0x1c58b0, 0x00001800, 0x00001800},
	{ 0x1c58b0, 0x00007c00, 0x00007c00},
	{ 0x1c58b0, 0x00006c00, 0x00006c00},
	{ 0x1c58b0, 0x00006c00, 0x00006c00},
	{ 0x1c58b0, 0x00007c00, 0x00007c00},
	{ 0x1c58b0, 0x00002c00, 0x00002c00},
	{ 0x1c58b0, 0x00003c00, 0x00003c00},
	{ 0x1c58b0, 0x00003800, 0x00003800},
	{ 0x1c58b0, 0x00001c00, 0x00001c00},
	{ 0x1c58b0, 0x00000800, 0x00000800},
	{ 0x1c58b0, 0x00000408, 0x00000408},
	{ 0x1c58b0, 0x00004c15, 0x00004c15},
	{ 0x1c58b0, 0x00004188, 0x00004188},
	{ 0x1c58b0, 0x0000201e, 0x0000201e},
	{ 0x1c58b0, 0x00010408, 0x00010408},
	{ 0x1c58b0, 0x00000801, 0x00000801},
	{ 0x1c58b0, 0x00000c08, 0x00000c08},
	{ 0x1c58b0, 0x0000181e, 0x0000181e},
	{ 0x1c58b0, 0x00001016, 0x00001016},
	{ 0x1c58b0, 0x00002800, 0x00002800},
	{ 0x1c58b0, 0x00004010, 0x00004010},
	{ 0x1c58b0, 0x0000081c, 0x0000081c},
	{ 0x1c58b0, 0x00000115, 0x00000115},
	{ 0x1c58b0, 0x00000015, 0x00000015},
	{ 0x1c58b0, 0x00000066, 0x00000066},
	{ 0x1c58b0, 0x0000001c, 0x0000001c},
	{ 0x1c58b0, 0x00000000, 0x00000000},
	{ 0x1c58b0, 0x00000004, 0x00000004},
	{ 0x1c58b0, 0x00000015, 0x00000015},
	{ 0x1c58b0, 0x0000001f, 0x0000001f},
	{ 0x1c58e0, 0x00000000, 0x00000400},
	/* bank 7 */
	{ 0x1c58b0, 0x000000a0, 0x000000a0},
	{ 0x1c58b0, 0x00000000, 0x00000000},
	{ 0x1c58b0, 0x00000040, 0x00000040},
	{ 0x1c58f0, 0x0000001c, 0x0000001c},
};
526
527static int ar9170_init_rf_banks_0_7(struct ar9170 *ar, bool band5ghz)
528{
529 int err, i;
530
531 ar9170_regwrite_begin(ar);
532
533 for (i = 0; i < ARRAY_SIZE(ar9170_rf_init); i++)
534 ar9170_regwrite(ar9170_rf_init[i].reg,
535 band5ghz ? ar9170_rf_init[i]._5ghz
536 : ar9170_rf_init[i]._2ghz);
537
538 ar9170_regwrite_finish();
539 err = ar9170_regwrite_result();
540 if (err)
541 printk(KERN_ERR "%s: rf init failed\n",
542 wiphy_name(ar->hw->wiphy));
543 return err;
544}
545
/*
 * Program RF bank 4 with the synthesizer channel-select word for
 * @freq and the reference/LF-synth control bits, then give the
 * synthesizer 10 ms to settle.
 *
 * For HT40 the centre frequency is first shifted by +/-10 MHz.
 * The channel-select value is bit-reversed (byte_rev_table) before
 * being packed, d0 carrying the control bits and d1 the channel
 * select; both are split at bit 5 and recombined into the two
 * register words fd0/fd1.
 */
static int ar9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz,
				    u32 freq, enum ar9170_bw bw)
{
	int err;
	u32 d0, d1, td0, td1, fd0, fd1;
	u8 chansel;
	u8 refsel0 = 1, refsel1 = 0;
	u8 lf_synth = 0;

	/* adjust to the HT40 centre frequency */
	switch (bw) {
	case AR9170_BW_40_ABOVE:
		freq += 10;
		break;
	case AR9170_BW_40_BELOW:
		freq -= 10;
		break;
	case AR9170_BW_20:
		break;
	case __AR9170_NUM_BW:
		BUG();
	}

	if (band5ghz) {
		if (freq % 10) {
			/* odd 5 MHz raster channel */
			chansel = (freq - 4800) / 5;
		} else {
			/* 10 MHz raster: use the other reference select */
			chansel = ((freq - 4800) / 10) * 2;
			refsel0 = 0;
			refsel1 = 1;
		}
		chansel = byte_rev_table[chansel];
	} else {
		if (freq == 2484) {
			/* channel 14 is special-cased (Japan, CCK only) */
			chansel = 10 + (freq - 2274) / 5;
			lf_synth = 1;
		} else
			chansel = 16 + (freq - 2272) / 5;
		chansel *= 4;
		chansel = byte_rev_table[chansel];
	}

	/* pack control word (d0) and channel select (d1) into fd0/fd1 */
	d1 = chansel;
	d0 = 0x21 |
	     refsel0 << 3 |
	     refsel1 << 2 |
	     lf_synth << 1;
	td0 = d0 & 0x1f;
	td1 = d1 & 0x1f;
	fd0 = td1 << 5 | td0;

	td0 = (d0 >> 5) & 0x7;
	td1 = (d1 >> 5) & 0x7;
	fd1 = td1 << 5 | td0;

	ar9170_regwrite_begin(ar);

	ar9170_regwrite(0x1c58b0, fd0);
	ar9170_regwrite(0x1c58e8, fd1);

	ar9170_regwrite_finish();
	err = ar9170_regwrite_result();
	if (err)
		return err;

	/* let the synthesizer settle */
	msleep(10);

	return 0;
}
614
/*
 * Per-bandwidth PLL coefficient pair (exponent + mantissa) as handed
 * to the firmware's FREQUENCY/RF_INIT commands.  The second pair is
 * presumably for the short guard interval ("shgi") — confirm against
 * the firmware interface.
 */
struct ar9170_phy_freq_params {
	u8 coeff_exp;
	u16 coeff_man;
	u8 coeff_exp_shgi;
	u16 coeff_man_shgi;
};
621
/* Coefficients for one channel: one parameter set per bandwidth mode. */
struct ar9170_phy_freq_entry {
	u16 freq;	/* channel centre frequency in MHz */
	struct ar9170_phy_freq_params params[__AR9170_NUM_BW];
};
626
/*
 * Per-channel PLL coefficient table.
 *
 * NB: must be in sync with channel tables in main! — the channel's
 * hw_value is used as a direct index into this array (see
 * ar9170_get_hw_dyn_params()), which is also why the 5170-5230 MHz
 * entries sit at the end instead of in ascending frequency order.
 */
static const struct ar9170_phy_freq_entry ar9170_phy_freq_params[] = {
/*
 * freq,
 * 20MHz,
 * 40MHz (below),
 * 40MHz (above),
 */
	{ 2412, {
		{ 3, 21737, 3, 19563, },
		{ 3, 21827, 3, 19644, },
		{ 3, 21647, 3, 19482, },
	} },
	{ 2417, {
		{ 3, 21692, 3, 19523, },
		{ 3, 21782, 3, 19604, },
		{ 3, 21602, 3, 19442, },
	} },
	{ 2422, {
		{ 3, 21647, 3, 19482, },
		{ 3, 21737, 3, 19563, },
		{ 3, 21558, 3, 19402, },
	} },
	{ 2427, {
		{ 3, 21602, 3, 19442, },
		{ 3, 21692, 3, 19523, },
		{ 3, 21514, 3, 19362, },
	} },
	{ 2432, {
		{ 3, 21558, 3, 19402, },
		{ 3, 21647, 3, 19482, },
		{ 3, 21470, 3, 19323, },
	} },
	{ 2437, {
		{ 3, 21514, 3, 19362, },
		{ 3, 21602, 3, 19442, },
		{ 3, 21426, 3, 19283, },
	} },
	{ 2442, {
		{ 3, 21470, 3, 19323, },
		{ 3, 21558, 3, 19402, },
		{ 3, 21382, 3, 19244, },
	} },
	{ 2447, {
		{ 3, 21426, 3, 19283, },
		{ 3, 21514, 3, 19362, },
		{ 3, 21339, 3, 19205, },
	} },
	{ 2452, {
		{ 3, 21382, 3, 19244, },
		{ 3, 21470, 3, 19323, },
		{ 3, 21295, 3, 19166, },
	} },
	{ 2457, {
		{ 3, 21339, 3, 19205, },
		{ 3, 21426, 3, 19283, },
		{ 3, 21252, 3, 19127, },
	} },
	{ 2462, {
		{ 3, 21295, 3, 19166, },
		{ 3, 21382, 3, 19244, },
		{ 3, 21209, 3, 19088, },
	} },
	{ 2467, {
		{ 3, 21252, 3, 19127, },
		{ 3, 21339, 3, 19205, },
		{ 3, 21166, 3, 19050, },
	} },
	{ 2472, {
		{ 3, 21209, 3, 19088, },
		{ 3, 21295, 3, 19166, },
		{ 3, 21124, 3, 19011, },
	} },
	{ 2484, {
		{ 3, 21107, 3, 18996, },
		{ 3, 21192, 3, 19073, },
		{ 3, 21022, 3, 18920, },
	} },
	{ 4920, {
		{ 4, 21313, 4, 19181, },
		{ 4, 21356, 4, 19220, },
		{ 4, 21269, 4, 19142, },
	} },
	{ 4940, {
		{ 4, 21226, 4, 19104, },
		{ 4, 21269, 4, 19142, },
		{ 4, 21183, 4, 19065, },
	} },
	{ 4960, {
		{ 4, 21141, 4, 19027, },
		{ 4, 21183, 4, 19065, },
		{ 4, 21098, 4, 18988, },
	} },
	{ 4980, {
		{ 4, 21056, 4, 18950, },
		{ 4, 21098, 4, 18988, },
		{ 4, 21014, 4, 18912, },
	} },
	{ 5040, {
		{ 4, 20805, 4, 18725, },
		{ 4, 20846, 4, 18762, },
		{ 4, 20764, 4, 18687, },
	} },
	{ 5060, {
		{ 4, 20723, 4, 18651, },
		{ 4, 20764, 4, 18687, },
		{ 4, 20682, 4, 18614, },
	} },
	{ 5080, {
		{ 4, 20641, 4, 18577, },
		{ 4, 20682, 4, 18614, },
		{ 4, 20601, 4, 18541, },
	} },
	{ 5180, {
		{ 4, 20243, 4, 18219, },
		{ 4, 20282, 4, 18254, },
		{ 4, 20204, 4, 18183, },
	} },
	{ 5200, {
		{ 4, 20165, 4, 18148, },
		{ 4, 20204, 4, 18183, },
		{ 4, 20126, 4, 18114, },
	} },
	{ 5220, {
		{ 4, 20088, 4, 18079, },
		{ 4, 20126, 4, 18114, },
		{ 4, 20049, 4, 18044, },
	} },
	{ 5240, {
		{ 4, 20011, 4, 18010, },
		{ 4, 20049, 4, 18044, },
		{ 4, 19973, 4, 17976, },
	} },
	{ 5260, {
		{ 4, 19935, 4, 17941, },
		{ 4, 19973, 4, 17976, },
		{ 4, 19897, 4, 17907, },
	} },
	{ 5280, {
		{ 4, 19859, 4, 17873, },
		{ 4, 19897, 4, 17907, },
		{ 4, 19822, 4, 17840, },
	} },
	{ 5300, {
		{ 4, 19784, 4, 17806, },
		{ 4, 19822, 4, 17840, },
		{ 4, 19747, 4, 17772, },
	} },
	{ 5320, {
		{ 4, 19710, 4, 17739, },
		{ 4, 19747, 4, 17772, },
		{ 4, 19673, 4, 17706, },
	} },
	{ 5500, {
		{ 4, 19065, 4, 17159, },
		{ 4, 19100, 4, 17190, },
		{ 4, 19030, 4, 17127, },
	} },
	{ 5520, {
		{ 4, 18996, 4, 17096, },
		{ 4, 19030, 4, 17127, },
		{ 4, 18962, 4, 17065, },
	} },
	{ 5540, {
		{ 4, 18927, 4, 17035, },
		{ 4, 18962, 4, 17065, },
		{ 4, 18893, 4, 17004, },
	} },
	{ 5560, {
		{ 4, 18859, 4, 16973, },
		{ 4, 18893, 4, 17004, },
		{ 4, 18825, 4, 16943, },
	} },
	{ 5580, {
		{ 4, 18792, 4, 16913, },
		{ 4, 18825, 4, 16943, },
		{ 4, 18758, 4, 16882, },
	} },
	{ 5600, {
		{ 4, 18725, 4, 16852, },
		{ 4, 18758, 4, 16882, },
		{ 4, 18691, 4, 16822, },
	} },
	{ 5620, {
		{ 4, 18658, 4, 16792, },
		{ 4, 18691, 4, 16822, },
		{ 4, 18625, 4, 16762, },
	} },
	{ 5640, {
		{ 4, 18592, 4, 16733, },
		{ 4, 18625, 4, 16762, },
		{ 4, 18559, 4, 16703, },
	} },
	{ 5660, {
		{ 4, 18526, 4, 16673, },
		{ 4, 18559, 4, 16703, },
		{ 4, 18493, 4, 16644, },
	} },
	{ 5680, {
		{ 4, 18461, 4, 16615, },
		{ 4, 18493, 4, 16644, },
		{ 4, 18428, 4, 16586, },
	} },
	{ 5700, {
		{ 4, 18396, 4, 16556, },
		{ 4, 18428, 4, 16586, },
		{ 4, 18364, 4, 16527, },
	} },
	{ 5745, {
		{ 4, 18252, 4, 16427, },
		{ 4, 18284, 4, 16455, },
		{ 4, 18220, 4, 16398, },
	} },
	{ 5765, {
		{ 4, 18189, 5, 32740, },
		{ 4, 18220, 4, 16398, },
		{ 4, 18157, 5, 32683, },
	} },
	{ 5785, {
		{ 4, 18126, 5, 32626, },
		{ 4, 18157, 5, 32683, },
		{ 4, 18094, 5, 32570, },
	} },
	{ 5805, {
		{ 4, 18063, 5, 32514, },
		{ 4, 18094, 5, 32570, },
		{ 4, 18032, 5, 32458, },
	} },
	{ 5825, {
		{ 4, 18001, 5, 32402, },
		{ 4, 18032, 5, 32458, },
		{ 4, 17970, 5, 32347, },
	} },
	/* these four are out of frequency order to match the hw_value index */
	{ 5170, {
		{ 4, 20282, 4, 18254, },
		{ 4, 20321, 4, 18289, },
		{ 4, 20243, 4, 18219, },
	} },
	{ 5190, {
		{ 4, 20204, 4, 18183, },
		{ 4, 20243, 4, 18219, },
		{ 4, 20165, 4, 18148, },
	} },
	{ 5210, {
		{ 4, 20126, 4, 18114, },
		{ 4, 20165, 4, 18148, },
		{ 4, 20088, 4, 18079, },
	} },
	{ 5230, {
		{ 4, 20049, 4, 18044, },
		{ 4, 20088, 4, 18079, },
		{ 4, 20011, 4, 18010, },
	} },
};
881
882static const struct ar9170_phy_freq_params *
883ar9170_get_hw_dyn_params(struct ieee80211_channel *channel,
884 enum ar9170_bw bw)
885{
886 unsigned int chanidx = 0;
887 u16 freq = 2412;
888
889 if (channel) {
890 chanidx = channel->hw_value;
891 freq = channel->center_freq;
892 }
893
894 BUG_ON(chanidx >= ARRAY_SIZE(ar9170_phy_freq_params));
895
896 BUILD_BUG_ON(__AR9170_NUM_BW != 3);
897
898 WARN_ON(ar9170_phy_freq_params[chanidx].freq != freq);
899
900 return &ar9170_phy_freq_params[chanidx].params[bw];
901}
902
903
/*
 * One-time RF bring-up: program RF banks 0-7 for 2.4 GHz, power up
 * bank 4 for 2412 MHz / 20 MHz, then issue the RF_INIT firmware
 * command with the matching PLL coefficients and verify the firmware
 * is still responsive afterwards.
 */
int ar9170_init_rf(struct ar9170 *ar)
{
	const struct ar9170_phy_freq_params *freqpar;
	__le32 cmd[7];
	int err;

	err = ar9170_init_rf_banks_0_7(ar, false);
	if (err)
		return err;

	err = ar9170_init_rf_bank4_pwr(ar, false, 2412, AR9170_BW_20);
	if (err)
		return err;

	/* coefficients for the default channel (2412 MHz, 20 MHz wide) */
	freqpar = ar9170_get_hw_dyn_params(NULL, AR9170_BW_20);

	cmd[0] = cpu_to_le32(2412 * 1000);	/* frequency in kHz */
	cmd[1] = cpu_to_le32(0);		/* 20 MHz, no HT40 */
	cmd[2] = cpu_to_le32(1);		/* matches offs << 2 | 1 with offs = 0 in ar9170_set_channel() */
	cmd[3] = cpu_to_le32(freqpar->coeff_exp);
	cmd[4] = cpu_to_le32(freqpar->coeff_man);
	cmd[5] = cpu_to_le32(freqpar->coeff_exp_shgi);
	cmd[6] = cpu_to_le32(freqpar->coeff_man_shgi);

	/* RF_INIT echoes the command back to us */
	err = ar->exec_cmd(ar, AR9170_CMD_RF_INIT,
			   sizeof(cmd), (u8 *)cmd,
			   sizeof(cmd), (u8 *)cmd);
	if (err)
		return err;

	/* give the RF a full second to settle */
	msleep(1000);

	return ar9170_echo_test(ar, 0xaabbccdd);
}
939
/*
 * Find the calibration interval containing @f: the highest index i
 * (at most nfreqs - 2, so that i + 1 is still a valid sample) with
 * freqs[i] <= f.  Falls back to 0 when f lies below the whole table.
 */
static int ar9170_find_freq_idx(int nfreqs, u8 *freqs, u8 f)
{
	int i;

	for (i = nfreqs - 2; i >= 0; i--) {
		if (f >= freqs[i])
			return i;
	}

	return 0;
}
952
/*
 * Linear interpolation of y at x between the points (x1, y1) and
 * (x2, y2), with integer arithmetic.  Degenerate inputs (horizontal
 * line, x1 == x2) and exact hits on either endpoint are handled
 * without dividing.
 */
static s32 ar9170_interpolate_s32(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
{
	/* horizontal line: nothing to interpolate */
	if (y1 == y2)
		return y1;

	/* exact hit on the first point (also covers x1 == x2 == x) */
	if (x == x1)
		return y1;

	/* exact hit on the second point */
	if (x == x2)
		return y2;

	/* degenerate interval; hopefully x was one of the endpoints */
	if (x1 == x2)
		return y1;

	return y1 + (y2 - y1) * (x - x1) / (x2 - x1);
}
971
/*
 * Linear interpolation between two u8 sample points with rounding.
 *
 * The inputs are scaled up by 2^SHIFT so that the integer
 * interpolation keeps SHIFT fractional bits; the result is scaled
 * back down with round-to-nearest.
 */
static u8 ar9170_interpolate_u8(u8 x, u8 x1, u8 y1, u8 x2, u8 y2)
{
#define SHIFT		8
	s32 y;

	y = ar9170_interpolate_s32(x << SHIFT,
				   x1 << SHIFT, y1 << SHIFT,
				   x2 << SHIFT, y2 << SHIFT);

	/*
	 * Round to nearest: the second term adds 1 exactly when the
	 * fractional part is >= 0.5 (valid since y is non-negative
	 * here, all inputs being u8).  Note this is NOT equivalent to
	 * DIV_ROUND_UP(y, 1 << SHIFT), which would always round up.
	 */
	return (y >> SHIFT) + ((y & (1<<(SHIFT-1))) >> (SHIFT - 1));
#undef SHIFT
}
989
/*
 * Interpolate the EEPROM calibration target powers for @freq and
 * cache the per-rate results in the ar->power_* arrays, then program
 * the ACK/CTS TX power/chain-mask register pair.
 *
 * Returns 0 or a negative error from the register write batch.
 */
static int ar9170_set_power_cal(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
{
	struct ar9170_calibration_target_power_legacy *ctpl;
	struct ar9170_calibration_target_power_ht *ctph;
	u8 *ctpres;
	int ntargets;
	int idx, i, n;
	u8 ackpower, ackchains, f;
	u8 pwr_freqs[AR5416_MAX_NUM_TGT_PWRS];

	/* compress the frequency to the u8 form the EEPROM tables use */
	if (freq < 3000)
		f = freq - 2300;
	else
		f = (freq - 4800)/5;

	/*
	 * cycle through the various modes
	 *
	 * legacy modes first: 5G, 2G CCK, 2G OFDM
	 */
	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0: /* 5 GHz legacy */
			ctpl = &ar->eeprom.cal_tgt_pwr_5G[0];
			ntargets = AR5416_NUM_5G_TARGET_PWRS;
			ctpres = ar->power_5G_leg;
			break;
		case 1: /* 2.4 GHz CCK */
			ctpl = &ar->eeprom.cal_tgt_pwr_2G_cck[0];
			ntargets = AR5416_NUM_2G_CCK_TARGET_PWRS;
			ctpres = ar->power_2G_cck;
			break;
		case 2: /* 2.4 GHz OFDM */
			ctpl = &ar->eeprom.cal_tgt_pwr_2G_ofdm[0];
			ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
			ctpres = ar->power_2G_ofdm;
			break;
		default:
			BUG();
		}

		/* 0xff terminates the list of valid EEPROM entries */
		for (n = 0; n < ntargets; n++) {
			if (ctpl[n].freq == 0xff)
				break;
			pwr_freqs[n] = ctpl[n].freq;
		}
		ntargets = n;
		/*
		 * NOTE(review): if the EEPROM yields fewer than two valid
		 * entries, idx + 1 below reads past the valid data —
		 * presumably calibrated EEPROMs always carry >= 2 entries;
		 * confirm.
		 */
		idx = ar9170_find_freq_idx(ntargets, pwr_freqs, f);
		for (n = 0; n < 4; n++)
			ctpres[n] = ar9170_interpolate_u8(
				f,
				ctpl[idx + 0].freq,
				ctpl[idx + 0].power[n],
				ctpl[idx + 1].freq,
				ctpl[idx + 1].power[n]);
	}

	/*
	 * now the HT modes: 5G HT20, 5G HT40, 2G HT20, 2G HT40
	 */
	for (i = 0; i < 4; i++) {
		switch (i) {
		case 0: /* 5 GHz HT 20 */
			ctph = &ar->eeprom.cal_tgt_pwr_5G_ht20[0];
			ntargets = AR5416_NUM_5G_TARGET_PWRS;
			ctpres = ar->power_5G_ht20;
			break;
		case 1: /* 5 GHz HT 40 */
			ctph = &ar->eeprom.cal_tgt_pwr_5G_ht40[0];
			ntargets = AR5416_NUM_5G_TARGET_PWRS;
			ctpres = ar->power_5G_ht40;
			break;
		case 2: /* 2.4 GHz HT 20 */
			ctph = &ar->eeprom.cal_tgt_pwr_2G_ht20[0];
			ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
			ctpres = ar->power_2G_ht20;
			break;
		case 3: /* 2.4 GHz HT 40 */
			ctph = &ar->eeprom.cal_tgt_pwr_2G_ht40[0];
			ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
			ctpres = ar->power_2G_ht40;
			break;
		default:
			BUG();
		}

		/* 0xff terminates the list of valid EEPROM entries */
		for (n = 0; n < ntargets; n++) {
			if (ctph[n].freq == 0xff)
				break;
			pwr_freqs[n] = ctph[n].freq;
		}
		ntargets = n;
		idx = ar9170_find_freq_idx(ntargets, pwr_freqs, f);
		/* HT entries carry 8 per-rate powers instead of 4 */
		for (n = 0; n < 8; n++)
			ctpres[n] = ar9170_interpolate_u8(
				f,
				ctph[idx + 0].freq,
				ctph[idx + 0].power[n],
				ctph[idx + 1].freq,
				ctph[idx + 1].power[n]);
	}

	/* set ACK/CTS TX power */
	ar9170_regwrite_begin(ar);

	if (ar->eeprom.tx_mask != 1)
		ackchains = AR9170_TX_PHY_TXCHAIN_2;
	else
		ackchains = AR9170_TX_PHY_TXCHAIN_1;

	/* ACK frames go out at the lowest legacy rate of the band */
	if (freq < 3000)
		ackpower = ar->power_2G_ofdm[0] & 0x3f;
	else
		ackpower = ar->power_5G_leg[0] & 0x3f;

	ar9170_regwrite(0x1c3694, ackpower << 20 | ackchains << 26);
	ar9170_regwrite(0x1c3bb4, ackpower << 5 | ackchains << 11 |
				  ackpower << 21 | ackchains << 27);

	ar9170_regwrite_finish();
	return ar9170_regwrite_result();
}
1112
/*
 * Convert the raw 9-bit noise floor reading (bit 8 = sign, the low
 * byte holds twice the magnitude) into a signed dBm value.
 */
static int ar9170_calc_noise_dbm(u32 raw_noise)
{
	int magnitude = (raw_noise & 0xff) >> 1;

	return (raw_noise & 0x100) ? ~magnitude : magnitude;
}
1120
/*
 * Tune to @channel.
 *
 * Depending on @rfi and on whether we cross a band boundary, this
 * either performs a full BB/ADDA reset plus PHY/RF re-initialisation
 * followed by the RF_INIT firmware command, or just a lightweight
 * FREQUENCY command.  On success the per-chain noise floor readings
 * returned by the firmware are stored in ar->noise[] and ar->channel
 * is updated.
 */
int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
		       enum ar9170_rf_init_mode rfi, enum ar9170_bw bw)
{
	const struct ar9170_phy_freq_params *freqpar;
	u32 cmd, tmp, offs;
	__le32 vals[8];
	int i, err;
	bool bandswitch;

	/* clear BB heavy clip enable */
	err = ar9170_write_reg(ar, 0x1c59e0, 0x200);
	if (err)
		return err;

	/* may be NULL at first setup */
	if (ar->channel)
		bandswitch = ar->channel->band != channel->band;
	else
		bandswitch = true;

	/* HW workaround */
	if (!ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] &&
	    channel->center_freq <= 2417)
		bandswitch = true;

	err = ar->exec_cmd(ar, AR9170_CMD_FREQ_START, 0, NULL, 0, NULL);
	if (err)
		return err;

	if (rfi != AR9170_RFI_NONE || bandswitch) {
		u32 val = 0x400;

		if (rfi == AR9170_RFI_COLD)
			val = 0x800;

		/* warm/cold reset BB/ADDA */
		err = ar9170_write_reg(ar, 0x1d4004, val);
		if (err)
			return err;

		err = ar9170_write_reg(ar, 0x1d4004, 0x0);
		if (err)
			return err;

		/* full PHY + RF bank reprogramming after the reset */
		err = ar9170_init_phy(ar, channel->band);
		if (err)
			return err;

		err = ar9170_init_rf_banks_0_7(ar,
					       channel->band == IEEE80211_BAND_5GHZ);
		if (err)
			return err;

		cmd = AR9170_CMD_RF_INIT;
	} else {
		cmd = AR9170_CMD_FREQUENCY;
	}

	err = ar9170_init_rf_bank4_pwr(ar,
				       channel->band == IEEE80211_BAND_5GHZ,
				       channel->center_freq, bw);
	if (err)
		return err;

	/* channel-width register value (tmp) and HT40 sideband code (offs) */
	switch (bw) {
	case AR9170_BW_20:
		tmp = 0x240;
		offs = 0;
		break;
	case AR9170_BW_40_BELOW:
		tmp = 0x2c4;
		offs = 3;
		break;
	case AR9170_BW_40_ABOVE:
		tmp = 0x2d4;
		offs = 1;
		break;
	default:
		BUG();
		return -ENOSYS;
	}

	if (0 /* 2 streams capable */)
		tmp |= 0x100;

	err = ar9170_write_reg(ar, 0x1c5804, tmp);
	if (err)
		return err;

	err = ar9170_set_power_cal(ar, channel->center_freq, bw);
	if (err)
		return err;

	freqpar = ar9170_get_hw_dyn_params(channel, bw);

	vals[0] = cpu_to_le32(channel->center_freq * 1000);	/* kHz */
	vals[1] = cpu_to_le32(bw == AR9170_BW_20 ? 0 : 1);	/* HT40 flag */
	vals[2] = cpu_to_le32(offs << 2 | 1);			/* sideband offset */
	vals[3] = cpu_to_le32(freqpar->coeff_exp);
	vals[4] = cpu_to_le32(freqpar->coeff_man);
	vals[5] = cpu_to_le32(freqpar->coeff_exp_shgi);
	vals[6] = cpu_to_le32(freqpar->coeff_man_shgi);
	vals[7] = cpu_to_le32(1000);

	/* the firmware overwrites vals[] with its response */
	err = ar->exec_cmd(ar, cmd, sizeof(vals), (u8 *)vals,
			   sizeof(vals), (u8 *)vals);
	if (err)
		return err;

	/*
	 * Extract the per-chain noise floor readings from the response.
	 * NOTE(review): the bit positions (19/23) follow the firmware's
	 * response layout — confirm against the firmware interface.
	 */
	for (i = 0; i < 2; i++) {
		ar->noise[i] = ar9170_calc_noise_dbm(
			(le32_to_cpu(vals[2 + i]) >> 19) & 0x1ff);

		ar->noise[i + 2] = ar9170_calc_noise_dbm(
			(le32_to_cpu(vals[5 + i]) >> 23) & 0x1ff);
	}

	ar->channel = channel;
	return 0;
}
diff --git a/drivers/net/wireless/ar9170/usb.c b/drivers/net/wireless/ar9170/usb.c
new file mode 100644
index 000000000000..ad296840893e
--- /dev/null
+++ b/drivers/net/wireless/ar9170/usb.c
@@ -0,0 +1,748 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * USB - frontend
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, Christian Lamparter <chunkeey@web.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/module.h>
41#include <linux/usb.h>
42#include <linux/firmware.h>
43#include <linux/etherdevice.h>
44#include <net/mac80211.h>
45#include "ar9170.h"
46#include "cmd.h"
47#include "hw.h"
48#include "usb.h"
49
50MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
51MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
52MODULE_LICENSE("GPL");
53MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless");
54MODULE_FIRMWARE("ar9170-1.fw");
55MODULE_FIRMWARE("ar9170-2.fw");
56
57static struct usb_device_id ar9170_usb_ids[] = {
58 /* Atheros 9170 */
59 { USB_DEVICE(0x0cf3, 0x9170) },
60 /* Atheros TG121N */
61 { USB_DEVICE(0x0cf3, 0x1001) },
62 /* D-Link DWA 160A */
63 { USB_DEVICE(0x07d1, 0x3c10) },
64 /* Netgear WNDA3100 */
65 { USB_DEVICE(0x0846, 0x9010) },
66 /* Netgear WN111 v2 */
67 { USB_DEVICE(0x0846, 0x9001) },
68 /* Zydas ZD1221 */
69 { USB_DEVICE(0x0ace, 0x1221) },
70 /* Z-Com UB81 BG */
71 { USB_DEVICE(0x0cde, 0x0023) },
72 /* Z-Com UB82 ABG */
73 { USB_DEVICE(0x0cde, 0x0026) },
74 /* Arcadyan WN7512 */
75 { USB_DEVICE(0x083a, 0xf522) },
76 /* Planex GWUS300 */
77 { USB_DEVICE(0x2019, 0x5304) },
78 /* IO-Data WNGDNUS2 */
79 { USB_DEVICE(0x04bb, 0x093f) },
80
81 /* terminate */
82 {}
83};
84MODULE_DEVICE_TABLE(usb, ar9170_usb_ids);
85
86static void ar9170_usb_tx_urb_complete_free(struct urb *urb)
87{
88 struct sk_buff *skb = urb->context;
89 struct ar9170_usb *aru = (struct ar9170_usb *)
90 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
91
92 if (!aru) {
93 dev_kfree_skb_irq(skb);
94 return ;
95 }
96
97 ar9170_handle_tx_status(&aru->common, skb, false,
98 AR9170_TX_STATUS_COMPLETE);
99}
100
/*
 * Intentionally empty TX URB completion handler — used when the
 * buffer is disposed of elsewhere (cf. ar9170_usb_tx_urb_complete_free,
 * which reports status and frees the skb).
 */
static void ar9170_usb_tx_urb_complete(struct urb *urb)
{
}
104
/*
 * Completion handler for the 64 byte interrupt-IN URB set up in
 * ar9170_usb_alloc_rx_irq_urb().
 *
 * Successful completions are only hex-dumped (debug aid), then the
 * URB is re-anchored and resubmitted.  On disconnect-style errors the
 * DMA-coherent transfer buffer is released; the urb structure itself
 * is not freed here — the USB core drops its reference after this
 * handler returns (NOTE(review): confirm no urb leak on this path).
 */
static void ar9170_usb_irq_completed(struct urb *urb)
{
	struct ar9170_usb *aru = urb->context;

	switch (urb->status) {
	/* everything is fine */
	case 0:
		break;

	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;

	default:
		goto resubmit;
	}

	print_hex_dump_bytes("ar9170 irq: ", DUMP_PREFIX_OFFSET,
			     urb->transfer_buffer, urb->actual_length);

resubmit:
	usb_anchor_urb(urb, &aru->rx_submitted);
	if (usb_submit_urb(urb, GFP_ATOMIC)) {
		usb_unanchor_urb(urb);
		goto free;
	}

	return;

free:
	usb_buffer_free(aru->udev, 64, urb->transfer_buffer, urb->transfer_dma);
}
140
/*
 * Completion handler for the bulk-IN rx URBs.  The skb serving as
 * transfer buffer doubles as urb->context.
 *
 * On success the received bytes are pushed into the common rx path;
 * afterwards the very same skb is reset (tail pointer and length)
 * and the URB is resubmitted with it, so no reallocation happens on
 * the fast path.  On disconnect-style errors, or when the interface
 * data is already gone, the skb is freed instead.
 */
static void ar9170_usb_rx_completed(struct urb *urb)
{
	struct sk_buff *skb = urb->context;
	struct ar9170_usb *aru = (struct ar9170_usb *)
		usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
	int err;

	/* interface already torn down: just drop the buffer */
	if (!aru)
		goto free;

	switch (urb->status) {
	/* everything is fine */
	case 0:
		break;

	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;

	default:
		goto resubmit;
	}

	skb_put(skb, urb->actual_length);
	ar9170_rx(&aru->common, skb);

resubmit:
	/* rewind the skb so it can be reused for the next transfer */
	skb_reset_tail_pointer(skb);
	skb_trim(skb, 0);

	usb_anchor_urb(urb, &aru->rx_submitted);
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err) {
		usb_unanchor_urb(urb);
		dev_kfree_skb_irq(skb);
	}

	return ;

free:
	dev_kfree_skb_irq(skb);
	return;
}
187
/*
 * Allocate an rx skb and attach it to @urb as a bulk-IN transfer on
 * the RX endpoint.  The skb is also installed as the urb context so
 * the completion handler can hand it straight to the rx path.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int ar9170_usb_prep_rx_urb(struct ar9170_usb *aru,
				  struct urb *urb, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE + 32, gfp);
	if (!skb)
		return -ENOMEM;

	/* reserve some space for mac80211's radiotap */
	skb_reserve(skb, 32);

	usb_fill_bulk_urb(urb, aru->udev,
			  usb_rcvbulkpipe(aru->udev, AR9170_EP_RX),
			  skb->data, min(skb_tailroom(skb),
					 AR9170_MAX_RX_BUFFER_SIZE),
			  ar9170_usb_rx_completed, skb);

	return 0;
}
208
/*
 * Allocate a 64 byte coherent DMA buffer, arm the interrupt-in
 * endpoint (AR9170_EP_IRQ) with it and submit the urb.
 *
 * The submitted urb is anchored on aru->rx_submitted; our local
 * reference is always dropped via the common exit path (usb_free_urb
 * is a no-op for NULL).  Returns 0 on success or a negative errno.
 */
static int ar9170_usb_alloc_rx_irq_urb(struct ar9170_usb *aru)
{
	struct urb *urb = NULL;
	void *ibuf;
	int err = -ENOMEM;

	/* initialize interrupt endpoint */
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		goto out;

	ibuf = usb_buffer_alloc(aru->udev, 64, GFP_KERNEL, &urb->transfer_dma);
	if (!ibuf)
		goto out;

	usb_fill_int_urb(urb, aru->udev,
			 usb_rcvintpipe(aru->udev, AR9170_EP_IRQ), ibuf,
			 64, ar9170_usb_irq_completed, aru, 1);
	/* buffer is already DMA-mapped by usb_buffer_alloc() above */
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	usb_anchor_urb(urb, &aru->rx_submitted);
	err = usb_submit_urb(urb, GFP_KERNEL);
	if (err) {
		usb_unanchor_urb(urb);
		usb_buffer_free(aru->udev, 64, urb->transfer_buffer,
				urb->transfer_dma);
	}

out:
	usb_free_urb(urb);
	return err;
}
241
242static int ar9170_usb_alloc_rx_bulk_urbs(struct ar9170_usb *aru)
243{
244 struct urb *urb;
245 int i;
246 int err = -EINVAL;
247
248 for (i = 0; i < AR9170_NUM_RX_URBS; i++) {
249 err = -ENOMEM;
250 urb = usb_alloc_urb(0, GFP_KERNEL);
251 if (!urb)
252 goto err_out;
253
254 err = ar9170_usb_prep_rx_urb(aru, urb, GFP_KERNEL);
255 if (err) {
256 usb_free_urb(urb);
257 goto err_out;
258 }
259
260 usb_anchor_urb(urb, &aru->rx_submitted);
261 err = usb_submit_urb(urb, GFP_KERNEL);
262 if (err) {
263 usb_unanchor_urb(urb);
264 dev_kfree_skb_any((void *) urb->transfer_buffer);
265 usb_free_urb(urb);
266 goto err_out;
267 }
268 usb_free_urb(urb);
269 }
270
271 /* the device now waiting for a firmware. */
272 aru->common.state = AR9170_IDLE;
273 return 0;
274
275err_out:
276
277 usb_kill_anchored_urbs(&aru->rx_submitted);
278 return err;
279}
280
/*
 * Tear down all in-flight urbs: unlink the tx urbs, wait up to 100ms
 * for them to drain, then poison both the tx and rx anchors so that
 * no further submissions can succeed.
 */
static void ar9170_usb_cancel_urbs(struct ar9170_usb *aru)
{
	int ret;

	aru->common.state = AR9170_UNKNOWN_STATE;

	usb_unlink_anchored_urbs(&aru->tx_submitted);

	/* give the LED OFF command and the deauth frame a chance to air. */
	ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted,
					    msecs_to_jiffies(100));
	if (ret == 0)
		dev_err(&aru->udev->dev, "kill pending tx urbs.\n");
	usb_poison_anchored_urbs(&aru->tx_submitted);

	usb_poison_anchored_urbs(&aru->rx_submitted);
}
298
/*
 * Send command @cmd with @plen payload bytes to the device and wait
 * (up to 1s) for the response.  ar9170_usb_callback_cmd() copies the
 * answer into @out and signals aru->cmd_wait.
 *
 * Returns 0 on success, -ETIMEDOUT when no response arrived, or
 * another negative errno.
 */
static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
			       unsigned int plen, void *payload,
			       unsigned int outlen, void *out)
{
	struct ar9170_usb *aru = (void *) ar;
	struct urb *urb = NULL;
	unsigned long flags;
	int err = -ENOMEM;

	if (unlikely(!IS_ACCEPTING_CMD(ar)))
		return -EPERM;

	if (WARN_ON(plen > AR9170_MAX_CMD_LEN - 4))
		return -EINVAL;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (unlikely(!urb))
		goto err_free;

	/* 4 byte header: payload length in the low byte, cmd id next */
	ar->cmdbuf[0] = cpu_to_le32(plen);
	ar->cmdbuf[0] |= cpu_to_le32(cmd << 8);
	/* writing multiple regs fills this buffer already */
	if (plen && payload != (u8 *)(&ar->cmdbuf[1]))
		memcpy(&ar->cmdbuf[1], payload, plen);

	/* tell the response callback where to deliver the answer */
	spin_lock_irqsave(&aru->common.cmdlock, flags);
	aru->readbuf = (u8 *)out;
	aru->readlen = outlen;
	spin_unlock_irqrestore(&aru->common.cmdlock, flags);

	/*
	 * NOTE(review): an interrupt urb is filled for a _bulk_ pipe here;
	 * presumably the trailing interval argument (1) is ignored for
	 * bulk endpoints -- confirm.
	 */
	usb_fill_int_urb(urb, aru->udev,
			 usb_sndbulkpipe(aru->udev, AR9170_EP_CMD),
			 aru->common.cmdbuf, plen + 4,
			 ar9170_usb_tx_urb_complete, NULL, 1);

	usb_anchor_urb(urb, &aru->tx_submitted);
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err) {
		usb_unanchor_urb(urb);
		usb_free_urb(urb);
		goto err_unbuf;
	}
	/* drop our reference; the anchor keeps the urb alive */
	usb_free_urb(urb);

	err = wait_for_completion_timeout(&aru->cmd_wait, HZ);
	if (err == 0) {
		err = -ETIMEDOUT;
		goto err_unbuf;
	}

	/*
	 * NOTE(review): outlen is unsigned, so "outlen >= 0" is always
	 * true and the length check applies unconditionally.
	 */
	if (outlen >= 0 && aru->readlen != outlen) {
		err = -EMSGSIZE;
		goto err_unbuf;
	}

	return 0;

err_unbuf:
	/* Maybe the device was removed in the second we were waiting? */
	if (IS_STARTED(ar)) {
		dev_err(&aru->udev->dev, "no command feedback "
			"received (%d).\n", err);

		/* provide some maybe useful debug information */
		print_hex_dump_bytes("ar9170 cmd: ", DUMP_PREFIX_NONE,
				     aru->common.cmdbuf, plen + 4);
		dump_stack();
	}

	/* invalidate to avoid completing the next prematurely */
	spin_lock_irqsave(&aru->common.cmdlock, flags);
	aru->readbuf = NULL;
	aru->readlen = 0;
	spin_unlock_irqrestore(&aru->common.cmdlock, flags);

err_free:

	return err;
}
378
/*
 * Queue one frame (skb->len + @extra_len bytes) on the bulk-out tx
 * endpoint (AR9170_EP_TX).
 *
 * When @txstatus_needed is set the no-op completion is used, so the
 * skb stays alive until a tx status arrives from the firmware;
 * otherwise ar9170_usb_tx_urb_complete_free() hands the skb back
 * through ar9170_handle_tx_status() on completion.
 */
static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb,
			 bool txstatus_needed, unsigned int extra_len)
{
	struct ar9170_usb *aru = (struct ar9170_usb *) ar;
	struct urb *urb;
	int err;

	if (unlikely(!IS_STARTED(ar))) {
		/* Seriously, what were you drink... err... thinking!? */
		return -EPERM;
	}

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (unlikely(!urb))
		return -ENOMEM;

	usb_fill_bulk_urb(urb, aru->udev,
			  usb_sndbulkpipe(aru->udev, AR9170_EP_TX),
			  skb->data, skb->len + extra_len, (txstatus_needed ?
			  ar9170_usb_tx_urb_complete :
			  ar9170_usb_tx_urb_complete_free), skb);
	urb->transfer_flags |= URB_ZERO_PACKET;

	usb_anchor_urb(urb, &aru->tx_submitted);
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err))
		usb_unanchor_urb(urb);

	/* drop our reference; the anchor keeps the urb alive */
	usb_free_urb(urb);
	return err;
}
410
/*
 * Handle a command response received from the device and wake up the
 * waiter in ar9170_usb_exec_cmd().
 *
 * The first 4 bytes of @buffer echo the command header (response
 * length in the low byte); the remaining bytes are the payload that
 * is copied into aru->readbuf.
 */
static void ar9170_usb_callback_cmd(struct ar9170 *ar, u32 len , void *buffer)
{
	struct ar9170_usb *aru = (void *) ar;
	unsigned long flags;
	u32 in, out;

	if (!buffer)
		return ;

	in = le32_to_cpup((__le32 *)buffer);
	out = le32_to_cpu(ar->cmdbuf[0]);

	/* mask off length byte */
	out &= ~0xFF;

	if (aru->readlen >= 0) {
		/* add expected length */
		out |= aru->readlen;
	} else {
		/* add obtained length */
		out |= in & 0xFF;
	}

	/*
	 * Some commands (e.g: AR9170_CMD_FREQUENCY) have a variable response
	 * length and we cannot predict the correct length in advance.
	 * So we only check if we provided enough space for the data.
	 */
	if (unlikely(out < in)) {
		dev_warn(&aru->udev->dev, "received invalid command response "
			 "got %d bytes, instead of %d bytes "
			 "and the resp length is %d bytes\n",
			 in, out, len);
		print_hex_dump_bytes("ar9170 invalid resp: ",
				     DUMP_PREFIX_OFFSET, buffer, len);
		/*
		 * Do not complete, then the command times out,
		 * and we get a stack trace from there.
		 */
		return ;
	}

	spin_lock_irqsave(&aru->common.cmdlock, flags);
	if (aru->readbuf && len > 0) {
		/*
		 * NOTE(review): copies len - 4 bytes without checking against
		 * the expected outlen; assumes len >= 4 whenever len > 0 and
		 * that the sanity check above bounds the payload -- confirm.
		 */
		memcpy(aru->readbuf, buffer + 4, len - 4);
		aru->readbuf = NULL;
	}
	complete(&aru->cmd_wait);
	spin_unlock_irqrestore(&aru->common.cmdlock, flags);
}
461
462static int ar9170_usb_upload(struct ar9170_usb *aru, const void *data,
463 size_t len, u32 addr, bool complete)
464{
465 int transfer, err;
466 u8 *buf = kmalloc(4096, GFP_KERNEL);
467
468 if (!buf)
469 return -ENOMEM;
470
471 while (len) {
472 transfer = min_t(int, len, 4096);
473 memcpy(buf, data, transfer);
474
475 err = usb_control_msg(aru->udev, usb_sndctrlpipe(aru->udev, 0),
476 0x30 /* FW DL */, 0x40 | USB_DIR_OUT,
477 addr >> 8, 0, buf, transfer, 1000);
478
479 if (err < 0) {
480 kfree(buf);
481 return err;
482 }
483
484 len -= transfer;
485 data += transfer;
486 addr += transfer;
487 }
488 kfree(buf);
489
490 if (complete) {
491 err = usb_control_msg(aru->udev, usb_sndctrlpipe(aru->udev, 0),
492 0x31 /* FW DL COMPLETE */,
493 0x40 | USB_DIR_OUT, 0, 0, NULL, 0, 5000);
494 }
495
496 return 0;
497}
498
499static int ar9170_usb_request_firmware(struct ar9170_usb *aru)
500{
501 int err = 0;
502
503 err = request_firmware(&aru->init_values, "ar9170-1.fw",
504 &aru->udev->dev);
505 if (err) {
506 dev_err(&aru->udev->dev, "file with init values not found.\n");
507 return err;
508 }
509
510 err = request_firmware(&aru->firmware, "ar9170-2.fw", &aru->udev->dev);
511 if (err) {
512 release_firmware(aru->init_values);
513 dev_err(&aru->udev->dev, "firmware file not found.\n");
514 return err;
515 }
516
517 return err;
518}
519
/*
 * Perform a full USB-level reset of the device and give it a second
 * to settle afterwards.
 *
 * The device lock is only taken when we are not inside probe
 * (condition != USB_INTERFACE_BINDING) -- presumably because usbcore
 * already holds it during binding; confirm against usbcore locking.
 */
static int ar9170_usb_reset(struct ar9170_usb *aru)
{
	int ret, lock = (aru->intf->condition != USB_INTERFACE_BINDING);

	if (lock) {
		ret = usb_lock_device_for_reset(aru->udev, aru->intf);
		if (ret < 0) {
			dev_err(&aru->udev->dev, "unable to lock device "
				"for reset (%d).\n", ret);
			return ret;
		}
	}

	ret = usb_reset_device(aru->udev);
	if (lock)
		usb_unlock_device(aru->udev);

	/* let it rest - for a second - */
	msleep(1000);

	return ret;
}
542
543static int ar9170_usb_upload_firmware(struct ar9170_usb *aru)
544{
545 int err;
546
547 /* First, upload initial values to device RAM */
548 err = ar9170_usb_upload(aru, aru->init_values->data,
549 aru->init_values->size, 0x102800, false);
550 if (err) {
551 dev_err(&aru->udev->dev, "firmware part 1 "
552 "upload failed (%d).\n", err);
553 return err;
554 }
555
556 /* Then, upload the firmware itself and start it */
557 return ar9170_usb_upload(aru, aru->firmware->data, aru->firmware->size,
558 0x200000, true);
559}
560
/*
 * Configure the device's USB rx stream mode through the batched
 * register-write machinery (ar9170_regwrite_* macros, defined
 * elsewhere in the driver).
 */
static int ar9170_usb_init_transport(struct ar9170_usb *aru)
{
	struct ar9170 *ar = (void *) &aru->common;
	int err;

	ar9170_regwrite_begin(ar);

	/* Set USB Rx stream mode MAX packet number to 2 */
	ar9170_regwrite(AR9170_USB_REG_MAX_AGG_UPLOAD, 0x4);

	/* Set USB Rx stream mode timeout to 10us */
	ar9170_regwrite(AR9170_USB_REG_UPLOAD_TIME_CTL, 0x80);

	ar9170_regwrite_finish();

	err = ar9170_regwrite_result();
	if (err)
		dev_err(&aru->udev->dev, "USB setup failed (%d).\n", err);

	return err;
}
582
/*
 * Stop the device: mark it stopped, let pending tx urbs drain for up
 * to a second, then poison the tx anchor.  The rx urbs are left
 * untouched on purpose (see the note below).
 */
static void ar9170_usb_stop(struct ar9170 *ar)
{
	struct ar9170_usb *aru = (void *) ar;
	int ret;

	if (IS_ACCEPTING_CMD(ar))
		aru->common.state = AR9170_STOPPED;

	/* lets wait a while until the tx - queues are dried out */
	ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted,
					    msecs_to_jiffies(1000));
	if (ret == 0)
		dev_err(&aru->udev->dev, "kill pending tx urbs.\n");

	usb_poison_anchored_urbs(&aru->tx_submitted);

	/*
	 * Note:
	 * So far we freed all tx urbs, but we won't dare to touch any rx urbs.
	 * Else we would end up with a unresponsive device...
	 */
}
605
606static int ar9170_usb_open(struct ar9170 *ar)
607{
608 struct ar9170_usb *aru = (void *) ar;
609 int err;
610
611 usb_unpoison_anchored_urbs(&aru->tx_submitted);
612 err = ar9170_usb_init_transport(aru);
613 if (err) {
614 usb_poison_anchored_urbs(&aru->tx_submitted);
615 return err;
616 }
617
618 aru->common.state = AR9170_IDLE;
619 return 0;
620}
621
/*
 * USB probe: allocate the common ar9170 state, reset the device, load
 * and upload the firmware, arm the rx path and register with
 * mac80211.  Each error label unwinds exactly the steps completed
 * before it (rx urbs -> firmware images -> device reference/hw).
 */
static int ar9170_usb_probe(struct usb_interface *intf,
			    const struct usb_device_id *id)
{
	struct ar9170_usb *aru;
	struct ar9170 *ar;
	struct usb_device *udev;
	int err;

	aru = ar9170_alloc(sizeof(*aru));
	if (IS_ERR(aru)) {
		err = PTR_ERR(aru);
		goto out;
	}

	udev = interface_to_usbdev(intf);
	usb_get_dev(udev);
	aru->udev = udev;
	aru->intf = intf;
	ar = &aru->common;

	usb_set_intfdata(intf, aru);
	SET_IEEE80211_DEV(ar->hw, &udev->dev);

	init_usb_anchor(&aru->rx_submitted);
	init_usb_anchor(&aru->tx_submitted);
	init_completion(&aru->cmd_wait);

	/* hook up the USB transport callbacks used by the common code */
	aru->common.stop = ar9170_usb_stop;
	aru->common.open = ar9170_usb_open;
	aru->common.tx = ar9170_usb_tx;
	aru->common.exec_cmd = ar9170_usb_exec_cmd;
	aru->common.callback_cmd = ar9170_usb_callback_cmd;

	err = ar9170_usb_reset(aru);
	if (err)
		goto err_unlock;

	err = ar9170_usb_request_firmware(aru);
	if (err)
		goto err_unlock;

	err = ar9170_usb_alloc_rx_irq_urb(aru);
	if (err)
		goto err_freefw;

	err = ar9170_usb_alloc_rx_bulk_urbs(aru);
	if (err)
		goto err_unrx;

	err = ar9170_usb_upload_firmware(aru);
	if (err) {
		/* upload failed -- check whether firmware is already alive */
		err = ar9170_echo_test(&aru->common, 0x60d43110);
		if (err) {
			/* force user intervention, by disabling the device */
			err = usb_driver_set_configuration(aru->udev, -1);
			dev_err(&aru->udev->dev, "device is in a bad state. "
						 "please reconnect it!\n");
			goto err_unrx;
		}
	}

	err = ar9170_usb_open(ar);
	if (err)
		goto err_unrx;

	err = ar9170_register(ar, &udev->dev);

	/* registration done; park the device until mac80211 opens it */
	ar9170_usb_stop(ar);
	if (err)
		goto err_unrx;

	return 0;

err_unrx:
	ar9170_usb_cancel_urbs(aru);

err_freefw:
	release_firmware(aru->init_values);
	release_firmware(aru->firmware);

err_unlock:
	usb_set_intfdata(intf, NULL);
	usb_put_dev(udev);
	ieee80211_free_hw(ar->hw);
out:
	return err;
}
709
/*
 * USB disconnect: unregister from mac80211, cancel all urbs and
 * release everything acquired in ar9170_usb_probe() (firmware images,
 * device reference, ieee80211 hw).
 */
static void ar9170_usb_disconnect(struct usb_interface *intf)
{
	struct ar9170_usb *aru = usb_get_intfdata(intf);

	if (!aru)
		return;

	aru->common.state = AR9170_IDLE;
	ar9170_unregister(&aru->common);
	ar9170_usb_cancel_urbs(aru);

	release_firmware(aru->init_values);
	release_firmware(aru->firmware);

	usb_put_dev(aru->udev);
	usb_set_intfdata(intf, NULL);
	ieee80211_free_hw(aru->common.hw);
}
728
/* USB driver glue.  soft_unbind = 1 asks usbcore not to kill our urbs
 * before calling disconnect, so the teardown path can still talk to
 * the device. */
static struct usb_driver ar9170_driver = {
	.name = "ar9170usb",
	.probe = ar9170_usb_probe,
	.disconnect = ar9170_usb_disconnect,
	.id_table = ar9170_usb_ids,
	.soft_unbind = 1,
};
736
/* Module entry point: register the USB driver with usbcore. */
static int __init ar9170_init(void)
{
	return usb_register(&ar9170_driver);
}
741
/* Module exit point: unregister the USB driver again. */
static void __exit ar9170_exit(void)
{
	usb_deregister(&ar9170_driver);
}

module_init(ar9170_init);
module_exit(ar9170_exit);
diff --git a/drivers/net/wireless/ar9170/usb.h b/drivers/net/wireless/ar9170/usb.h
new file mode 100644
index 000000000000..f5852924cd64
--- /dev/null
+++ b/drivers/net/wireless/ar9170/usb.h
@@ -0,0 +1,74 @@
1/*
2 * Atheros AR9170 USB driver
3 *
4 * Driver specific definitions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, Christian Lamparter <chunkeey@web.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39#ifndef __USB_H
40#define __USB_H
41
42#include <linux/usb.h>
43#include <linux/completion.h>
44#include <linux/spinlock.h>
45#include <linux/leds.h>
46#include <net/wireless.h>
47#include <net/mac80211.h>
48#include <linux/firmware.h>
49#include "eeprom.h"
50#include "hw.h"
51#include "ar9170.h"
52
53#define AR9170_NUM_RX_URBS 16
54
55struct firmware;
56
57struct ar9170_usb {
58 struct ar9170 common;
59 struct usb_device *udev;
60 struct usb_interface *intf;
61
62 struct usb_anchor rx_submitted;
63 struct usb_anchor tx_submitted;
64
65 spinlock_t cmdlock;
66 struct completion cmd_wait;
67 int readlen;
68 u8 *readbuf;
69
70 const struct firmware *init_values;
71 const struct firmware *firmware;
72};
73
74#endif /* __USB_H */
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c
index 14c11656e82c..a54a67c425c8 100644
--- a/drivers/net/wireless/arlan-main.c
+++ b/drivers/net/wireless/arlan-main.c
@@ -1030,7 +1030,17 @@ static int arlan_mac_addr(struct net_device *dev, void *p)
1030 return 0; 1030 return 0;
1031} 1031}
1032 1032
1033 1033static const struct net_device_ops arlan_netdev_ops = {
1034 .ndo_open = arlan_open,
1035 .ndo_stop = arlan_close,
1036 .ndo_start_xmit = arlan_tx,
1037 .ndo_get_stats = arlan_statistics,
1038 .ndo_set_multicast_list = arlan_set_multicast,
1039 .ndo_change_mtu = arlan_change_mtu,
1040 .ndo_set_mac_address = arlan_mac_addr,
1041 .ndo_tx_timeout = arlan_tx_timeout,
1042 .ndo_validate_addr = eth_validate_addr,
1043};
1034 1044
1035static int __init arlan_setup_device(struct net_device *dev, int num) 1045static int __init arlan_setup_device(struct net_device *dev, int num)
1036{ 1046{
@@ -1042,14 +1052,7 @@ static int __init arlan_setup_device(struct net_device *dev, int num)
1042 ap->conf = (struct arlan_shmem *)(ap+1); 1052 ap->conf = (struct arlan_shmem *)(ap+1);
1043 1053
1044 dev->tx_queue_len = tx_queue_len; 1054 dev->tx_queue_len = tx_queue_len;
1045 dev->open = arlan_open; 1055 dev->netdev_ops = &arlan_netdev_ops;
1046 dev->stop = arlan_close;
1047 dev->hard_start_xmit = arlan_tx;
1048 dev->get_stats = arlan_statistics;
1049 dev->set_multicast_list = arlan_set_multicast;
1050 dev->change_mtu = arlan_change_mtu;
1051 dev->set_mac_address = arlan_mac_addr;
1052 dev->tx_timeout = arlan_tx_timeout;
1053 dev->watchdog_timeo = 3*HZ; 1056 dev->watchdog_timeo = 3*HZ;
1054 1057
1055 ap->irq_test_done = 0; 1058 ap->irq_test_done = 0;
diff --git a/drivers/net/wireless/ath5k/ath5k.h b/drivers/net/wireless/ath5k/ath5k.h
index 0dc2c7321c8b..0b616e72fe05 100644
--- a/drivers/net/wireless/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath5k/ath5k.h
@@ -204,9 +204,9 @@
204#define AR5K_TUNE_CWMAX_11B 1023 204#define AR5K_TUNE_CWMAX_11B 1023
205#define AR5K_TUNE_CWMAX_XR 7 205#define AR5K_TUNE_CWMAX_XR 7
206#define AR5K_TUNE_NOISE_FLOOR -72 206#define AR5K_TUNE_NOISE_FLOOR -72
207#define AR5K_TUNE_MAX_TXPOWER 60 207#define AR5K_TUNE_MAX_TXPOWER 63
208#define AR5K_TUNE_DEFAULT_TXPOWER 30 208#define AR5K_TUNE_DEFAULT_TXPOWER 25
209#define AR5K_TUNE_TPC_TXPOWER true 209#define AR5K_TUNE_TPC_TXPOWER false
210#define AR5K_TUNE_ANT_DIVERSITY true 210#define AR5K_TUNE_ANT_DIVERSITY true
211#define AR5K_TUNE_HWTXTRIES 4 211#define AR5K_TUNE_HWTXTRIES 4
212 212
@@ -551,11 +551,11 @@ enum ath5k_pkt_type {
551 */ 551 */
552#define AR5K_TXPOWER_OFDM(_r, _v) ( \ 552#define AR5K_TXPOWER_OFDM(_r, _v) ( \
553 ((0 & 1) << ((_v) + 6)) | \ 553 ((0 & 1) << ((_v) + 6)) | \
554 (((ah->ah_txpower.txp_rates[(_r)]) & 0x3f) << (_v)) \ 554 (((ah->ah_txpower.txp_rates_power_table[(_r)]) & 0x3f) << (_v)) \
555) 555)
556 556
557#define AR5K_TXPOWER_CCK(_r, _v) ( \ 557#define AR5K_TXPOWER_CCK(_r, _v) ( \
558 (ah->ah_txpower.txp_rates[(_r)] & 0x3f) << (_v) \ 558 (ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v) \
559) 559)
560 560
561/* 561/*
@@ -1085,13 +1085,25 @@ struct ath5k_hw {
1085 struct ath5k_gain ah_gain; 1085 struct ath5k_gain ah_gain;
1086 u8 ah_offset[AR5K_MAX_RF_BANKS]; 1086 u8 ah_offset[AR5K_MAX_RF_BANKS];
1087 1087
1088
1088 struct { 1089 struct {
1089 u16 txp_pcdac[AR5K_EEPROM_POWER_TABLE_SIZE]; 1090 /* Temporary tables used for interpolation */
1090 u16 txp_rates[AR5K_MAX_RATES]; 1091 u8 tmpL[AR5K_EEPROM_N_PD_GAINS]
1091 s16 txp_min; 1092 [AR5K_EEPROM_POWER_TABLE_SIZE];
1092 s16 txp_max; 1093 u8 tmpR[AR5K_EEPROM_N_PD_GAINS]
1094 [AR5K_EEPROM_POWER_TABLE_SIZE];
1095 u8 txp_pd_table[AR5K_EEPROM_POWER_TABLE_SIZE * 2];
1096 u16 txp_rates_power_table[AR5K_MAX_RATES];
1097 u8 txp_min_idx;
1093 bool txp_tpc; 1098 bool txp_tpc;
1099 /* Values in 0.25dB units */
1100 s16 txp_min_pwr;
1101 s16 txp_max_pwr;
1102 s16 txp_offset;
1094 s16 txp_ofdm; 1103 s16 txp_ofdm;
1104 /* Values in dB units */
1105 s16 txp_cck_ofdm_pwr_delta;
1106 s16 txp_cck_ofdm_gainf_delta;
1095 } ah_txpower; 1107 } ah_txpower;
1096 1108
1097 struct { 1109 struct {
@@ -1161,6 +1173,7 @@ extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_l
1161 1173
1162/* EEPROM access functions */ 1174/* EEPROM access functions */
1163extern int ath5k_eeprom_init(struct ath5k_hw *ah); 1175extern int ath5k_eeprom_init(struct ath5k_hw *ah);
1176extern void ath5k_eeprom_detach(struct ath5k_hw *ah);
1164extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac); 1177extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
1165extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah); 1178extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
1166 1179
@@ -1256,8 +1269,8 @@ extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, unsigned int ant);
1256extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah); 1269extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah);
1257extern int ath5k_hw_phy_disable(struct ath5k_hw *ah); 1270extern int ath5k_hw_phy_disable(struct ath5k_hw *ah);
1258/* TX power setup */ 1271/* TX power setup */
1259extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, unsigned int txpower); 1272extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 ee_mode, u8 txpower);
1260extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, unsigned int power); 1273extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 ee_mode, u8 txpower);
1261 1274
1262/* 1275/*
1263 * Functions used internaly 1276 * Functions used internaly
diff --git a/drivers/net/wireless/ath5k/attach.c b/drivers/net/wireless/ath5k/attach.c
index 656cb9dc833b..70d376c63aac 100644
--- a/drivers/net/wireless/ath5k/attach.c
+++ b/drivers/net/wireless/ath5k/attach.c
@@ -341,6 +341,8 @@ void ath5k_hw_detach(struct ath5k_hw *ah)
341 if (ah->ah_rf_banks != NULL) 341 if (ah->ah_rf_banks != NULL)
342 kfree(ah->ah_rf_banks); 342 kfree(ah->ah_rf_banks);
343 343
344 ath5k_eeprom_detach(ah);
345
344 /* assume interrupts are down */ 346 /* assume interrupts are down */
345 kfree(ah); 347 kfree(ah);
346} 348}
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index cad3ccf61b00..5d57d774e466 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -685,13 +685,6 @@ ath5k_pci_resume(struct pci_dev *pdev)
685 if (err) 685 if (err)
686 return err; 686 return err;
687 687
688 /*
689 * Suspend/Resume resets the PCI configuration space, so we have to
690 * re-disable the RETRY_TIMEOUT register (0x41) to keep
691 * PCI Tx retries from interfering with C3 CPU state
692 */
693 pci_write_config_byte(pdev, 0x41, 0);
694
695 err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); 688 err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
696 if (err) { 689 if (err) {
697 ATH5K_ERR(sc, "request_irq failed\n"); 690 ATH5K_ERR(sc, "request_irq failed\n");
@@ -1095,9 +1088,18 @@ ath5k_mode_setup(struct ath5k_softc *sc)
1095static inline int 1088static inline int
1096ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix) 1089ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
1097{ 1090{
1098 WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES, 1091 int rix;
1099 "hw_rix out of bounds: %x\n", hw_rix); 1092
1100 return sc->rate_idx[sc->curband->band][hw_rix]; 1093 /* return base rate on errors */
1094 if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
1095 "hw_rix out of bounds: %x\n", hw_rix))
1096 return 0;
1097
1098 rix = sc->rate_idx[sc->curband->band][hw_rix];
1099 if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
1100 rix = 0;
1101
1102 return rix;
1101} 1103}
1102 1104
1103/***************\ 1105/***************\
@@ -1216,6 +1218,9 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1216 1218
1217 pktlen = skb->len; 1219 pktlen = skb->len;
1218 1220
1221 /* FIXME: If we are in g mode and rate is a CCK rate
1222 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
1223 * from tx power (value is in dB units already) */
1219 if (info->control.hw_key) { 1224 if (info->control.hw_key) {
1220 keyidx = info->control.hw_key->hw_key_idx; 1225 keyidx = info->control.hw_key->hw_key_idx;
1221 pktlen += info->control.hw_key->icv_len; 1226 pktlen += info->control.hw_key->icv_len;
@@ -2044,6 +2049,9 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
2044 antenna = sc->bsent & 4 ? 2 : 1; 2049 antenna = sc->bsent & 4 ? 2 : 1;
2045 } 2050 }
2046 2051
2052 /* FIXME: If we are in g mode and rate is a CCK rate
2053 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
2054 * from tx power (value is in dB units already) */
2047 ds->ds_data = bf->skbaddr; 2055 ds->ds_data = bf->skbaddr;
2048 ret = ah->ah_setup_tx_desc(ah, ds, skb->len, 2056 ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
2049 ieee80211_get_hdrlen_from_skb(skb), 2057 ieee80211_get_hdrlen_from_skb(skb),
@@ -2305,7 +2313,7 @@ ath5k_init(struct ath5k_softc *sc)
2305 sc->curband = &sc->sbands[sc->curchan->band]; 2313 sc->curband = &sc->sbands[sc->curchan->band];
2306 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | 2314 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
2307 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | 2315 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
2308 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB; 2316 AR5K_INT_FATAL | AR5K_INT_GLOBAL;
2309 ret = ath5k_reset(sc, false, false); 2317 ret = ath5k_reset(sc, false, false);
2310 if (ret) 2318 if (ret)
2311 goto done; 2319 goto done;
@@ -2554,7 +2562,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2554 if (skb_headroom(skb) < padsize) { 2562 if (skb_headroom(skb) < padsize) {
2555 ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough" 2563 ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough"
2556 " headroom to pad %d\n", hdrlen, padsize); 2564 " headroom to pad %d\n", hdrlen, padsize);
2557 return NETDEV_TX_BUSY; 2565 goto drop_packet;
2558 } 2566 }
2559 skb_push(skb, padsize); 2567 skb_push(skb, padsize);
2560 memmove(skb->data, skb->data+padsize, hdrlen); 2568 memmove(skb->data, skb->data+padsize, hdrlen);
@@ -2565,7 +2573,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2565 ATH5K_ERR(sc, "no further txbuf available, dropping packet\n"); 2573 ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
2566 spin_unlock_irqrestore(&sc->txbuflock, flags); 2574 spin_unlock_irqrestore(&sc->txbuflock, flags);
2567 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb)); 2575 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
2568 return NETDEV_TX_BUSY; 2576 goto drop_packet;
2569 } 2577 }
2570 bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list); 2578 bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
2571 list_del(&bf->list); 2579 list_del(&bf->list);
@@ -2582,10 +2590,12 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2582 list_add_tail(&bf->list, &sc->txbuf); 2590 list_add_tail(&bf->list, &sc->txbuf);
2583 sc->txbuf_len++; 2591 sc->txbuf_len++;
2584 spin_unlock_irqrestore(&sc->txbuflock, flags); 2592 spin_unlock_irqrestore(&sc->txbuflock, flags);
2585 dev_kfree_skb_any(skb); 2593 goto drop_packet;
2586 return NETDEV_TX_OK;
2587 } 2594 }
2595 return NETDEV_TX_OK;
2588 2596
2597drop_packet:
2598 dev_kfree_skb_any(skb);
2589 return NETDEV_TX_OK; 2599 return NETDEV_TX_OK;
2590} 2600}
2591 2601
@@ -2608,12 +2618,6 @@ ath5k_reset(struct ath5k_softc *sc, bool stop, bool change_channel)
2608 goto err; 2618 goto err;
2609 } 2619 }
2610 2620
2611 /*
2612 * This is needed only to setup initial state
2613 * but it's best done after a reset.
2614 */
2615 ath5k_hw_set_txpower_limit(sc->ah, 0);
2616
2617 ret = ath5k_rx_start(sc); 2621 ret = ath5k_rx_start(sc);
2618 if (ret) { 2622 if (ret) {
2619 ATH5K_ERR(sc, "can't start recv logic\n"); 2623 ATH5K_ERR(sc, "can't start recv logic\n");
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index 20e0d14b41ec..822956114cd7 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -112,7 +112,7 @@ struct ath5k_softc {
112 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 112 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
113 struct ieee80211_channel channels[ATH_CHAN_MAX]; 113 struct ieee80211_channel channels[ATH_CHAN_MAX];
114 struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES]; 114 struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
115 u8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES]; 115 s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
116 enum nl80211_iftype opmode; 116 enum nl80211_iftype opmode;
117 struct ath5k_hw *ah; /* Atheros HW */ 117 struct ath5k_hw *ah; /* Atheros HW */
118 118
diff --git a/drivers/net/wireless/ath5k/desc.c b/drivers/net/wireless/ath5k/desc.c
index b40a9287a39a..dc30a2b70a6b 100644
--- a/drivers/net/wireless/ath5k/desc.c
+++ b/drivers/net/wireless/ath5k/desc.c
@@ -194,6 +194,10 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
194 return -EINVAL; 194 return -EINVAL;
195 } 195 }
196 196
197 tx_power += ah->ah_txpower.txp_offset;
198 if (tx_power > AR5K_TUNE_MAX_TXPOWER)
199 tx_power = AR5K_TUNE_MAX_TXPOWER;
200
197 /* Clear descriptor */ 201 /* Clear descriptor */
198 memset(&desc->ud.ds_tx5212, 0, sizeof(struct ath5k_hw_5212_tx_desc)); 202 memset(&desc->ud.ds_tx5212, 0, sizeof(struct ath5k_hw_5212_tx_desc));
199 203
diff --git a/drivers/net/wireless/ath5k/eeprom.c b/drivers/net/wireless/ath5k/eeprom.c
index ac45ca47ca87..c0fb3b09ba45 100644
--- a/drivers/net/wireless/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath5k/eeprom.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org> 2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com> 3 * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
4 * Copyright (c) 2008 Felix Fietkau <nbd@openwrt.org> 4 * Copyright (c) 2008-2009 Felix Fietkau <nbd@openwrt.org>
5 * 5 *
6 * Permission to use, copy, modify, and distribute this software for any 6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above 7 * purpose with or without fee is hereby granted, provided that the above
@@ -98,11 +98,6 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
98 int ret; 98 int ret;
99 u16 val; 99 u16 val;
100 100
101 /* Initial TX thermal adjustment values */
102 ee->ee_tx_clip = 4;
103 ee->ee_pwd_84 = ee->ee_pwd_90 = 1;
104 ee->ee_gain_select = 1;
105
106 /* 101 /*
107 * Read values from EEPROM and store them in the capability structure 102 * Read values from EEPROM and store them in the capability structure
108 */ 103 */
@@ -241,22 +236,22 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
241 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff); 236 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff);
242 switch(mode) { 237 switch(mode) {
243 case AR5K_EEPROM_MODE_11A: 238 case AR5K_EEPROM_MODE_11A:
244 ee->ee_ob[mode][3] = (val >> 5) & 0x7; 239 ee->ee_ob[mode][3] = (val >> 5) & 0x7;
245 ee->ee_db[mode][3] = (val >> 2) & 0x7; 240 ee->ee_db[mode][3] = (val >> 2) & 0x7;
246 ee->ee_ob[mode][2] = (val << 1) & 0x7; 241 ee->ee_ob[mode][2] = (val << 1) & 0x7;
247 242
248 AR5K_EEPROM_READ(o++, val); 243 AR5K_EEPROM_READ(o++, val);
249 ee->ee_ob[mode][2] |= (val >> 15) & 0x1; 244 ee->ee_ob[mode][2] |= (val >> 15) & 0x1;
250 ee->ee_db[mode][2] = (val >> 12) & 0x7; 245 ee->ee_db[mode][2] = (val >> 12) & 0x7;
251 ee->ee_ob[mode][1] = (val >> 9) & 0x7; 246 ee->ee_ob[mode][1] = (val >> 9) & 0x7;
252 ee->ee_db[mode][1] = (val >> 6) & 0x7; 247 ee->ee_db[mode][1] = (val >> 6) & 0x7;
253 ee->ee_ob[mode][0] = (val >> 3) & 0x7; 248 ee->ee_ob[mode][0] = (val >> 3) & 0x7;
254 ee->ee_db[mode][0] = val & 0x7; 249 ee->ee_db[mode][0] = val & 0x7;
255 break; 250 break;
256 case AR5K_EEPROM_MODE_11G: 251 case AR5K_EEPROM_MODE_11G:
257 case AR5K_EEPROM_MODE_11B: 252 case AR5K_EEPROM_MODE_11B:
258 ee->ee_ob[mode][1] = (val >> 4) & 0x7; 253 ee->ee_ob[mode][1] = (val >> 4) & 0x7;
259 ee->ee_db[mode][1] = val & 0x7; 254 ee->ee_db[mode][1] = val & 0x7;
260 break; 255 break;
261 } 256 }
262 257
@@ -504,35 +499,6 @@ ath5k_eeprom_init_modes(struct ath5k_hw *ah)
504 return 0; 499 return 0;
505} 500}
506 501
507/* Used to match PCDAC steps with power values on RF5111 chips
508 * (eeprom versions < 4). For RF5111 we have 10 pre-defined PCDAC
509 * steps that match with the power values we read from eeprom. On
510 * older eeprom versions (< 3.2) these steps are equaly spaced at
511 * 10% of the pcdac curve -until the curve reaches it's maximum-
512 * (10 steps from 0 to 100%) but on newer eeprom versions (>= 3.2)
513 * these 10 steps are spaced in a different way. This function returns
514 * the pcdac steps based on eeprom version and curve min/max so that we
515 * can have pcdac/pwr points.
516 */
517static inline void
518ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp)
519{
520 static const u16 intercepts3[] =
521 { 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100 };
522 static const u16 intercepts3_2[] =
523 { 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 };
524 const u16 *ip;
525 int i;
526
527 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_2)
528 ip = intercepts3_2;
529 else
530 ip = intercepts3;
531
532 for (i = 0; i < ARRAY_SIZE(intercepts3); i++)
533 *vp++ = (ip[i] * max + (100 - ip[i]) * min) / 100;
534}
535
536/* Read the frequency piers for each mode (mostly used on newer eeproms with 0xff 502/* Read the frequency piers for each mode (mostly used on newer eeproms with 0xff
537 * frequency mask) */ 503 * frequency mask) */
538static inline int 504static inline int
@@ -546,26 +512,25 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
546 int ret; 512 int ret;
547 u16 val; 513 u16 val;
548 514
515 ee->ee_n_piers[mode] = 0;
549 while(i < max) { 516 while(i < max) {
550 AR5K_EEPROM_READ(o++, val); 517 AR5K_EEPROM_READ(o++, val);
551 518
552 freq1 = (val >> 8) & 0xff; 519 freq1 = val & 0xff;
553 freq2 = val & 0xff; 520 if (!freq1)
554 521 break;
555 if (freq1) {
556 pc[i++].freq = ath5k_eeprom_bin2freq(ee,
557 freq1, mode);
558 ee->ee_n_piers[mode]++;
559 }
560 522
561 if (freq2) { 523 pc[i++].freq = ath5k_eeprom_bin2freq(ee,
562 pc[i++].freq = ath5k_eeprom_bin2freq(ee, 524 freq1, mode);
563 freq2, mode); 525 ee->ee_n_piers[mode]++;
564 ee->ee_n_piers[mode]++;
565 }
566 526
567 if (!freq1 || !freq2) 527 freq2 = (val >> 8) & 0xff;
528 if (!freq2)
568 break; 529 break;
530
531 pc[i++].freq = ath5k_eeprom_bin2freq(ee,
532 freq2, mode);
533 ee->ee_n_piers[mode]++;
569 } 534 }
570 535
571 /* return new offset */ 536 /* return new offset */
@@ -652,13 +617,122 @@ ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
652 return 0; 617 return 0;
653} 618}
654 619
655/* Read power calibration for RF5111 chips 620/*
621 * Read power calibration for RF5111 chips
622 *
656 * For RF5111 we have an XPD -eXternal Power Detector- curve 623 * For RF5111 we have an XPD -eXternal Power Detector- curve
657 * for each calibrated channel. Each curve has PCDAC steps on 624 * for each calibrated channel. Each curve has 0,5dB Power steps
658 * x axis and power on y axis and looks like a logarithmic 625 * on x axis and PCDAC steps (offsets) on y axis and looks like an
659 * function. To recreate the curve and pass the power values 626 * exponential function. To recreate the curve we read 11 points
660 * on the pcdac table, we read 10 points here and interpolate later. 627 * here and interpolate later.
661 */ 628 */
629
630/* Used to match PCDAC steps with power values on RF5111 chips
631 * (eeprom versions < 4). For RF5111 we have 11 pre-defined PCDAC
632 * steps that match with the power values we read from eeprom. On
633 * older eeprom versions (< 3.2) these steps are equaly spaced at
634 * 10% of the pcdac curve -until the curve reaches it's maximum-
635 * (11 steps from 0 to 100%) but on newer eeprom versions (>= 3.2)
636 * these 11 steps are spaced in a different way. This function returns
637 * the pcdac steps based on eeprom version and curve min/max so that we
638 * can have pcdac/pwr points.
639 */
640static inline void
641ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp)
642{
643 const static u16 intercepts3[] =
644 { 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100 };
645 const static u16 intercepts3_2[] =
646 { 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 };
647 const u16 *ip;
648 int i;
649
650 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_2)
651 ip = intercepts3_2;
652 else
653 ip = intercepts3;
654
655 for (i = 0; i < ARRAY_SIZE(intercepts3); i++)
656 vp[i] = (ip[i] * max + (100 - ip[i]) * min) / 100;
657}
658
659/* Convert RF5111 specific data to generic raw data
660 * used by interpolation code */
661static int
662ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode,
663 struct ath5k_chan_pcal_info *chinfo)
664{
665 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
666 struct ath5k_chan_pcal_info_rf5111 *pcinfo;
667 struct ath5k_pdgain_info *pd;
668 u8 pier, point, idx;
669 u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
670
671 /* Fill raw data for each calibration pier */
672 for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
673
674 pcinfo = &chinfo[pier].rf5111_info;
675
676 /* Allocate pd_curves for this cal pier */
677 chinfo[pier].pd_curves =
678 kcalloc(AR5K_EEPROM_N_PD_CURVES,
679 sizeof(struct ath5k_pdgain_info),
680 GFP_KERNEL);
681
682 if (!chinfo[pier].pd_curves)
683 return -ENOMEM;
684
685 /* Only one curve for RF5111
686 * find out which one and place
687 * in in pd_curves.
688 * Note: ee_x_gain is reversed here */
689 for (idx = 0; idx < AR5K_EEPROM_N_PD_CURVES; idx++) {
690
691 if (!((ee->ee_x_gain[mode] >> idx) & 0x1)) {
692 pdgain_idx[0] = idx;
693 break;
694 }
695 }
696
697 ee->ee_pd_gains[mode] = 1;
698
699 pd = &chinfo[pier].pd_curves[idx];
700
701 pd->pd_points = AR5K_EEPROM_N_PWR_POINTS_5111;
702
703 /* Allocate pd points for this curve */
704 pd->pd_step = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111,
705 sizeof(u8), GFP_KERNEL);
706 if (!pd->pd_step)
707 return -ENOMEM;
708
709 pd->pd_pwr = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111,
710 sizeof(s16), GFP_KERNEL);
711 if (!pd->pd_pwr)
712 return -ENOMEM;
713
714 /* Fill raw dataset
715 * (convert power to 0.25dB units
716 * for RF5112 combatibility) */
717 for (point = 0; point < pd->pd_points; point++) {
718
719 /* Absolute values */
720 pd->pd_pwr[point] = 2 * pcinfo->pwr[point];
721
722 /* Already sorted */
723 pd->pd_step[point] = pcinfo->pcdac[point];
724 }
725
726 /* Set min/max pwr */
727 chinfo[pier].min_pwr = pd->pd_pwr[0];
728 chinfo[pier].max_pwr = pd->pd_pwr[10];
729
730 }
731
732 return 0;
733}
734
735/* Parse EEPROM data */
662static int 736static int
663ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode) 737ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode)
664{ 738{
@@ -747,30 +821,165 @@ ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode)
747 cdata->pcdac_max, cdata->pcdac); 821 cdata->pcdac_max, cdata->pcdac);
748 } 822 }
749 823
750 return 0; 824 return ath5k_eeprom_convert_pcal_info_5111(ah, mode, pcal);
751} 825}
752 826
753/* Read power calibration for RF5112 chips 827
828/*
829 * Read power calibration for RF5112 chips
830 *
754 * For RF5112 we have 4 XPD -eXternal Power Detector- curves 831 * For RF5112 we have 4 XPD -eXternal Power Detector- curves
755 * for each calibrated channel on 0, -6, -12 and -18dbm but we only 832 * for each calibrated channel on 0, -6, -12 and -18dbm but we only
756 * use the higher (3) and the lower (0) curves. Each curve has PCDAC 833 * use the higher (3) and the lower (0) curves. Each curve has 0.5dB
757 * steps on x axis and power on y axis and looks like a linear 834 * power steps on x axis and PCDAC steps on y axis and looks like a
758 * function. To recreate the curve and pass the power values 835 * linear function. To recreate the curve and pass the power values
759 * on the pcdac table, we read 4 points for xpd 0 and 3 points 836 * on hw, we read 4 points for xpd 0 (lower gain -> max power)
760 * for xpd 3 here and interpolate later. 837 * and 3 points for xpd 3 (higher gain -> lower power) here and
838 * interpolate later.
761 * 839 *
762 * Note: Many vendors just use xpd 0 so xpd 3 is zeroed. 840 * Note: Many vendors just use xpd 0 so xpd 3 is zeroed.
763 */ 841 */
842
843/* Convert RF5112 specific data to generic raw data
844 * used by interpolation code */
845static int
846ath5k_eeprom_convert_pcal_info_5112(struct ath5k_hw *ah, int mode,
847 struct ath5k_chan_pcal_info *chinfo)
848{
849 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
850 struct ath5k_chan_pcal_info_rf5112 *pcinfo;
851 u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
852 unsigned int pier, pdg, point;
853
854 /* Fill raw data for each calibration pier */
855 for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
856
857 pcinfo = &chinfo[pier].rf5112_info;
858
859 /* Allocate pd_curves for this cal pier */
860 chinfo[pier].pd_curves =
861 kcalloc(AR5K_EEPROM_N_PD_CURVES,
862 sizeof(struct ath5k_pdgain_info),
863 GFP_KERNEL);
864
865 if (!chinfo[pier].pd_curves)
866 return -ENOMEM;
867
868 /* Fill pd_curves */
869 for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
870
871 u8 idx = pdgain_idx[pdg];
872 struct ath5k_pdgain_info *pd =
873 &chinfo[pier].pd_curves[idx];
874
875 /* Lowest gain curve (max power) */
876 if (pdg == 0) {
877 /* One more point for better accuracy */
878 pd->pd_points = AR5K_EEPROM_N_XPD0_POINTS;
879
880 /* Allocate pd points for this curve */
881 pd->pd_step = kcalloc(pd->pd_points,
882 sizeof(u8), GFP_KERNEL);
883
884 if (!pd->pd_step)
885 return -ENOMEM;
886
887 pd->pd_pwr = kcalloc(pd->pd_points,
888 sizeof(s16), GFP_KERNEL);
889
890 if (!pd->pd_pwr)
891 return -ENOMEM;
892
893
894 /* Fill raw dataset
895 * (all power levels are in 0.25dB units) */
896 pd->pd_step[0] = pcinfo->pcdac_x0[0];
897 pd->pd_pwr[0] = pcinfo->pwr_x0[0];
898
899 for (point = 1; point < pd->pd_points;
900 point++) {
901 /* Absolute values */
902 pd->pd_pwr[point] =
903 pcinfo->pwr_x0[point];
904
905 /* Deltas */
906 pd->pd_step[point] =
907 pd->pd_step[point - 1] +
908 pcinfo->pcdac_x0[point];
909 }
910
911 /* Set min power for this frequency */
912 chinfo[pier].min_pwr = pd->pd_pwr[0];
913
914 /* Highest gain curve (min power) */
915 } else if (pdg == 1) {
916
917 pd->pd_points = AR5K_EEPROM_N_XPD3_POINTS;
918
919 /* Allocate pd points for this curve */
920 pd->pd_step = kcalloc(pd->pd_points,
921 sizeof(u8), GFP_KERNEL);
922
923 if (!pd->pd_step)
924 return -ENOMEM;
925
926 pd->pd_pwr = kcalloc(pd->pd_points,
927 sizeof(s16), GFP_KERNEL);
928
929 if (!pd->pd_pwr)
930 return -ENOMEM;
931
932 /* Fill raw dataset
933 * (all power levels are in 0.25dB units) */
934 for (point = 0; point < pd->pd_points;
935 point++) {
936 /* Absolute values */
937 pd->pd_pwr[point] =
938 pcinfo->pwr_x3[point];
939
940 /* Fixed points */
941 pd->pd_step[point] =
942 pcinfo->pcdac_x3[point];
943 }
944
945 /* Since we have a higher gain curve
946 * override min power */
947 chinfo[pier].min_pwr = pd->pd_pwr[0];
948 }
949 }
950 }
951
952 return 0;
953}
954
955/* Parse EEPROM data */
764static int 956static int
765ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode) 957ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
766{ 958{
767 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 959 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
768 struct ath5k_chan_pcal_info_rf5112 *chan_pcal_info; 960 struct ath5k_chan_pcal_info_rf5112 *chan_pcal_info;
769 struct ath5k_chan_pcal_info *gen_chan_info; 961 struct ath5k_chan_pcal_info *gen_chan_info;
962 u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
770 u32 offset; 963 u32 offset;
771 unsigned int i, c; 964 u8 i, c;
772 u16 val; 965 u16 val;
773 int ret; 966 int ret;
967 u8 pd_gains = 0;
968
969 /* Count how many curves we have and
970 * identify them (which one of the 4
971 * available curves we have on each count).
972 * Curves are stored from lower (x0) to
973 * higher (x3) gain */
974 for (i = 0; i < AR5K_EEPROM_N_PD_CURVES; i++) {
975 /* ee_x_gain[mode] is x gain mask */
976 if ((ee->ee_x_gain[mode] >> i) & 0x1)
977 pdgain_idx[pd_gains++] = i;
978 }
979 ee->ee_pd_gains[mode] = pd_gains;
980
981 if (pd_gains == 0 || pd_gains > 2)
982 return -EINVAL;
774 983
775 switch (mode) { 984 switch (mode) {
776 case AR5K_EEPROM_MODE_11A: 985 case AR5K_EEPROM_MODE_11A:
@@ -808,13 +1017,13 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
808 for (i = 0; i < ee->ee_n_piers[mode]; i++) { 1017 for (i = 0; i < ee->ee_n_piers[mode]; i++) {
809 chan_pcal_info = &gen_chan_info[i].rf5112_info; 1018 chan_pcal_info = &gen_chan_info[i].rf5112_info;
810 1019
811 /* Power values in dBm * 4 1020 /* Power values in quarter dB
812 * for the lower xpd gain curve 1021 * for the lower xpd gain curve
813 * (0 dBm -> higher output power) */ 1022 * (0 dBm -> higher output power) */
814 for (c = 0; c < AR5K_EEPROM_N_XPD0_POINTS; c++) { 1023 for (c = 0; c < AR5K_EEPROM_N_XPD0_POINTS; c++) {
815 AR5K_EEPROM_READ(offset++, val); 1024 AR5K_EEPROM_READ(offset++, val);
816 chan_pcal_info->pwr_x0[c] = (val & 0xff); 1025 chan_pcal_info->pwr_x0[c] = (s8) (val & 0xff);
817 chan_pcal_info->pwr_x0[++c] = ((val >> 8) & 0xff); 1026 chan_pcal_info->pwr_x0[++c] = (s8) ((val >> 8) & 0xff);
818 } 1027 }
819 1028
820 /* PCDAC steps 1029 /* PCDAC steps
@@ -825,12 +1034,12 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
825 chan_pcal_info->pcdac_x0[2] = ((val >> 5) & 0x1f); 1034 chan_pcal_info->pcdac_x0[2] = ((val >> 5) & 0x1f);
826 chan_pcal_info->pcdac_x0[3] = ((val >> 10) & 0x1f); 1035 chan_pcal_info->pcdac_x0[3] = ((val >> 10) & 0x1f);
827 1036
828 /* Power values in dBm * 4 1037 /* Power values in quarter dB
829 * for the higher xpd gain curve 1038 * for the higher xpd gain curve
830 * (18 dBm -> lower output power) */ 1039 * (18 dBm -> lower output power) */
831 AR5K_EEPROM_READ(offset++, val); 1040 AR5K_EEPROM_READ(offset++, val);
832 chan_pcal_info->pwr_x3[0] = (val & 0xff); 1041 chan_pcal_info->pwr_x3[0] = (s8) (val & 0xff);
833 chan_pcal_info->pwr_x3[1] = ((val >> 8) & 0xff); 1042 chan_pcal_info->pwr_x3[1] = (s8) ((val >> 8) & 0xff);
834 1043
835 AR5K_EEPROM_READ(offset++, val); 1044 AR5K_EEPROM_READ(offset++, val);
836 chan_pcal_info->pwr_x3[2] = (val & 0xff); 1045 chan_pcal_info->pwr_x3[2] = (val & 0xff);
@@ -843,24 +1052,36 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
843 chan_pcal_info->pcdac_x3[2] = 63; 1052 chan_pcal_info->pcdac_x3[2] = 63;
844 1053
845 if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3) { 1054 if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3) {
846 chan_pcal_info->pcdac_x0[0] = ((val >> 8) & 0xff); 1055 chan_pcal_info->pcdac_x0[0] = ((val >> 8) & 0x3f);
847 1056
848 /* Last xpd0 power level is also channel maximum */ 1057 /* Last xpd0 power level is also channel maximum */
849 gen_chan_info[i].max_pwr = chan_pcal_info->pwr_x0[3]; 1058 gen_chan_info[i].max_pwr = chan_pcal_info->pwr_x0[3];
850 } else { 1059 } else {
851 chan_pcal_info->pcdac_x0[0] = 1; 1060 chan_pcal_info->pcdac_x0[0] = 1;
852 gen_chan_info[i].max_pwr = ((val >> 8) & 0xff); 1061 gen_chan_info[i].max_pwr = (s8) ((val >> 8) & 0xff);
853 } 1062 }
854 1063
855 /* Recreate pcdac_x0 table for this channel using pcdac steps */
856 chan_pcal_info->pcdac_x0[1] += chan_pcal_info->pcdac_x0[0];
857 chan_pcal_info->pcdac_x0[2] += chan_pcal_info->pcdac_x0[1];
858 chan_pcal_info->pcdac_x0[3] += chan_pcal_info->pcdac_x0[2];
859 } 1064 }
860 1065
861 return 0; 1066 return ath5k_eeprom_convert_pcal_info_5112(ah, mode, gen_chan_info);
862} 1067}
863 1068
1069
1070/*
1071 * Read power calibration for RF2413 chips
1072 *
1073 * For RF2413 we have a Power to PDDAC table (Power Detector)
1074 * instead of a PCDAC and 4 pd gain curves for each calibrated channel.
1075 * Each curve has power on x axis in 0.5 db steps and PDDADC steps on y
1076 * axis and looks like an exponential function like the RF5111 curve.
1077 *
1078 * To recreate the curves we read here the points and interpolate
1079 * later. Note that in most cases only 2 (higher and lower) curves are
1080 * used (like RF5112) but vendors have the oportunity to include all
1081 * 4 curves on eeprom. The final curve (higher power) has an extra
1082 * point for better accuracy like RF5112.
1083 */
1084
864/* For RF2413 power calibration data doesn't start on a fixed location and 1085/* For RF2413 power calibration data doesn't start on a fixed location and
865 * if a mode is not supported, it's section is missing -not zeroed-. 1086 * if a mode is not supported, it's section is missing -not zeroed-.
866 * So we need to calculate the starting offset for each section by using 1087 * So we need to calculate the starting offset for each section by using
@@ -890,13 +1111,15 @@ ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode)
890 switch(mode) { 1111 switch(mode) {
891 case AR5K_EEPROM_MODE_11G: 1112 case AR5K_EEPROM_MODE_11G:
892 if (AR5K_EEPROM_HDR_11B(ee->ee_header)) 1113 if (AR5K_EEPROM_HDR_11B(ee->ee_header))
893 offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11B) + 1114 offset += ath5k_pdgains_size_2413(ee,
894 AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; 1115 AR5K_EEPROM_MODE_11B) +
1116 AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
895 /* fall through */ 1117 /* fall through */
896 case AR5K_EEPROM_MODE_11B: 1118 case AR5K_EEPROM_MODE_11B:
897 if (AR5K_EEPROM_HDR_11A(ee->ee_header)) 1119 if (AR5K_EEPROM_HDR_11A(ee->ee_header))
898 offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11A) + 1120 offset += ath5k_pdgains_size_2413(ee,
899 AR5K_EEPROM_N_5GHZ_CHAN / 2; 1121 AR5K_EEPROM_MODE_11A) +
1122 AR5K_EEPROM_N_5GHZ_CHAN / 2;
900 /* fall through */ 1123 /* fall through */
901 case AR5K_EEPROM_MODE_11A: 1124 case AR5K_EEPROM_MODE_11A:
902 break; 1125 break;
@@ -907,37 +1130,118 @@ ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode)
907 return offset; 1130 return offset;
908} 1131}
909 1132
910/* Read power calibration for RF2413 chips 1133/* Convert RF2413 specific data to generic raw data
911 * For RF2413 we have a PDDAC table (Power Detector) instead 1134 * used by interpolation code */
912 * of a PCDAC and 4 pd gain curves for each calibrated channel. 1135static int
913 * Each curve has PDDAC steps on x axis and power on y axis and 1136ath5k_eeprom_convert_pcal_info_2413(struct ath5k_hw *ah, int mode,
914 * looks like an exponential function. To recreate the curves 1137 struct ath5k_chan_pcal_info *chinfo)
915 * we read here the points and interpolate later. Note that 1138{
916 * in most cases only higher and lower curves are used (like 1139 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
917 * RF5112) but vendors have the oportunity to include all 4 1140 struct ath5k_chan_pcal_info_rf2413 *pcinfo;
918 * curves on eeprom. The final curve (higher power) has an extra 1141 u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
919 * point for better accuracy like RF5112. 1142 unsigned int pier, pdg, point;
920 */ 1143
1144 /* Fill raw data for each calibration pier */
1145 for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
1146
1147 pcinfo = &chinfo[pier].rf2413_info;
1148
1149 /* Allocate pd_curves for this cal pier */
1150 chinfo[pier].pd_curves =
1151 kcalloc(AR5K_EEPROM_N_PD_CURVES,
1152 sizeof(struct ath5k_pdgain_info),
1153 GFP_KERNEL);
1154
1155 if (!chinfo[pier].pd_curves)
1156 return -ENOMEM;
1157
1158 /* Fill pd_curves */
1159 for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
1160
1161 u8 idx = pdgain_idx[pdg];
1162 struct ath5k_pdgain_info *pd =
1163 &chinfo[pier].pd_curves[idx];
1164
1165 /* One more point for the highest power
1166 * curve (lowest gain) */
1167 if (pdg == ee->ee_pd_gains[mode] - 1)
1168 pd->pd_points = AR5K_EEPROM_N_PD_POINTS;
1169 else
1170 pd->pd_points = AR5K_EEPROM_N_PD_POINTS - 1;
1171
1172 /* Allocate pd points for this curve */
1173 pd->pd_step = kcalloc(pd->pd_points,
1174 sizeof(u8), GFP_KERNEL);
1175
1176 if (!pd->pd_step)
1177 return -ENOMEM;
1178
1179 pd->pd_pwr = kcalloc(pd->pd_points,
1180 sizeof(s16), GFP_KERNEL);
1181
1182 if (!pd->pd_pwr)
1183 return -ENOMEM;
1184
1185 /* Fill raw dataset
1186 * convert all pwr levels to
1187 * quarter dB for RF5112 combatibility */
1188 pd->pd_step[0] = pcinfo->pddac_i[pdg];
1189 pd->pd_pwr[0] = 4 * pcinfo->pwr_i[pdg];
1190
1191 for (point = 1; point < pd->pd_points; point++) {
1192
1193 pd->pd_pwr[point] = pd->pd_pwr[point - 1] +
1194 2 * pcinfo->pwr[pdg][point - 1];
1195
1196 pd->pd_step[point] = pd->pd_step[point - 1] +
1197 pcinfo->pddac[pdg][point - 1];
1198
1199 }
1200
1201 /* Highest gain curve -> min power */
1202 if (pdg == 0)
1203 chinfo[pier].min_pwr = pd->pd_pwr[0];
1204
1205 /* Lowest gain curve -> max power */
1206 if (pdg == ee->ee_pd_gains[mode] - 1)
1207 chinfo[pier].max_pwr =
1208 pd->pd_pwr[pd->pd_points - 1];
1209 }
1210 }
1211
1212 return 0;
1213}
1214
1215/* Parse EEPROM data */
921static int 1216static int
922ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode) 1217ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
923{ 1218{
924 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 1219 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
925 struct ath5k_chan_pcal_info_rf2413 *chan_pcal_info; 1220 struct ath5k_chan_pcal_info_rf2413 *pcinfo;
926 struct ath5k_chan_pcal_info *gen_chan_info; 1221 struct ath5k_chan_pcal_info *chinfo;
927 unsigned int i, c; 1222 u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
928 u32 offset; 1223 u32 offset;
929 int ret; 1224 int idx, i, ret;
930 u16 val; 1225 u16 val;
931 u8 pd_gains = 0; 1226 u8 pd_gains = 0;
932 1227
933 if (ee->ee_x_gain[mode] & 0x1) pd_gains++; 1228 /* Count how many curves we have and
934 if ((ee->ee_x_gain[mode] >> 1) & 0x1) pd_gains++; 1229 * identify them (which one of the 4
935 if ((ee->ee_x_gain[mode] >> 2) & 0x1) pd_gains++; 1230 * available curves we have on each count).
936 if ((ee->ee_x_gain[mode] >> 3) & 0x1) pd_gains++; 1231 * Curves are stored from higher to
1232 * lower gain so we go backwards */
1233 for (idx = AR5K_EEPROM_N_PD_CURVES - 1; idx >= 0; idx--) {
1234 /* ee_x_gain[mode] is x gain mask */
1235 if ((ee->ee_x_gain[mode] >> idx) & 0x1)
1236 pdgain_idx[pd_gains++] = idx;
1237
1238 }
937 ee->ee_pd_gains[mode] = pd_gains; 1239 ee->ee_pd_gains[mode] = pd_gains;
938 1240
1241 if (pd_gains == 0)
1242 return -EINVAL;
1243
939 offset = ath5k_cal_data_offset_2413(ee, mode); 1244 offset = ath5k_cal_data_offset_2413(ee, mode);
940 ee->ee_n_piers[mode] = 0;
941 switch (mode) { 1245 switch (mode) {
942 case AR5K_EEPROM_MODE_11A: 1246 case AR5K_EEPROM_MODE_11A:
943 if (!AR5K_EEPROM_HDR_11A(ee->ee_header)) 1247 if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
@@ -945,7 +1249,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
945 1249
946 ath5k_eeprom_init_11a_pcal_freq(ah, offset); 1250 ath5k_eeprom_init_11a_pcal_freq(ah, offset);
947 offset += AR5K_EEPROM_N_5GHZ_CHAN / 2; 1251 offset += AR5K_EEPROM_N_5GHZ_CHAN / 2;
948 gen_chan_info = ee->ee_pwr_cal_a; 1252 chinfo = ee->ee_pwr_cal_a;
949 break; 1253 break;
950 case AR5K_EEPROM_MODE_11B: 1254 case AR5K_EEPROM_MODE_11B:
951 if (!AR5K_EEPROM_HDR_11B(ee->ee_header)) 1255 if (!AR5K_EEPROM_HDR_11B(ee->ee_header))
@@ -953,7 +1257,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
953 1257
954 ath5k_eeprom_init_11bg_2413(ah, mode, offset); 1258 ath5k_eeprom_init_11bg_2413(ah, mode, offset);
955 offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; 1259 offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
956 gen_chan_info = ee->ee_pwr_cal_b; 1260 chinfo = ee->ee_pwr_cal_b;
957 break; 1261 break;
958 case AR5K_EEPROM_MODE_11G: 1262 case AR5K_EEPROM_MODE_11G:
959 if (!AR5K_EEPROM_HDR_11G(ee->ee_header)) 1263 if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
@@ -961,41 +1265,35 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
961 1265
962 ath5k_eeprom_init_11bg_2413(ah, mode, offset); 1266 ath5k_eeprom_init_11bg_2413(ah, mode, offset);
963 offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; 1267 offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
964 gen_chan_info = ee->ee_pwr_cal_g; 1268 chinfo = ee->ee_pwr_cal_g;
965 break; 1269 break;
966 default: 1270 default:
967 return -EINVAL; 1271 return -EINVAL;
968 } 1272 }
969 1273
970 if (pd_gains == 0)
971 return 0;
972
973 for (i = 0; i < ee->ee_n_piers[mode]; i++) { 1274 for (i = 0; i < ee->ee_n_piers[mode]; i++) {
974 chan_pcal_info = &gen_chan_info[i].rf2413_info; 1275 pcinfo = &chinfo[i].rf2413_info;
975 1276
976 /* 1277 /*
977 * Read pwr_i, pddac_i and the first 1278 * Read pwr_i, pddac_i and the first
978 * 2 pd points (pwr, pddac) 1279 * 2 pd points (pwr, pddac)
979 */ 1280 */
980 AR5K_EEPROM_READ(offset++, val); 1281 AR5K_EEPROM_READ(offset++, val);
981 chan_pcal_info->pwr_i[0] = val & 0x1f; 1282 pcinfo->pwr_i[0] = val & 0x1f;
982 chan_pcal_info->pddac_i[0] = (val >> 5) & 0x7f; 1283 pcinfo->pddac_i[0] = (val >> 5) & 0x7f;
983 chan_pcal_info->pwr[0][0] = 1284 pcinfo->pwr[0][0] = (val >> 12) & 0xf;
984 (val >> 12) & 0xf;
985 1285
986 AR5K_EEPROM_READ(offset++, val); 1286 AR5K_EEPROM_READ(offset++, val);
987 chan_pcal_info->pddac[0][0] = val & 0x3f; 1287 pcinfo->pddac[0][0] = val & 0x3f;
988 chan_pcal_info->pwr[0][1] = (val >> 6) & 0xf; 1288 pcinfo->pwr[0][1] = (val >> 6) & 0xf;
989 chan_pcal_info->pddac[0][1] = 1289 pcinfo->pddac[0][1] = (val >> 10) & 0x3f;
990 (val >> 10) & 0x3f;
991 1290
992 AR5K_EEPROM_READ(offset++, val); 1291 AR5K_EEPROM_READ(offset++, val);
993 chan_pcal_info->pwr[0][2] = val & 0xf; 1292 pcinfo->pwr[0][2] = val & 0xf;
994 chan_pcal_info->pddac[0][2] = 1293 pcinfo->pddac[0][2] = (val >> 4) & 0x3f;
995 (val >> 4) & 0x3f;
996 1294
997 chan_pcal_info->pwr[0][3] = 0; 1295 pcinfo->pwr[0][3] = 0;
998 chan_pcal_info->pddac[0][3] = 0; 1296 pcinfo->pddac[0][3] = 0;
999 1297
1000 if (pd_gains > 1) { 1298 if (pd_gains > 1) {
1001 /* 1299 /*
@@ -1003,44 +1301,36 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
1003 * so it only has 2 pd points. 1301 * so it only has 2 pd points.
1004 * Continue wih pd gain 1. 1302 * Continue wih pd gain 1.
1005 */ 1303 */
1006 chan_pcal_info->pwr_i[1] = (val >> 10) & 0x1f; 1304 pcinfo->pwr_i[1] = (val >> 10) & 0x1f;
1007 1305
1008 chan_pcal_info->pddac_i[1] = (val >> 15) & 0x1; 1306 pcinfo->pddac_i[1] = (val >> 15) & 0x1;
1009 AR5K_EEPROM_READ(offset++, val); 1307 AR5K_EEPROM_READ(offset++, val);
1010 chan_pcal_info->pddac_i[1] |= (val & 0x3F) << 1; 1308 pcinfo->pddac_i[1] |= (val & 0x3F) << 1;
1011 1309
1012 chan_pcal_info->pwr[1][0] = (val >> 6) & 0xf; 1310 pcinfo->pwr[1][0] = (val >> 6) & 0xf;
1013 chan_pcal_info->pddac[1][0] = 1311 pcinfo->pddac[1][0] = (val >> 10) & 0x3f;
1014 (val >> 10) & 0x3f;
1015 1312
1016 AR5K_EEPROM_READ(offset++, val); 1313 AR5K_EEPROM_READ(offset++, val);
1017 chan_pcal_info->pwr[1][1] = val & 0xf; 1314 pcinfo->pwr[1][1] = val & 0xf;
1018 chan_pcal_info->pddac[1][1] = 1315 pcinfo->pddac[1][1] = (val >> 4) & 0x3f;
1019 (val >> 4) & 0x3f; 1316 pcinfo->pwr[1][2] = (val >> 10) & 0xf;
1020 chan_pcal_info->pwr[1][2] = 1317
1021 (val >> 10) & 0xf; 1318 pcinfo->pddac[1][2] = (val >> 14) & 0x3;
1022
1023 chan_pcal_info->pddac[1][2] =
1024 (val >> 14) & 0x3;
1025 AR5K_EEPROM_READ(offset++, val); 1319 AR5K_EEPROM_READ(offset++, val);
1026 chan_pcal_info->pddac[1][2] |= 1320 pcinfo->pddac[1][2] |= (val & 0xF) << 2;
1027 (val & 0xF) << 2;
1028 1321
1029 chan_pcal_info->pwr[1][3] = 0; 1322 pcinfo->pwr[1][3] = 0;
1030 chan_pcal_info->pddac[1][3] = 0; 1323 pcinfo->pddac[1][3] = 0;
1031 } else if (pd_gains == 1) { 1324 } else if (pd_gains == 1) {
1032 /* 1325 /*
1033 * Pd gain 0 is the last one so 1326 * Pd gain 0 is the last one so
1034 * read the extra point. 1327 * read the extra point.
1035 */ 1328 */
1036 chan_pcal_info->pwr[0][3] = 1329 pcinfo->pwr[0][3] = (val >> 10) & 0xf;
1037 (val >> 10) & 0xf;
1038 1330
1039 chan_pcal_info->pddac[0][3] = 1331 pcinfo->pddac[0][3] = (val >> 14) & 0x3;
1040 (val >> 14) & 0x3;
1041 AR5K_EEPROM_READ(offset++, val); 1332 AR5K_EEPROM_READ(offset++, val);
1042 chan_pcal_info->pddac[0][3] |= 1333 pcinfo->pddac[0][3] |= (val & 0xF) << 2;
1043 (val & 0xF) << 2;
1044 } 1334 }
1045 1335
1046 /* 1336 /*
@@ -1048,105 +1338,65 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
1048 * as above. 1338 * as above.
1049 */ 1339 */
1050 if (pd_gains > 2) { 1340 if (pd_gains > 2) {
1051 chan_pcal_info->pwr_i[2] = (val >> 4) & 0x1f; 1341 pcinfo->pwr_i[2] = (val >> 4) & 0x1f;
1052 chan_pcal_info->pddac_i[2] = (val >> 9) & 0x7f; 1342 pcinfo->pddac_i[2] = (val >> 9) & 0x7f;
1053 1343
1054 AR5K_EEPROM_READ(offset++, val); 1344 AR5K_EEPROM_READ(offset++, val);
1055 chan_pcal_info->pwr[2][0] = 1345 pcinfo->pwr[2][0] = (val >> 0) & 0xf;
1056 (val >> 0) & 0xf; 1346 pcinfo->pddac[2][0] = (val >> 4) & 0x3f;
1057 chan_pcal_info->pddac[2][0] = 1347 pcinfo->pwr[2][1] = (val >> 10) & 0xf;
1058 (val >> 4) & 0x3f; 1348
1059 chan_pcal_info->pwr[2][1] = 1349 pcinfo->pddac[2][1] = (val >> 14) & 0x3;
1060 (val >> 10) & 0xf;
1061
1062 chan_pcal_info->pddac[2][1] =
1063 (val >> 14) & 0x3;
1064 AR5K_EEPROM_READ(offset++, val); 1350 AR5K_EEPROM_READ(offset++, val);
1065 chan_pcal_info->pddac[2][1] |= 1351 pcinfo->pddac[2][1] |= (val & 0xF) << 2;
1066 (val & 0xF) << 2;
1067 1352
1068 chan_pcal_info->pwr[2][2] = 1353 pcinfo->pwr[2][2] = (val >> 4) & 0xf;
1069 (val >> 4) & 0xf; 1354 pcinfo->pddac[2][2] = (val >> 8) & 0x3f;
1070 chan_pcal_info->pddac[2][2] =
1071 (val >> 8) & 0x3f;
1072 1355
1073 chan_pcal_info->pwr[2][3] = 0; 1356 pcinfo->pwr[2][3] = 0;
1074 chan_pcal_info->pddac[2][3] = 0; 1357 pcinfo->pddac[2][3] = 0;
1075 } else if (pd_gains == 2) { 1358 } else if (pd_gains == 2) {
1076 chan_pcal_info->pwr[1][3] = 1359 pcinfo->pwr[1][3] = (val >> 4) & 0xf;
1077 (val >> 4) & 0xf; 1360 pcinfo->pddac[1][3] = (val >> 8) & 0x3f;
1078 chan_pcal_info->pddac[1][3] =
1079 (val >> 8) & 0x3f;
1080 } 1361 }
1081 1362
1082 if (pd_gains > 3) { 1363 if (pd_gains > 3) {
1083 chan_pcal_info->pwr_i[3] = (val >> 14) & 0x3; 1364 pcinfo->pwr_i[3] = (val >> 14) & 0x3;
1084 AR5K_EEPROM_READ(offset++, val); 1365 AR5K_EEPROM_READ(offset++, val);
1085 chan_pcal_info->pwr_i[3] |= ((val >> 0) & 0x7) << 2; 1366 pcinfo->pwr_i[3] |= ((val >> 0) & 0x7) << 2;
1086 1367
1087 chan_pcal_info->pddac_i[3] = (val >> 3) & 0x7f; 1368 pcinfo->pddac_i[3] = (val >> 3) & 0x7f;
1088 chan_pcal_info->pwr[3][0] = 1369 pcinfo->pwr[3][0] = (val >> 10) & 0xf;
1089 (val >> 10) & 0xf; 1370 pcinfo->pddac[3][0] = (val >> 14) & 0x3;
1090 chan_pcal_info->pddac[3][0] =
1091 (val >> 14) & 0x3;
1092 1371
1093 AR5K_EEPROM_READ(offset++, val); 1372 AR5K_EEPROM_READ(offset++, val);
1094 chan_pcal_info->pddac[3][0] |= 1373 pcinfo->pddac[3][0] |= (val & 0xF) << 2;
1095 (val & 0xF) << 2; 1374 pcinfo->pwr[3][1] = (val >> 4) & 0xf;
1096 chan_pcal_info->pwr[3][1] = 1375 pcinfo->pddac[3][1] = (val >> 8) & 0x3f;
1097 (val >> 4) & 0xf; 1376
1098 chan_pcal_info->pddac[3][1] = 1377 pcinfo->pwr[3][2] = (val >> 14) & 0x3;
1099 (val >> 8) & 0x3f;
1100
1101 chan_pcal_info->pwr[3][2] =
1102 (val >> 14) & 0x3;
1103 AR5K_EEPROM_READ(offset++, val); 1378 AR5K_EEPROM_READ(offset++, val);
1104 chan_pcal_info->pwr[3][2] |= 1379 pcinfo->pwr[3][2] |= ((val >> 0) & 0x3) << 2;
1105 ((val >> 0) & 0x3) << 2;
1106 1380
1107 chan_pcal_info->pddac[3][2] = 1381 pcinfo->pddac[3][2] = (val >> 2) & 0x3f;
1108 (val >> 2) & 0x3f; 1382 pcinfo->pwr[3][3] = (val >> 8) & 0xf;
1109 chan_pcal_info->pwr[3][3] =
1110 (val >> 8) & 0xf;
1111 1383
1112 chan_pcal_info->pddac[3][3] = 1384 pcinfo->pddac[3][3] = (val >> 12) & 0xF;
1113 (val >> 12) & 0xF;
1114 AR5K_EEPROM_READ(offset++, val); 1385 AR5K_EEPROM_READ(offset++, val);
1115 chan_pcal_info->pddac[3][3] |= 1386 pcinfo->pddac[3][3] |= ((val >> 0) & 0x3) << 4;
1116 ((val >> 0) & 0x3) << 4;
1117 } else if (pd_gains == 3) { 1387 } else if (pd_gains == 3) {
1118 chan_pcal_info->pwr[2][3] = 1388 pcinfo->pwr[2][3] = (val >> 14) & 0x3;
1119 (val >> 14) & 0x3;
1120 AR5K_EEPROM_READ(offset++, val); 1389 AR5K_EEPROM_READ(offset++, val);
1121 chan_pcal_info->pwr[2][3] |= 1390 pcinfo->pwr[2][3] |= ((val >> 0) & 0x3) << 2;
1122 ((val >> 0) & 0x3) << 2;
1123
1124 chan_pcal_info->pddac[2][3] =
1125 (val >> 2) & 0x3f;
1126 }
1127 1391
1128 for (c = 0; c < pd_gains; c++) { 1392 pcinfo->pddac[2][3] = (val >> 2) & 0x3f;
1129 /* Recreate pwr table for this channel using pwr steps */
1130 chan_pcal_info->pwr[c][0] += chan_pcal_info->pwr_i[c] * 2;
1131 chan_pcal_info->pwr[c][1] += chan_pcal_info->pwr[c][0];
1132 chan_pcal_info->pwr[c][2] += chan_pcal_info->pwr[c][1];
1133 chan_pcal_info->pwr[c][3] += chan_pcal_info->pwr[c][2];
1134 if (chan_pcal_info->pwr[c][3] == chan_pcal_info->pwr[c][2])
1135 chan_pcal_info->pwr[c][3] = 0;
1136
1137 /* Recreate pddac table for this channel using pddac steps */
1138 chan_pcal_info->pddac[c][0] += chan_pcal_info->pddac_i[c];
1139 chan_pcal_info->pddac[c][1] += chan_pcal_info->pddac[c][0];
1140 chan_pcal_info->pddac[c][2] += chan_pcal_info->pddac[c][1];
1141 chan_pcal_info->pddac[c][3] += chan_pcal_info->pddac[c][2];
1142 if (chan_pcal_info->pddac[c][3] == chan_pcal_info->pddac[c][2])
1143 chan_pcal_info->pddac[c][3] = 0;
1144 } 1393 }
1145 } 1394 }
1146 1395
1147 return 0; 1396 return ath5k_eeprom_convert_pcal_info_2413(ah, mode, chinfo);
1148} 1397}
1149 1398
1399
1150/* 1400/*
1151 * Read per rate target power (this is the maximum tx power 1401 * Read per rate target power (this is the maximum tx power
1152 * supported by the card). This info is used when setting 1402 * supported by the card). This info is used when setting
@@ -1154,11 +1404,12 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
1154 * 1404 *
1155 * This also works for v5 EEPROMs. 1405 * This also works for v5 EEPROMs.
1156 */ 1406 */
1157static int ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode) 1407static int
1408ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
1158{ 1409{
1159 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 1410 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1160 struct ath5k_rate_pcal_info *rate_pcal_info; 1411 struct ath5k_rate_pcal_info *rate_pcal_info;
1161 u16 *rate_target_pwr_num; 1412 u8 *rate_target_pwr_num;
1162 u32 offset; 1413 u32 offset;
1163 u16 val; 1414 u16 val;
1164 int ret, i; 1415 int ret, i;
@@ -1264,7 +1515,9 @@ ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah)
1264 else 1515 else
1265 read_pcal = ath5k_eeprom_read_pcal_info_5111; 1516 read_pcal = ath5k_eeprom_read_pcal_info_5111;
1266 1517
1267 for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++) { 1518
1519 for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G;
1520 mode++) {
1268 err = read_pcal(ah, mode); 1521 err = read_pcal(ah, mode);
1269 if (err) 1522 if (err)
1270 return err; 1523 return err;
@@ -1277,6 +1530,62 @@ ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah)
1277 return 0; 1530 return 0;
1278} 1531}
1279 1532
1533static int
1534ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
1535{
1536 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1537 struct ath5k_chan_pcal_info *chinfo;
1538 u8 pier, pdg;
1539
1540 switch (mode) {
1541 case AR5K_EEPROM_MODE_11A:
1542 if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
1543 return 0;
1544 chinfo = ee->ee_pwr_cal_a;
1545 break;
1546 case AR5K_EEPROM_MODE_11B:
1547 if (!AR5K_EEPROM_HDR_11B(ee->ee_header))
1548 return 0;
1549 chinfo = ee->ee_pwr_cal_b;
1550 break;
1551 case AR5K_EEPROM_MODE_11G:
1552 if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
1553 return 0;
1554 chinfo = ee->ee_pwr_cal_g;
1555 break;
1556 default:
1557 return -EINVAL;
1558 }
1559
1560 for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
1561 if (!chinfo[pier].pd_curves)
1562 continue;
1563
1564 for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
1565 struct ath5k_pdgain_info *pd =
1566 &chinfo[pier].pd_curves[pdg];
1567
1568 if (pd != NULL) {
1569 kfree(pd->pd_step);
1570 kfree(pd->pd_pwr);
1571 }
1572 }
1573
1574 kfree(chinfo[pier].pd_curves);
1575 }
1576
1577 return 0;
1578}
1579
1580void
1581ath5k_eeprom_detach(struct ath5k_hw *ah)
1582{
1583 u8 mode;
1584
1585 for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++)
1586 ath5k_eeprom_free_pcal_info(ah, mode);
1587}
1588
1280/* Read conformance test limits used for regulatory control */ 1589/* Read conformance test limits used for regulatory control */
1281static int 1590static int
1282ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah) 1591ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
@@ -1457,3 +1766,4 @@ bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah)
1457 else 1766 else
1458 return false; 1767 return false;
1459} 1768}
1769
diff --git a/drivers/net/wireless/ath5k/eeprom.h b/drivers/net/wireless/ath5k/eeprom.h
index 1deebc0257d4..b0c0606dea0b 100644
--- a/drivers/net/wireless/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath5k/eeprom.h
@@ -173,6 +173,7 @@
173#define AR5K_EEPROM_N_5GHZ_CHAN 10 173#define AR5K_EEPROM_N_5GHZ_CHAN 10
174#define AR5K_EEPROM_N_2GHZ_CHAN 3 174#define AR5K_EEPROM_N_2GHZ_CHAN 3
175#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4 175#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4
176#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4
176#define AR5K_EEPROM_MAX_CHAN 10 177#define AR5K_EEPROM_MAX_CHAN 10
177#define AR5K_EEPROM_N_PWR_POINTS_5111 11 178#define AR5K_EEPROM_N_PWR_POINTS_5111 11
178#define AR5K_EEPROM_N_PCDAC 11 179#define AR5K_EEPROM_N_PCDAC 11
@@ -193,7 +194,7 @@
193#define AR5K_EEPROM_SCALE_OC_DELTA(_x) (((_x) * 2) / 10) 194#define AR5K_EEPROM_SCALE_OC_DELTA(_x) (((_x) * 2) / 10)
194#define AR5K_EEPROM_N_CTLS(_v) AR5K_EEPROM_OFF(_v, 16, 32) 195#define AR5K_EEPROM_N_CTLS(_v) AR5K_EEPROM_OFF(_v, 16, 32)
195#define AR5K_EEPROM_MAX_CTLS 32 196#define AR5K_EEPROM_MAX_CTLS 32
196#define AR5K_EEPROM_N_XPD_PER_CHANNEL 4 197#define AR5K_EEPROM_N_PD_CURVES 4
197#define AR5K_EEPROM_N_XPD0_POINTS 4 198#define AR5K_EEPROM_N_XPD0_POINTS 4
198#define AR5K_EEPROM_N_XPD3_POINTS 3 199#define AR5K_EEPROM_N_XPD3_POINTS 3
199#define AR5K_EEPROM_N_PD_GAINS 4 200#define AR5K_EEPROM_N_PD_GAINS 4
@@ -232,7 +233,7 @@ enum ath5k_ctl_mode {
232 AR5K_CTL_11B = 1, 233 AR5K_CTL_11B = 1,
233 AR5K_CTL_11G = 2, 234 AR5K_CTL_11G = 2,
234 AR5K_CTL_TURBO = 3, 235 AR5K_CTL_TURBO = 3,
235 AR5K_CTL_108G = 4, 236 AR5K_CTL_TURBOG = 4,
236 AR5K_CTL_2GHT20 = 5, 237 AR5K_CTL_2GHT20 = 5,
237 AR5K_CTL_5GHT20 = 6, 238 AR5K_CTL_5GHT20 = 6,
238 AR5K_CTL_2GHT40 = 7, 239 AR5K_CTL_2GHT40 = 7,
@@ -240,65 +241,114 @@ enum ath5k_ctl_mode {
240 AR5K_CTL_MODE_M = 15, 241 AR5K_CTL_MODE_M = 15,
241}; 242};
242 243
244/* Default CTL ids for the 3 main reg domains.
245 * Atheros only uses these by default but vendors
246 * can have up to 32 different CTLs for different
247 * scenarios. Note that theese values are ORed with
248 * the mode id (above) so we can have up to 24 CTL
249 * datasets out of these 3 main regdomains. That leaves
250 * 8 ids that can be used by vendors and since 0x20 is
251 * missing from HAL sources i guess this is the set of
252 * custom CTLs vendors can use. */
253#define AR5K_CTL_FCC 0x10
254#define AR5K_CTL_CUSTOM 0x20
255#define AR5K_CTL_ETSI 0x30
256#define AR5K_CTL_MKK 0x40
257
258/* Indicates a CTL with only mode set and
259 * no reg domain mapping, such CTLs are used
260 * for world roaming domains or simply when
261 * a reg domain is not set */
262#define AR5K_CTL_NO_REGDOMAIN 0xf0
263
264/* Indicates an empty (invalid) CTL */
265#define AR5K_CTL_NO_CTL 0xff
266
243/* Per channel calibration data, used for power table setup */ 267/* Per channel calibration data, used for power table setup */
244struct ath5k_chan_pcal_info_rf5111 { 268struct ath5k_chan_pcal_info_rf5111 {
245 /* Power levels in half dbm units 269 /* Power levels in half dbm units
246 * for one power curve. */ 270 * for one power curve. */
247 u8 pwr[AR5K_EEPROM_N_PWR_POINTS_5111]; 271 u8 pwr[AR5K_EEPROM_N_PWR_POINTS_5111];
248 /* PCDAC table steps 272 /* PCDAC table steps
249 * for the above values */ 273 * for the above values */
250 u8 pcdac[AR5K_EEPROM_N_PWR_POINTS_5111]; 274 u8 pcdac[AR5K_EEPROM_N_PWR_POINTS_5111];
251 /* Starting PCDAC step */ 275 /* Starting PCDAC step */
252 u8 pcdac_min; 276 u8 pcdac_min;
253 /* Final PCDAC step */ 277 /* Final PCDAC step */
254 u8 pcdac_max; 278 u8 pcdac_max;
255}; 279};
256 280
257struct ath5k_chan_pcal_info_rf5112 { 281struct ath5k_chan_pcal_info_rf5112 {
258 /* Power levels in quarter dBm units 282 /* Power levels in quarter dBm units
259 * for lower (0) and higher (3) 283 * for lower (0) and higher (3)
260 * level curves */ 284 * level curves in 0.25dB units */
261 s8 pwr_x0[AR5K_EEPROM_N_XPD0_POINTS]; 285 s8 pwr_x0[AR5K_EEPROM_N_XPD0_POINTS];
262 s8 pwr_x3[AR5K_EEPROM_N_XPD3_POINTS]; 286 s8 pwr_x3[AR5K_EEPROM_N_XPD3_POINTS];
263 /* PCDAC table steps 287 /* PCDAC table steps
264 * for the above values */ 288 * for the above values */
265 u8 pcdac_x0[AR5K_EEPROM_N_XPD0_POINTS]; 289 u8 pcdac_x0[AR5K_EEPROM_N_XPD0_POINTS];
266 u8 pcdac_x3[AR5K_EEPROM_N_XPD3_POINTS]; 290 u8 pcdac_x3[AR5K_EEPROM_N_XPD3_POINTS];
267}; 291};
268 292
269struct ath5k_chan_pcal_info_rf2413 { 293struct ath5k_chan_pcal_info_rf2413 {
270 /* Starting pwr/pddac values */ 294 /* Starting pwr/pddac values */
271 s8 pwr_i[AR5K_EEPROM_N_PD_GAINS]; 295 s8 pwr_i[AR5K_EEPROM_N_PD_GAINS];
272 u8 pddac_i[AR5K_EEPROM_N_PD_GAINS]; 296 u8 pddac_i[AR5K_EEPROM_N_PD_GAINS];
273 /* (pwr,pddac) points */ 297 /* (pwr,pddac) points
274 s8 pwr[AR5K_EEPROM_N_PD_GAINS] 298 * power levels in 0.5dB units */
275 [AR5K_EEPROM_N_PD_POINTS]; 299 s8 pwr[AR5K_EEPROM_N_PD_GAINS]
276 u8 pddac[AR5K_EEPROM_N_PD_GAINS] 300 [AR5K_EEPROM_N_PD_POINTS];
277 [AR5K_EEPROM_N_PD_POINTS]; 301 u8 pddac[AR5K_EEPROM_N_PD_GAINS]
302 [AR5K_EEPROM_N_PD_POINTS];
303};
304
305enum ath5k_powertable_type {
306 AR5K_PWRTABLE_PWR_TO_PCDAC = 0,
307 AR5K_PWRTABLE_LINEAR_PCDAC = 1,
308 AR5K_PWRTABLE_PWR_TO_PDADC = 2,
309};
310
311struct ath5k_pdgain_info {
312 u8 pd_points;
313 u8 *pd_step;
314 /* Power values are in
315 * 0.25dB units */
316 s16 *pd_pwr;
278}; 317};
279 318
280struct ath5k_chan_pcal_info { 319struct ath5k_chan_pcal_info {
281 /* Frequency */ 320 /* Frequency */
282 u16 freq; 321 u16 freq;
283 /* Max available power */ 322 /* Tx power boundaries */
284 s8 max_pwr; 323 s16 max_pwr;
324 s16 min_pwr;
285 union { 325 union {
286 struct ath5k_chan_pcal_info_rf5111 rf5111_info; 326 struct ath5k_chan_pcal_info_rf5111 rf5111_info;
287 struct ath5k_chan_pcal_info_rf5112 rf5112_info; 327 struct ath5k_chan_pcal_info_rf5112 rf5112_info;
288 struct ath5k_chan_pcal_info_rf2413 rf2413_info; 328 struct ath5k_chan_pcal_info_rf2413 rf2413_info;
289 }; 329 };
330 /* Raw values used by phy code
331 * Curves are stored in order from lower
332 * gain to higher gain (max txpower -> min txpower) */
333 struct ath5k_pdgain_info *pd_curves;
290}; 334};
291 335
292/* Per rate calibration data for each mode, used for power table setup */ 336/* Per rate calibration data for each mode,
337 * used for rate power table setup.
338 * Note: Values in 0.5dB units */
293struct ath5k_rate_pcal_info { 339struct ath5k_rate_pcal_info {
294 u16 freq; /* Frequency */ 340 u16 freq; /* Frequency */
295 /* Power level for 6-24Mbit/s rates */ 341 /* Power level for 6-24Mbit/s rates or
342 * 1Mb rate */
296 u16 target_power_6to24; 343 u16 target_power_6to24;
297 /* Power level for 36Mbit rate */ 344 /* Power level for 36Mbit rate or
345 * 2Mb rate */
298 u16 target_power_36; 346 u16 target_power_36;
299 /* Power level for 48Mbit rate */ 347 /* Power level for 48Mbit rate or
348 * 5.5Mbit rate */
300 u16 target_power_48; 349 u16 target_power_48;
301 /* Power level for 54Mbit rate */ 350 /* Power level for 54Mbit rate or
351 * 11Mbit rate */
302 u16 target_power_54; 352 u16 target_power_54;
303}; 353};
304 354
@@ -330,12 +380,6 @@ struct ath5k_eeprom_info {
330 u16 ee_cck_ofdm_power_delta; 380 u16 ee_cck_ofdm_power_delta;
331 u16 ee_scaled_cck_delta; 381 u16 ee_scaled_cck_delta;
332 382
333 /* Used for tx thermal adjustment (eeprom_init, rfregs) */
334 u16 ee_tx_clip;
335 u16 ee_pwd_84;
336 u16 ee_pwd_90;
337 u16 ee_gain_select;
338
339 /* RF Calibration settings (reset, rfregs) */ 383 /* RF Calibration settings (reset, rfregs) */
340 u16 ee_i_cal[AR5K_EEPROM_N_MODES]; 384 u16 ee_i_cal[AR5K_EEPROM_N_MODES];
341 u16 ee_q_cal[AR5K_EEPROM_N_MODES]; 385 u16 ee_q_cal[AR5K_EEPROM_N_MODES];
@@ -363,23 +407,25 @@ struct ath5k_eeprom_info {
363 /* Power calibration data */ 407 /* Power calibration data */
364 u16 ee_false_detect[AR5K_EEPROM_N_MODES]; 408 u16 ee_false_detect[AR5K_EEPROM_N_MODES];
365 409
366 /* Number of pd gain curves per mode (RF2413) */ 410 /* Number of pd gain curves per mode */
367 u8 ee_pd_gains[AR5K_EEPROM_N_MODES]; 411 u8 ee_pd_gains[AR5K_EEPROM_N_MODES];
412 /* Back mapping pdcurve number -> pdcurve index in pd->pd_curves */
413 u8 ee_pdc_to_idx[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PD_GAINS];
368 414
369 u8 ee_n_piers[AR5K_EEPROM_N_MODES]; 415 u8 ee_n_piers[AR5K_EEPROM_N_MODES];
370 struct ath5k_chan_pcal_info ee_pwr_cal_a[AR5K_EEPROM_N_5GHZ_CHAN]; 416 struct ath5k_chan_pcal_info ee_pwr_cal_a[AR5K_EEPROM_N_5GHZ_CHAN];
371 struct ath5k_chan_pcal_info ee_pwr_cal_b[AR5K_EEPROM_N_2GHZ_CHAN]; 417 struct ath5k_chan_pcal_info ee_pwr_cal_b[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
372 struct ath5k_chan_pcal_info ee_pwr_cal_g[AR5K_EEPROM_N_2GHZ_CHAN]; 418 struct ath5k_chan_pcal_info ee_pwr_cal_g[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
373 419
374 /* Per rate target power levels */ 420 /* Per rate target power levels */
375 u16 ee_rate_target_pwr_num[AR5K_EEPROM_N_MODES]; 421 u8 ee_rate_target_pwr_num[AR5K_EEPROM_N_MODES];
376 struct ath5k_rate_pcal_info ee_rate_tpwr_a[AR5K_EEPROM_N_5GHZ_CHAN]; 422 struct ath5k_rate_pcal_info ee_rate_tpwr_a[AR5K_EEPROM_N_5GHZ_CHAN];
377 struct ath5k_rate_pcal_info ee_rate_tpwr_b[AR5K_EEPROM_N_2GHZ_CHAN]; 423 struct ath5k_rate_pcal_info ee_rate_tpwr_b[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
378 struct ath5k_rate_pcal_info ee_rate_tpwr_g[AR5K_EEPROM_N_2GHZ_CHAN]; 424 struct ath5k_rate_pcal_info ee_rate_tpwr_g[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
379 425
380 /* Conformance test limits (Unused) */ 426 /* Conformance test limits (Unused) */
381 u16 ee_ctls; 427 u8 ee_ctls;
382 u16 ee_ctl[AR5K_EEPROM_MAX_CTLS]; 428 u8 ee_ctl[AR5K_EEPROM_MAX_CTLS];
383 struct ath5k_edge_power ee_ctl_pwr[AR5K_EEPROM_N_EDGES * AR5K_EEPROM_MAX_CTLS]; 429 struct ath5k_edge_power ee_ctl_pwr[AR5K_EEPROM_N_EDGES * AR5K_EEPROM_MAX_CTLS];
384 430
385 /* Noise Floor Calibration settings */ 431 /* Noise Floor Calibration settings */
diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath5k/initvals.c
index 44886434187b..61fb621ed20d 100644
--- a/drivers/net/wireless/ath5k/initvals.c
+++ b/drivers/net/wireless/ath5k/initvals.c
@@ -1510,8 +1510,8 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
1510 rf2425_ini_mode_end, mode); 1510 rf2425_ini_mode_end, mode);
1511 1511
1512 ath5k_hw_ini_registers(ah, 1512 ath5k_hw_ini_registers(ah,
1513 ARRAY_SIZE(rf2413_ini_common_end), 1513 ARRAY_SIZE(rf2425_ini_common_end),
1514 rf2413_ini_common_end, change_channel); 1514 rf2425_ini_common_end, change_channel);
1515 1515
1516 ath5k_hw_ini_registers(ah, 1516 ath5k_hw_ini_registers(ah,
1517 ARRAY_SIZE(rf5112_ini_bbgain), 1517 ARRAY_SIZE(rf5112_ini_bbgain),
diff --git a/drivers/net/wireless/ath5k/led.c b/drivers/net/wireless/ath5k/led.c
index 0686e12738b3..19555fb79c9b 100644
--- a/drivers/net/wireless/ath5k/led.c
+++ b/drivers/net/wireless/ath5k/led.c
@@ -65,6 +65,8 @@ static const struct pci_device_id ath5k_led_devices[] = {
65 { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) }, 65 { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) },
66 /* E-machines E510 (tuliom@gmail.com) */ 66 /* E-machines E510 (tuliom@gmail.com) */
67 { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0428), ATH_LED(3, 0) }, 67 { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0428), ATH_LED(3, 0) },
68 /* Acer Extensa 5620z (nekoreeve@gmail.com) */
69 { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0105), ATH_LED(3, 0) },
68 { } 70 { }
69}; 71};
70 72
diff --git a/drivers/net/wireless/ath5k/phy.c b/drivers/net/wireless/ath5k/phy.c
index 81f5bebc48b1..9e2faae5ae94 100644
--- a/drivers/net/wireless/ath5k/phy.c
+++ b/drivers/net/wireless/ath5k/phy.c
@@ -4,6 +4,7 @@
4 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org> 4 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com> 5 * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
6 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com> 6 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
7 * Copyright (c) 2008-2009 Felix Fietkau <nbd@openwrt.org>
7 * 8 *
8 * Permission to use, copy, modify, and distribute this software for any 9 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above 10 * purpose with or without fee is hereby granted, provided that the above
@@ -183,7 +184,9 @@ static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
183 if (ah->ah_gain.g_state != AR5K_RFGAIN_ACTIVE) 184 if (ah->ah_gain.g_state != AR5K_RFGAIN_ACTIVE)
184 return; 185 return;
185 186
186 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_max, 187 /* Send the packet with 2dB below max power as
188 * patent doc suggest */
189 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_max_pwr - 4,
187 AR5K_PHY_PAPD_PROBE_TXPOWER) | 190 AR5K_PHY_PAPD_PROBE_TXPOWER) |
188 AR5K_PHY_PAPD_PROBE_TX_NEXT, AR5K_PHY_PAPD_PROBE); 191 AR5K_PHY_PAPD_PROBE_TX_NEXT, AR5K_PHY_PAPD_PROBE);
189 192
@@ -1433,93 +1436,1120 @@ unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah)
1433 return false; /*XXX: What do we return for 5210 ?*/ 1436 return false; /*XXX: What do we return for 5210 ?*/
1434} 1437}
1435 1438
1439
1440/****************\
1441* TX power setup *
1442\****************/
1443
1444/*
1445 * Helper functions
1446 */
1447
1448/*
1449 * Do linear interpolation between two given (x, y) points
1450 */
1451static s16
1452ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
1453 s16 y_left, s16 y_right)
1454{
1455 s16 ratio, result;
1456
1457 /* Avoid divide by zero and skip interpolation
1458 * if we have the same point */
1459 if ((x_left == x_right) || (y_left == y_right))
1460 return y_left;
1461
1462 /*
1463 * Since we use ints and not fps, we need to scale up in
1464 * order to get a sane ratio value (or else we 'll eg. get
1465 * always 1 instead of 1.25, 1.75 etc). We scale up by 100
1466 * to have some accuracy both for 0.5 and 0.25 steps.
1467 */
1468 ratio = ((100 * y_right - 100 * y_left)/(x_right - x_left));
1469
1470 /* Now scale down to be in range */
1471 result = y_left + (ratio * (target - x_left) / 100);
1472
1473 return result;
1474}
1475
1476/*
1477 * Find vertical boundary (min pwr) for the linear PCDAC curve.
1478 *
1479 * Since we have the top of the curve and we draw the line below
1480 * until we reach 1 (1 pcdac step) we need to know which point
1481 * (x value) that is so that we don't go below y axis and have negative
1482 * pcdac values when creating the curve, or fill the table with zeroes.
1483 */
1484static s16
1485ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
1486 const s16 *pwrL, const s16 *pwrR)
1487{
1488 s8 tmp;
1489 s16 min_pwrL, min_pwrR;
1490 s16 pwr_i = pwrL[0];
1491
1492 do {
1493 pwr_i--;
1494 tmp = (s8) ath5k_get_interpolated_value(pwr_i,
1495 pwrL[0], pwrL[1],
1496 stepL[0], stepL[1]);
1497
1498 } while (tmp > 1);
1499
1500 min_pwrL = pwr_i;
1501
1502 pwr_i = pwrR[0];
1503 do {
1504 pwr_i--;
1505 tmp = (s8) ath5k_get_interpolated_value(pwr_i,
1506 pwrR[0], pwrR[1],
1507 stepR[0], stepR[1]);
1508
1509 } while (tmp > 1);
1510
1511 min_pwrR = pwr_i;
1512
1513 /* Keep the right boundary so that it works for both curves */
1514 return max(min_pwrL, min_pwrR);
1515}
1516
1517/*
1518 * Interpolate (pwr,vpd) points to create a Power to PDADC or a
1519 * Power to PCDAC curve.
1520 *
1521 * Each curve has power on x axis (in 0.5dB units) and PCDAC/PDADC
1522 * steps (offsets) on y axis. Power can go up to 31.5dB and max
1523 * PCDAC/PDADC step for each curve is 64 but we can write more than
1524 * one curves on hw so we can go up to 128 (which is the max step we
1525 * can write on the final table).
1526 *
1527 * We write y values (PCDAC/PDADC steps) on hw.
1528 */
1529static void
1530ath5k_create_power_curve(s16 pmin, s16 pmax,
1531 const s16 *pwr, const u8 *vpd,
1532 u8 num_points,
1533 u8 *vpd_table, u8 type)
1534{
1535 u8 idx[2] = { 0, 1 };
1536 s16 pwr_i = 2*pmin;
1537 int i;
1538
1539 if (num_points < 2)
1540 return;
1541
1542 /* We want the whole line, so adjust boundaries
1543 * to cover the entire power range. Note that
1544 * power values are already 0.25dB so no need
1545 * to multiply pwr_i by 2 */
1546 if (type == AR5K_PWRTABLE_LINEAR_PCDAC) {
1547 pwr_i = pmin;
1548 pmin = 0;
1549 pmax = 63;
1550 }
1551
1552 /* Find surrounding turning points (TPs)
1553 * and interpolate between them */
1554 for (i = 0; (i <= (u16) (pmax - pmin)) &&
1555 (i < AR5K_EEPROM_POWER_TABLE_SIZE); i++) {
1556
1557 /* We passed the right TP, move to the next set of TPs
1558 * if we pass the last TP, extrapolate above using the last
1559 * two TPs for ratio */
1560 if ((pwr_i > pwr[idx[1]]) && (idx[1] < num_points - 1)) {
1561 idx[0]++;
1562 idx[1]++;
1563 }
1564
1565 vpd_table[i] = (u8) ath5k_get_interpolated_value(pwr_i,
1566 pwr[idx[0]], pwr[idx[1]],
1567 vpd[idx[0]], vpd[idx[1]]);
1568
1569 /* Increase by 0.5dB
1570 * (0.25 dB units) */
1571 pwr_i += 2;
1572 }
1573}
1574
1575/*
1576 * Get the surrounding per-channel power calibration piers
1577 * for a given frequency so that we can interpolate between
1578 * them and come up with an apropriate dataset for our current
1579 * channel.
1580 */
1581static void
1582ath5k_get_chan_pcal_surrounding_piers(struct ath5k_hw *ah,
1583 struct ieee80211_channel *channel,
1584 struct ath5k_chan_pcal_info **pcinfo_l,
1585 struct ath5k_chan_pcal_info **pcinfo_r)
1586{
1587 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1588 struct ath5k_chan_pcal_info *pcinfo;
1589 u8 idx_l, idx_r;
1590 u8 mode, max, i;
1591 u32 target = channel->center_freq;
1592
1593 idx_l = 0;
1594 idx_r = 0;
1595
1596 if (!(channel->hw_value & CHANNEL_OFDM)) {
1597 pcinfo = ee->ee_pwr_cal_b;
1598 mode = AR5K_EEPROM_MODE_11B;
1599 } else if (channel->hw_value & CHANNEL_2GHZ) {
1600 pcinfo = ee->ee_pwr_cal_g;
1601 mode = AR5K_EEPROM_MODE_11G;
1602 } else {
1603 pcinfo = ee->ee_pwr_cal_a;
1604 mode = AR5K_EEPROM_MODE_11A;
1605 }
1606 max = ee->ee_n_piers[mode] - 1;
1607
1608 /* Frequency is below our calibrated
1609 * range. Use the lowest power curve
1610 * we have */
1611 if (target < pcinfo[0].freq) {
1612 idx_l = idx_r = 0;
1613 goto done;
1614 }
1615
1616 /* Frequency is above our calibrated
1617 * range. Use the highest power curve
1618 * we have */
1619 if (target > pcinfo[max].freq) {
1620 idx_l = idx_r = max;
1621 goto done;
1622 }
1623
1624 /* Frequency is inside our calibrated
1625 * channel range. Pick the surrounding
1626 * calibration piers so that we can
1627 * interpolate */
1628 for (i = 0; i <= max; i++) {
1629
1630 /* Frequency matches one of our calibration
1631 * piers, no need to interpolate, just use
1632 * that calibration pier */
1633 if (pcinfo[i].freq == target) {
1634 idx_l = idx_r = i;
1635 goto done;
1636 }
1637
1638 /* We found a calibration pier that's above
1639 * frequency, use this pier and the previous
1640 * one to interpolate */
1641 if (target < pcinfo[i].freq) {
1642 idx_r = i;
1643 idx_l = idx_r - 1;
1644 goto done;
1645 }
1646 }
1647
1648done:
1649 *pcinfo_l = &pcinfo[idx_l];
1650 *pcinfo_r = &pcinfo[idx_r];
1651
1652 return;
1653}
1654
1655/*
1656 * Get the surrounding per-rate power calibration data
1657 * for a given frequency and interpolate between power
1658 * values to set max target power supported by hw for
1659 * each rate.
1660 */
1661static void
1662ath5k_get_rate_pcal_data(struct ath5k_hw *ah,
1663 struct ieee80211_channel *channel,
1664 struct ath5k_rate_pcal_info *rates)
1665{
1666 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1667 struct ath5k_rate_pcal_info *rpinfo;
1668 u8 idx_l, idx_r;
1669 u8 mode, max, i;
1670 u32 target = channel->center_freq;
1671
1672 idx_l = 0;
1673 idx_r = 0;
1674
1675 if (!(channel->hw_value & CHANNEL_OFDM)) {
1676 rpinfo = ee->ee_rate_tpwr_b;
1677 mode = AR5K_EEPROM_MODE_11B;
1678 } else if (channel->hw_value & CHANNEL_2GHZ) {
1679 rpinfo = ee->ee_rate_tpwr_g;
1680 mode = AR5K_EEPROM_MODE_11G;
1681 } else {
1682 rpinfo = ee->ee_rate_tpwr_a;
1683 mode = AR5K_EEPROM_MODE_11A;
1684 }
1685 max = ee->ee_rate_target_pwr_num[mode] - 1;
1686
1687 /* Get the surrounding calibration
1688 * piers - same as above */
1689 if (target < rpinfo[0].freq) {
1690 idx_l = idx_r = 0;
1691 goto done;
1692 }
1693
1694 if (target > rpinfo[max].freq) {
1695 idx_l = idx_r = max;
1696 goto done;
1697 }
1698
1699 for (i = 0; i <= max; i++) {
1700
1701 if (rpinfo[i].freq == target) {
1702 idx_l = idx_r = i;
1703 goto done;
1704 }
1705
1706 if (target < rpinfo[i].freq) {
1707 idx_r = i;
1708 idx_l = idx_r - 1;
1709 goto done;
1710 }
1711 }
1712
1713done:
1714 /* Now interpolate power value, based on the frequency */
1715 rates->freq = target;
1716
1717 rates->target_power_6to24 =
1718 ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
1719 rpinfo[idx_r].freq,
1720 rpinfo[idx_l].target_power_6to24,
1721 rpinfo[idx_r].target_power_6to24);
1722
1723 rates->target_power_36 =
1724 ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
1725 rpinfo[idx_r].freq,
1726 rpinfo[idx_l].target_power_36,
1727 rpinfo[idx_r].target_power_36);
1728
1729 rates->target_power_48 =
1730 ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
1731 rpinfo[idx_r].freq,
1732 rpinfo[idx_l].target_power_48,
1733 rpinfo[idx_r].target_power_48);
1734
1735 rates->target_power_54 =
1736 ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
1737 rpinfo[idx_r].freq,
1738 rpinfo[idx_l].target_power_54,
1739 rpinfo[idx_r].target_power_54);
1740}
1741
1742/*
1743 * Get the max edge power for this channel if
1744 * we have such data from EEPROM's Conformance Test
1745 * Limits (CTL), and limit max power if needed.
1746 *
1747 * FIXME: Only works for world regulatory domains
1748 */
1749static void
1750ath5k_get_max_ctl_power(struct ath5k_hw *ah,
1751 struct ieee80211_channel *channel)
1752{
1753 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1754 struct ath5k_edge_power *rep = ee->ee_ctl_pwr;
1755 u8 *ctl_val = ee->ee_ctl;
1756 s16 max_chan_pwr = ah->ah_txpower.txp_max_pwr / 4;
1757 s16 edge_pwr = 0;
1758 u8 rep_idx;
1759 u8 i, ctl_mode;
1760 u8 ctl_idx = 0xFF;
1761 u32 target = channel->center_freq;
1762
1763 /* Find out a CTL for our mode that's not mapped
1764 * on a specific reg domain.
1765 *
1766 * TODO: Map our current reg domain to one of the 3 available
1767 * reg domain ids so that we can support more CTLs. */
1768 switch (channel->hw_value & CHANNEL_MODES) {
1769 case CHANNEL_A:
1770 ctl_mode = AR5K_CTL_11A | AR5K_CTL_NO_REGDOMAIN;
1771 break;
1772 case CHANNEL_G:
1773 ctl_mode = AR5K_CTL_11G | AR5K_CTL_NO_REGDOMAIN;
1774 break;
1775 case CHANNEL_B:
1776 ctl_mode = AR5K_CTL_11B | AR5K_CTL_NO_REGDOMAIN;
1777 break;
1778 case CHANNEL_T:
1779 ctl_mode = AR5K_CTL_TURBO | AR5K_CTL_NO_REGDOMAIN;
1780 break;
1781 case CHANNEL_TG:
1782 ctl_mode = AR5K_CTL_TURBOG | AR5K_CTL_NO_REGDOMAIN;
1783 break;
1784 case CHANNEL_XR:
1785 /* Fall through */
1786 default:
1787 return;
1788 }
1789
1790 for (i = 0; i < ee->ee_ctls; i++) {
1791 if (ctl_val[i] == ctl_mode) {
1792 ctl_idx = i;
1793 break;
1794 }
1795 }
1796
1797 /* If we have a CTL dataset available grab it and find the
1798 * edge power for our frequency */
1799 if (ctl_idx == 0xFF)
1800 return;
1801
1802 /* Edge powers are sorted by frequency from lower
1803 * to higher. Each CTL corresponds to 8 edge power
1804 * measurements. */
1805 rep_idx = ctl_idx * AR5K_EEPROM_N_EDGES;
1806
1807 /* Don't do boundaries check because we
1808 * might have more that one bands defined
1809 * for this mode */
1810
1811 /* Get the edge power that's closer to our
1812 * frequency */
1813 for (i = 0; i < AR5K_EEPROM_N_EDGES; i++) {
1814 rep_idx += i;
1815 if (target <= rep[rep_idx].freq)
1816 edge_pwr = (s16) rep[rep_idx].edge;
1817 }
1818
1819 if (edge_pwr)
1820 ah->ah_txpower.txp_max_pwr = 4*min(edge_pwr, max_chan_pwr);
1821}
1822
1823
1824/*
1825 * Power to PCDAC table functions
1826 */
1827
1436/* 1828/*
1437 * TX power setup 1829 * Fill Power to PCDAC table on RF5111
1830 *
1831 * No further processing is needed for RF5111, the only thing we have to
1832 * do is fill the values below and above calibration range since eeprom data
1833 * may not cover the entire PCDAC table.
1438 */ 1834 */
1835static void
1836ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min,
1837 s16 *table_max)
1838{
1839 u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
1840 u8 *pcdac_tmp = ah->ah_txpower.tmpL[0];
1841 u8 pcdac_0, pcdac_n, pcdac_i, pwr_idx, i;
1842 s16 min_pwr, max_pwr;
1843
1844 /* Get table boundaries */
1845 min_pwr = table_min[0];
1846 pcdac_0 = pcdac_tmp[0];
1847
1848 max_pwr = table_max[0];
1849 pcdac_n = pcdac_tmp[table_max[0] - table_min[0]];
1850
1851 /* Extrapolate below minimum using pcdac_0 */
1852 pcdac_i = 0;
1853 for (i = 0; i < min_pwr; i++)
1854 pcdac_out[pcdac_i++] = pcdac_0;
1855
1856 /* Copy values from pcdac_tmp */
1857 pwr_idx = min_pwr;
1858 for (i = 0 ; pwr_idx <= max_pwr &&
1859 pcdac_i < AR5K_EEPROM_POWER_TABLE_SIZE; i++) {
1860 pcdac_out[pcdac_i++] = pcdac_tmp[i];
1861 pwr_idx++;
1862 }
1863
1864 /* Extrapolate above maximum */
1865 while (pcdac_i < AR5K_EEPROM_POWER_TABLE_SIZE)
1866 pcdac_out[pcdac_i++] = pcdac_n;
1867
1868}
1439 1869
1440/* 1870/*
1441 * Initialize the tx power table (not fully implemented) 1871 * Combine available XPD Curves and fill Linear Power to PCDAC table
1872 * on RF5112
1873 *
1874 * RFX112 can have up to 2 curves (one for low txpower range and one for
1875 * higher txpower range). We need to put them both on pcdac_out and place
1876 * them in the correct location. In case we only have one curve available
1877 * just fit it on pcdac_out (it's supposed to cover the entire range of
1878 * available pwr levels since it's always the higher power curve). Extrapolate
1879 * below and above final table if needed.
1442 */ 1880 */
1443static void ath5k_txpower_table(struct ath5k_hw *ah, 1881static void
1444 struct ieee80211_channel *channel, s16 max_power) 1882ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
1883 s16 *table_max, u8 pdcurves)
1445{ 1884{
1446 unsigned int i, min, max, n; 1885 u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
1447 u16 txpower, *rates; 1886 u8 *pcdac_low_pwr;
1448 1887 u8 *pcdac_high_pwr;
1449 rates = ah->ah_txpower.txp_rates; 1888 u8 *pcdac_tmp;
1450 1889 u8 pwr;
1451 txpower = AR5K_TUNE_DEFAULT_TXPOWER * 2; 1890 s16 max_pwr_idx;
1452 if (max_power > txpower) 1891 s16 min_pwr_idx;
1453 txpower = max_power > AR5K_TUNE_MAX_TXPOWER ? 1892 s16 mid_pwr_idx = 0;
1454 AR5K_TUNE_MAX_TXPOWER : max_power; 1893 /* Edge flag turs on the 7nth bit on the PCDAC
1455 1894 * to delcare the higher power curve (force values
1456 for (i = 0; i < AR5K_MAX_RATES; i++) 1895 * to be greater than 64). If we only have one curve
1457 rates[i] = txpower; 1896 * we don't need to set this, if we have 2 curves and
1458 1897 * fill the table backwards this can also be used to
1459 /* XXX setup target powers by rate */ 1898 * switch from higher power curve to lower power curve */
1460 1899 u8 edge_flag;
1461 ah->ah_txpower.txp_min = rates[7]; 1900 int i;
1462 ah->ah_txpower.txp_max = rates[0]; 1901
1463 ah->ah_txpower.txp_ofdm = rates[0]; 1902 /* When we have only one curve available
1464 1903 * that's the higher power curve. If we have
1465 /* Calculate the power table */ 1904 * two curves the first is the high power curve
1466 n = ARRAY_SIZE(ah->ah_txpower.txp_pcdac); 1905 * and the next is the low power curve. */
1467 min = AR5K_EEPROM_PCDAC_START; 1906 if (pdcurves > 1) {
1468 max = AR5K_EEPROM_PCDAC_STOP; 1907 pcdac_low_pwr = ah->ah_txpower.tmpL[1];
1469 for (i = 0; i < n; i += AR5K_EEPROM_PCDAC_STEP) 1908 pcdac_high_pwr = ah->ah_txpower.tmpL[0];
1470 ah->ah_txpower.txp_pcdac[i] = 1909 mid_pwr_idx = table_max[1] - table_min[1] - 1;
1471#ifdef notyet 1910 max_pwr_idx = (table_max[0] - table_min[0]) / 2;
1472 min + ((i * (max - min)) / n); 1911
1473#else 1912 /* If table size goes beyond 31.5dB, keep the
1474 min; 1913 * upper 31.5dB range when setting tx power.
1914 * Note: 126 = 31.5 dB in quarter dB steps */
1915 if (table_max[0] - table_min[1] > 126)
1916 min_pwr_idx = table_max[0] - 126;
1917 else
1918 min_pwr_idx = table_min[1];
1919
1920 /* Since we fill table backwards
1921 * start from high power curve */
1922 pcdac_tmp = pcdac_high_pwr;
1923
1924 edge_flag = 0x40;
1925#if 0
1926 /* If both min and max power limits are in lower
1927 * power curve's range, only use the low power curve.
1928 * TODO: min/max levels are related to target
1929 * power values requested from driver/user
1930 * XXX: Is this really needed ? */
1931 if (min_pwr < table_max[1] &&
1932 max_pwr < table_max[1]) {
1933 edge_flag = 0;
1934 pcdac_tmp = pcdac_low_pwr;
1935 max_pwr_idx = (table_max[1] - table_min[1])/2;
1936 }
1475#endif 1937#endif
1938 } else {
1939 pcdac_low_pwr = ah->ah_txpower.tmpL[1]; /* Zeroed */
1940 pcdac_high_pwr = ah->ah_txpower.tmpL[0];
1941 min_pwr_idx = table_min[0];
1942 max_pwr_idx = (table_max[0] - table_min[0]) / 2;
1943 pcdac_tmp = pcdac_high_pwr;
1944 edge_flag = 0;
1945 }
1946
1947 /* This is used when setting tx power*/
1948 ah->ah_txpower.txp_min_idx = min_pwr_idx/2;
1949
1950 /* Fill Power to PCDAC table backwards */
1951 pwr = max_pwr_idx;
1952 for (i = 63; i >= 0; i--) {
1953 /* Entering lower power range, reset
1954 * edge flag and set pcdac_tmp to lower
1955 * power curve.*/
1956 if (edge_flag == 0x40 &&
1957 (2*pwr <= (table_max[1] - table_min[0]) || pwr == 0)) {
1958 edge_flag = 0x00;
1959 pcdac_tmp = pcdac_low_pwr;
1960 pwr = mid_pwr_idx/2;
1961 }
1962
1963 /* Don't go below 1, extrapolate below if we have
1964 * already swithced to the lower power curve -or
1965 * we only have one curve and edge_flag is zero
1966 * anyway */
1967 if (pcdac_tmp[pwr] < 1 && (edge_flag == 0x00)) {
1968 while (i >= 0) {
1969 pcdac_out[i] = pcdac_out[i + 1];
1970 i--;
1971 }
1972 break;
1973 }
1974
1975 pcdac_out[i] = pcdac_tmp[pwr] | edge_flag;
1976
1977 /* Extrapolate above if pcdac is greater than
1978 * 126 -this can happen because we OR pcdac_out
1979 * value with edge_flag on high power curve */
1980 if (pcdac_out[i] > 126)
1981 pcdac_out[i] = 126;
1982
1983 /* Decrease by a 0.5dB step */
1984 pwr--;
1985 }
1476} 1986}
1477 1987
1988/* Write PCDAC values on hw */
1989static void
1990ath5k_setup_pcdac_table(struct ath5k_hw *ah)
1991{
1992 u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
1993 int i;
1994
1995 /*
1996 * Write TX power values
1997 */
1998 for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) {
1999 ath5k_hw_reg_write(ah,
2000 (((pcdac_out[2*i + 0] << 8 | 0xff) & 0xffff) << 0) |
2001 (((pcdac_out[2*i + 1] << 8 | 0xff) & 0xffff) << 16),
2002 AR5K_PHY_PCDAC_TXPOWER(i));
2003 }
2004}
2005
2006
1478/* 2007/*
1479 * Set transmition power 2008 * Power to PDADC table functions
1480 */ 2009 */
1481int /*O.K. - txpower_table is unimplemented so this doesn't work*/ 2010
1482ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, 2011/*
1483 unsigned int txpower) 2012 * Set the gain boundaries and create final Power to PDADC table
2013 *
2014 * We can have up to 4 pd curves, we need to do a simmilar process
2015 * as we do for RF5112. This time we don't have an edge_flag but we
2016 * set the gain boundaries on a separate register.
2017 */
2018static void
2019ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
2020 s16 *pwr_min, s16 *pwr_max, u8 pdcurves)
1484{ 2021{
1485 bool tpc = ah->ah_txpower.txp_tpc; 2022 u8 gain_boundaries[AR5K_EEPROM_N_PD_GAINS];
1486 unsigned int i; 2023 u8 *pdadc_out = ah->ah_txpower.txp_pd_table;
2024 u8 *pdadc_tmp;
2025 s16 pdadc_0;
2026 u8 pdadc_i, pdadc_n, pwr_step, pdg, max_idx, table_size;
2027 u8 pd_gain_overlap;
2028
2029 /* Note: Register value is initialized on initvals
2030 * there is no feedback from hw.
2031 * XXX: What about pd_gain_overlap from EEPROM ? */
2032 pd_gain_overlap = (u8) ath5k_hw_reg_read(ah, AR5K_PHY_TPC_RG5) &
2033 AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP;
2034
2035 /* Create final PDADC table */
2036 for (pdg = 0, pdadc_i = 0; pdg < pdcurves; pdg++) {
2037 pdadc_tmp = ah->ah_txpower.tmpL[pdg];
2038
2039 if (pdg == pdcurves - 1)
2040 /* 2 dB boundary stretch for last
2041 * (higher power) curve */
2042 gain_boundaries[pdg] = pwr_max[pdg] + 4;
2043 else
2044 /* Set gain boundary in the middle
2045 * between this curve and the next one */
2046 gain_boundaries[pdg] =
2047 (pwr_max[pdg] + pwr_min[pdg + 1]) / 2;
2048
2049 /* Sanity check in case our 2 db stretch got out of
2050 * range. */
2051 if (gain_boundaries[pdg] > AR5K_TUNE_MAX_TXPOWER)
2052 gain_boundaries[pdg] = AR5K_TUNE_MAX_TXPOWER;
2053
2054 /* For the first curve (lower power)
2055 * start from 0 dB */
2056 if (pdg == 0)
2057 pdadc_0 = 0;
2058 else
2059 /* For the other curves use the gain overlap */
2060 pdadc_0 = (gain_boundaries[pdg - 1] - pwr_min[pdg]) -
2061 pd_gain_overlap;
1487 2062
1488 ATH5K_TRACE(ah->ah_sc); 2063 /* Force each power step to be at least 0.5 dB */
1489 if (txpower > AR5K_TUNE_MAX_TXPOWER) { 2064 if ((pdadc_tmp[1] - pdadc_tmp[0]) > 1)
1490 ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower); 2065 pwr_step = pdadc_tmp[1] - pdadc_tmp[0];
1491 return -EINVAL; 2066 else
2067 pwr_step = 1;
2068
2069 /* If pdadc_0 is negative, we need to extrapolate
2070 * below this pdgain by a number of pwr_steps */
2071 while ((pdadc_0 < 0) && (pdadc_i < 128)) {
2072 s16 tmp = pdadc_tmp[0] + pdadc_0 * pwr_step;
2073 pdadc_out[pdadc_i++] = (tmp < 0) ? 0 : (u8) tmp;
2074 pdadc_0++;
2075 }
2076
2077 /* Set last pwr level, using gain boundaries */
2078 pdadc_n = gain_boundaries[pdg] + pd_gain_overlap - pwr_min[pdg];
2079 /* Limit it to be inside pwr range */
2080 table_size = pwr_max[pdg] - pwr_min[pdg];
2081 max_idx = (pdadc_n < table_size) ? pdadc_n : table_size;
2082
2083 /* Fill pdadc_out table */
2084 while (pdadc_0 < max_idx)
2085 pdadc_out[pdadc_i++] = pdadc_tmp[pdadc_0++];
2086
2087 /* Need to extrapolate above this pdgain? */
2088 if (pdadc_n <= max_idx)
2089 continue;
2090
2091 /* Force each power step to be at least 0.5 dB */
2092 if ((pdadc_tmp[table_size - 1] - pdadc_tmp[table_size - 2]) > 1)
2093 pwr_step = pdadc_tmp[table_size - 1] -
2094 pdadc_tmp[table_size - 2];
2095 else
2096 pwr_step = 1;
2097
2098 /* Extrapolate above */
2099 while ((pdadc_0 < (s16) pdadc_n) &&
2100 (pdadc_i < AR5K_EEPROM_POWER_TABLE_SIZE * 2)) {
2101 s16 tmp = pdadc_tmp[table_size - 1] +
2102 (pdadc_0 - max_idx) * pwr_step;
2103 pdadc_out[pdadc_i++] = (tmp > 127) ? 127 : (u8) tmp;
2104 pdadc_0++;
2105 }
1492 } 2106 }
1493 2107
2108 while (pdg < AR5K_EEPROM_N_PD_GAINS) {
2109 gain_boundaries[pdg] = gain_boundaries[pdg - 1];
2110 pdg++;
2111 }
2112
2113 while (pdadc_i < AR5K_EEPROM_POWER_TABLE_SIZE * 2) {
2114 pdadc_out[pdadc_i] = pdadc_out[pdadc_i - 1];
2115 pdadc_i++;
2116 }
2117
2118 /* Set gain boundaries */
2119 ath5k_hw_reg_write(ah,
2120 AR5K_REG_SM(pd_gain_overlap,
2121 AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP) |
2122 AR5K_REG_SM(gain_boundaries[0],
2123 AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_1) |
2124 AR5K_REG_SM(gain_boundaries[1],
2125 AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_2) |
2126 AR5K_REG_SM(gain_boundaries[2],
2127 AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3) |
2128 AR5K_REG_SM(gain_boundaries[3],
2129 AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4),
2130 AR5K_PHY_TPC_RG5);
2131
2132 /* Used for setting rate power table */
2133 ah->ah_txpower.txp_min_idx = pwr_min[0];
2134
2135}
2136
2137/* Write PDADC values on hw */
2138static void
2139ath5k_setup_pwr_to_pdadc_table(struct ath5k_hw *ah,
2140 u8 pdcurves, u8 *pdg_to_idx)
2141{
2142 u8 *pdadc_out = ah->ah_txpower.txp_pd_table;
2143 u32 reg;
2144 u8 i;
2145
2146 /* Select the right pdgain curves */
2147
2148 /* Clear current settings */
2149 reg = ath5k_hw_reg_read(ah, AR5K_PHY_TPC_RG1);
2150 reg &= ~(AR5K_PHY_TPC_RG1_PDGAIN_1 |
2151 AR5K_PHY_TPC_RG1_PDGAIN_2 |
2152 AR5K_PHY_TPC_RG1_PDGAIN_3 |
2153 AR5K_PHY_TPC_RG1_NUM_PD_GAIN);
2154
1494 /* 2155 /*
1495 * RF2413 for some reason can't 2156 * Use pd_gains curve from eeprom
1496 * transmit anything if we call
1497 * this funtion, so we skip it
1498 * until we fix txpower.
1499 * 2157 *
1500 * XXX: Assume same for RF2425 2158 * This overrides the default setting from initvals
1501 * to be safe. 2159 * in case some vendors (e.g. Zcomax) don't use the default
2160 * curves. If we don't honor their settings we 'll get a
2161 * 5dB (1 * gain overlap ?) drop.
1502 */ 2162 */
1503 if ((ah->ah_radio == AR5K_RF2413) || (ah->ah_radio == AR5K_RF2425)) 2163 reg |= AR5K_REG_SM(pdcurves, AR5K_PHY_TPC_RG1_NUM_PD_GAIN);
1504 return 0;
1505 2164
1506 /* Reset TX power values */ 2165 switch (pdcurves) {
1507 memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower)); 2166 case 3:
1508 ah->ah_txpower.txp_tpc = tpc; 2167 reg |= AR5K_REG_SM(pdg_to_idx[2], AR5K_PHY_TPC_RG1_PDGAIN_3);
1509 2168 /* Fall through */
1510 /* Initialize TX power table */ 2169 case 2:
1511 ath5k_txpower_table(ah, channel, txpower); 2170 reg |= AR5K_REG_SM(pdg_to_idx[1], AR5K_PHY_TPC_RG1_PDGAIN_2);
2171 /* Fall through */
2172 case 1:
2173 reg |= AR5K_REG_SM(pdg_to_idx[0], AR5K_PHY_TPC_RG1_PDGAIN_1);
2174 break;
2175 }
2176 ath5k_hw_reg_write(ah, reg, AR5K_PHY_TPC_RG1);
1512 2177
1513 /* 2178 /*
1514 * Write TX power values 2179 * Write TX power values
1515 */ 2180 */
1516 for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) { 2181 for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) {
1517 ath5k_hw_reg_write(ah, 2182 ath5k_hw_reg_write(ah,
1518 ((((ah->ah_txpower.txp_pcdac[(i << 1) + 1] << 8) | 0xff) & 0xffff) << 16) | 2183 ((pdadc_out[4*i + 0] & 0xff) << 0) |
1519 (((ah->ah_txpower.txp_pcdac[(i << 1) ] << 8) | 0xff) & 0xffff), 2184 ((pdadc_out[4*i + 1] & 0xff) << 8) |
1520 AR5K_PHY_PCDAC_TXPOWER(i)); 2185 ((pdadc_out[4*i + 2] & 0xff) << 16) |
2186 ((pdadc_out[4*i + 3] & 0xff) << 24),
2187 AR5K_PHY_PDADC_TXPOWER(i));
2188 }
2189}
2190
2191
2192/*
2193 * Common code for PCDAC/PDADC tables
2194 */
2195
2196/*
2197 * This is the main function that uses all of the above
2198 * to set PCDAC/PDADC table on hw for the current channel.
2199 * This table is used for tx power calibration on the basband,
2200 * without it we get weird tx power levels and in some cases
2201 * distorted spectral mask
2202 */
2203static int
2204ath5k_setup_channel_powertable(struct ath5k_hw *ah,
2205 struct ieee80211_channel *channel,
2206 u8 ee_mode, u8 type)
2207{
2208 struct ath5k_pdgain_info *pdg_L, *pdg_R;
2209 struct ath5k_chan_pcal_info *pcinfo_L;
2210 struct ath5k_chan_pcal_info *pcinfo_R;
2211 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
2212 u8 *pdg_curve_to_idx = ee->ee_pdc_to_idx[ee_mode];
2213 s16 table_min[AR5K_EEPROM_N_PD_GAINS];
2214 s16 table_max[AR5K_EEPROM_N_PD_GAINS];
2215 u8 *tmpL;
2216 u8 *tmpR;
2217 u32 target = channel->center_freq;
2218 int pdg, i;
2219
2220 /* Get surounding freq piers for this channel */
2221 ath5k_get_chan_pcal_surrounding_piers(ah, channel,
2222 &pcinfo_L,
2223 &pcinfo_R);
2224
2225 /* Loop over pd gain curves on
2226 * surounding freq piers by index */
2227 for (pdg = 0; pdg < ee->ee_pd_gains[ee_mode]; pdg++) {
2228
2229 /* Fill curves in reverse order
2230 * from lower power (max gain)
2231 * to higher power. Use curve -> idx
2232 * backmaping we did on eeprom init */
2233 u8 idx = pdg_curve_to_idx[pdg];
2234
2235 /* Grab the needed curves by index */
2236 pdg_L = &pcinfo_L->pd_curves[idx];
2237 pdg_R = &pcinfo_R->pd_curves[idx];
2238
2239 /* Initialize the temp tables */
2240 tmpL = ah->ah_txpower.tmpL[pdg];
2241 tmpR = ah->ah_txpower.tmpR[pdg];
2242
2243 /* Set curve's x boundaries and create
2244 * curves so that they cover the same
2245 * range (if we don't do that one table
2246 * will have values on some range and the
2247 * other one won't have any so interpolation
2248 * will fail) */
2249 table_min[pdg] = min(pdg_L->pd_pwr[0],
2250 pdg_R->pd_pwr[0]) / 2;
2251
2252 table_max[pdg] = max(pdg_L->pd_pwr[pdg_L->pd_points - 1],
2253 pdg_R->pd_pwr[pdg_R->pd_points - 1]) / 2;
2254
2255 /* Now create the curves on surrounding channels
2256 * and interpolate if needed to get the final
2257 * curve for this gain on this channel */
2258 switch (type) {
2259 case AR5K_PWRTABLE_LINEAR_PCDAC:
2260 /* Override min/max so that we don't loose
2261 * accuracy (don't divide by 2) */
2262 table_min[pdg] = min(pdg_L->pd_pwr[0],
2263 pdg_R->pd_pwr[0]);
2264
2265 table_max[pdg] =
2266 max(pdg_L->pd_pwr[pdg_L->pd_points - 1],
2267 pdg_R->pd_pwr[pdg_R->pd_points - 1]);
2268
2269 /* Override minimum so that we don't get
2270 * out of bounds while extrapolating
2271 * below. Don't do this when we have 2
2272 * curves and we are on the high power curve
2273 * because table_min is ok in this case */
2274 if (!(ee->ee_pd_gains[ee_mode] > 1 && pdg == 0)) {
2275
2276 table_min[pdg] =
2277 ath5k_get_linear_pcdac_min(pdg_L->pd_step,
2278 pdg_R->pd_step,
2279 pdg_L->pd_pwr,
2280 pdg_R->pd_pwr);
2281
2282 /* Don't go too low because we will
2283 * miss the upper part of the curve.
2284 * Note: 126 = 31.5dB (max power supported)
2285 * in 0.25dB units */
2286 if (table_max[pdg] - table_min[pdg] > 126)
2287 table_min[pdg] = table_max[pdg] - 126;
2288 }
2289
2290 /* Fall through */
2291 case AR5K_PWRTABLE_PWR_TO_PCDAC:
2292 case AR5K_PWRTABLE_PWR_TO_PDADC:
2293
2294 ath5k_create_power_curve(table_min[pdg],
2295 table_max[pdg],
2296 pdg_L->pd_pwr,
2297 pdg_L->pd_step,
2298 pdg_L->pd_points, tmpL, type);
2299
2300 /* We are in a calibration
2301 * pier, no need to interpolate
2302 * between freq piers */
2303 if (pcinfo_L == pcinfo_R)
2304 continue;
2305
2306 ath5k_create_power_curve(table_min[pdg],
2307 table_max[pdg],
2308 pdg_R->pd_pwr,
2309 pdg_R->pd_step,
2310 pdg_R->pd_points, tmpR, type);
2311 break;
2312 default:
2313 return -EINVAL;
2314 }
2315
2316 /* Interpolate between curves
2317 * of surounding freq piers to
2318 * get the final curve for this
2319 * pd gain. Re-use tmpL for interpolation
2320 * output */
2321 for (i = 0; (i < (u16) (table_max[pdg] - table_min[pdg])) &&
2322 (i < AR5K_EEPROM_POWER_TABLE_SIZE); i++) {
2323 tmpL[i] = (u8) ath5k_get_interpolated_value(target,
2324 (s16) pcinfo_L->freq,
2325 (s16) pcinfo_R->freq,
2326 (s16) tmpL[i],
2327 (s16) tmpR[i]);
2328 }
1521 } 2329 }
1522 2330
2331 /* Now we have a set of curves for this
2332 * channel on tmpL (x range is table_max - table_min
2333 * and y values are tmpL[pdg][]) sorted in the same
2334 * order as EEPROM (because we've used the backmaping).
2335 * So for RF5112 it's from higher power to lower power
2336 * and for RF2413 it's from lower power to higher power.
2337 * For RF5111 we only have one curve. */
2338
2339 /* Fill min and max power levels for this
2340 * channel by interpolating the values on
2341 * surounding channels to complete the dataset */
2342 ah->ah_txpower.txp_min_pwr = ath5k_get_interpolated_value(target,
2343 (s16) pcinfo_L->freq,
2344 (s16) pcinfo_R->freq,
2345 pcinfo_L->min_pwr, pcinfo_R->min_pwr);
2346
2347 ah->ah_txpower.txp_max_pwr = ath5k_get_interpolated_value(target,
2348 (s16) pcinfo_L->freq,
2349 (s16) pcinfo_R->freq,
2350 pcinfo_L->max_pwr, pcinfo_R->max_pwr);
2351
2352 /* We are ready to go, fill PCDAC/PDADC
2353 * table and write settings on hardware */
2354 switch (type) {
2355 case AR5K_PWRTABLE_LINEAR_PCDAC:
2356 /* For RF5112 we can have one or two curves
2357 * and each curve covers a certain power lvl
2358 * range so we need to do some more processing */
2359 ath5k_combine_linear_pcdac_curves(ah, table_min, table_max,
2360 ee->ee_pd_gains[ee_mode]);
2361
2362 /* Set txp.offset so that we can
2363 * match max power value with max
2364 * table index */
2365 ah->ah_txpower.txp_offset = 64 - (table_max[0] / 2);
2366
2367 /* Write settings on hw */
2368 ath5k_setup_pcdac_table(ah);
2369 break;
2370 case AR5K_PWRTABLE_PWR_TO_PCDAC:
2371 /* We are done for RF5111 since it has only
2372 * one curve, just fit the curve on the table */
2373 ath5k_fill_pwr_to_pcdac_table(ah, table_min, table_max);
2374
2375 /* No rate powertable adjustment for RF5111 */
2376 ah->ah_txpower.txp_min_idx = 0;
2377 ah->ah_txpower.txp_offset = 0;
2378
2379 /* Write settings on hw */
2380 ath5k_setup_pcdac_table(ah);
2381 break;
2382 case AR5K_PWRTABLE_PWR_TO_PDADC:
2383 /* Set PDADC boundaries and fill
2384 * final PDADC table */
2385 ath5k_combine_pwr_to_pdadc_curves(ah, table_min, table_max,
2386 ee->ee_pd_gains[ee_mode]);
2387
2388 /* Write settings on hw */
2389 ath5k_setup_pwr_to_pdadc_table(ah, pdg, pdg_curve_to_idx);
2390
2391 /* Set txp.offset, note that table_min
2392 * can be negative */
2393 ah->ah_txpower.txp_offset = table_min[0];
2394 break;
2395 default:
2396 return -EINVAL;
2397 }
2398
2399 return 0;
2400}
2401
2402
2403/*
2404 * Per-rate tx power setting
2405 *
2406 * This is the code that sets the desired tx power (below
2407 * maximum) on hw for each rate (we also have TPC that sets
2408 * power per packet). We do that by providing an index on the
2409 * PCDAC/PDADC table we set up.
2410 */
2411
2412/*
2413 * Set rate power table
2414 *
2415 * For now we only limit txpower based on maximum tx power
2416 * supported by hw (what's inside rate_info). We need to limit
2417 * this even more, based on regulatory domain etc.
2418 *
2419 * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps)
2420 * and is indexed as follows:
2421 * rates[0] - rates[7] -> OFDM rates
2422 * rates[8] - rates[14] -> CCK rates
2423 * rates[15] -> XR rates (they all have the same power)
2424 */
2425static void
2426ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
2427 struct ath5k_rate_pcal_info *rate_info,
2428 u8 ee_mode)
2429{
2430 unsigned int i;
2431 u16 *rates;
2432
2433 /* max_pwr is power level we got from driver/user in 0.5dB
2434 * units, switch to 0.25dB units so we can compare */
2435 max_pwr *= 2;
2436 max_pwr = min(max_pwr, (u16) ah->ah_txpower.txp_max_pwr) / 2;
2437
2438 /* apply rate limits */
2439 rates = ah->ah_txpower.txp_rates_power_table;
2440
2441 /* OFDM rates 6 to 24Mb/s */
2442 for (i = 0; i < 5; i++)
2443 rates[i] = min(max_pwr, rate_info->target_power_6to24);
2444
2445 /* Rest OFDM rates */
2446 rates[5] = min(rates[0], rate_info->target_power_36);
2447 rates[6] = min(rates[0], rate_info->target_power_48);
2448 rates[7] = min(rates[0], rate_info->target_power_54);
2449
2450 /* CCK rates */
2451 /* 1L */
2452 rates[8] = min(rates[0], rate_info->target_power_6to24);
2453 /* 2L */
2454 rates[9] = min(rates[0], rate_info->target_power_36);
2455 /* 2S */
2456 rates[10] = min(rates[0], rate_info->target_power_36);
2457 /* 5L */
2458 rates[11] = min(rates[0], rate_info->target_power_48);
2459 /* 5S */
2460 rates[12] = min(rates[0], rate_info->target_power_48);
2461 /* 11L */
2462 rates[13] = min(rates[0], rate_info->target_power_54);
2463 /* 11S */
2464 rates[14] = min(rates[0], rate_info->target_power_54);
2465
2466 /* XR rates */
2467 rates[15] = min(rates[0], rate_info->target_power_6to24);
2468
2469 /* CCK rates have different peak to average ratio
2470 * so we have to tweak their power so that gainf
2471 * correction works ok. For this we use OFDM to
2472 * CCK delta from eeprom */
2473 if ((ee_mode == AR5K_EEPROM_MODE_11G) &&
2474 (ah->ah_phy_revision < AR5K_SREV_PHY_5212A))
2475 for (i = 8; i <= 15; i++)
2476 rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta;
2477
2478 ah->ah_txpower.txp_min_pwr = rates[7];
2479 ah->ah_txpower.txp_max_pwr = rates[0];
2480 ah->ah_txpower.txp_ofdm = rates[7];
2481}
2482
2483
2484/*
2485 * Set transmition power
2486 */
2487int
2488ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
2489 u8 ee_mode, u8 txpower)
2490{
2491 struct ath5k_rate_pcal_info rate_info;
2492 u8 type;
2493 int ret;
2494
2495 ATH5K_TRACE(ah->ah_sc);
2496 if (txpower > AR5K_TUNE_MAX_TXPOWER) {
2497 ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower);
2498 return -EINVAL;
2499 }
2500 if (txpower == 0)
2501 txpower = AR5K_TUNE_DEFAULT_TXPOWER;
2502
2503 /* Reset TX power values */
2504 memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
2505 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
2506 ah->ah_txpower.txp_min_pwr = 0;
2507 ah->ah_txpower.txp_max_pwr = AR5K_TUNE_MAX_TXPOWER;
2508
2509 /* Initialize TX power table */
2510 switch (ah->ah_radio) {
2511 case AR5K_RF5111:
2512 type = AR5K_PWRTABLE_PWR_TO_PCDAC;
2513 break;
2514 case AR5K_RF5112:
2515 type = AR5K_PWRTABLE_LINEAR_PCDAC;
2516 break;
2517 case AR5K_RF2413:
2518 case AR5K_RF5413:
2519 case AR5K_RF2316:
2520 case AR5K_RF2317:
2521 case AR5K_RF2425:
2522 type = AR5K_PWRTABLE_PWR_TO_PDADC;
2523 break;
2524 default:
2525 return -EINVAL;
2526 }
2527
2528 /* FIXME: Only on channel/mode change */
2529 ret = ath5k_setup_channel_powertable(ah, channel, ee_mode, type);
2530 if (ret)
2531 return ret;
2532
2533 /* Limit max power if we have a CTL available */
2534 ath5k_get_max_ctl_power(ah, channel);
2535
2536 /* FIXME: Tx power limit for this regdomain
2537 * XXX: Mac80211/CRDA will do that anyway ? */
2538
2539 /* FIXME: Antenna reduction stuff */
2540
2541 /* FIXME: Limit power on turbo modes */
2542
2543 /* FIXME: TPC scale reduction */
2544
2545 /* Get surounding channels for per-rate power table
2546 * calibration */
2547 ath5k_get_rate_pcal_data(ah, channel, &rate_info);
2548
2549 /* Setup rate power table */
2550 ath5k_setup_rate_powertable(ah, txpower, &rate_info, ee_mode);
2551
2552 /* Write rate power table on hw */
1523 ath5k_hw_reg_write(ah, AR5K_TXPOWER_OFDM(3, 24) | 2553 ath5k_hw_reg_write(ah, AR5K_TXPOWER_OFDM(3, 24) |
1524 AR5K_TXPOWER_OFDM(2, 16) | AR5K_TXPOWER_OFDM(1, 8) | 2554 AR5K_TXPOWER_OFDM(2, 16) | AR5K_TXPOWER_OFDM(1, 8) |
1525 AR5K_TXPOWER_OFDM(0, 0), AR5K_PHY_TXPOWER_RATE1); 2555 AR5K_TXPOWER_OFDM(0, 0), AR5K_PHY_TXPOWER_RATE1);
@@ -1536,26 +2566,34 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
1536 AR5K_TXPOWER_CCK(13, 16) | AR5K_TXPOWER_CCK(12, 8) | 2566 AR5K_TXPOWER_CCK(13, 16) | AR5K_TXPOWER_CCK(12, 8) |
1537 AR5K_TXPOWER_CCK(11, 0), AR5K_PHY_TXPOWER_RATE4); 2567 AR5K_TXPOWER_CCK(11, 0), AR5K_PHY_TXPOWER_RATE4);
1538 2568
1539 if (ah->ah_txpower.txp_tpc) 2569 /* FIXME: TPC support */
2570 if (ah->ah_txpower.txp_tpc) {
1540 ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX_TPC_ENABLE | 2571 ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX_TPC_ENABLE |
1541 AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX); 2572 AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX);
1542 else 2573
2574 ath5k_hw_reg_write(ah,
2575 AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_ACK) |
2576 AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CTS) |
2577 AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP),
2578 AR5K_TPC);
2579 } else {
1543 ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX | 2580 ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX |
1544 AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX); 2581 AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX);
2582 }
1545 2583
1546 return 0; 2584 return 0;
1547} 2585}
1548 2586
1549int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, unsigned int power) 2587int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 mode, u8 txpower)
1550{ 2588{
1551 /*Just a try M.F.*/ 2589 /*Just a try M.F.*/
1552 struct ieee80211_channel *channel = &ah->ah_current_channel; 2590 struct ieee80211_channel *channel = &ah->ah_current_channel;
1553 2591
1554 ATH5K_TRACE(ah->ah_sc); 2592 ATH5K_TRACE(ah->ah_sc);
1555 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_TXPOWER, 2593 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_TXPOWER,
1556 "changing txpower to %d\n", power); 2594 "changing txpower to %d\n", txpower);
1557 2595
1558 return ath5k_hw_txpower(ah, channel, power); 2596 return ath5k_hw_txpower(ah, channel, mode, txpower);
1559} 2597}
1560 2598
1561#undef _ATH5K_PHY 2599#undef _ATH5K_PHY
diff --git a/drivers/net/wireless/ath5k/reg.h b/drivers/net/wireless/ath5k/reg.h
index 2dc008e10226..7070d1543cdc 100644
--- a/drivers/net/wireless/ath5k/reg.h
+++ b/drivers/net/wireless/ath5k/reg.h
@@ -1554,6 +1554,19 @@
1554/*===5212 Specific PCU registers===*/ 1554/*===5212 Specific PCU registers===*/
1555 1555
1556/* 1556/*
1557 * Transmit power control register
1558 */
1559#define AR5K_TPC 0x80e8
1560#define AR5K_TPC_ACK 0x0000003f /* ack frames */
1561#define AR5K_TPC_ACK_S 0
1562#define AR5K_TPC_CTS 0x00003f00 /* cts frames */
1563#define AR5K_TPC_CTS_S 8
1564#define AR5K_TPC_CHIRP 0x003f0000 /* chirp frames */
1565#define AR5K_TPC_CHIRP_S 16
1566#define AR5K_TPC_DOPPLER 0x0f000000 /* doppler chirp span */
1567#define AR5K_TPC_DOPPLER_S 24
1568
1569/*
1557 * XR (eXtended Range) mode register 1570 * XR (eXtended Range) mode register
1558 */ 1571 */
1559#define AR5K_XRMODE 0x80c0 /* Register Address */ 1572#define AR5K_XRMODE 0x80c0 /* Register Address */
@@ -2550,6 +2563,12 @@
2550#define AR5K_PHY_TPC_RG1 0xa258 2563#define AR5K_PHY_TPC_RG1 0xa258
2551#define AR5K_PHY_TPC_RG1_NUM_PD_GAIN 0x0000c000 2564#define AR5K_PHY_TPC_RG1_NUM_PD_GAIN 0x0000c000
2552#define AR5K_PHY_TPC_RG1_NUM_PD_GAIN_S 14 2565#define AR5K_PHY_TPC_RG1_NUM_PD_GAIN_S 14
2566#define AR5K_PHY_TPC_RG1_PDGAIN_1 0x00030000
2567#define AR5K_PHY_TPC_RG1_PDGAIN_1_S 16
2568#define AR5K_PHY_TPC_RG1_PDGAIN_2 0x000c0000
2569#define AR5K_PHY_TPC_RG1_PDGAIN_2_S 18
2570#define AR5K_PHY_TPC_RG1_PDGAIN_3 0x00300000
2571#define AR5K_PHY_TPC_RG1_PDGAIN_3_S 20
2553 2572
2554#define AR5K_PHY_TPC_RG5 0xa26C 2573#define AR5K_PHY_TPC_RG5 0xa26C
2555#define AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP 0x0000000F 2574#define AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP 0x0000000F
diff --git a/drivers/net/wireless/ath5k/reset.c b/drivers/net/wireless/ath5k/reset.c
index 685dc213edae..7a17d31b2fd9 100644
--- a/drivers/net/wireless/ath5k/reset.c
+++ b/drivers/net/wireless/ath5k/reset.c
@@ -664,29 +664,35 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
664 struct ieee80211_channel *channel, u8 *ant, u8 ee_mode) 664 struct ieee80211_channel *channel, u8 *ant, u8 ee_mode)
665{ 665{
666 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 666 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
667 s16 cck_ofdm_pwr_delta;
667 668
668 /* Set CCK to OFDM power delta */ 669 /* Adjust power delta for channel 14 */
669 if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) { 670 if (channel->center_freq == 2484)
670 int16_t cck_ofdm_pwr_delta; 671 cck_ofdm_pwr_delta =
671 672 ((ee->ee_cck_ofdm_power_delta -
672 /* Adjust power delta for channel 14 */ 673 ee->ee_scaled_cck_delta) * 2) / 10;
673 if (channel->center_freq == 2484) 674 else
674 cck_ofdm_pwr_delta = 675 cck_ofdm_pwr_delta =
675 ((ee->ee_cck_ofdm_power_delta - 676 (ee->ee_cck_ofdm_power_delta * 2) / 10;
676 ee->ee_scaled_cck_delta) * 2) / 10;
677 else
678 cck_ofdm_pwr_delta =
679 (ee->ee_cck_ofdm_power_delta * 2) / 10;
680 677
678 /* Set CCK to OFDM power delta on tx power
679 * adjustment register */
680 if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) {
681 if (channel->hw_value == CHANNEL_G) 681 if (channel->hw_value == CHANNEL_G)
682 ath5k_hw_reg_write(ah, 682 ath5k_hw_reg_write(ah,
683 AR5K_REG_SM((ee->ee_cck_ofdm_power_delta * -1), 683 AR5K_REG_SM((ee->ee_cck_ofdm_gain_delta * -1),
684 AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA) | 684 AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA) |
685 AR5K_REG_SM((cck_ofdm_pwr_delta * -1), 685 AR5K_REG_SM((cck_ofdm_pwr_delta * -1),
686 AR5K_PHY_TX_PWR_ADJ_CCK_PCDAC_INDEX), 686 AR5K_PHY_TX_PWR_ADJ_CCK_PCDAC_INDEX),
687 AR5K_PHY_TX_PWR_ADJ); 687 AR5K_PHY_TX_PWR_ADJ);
688 else 688 else
689 ath5k_hw_reg_write(ah, 0, AR5K_PHY_TX_PWR_ADJ); 689 ath5k_hw_reg_write(ah, 0, AR5K_PHY_TX_PWR_ADJ);
690 } else {
691 /* For older revs we scale power on sw during tx power
692 * setup */
693 ah->ah_txpower.txp_cck_ofdm_pwr_delta = cck_ofdm_pwr_delta;
694 ah->ah_txpower.txp_cck_ofdm_gainf_delta =
695 ee->ee_cck_ofdm_gain_delta;
690 } 696 }
691 697
692 /* Set antenna idle switch table */ 698 /* Set antenna idle switch table */
@@ -994,7 +1000,8 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
994 /* 1000 /*
995 * Set TX power (FIXME) 1001 * Set TX power (FIXME)
996 */ 1002 */
997 ret = ath5k_hw_txpower(ah, channel, AR5K_TUNE_DEFAULT_TXPOWER); 1003 ret = ath5k_hw_txpower(ah, channel, ee_mode,
1004 AR5K_TUNE_DEFAULT_TXPOWER);
998 if (ret) 1005 if (ret)
999 return ret; 1006 return ret;
1000 1007
diff --git a/drivers/net/wireless/ath9k/ahb.c b/drivers/net/wireless/ath9k/ahb.c
index 00cc7bb01f2e..0e65c51ba176 100644
--- a/drivers/net/wireless/ath9k/ahb.c
+++ b/drivers/net/wireless/ath9k/ahb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org> 3 * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
4 * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org> 4 * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
5 * 5 *
diff --git a/drivers/net/wireless/ath9k/ani.c b/drivers/net/wireless/ath9k/ani.c
index a39eb760cbb7..6c5e887d50d7 100644
--- a/drivers/net/wireless/ath9k/ani.c
+++ b/drivers/net/wireless/ath9k/ani.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/ani.h b/drivers/net/wireless/ath9k/ani.h
index 7315761f6d74..08b4e7ed5ff0 100644
--- a/drivers/net/wireless/ath9k/ani.h
+++ b/drivers/net/wireless/ath9k/ani.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
index b64be8e9a690..2689a08a2844 100644
--- a/drivers/net/wireless/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath9k/ath9k.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -295,13 +295,9 @@ struct ath_tx_control {
295 enum ath9k_internal_frame_type frame_type; 295 enum ath9k_internal_frame_type frame_type;
296}; 296};
297 297
298struct ath_xmit_status {
299 int retries;
300 int flags;
301#define ATH_TX_ERROR 0x01 298#define ATH_TX_ERROR 0x01
302#define ATH_TX_XRETRY 0x02 299#define ATH_TX_XRETRY 0x02
303#define ATH_TX_BAR 0x04 300#define ATH_TX_BAR 0x04
304};
305 301
306/* All RSSI values are noise floor adjusted */ 302/* All RSSI values are noise floor adjusted */
307struct ath_tx_stat { 303struct ath_tx_stat {
@@ -390,6 +386,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
390 386
391struct ath_vif { 387struct ath_vif {
392 int av_bslot; 388 int av_bslot;
389 __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
393 enum nl80211_iftype av_opmode; 390 enum nl80211_iftype av_opmode;
394 struct ath_buf *av_bcbuf; 391 struct ath_buf *av_bcbuf;
395 struct ath_tx_control av_btxctl; 392 struct ath_tx_control av_btxctl;
@@ -406,7 +403,7 @@ struct ath_vif {
406 * number of beacon intervals, the game's up. 403 * number of beacon intervals, the game's up.
407 */ 404 */
408#define BSTUCK_THRESH (9 * ATH_BCBUF) 405#define BSTUCK_THRESH (9 * ATH_BCBUF)
409#define ATH_BCBUF 1 406#define ATH_BCBUF 4
410#define ATH_DEFAULT_BINTVAL 100 /* TU */ 407#define ATH_DEFAULT_BINTVAL 100 /* TU */
411#define ATH_DEFAULT_BMISS_LIMIT 10 408#define ATH_DEFAULT_BMISS_LIMIT 10
412#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024) 409#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
index 039c78136c50..ec995730632d 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -70,7 +70,8 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
70 ds = bf->bf_desc; 70 ds = bf->bf_desc;
71 flags = ATH9K_TXDESC_NOACK; 71 flags = ATH9K_TXDESC_NOACK;
72 72
73 if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC && 73 if (((sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
74 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) &&
74 (ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) { 75 (ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
75 ds->ds_link = bf->bf_daddr; /* self-linked */ 76 ds->ds_link = bf->bf_daddr; /* self-linked */
76 flags |= ATH9K_TXDESC_VEOL; 77 flags |= ATH9K_TXDESC_VEOL;
@@ -153,6 +154,8 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
153 bf->bf_mpdu = skb; 154 bf->bf_mpdu = skb;
154 if (skb == NULL) 155 if (skb == NULL)
155 return NULL; 156 return NULL;
157 ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
158 avp->tsf_adjust;
156 159
157 info = IEEE80211_SKB_CB(skb); 160 info = IEEE80211_SKB_CB(skb);
158 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 161 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -253,7 +256,6 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
253{ 256{
254 struct ath_softc *sc = aphy->sc; 257 struct ath_softc *sc = aphy->sc;
255 struct ath_vif *avp; 258 struct ath_vif *avp;
256 struct ieee80211_hdr *hdr;
257 struct ath_buf *bf; 259 struct ath_buf *bf;
258 struct sk_buff *skb; 260 struct sk_buff *skb;
259 __le64 tstamp; 261 __le64 tstamp;
@@ -316,42 +318,33 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
316 318
317 tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; 319 tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
318 sc->beacon.bc_tstamp = le64_to_cpu(tstamp); 320 sc->beacon.bc_tstamp = le64_to_cpu(tstamp);
319 321 /* Calculate a TSF adjustment factor required for staggered beacons. */
320 /*
321 * Calculate a TSF adjustment factor required for
322 * staggered beacons. Note that we assume the format
323 * of the beacon frame leaves the tstamp field immediately
324 * following the header.
325 */
326 if (avp->av_bslot > 0) { 322 if (avp->av_bslot > 0) {
327 u64 tsfadjust; 323 u64 tsfadjust;
328 __le64 val;
329 int intval; 324 int intval;
330 325
331 intval = sc->hw->conf.beacon_int ? 326 intval = sc->hw->conf.beacon_int ?
332 sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL; 327 sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL;
333 328
334 /* 329 /*
335 * The beacon interval is in TU's; the TSF in usecs. 330 * Calculate the TSF offset for this beacon slot, i.e., the
336 * We figure out how many TU's to add to align the 331 * number of usecs that need to be added to the timestamp field
337 * timestamp then convert to TSF units and handle 332 * in Beacon and Probe Response frames. Beacon slot 0 is
338 * byte swapping before writing it in the frame. 333 * processed at the correct offset, so it does not require TSF
339 * The hardware will then add this each time a beacon 334 * adjustment. Other slots are adjusted to get the timestamp
340 * frame is sent. Note that we align vif's 1..N 335 * close to the TBTT for the BSS.
341 * and leave vif 0 untouched. This means vap 0
342 * has a timestamp in one beacon interval while the
343 * others get a timestamp aligned to the next interval.
344 */ 336 */
345 tsfadjust = (intval * (ATH_BCBUF - avp->av_bslot)) / ATH_BCBUF; 337 tsfadjust = intval * avp->av_bslot / ATH_BCBUF;
346 val = cpu_to_le64(tsfadjust << 10); /* TU->TSF */ 338 avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
347 339
348 DPRINTF(sc, ATH_DBG_BEACON, 340 DPRINTF(sc, ATH_DBG_BEACON,
349 "stagger beacons, bslot %d intval %u tsfadjust %llu\n", 341 "stagger beacons, bslot %d intval %u tsfadjust %llu\n",
350 avp->av_bslot, intval, (unsigned long long)tsfadjust); 342 avp->av_bslot, intval, (unsigned long long)tsfadjust);
351 343
352 hdr = (struct ieee80211_hdr *)skb->data; 344 ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
353 memcpy(&hdr[1], &val, sizeof(val)); 345 avp->tsf_adjust;
354 } 346 } else
347 avp->tsf_adjust = cpu_to_le64(0);
355 348
356 bf->bf_mpdu = skb; 349 bf->bf_mpdu = skb;
357 bf->bf_buf_addr = bf->bf_dmacontext = 350 bf->bf_buf_addr = bf->bf_dmacontext =
@@ -447,8 +440,16 @@ void ath_beacon_tasklet(unsigned long data)
447 tsf = ath9k_hw_gettsf64(ah); 440 tsf = ath9k_hw_gettsf64(ah);
448 tsftu = TSF_TO_TU(tsf>>32, tsf); 441 tsftu = TSF_TO_TU(tsf>>32, tsf);
449 slot = ((tsftu % intval) * ATH_BCBUF) / intval; 442 slot = ((tsftu % intval) * ATH_BCBUF) / intval;
450 vif = sc->beacon.bslot[(slot + 1) % ATH_BCBUF]; 443 /*
451 aphy = sc->beacon.bslot_aphy[(slot + 1) % ATH_BCBUF]; 444 * Reverse the slot order to get slot 0 on the TBTT offset that does
445 * not require TSF adjustment and other slots adding
446 * slot/ATH_BCBUF * beacon_int to timestamp. For example, with
447 * ATH_BCBUF = 4, we process beacon slots as follows: 3 2 1 0 3 2 1 ..
448 * and slot 0 is at correct offset to TBTT.
449 */
450 slot = ATH_BCBUF - slot - 1;
451 vif = sc->beacon.bslot[slot];
452 aphy = sc->beacon.bslot_aphy[slot];
452 453
453 DPRINTF(sc, ATH_DBG_BEACON, 454 DPRINTF(sc, ATH_DBG_BEACON,
454 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n", 455 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
@@ -728,6 +729,7 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
728 ath_beacon_config_ap(sc, &conf, avp); 729 ath_beacon_config_ap(sc, &conf, avp);
729 break; 730 break;
730 case NL80211_IFTYPE_ADHOC: 731 case NL80211_IFTYPE_ADHOC:
732 case NL80211_IFTYPE_MESH_POINT:
731 ath_beacon_config_adhoc(sc, &conf, avp, vif); 733 ath_beacon_config_adhoc(sc, &conf, avp, vif);
732 break; 734 break;
733 case NL80211_IFTYPE_STATION: 735 case NL80211_IFTYPE_STATION:
diff --git a/drivers/net/wireless/ath9k/calib.c b/drivers/net/wireless/ath9k/calib.c
index c9446fb6b153..e2d62e97131c 100644
--- a/drivers/net/wireless/ath9k/calib.c
+++ b/drivers/net/wireless/ath9k/calib.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/calib.h b/drivers/net/wireless/ath9k/calib.h
index 32589e0c5018..1c74bd50700d 100644
--- a/drivers/net/wireless/ath9k/calib.h
+++ b/drivers/net/wireless/ath9k/calib.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/debug.c b/drivers/net/wireless/ath9k/debug.c
index 82573cadb1ab..fdf9528fa49b 100644
--- a/drivers/net/wireless/ath9k/debug.c
+++ b/drivers/net/wireless/ath9k/debug.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/debug.h b/drivers/net/wireless/ath9k/debug.h
index 065268b8568f..7b0e5419d2bc 100644
--- a/drivers/net/wireless/ath9k/debug.h
+++ b/drivers/net/wireless/ath9k/debug.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/eeprom.c b/drivers/net/wireless/ath9k/eeprom.c
index 183c949bcca1..ffc36b0361c7 100644
--- a/drivers/net/wireless/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath9k/eeprom.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -342,8 +342,7 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
342static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) 342static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
343{ 343{
344#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) 344#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
345 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; 345 u16 *eep_data = (u16 *)&ah->eeprom.map4k;
346 u16 *eep_data;
347 int addr, eep_start_loc = 0; 346 int addr, eep_start_loc = 0;
348 347
349 eep_start_loc = 64; 348 eep_start_loc = 64;
@@ -353,8 +352,6 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
353 "Reading from EEPROM, not flash\n"); 352 "Reading from EEPROM, not flash\n");
354 } 353 }
355 354
356 eep_data = (u16 *)eep;
357
358 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { 355 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
359 if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data)) { 356 if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data)) {
360 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 357 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
@@ -363,6 +360,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
363 } 360 }
364 eep_data++; 361 eep_data++;
365 } 362 }
363
366 return true; 364 return true;
367#undef SIZE_EEPROM_4K 365#undef SIZE_EEPROM_4K
368} 366}
@@ -379,16 +377,15 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
379 377
380 378
381 if (!ath9k_hw_use_flash(ah)) { 379 if (!ath9k_hw_use_flash(ah)) {
382
383 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, 380 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
384 &magic)) { 381 &magic)) {
385 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 382 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
386 "Reading Magic # failed\n"); 383 "Reading Magic # failed\n");
387 return false; 384 return false;
388 } 385 }
389 386
390 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 387 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
391 "Read Magic = 0x%04X\n", magic); 388 "Read Magic = 0x%04X\n", magic);
392 389
393 if (magic != AR5416_EEPROM_MAGIC) { 390 if (magic != AR5416_EEPROM_MAGIC) {
394 magic2 = swab16(magic); 391 magic2 = swab16(magic);
@@ -401,16 +398,9 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
401 temp = swab16(*eepdata); 398 temp = swab16(*eepdata);
402 *eepdata = temp; 399 *eepdata = temp;
403 eepdata++; 400 eepdata++;
404
405 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
406 "0x%04X ", *eepdata);
407
408 if (((addr + 1) % 6) == 0)
409 DPRINTF(ah->ah_sc,
410 ATH_DBG_EEPROM, "\n");
411 } 401 }
412 } else { 402 } else {
413 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 403 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
414 "Invalid EEPROM Magic. " 404 "Invalid EEPROM Magic. "
415 "endianness mismatch.\n"); 405 "endianness mismatch.\n");
416 return -EINVAL; 406 return -EINVAL;
@@ -426,7 +416,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
426 else 416 else
427 el = ah->eeprom.map4k.baseEepHeader.length; 417 el = ah->eeprom.map4k.baseEepHeader.length;
428 418
429 if (el > sizeof(struct ar5416_eeprom_def)) 419 if (el > sizeof(struct ar5416_eeprom_4k))
430 el = sizeof(struct ar5416_eeprom_4k) / sizeof(u16); 420 el = sizeof(struct ar5416_eeprom_4k) / sizeof(u16);
431 else 421 else
432 el = el / sizeof(u16); 422 el = el / sizeof(u16);
@@ -441,7 +431,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
441 u16 word; 431 u16 word;
442 432
443 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 433 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
444 "EEPROM Endianness is not native.. Changing \n"); 434 "EEPROM Endianness is not native.. Changing\n");
445 435
446 word = swab16(eep->baseEepHeader.length); 436 word = swab16(eep->baseEepHeader.length);
447 eep->baseEepHeader.length = word; 437 eep->baseEepHeader.length = word;
@@ -483,7 +473,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
483 473
484 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || 474 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
485 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { 475 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
486 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 476 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
487 "Bad EEPROM checksum 0x%x or revision 0x%04x\n", 477 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
488 sum, ah->eep_ops->get_eeprom_ver(ah)); 478 sum, ah->eep_ops->get_eeprom_ver(ah));
489 return -EINVAL; 479 return -EINVAL;
@@ -1203,57 +1193,63 @@ static void ath9k_hw_4k_set_addac(struct ath_hw *ah,
1203 } 1193 }
1204} 1194}
1205 1195
1206static bool ath9k_hw_4k_set_board_values(struct ath_hw *ah, 1196static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
1207 struct ath9k_channel *chan) 1197 struct modal_eep_4k_header *pModal,
1198 struct ar5416_eeprom_4k *eep,
1199 u8 txRxAttenLocal, int regChainOffset)
1208{ 1200{
1209 struct modal_eep_4k_header *pModal;
1210 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
1211 int regChainOffset;
1212 u8 txRxAttenLocal;
1213 u8 ob[5], db1[5], db2[5];
1214 u8 ant_div_control1, ant_div_control2;
1215 u32 regVal;
1216
1217
1218 pModal = &eep->modalHeader;
1219
1220 txRxAttenLocal = 23;
1221
1222 REG_WRITE(ah, AR_PHY_SWITCH_COM,
1223 ah->eep_ops->get_eeprom_antenna_cfg(ah, chan));
1224
1225 regChainOffset = 0;
1226 REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset, 1201 REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
1227 pModal->antCtrlChain[0]); 1202 pModal->antCtrlChain[0]);
1228 1203
1229 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset, 1204 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
1230 (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) & 1205 (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) &
1231 ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | 1206 ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
1232 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) | 1207 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
1233 SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) | 1208 SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
1234 SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); 1209 SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
1235 1210
1236 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= 1211 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
1237 AR5416_EEP_MINOR_VER_3) { 1212 AR5416_EEP_MINOR_VER_3) {
1238 txRxAttenLocal = pModal->txRxAttenCh[0]; 1213 txRxAttenLocal = pModal->txRxAttenCh[0];
1214
1239 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, 1215 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1240 AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]); 1216 AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]);
1241 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, 1217 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1242 AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]); 1218 AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]);
1243 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, 1219 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1244 AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, 1220 AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
1245 pModal->xatten2Margin[0]); 1221 pModal->xatten2Margin[0]);
1246 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, 1222 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1247 AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]); 1223 AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]);
1248 } 1224 }
1249 1225
1250 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset, 1226 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset,
1251 AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); 1227 AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
1252 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset, 1228 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset,
1253 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); 1229 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]);
1254 1230
1255 if (AR_SREV_9285_11(ah)) 1231 if (AR_SREV_9285_11(ah))
1256 REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14)); 1232 REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
1233}
1234
1235static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1236 struct ath9k_channel *chan)
1237{
1238 struct modal_eep_4k_header *pModal;
1239 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
1240 u8 txRxAttenLocal;
1241 u8 ob[5], db1[5], db2[5];
1242 u8 ant_div_control1, ant_div_control2;
1243 u32 regVal;
1244
1245 pModal = &eep->modalHeader;
1246 txRxAttenLocal = 23;
1247
1248 REG_WRITE(ah, AR_PHY_SWITCH_COM,
1249 ah->eep_ops->get_eeprom_antenna_cfg(ah, chan));
1250
1251 /* Single chain for 4K EEPROM*/
1252 ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal, 0);
1257 1253
1258 /* Initialize Ant Diversity settings from EEPROM */ 1254 /* Initialize Ant Diversity settings from EEPROM */
1259 if (pModal->version == 3) { 1255 if (pModal->version == 3) {
@@ -1295,9 +1291,6 @@ static bool ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1295 db2[4] = ((pModal->db2_234 >> 8) & 0xf); 1291 db2[4] = ((pModal->db2_234 >> 8) & 0xf);
1296 1292
1297 } else if (pModal->version == 1) { 1293 } else if (pModal->version == 1) {
1298
1299 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1300 "EEPROM Model version is set to 1 \n");
1301 ob[0] = (pModal->ob_01 & 0xf); 1294 ob[0] = (pModal->ob_01 & 0xf);
1302 ob[1] = ob[2] = ob[3] = ob[4] = (pModal->ob_01 >> 4) & 0xf; 1295 ob[1] = ob[2] = ob[3] = ob[4] = (pModal->ob_01 >> 4) & 0xf;
1303 db1[0] = (pModal->db1_01 & 0xf); 1296 db1[0] = (pModal->db1_01 & 0xf);
@@ -1385,8 +1378,6 @@ static bool ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1385 AR_PHY_SETTLING_SWITCH, 1378 AR_PHY_SETTLING_SWITCH,
1386 pModal->swSettleHt40); 1379 pModal->swSettleHt40);
1387 } 1380 }
1388
1389 return true;
1390} 1381}
1391 1382
1392static u16 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah, 1383static u16 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah,
@@ -1464,16 +1455,13 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
1464static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) 1455static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
1465{ 1456{
1466#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16)) 1457#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
1467 struct ar5416_eeprom_def *eep = &ah->eeprom.def; 1458 u16 *eep_data = (u16 *)&ah->eeprom.def;
1468 u16 *eep_data;
1469 int addr, ar5416_eep_start_loc = 0x100; 1459 int addr, ar5416_eep_start_loc = 0x100;
1470 1460
1471 eep_data = (u16 *)eep;
1472
1473 for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) { 1461 for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) {
1474 if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc, 1462 if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc,
1475 eep_data)) { 1463 eep_data)) {
1476 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1464 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
1477 "Unable to read eeprom region\n"); 1465 "Unable to read eeprom region\n");
1478 return false; 1466 return false;
1479 } 1467 }
@@ -1492,17 +1480,14 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
1492 bool need_swap = false; 1480 bool need_swap = false;
1493 int i, addr, size; 1481 int i, addr, size;
1494 1482
1495 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, 1483 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
1496 &magic)) { 1484 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Reading Magic # failed\n");
1497 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1498 "Reading Magic # failed\n");
1499 return false; 1485 return false;
1500 } 1486 }
1501 1487
1502 if (!ath9k_hw_use_flash(ah)) { 1488 if (!ath9k_hw_use_flash(ah)) {
1503
1504 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1489 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1505 "Read Magic = 0x%04X\n", magic); 1490 "Read Magic = 0x%04X\n", magic);
1506 1491
1507 if (magic != AR5416_EEPROM_MAGIC) { 1492 if (magic != AR5416_EEPROM_MAGIC) {
1508 magic2 = swab16(magic); 1493 magic2 = swab16(magic);
@@ -1516,18 +1501,11 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
1516 temp = swab16(*eepdata); 1501 temp = swab16(*eepdata);
1517 *eepdata = temp; 1502 *eepdata = temp;
1518 eepdata++; 1503 eepdata++;
1519
1520 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1521 "0x%04X ", *eepdata);
1522
1523 if (((addr + 1) % 6) == 0)
1524 DPRINTF(ah->ah_sc,
1525 ATH_DBG_EEPROM, "\n");
1526 } 1504 }
1527 } else { 1505 } else {
1528 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1506 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
1529 "Invalid EEPROM Magic. " 1507 "Invalid EEPROM Magic. "
1530 "endianness mismatch.\n"); 1508 "Endianness mismatch.\n");
1531 return -EINVAL; 1509 return -EINVAL;
1532 } 1510 }
1533 } 1511 }
@@ -1556,7 +1534,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
1556 u16 word; 1534 u16 word;
1557 1535
1558 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1536 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1559 "EEPROM Endianness is not native.. Changing \n"); 1537 "EEPROM Endianness is not native.. Changing.\n");
1560 1538
1561 word = swab16(eep->baseEepHeader.length); 1539 word = swab16(eep->baseEepHeader.length);
1562 eep->baseEepHeader.length = word; 1540 eep->baseEepHeader.length = word;
@@ -1602,7 +1580,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
1602 1580
1603 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || 1581 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
1604 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { 1582 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
1605 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1583 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
1606 "Bad EEPROM checksum 0x%x or revision 0x%04x\n", 1584 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
1607 sum, ah->eep_ops->get_eeprom_ver(ah)); 1585 sum, ah->eep_ops->get_eeprom_ver(ah));
1608 return -EINVAL; 1586 return -EINVAL;
@@ -1614,7 +1592,6 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
1614static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah, 1592static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
1615 enum eeprom_param param) 1593 enum eeprom_param param)
1616{ 1594{
1617#define AR5416_VER_MASK (pBase->version & AR5416_EEP_VER_MINOR_MASK)
1618 struct ar5416_eeprom_def *eep = &ah->eeprom.def; 1595 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
1619 struct modal_eep_header *pModal = eep->modalHeader; 1596 struct modal_eep_header *pModal = eep->modalHeader;
1620 struct base_eep_header *pBase = &eep->baseEepHeader; 1597 struct base_eep_header *pBase = &eep->baseEepHeader;
@@ -1681,21 +1658,73 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
1681 default: 1658 default:
1682 return 0; 1659 return 0;
1683 } 1660 }
1684#undef AR5416_VER_MASK
1685} 1661}
1686 1662
1687/* XXX: Clean me up, make me more legible */ 1663static void ath9k_hw_def_set_gain(struct ath_hw *ah,
1688static bool ath9k_hw_def_set_board_values(struct ath_hw *ah, 1664 struct modal_eep_header *pModal,
1665 struct ar5416_eeprom_def *eep,
1666 u8 txRxAttenLocal, int regChainOffset, int i)
1667{
1668 if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
1669 txRxAttenLocal = pModal->txRxAttenCh[i];
1670
1671 if (AR_SREV_9280_10_OR_LATER(ah)) {
1672 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1673 AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
1674 pModal->bswMargin[i]);
1675 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1676 AR_PHY_GAIN_2GHZ_XATTEN1_DB,
1677 pModal->bswAtten[i]);
1678 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1679 AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
1680 pModal->xatten2Margin[i]);
1681 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1682 AR_PHY_GAIN_2GHZ_XATTEN2_DB,
1683 pModal->xatten2Db[i]);
1684 } else {
1685 REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1686 (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
1687 ~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
1688 | SM(pModal-> bswMargin[i],
1689 AR_PHY_GAIN_2GHZ_BSW_MARGIN));
1690 REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
1691 (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
1692 ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
1693 | SM(pModal->bswAtten[i],
1694 AR_PHY_GAIN_2GHZ_BSW_ATTEN));
1695 }
1696 }
1697
1698 if (AR_SREV_9280_10_OR_LATER(ah)) {
1699 REG_RMW_FIELD(ah,
1700 AR_PHY_RXGAIN + regChainOffset,
1701 AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
1702 REG_RMW_FIELD(ah,
1703 AR_PHY_RXGAIN + regChainOffset,
1704 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[i]);
1705 } else {
1706 REG_WRITE(ah,
1707 AR_PHY_RXGAIN + regChainOffset,
1708 (REG_READ(ah, AR_PHY_RXGAIN + regChainOffset) &
1709 ~AR_PHY_RXGAIN_TXRX_ATTEN)
1710 | SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN));
1711 REG_WRITE(ah,
1712 AR_PHY_GAIN_2GHZ + regChainOffset,
1713 (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
1714 ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
1715 SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
1716 }
1717}
1718
1719static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
1689 struct ath9k_channel *chan) 1720 struct ath9k_channel *chan)
1690{ 1721{
1691#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK)
1692 struct modal_eep_header *pModal; 1722 struct modal_eep_header *pModal;
1693 struct ar5416_eeprom_def *eep = &ah->eeprom.def; 1723 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
1694 int i, regChainOffset; 1724 int i, regChainOffset;
1695 u8 txRxAttenLocal; 1725 u8 txRxAttenLocal;
1696 1726
1697 pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); 1727 pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
1698
1699 txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44; 1728 txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44;
1700 1729
1701 REG_WRITE(ah, AR_PHY_SWITCH_COM, 1730 REG_WRITE(ah, AR_PHY_SWITCH_COM,
@@ -1708,8 +1737,7 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
1708 } 1737 }
1709 1738
1710 if (AR_SREV_5416_20_OR_LATER(ah) && 1739 if (AR_SREV_5416_20_OR_LATER(ah) &&
1711 (ah->rxchainmask == 5 || ah->txchainmask == 5) 1740 (ah->rxchainmask == 5 || ah->txchainmask == 5) && (i != 0))
1712 && (i != 0))
1713 regChainOffset = (i == 1) ? 0x2000 : 0x1000; 1741 regChainOffset = (i == 1) ? 0x2000 : 0x1000;
1714 else 1742 else
1715 regChainOffset = i * 0x1000; 1743 regChainOffset = i * 0x1000;
@@ -1718,9 +1746,7 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
1718 pModal->antCtrlChain[i]); 1746 pModal->antCtrlChain[i]);
1719 1747
1720 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset, 1748 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
1721 (REG_READ(ah, 1749 (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) &
1722 AR_PHY_TIMING_CTRL4(0) +
1723 regChainOffset) &
1724 ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | 1750 ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
1725 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) | 1751 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
1726 SM(pModal->iqCalICh[i], 1752 SM(pModal->iqCalICh[i],
@@ -1728,87 +1754,9 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
1728 SM(pModal->iqCalQCh[i], 1754 SM(pModal->iqCalQCh[i],
1729 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); 1755 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
1730 1756
1731 if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) { 1757 if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah))
1732 if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) { 1758 ath9k_hw_def_set_gain(ah, pModal, eep, txRxAttenLocal,
1733 txRxAttenLocal = pModal->txRxAttenCh[i]; 1759 regChainOffset, i);
1734 if (AR_SREV_9280_10_OR_LATER(ah)) {
1735 REG_RMW_FIELD(ah,
1736 AR_PHY_GAIN_2GHZ +
1737 regChainOffset,
1738 AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
1739 pModal->
1740 bswMargin[i]);
1741 REG_RMW_FIELD(ah,
1742 AR_PHY_GAIN_2GHZ +
1743 regChainOffset,
1744 AR_PHY_GAIN_2GHZ_XATTEN1_DB,
1745 pModal->
1746 bswAtten[i]);
1747 REG_RMW_FIELD(ah,
1748 AR_PHY_GAIN_2GHZ +
1749 regChainOffset,
1750 AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
1751 pModal->
1752 xatten2Margin[i]);
1753 REG_RMW_FIELD(ah,
1754 AR_PHY_GAIN_2GHZ +
1755 regChainOffset,
1756 AR_PHY_GAIN_2GHZ_XATTEN2_DB,
1757 pModal->
1758 xatten2Db[i]);
1759 } else {
1760 REG_WRITE(ah,
1761 AR_PHY_GAIN_2GHZ +
1762 regChainOffset,
1763 (REG_READ(ah,
1764 AR_PHY_GAIN_2GHZ +
1765 regChainOffset) &
1766 ~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
1767 | SM(pModal->
1768 bswMargin[i],
1769 AR_PHY_GAIN_2GHZ_BSW_MARGIN));
1770 REG_WRITE(ah,
1771 AR_PHY_GAIN_2GHZ +
1772 regChainOffset,
1773 (REG_READ(ah,
1774 AR_PHY_GAIN_2GHZ +
1775 regChainOffset) &
1776 ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
1777 | SM(pModal->bswAtten[i],
1778 AR_PHY_GAIN_2GHZ_BSW_ATTEN));
1779 }
1780 }
1781 if (AR_SREV_9280_10_OR_LATER(ah)) {
1782 REG_RMW_FIELD(ah,
1783 AR_PHY_RXGAIN +
1784 regChainOffset,
1785 AR9280_PHY_RXGAIN_TXRX_ATTEN,
1786 txRxAttenLocal);
1787 REG_RMW_FIELD(ah,
1788 AR_PHY_RXGAIN +
1789 regChainOffset,
1790 AR9280_PHY_RXGAIN_TXRX_MARGIN,
1791 pModal->rxTxMarginCh[i]);
1792 } else {
1793 REG_WRITE(ah,
1794 AR_PHY_RXGAIN + regChainOffset,
1795 (REG_READ(ah,
1796 AR_PHY_RXGAIN +
1797 regChainOffset) &
1798 ~AR_PHY_RXGAIN_TXRX_ATTEN) |
1799 SM(txRxAttenLocal,
1800 AR_PHY_RXGAIN_TXRX_ATTEN));
1801 REG_WRITE(ah,
1802 AR_PHY_GAIN_2GHZ +
1803 regChainOffset,
1804 (REG_READ(ah,
1805 AR_PHY_GAIN_2GHZ +
1806 regChainOffset) &
1807 ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
1808 SM(pModal->rxTxMarginCh[i],
1809 AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
1810 }
1811 }
1812 } 1760 }
1813 1761
1814 if (AR_SREV_9280_10_OR_LATER(ah)) { 1762 if (AR_SREV_9280_10_OR_LATER(ah)) {
@@ -1855,8 +1803,6 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
1855 AR_AN_TOP2_LOCALBIAS, 1803 AR_AN_TOP2_LOCALBIAS,
1856 AR_AN_TOP2_LOCALBIAS_S, 1804 AR_AN_TOP2_LOCALBIAS_S,
1857 pModal->local_bias); 1805 pModal->local_bias);
1858 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "ForceXPAon: %d\n",
1859 pModal->force_xpaon);
1860 REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG, 1806 REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
1861 pModal->force_xpaon); 1807 pModal->force_xpaon);
1862 } 1808 }
@@ -1882,6 +1828,7 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
1882 1828
1883 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, 1829 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
1884 pModal->txEndToRxOn); 1830 pModal->txEndToRxOn);
1831
1885 if (AR_SREV_9280_10_OR_LATER(ah)) { 1832 if (AR_SREV_9280_10_OR_LATER(ah)) {
1886 REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62, 1833 REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
1887 pModal->thresh62); 1834 pModal->thresh62);
@@ -1912,10 +1859,10 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
1912 } 1859 }
1913 1860
1914 if (AR_SREV_9280_20_OR_LATER(ah) && 1861 if (AR_SREV_9280_20_OR_LATER(ah) &&
1915 AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19) 1862 AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19)
1916 REG_RMW_FIELD(ah, AR_PHY_CCK_TX_CTRL, 1863 REG_RMW_FIELD(ah, AR_PHY_CCK_TX_CTRL,
1917 AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK, 1864 AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK,
1918 pModal->miscBits); 1865 pModal->miscBits);
1919 1866
1920 1867
1921 if (AR_SREV_9280_20(ah) && AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20) { 1868 if (AR_SREV_9280_20(ah) && AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20) {
@@ -1926,18 +1873,15 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah,
1926 REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, 0); 1873 REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, 0);
1927 else 1874 else
1928 REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, 1875 REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE,
1929 eep->baseEepHeader.dacLpMode); 1876 eep->baseEepHeader.dacLpMode);
1930 1877
1931 REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL, AR_PHY_FRAME_CTL_TX_CLIP, 1878 REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL, AR_PHY_FRAME_CTL_TX_CLIP,
1932 pModal->miscBits >> 2); 1879 pModal->miscBits >> 2);
1933 1880
1934 REG_RMW_FIELD(ah, AR_PHY_TX_PWRCTRL9, 1881 REG_RMW_FIELD(ah, AR_PHY_TX_PWRCTRL9,
1935 AR_PHY_TX_DESIRED_SCALE_CCK, 1882 AR_PHY_TX_DESIRED_SCALE_CCK,
1936 eep->baseEepHeader.desiredScaleCCK); 1883 eep->baseEepHeader.desiredScaleCCK);
1937 } 1884 }
1938
1939 return true;
1940#undef AR5416_VER_MASK
1941} 1885}
1942 1886
1943static void ath9k_hw_def_set_addac(struct ath_hw *ah, 1887static void ath9k_hw_def_set_addac(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath9k/eeprom.h b/drivers/net/wireless/ath9k/eeprom.h
index d6f6108f63c7..25b68c881ff1 100644
--- a/drivers/net/wireless/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath9k/eeprom.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -95,6 +95,7 @@
95#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5)) 95#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
96#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM)) 96#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
97 97
98#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK)
98#define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \ 99#define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \
99 ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) 100 ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
100 101
@@ -489,7 +490,7 @@ struct eeprom_ops {
489 u8 (*get_num_ant_config)(struct ath_hw *hw, enum ieee80211_band band); 490 u8 (*get_num_ant_config)(struct ath_hw *hw, enum ieee80211_band band);
490 u16 (*get_eeprom_antenna_cfg)(struct ath_hw *hw, 491 u16 (*get_eeprom_antenna_cfg)(struct ath_hw *hw,
491 struct ath9k_channel *chan); 492 struct ath9k_channel *chan);
492 bool (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan); 493 void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan);
493 void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan); 494 void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan);
494 int (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan, 495 int (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan,
495 u16 cfgCtl, u8 twiceAntennaReduction, 496 u16 cfgCtl, u8 twiceAntennaReduction,
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
index d494e98ba971..b15eaf8417ff 100644
--- a/drivers/net/wireless/ath9k/hw.c
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -588,6 +588,10 @@ static int ath9k_hw_post_attach(struct ath_hw *ah)
588 ecode = ath9k_hw_eeprom_attach(ah); 588 ecode = ath9k_hw_eeprom_attach(ah);
589 if (ecode != 0) 589 if (ecode != 0)
590 return ecode; 590 return ecode;
591
592 DPRINTF(ah->ah_sc, ATH_DBG_CONFIG, "Eeprom VER: %d, REV: %d\n",
593 ah->eep_ops->get_eeprom_ver(ah), ah->eep_ops->get_eeprom_rev(ah));
594
591 ecode = ath9k_hw_rfattach(ah); 595 ecode = ath9k_hw_rfattach(ah);
592 if (ecode != 0) 596 if (ecode != 0)
593 return ecode; 597 return ecode;
@@ -1444,6 +1448,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
1444 REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); 1448 REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
1445 break; 1449 break;
1446 case NL80211_IFTYPE_ADHOC: 1450 case NL80211_IFTYPE_ADHOC:
1451 case NL80211_IFTYPE_MESH_POINT:
1447 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC 1452 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC
1448 | AR_STA_ID1_KSRCH_MODE); 1453 | AR_STA_ID1_KSRCH_MODE);
1449 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); 1454 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
@@ -2273,11 +2278,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2273 else 2278 else
2274 ath9k_hw_spur_mitigate(ah, chan); 2279 ath9k_hw_spur_mitigate(ah, chan);
2275 2280
2276 if (!ah->eep_ops->set_board_values(ah, chan)) { 2281 ah->eep_ops->set_board_values(ah, chan);
2277 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
2278 "error setting board options\n");
2279 return -EIO;
2280 }
2281 2282
2282 ath9k_hw_decrease_chain_power(ah, chan); 2283 ath9k_hw_decrease_chain_power(ah, chan);
2283 2284
@@ -3149,6 +3150,7 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
3149 flags |= AR_TBTT_TIMER_EN; 3150 flags |= AR_TBTT_TIMER_EN;
3150 break; 3151 break;
3151 case NL80211_IFTYPE_ADHOC: 3152 case NL80211_IFTYPE_ADHOC:
3153 case NL80211_IFTYPE_MESH_POINT:
3152 REG_SET_BIT(ah, AR_TXCFG, 3154 REG_SET_BIT(ah, AR_TXCFG,
3153 AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY); 3155 AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
3154 REG_WRITE(ah, AR_NEXT_NDP_TIMER, 3156 REG_WRITE(ah, AR_NEXT_NDP_TIMER,
diff --git a/drivers/net/wireless/ath9k/hw.h b/drivers/net/wireless/ath9k/hw.h
index dc681f011fdf..0b594e0ee260 100644
--- a/drivers/net/wireless/ath9k/hw.h
+++ b/drivers/net/wireless/ath9k/hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/initvals.h b/drivers/net/wireless/ath9k/initvals.h
index 1d60c3706f1c..e2f0a34b79a1 100644
--- a/drivers/net/wireless/ath9k/initvals.h
+++ b/drivers/net/wireless/ath9k/initvals.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/mac.c b/drivers/net/wireless/ath9k/mac.c
index f757bc7eec68..e0a6dee45839 100644
--- a/drivers/net/wireless/ath9k/mac.c
+++ b/drivers/net/wireless/ath9k/mac.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/mac.h b/drivers/net/wireless/ath9k/mac.h
index a75f65dae1d7..1176bce8b76c 100644
--- a/drivers/net/wireless/ath9k/mac.h
+++ b/drivers/net/wireless/ath9k/mac.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index 8db75f6de53e..13d4e6756c99 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -940,18 +940,25 @@ static void ath_led_blink_work(struct work_struct *work)
940 940
941 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED)) 941 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
942 return; 942 return;
943 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 943
944 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0); 944 if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
945 (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
946 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0);
947 else
948 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
949 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
945 950
946 queue_delayed_work(sc->hw->workqueue, &sc->ath_led_blink_work, 951 queue_delayed_work(sc->hw->workqueue, &sc->ath_led_blink_work,
947 (sc->sc_flags & SC_OP_LED_ON) ? 952 (sc->sc_flags & SC_OP_LED_ON) ?
948 msecs_to_jiffies(sc->led_off_duration) : 953 msecs_to_jiffies(sc->led_off_duration) :
949 msecs_to_jiffies(sc->led_on_duration)); 954 msecs_to_jiffies(sc->led_on_duration));
950 955
951 sc->led_on_duration = 956 sc->led_on_duration = sc->led_on_cnt ?
952 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25); 957 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
953 sc->led_off_duration = 958 ATH_LED_ON_DURATION_IDLE;
954 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10); 959 sc->led_off_duration = sc->led_off_cnt ?
960 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
961 ATH_LED_OFF_DURATION_IDLE;
955 sc->led_on_cnt = sc->led_off_cnt = 0; 962 sc->led_on_cnt = sc->led_off_cnt = 0;
956 if (sc->sc_flags & SC_OP_LED_ON) 963 if (sc->sc_flags & SC_OP_LED_ON)
957 sc->sc_flags &= ~SC_OP_LED_ON; 964 sc->sc_flags &= ~SC_OP_LED_ON;
@@ -1592,7 +1599,8 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1592 hw->wiphy->interface_modes = 1599 hw->wiphy->interface_modes =
1593 BIT(NL80211_IFTYPE_AP) | 1600 BIT(NL80211_IFTYPE_AP) |
1594 BIT(NL80211_IFTYPE_STATION) | 1601 BIT(NL80211_IFTYPE_STATION) |
1595 BIT(NL80211_IFTYPE_ADHOC); 1602 BIT(NL80211_IFTYPE_ADHOC) |
1603 BIT(NL80211_IFTYPE_MESH_POINT);
1596 1604
1597 hw->wiphy->reg_notifier = ath9k_reg_notifier; 1605 hw->wiphy->reg_notifier = ath9k_reg_notifier;
1598 hw->wiphy->strict_regulatory = true; 1606 hw->wiphy->strict_regulatory = true;
@@ -2200,18 +2208,13 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2200 ic_opmode = NL80211_IFTYPE_STATION; 2208 ic_opmode = NL80211_IFTYPE_STATION;
2201 break; 2209 break;
2202 case NL80211_IFTYPE_ADHOC: 2210 case NL80211_IFTYPE_ADHOC:
2203 if (sc->nbcnvifs >= ATH_BCBUF) {
2204 ret = -ENOBUFS;
2205 goto out;
2206 }
2207 ic_opmode = NL80211_IFTYPE_ADHOC;
2208 break;
2209 case NL80211_IFTYPE_AP: 2211 case NL80211_IFTYPE_AP:
2212 case NL80211_IFTYPE_MESH_POINT:
2210 if (sc->nbcnvifs >= ATH_BCBUF) { 2213 if (sc->nbcnvifs >= ATH_BCBUF) {
2211 ret = -ENOBUFS; 2214 ret = -ENOBUFS;
2212 goto out; 2215 goto out;
2213 } 2216 }
2214 ic_opmode = NL80211_IFTYPE_AP; 2217 ic_opmode = conf->type;
2215 break; 2218 break;
2216 default: 2219 default:
2217 DPRINTF(sc, ATH_DBG_FATAL, 2220 DPRINTF(sc, ATH_DBG_FATAL,
@@ -2247,7 +2250,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2247 * Note we only do this (at the moment) for station mode. 2250 * Note we only do this (at the moment) for station mode.
2248 */ 2251 */
2249 if ((conf->type == NL80211_IFTYPE_STATION) || 2252 if ((conf->type == NL80211_IFTYPE_STATION) ||
2250 (conf->type == NL80211_IFTYPE_ADHOC)) { 2253 (conf->type == NL80211_IFTYPE_ADHOC) ||
2254 (conf->type == NL80211_IFTYPE_MESH_POINT)) {
2251 if (ath9k_hw_phycounters(sc->sc_ah)) 2255 if (ath9k_hw_phycounters(sc->sc_ah))
2252 sc->imask |= ATH9K_INT_MIB; 2256 sc->imask |= ATH9K_INT_MIB;
2253 sc->imask |= ATH9K_INT_TSFOOR; 2257 sc->imask |= ATH9K_INT_TSFOOR;
@@ -2294,8 +2298,9 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
2294 del_timer_sync(&sc->ani.timer); 2298 del_timer_sync(&sc->ani.timer);
2295 2299
2296 /* Reclaim beacon resources */ 2300 /* Reclaim beacon resources */
2297 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP || 2301 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
2298 sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) { 2302 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
2303 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
2299 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 2304 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2300 ath_beacon_return(sc, avp); 2305 ath_beacon_return(sc, avp);
2301 } 2306 }
@@ -2428,6 +2433,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
2428 switch (vif->type) { 2433 switch (vif->type) {
2429 case NL80211_IFTYPE_STATION: 2434 case NL80211_IFTYPE_STATION:
2430 case NL80211_IFTYPE_ADHOC: 2435 case NL80211_IFTYPE_ADHOC:
2436 case NL80211_IFTYPE_MESH_POINT:
2431 /* Set BSSID */ 2437 /* Set BSSID */
2432 memcpy(sc->curbssid, conf->bssid, ETH_ALEN); 2438 memcpy(sc->curbssid, conf->bssid, ETH_ALEN);
2433 memcpy(avp->bssid, conf->bssid, ETH_ALEN); 2439 memcpy(avp->bssid, conf->bssid, ETH_ALEN);
@@ -2451,7 +2457,8 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
2451 } 2457 }
2452 2458
2453 if ((vif->type == NL80211_IFTYPE_ADHOC) || 2459 if ((vif->type == NL80211_IFTYPE_ADHOC) ||
2454 (vif->type == NL80211_IFTYPE_AP)) { 2460 (vif->type == NL80211_IFTYPE_AP) ||
2461 (vif->type == NL80211_IFTYPE_MESH_POINT)) {
2455 if ((conf->changed & IEEE80211_IFCC_BEACON) || 2462 if ((conf->changed & IEEE80211_IFCC_BEACON) ||
2456 (conf->changed & IEEE80211_IFCC_BEACON_ENABLED && 2463 (conf->changed & IEEE80211_IFCC_BEACON_ENABLED &&
2457 conf->enable_beacon)) { 2464 conf->enable_beacon)) {
@@ -2723,7 +2730,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2723 2730
2724 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid); 2731 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
2725 break; 2732 break;
2726 case IEEE80211_AMPDU_TX_RESUME: 2733 case IEEE80211_AMPDU_TX_OPERATIONAL:
2727 ath_tx_aggr_resume(sc, sta, tid); 2734 ath_tx_aggr_resume(sc, sta, tid);
2728 break; 2735 break;
2729 default: 2736 default:
diff --git a/drivers/net/wireless/ath9k/pci.c b/drivers/net/wireless/ath9k/pci.c
index 9a58baabb9ca..6dbc58580abb 100644
--- a/drivers/net/wireless/ath9k/pci.c
+++ b/drivers/net/wireless/ath9k/pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -87,7 +87,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
87 struct ath_softc *sc; 87 struct ath_softc *sc;
88 struct ieee80211_hw *hw; 88 struct ieee80211_hw *hw;
89 u8 csz; 89 u8 csz;
90 u32 val;
91 int ret = 0; 90 int ret = 0;
92 struct ath_hw *ah; 91 struct ath_hw *ah;
93 92
@@ -134,14 +133,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
134 133
135 pci_set_master(pdev); 134 pci_set_master(pdev);
136 135
137 /*
138 * Disable the RETRY_TIMEOUT register (0x41) to keep
139 * PCI Tx retries from interfering with C3 CPU state.
140 */
141 pci_read_config_dword(pdev, 0x40, &val);
142 if ((val & 0x0000ff00) != 0)
143 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
144
145 ret = pci_request_region(pdev, 0, "ath9k"); 136 ret = pci_request_region(pdev, 0, "ath9k");
146 if (ret) { 137 if (ret) {
147 dev_err(&pdev->dev, "PCI memory region reserve error\n"); 138 dev_err(&pdev->dev, "PCI memory region reserve error\n");
@@ -253,21 +244,12 @@ static int ath_pci_resume(struct pci_dev *pdev)
253 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 244 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
254 struct ath_wiphy *aphy = hw->priv; 245 struct ath_wiphy *aphy = hw->priv;
255 struct ath_softc *sc = aphy->sc; 246 struct ath_softc *sc = aphy->sc;
256 u32 val;
257 int err; 247 int err;
258 248
259 err = pci_enable_device(pdev); 249 err = pci_enable_device(pdev);
260 if (err) 250 if (err)
261 return err; 251 return err;
262 pci_restore_state(pdev); 252 pci_restore_state(pdev);
263 /*
264 * Suspend/Resume resets the PCI configuration space, so we have to
265 * re-disable the RETRY_TIMEOUT register (0x41) to keep
266 * PCI Tx retries from interfering with C3 CPU state
267 */
268 pci_read_config_dword(pdev, 0x40, &val);
269 if ((val & 0x0000ff00) != 0)
270 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
271 253
272 /* Enable LED */ 254 /* Enable LED */
273 ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN, 255 ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
diff --git a/drivers/net/wireless/ath9k/phy.c b/drivers/net/wireless/ath9k/phy.c
index e1494bae0f9f..8bcba906929a 100644
--- a/drivers/net/wireless/ath9k/phy.c
+++ b/drivers/net/wireless/ath9k/phy.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/phy.h b/drivers/net/wireless/ath9k/phy.h
index 1eac8c707342..0f7f8e0c9c95 100644
--- a/drivers/net/wireless/ath9k/phy.h
+++ b/drivers/net/wireless/ath9k/phy.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c
index 832735677a46..824ccbb8b7b8 100644
--- a/drivers/net/wireless/ath9k/rc.c
+++ b/drivers/net/wireless/ath9k/rc.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Video54 Technologies, Inc. 2 * Copyright (c) 2004 Video54 Technologies, Inc.
3 * Copyright (c) 2004-2008 Atheros Communications, Inc. 3 * Copyright (c) 2004-2009 Atheros Communications, Inc.
4 * 4 *
5 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -864,6 +864,8 @@ static void ath_rc_ratefind(struct ath_softc *sc,
864 rate_table, nrix, 1, 0); 864 rate_table, nrix, 1, 0);
865 ath_rc_rate_set_series(rate_table, &rates[i++], txrc, 865 ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
866 try_per_rate, nrix, 0); 866 try_per_rate, nrix, 0);
867
868 tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
867 } else { 869 } else {
868 try_per_rate = (ATH_11N_TXMAXTRY/4); 870 try_per_rate = (ATH_11N_TXMAXTRY/4);
869 /* Set the choosen rate. No RTS for first series entry. */ 871 /* Set the choosen rate. No RTS for first series entry. */
@@ -1468,16 +1470,18 @@ static void ath_rc_init(struct ath_softc *sc,
1468 ath_rc_priv->ht_cap); 1470 ath_rc_priv->ht_cap);
1469} 1471}
1470 1472
1471static u8 ath_rc_build_ht_caps(struct ath_softc *sc, bool is_ht, bool is_cw40, 1473static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
1472 bool is_sgi40) 1474 bool is_cw40, bool is_sgi40)
1473{ 1475{
1474 u8 caps = 0; 1476 u8 caps = 0;
1475 1477
1476 if (is_ht) { 1478 if (sta->ht_cap.ht_supported) {
1477 caps = WLAN_RC_HT_FLAG; 1479 caps = WLAN_RC_HT_FLAG;
1478 if (sc->sc_ah->caps.tx_chainmask != 1 && 1480 if (sc->sc_ah->caps.tx_chainmask != 1 &&
1479 ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_DS, 0, NULL)) 1481 ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_DS, 0, NULL)) {
1480 caps |= WLAN_RC_DS_FLAG; 1482 if (sta->ht_cap.mcs.rx_mask[1])
1483 caps |= WLAN_RC_DS_FLAG;
1484 }
1481 if (is_cw40) 1485 if (is_cw40)
1482 caps |= WLAN_RC_40_FLAG; 1486 caps |= WLAN_RC_40_FLAG;
1483 if (is_sgi40) 1487 if (is_sgi40)
@@ -1615,6 +1619,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1615 /* Choose rate table first */ 1619 /* Choose rate table first */
1616 1620
1617 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) || 1621 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) ||
1622 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) ||
1618 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)) { 1623 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)) {
1619 rate_table = ath_choose_rate_table(sc, sband->band, 1624 rate_table = ath_choose_rate_table(sc, sband->band,
1620 sta->ht_cap.ht_supported, 1625 sta->ht_cap.ht_supported,
@@ -1624,8 +1629,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1624 rate_table = sc->cur_rate_table; 1629 rate_table = sc->cur_rate_table;
1625 } 1630 }
1626 1631
1627 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta->ht_cap.ht_supported, 1632 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi40);
1628 is_cw40, is_sgi40);
1629 ath_rc_init(sc, priv_sta, sband, sta, rate_table); 1633 ath_rc_init(sc, priv_sta, sband, sta, rate_table);
1630} 1634}
1631 1635
@@ -1659,8 +1663,7 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1659 rate_table = ath_choose_rate_table(sc, sband->band, 1663 rate_table = ath_choose_rate_table(sc, sband->band,
1660 sta->ht_cap.ht_supported, 1664 sta->ht_cap.ht_supported,
1661 oper_cw40); 1665 oper_cw40);
1662 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, 1666 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta,
1663 sta->ht_cap.ht_supported,
1664 oper_cw40, oper_sgi40); 1667 oper_cw40, oper_sgi40);
1665 ath_rc_init(sc, priv_sta, sband, sta, rate_table); 1668 ath_rc_init(sc, priv_sta, sband, sta, rate_table);
1666 1669
diff --git a/drivers/net/wireless/ath9k/rc.h b/drivers/net/wireless/ath9k/rc.h
index db9b0b9a3431..199a3ce57d64 100644
--- a/drivers/net/wireless/ath9k/rc.h
+++ b/drivers/net/wireless/ath9k/rc.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004 Sam Leffler, Errno Consulting 2 * Copyright (c) 2004 Sam Leffler, Errno Consulting
3 * Copyright (c) 2004 Video54 Technologies, Inc. 3 * Copyright (c) 2004 Video54 Technologies, Inc.
4 * Copyright (c) 2008 Atheros Communications Inc. 4 * Copyright (c) 2008-2009 Atheros Communications Inc.
5 * 5 *
6 * Permission to use, copy, modify, and/or distribute this software for any 6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above 7 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 0bba17662a1f..71cb18d6757d 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -344,8 +344,13 @@ void ath_rx_cleanup(struct ath_softc *sc)
344 344
345 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 345 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
346 skb = bf->bf_mpdu; 346 skb = bf->bf_mpdu;
347 if (skb) 347 if (skb) {
348 dma_unmap_single(sc->dev,
349 bf->bf_buf_addr,
350 sc->rx.bufsize,
351 DMA_FROM_DEVICE);
348 dev_kfree_skb(skb); 352 dev_kfree_skb(skb);
353 }
349 } 354 }
350 355
351 if (sc->rx.rxdma.dd_desc_len != 0) 356 if (sc->rx.rxdma.dd_desc_len != 0)
diff --git a/drivers/net/wireless/ath9k/reg.h b/drivers/net/wireless/ath9k/reg.h
index d86e90e38173..52605246679f 100644
--- a/drivers/net/wireless/ath9k/reg.h
+++ b/drivers/net/wireless/ath9k/reg.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/regd.c b/drivers/net/wireless/ath9k/regd.c
index b8f9b6d6bec4..4ca625102291 100644
--- a/drivers/net/wireless/ath9k/regd.c
+++ b/drivers/net/wireless/ath9k/regd.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/regd.h b/drivers/net/wireless/ath9k/regd.h
index 8f885f3bc8df..9f5fbd4eea7a 100644
--- a/drivers/net/wireless/ath9k/regd.h
+++ b/drivers/net/wireless/ath9k/regd.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/regd_common.h b/drivers/net/wireless/ath9k/regd_common.h
index b41d0002f3fe..4d0e298cd1c7 100644
--- a/drivers/net/wireless/ath9k/regd_common.h
+++ b/drivers/net/wireless/ath9k/regd_common.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index e3f376611f85..689bdbf78808 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -64,6 +64,10 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
64static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 64static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
65 struct list_head *head); 65 struct list_head *head);
66static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf); 66static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
67static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
68 int txok);
69static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
70 int nbad, int txok, bool update_rc);
67 71
68/*********************/ 72/*********************/
69/* Aggregation logic */ 73/* Aggregation logic */
@@ -274,9 +278,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
274 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; 278 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
275 struct ath_desc *ds = bf_last->bf_desc; 279 struct ath_desc *ds = bf_last->bf_desc;
276 struct list_head bf_head, bf_pending; 280 struct list_head bf_head, bf_pending;
277 u16 seq_st = 0; 281 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
278 u32 ba[WME_BA_BMP_SIZE >> 5]; 282 u32 ba[WME_BA_BMP_SIZE >> 5];
279 int isaggr, txfail, txpending, sendbar = 0, needreset = 0; 283 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
284 bool rc_update = true;
280 285
281 skb = (struct sk_buff *)bf->bf_mpdu; 286 skb = (struct sk_buff *)bf->bf_mpdu;
282 hdr = (struct ieee80211_hdr *)skb->data; 287 hdr = (struct ieee80211_hdr *)skb->data;
@@ -316,6 +321,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
316 INIT_LIST_HEAD(&bf_pending); 321 INIT_LIST_HEAD(&bf_pending);
317 INIT_LIST_HEAD(&bf_head); 322 INIT_LIST_HEAD(&bf_head);
318 323
324 nbad = ath_tx_num_badfrms(sc, bf, txok);
319 while (bf) { 325 while (bf) {
320 txfail = txpending = 0; 326 txfail = txpending = 0;
321 bf_next = bf->bf_next; 327 bf_next = bf->bf_next;
@@ -323,8 +329,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
323 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) { 329 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
324 /* transmit completion, subframe is 330 /* transmit completion, subframe is
325 * acked by block ack */ 331 * acked by block ack */
332 acked_cnt++;
326 } else if (!isaggr && txok) { 333 } else if (!isaggr && txok) {
327 /* transmit completion */ 334 /* transmit completion */
335 acked_cnt++;
328 } else { 336 } else {
329 if (!(tid->state & AGGR_CLEANUP) && 337 if (!(tid->state & AGGR_CLEANUP) &&
330 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) { 338 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
@@ -335,6 +343,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
335 bf->bf_state.bf_type |= BUF_XRETRY; 343 bf->bf_state.bf_type |= BUF_XRETRY;
336 txfail = 1; 344 txfail = 1;
337 sendbar = 1; 345 sendbar = 1;
346 txfail_cnt++;
338 } 347 }
339 } else { 348 } else {
340 /* 349 /*
@@ -361,6 +370,13 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
361 ath_tx_update_baw(sc, tid, bf->bf_seqno); 370 ath_tx_update_baw(sc, tid, bf->bf_seqno);
362 spin_unlock_bh(&txq->axq_lock); 371 spin_unlock_bh(&txq->axq_lock);
363 372
373 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
374 ath_tx_rc_status(bf, ds, nbad, txok, true);
375 rc_update = false;
376 } else {
377 ath_tx_rc_status(bf, ds, nbad, txok, false);
378 }
379
364 ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar); 380 ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
365 } else { 381 } else {
366 /* retry the un-acked ones */ 382 /* retry the un-acked ones */
@@ -1734,7 +1750,7 @@ exit:
1734/*****************/ 1750/*****************/
1735 1751
1736static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 1752static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1737 struct ath_xmit_status *tx_status) 1753 int tx_flags)
1738{ 1754{
1739 struct ieee80211_hw *hw = sc->hw; 1755 struct ieee80211_hw *hw = sc->hw;
1740 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1756 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1755,18 +1771,14 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1755 tx_info->rate_driver_data[0] = NULL; 1771 tx_info->rate_driver_data[0] = NULL;
1756 } 1772 }
1757 1773
1758 if (tx_status->flags & ATH_TX_BAR) { 1774 if (tx_flags & ATH_TX_BAR)
1759 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 1775 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1760 tx_status->flags &= ~ATH_TX_BAR;
1761 }
1762 1776
1763 if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) { 1777 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
1764 /* Frame was ACKed */ 1778 /* Frame was ACKed */
1765 tx_info->flags |= IEEE80211_TX_STAT_ACK; 1779 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1766 } 1780 }
1767 1781
1768 tx_info->status.rates[0].count = tx_status->retries + 1;
1769
1770 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1782 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1771 padsize = hdrlen & 3; 1783 padsize = hdrlen & 3;
1772 if (padsize && hdrlen >= 24) { 1784 if (padsize && hdrlen >= 24) {
@@ -1789,29 +1801,22 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1789 int txok, int sendbar) 1801 int txok, int sendbar)
1790{ 1802{
1791 struct sk_buff *skb = bf->bf_mpdu; 1803 struct sk_buff *skb = bf->bf_mpdu;
1792 struct ath_xmit_status tx_status;
1793 unsigned long flags; 1804 unsigned long flags;
1805 int tx_flags = 0;
1794 1806
1795 /*
1796 * Set retry information.
1797 * NB: Don't use the information in the descriptor, because the frame
1798 * could be software retried.
1799 */
1800 tx_status.retries = bf->bf_retries;
1801 tx_status.flags = 0;
1802 1807
1803 if (sendbar) 1808 if (sendbar)
1804 tx_status.flags = ATH_TX_BAR; 1809 tx_flags = ATH_TX_BAR;
1805 1810
1806 if (!txok) { 1811 if (!txok) {
1807 tx_status.flags |= ATH_TX_ERROR; 1812 tx_flags |= ATH_TX_ERROR;
1808 1813
1809 if (bf_isxretried(bf)) 1814 if (bf_isxretried(bf))
1810 tx_status.flags |= ATH_TX_XRETRY; 1815 tx_flags |= ATH_TX_XRETRY;
1811 } 1816 }
1812 1817
1813 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE); 1818 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
1814 ath_tx_complete(sc, skb, &tx_status); 1819 ath_tx_complete(sc, skb, tx_flags);
1815 1820
1816 /* 1821 /*
1817 * Return the list of ath_buf of this mpdu to free queue 1822 * Return the list of ath_buf of this mpdu to free queue
@@ -1852,27 +1857,40 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
1852 return nbad; 1857 return nbad;
1853} 1858}
1854 1859
1855static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad) 1860static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
1861 int nbad, int txok, bool update_rc)
1856{ 1862{
1857 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; 1863 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
1858 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1864 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1859 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1865 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1860 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info); 1866 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
1867 struct ieee80211_hw *hw = tx_info_priv->aphy->hw;
1868 u8 i, tx_rateindex;
1869
1870 if (txok)
1871 tx_info->status.ack_signal = ds->ds_txstat.ts_rssi;
1861 1872
1862 tx_info_priv->update_rc = false; 1873 tx_rateindex = ds->ds_txstat.ts_rateindex;
1874 WARN_ON(tx_rateindex >= hw->max_rates);
1875
1876 tx_info_priv->update_rc = update_rc;
1863 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) 1877 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
1864 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 1878 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1865 1879
1866 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 && 1880 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
1867 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { 1881 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
1868 if (ieee80211_is_data(hdr->frame_control)) { 1882 if (ieee80211_is_data(hdr->frame_control)) {
1869 memcpy(&tx_info_priv->tx, &ds->ds_txstat, 1883 memcpy(&tx_info_priv->tx, &ds->ds_txstat,
1870 sizeof(tx_info_priv->tx)); 1884 sizeof(tx_info_priv->tx));
1871 tx_info_priv->n_frames = bf->bf_nframes; 1885 tx_info_priv->n_frames = bf->bf_nframes;
1872 tx_info_priv->n_bad_frames = nbad; 1886 tx_info_priv->n_bad_frames = nbad;
1873 tx_info_priv->update_rc = true;
1874 } 1887 }
1875 } 1888 }
1889
1890 for (i = tx_rateindex + 1; i < hw->max_rates; i++)
1891 tx_info->status.rates[i].count = 0;
1892
1893 tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
1876} 1894}
1877 1895
1878static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq) 1896static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
@@ -1897,7 +1915,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1897 struct ath_buf *bf, *lastbf, *bf_held = NULL; 1915 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1898 struct list_head bf_head; 1916 struct list_head bf_head;
1899 struct ath_desc *ds; 1917 struct ath_desc *ds;
1900 int txok, nbad = 0; 1918 int txok;
1901 int status; 1919 int status;
1902 1920
1903 DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", 1921 DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
@@ -1991,13 +2009,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1991 bf->bf_retries = ds->ds_txstat.ts_longretry; 2009 bf->bf_retries = ds->ds_txstat.ts_longretry;
1992 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) 2010 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
1993 bf->bf_state.bf_type |= BUF_XRETRY; 2011 bf->bf_state.bf_type |= BUF_XRETRY;
1994 nbad = 0; 2012 ath_tx_rc_status(bf, ds, 0, txok, true);
1995 } else {
1996 nbad = ath_tx_num_badfrms(sc, bf, txok);
1997 } 2013 }
1998 2014
1999 ath_tx_rc_status(bf, ds, nbad);
2000
2001 if (bf_isampdu(bf)) 2015 if (bf_isampdu(bf))
2002 ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok); 2016 ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok);
2003 else 2017 else
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index b72ef3fd315a..4896e0831114 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3993,6 +3993,8 @@ static void setup_struct_wldev_for_init(struct b43_wldev *dev)
3993 dev->irq_reason = 0; 3993 dev->irq_reason = 0;
3994 memset(dev->dma_reason, 0, sizeof(dev->dma_reason)); 3994 memset(dev->dma_reason, 0, sizeof(dev->dma_reason));
3995 dev->irq_savedstate = B43_IRQ_MASKTEMPLATE; 3995 dev->irq_savedstate = B43_IRQ_MASKTEMPLATE;
3996 if (b43_modparam_verbose < B43_VERBOSITY_DEBUG)
3997 dev->irq_savedstate &= ~B43_IRQ_PHY_TXERR;
3996 3998
3997 dev->mac_suspended = 1; 3999 dev->mac_suspended = 1;
3998 4000
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 0f53c7e5e01e..a63d88841df8 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -50,7 +50,7 @@ static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp)
50} 50}
51 51
52/* Extract the bitrate index out of an OFDM PLCP header. */ 52/* Extract the bitrate index out of an OFDM PLCP header. */
53static u8 b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy) 53static int b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy)
54{ 54{
55 int base = aphy ? 0 : 4; 55 int base = aphy ? 0 : 4;
56 56
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 205603d082aa..73f93a0ff2df 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -233,7 +233,7 @@ struct iwl3945_eeprom {
233#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */ 233#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
234 234
235#define TFD_QUEUE_MIN 0 235#define TFD_QUEUE_MIN 0
236#define TFD_QUEUE_MAX 6 236#define TFD_QUEUE_MAX 5 /* 4 DATA + 1 CMD */
237 237
238#define IWL_NUM_SCAN_RATES (2) 238#define IWL_NUM_SCAN_RATES (2)
239 239
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index f65c308a6714..af6b9d444778 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -124,7 +124,7 @@ static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
124#define IWL39_RATE_HIGH_TH 11520 124#define IWL39_RATE_HIGH_TH 11520
125#define IWL_SUCCESS_UP_TH 8960 125#define IWL_SUCCESS_UP_TH 8960
126#define IWL_SUCCESS_DOWN_TH 10880 126#define IWL_SUCCESS_DOWN_TH 10880
127#define IWL_RATE_MIN_FAILURE_TH 8 127#define IWL_RATE_MIN_FAILURE_TH 6
128#define IWL_RATE_MIN_SUCCESS_TH 8 128#define IWL_RATE_MIN_SUCCESS_TH 8
129#define IWL_RATE_DECREASE_TH 1920 129#define IWL_RATE_DECREASE_TH 1920
130#define IWL_RATE_RETRY_TH 15 130#define IWL_RATE_RETRY_TH 15
@@ -488,7 +488,7 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
488 488
489 IWL_DEBUG_RATE(priv, "enter\n"); 489 IWL_DEBUG_RATE(priv, "enter\n");
490 490
491 retries = info->status.rates[0].count - 1; 491 retries = info->status.rates[0].count;
492 /* Sanity Check for retries */ 492 /* Sanity Check for retries */
493 if (retries > IWL_RATE_RETRY_TH) 493 if (retries > IWL_RATE_RETRY_TH)
494 retries = IWL_RATE_RETRY_TH; 494 retries = IWL_RATE_RETRY_TH;
@@ -791,16 +791,15 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
791 if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) { 791 if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
792 IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n"); 792 IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
793 scale_action = -1; 793 scale_action = -1;
794
795 /* No throughput measured yet for adjacent rates, 794 /* No throughput measured yet for adjacent rates,
796 * try increase */ 795 * try increase */
797 } else if ((low_tpt == IWL_INVALID_VALUE) && 796 } else if ((low_tpt == IWL_INVALID_VALUE) &&
798 (high_tpt == IWL_INVALID_VALUE)) { 797 (high_tpt == IWL_INVALID_VALUE)) {
799 798
800 if (high != IWL_RATE_INVALID && window->success_counter >= IWL_RATE_INCREASE_TH) 799 if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
801 scale_action = 1; 800 scale_action = 1;
802 else if (low != IWL_RATE_INVALID) 801 else if (low != IWL_RATE_INVALID)
803 scale_action = -1; 802 scale_action = 0;
804 803
805 /* Both adjacent throughputs are measured, but neither one has 804 /* Both adjacent throughputs are measured, but neither one has
806 * better throughput; we're using the best rate, don't change 805 * better throughput; we're using the best rate, don't change
@@ -826,14 +825,14 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
826 else { 825 else {
827 IWL_DEBUG_RATE(priv, 826 IWL_DEBUG_RATE(priv,
828 "decrease rate because of high tpt\n"); 827 "decrease rate because of high tpt\n");
829 scale_action = -1; 828 scale_action = 0;
830 } 829 }
831 } else if (low_tpt != IWL_INVALID_VALUE) { 830 } else if (low_tpt != IWL_INVALID_VALUE) {
832 if (low_tpt > current_tpt) { 831 if (low_tpt > current_tpt) {
833 IWL_DEBUG_RATE(priv, 832 IWL_DEBUG_RATE(priv,
834 "decrease rate because of low tpt\n"); 833 "decrease rate because of low tpt\n");
835 scale_action = -1; 834 scale_action = -1;
836 } else if (window->success_counter >= IWL_RATE_INCREASE_TH) { 835 } else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
837 /* Lower rate has better 836 /* Lower rate has better
838 * throughput,decrease rate */ 837 * throughput,decrease rate */
839 scale_action = 1; 838 scale_action = 1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index ba7e720e73c1..2399328e8de7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -293,7 +293,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
293 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && 293 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
294 (txq_id != IWL_CMD_QUEUE_NUM) && 294 (txq_id != IWL_CMD_QUEUE_NUM) &&
295 priv->mac80211_registered) 295 priv->mac80211_registered)
296 ieee80211_wake_queue(priv->hw, txq_id); 296 iwl_wake_queue(priv, txq_id);
297} 297}
298 298
299/** 299/**
@@ -747,11 +747,6 @@ void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
747 int i; 747 int i;
748 int counter; 748 int counter;
749 749
750 /* classify bd */
751 if (txq->q.id == IWL_CMD_QUEUE_NUM)
752 /* nothing to cleanup after for host commands */
753 return;
754
755 /* sanity check */ 750 /* sanity check */
756 counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); 751 counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
757 if (counter > NUM_TFD_CHUNKS) { 752 if (counter > NUM_TFD_CHUNKS) {
@@ -1046,7 +1041,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
1046 goto error; 1041 goto error;
1047 1042
1048 /* Tx queue(s) */ 1043 /* Tx queue(s) */
1049 for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) { 1044 for (txq_id = 0; txq_id <= priv->hw_params.max_txq_num; txq_id++) {
1050 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? 1045 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
1051 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 1046 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
1052 rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, 1047 rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
@@ -1184,7 +1179,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
1184 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); 1179 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
1185 1180
1186 rc = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN); 1181 rc = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
1187 if(rc) 1182 if (rc)
1188 return rc; 1183 return rc;
1189 1184
1190 priv->cfg->ops->lib->apm_ops.config(priv); 1185 priv->cfg->ops->lib->apm_ops.config(priv);
@@ -1239,8 +1234,12 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
1239 int txq_id; 1234 int txq_id;
1240 1235
1241 /* Tx queues */ 1236 /* Tx queues */
1242 for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) 1237 for (txq_id = 0; txq_id <= priv->hw_params.max_txq_num; txq_id++)
1243 iwl_tx_queue_free(priv, txq_id); 1238 if (txq_id == IWL_CMD_QUEUE_NUM)
1239 iwl_cmd_queue_free(priv);
1240 else
1241 iwl_tx_queue_free(priv, txq_id);
1242
1244} 1243}
1245 1244
1246void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) 1245void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
@@ -1259,7 +1258,7 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
1259 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0); 1258 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0);
1260 1259
1261 /* reset TFD queues */ 1260 /* reset TFD queues */
1262 for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) { 1261 for (txq_id = 0; txq_id <= priv->hw_params.max_txq_num; txq_id++) {
1263 iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0); 1262 iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
1264 iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS, 1263 iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
1265 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id), 1264 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
@@ -2488,6 +2487,9 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2488 return -ENOMEM; 2487 return -ENOMEM;
2489 } 2488 }
2490 2489
2490 /* Assign number of Usable TX queues */
2491 priv->hw_params.max_txq_num = TFD_QUEUE_MAX;
2492
2491 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd); 2493 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
2492 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_3K; 2494 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_3K;
2493 priv->hw_params.max_pkt_size = 2342; 2495 priv->hw_params.max_pkt_size = 2342;
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index bd0140be774e..847a6220c5e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2178,10 +2178,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2178 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 2178 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
2179 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { 2179 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
2180 if (agg->state == IWL_AGG_OFF) 2180 if (agg->state == IWL_AGG_OFF)
2181 ieee80211_wake_queue(priv->hw, txq_id); 2181 iwl_wake_queue(priv, txq_id);
2182 else 2182 else
2183 ieee80211_wake_queue(priv->hw, 2183 iwl_wake_queue(priv, txq->swq_id);
2184 txq->swq_id);
2185 } 2184 }
2186 } 2185 }
2187 } else { 2186 } else {
@@ -2205,7 +2204,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2205 2204
2206 if (priv->mac80211_registered && 2205 if (priv->mac80211_registered &&
2207 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 2206 (iwl_queue_space(&txq->q) > txq->q.low_mark))
2208 ieee80211_wake_queue(priv->hw, txq_id); 2207 iwl_wake_queue(priv, txq_id);
2209 } 2208 }
2210 2209
2211 if (qc && likely(sta_id != IWL_INVALID_STATION)) 2210 if (qc && likely(sta_id != IWL_INVALID_STATION))
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 08c19bea71e3..e5ca2511a81a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1077,7 +1077,7 @@ static int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1077 1077
1078 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) || 1078 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
1079 (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) { 1079 (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) {
1080 IWL_WARN(priv, 1080 IWL_ERR(priv,
1081 "queue number out of range: %d, must be %d to %d\n", 1081 "queue number out of range: %d, must be %d to %d\n",
1082 txq_id, IWL50_FIRST_AMPDU_QUEUE, 1082 txq_id, IWL50_FIRST_AMPDU_QUEUE,
1083 IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1); 1083 IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1);
@@ -1295,10 +1295,9 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1295 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 1295 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1296 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { 1296 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
1297 if (agg->state == IWL_AGG_OFF) 1297 if (agg->state == IWL_AGG_OFF)
1298 ieee80211_wake_queue(priv->hw, txq_id); 1298 iwl_wake_queue(priv, txq_id);
1299 else 1299 else
1300 ieee80211_wake_queue(priv->hw, 1300 iwl_wake_queue(priv, txq->swq_id);
1301 txq->swq_id);
1302 } 1301 }
1303 } 1302 }
1304 } else { 1303 } else {
@@ -1324,7 +1323,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1324 1323
1325 if (priv->mac80211_registered && 1324 if (priv->mac80211_registered &&
1326 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 1325 (iwl_queue_space(&txq->q) > txq->q.low_mark))
1327 ieee80211_wake_queue(priv->hw, txq_id); 1326 iwl_wake_queue(priv, txq_id);
1328 } 1327 }
1329 1328
1330 if (ieee80211_is_data_qos(tx_resp->frame_ctrl)) 1329 if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 0db3bc011ac2..663dc83be501 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1567,9 +1567,8 @@ static void iwl_alive_start(struct iwl_priv *priv)
1567 if (iwl_is_associated(priv)) { 1567 if (iwl_is_associated(priv)) {
1568 struct iwl_rxon_cmd *active_rxon = 1568 struct iwl_rxon_cmd *active_rxon =
1569 (struct iwl_rxon_cmd *)&priv->active_rxon; 1569 (struct iwl_rxon_cmd *)&priv->active_rxon;
1570 1570 /* apply any changes in staging */
1571 memcpy(&priv->staging_rxon, &priv->active_rxon, 1571 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
1572 sizeof(priv->staging_rxon));
1573 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1572 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1574 } else { 1573 } else {
1575 /* Initialize our rx_config data */ 1574 /* Initialize our rx_config data */
@@ -2184,110 +2183,112 @@ static int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2184 struct iwl_priv *priv = hw->priv; 2183 struct iwl_priv *priv = hw->priv;
2185 const struct iwl_channel_info *ch_info; 2184 const struct iwl_channel_info *ch_info;
2186 struct ieee80211_conf *conf = &hw->conf; 2185 struct ieee80211_conf *conf = &hw->conf;
2187 unsigned long flags; 2186 unsigned long flags = 0;
2188 int ret = 0; 2187 int ret = 0;
2189 u16 channel; 2188 u16 ch;
2189 int scan_active = 0;
2190 2190
2191 mutex_lock(&priv->mutex); 2191 mutex_lock(&priv->mutex);
2192 IWL_DEBUG_MAC80211(priv, "enter to channel %d\n", conf->channel->hw_value); 2192 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
2193 conf->channel->hw_value, changed);
2193 2194
2194 priv->current_ht_config.is_ht = conf_is_ht(conf); 2195 if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
2195 2196 test_bit(STATUS_SCANNING, &priv->status))) {
2196 if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) { 2197 scan_active = 1;
2197 IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - waiting for uCode\n"); 2198 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
2198 goto out;
2199 } 2199 }
2200 2200
2201 if (!conf->radio_enabled)
2202 iwl_radio_kill_sw_disable_radio(priv);
2203 2201
2204 if (!iwl_is_ready(priv)) { 2202 /* during scanning mac80211 will delay channel setting until
2205 IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); 2203 * scan finish with changed = 0
2206 ret = -EIO; 2204 */
2207 goto out; 2205 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
2208 } 2206 if (scan_active)
2207 goto set_ch_out;
2208
2209 ch = ieee80211_frequency_to_channel(conf->channel->center_freq);
2210 ch_info = iwl_get_channel_info(priv, conf->channel->band, ch);
2211 if (!is_channel_valid(ch_info)) {
2212 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
2213 ret = -EINVAL;
2214 goto set_ch_out;
2215 }
2209 2216
2210 if (unlikely(!priv->cfg->mod_params->disable_hw_scan && 2217 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2211 test_bit(STATUS_SCANNING, &priv->status))) { 2218 !is_channel_ibss(ch_info)) {
2212 IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); 2219 IWL_ERR(priv, "channel %d in band %d not "
2213 mutex_unlock(&priv->mutex); 2220 "IBSS channel\n",
2214 return 0; 2221 conf->channel->hw_value, conf->channel->band);
2215 } 2222 ret = -EINVAL;
2223 goto set_ch_out;
2224 }
2216 2225
2217 channel = ieee80211_frequency_to_channel(conf->channel->center_freq); 2226 priv->current_ht_config.is_ht = conf_is_ht(conf);
2218 ch_info = iwl_get_channel_info(priv, conf->channel->band, channel);
2219 if (!is_channel_valid(ch_info)) {
2220 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
2221 ret = -EINVAL;
2222 goto out;
2223 }
2224 2227
2225 if (priv->iw_mode == NL80211_IFTYPE_ADHOC && 2228 spin_lock_irqsave(&priv->lock, flags);
2226 !is_channel_ibss(ch_info)) {
2227 IWL_ERR(priv, "channel %d in band %d not IBSS channel\n",
2228 conf->channel->hw_value, conf->channel->band);
2229 ret = -EINVAL;
2230 goto out;
2231 }
2232 2229
2233 spin_lock_irqsave(&priv->lock, flags);
2234 2230
2231 /* if we are switching from ht to 2.4 clear flags
2232 * from any ht related info since 2.4 does not
2233 * support ht */
2234 if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
2235 priv->staging_rxon.flags = 0;
2235 2236
2236 /* if we are switching from ht to 2.4 clear flags 2237 iwl_set_rxon_channel(priv, conf->channel);
2237 * from any ht related info since 2.4 does not
2238 * support ht */
2239 if ((le16_to_cpu(priv->staging_rxon.channel) != channel)
2240#ifdef IEEE80211_CONF_CHANNEL_SWITCH
2241 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH)
2242#endif
2243 )
2244 priv->staging_rxon.flags = 0;
2245 2238
2246 iwl_set_rxon_channel(priv, conf->channel); 2239 iwl_set_flags_for_band(priv, conf->channel->band);
2240 spin_unlock_irqrestore(&priv->lock, flags);
2241 set_ch_out:
2242 /* The list of supported rates and rate mask can be different
2243 * for each band; since the band may have changed, reset
2244 * the rate mask to what mac80211 lists */
2245 iwl_set_rate(priv);
2246 }
2247 2247
2248 iwl_set_flags_for_band(priv, conf->channel->band); 2248 if (changed & IEEE80211_CONF_CHANGE_PS) {
2249 if (conf->flags & IEEE80211_CONF_PS)
2250 ret = iwl_power_set_user_mode(priv, IWL_POWER_INDEX_3);
2251 else
2252 ret = iwl_power_set_user_mode(priv, IWL_POWER_MODE_CAM);
2253 if (ret)
2254 IWL_DEBUG_MAC80211(priv, "Error setting power level\n");
2249 2255
2250 /* The list of supported rates and rate mask can be different 2256 }
2251 * for each band; since the band may have changed, reset
2252 * the rate mask to what mac80211 lists */
2253 iwl_set_rate(priv);
2254 2257
2255 spin_unlock_irqrestore(&priv->lock, flags); 2258 if (changed & IEEE80211_CONF_CHANGE_POWER) {
2259 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2260 priv->tx_power_user_lmt, conf->power_level);
2256 2261
2257#ifdef IEEE80211_CONF_CHANNEL_SWITCH 2262 iwl_set_tx_power(priv, conf->power_level, false);
2258 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) { 2263 }
2259 iwl_hw_channel_switch(priv, conf->channel); 2264
2260 goto out; 2265 /* call to ensure that 4965 rx_chain is set properly in monitor mode */
2266 iwl_set_rxon_chain(priv);
2267
2268 if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) {
2269 if (conf->radio_enabled &&
2270 iwl_radio_kill_sw_enable_radio(priv)) {
2271 IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - "
2272 "waiting for uCode\n");
2273 goto out;
2274 }
2275
2276 if (!conf->radio_enabled)
2277 iwl_radio_kill_sw_disable_radio(priv);
2261 } 2278 }
2262#endif
2263 2279
2264 if (!conf->radio_enabled) { 2280 if (!conf->radio_enabled) {
2265 IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n"); 2281 IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n");
2266 goto out; 2282 goto out;
2267 } 2283 }
2268 2284
2269 if (iwl_is_rfkill(priv)) { 2285 if (!iwl_is_ready(priv)) {
2270 IWL_DEBUG_MAC80211(priv, "leave - RF kill\n"); 2286 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2271 ret = -EIO;
2272 goto out; 2287 goto out;
2273 } 2288 }
2274 2289
2275 if (conf->flags & IEEE80211_CONF_PS) 2290 if (scan_active)
2276 ret = iwl_power_set_user_mode(priv, IWL_POWER_INDEX_3); 2291 goto out;
2277 else
2278 ret = iwl_power_set_user_mode(priv, IWL_POWER_MODE_CAM);
2279 if (ret)
2280 IWL_DEBUG_MAC80211(priv, "Error setting power level\n");
2281
2282 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2283 priv->tx_power_user_lmt, conf->power_level);
2284
2285 iwl_set_tx_power(priv, conf->power_level, false);
2286
2287 iwl_set_rate(priv);
2288
2289 /* call to ensure that 4965 rx_chain is set properly in monitor mode */
2290 iwl_set_rxon_chain(priv);
2291 2292
2292 if (memcmp(&priv->active_rxon, 2293 if (memcmp(&priv->active_rxon,
2293 &priv->staging_rxon, sizeof(priv->staging_rxon))) 2294 &priv->staging_rxon, sizeof(priv->staging_rxon)))
@@ -2295,9 +2296,9 @@ static int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2295 else 2296 else
2296 IWL_DEBUG_INFO(priv, "No re-sending same RXON configuration.\n"); 2297 IWL_DEBUG_INFO(priv, "No re-sending same RXON configuration.\n");
2297 2298
2298 IWL_DEBUG_MAC80211(priv, "leave\n");
2299 2299
2300out: 2300out:
2301 IWL_DEBUG_MAC80211(priv, "leave\n");
2301 mutex_unlock(&priv->mutex); 2302 mutex_unlock(&priv->mutex);
2302 return ret; 2303 return ret;
2303} 2304}
@@ -2682,6 +2683,7 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
2682 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 2683 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2683{ 2684{
2684 struct iwl_priv *priv = hw->priv; 2685 struct iwl_priv *priv = hw->priv;
2686 int ret;
2685 2687
2686 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", 2688 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
2687 sta->addr, tid); 2689 sta->addr, tid);
@@ -2695,13 +2697,21 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
2695 return iwl_sta_rx_agg_start(priv, sta->addr, tid, *ssn); 2697 return iwl_sta_rx_agg_start(priv, sta->addr, tid, *ssn);
2696 case IEEE80211_AMPDU_RX_STOP: 2698 case IEEE80211_AMPDU_RX_STOP:
2697 IWL_DEBUG_HT(priv, "stop Rx\n"); 2699 IWL_DEBUG_HT(priv, "stop Rx\n");
2698 return iwl_sta_rx_agg_stop(priv, sta->addr, tid); 2700 ret = iwl_sta_rx_agg_stop(priv, sta->addr, tid);
2701 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2702 return 0;
2703 else
2704 return ret;
2699 case IEEE80211_AMPDU_TX_START: 2705 case IEEE80211_AMPDU_TX_START:
2700 IWL_DEBUG_HT(priv, "start Tx\n"); 2706 IWL_DEBUG_HT(priv, "start Tx\n");
2701 return iwl_tx_agg_start(priv, sta->addr, tid, ssn); 2707 return iwl_tx_agg_start(priv, sta->addr, tid, ssn);
2702 case IEEE80211_AMPDU_TX_STOP: 2708 case IEEE80211_AMPDU_TX_STOP:
2703 IWL_DEBUG_HT(priv, "stop Tx\n"); 2709 IWL_DEBUG_HT(priv, "stop Tx\n");
2704 return iwl_tx_agg_stop(priv, sta->addr, tid); 2710 ret = iwl_tx_agg_stop(priv, sta->addr, tid);
2711 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2712 return 0;
2713 else
2714 return ret;
2705 default: 2715 default:
2706 IWL_DEBUG_HT(priv, "unknown\n"); 2716 IWL_DEBUG_HT(priv, "unknown\n");
2707 return -EINVAL; 2717 return -EINVAL;
@@ -3083,11 +3093,6 @@ static ssize_t store_power_level(struct device *d,
3083 3093
3084 mutex_lock(&priv->mutex); 3094 mutex_lock(&priv->mutex);
3085 3095
3086 if (!iwl_is_ready(priv)) {
3087 ret = -EAGAIN;
3088 goto out;
3089 }
3090
3091 ret = strict_strtoul(buf, 10, &mode); 3096 ret = strict_strtoul(buf, 10, &mode);
3092 if (ret) 3097 if (ret)
3093 goto out; 3098 goto out;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 085e9cf1cac9..c54fb93e9d72 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1298,6 +1298,7 @@ int iwl_setup_mac(struct iwl_priv *priv)
1298 hw->flags = IEEE80211_HW_SIGNAL_DBM | 1298 hw->flags = IEEE80211_HW_SIGNAL_DBM |
1299 IEEE80211_HW_NOISE_DBM | 1299 IEEE80211_HW_NOISE_DBM |
1300 IEEE80211_HW_AMPDU_AGGREGATION | 1300 IEEE80211_HW_AMPDU_AGGREGATION |
1301 IEEE80211_HW_SPECTRUM_MGMT |
1301 IEEE80211_HW_SUPPORTS_PS; 1302 IEEE80211_HW_SUPPORTS_PS;
1302 hw->wiphy->interface_modes = 1303 hw->wiphy->interface_modes =
1303 BIT(NL80211_IFTYPE_STATION) | 1304 BIT(NL80211_IFTYPE_STATION) |
@@ -1308,9 +1309,6 @@ int iwl_setup_mac(struct iwl_priv *priv)
1308 1309
1309 /* Default value; 4 EDCA QOS priorities */ 1310 /* Default value; 4 EDCA QOS priorities */
1310 hw->queues = 4; 1311 hw->queues = 4;
1311 /* queues to support 11n aggregation */
1312 if (priv->cfg->sku & IWL_SKU_N)
1313 hw->ampdu_queues = priv->cfg->mod_params->num_of_ampdu_queues;
1314 1312
1315 hw->conf.beacon_int = 100; 1313 hw->conf.beacon_int = 100;
1316 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; 1314 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
@@ -1437,6 +1435,10 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1437 1435
1438 priv->tx_power_user_lmt = tx_power; 1436 priv->tx_power_user_lmt = tx_power;
1439 1437
1438 /* if nic is not up don't send command */
1439 if (!iwl_is_ready_rf(priv))
1440 return ret;
1441
1440 if (force && priv->cfg->ops->lib->send_tx_power) 1442 if (force && priv->cfg->ops->lib->send_tx_power)
1441 ret = priv->cfg->ops->lib->send_tx_power(priv); 1443 ret = priv->cfg->ops->lib->send_tx_power(priv);
1442 1444
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 27310fec2e43..a8eac8c3c1fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -264,6 +264,7 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
264* RX 264* RX
265******************************************************/ 265******************************************************/
266void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 266void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
267void iwl_cmd_queue_free(struct iwl_priv *priv);
267int iwl_rx_queue_alloc(struct iwl_priv *priv); 268int iwl_rx_queue_alloc(struct iwl_priv *priv);
268void iwl_rx_handle(struct iwl_priv *priv); 269void iwl_rx_handle(struct iwl_priv *priv);
269int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, 270int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 36cfeccfafbc..64eb585f1578 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -425,6 +425,56 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
425 return ret; 425 return ret;
426} 426}
427 427
428static ssize_t iwl_dbgfs_status_read(struct file *file,
429 char __user *user_buf,
430 size_t count, loff_t *ppos) {
431
432 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
433 char buf[512];
434 int pos = 0;
435 const size_t bufsz = sizeof(buf);
436
437 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
438 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
439 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
440 test_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status));
441 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
442 test_bit(STATUS_INT_ENABLED, &priv->status));
443 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
444 test_bit(STATUS_RF_KILL_HW, &priv->status));
445 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_SW:\t %d\n",
446 test_bit(STATUS_RF_KILL_SW, &priv->status));
447 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
448 test_bit(STATUS_INIT, &priv->status));
449 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
450 test_bit(STATUS_ALIVE, &priv->status));
451 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
452 test_bit(STATUS_READY, &priv->status));
453 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
454 test_bit(STATUS_TEMPERATURE, &priv->status));
455 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
456 test_bit(STATUS_GEO_CONFIGURED, &priv->status));
457 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
458 test_bit(STATUS_EXIT_PENDING, &priv->status));
459 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_IN_SUSPEND:\t %d\n",
460 test_bit(STATUS_IN_SUSPEND, &priv->status));
461 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
462 test_bit(STATUS_STATISTICS, &priv->status));
463 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
464 test_bit(STATUS_SCANNING, &priv->status));
465 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
466 test_bit(STATUS_SCAN_ABORTING, &priv->status));
467 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
468 test_bit(STATUS_SCAN_HW, &priv->status));
469 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
470 test_bit(STATUS_POWER_PMI, &priv->status));
471 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
472 test_bit(STATUS_FW_ERROR, &priv->status));
473 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_MODE_PENDING:\t %d\n",
474 test_bit(STATUS_MODE_PENDING, &priv->status));
475 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
476}
477
428DEBUGFS_READ_WRITE_FILE_OPS(sram); 478DEBUGFS_READ_WRITE_FILE_OPS(sram);
429DEBUGFS_WRITE_FILE_OPS(log_event); 479DEBUGFS_WRITE_FILE_OPS(log_event);
430DEBUGFS_READ_FILE_OPS(eeprom); 480DEBUGFS_READ_FILE_OPS(eeprom);
@@ -432,6 +482,7 @@ DEBUGFS_READ_FILE_OPS(stations);
432DEBUGFS_READ_FILE_OPS(rx_statistics); 482DEBUGFS_READ_FILE_OPS(rx_statistics);
433DEBUGFS_READ_FILE_OPS(tx_statistics); 483DEBUGFS_READ_FILE_OPS(tx_statistics);
434DEBUGFS_READ_FILE_OPS(channels); 484DEBUGFS_READ_FILE_OPS(channels);
485DEBUGFS_READ_FILE_OPS(status);
435 486
436/* 487/*
437 * Create the debugfs files and directories 488 * Create the debugfs files and directories
@@ -466,7 +517,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
466 DEBUGFS_ADD_FILE(rx_statistics, data); 517 DEBUGFS_ADD_FILE(rx_statistics, data);
467 DEBUGFS_ADD_FILE(tx_statistics, data); 518 DEBUGFS_ADD_FILE(tx_statistics, data);
468 DEBUGFS_ADD_FILE(channels, data); 519 DEBUGFS_ADD_FILE(channels, data);
469 DEBUGFS_ADD_X32(status, data, (u32 *)&priv->status); 520 DEBUGFS_ADD_FILE(status, data);
470 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal); 521 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
471 DEBUGFS_ADD_BOOL(disable_chain_noise, rf, 522 DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
472 &priv->disable_chain_noise_cal); 523 &priv->disable_chain_noise_cal);
@@ -496,6 +547,7 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
496 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_log_event); 547 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_log_event);
497 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations); 548 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations);
498 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_channels); 549 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_channels);
550 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_status);
499 DEBUGFS_REMOVE(priv->dbgfs->dir_data); 551 DEBUGFS_REMOVE(priv->dbgfs->dir_data);
500 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity); 552 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity);
501 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise); 553 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 0baae8022824..ec9a13846edd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -996,6 +996,12 @@ struct iwl_priv {
996 u8 key_mapping_key; 996 u8 key_mapping_key;
997 unsigned long ucode_key_table; 997 unsigned long ucode_key_table;
998 998
999 /* queue refcounts */
1000#define IWL_MAX_HW_QUEUES 32
1001 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
1002 /* for each AC */
1003 atomic_t queue_stop_count[4];
1004
999 /* Indication if ieee80211_ops->open has been called */ 1005 /* Indication if ieee80211_ops->open has been called */
1000 u8 is_open; 1006 u8 is_open;
1001 1007
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index fb64d297dd4e..a1328c3c81ae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -93,4 +93,56 @@ static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
93 return (desc->v_addr != NULL) ? 0 : -ENOMEM; 93 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
94} 94}
95 95
96/*
97 * we have 8 bits used like this:
98 *
99 * 7 6 5 4 3 2 1 0
100 * | | | | | | | |
101 * | | | | | | +-+-------- AC queue (0-3)
102 * | | | | | |
103 * | +-+-+-+-+------------ HW A-MPDU queue
104 * |
105 * +---------------------- indicates agg queue
106 */
107static inline u8 iwl_virtual_agg_queue_num(u8 ac, u8 hwq)
108{
109 BUG_ON(ac > 3); /* only have 2 bits */
110 BUG_ON(hwq > 31); /* only have 5 bits */
111
112 return 0x80 | (hwq << 2) | ac;
113}
114
115static inline void iwl_wake_queue(struct iwl_priv *priv, u8 queue)
116{
117 u8 ac = queue;
118 u8 hwq = queue;
119
120 if (queue & 0x80) {
121 ac = queue & 3;
122 hwq = (queue >> 2) & 0x1f;
123 }
124
125 if (test_and_clear_bit(hwq, priv->queue_stopped))
126 if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
127 ieee80211_wake_queue(priv->hw, ac);
128}
129
130static inline void iwl_stop_queue(struct iwl_priv *priv, u8 queue)
131{
132 u8 ac = queue;
133 u8 hwq = queue;
134
135 if (queue & 0x80) {
136 ac = queue & 3;
137 hwq = (queue >> 2) & 0x1f;
138 }
139
140 if (!test_and_set_bit(hwq, priv->queue_stopped))
141 if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
142 ieee80211_stop_queue(priv->hw, ac);
143}
144
145#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
146#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
147
96#endif /* __iwl_helpers_h__ */ 148#endif /* __iwl_helpers_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 18b7e4195ea1..47c894530eb5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -273,7 +273,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
273 if (priv->iw_mode != NL80211_IFTYPE_STATION) 273 if (priv->iw_mode != NL80211_IFTYPE_STATION)
274 final_mode = IWL_POWER_MODE_CAM; 274 final_mode = IWL_POWER_MODE_CAM;
275 275
276 if (!iwl_is_rfkill(priv) && !setting->power_disabled && 276 if (iwl_is_ready_rf(priv) && !setting->power_disabled &&
277 ((setting->power_mode != final_mode) || force)) { 277 ((setting->power_mode != final_mode) || force)) {
278 struct iwl_powertable_cmd cmd; 278 struct iwl_powertable_cmd cmd;
279 279
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 1684490d93c0..5798fe49c771 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -1138,8 +1138,10 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid)
1138 int sta_id; 1138 int sta_id;
1139 1139
1140 sta_id = iwl_find_station(priv, addr); 1140 sta_id = iwl_find_station(priv, addr);
1141 if (sta_id == IWL_INVALID_STATION) 1141 if (sta_id == IWL_INVALID_STATION) {
1142 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
1142 return -ENXIO; 1143 return -ENXIO;
1144 }
1143 1145
1144 spin_lock_irqsave(&priv->sta_lock, flags); 1146 spin_lock_irqsave(&priv->sta_lock, flags);
1145 priv->stations[sta_id].sta.station_flags_msk = 0; 1147 priv->stations[sta_id].sta.station_flags_msk = 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index dff60fb70214..1f117a49c569 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -174,7 +174,7 @@ EXPORT_SYMBOL(iwl_tx_queue_free);
174 * Free all buffers. 174 * Free all buffers.
175 * 0-fill, but do not free "txq" descriptor structure. 175 * 0-fill, but do not free "txq" descriptor structure.
176 */ 176 */
177static void iwl_cmd_queue_free(struct iwl_priv *priv) 177void iwl_cmd_queue_free(struct iwl_priv *priv)
178{ 178{
179 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; 179 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
180 struct iwl_queue *q = &txq->q; 180 struct iwl_queue *q = &txq->q;
@@ -193,12 +193,14 @@ static void iwl_cmd_queue_free(struct iwl_priv *priv)
193 193
194 /* De-alloc circular buffer of TFDs */ 194 /* De-alloc circular buffer of TFDs */
195 if (txq->q.n_bd) 195 if (txq->q.n_bd)
196 pci_free_consistent(dev, sizeof(struct iwl_tfd) * 196 pci_free_consistent(dev, priv->hw_params.tfd_size *
197 txq->q.n_bd, txq->tfds, txq->q.dma_addr); 197 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
198 198
199 /* 0-fill queue descriptor structure */ 199 /* 0-fill queue descriptor structure */
200 memset(txq, 0, sizeof(*txq)); 200 memset(txq, 0, sizeof(*txq));
201} 201}
202EXPORT_SYMBOL(iwl_cmd_queue_free);
203
202/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 204/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
203 * DMA services 205 * DMA services
204 * 206 *
@@ -761,8 +763,10 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
761 hdr->seq_ctrl |= cpu_to_le16(seq_number); 763 hdr->seq_ctrl |= cpu_to_le16(seq_number);
762 seq_number += 0x10; 764 seq_number += 0x10;
763 /* aggregation is on for this <sta,tid> */ 765 /* aggregation is on for this <sta,tid> */
764 if (info->flags & IEEE80211_TX_CTL_AMPDU) 766 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
765 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; 767 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
768 swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id);
769 }
766 priv->stations[sta_id].tid[tid].tfds_in_queue++; 770 priv->stations[sta_id].tid[tid].tfds_in_queue++;
767 } 771 }
768 772
@@ -893,7 +897,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
893 iwl_txq_update_write_ptr(priv, txq); 897 iwl_txq_update_write_ptr(priv, txq);
894 spin_unlock_irqrestore(&priv->lock, flags); 898 spin_unlock_irqrestore(&priv->lock, flags);
895 } else { 899 } else {
896 ieee80211_stop_queue(priv->hw, txq->swq_id); 900 iwl_stop_queue(priv, txq->swq_id);
897 } 901 }
898 } 902 }
899 903
@@ -1221,8 +1225,10 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1221 1225
1222 sta_id = iwl_find_station(priv, ra); 1226 sta_id = iwl_find_station(priv, ra);
1223 1227
1224 if (sta_id == IWL_INVALID_STATION) 1228 if (sta_id == IWL_INVALID_STATION) {
1229 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
1225 return -ENXIO; 1230 return -ENXIO;
1231 }
1226 1232
1227 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON) 1233 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
1228 IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n"); 1234 IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");
@@ -1429,7 +1435,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1429 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && 1435 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1430 priv->mac80211_registered && 1436 priv->mac80211_registered &&
1431 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) 1437 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1432 ieee80211_wake_queue(priv->hw, txq->swq_id); 1438 iwl_wake_queue(priv, txq->swq_id);
1433 1439
1434 iwl_txq_check_empty(priv, sta_id, tid, scd_flow); 1440 iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
1435 } 1441 }
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 4465320f2735..a71b08ca7c71 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -485,14 +485,14 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
485 memcpy(priv->stations_39[sta_id].sta.key.key, keyconf->key, 485 memcpy(priv->stations_39[sta_id].sta.key.key, keyconf->key,
486 keyconf->keylen); 486 keyconf->keylen);
487 487
488 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) 488 if ((priv->stations_39[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
489 == STA_KEY_FLG_NO_ENC) 489 == STA_KEY_FLG_NO_ENC)
490 priv->stations[sta_id].sta.key.key_offset = 490 priv->stations_39[sta_id].sta.key.key_offset =
491 iwl_get_free_ucode_key_index(priv); 491 iwl_get_free_ucode_key_index(priv);
492 /* else, we are overriding an existing key => no need to allocated room 492 /* else, we are overriding an existing key => no need to allocated room
493 * in uCode. */ 493 * in uCode. */
494 494
495 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, 495 WARN(priv->stations_39[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
496 "no space for a new key"); 496 "no space for a new key");
497 497
498 priv->stations_39[sta_id].sta.key.key_flags = key_flags; 498 priv->stations_39[sta_id].sta.key.key_flags = key_flags;
@@ -560,7 +560,7 @@ static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
560 ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id); 560 ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
561 break; 561 break;
562 default: 562 default:
563 IWL_ERR(priv,"Unknown alg: %s alg = %d\n", __func__, keyconf->alg); 563 IWL_ERR(priv, "Unknown alg: %s alg = %d\n", __func__, keyconf->alg);
564 ret = -EINVAL; 564 ret = -EINVAL;
565 } 565 }
566 566
@@ -1168,7 +1168,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
1168 spin_unlock_irqrestore(&priv->lock, flags); 1168 spin_unlock_irqrestore(&priv->lock, flags);
1169 } 1169 }
1170 1170
1171 ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb)); 1171 iwl_stop_queue(priv, skb_get_queue_mapping(skb));
1172 } 1172 }
1173 1173
1174 return 0; 1174 return 0;
@@ -3773,15 +3773,19 @@ static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed)
3773 } 3773 }
3774#endif 3774#endif
3775 3775
3776 if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) { 3776 if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) {
3777 IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - waiting for uCode\n"); 3777 if (conf->radio_enabled &&
3778 goto out; 3778 iwl_radio_kill_sw_enable_radio(priv)) {
3779 } 3779 IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - "
3780 "waiting for uCode\n");
3781 goto out;
3782 }
3780 3783
3781 if (!conf->radio_enabled) { 3784 if (!conf->radio_enabled) {
3782 iwl_radio_kill_sw_disable_radio(priv); 3785 iwl_radio_kill_sw_disable_radio(priv);
3783 IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n"); 3786 IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n");
3784 goto out; 3787 goto out;
3788 }
3785 } 3789 }
3786 3790
3787 if (iwl_is_rfkill(priv)) { 3791 if (iwl_is_rfkill(priv)) {
@@ -4546,11 +4550,6 @@ static ssize_t store_power_level(struct device *d,
4546 4550
4547 mutex_lock(&priv->mutex); 4551 mutex_lock(&priv->mutex);
4548 4552
4549 if (!iwl_is_ready(priv)) {
4550 ret = -EAGAIN;
4551 goto out;
4552 }
4553
4554 ret = strict_strtoul(buf, 10, &mode); 4553 ret = strict_strtoul(buf, 10, &mode);
4555 if (ret) 4554 if (ret)
4556 goto out; 4555 goto out;
@@ -4905,7 +4904,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
4905 4904
4906 /* Tell mac80211 our characteristics */ 4905 /* Tell mac80211 our characteristics */
4907 hw->flags = IEEE80211_HW_SIGNAL_DBM | 4906 hw->flags = IEEE80211_HW_SIGNAL_DBM |
4908 IEEE80211_HW_NOISE_DBM; 4907 IEEE80211_HW_NOISE_DBM |
4908 IEEE80211_HW_SPECTRUM_MGMT;
4909 4909
4910 hw->wiphy->interface_modes = 4910 hw->wiphy->interface_modes =
4911 BIT(NL80211_IFTYPE_STATION) | 4911 BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/libertas/radiotap.h b/drivers/net/wireless/libertas/radiotap.h
index f8eb9097ff0a..d16b26416e82 100644
--- a/drivers/net/wireless/libertas/radiotap.h
+++ b/drivers/net/wireless/libertas/radiotap.h
@@ -33,22 +33,12 @@ struct rx_radiotap_hdr {
33 struct ieee80211_radiotap_header hdr; 33 struct ieee80211_radiotap_header hdr;
34 u8 flags; 34 u8 flags;
35 u8 rate; 35 u8 rate;
36 u16 chan_freq;
37 u16 chan_flags;
38 u8 antenna;
39 u8 antsignal; 36 u8 antsignal;
40 u16 rx_flags;
41#if 0
42 u8 pad[IEEE80211_RADIOTAP_HDRLEN - 18];
43#endif
44} __attribute__ ((packed)); 37} __attribute__ ((packed));
45 38
46#define RX_RADIOTAP_PRESENT ( \ 39#define RX_RADIOTAP_PRESENT ( \
47 (1 << IEEE80211_RADIOTAP_FLAGS) | \ 40 (1 << IEEE80211_RADIOTAP_FLAGS) | \
48 (1 << IEEE80211_RADIOTAP_RATE) | \ 41 (1 << IEEE80211_RADIOTAP_RATE) | \
49 (1 << IEEE80211_RADIOTAP_CHANNEL) | \
50 (1 << IEEE80211_RADIOTAP_ANTENNA) | \
51 (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) |\ 42 (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) |\
52 (1 << IEEE80211_RADIOTAP_RX_FLAGS) | \
53 0) 43 0)
54 44
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 4f60948dde9c..63d7e19ce9bd 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -351,19 +351,11 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
351 radiotap_hdr.hdr.it_pad = 0; 351 radiotap_hdr.hdr.it_pad = 0;
352 radiotap_hdr.hdr.it_len = cpu_to_le16 (sizeof(struct rx_radiotap_hdr)); 352 radiotap_hdr.hdr.it_len = cpu_to_le16 (sizeof(struct rx_radiotap_hdr));
353 radiotap_hdr.hdr.it_present = cpu_to_le32 (RX_RADIOTAP_PRESENT); 353 radiotap_hdr.hdr.it_present = cpu_to_le32 (RX_RADIOTAP_PRESENT);
354 /* unknown values */ 354 if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
355 radiotap_hdr.flags = 0; 355 radiotap_hdr.flags |= IEEE80211_RADIOTAP_F_BADFCS;
356 radiotap_hdr.chan_freq = 0;
357 radiotap_hdr.chan_flags = 0;
358 radiotap_hdr.antenna = 0;
359 /* known values */
360 radiotap_hdr.rate = convert_mv_rate_to_radiotap(prxpd->rx_rate); 356 radiotap_hdr.rate = convert_mv_rate_to_radiotap(prxpd->rx_rate);
361 /* XXX must check no carryout */ 357 /* XXX must check no carryout */
362 radiotap_hdr.antsignal = prxpd->snr + prxpd->nf; 358 radiotap_hdr.antsignal = prxpd->snr + prxpd->nf;
363 radiotap_hdr.rx_flags = 0;
364 if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
365 radiotap_hdr.rx_flags |= IEEE80211_RADIOTAP_F_RX_BADFCS;
366 //memset(radiotap_hdr.pad, 0x11, IEEE80211_RADIOTAP_HDRLEN - 18);
367 359
368 /* chop the rxpd */ 360 /* chop the rxpd */
369 skb_pull(skb, sizeof(struct rxpd)); 361 skb_pull(skb, sizeof(struct rxpd));
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 2368b7f825a2..d4fdc8b7d7d8 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -933,7 +933,6 @@ static int __init init_mac80211_hwsim(void)
933 BIT(NL80211_IFTYPE_STATION) | 933 BIT(NL80211_IFTYPE_STATION) |
934 BIT(NL80211_IFTYPE_AP) | 934 BIT(NL80211_IFTYPE_AP) |
935 BIT(NL80211_IFTYPE_MESH_POINT); 935 BIT(NL80211_IFTYPE_MESH_POINT);
936 hw->ampdu_queues = 1;
937 936
938 hw->flags = IEEE80211_HW_MFP_CAPABLE; 937 hw->flags = IEEE80211_HW_MFP_CAPABLE;
939 938
@@ -1041,6 +1040,9 @@ static int __init init_mac80211_hwsim(void)
1041 break; 1040 break;
1042 } 1041 }
1043 1042
1043 /* give the regulatory workqueue a chance to run */
1044 if (regtest)
1045 schedule_timeout_interruptible(1);
1044 err = ieee80211_register_hw(hw); 1046 err = ieee80211_register_hw(hw);
1045 if (err < 0) { 1047 if (err < 0) {
1046 printk(KERN_DEBUG "mac80211_hwsim: " 1048 printk(KERN_DEBUG "mac80211_hwsim: "
diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig
index cfc5f41aa136..b45d6a4ed1e8 100644
--- a/drivers/net/wireless/p54/Kconfig
+++ b/drivers/net/wireless/p54/Kconfig
@@ -1,9 +1,10 @@
1config P54_COMMON 1config P54_COMMON
2 tristate "Softmac Prism54 support" 2 tristate "Softmac Prism54 support"
3 depends on MAC80211 && WLAN_80211 && FW_LOADER && EXPERIMENTAL 3 depends on MAC80211 && WLAN_80211 && EXPERIMENTAL
4 select FW_LOADER
4 ---help--- 5 ---help---
5 This is common code for isl38xx based cards. 6 This is common code for isl38xx/stlc45xx based modules.
6 This module does nothing by itself - the USB/PCI frontends 7 This module does nothing by itself - the USB/PCI/SPI front-ends
7 also need to be enabled in order to support any devices. 8 also need to be enabled in order to support any devices.
8 9
9 These devices require softmac firmware which can be found at 10 These devices require softmac firmware which can be found at
@@ -17,31 +18,6 @@ config P54_USB
17 select CRC32 18 select CRC32
18 ---help--- 19 ---help---
19 This driver is for USB isl38xx based wireless cards. 20 This driver is for USB isl38xx based wireless cards.
20 These are USB based adapters found in devices such as:
21
22 3COM 3CRWE254G72
23 SMC 2862W-G
24 Accton 802.11g WN4501 USB
25 Siemens Gigaset USB
26 Netgear WG121
27 Netgear WG111
28 Medion 40900, Roper Europe
29 Shuttle PN15, Airvast WM168g, IOGear GWU513
30 Linksys WUSB54G
31 Linksys WUSB54G Portable
32 DLink DWL-G120 Spinnaker
33 DLink DWL-G122
34 Belkin F5D7050 ver 1000
35 Cohiba Proto board
36 SMC 2862W-G version 2
37 U.S. Robotics U5 802.11g Adapter
38 FUJITSU E-5400 USB D1700
39 Sagem XG703A
40 DLink DWL-G120 Cohiba
41 Spinnaker Proto board
42 Linksys WUSB54AG
43 Inventel UR054G
44 Spinnaker DUT
45 21
46 These devices require softmac firmware which can be found at 22 These devices require softmac firmware which can be found at
47 http://prism54.org/ 23 http://prism54.org/
@@ -64,10 +40,15 @@ config P54_PCI
64 40
65config P54_SPI 41config P54_SPI
66 tristate "Prism54 SPI (stlc45xx) support" 42 tristate "Prism54 SPI (stlc45xx) support"
67 depends on P54_COMMON && SPI_MASTER 43 depends on P54_COMMON && SPI_MASTER && GENERIC_HARDIRQS
68 ---help--- 44 ---help---
69 This driver is for stlc4550 or stlc4560 based wireless chips. 45 This driver is for stlc4550 or stlc4560 based wireless chips.
70 This driver is experimental, untested and will probably only work on 46 This driver is experimental, untested and will probably only work on
71 Nokia's N800/N810 Portable Internet Tablet. 47 Nokia's N800/N810 Portable Internet Tablet.
72 48
73 If you choose to build a module, it'll be called p54spi. 49 If you choose to build a module, it'll be called p54spi.
50
51config P54_LEDS
52 bool
53 depends on P54_COMMON && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = P54_COMMON)
54 default y
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 0a989834b70d..0c1b0577d4ee 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -21,9 +21,9 @@
21#include <linux/etherdevice.h> 21#include <linux/etherdevice.h>
22 22
23#include <net/mac80211.h> 23#include <net/mac80211.h>
24#ifdef CONFIG_MAC80211_LEDS 24#ifdef CONFIG_P54_LEDS
25#include <linux/leds.h> 25#include <linux/leds.h>
26#endif /* CONFIG_MAC80211_LEDS */ 26#endif /* CONFIG_P54_LEDS */
27 27
28#include "p54.h" 28#include "p54.h"
29#include "p54common.h" 29#include "p54common.h"
@@ -2420,7 +2420,7 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
2420 return 0; 2420 return 0;
2421} 2421}
2422 2422
2423#ifdef CONFIG_MAC80211_LEDS 2423#ifdef CONFIG_P54_LEDS
2424static void p54_led_brightness_set(struct led_classdev *led_dev, 2424static void p54_led_brightness_set(struct led_classdev *led_dev,
2425 enum led_brightness brightness) 2425 enum led_brightness brightness)
2426{ 2426{
@@ -2508,7 +2508,7 @@ static void p54_unregister_leds(struct ieee80211_hw *dev)
2508 if (priv->assoc_led.registered) 2508 if (priv->assoc_led.registered)
2509 led_classdev_unregister(&priv->assoc_led.led_dev); 2509 led_classdev_unregister(&priv->assoc_led.led_dev);
2510} 2510}
2511#endif /* CONFIG_MAC80211_LEDS */ 2511#endif /* CONFIG_P54_LEDS */
2512 2512
2513static const struct ieee80211_ops p54_ops = { 2513static const struct ieee80211_ops p54_ops = {
2514 .tx = p54_tx, 2514 .tx = p54_tx,
@@ -2592,11 +2592,11 @@ int p54_register_common(struct ieee80211_hw *dev, struct device *pdev)
2592 return err; 2592 return err;
2593 } 2593 }
2594 2594
2595 #ifdef CONFIG_MAC80211_LEDS 2595#ifdef CONFIG_P54_LEDS
2596 err = p54_init_leds(dev); 2596 err = p54_init_leds(dev);
2597 if (err) 2597 if (err)
2598 return err; 2598 return err;
2599 #endif /* CONFIG_MAC80211_LEDS */ 2599#endif /* CONFIG_P54_LEDS */
2600 2600
2601 dev_info(pdev, "is registered as '%s'\n", wiphy_name(dev->wiphy)); 2601 dev_info(pdev, "is registered as '%s'\n", wiphy_name(dev->wiphy));
2602 return 0; 2602 return 0;
@@ -2610,9 +2610,9 @@ void p54_free_common(struct ieee80211_hw *dev)
2610 kfree(priv->output_limit); 2610 kfree(priv->output_limit);
2611 kfree(priv->curve_data); 2611 kfree(priv->curve_data);
2612 2612
2613 #ifdef CONFIG_MAC80211_LEDS 2613#ifdef CONFIG_P54_LEDS
2614 p54_unregister_leds(dev); 2614 p54_unregister_leds(dev);
2615 #endif /* CONFIG_MAC80211_LEDS */ 2615#endif /* CONFIG_P54_LEDS */
2616} 2616}
2617EXPORT_SYMBOL_GPL(p54_free_common); 2617EXPORT_SYMBOL_GPL(p54_free_common);
2618 2618
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 24fdfdfee3df..420fff42c0dd 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2425,6 +2425,8 @@ static struct usb_device_id rt73usb_device_table[] = {
2425 { USB_DEVICE(0x0df6, 0x9712), USB_DEVICE_DATA(&rt73usb_ops) }, 2425 { USB_DEVICE(0x0df6, 0x9712), USB_DEVICE_DATA(&rt73usb_ops) },
2426 /* Surecom */ 2426 /* Surecom */
2427 { USB_DEVICE(0x0769, 0x31f3), USB_DEVICE_DATA(&rt73usb_ops) }, 2427 { USB_DEVICE(0x0769, 0x31f3), USB_DEVICE_DATA(&rt73usb_ops) },
2428 /* Tilgin */
2429 { USB_DEVICE(0x6933, 0x5001), USB_DEVICE_DATA(&rt73usb_ops) },
2428 /* Philips */ 2430 /* Philips */
2429 { USB_DEVICE(0x0471, 0x200a), USB_DEVICE_DATA(&rt73usb_ops) }, 2431 { USB_DEVICE(0x0471, 0x200a), USB_DEVICE_DATA(&rt73usb_ops) },
2430 /* Planex */ 2432 /* Planex */
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index b728541f2fb5..3ab3eb957189 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -735,9 +735,9 @@ if (lp->tx_n_in_use > 0)
735 if (tx_status & AC_SFLD_OK) { 735 if (tx_status & AC_SFLD_OK) {
736 int ncollisions; 736 int ncollisions;
737 737
738 lp->stats.tx_packets++; 738 dev->stats.tx_packets++;
739 ncollisions = tx_status & AC_SFLD_MAXCOL; 739 ncollisions = tx_status & AC_SFLD_MAXCOL;
740 lp->stats.collisions += ncollisions; 740 dev->stats.collisions += ncollisions;
741#ifdef DEBUG_TX_INFO 741#ifdef DEBUG_TX_INFO
742 if (ncollisions > 0) 742 if (ncollisions > 0)
743 printk(KERN_DEBUG 743 printk(KERN_DEBUG
@@ -745,9 +745,9 @@ if (lp->tx_n_in_use > 0)
745 dev->name, ncollisions); 745 dev->name, ncollisions);
746#endif 746#endif
747 } else { 747 } else {
748 lp->stats.tx_errors++; 748 dev->stats.tx_errors++;
749 if (tx_status & AC_SFLD_S10) { 749 if (tx_status & AC_SFLD_S10) {
750 lp->stats.tx_carrier_errors++; 750 dev->stats.tx_carrier_errors++;
751#ifdef DEBUG_TX_FAIL 751#ifdef DEBUG_TX_FAIL
752 printk(KERN_DEBUG 752 printk(KERN_DEBUG
753 "%s: wv_complete(): tx error: no CS.\n", 753 "%s: wv_complete(): tx error: no CS.\n",
@@ -755,7 +755,7 @@ if (lp->tx_n_in_use > 0)
755#endif 755#endif
756 } 756 }
757 if (tx_status & AC_SFLD_S9) { 757 if (tx_status & AC_SFLD_S9) {
758 lp->stats.tx_carrier_errors++; 758 dev->stats.tx_carrier_errors++;
759#ifdef DEBUG_TX_FAIL 759#ifdef DEBUG_TX_FAIL
760 printk(KERN_DEBUG 760 printk(KERN_DEBUG
761 "%s: wv_complete(): tx error: lost CTS.\n", 761 "%s: wv_complete(): tx error: lost CTS.\n",
@@ -763,7 +763,7 @@ if (lp->tx_n_in_use > 0)
763#endif 763#endif
764 } 764 }
765 if (tx_status & AC_SFLD_S8) { 765 if (tx_status & AC_SFLD_S8) {
766 lp->stats.tx_fifo_errors++; 766 dev->stats.tx_fifo_errors++;
767#ifdef DEBUG_TX_FAIL 767#ifdef DEBUG_TX_FAIL
768 printk(KERN_DEBUG 768 printk(KERN_DEBUG
769 "%s: wv_complete(): tx error: slow DMA.\n", 769 "%s: wv_complete(): tx error: slow DMA.\n",
@@ -771,7 +771,7 @@ if (lp->tx_n_in_use > 0)
771#endif 771#endif
772 } 772 }
773 if (tx_status & AC_SFLD_S6) { 773 if (tx_status & AC_SFLD_S6) {
774 lp->stats.tx_heartbeat_errors++; 774 dev->stats.tx_heartbeat_errors++;
775#ifdef DEBUG_TX_FAIL 775#ifdef DEBUG_TX_FAIL
776 printk(KERN_DEBUG 776 printk(KERN_DEBUG
777 "%s: wv_complete(): tx error: heart beat.\n", 777 "%s: wv_complete(): tx error: heart beat.\n",
@@ -779,7 +779,7 @@ if (lp->tx_n_in_use > 0)
779#endif 779#endif
780 } 780 }
781 if (tx_status & AC_SFLD_S5) { 781 if (tx_status & AC_SFLD_S5) {
782 lp->stats.tx_aborted_errors++; 782 dev->stats.tx_aborted_errors++;
783#ifdef DEBUG_TX_FAIL 783#ifdef DEBUG_TX_FAIL
784 printk(KERN_DEBUG 784 printk(KERN_DEBUG
785 "%s: wv_complete(): tx error: too many collisions.\n", 785 "%s: wv_complete(): tx error: too many collisions.\n",
@@ -1346,20 +1346,6 @@ static void wv_init_info(struct net_device * dev)
1346 * or wireless extensions 1346 * or wireless extensions
1347 */ 1347 */
1348 1348
1349/*------------------------------------------------------------------*/
1350/*
1351 * Get the current Ethernet statistics. This may be called with the
1352 * card open or closed.
1353 * Used when the user read /proc/net/dev
1354 */
1355static en_stats *wavelan_get_stats(struct net_device * dev)
1356{
1357#ifdef DEBUG_IOCTL_TRACE
1358 printk(KERN_DEBUG "%s: <>wavelan_get_stats()\n", dev->name);
1359#endif
1360
1361 return &((net_local *)netdev_priv(dev))->stats;
1362}
1363 1349
1364/*------------------------------------------------------------------*/ 1350/*------------------------------------------------------------------*/
1365/* 1351/*
@@ -2466,7 +2452,7 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
2466 "%s: wv_packet_read(): could not alloc_skb(%d, GFP_ATOMIC).\n", 2452 "%s: wv_packet_read(): could not alloc_skb(%d, GFP_ATOMIC).\n",
2467 dev->name, sksize); 2453 dev->name, sksize);
2468#endif 2454#endif
2469 lp->stats.rx_dropped++; 2455 dev->stats.rx_dropped++;
2470 return; 2456 return;
2471 } 2457 }
2472 2458
@@ -2526,8 +2512,8 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
2526 netif_rx(skb); 2512 netif_rx(skb);
2527 2513
2528 /* Keep statistics up to date */ 2514 /* Keep statistics up to date */
2529 lp->stats.rx_packets++; 2515 dev->stats.rx_packets++;
2530 lp->stats.rx_bytes += sksize; 2516 dev->stats.rx_bytes += sksize;
2531 2517
2532#ifdef DEBUG_RX_TRACE 2518#ifdef DEBUG_RX_TRACE
2533 printk(KERN_DEBUG "%s: <-wv_packet_read()\n", dev->name); 2519 printk(KERN_DEBUG "%s: <-wv_packet_read()\n", dev->name);
@@ -2608,7 +2594,7 @@ static void wv_receive(struct net_device * dev)
2608#endif 2594#endif
2609 } else { /* If reception was no successful */ 2595 } else { /* If reception was no successful */
2610 2596
2611 lp->stats.rx_errors++; 2597 dev->stats.rx_errors++;
2612 2598
2613#ifdef DEBUG_RX_INFO 2599#ifdef DEBUG_RX_INFO
2614 printk(KERN_DEBUG 2600 printk(KERN_DEBUG
@@ -2624,7 +2610,7 @@ static void wv_receive(struct net_device * dev)
2624#endif 2610#endif
2625 2611
2626 if ((fd.fd_status & FD_STATUS_S7) != 0) { 2612 if ((fd.fd_status & FD_STATUS_S7) != 0) {
2627 lp->stats.rx_length_errors++; 2613 dev->stats.rx_length_errors++;
2628#ifdef DEBUG_RX_FAIL 2614#ifdef DEBUG_RX_FAIL
2629 printk(KERN_DEBUG 2615 printk(KERN_DEBUG
2630 "%s: wv_receive(): frame too short.\n", 2616 "%s: wv_receive(): frame too short.\n",
@@ -2633,7 +2619,7 @@ static void wv_receive(struct net_device * dev)
2633 } 2619 }
2634 2620
2635 if ((fd.fd_status & FD_STATUS_S8) != 0) { 2621 if ((fd.fd_status & FD_STATUS_S8) != 0) {
2636 lp->stats.rx_over_errors++; 2622 dev->stats.rx_over_errors++;
2637#ifdef DEBUG_RX_FAIL 2623#ifdef DEBUG_RX_FAIL
2638 printk(KERN_DEBUG 2624 printk(KERN_DEBUG
2639 "%s: wv_receive(): rx DMA overrun.\n", 2625 "%s: wv_receive(): rx DMA overrun.\n",
@@ -2642,7 +2628,7 @@ static void wv_receive(struct net_device * dev)
2642 } 2628 }
2643 2629
2644 if ((fd.fd_status & FD_STATUS_S9) != 0) { 2630 if ((fd.fd_status & FD_STATUS_S9) != 0) {
2645 lp->stats.rx_fifo_errors++; 2631 dev->stats.rx_fifo_errors++;
2646#ifdef DEBUG_RX_FAIL 2632#ifdef DEBUG_RX_FAIL
2647 printk(KERN_DEBUG 2633 printk(KERN_DEBUG
2648 "%s: wv_receive(): ran out of resources.\n", 2634 "%s: wv_receive(): ran out of resources.\n",
@@ -2651,7 +2637,7 @@ static void wv_receive(struct net_device * dev)
2651 } 2637 }
2652 2638
2653 if ((fd.fd_status & FD_STATUS_S10) != 0) { 2639 if ((fd.fd_status & FD_STATUS_S10) != 0) {
2654 lp->stats.rx_frame_errors++; 2640 dev->stats.rx_frame_errors++;
2655#ifdef DEBUG_RX_FAIL 2641#ifdef DEBUG_RX_FAIL
2656 printk(KERN_DEBUG 2642 printk(KERN_DEBUG
2657 "%s: wv_receive(): alignment error.\n", 2643 "%s: wv_receive(): alignment error.\n",
@@ -2660,7 +2646,7 @@ static void wv_receive(struct net_device * dev)
2660 } 2646 }
2661 2647
2662 if ((fd.fd_status & FD_STATUS_S11) != 0) { 2648 if ((fd.fd_status & FD_STATUS_S11) != 0) {
2663 lp->stats.rx_crc_errors++; 2649 dev->stats.rx_crc_errors++;
2664#ifdef DEBUG_RX_FAIL 2650#ifdef DEBUG_RX_FAIL
2665 printk(KERN_DEBUG 2651 printk(KERN_DEBUG
2666 "%s: wv_receive(): CRC error.\n", 2652 "%s: wv_receive(): CRC error.\n",
@@ -2826,7 +2812,7 @@ static int wv_packet_write(struct net_device * dev, void *buf, short length)
2826 dev->trans_start = jiffies; 2812 dev->trans_start = jiffies;
2827 2813
2828 /* Keep stats up to date. */ 2814 /* Keep stats up to date. */
2829 lp->stats.tx_bytes += length; 2815 dev->stats.tx_bytes += length;
2830 2816
2831 if (lp->tx_first_in_use == I82586NULL) 2817 if (lp->tx_first_in_use == I82586NULL)
2832 lp->tx_first_in_use = txblock; 2818 lp->tx_first_in_use = txblock;
@@ -4038,6 +4024,22 @@ static int wavelan_close(struct net_device * dev)
4038 return 0; 4024 return 0;
4039} 4025}
4040 4026
4027static const struct net_device_ops wavelan_netdev_ops = {
4028 .ndo_open = wavelan_open,
4029 .ndo_stop = wavelan_close,
4030 .ndo_start_xmit = wavelan_packet_xmit,
4031 .ndo_set_multicast_list = wavelan_set_multicast_list,
4032 .ndo_tx_timeout = wavelan_watchdog,
4033 .ndo_change_mtu = eth_change_mtu,
4034 .ndo_validate_addr = eth_validate_addr,
4035#ifdef SET_MAC_ADDRESS
4036 .ndo_set_mac_address = wavelan_set_mac_address
4037#else
4038 .ndo_set_mac_address = eth_mac_addr,
4039#endif
4040};
4041
4042
4041/*------------------------------------------------------------------*/ 4043/*------------------------------------------------------------------*/
4042/* 4044/*
4043 * Probe an I/O address, and if the WaveLAN is there configure the 4045 * Probe an I/O address, and if the WaveLAN is there configure the
@@ -4130,17 +4132,8 @@ static int __init wavelan_config(struct net_device *dev, unsigned short ioaddr)
4130 /* Init spinlock */ 4132 /* Init spinlock */
4131 spin_lock_init(&lp->spinlock); 4133 spin_lock_init(&lp->spinlock);
4132 4134
4133 dev->open = wavelan_open; 4135 dev->netdev_ops = &wavelan_netdev_ops;
4134 dev->stop = wavelan_close; 4136 dev->watchdog_timeo = WATCHDOG_JIFFIES;
4135 dev->hard_start_xmit = wavelan_packet_xmit;
4136 dev->get_stats = wavelan_get_stats;
4137 dev->set_multicast_list = &wavelan_set_multicast_list;
4138 dev->tx_timeout = &wavelan_watchdog;
4139 dev->watchdog_timeo = WATCHDOG_JIFFIES;
4140#ifdef SET_MAC_ADDRESS
4141 dev->set_mac_address = &wavelan_set_mac_address;
4142#endif /* SET_MAC_ADDRESS */
4143
4144 dev->wireless_handlers = &wavelan_handler_def; 4137 dev->wireless_handlers = &wavelan_handler_def;
4145 lp->wireless_data.spy_data = &lp->spy_data; 4138 lp->wireless_data.spy_data = &lp->spy_data;
4146 dev->wireless_data = &lp->wireless_data; 4139 dev->wireless_data = &lp->wireless_data;
diff --git a/drivers/net/wireless/wavelan.p.h b/drivers/net/wireless/wavelan.p.h
index 44d31bbf39e4..2daa0210d789 100644
--- a/drivers/net/wireless/wavelan.p.h
+++ b/drivers/net/wireless/wavelan.p.h
@@ -459,11 +459,9 @@ static const char *version = "wavelan.c : v24 (SMP + wireless extensions) 11/12/
459/****************************** TYPES ******************************/ 459/****************************** TYPES ******************************/
460 460
461/* Shortcuts */ 461/* Shortcuts */
462typedef struct net_device_stats en_stats;
463typedef struct iw_statistics iw_stats; 462typedef struct iw_statistics iw_stats;
464typedef struct iw_quality iw_qual; 463typedef struct iw_quality iw_qual;
465typedef struct iw_freq iw_freq; 464typedef struct iw_freq iw_freq;typedef struct net_local net_local;
466typedef struct net_local net_local;
467typedef struct timer_list timer_list; 465typedef struct timer_list timer_list;
468 466
469/* Basic types */ 467/* Basic types */
@@ -475,15 +473,12 @@ typedef u_char mac_addr[WAVELAN_ADDR_SIZE]; /* Hardware address */
475 * For each network interface, Linux keeps data in two structures: "device" 473 * For each network interface, Linux keeps data in two structures: "device"
476 * keeps the generic data (same format for everybody) and "net_local" keeps 474 * keeps the generic data (same format for everybody) and "net_local" keeps
477 * additional specific data. 475 * additional specific data.
478 * Note that some of this specific data is in fact generic (en_stats, for
479 * example).
480 */ 476 */
481struct net_local 477struct net_local
482{ 478{
483 net_local * next; /* linked list of the devices */ 479 net_local * next; /* linked list of the devices */
484 struct net_device * dev; /* reverse link */ 480 struct net_device * dev; /* reverse link */
485 spinlock_t spinlock; /* Serialize access to the hardware (SMP) */ 481 spinlock_t spinlock; /* Serialize access to the hardware (SMP) */
486 en_stats stats; /* Ethernet interface statistics */
487 int nresets; /* number of hardware resets */ 482 int nresets; /* number of hardware resets */
488 u_char reconfig_82586; /* We need to reconfigure the controller. */ 483 u_char reconfig_82586; /* We need to reconfigure the controller. */
489 u_char promiscuous; /* promiscuous mode */ 484 u_char promiscuous; /* promiscuous mode */
@@ -601,8 +596,6 @@ static void
601static inline void 596static inline void
602 wv_init_info(struct net_device *); /* display startup info */ 597 wv_init_info(struct net_device *); /* display startup info */
603/* ------------------- IOCTL, STATS & RECONFIG ------------------- */ 598/* ------------------- IOCTL, STATS & RECONFIG ------------------- */
604static en_stats *
605 wavelan_get_stats(struct net_device *); /* Give stats /proc/net/dev */
606static iw_stats * 599static iw_stats *
607 wavelan_get_wireless_stats(struct net_device *); 600 wavelan_get_wireless_stats(struct net_device *);
608static void 601static void
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index b1b947edcf01..540a2948596c 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -53,11 +53,11 @@ config SSB_B43_PCI_BRIDGE
53 53
54config SSB_PCMCIAHOST_POSSIBLE 54config SSB_PCMCIAHOST_POSSIBLE
55 bool 55 bool
56 depends on SSB && (PCMCIA = y || PCMCIA = SSB) && EXPERIMENTAL 56 depends on SSB && (PCMCIA = y || PCMCIA = SSB)
57 default y 57 default y
58 58
59config SSB_PCMCIAHOST 59config SSB_PCMCIAHOST
60 bool "Support for SSB on PCMCIA-bus host (EXPERIMENTAL)" 60 bool "Support for SSB on PCMCIA-bus host"
61 depends on SSB_PCMCIAHOST_POSSIBLE 61 depends on SSB_PCMCIAHOST_POSSIBLE
62 select SSB_SPROM 62 select SSB_SPROM
63 help 63 help
@@ -107,14 +107,14 @@ config SSB_DRIVER_PCICORE
107 If unsure, say Y 107 If unsure, say Y
108 108
109config SSB_PCICORE_HOSTMODE 109config SSB_PCICORE_HOSTMODE
110 bool "Hostmode support for SSB PCI core (EXPERIMENTAL)" 110 bool "Hostmode support for SSB PCI core"
111 depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS && EXPERIMENTAL 111 depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS
112 help 112 help
113 PCIcore hostmode operation (external PCI bus). 113 PCIcore hostmode operation (external PCI bus).
114 114
115config SSB_DRIVER_MIPS 115config SSB_DRIVER_MIPS
116 bool "SSB Broadcom MIPS core driver (EXPERIMENTAL)" 116 bool "SSB Broadcom MIPS core driver"
117 depends on SSB && MIPS && EXPERIMENTAL 117 depends on SSB && MIPS
118 select SSB_SERIAL 118 select SSB_SERIAL
119 help 119 help
120 Driver for the Sonics Silicon Backplane attached 120 Driver for the Sonics Silicon Backplane attached
@@ -129,8 +129,8 @@ config SSB_EMBEDDED
129 default y 129 default y
130 130
131config SSB_DRIVER_EXTIF 131config SSB_DRIVER_EXTIF
132 bool "SSB Broadcom EXTIF core driver (EXPERIMENTAL)" 132 bool "SSB Broadcom EXTIF core driver"
133 depends on SSB_DRIVER_MIPS && EXPERIMENTAL 133 depends on SSB_DRIVER_MIPS
134 help 134 help
135 Driver for the Sonics Silicon Backplane attached 135 Driver for the Sonics Silicon Backplane attached
136 Broadcom EXTIF core. 136 Broadcom EXTIF core.
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index 27a677584a4c..ef9c6a04ad8f 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -18,6 +18,7 @@
18 18
19static const struct pci_device_id b43_pci_bridge_tbl[] = { 19static const struct pci_device_id b43_pci_bridge_tbl[] = {
20 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4301) }, 20 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4301) },
21 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4306) },
21 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4307) }, 22 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4307) },
22 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4311) }, 23 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4311) },
23 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) }, 24 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) },