Diffstat (limited to 'drivers/net/wireless')
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/aiutils.c  | 121
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/aiutils.h  |   7
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/dma.c      | 101
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/main.c     |  14
4 files changed, 59 insertions, 184 deletions
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index a99a1633cc1..f78350a668d 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -477,50 +477,6 @@ static void ai_scan(struct si_pub *sih, struct bcma_bus *bus)
 	}
 }
 
-static struct bcma_device *ai_find_bcma_core(struct si_pub *sih, uint coreidx)
-{
-	struct si_info *sii = (struct si_info *)sih;
-	struct bcma_device *core;
-
-	list_for_each_entry(core, &sii->icbus->cores, list) {
-		if (core->core_index == coreidx)
-			return core;
-	}
-	return NULL;
-}
-/*
- * This function changes the logical "focus" to the indicated core.
- * Return the current core's virtual address. Since each core starts with the
- * same set of registers (BIST, clock control, etc), the returned address
- * contains the first register of this 'common' register block (not to be
- * confused with 'common core').
- */
-void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx)
-{
-	struct si_info *sii = (struct si_info *)sih;
-	struct bcma_device *core;
-
-	if (sii->curidx != coreidx) {
-		core = ai_find_bcma_core(sih, coreidx);
-		if (core == NULL)
-			return NULL;
-
-		(void)bcma_aread32(core, BCMA_IOST);
-		sii->curidx = coreidx;
-	}
-	return sii->curmap;
-}
-
-uint ai_corerev(struct si_pub *sih)
-{
-	struct si_info *sii;
-	u32 cib;
-
-	sii = (struct si_info *)sih;
-	cib = sii->cib[sii->curidx];
-	return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
-}
-
 /* return true if PCIE capability exists in the pci config space */
 static bool ai_ispcie(struct si_info *sii)
 {
@@ -579,9 +535,8 @@ ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
 	for (i = 0; i < sii->numcores; i++) {
 		uint cid, crev;
 
-		ai_setcoreidx(&sii->pub, i);
-		cid = ai_coreid(&sii->pub);
-		crev = ai_corerev(&sii->pub);
+		cid = sii->coreid[i];
+		crev = (sii->cib[i] & CIB_REV_MASK) >> CIB_REV_SHIFT;
 
 		if (cid == PCI_CORE_ID) {
 			pciidx = i;
@@ -804,22 +759,6 @@ void ai_detach(struct si_pub *sih)
 	kfree(sii);
 }
 
-uint ai_coreid(struct si_pub *sih)
-{
-	struct si_info *sii;
-
-	sii = (struct si_info *)sih;
-	return sii->coreid[sii->curidx];
-}
-
-uint ai_coreidx(struct si_pub *sih)
-{
-	struct si_info *sii;
-
-	sii = (struct si_info *)sih;
-	return sii->curidx;
-}
-
 /* return index of coreid or BADIDX if not found */
 struct bcma_device *ai_findcore(struct si_pub *sih, u16 coreid, u16 coreunit)
 {
@@ -842,45 +781,17 @@ struct bcma_device *ai_findcore(struct si_pub *sih, u16 coreid, u16 coreunit)
 }
 
 /*
- * This function changes logical "focus" to the indicated core;
- * must be called with interrupts off.
- * Moreover, callers should keep interrupts off during switching
- * out of and back to d11 core.
- */
-void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit)
-{
-	struct bcma_device *core;
-
-	core = ai_findcore(sih, coreid, coreunit);
-	if (core == NULL)
-		return NULL;
-
-	return ai_setcoreidx(sih, core->core_index);
-}
-
-/*
- * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
- * operation, switch back to the original core, and return the new value.
- *
- * When using the silicon backplane, no fiddling with interrupts or core
- * switches is needed.
- *
- * Also, when using pci/pcie, we can optimize away the core switching for pci
- * registers and (on newer pci cores) chipcommon registers.
+ * read/modify chipcommon core register.
  */
 uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val)
 {
 	struct bcma_device *cc;
-	uint origidx = 0;
 	u32 w;
 	struct si_info *sii;
 
 	sii = (struct si_info *)sih;
 	cc = sii->icbus->drv_cc.core;
 
-	/* save current core index */
-	origidx = ai_coreidx(&sii->pub);
-
 	/* mask and set */
 	if (mask || val) {
 		bcma_maskset32(cc, regoff, ~mask, val);
@@ -889,9 +800,6 @@ uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val)
 	/* readback */
 	w = bcma_read32(cc, regoff);
 
-	/* restore core index */
-	ai_setcoreidx(&sii->pub, origidx);
-
 	return w;
 }
 
@@ -1237,20 +1145,10 @@ void ai_pci_down(struct si_pub *sih)
 void ai_pci_setup(struct si_pub *sih, uint coremask)
 {
 	struct si_info *sii;
-	struct sbpciregs __iomem *regs = NULL;
 	u32 w;
-	uint idx = 0;
 
 	sii = (struct si_info *)sih;
 
-	if (PCI(sih)) {
-		/* get current core index */
-		idx = sii->curidx;
-
-		/* switch over to pci core */
-		regs = ai_setcoreidx(sih, sii->buscoreidx);
-	}
-
 	/*
 	 * Enable sb->pci interrupts. Assume
 	 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
@@ -1264,9 +1162,6 @@ void ai_pci_setup(struct si_pub *sih, uint coremask)
 
 	if (PCI(sih)) {
 		pcicore_pci_setup(sii->pch);
-
-		/* switch back to previous core */
-		ai_setcoreidx(sih, idx);
 	}
 }
 
@@ -1276,21 +1171,11 @@ void ai_pci_setup(struct si_pub *sih, uint coremask)
  */
 int ai_pci_fixcfg(struct si_pub *sih)
 {
-	uint origidx;
-	void __iomem *regs = NULL;
 	struct si_info *sii = (struct si_info *)sih;
 
 	/* Fixup PI in SROM shadow area to enable the correct PCI core access */
-	/* save the current index */
-	origidx = ai_coreidx(&sii->pub);
-
 	/* check 'pi' is correct and fix it if not */
-	regs = ai_setcore(&sii->pub, ai_get_buscoretype(sih), 0);
 	pcicore_fixcfg(sii->pch);
-
-	/* restore the original index */
-	ai_setcoreidx(&sii->pub, origidx);
-
 	pcicore_hwup(sii->pch);
 	return 0;
 }
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index e37c9f4d843..6742758e4d4 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -221,19 +221,12 @@ struct si_info {
 /* AMBA Interconnect exported externs */
 extern struct bcma_device *ai_findcore(struct si_pub *sih,
 				       u16 coreid, u16 coreunit);
-extern uint ai_coreidx(struct si_pub *sih);
-extern uint ai_corerev(struct si_pub *sih);
 extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
 
 /* === exported functions === */
 extern struct si_pub *ai_attach(struct bcma_bus *pbus);
 extern void ai_detach(struct si_pub *sih);
-extern uint ai_coreid(struct si_pub *sih);
-extern uint ai_corerev(struct si_pub *sih);
 extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
-extern uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit);
-extern void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx);
-extern void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit);
 extern void ai_pci_setup(struct si_pub *sih, uint coremask);
 extern void ai_clkctl_init(struct si_pub *sih);
 extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index dab04bbedc8..b4cf617276c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -227,7 +227,7 @@ struct dma_info {
 	uint *msg_level;	/* message level pointer */
 	char name[MAXNAMEL];	/* callers name for diag msgs */
 
-	struct bcma_device *d11core;
+	struct bcma_device *core;
 	struct device *dmadev;
 
 	bool dma64;	/* this dma engine is operating in 64-bit mode */
@@ -383,15 +383,15 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
 	if (dmactrlflags & DMA_CTRL_PEN) {
 		u32 control;
 
-		control = bcma_read32(di->d11core, DMA64TXREGOFFS(di, control));
-		bcma_write32(di->d11core, DMA64TXREGOFFS(di, control),
+		control = bcma_read32(di->core, DMA64TXREGOFFS(di, control));
+		bcma_write32(di->core, DMA64TXREGOFFS(di, control),
 			     control | D64_XC_PD);
-		if (bcma_read32(di->d11core, DMA64TXREGOFFS(di, control)) &
+		if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) &
 		    D64_XC_PD)
 			/* We *can* disable it so it is supported,
 			 * restore control register
 			 */
-			bcma_write32(di->d11core, DMA64TXREGOFFS(di, control),
+			bcma_write32(di->core, DMA64TXREGOFFS(di, control),
 				     control);
 		else
 			/* Not supported, don't allow it to be enabled */
@@ -406,9 +406,9 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
 static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset)
 {
 	u32 w;
-	bcma_set32(di->d11core, ctrl_offset, D64_XC_AE);
-	w = bcma_read32(di->d11core, ctrl_offset);
-	bcma_mask32(di->d11core, ctrl_offset, ~D64_XC_AE);
+	bcma_set32(di->core, ctrl_offset, D64_XC_AE);
+	w = bcma_read32(di->core, ctrl_offset);
+	bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE);
 	return (w & D64_XC_AE) == D64_XC_AE;
 }
 
@@ -442,13 +442,13 @@ static bool _dma_descriptor_align(struct dma_info *di)
 
 	/* Check to see if the descriptors need to be aligned on 4K/8K or not */
 	if (di->d64txregbase != 0) {
-		bcma_write32(di->d11core, DMA64TXREGOFFS(di, addrlow), 0xff0);
-		addrl = bcma_read32(di->d11core, DMA64TXREGOFFS(di, addrlow));
+		bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0);
+		addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow));
 		if (addrl != 0)
 			return false;
 	} else if (di->d64rxregbase != 0) {
-		bcma_write32(di->d11core, DMA64RXREGOFFS(di, addrlow), 0xff0);
-		addrl = bcma_read32(di->d11core, DMA64RXREGOFFS(di, addrlow));
+		bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0);
+		addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow));
 		if (addrl != 0)
 			return false;
 	}
@@ -565,12 +565,13 @@ static bool _dma_alloc(struct dma_info *di, uint direction)
 }
 
 struct dma_pub *dma_attach(char *name, struct si_pub *sih,
-			   struct bcma_device *d11core,
+			   struct bcma_device *core,
 			   uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
 			   uint rxbufsize, int rxextheadroom,
 			   uint nrxpost, uint rxoffset, uint *msg_level)
 {
 	struct dma_info *di;
+	u8 rev = core->id.rev;
 	uint size;
 
 	/* allocate private info structure */
@@ -582,10 +583,10 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
 
 
 	di->dma64 =
-	    ((bcma_aread32(d11core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);
+	    ((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);
 
 	/* init dma reg info */
-	di->d11core = d11core;
+	di->core = core;
 	di->d64txregbase = txregbase;
 	di->d64rxregbase = rxregbase;
 
@@ -606,7 +607,7 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
 	strncpy(di->name, name, MAXNAMEL);
 	di->name[MAXNAMEL - 1] = '\0';
 
-	di->dmadev = d11core->dma_dev;
+	di->dmadev = core->dma_dev;
 
 	/* save tunables */
 	di->ntxd = (u16) ntxd;
@@ -638,11 +639,11 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
 	di->dataoffsetlow = di->ddoffsetlow;
 	di->dataoffsethigh = di->ddoffsethigh;
 	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
-	if ((ai_coreid(sih) == SDIOD_CORE_ID)
-	    && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
+	if ((core->id.id == SDIOD_CORE_ID)
+	    && ((rev > 0) && (rev <= 2)))
 		di->addrext = 0;
-	else if ((ai_coreid(sih) == I2S_CORE_ID) &&
-		 ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
+	else if ((core->id.id == I2S_CORE_ID) &&
+		 ((rev == 0) || (rev == 1)))
 		di->addrext = 0;
 	else
 		di->addrext = _dma_isaddrext(di);
@@ -792,14 +793,14 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
 	if ((di->ddoffsetlow == 0)
 	    || !(pa & PCI32ADDR_HIGH)) {
 		if (direction == DMA_TX) {
-			bcma_write32(di->d11core, DMA64TXREGOFFS(di, addrlow),
+			bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
 				     pa + di->ddoffsetlow);
-			bcma_write32(di->d11core, DMA64TXREGOFFS(di, addrhigh),
+			bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
 				     di->ddoffsethigh);
 		} else {
-			bcma_write32(di->d11core, DMA64RXREGOFFS(di, addrlow),
+			bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
 				     pa + di->ddoffsetlow);
-			bcma_write32(di->d11core, DMA64RXREGOFFS(di, addrhigh),
+			bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
 				     di->ddoffsethigh);
 		}
 	} else {
@@ -811,18 +812,18 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
 		pa &= ~PCI32ADDR_HIGH;
 
 		if (direction == DMA_TX) {
-			bcma_write32(di->d11core, DMA64TXREGOFFS(di, addrlow),
+			bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
 				     pa + di->ddoffsetlow);
-			bcma_write32(di->d11core, DMA64TXREGOFFS(di, addrhigh),
+			bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
 				     di->ddoffsethigh);
-			bcma_maskset32(di->d11core, DMA64TXREGOFFS(di, control),
+			bcma_maskset32(di->core, DMA64TXREGOFFS(di, control),
 				       D64_XC_AE, (ae << D64_XC_AE_SHIFT));
 		} else {
-			bcma_write32(di->d11core, DMA64RXREGOFFS(di, addrlow),
+			bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
 				     pa + di->ddoffsetlow);
-			bcma_write32(di->d11core, DMA64RXREGOFFS(di, addrhigh),
+			bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
 				     di->ddoffsethigh);
-			bcma_maskset32(di->d11core, DMA64RXREGOFFS(di, control),
+			bcma_maskset32(di->core, DMA64RXREGOFFS(di, control),
 				       D64_RC_AE, (ae << D64_RC_AE_SHIFT));
 		}
 	}
@@ -835,7 +836,7 @@ static void _dma_rxenable(struct dma_info *di)
 
 	DMA_TRACE("%s:\n", di->name);
 
-	control = D64_RC_RE | (bcma_read32(di->d11core,
+	control = D64_RC_RE | (bcma_read32(di->core,
 					   DMA64RXREGOFFS(di, control)) &
 			       D64_RC_AE);
 
@@ -845,7 +846,7 @@ static void _dma_rxenable(struct dma_info *di)
 	if (dmactrlflags & DMA_CTRL_ROC)
 		control |= D64_RC_OC;
 
-	bcma_write32(di->d11core, DMA64RXREGOFFS(di, control),
+	bcma_write32(di->core, DMA64RXREGOFFS(di, control),
 		     ((di->rxoffset << D64_RC_RO_SHIFT) | control));
 }
 
@@ -888,7 +889,7 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
 		return NULL;
 
 	curr =
-	    B2I(((bcma_read32(di->d11core,
+	    B2I(((bcma_read32(di->core,
 			      DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) -
 		 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
 
@@ -971,7 +972,7 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
 	if (resid > 0) {
 		uint cur;
 		cur =
-		    B2I(((bcma_read32(di->d11core,
+		    B2I(((bcma_read32(di->core,
 				      DMA64RXREGOFFS(di, status0)) &
 			  D64_RS0_CD_MASK) - di->rcvptrbase) &
 			D64_RS0_CD_MASK, struct dma64desc);
@@ -1004,9 +1005,9 @@ static bool dma64_rxidle(struct dma_info *di)
 	if (di->nrxd == 0)
 		return true;
 
-	return ((bcma_read32(di->d11core,
+	return ((bcma_read32(di->core,
 			     DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) ==
-		(bcma_read32(di->d11core, DMA64RXREGOFFS(di, ptr)) &
+		(bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) &
 		 D64_RS0_CD_MASK));
 }
 
@@ -1090,7 +1091,7 @@ bool dma_rxfill(struct dma_pub *pub)
 	di->rxout = rxout;
 
 	/* update the chip lastdscr pointer */
-	bcma_write32(di->d11core, DMA64RXREGOFFS(di, ptr),
+	bcma_write32(di->core, DMA64RXREGOFFS(di, ptr),
 		     di->rcvptrbase + I2B(rxout, struct dma64desc));
 
 	return ring_empty;
@@ -1151,7 +1152,7 @@ void dma_txinit(struct dma_pub *pub)
 
 	if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
 		control |= D64_XC_PD;
-	bcma_set32(di->d11core, DMA64TXREGOFFS(di, control), control);
+	bcma_set32(di->core, DMA64TXREGOFFS(di, control), control);
 
 	/* DMA engine with alignment requirement requires table to be inited
 	 * before enabling the engine
@@ -1169,7 +1170,7 @@ void dma_txsuspend(struct dma_pub *pub)
 	if (di->ntxd == 0)
 		return;
 
-	bcma_set32(di->d11core, DMA64TXREGOFFS(di, control), D64_XC_SE);
+	bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
 }
 
 void dma_txresume(struct dma_pub *pub)
@@ -1181,7 +1182,7 @@ void dma_txresume(struct dma_pub *pub)
 	if (di->ntxd == 0)
 		return;
 
-	bcma_mask32(di->d11core, DMA64TXREGOFFS(di, control), ~D64_XC_SE);
+	bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE);
 }
 
 bool dma_txsuspended(struct dma_pub *pub)
@@ -1189,7 +1190,7 @@ bool dma_txsuspended(struct dma_pub *pub)
 	struct dma_info *di = (struct dma_info *)pub;
 
 	return (di->ntxd == 0) ||
-	    ((bcma_read32(di->d11core,
+	    ((bcma_read32(di->core,
 			  DMA64TXREGOFFS(di, control)) & D64_XC_SE) ==
 	     D64_XC_SE);
 }
@@ -1224,16 +1225,16 @@ bool dma_txreset(struct dma_pub *pub)
 		return true;
 
 	/* suspend tx DMA first */
-	bcma_write32(di->d11core, DMA64TXREGOFFS(di, control), D64_XC_SE);
+	bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
 	SPINWAIT(((status =
-		   (bcma_read32(di->d11core, DMA64TXREGOFFS(di, status0)) &
+		   (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
 		    D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) &&
 		 (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED),
 		 10000);
 
-	bcma_write32(di->d11core, DMA64TXREGOFFS(di, control), 0);
+	bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0);
 	SPINWAIT(((status =
-		   (bcma_read32(di->d11core, DMA64TXREGOFFS(di, status0)) &
+		   (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
 		    D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000);
 
 	/* wait for the last transaction to complete */
@@ -1250,9 +1251,9 @@ bool dma_rxreset(struct dma_pub *pub)
 	if (di->nrxd == 0)
 		return true;
 
-	bcma_write32(di->d11core, DMA64RXREGOFFS(di, control), 0);
+	bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0);
 	SPINWAIT(((status =
-		   (bcma_read32(di->d11core, DMA64RXREGOFFS(di, status0)) &
+		   (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) &
 		    D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000);
 
 	return status == D64_RS0_RS_DISABLED;
@@ -1315,7 +1316,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
 
 	/* kick the chip */
 	if (commit)
-		bcma_write32(di->d11core, DMA64TXREGOFFS(di, ptr),
+		bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
 			     di->xmtptrbase + I2B(txout, struct dma64desc));
 
 	/* tx flow control */
@@ -1363,14 +1364,14 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
 	if (range == DMA_RANGE_ALL)
 		end = di->txout;
 	else {
-		end = (u16) (B2I(((bcma_read32(di->d11core,
+		end = (u16) (B2I(((bcma_read32(di->core,
 					       DMA64TXREGOFFS(di, status0)) &
 				   D64_XS0_CD_MASK) - di->xmtptrbase) &
 				  D64_XS0_CD_MASK, struct dma64desc));
 
 		if (range == DMA_RANGE_TRANSFERED) {
 			active_desc =
-			    (u16)(bcma_read32(di->d11core,
+			    (u16)(bcma_read32(di->core,
 					      DMA64TXREGOFFS(di, status1)) &
 				  D64_XS1_AD_MASK);
 			active_desc =
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 1c75e2f52d2..f7ed34034f8 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -1953,12 +1953,11 @@ static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
 		flags |= SICF_PCLKE;
 
 	/*
+	 * TODO: test suspend/resume
+	 *
 	 * AI chip doesn't restore bar0win2 on
 	 * hibernation/resume, need sw fixup
 	 */
-	if ((ai_get_chip_id(wlc_hw->sih) == BCM43224_CHIP_ID) ||
-	    (ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID))
-		(void)ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
 
 	bcma_core_enable(wlc_hw->d11core, flags);
 	brcms_c_mctrl_reset(wlc_hw);
@@ -4484,8 +4483,6 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
 	wlc_hw->vendorid = pcidev->vendor;
 	wlc_hw->deviceid = pcidev->device;
 
-	/* set bar0 window to point at D11 core */
-	(void)ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
 	wlc_hw->d11core = core;
 	wlc_hw->corerev = core->id.rev;
 
@@ -4606,7 +4603,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
 	wlc_hw->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
 	wlc->band->bandunit = j;
 	wlc->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
-	wlc->core->coreidx = ai_coreidx(wlc_hw->sih);
+	wlc->core->coreidx = core->core_index;
 
 	wlc_hw->machwcap = bcma_read32(core, D11REGOFFS(machwcap));
 	wlc_hw->machwcap_backup = wlc_hw->machwcap;
@@ -5055,12 +5052,11 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
 	ai_pci_fixcfg(wlc_hw->sih);
 
 	/*
+	 * TODO: test suspend/resume
+	 *
 	 * AI chip doesn't restore bar0win2 on
 	 * hibernation/resume, need sw fixup
 	 */
-	if ((ai_get_chip_id(wlc_hw->sih) == BCM43224_CHIP_ID) ||
-	    (ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID))
-		(void)ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
 
 	/*
 	 * Inform phy that a POR reset has occurred so