path: root/drivers/net/cxgb3/t3_hw.c
author      Divy Le Ray <divy@chelsio.com>      2007-01-18 22:04:14 -0500
committer   Jeff Garzik <jeff@garzik.org>       2007-02-05 16:58:46 -0500
commit      4d22de3e6cc4a09c369b504cd8bcde3385a974cd (patch)
tree        af13a2ee582105d961c79fc4e55fce0b5e043310 /drivers/net/cxgb3/t3_hw.c
parent      0bf94faf64afaba6e7b49fd11541b59d2ba06d0e (diff)
Add support for the latest 1G/10G Chelsio adapter, T3.
This driver is required by the Chelsio T3 RDMA driver posted by Steve Wise.

Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/cxgb3/t3_hw.c')
-rw-r--r--  drivers/net/cxgb3/t3_hw.c  3354
1 files changed, 3354 insertions(+), 0 deletions(-)
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
new file mode 100644
index 000000000000..a4e2e57e1465
--- /dev/null
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -0,0 +1,3354 @@
/*
 * This file is part of the Chelsio T3 Ethernet driver.
 *
 * Copyright (C) 2003-2006 Chelsio Communications.  All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"
/**
 * t3_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
                        int polarity, int attempts, int delay, u32 *valp)
{
        while (1) {
                u32 val = t3_read_reg(adapter, reg);

                if (!!(val & mask) == polarity) {
                        if (valp)
                                *valp = val;
                        return 0;
                }
                if (--attempts == 0)
                        return -EAGAIN;
                if (delay)
                        udelay(delay);
        }
}
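
/*
 * Illustrative use of t3_wait_op_done_val() (the register and field here are
 * only an example): poll until a BUSY-style bit clears and capture the final
 * register value.
 *
 *	u32 val;
 *
 *	if (t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 5, 10, &val))
 *		return -EAGAIN;
 *
 * Most callers in this file go through t3_wait_op_done(), presumably a thin
 * wrapper declared elsewhere that passes a NULL @valp.
 */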

/**
 * t3_write_regs - write a bunch of registers
 * @adapter: the adapter to program
 * @p: an array of register address/register value pairs
 * @n: the number of address/value pairs
 * @offset: register address offset
 *
 * Takes an array of register address/register value pairs and writes each
 * value to the corresponding register.  Register addresses are adjusted
 * by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
                   int n, unsigned int offset)
{
        while (n--) {
                t3_write_reg(adapter, p->reg_addr + offset, p->val);
                p++;
        }
}

/**
 * t3_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
                      u32 val)
{
        u32 v = t3_read_reg(adapter, addr) & ~mask;

        t3_write_reg(adapter, addr, v | val);
        t3_read_reg(adapter, addr);     /* flush */
}
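
/*
 * Example (hypothetical register/field names): t3_set_reg_field() performs a
 * read-modify-write, so
 *
 *	t3_set_reg_field(adapter, A_SOME_REG, V_FIELD(M_FIELD), V_FIELD(3));
 *
 * clears the field selected by the mask, ORs in the new value, and leaves all
 * other bits of the register untouched.
 */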

/**
 * t3_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @start_idx: index of first indirect register to read
 * @nregs: how many indirect registers to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
                      unsigned int data_reg, u32 *vals, unsigned int nregs,
                      unsigned int start_idx)
{
        while (nregs--) {
                t3_write_reg(adap, addr_reg, start_idx);
                *vals++ = t3_read_reg(adap, data_reg);
                start_idx++;
        }
}

/**
 * t3_mc7_bd_read - read from MC7 through backdoor accesses
 * @mc7: identifies MC7 to read from
 * @start: index of first 64-bit word to read
 * @n: number of 64-bit words to read
 * @buf: where to store the read result
 *
 * Read @n 64-bit words from MC7 starting at word @start, using backdoor
 * accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
                   u64 *buf)
{
        static const int shift[] = { 0, 0, 16, 24 };
        static const int step[] = { 0, 32, 16, 8 };

        unsigned int size64 = mc7->size / 8;    /* # of 64-bit words */
        struct adapter *adap = mc7->adapter;

        if (start >= size64 || start + n > size64)
                return -EINVAL;

        start *= (8 << mc7->width);
        while (n--) {
                int i;
                u64 val64 = 0;

                for (i = (1 << mc7->width) - 1; i >= 0; --i) {
                        int attempts = 10;
                        u32 val;

                        t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
                        t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
                        val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
                        while ((val & F_BUSY) && attempts--)
                                val = t3_read_reg(adap,
                                                  mc7->offset + A_MC7_BD_OP);
                        if (val & F_BUSY)
                                return -EIO;

                        val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
                        if (mc7->width == 0) {
                                val64 = t3_read_reg(adap,
                                                    mc7->offset +
                                                    A_MC7_BD_DATA0);
                                val64 |= (u64) val << 32;
                        } else {
                                if (mc7->width > 1)
                                        val >>= shift[mc7->width];
                                val64 |= (u64) val << (step[mc7->width] * i);
                        }
                        start += 8;
                }
                *buf++ = val64;
        }
        return 0;
}

/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
        u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
        u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
                  V_CLKDIV(clkdiv);

        if (!(ai->caps & SUPPORTED_10000baseT_Full))
                val |= V_ST(1);
        t3_write_reg(adap, A_MI1_CFG, val);
}
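
/*
 * The clock-divider arithmetic above is just the inversion of
 * MDC = cclk / (2 * (CLKDIV + 1)), with both clock rates taken from the VPD
 * in the same units:
 *
 *	clkdiv = cclk / (2 * mdc) - 1
 *
 * e.g. a 125 MHz core clock and a 2.5 MHz MDIO clock give clkdiv = 24 (these
 * example frequencies are illustrative, not read from any particular card).
 */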

#define MDIO_ATTEMPTS 10

/*
 * MI1 read/write operations for direct-addressed PHYs.
 */
static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
                    int reg_addr, unsigned int *valp)
{
        int ret;
        u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

        if (mmd_addr)
                return -EINVAL;

        mutex_lock(&adapter->mdio_lock);
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
        if (!ret)
                *valp = t3_read_reg(adapter, A_MI1_DATA);
        mutex_unlock(&adapter->mdio_lock);
        return ret;
}

static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
                     int reg_addr, unsigned int val)
{
        int ret;
        u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

        if (mmd_addr)
                return -EINVAL;

        mutex_lock(&adapter->mdio_lock);
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_DATA, val);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
        mutex_unlock(&adapter->mdio_lock);
        return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
        mi1_read,
        mi1_write
};

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
                        int reg_addr, unsigned int *valp)
{
        int ret;
        u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

        mutex_lock(&adapter->mdio_lock);
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_DATA, reg_addr);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
        if (!ret) {
                t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
                ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
                                      MDIO_ATTEMPTS, 20);
                if (!ret)
                        *valp = t3_read_reg(adapter, A_MI1_DATA);
        }
        mutex_unlock(&adapter->mdio_lock);
        return ret;
}

static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
                         int reg_addr, unsigned int val)
{
        int ret;
        u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

        mutex_lock(&adapter->mdio_lock);
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_DATA, reg_addr);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
        if (!ret) {
                t3_write_reg(adapter, A_MI1_DATA, val);
                t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
                ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
                                      MDIO_ATTEMPTS, 20);
        }
        mutex_unlock(&adapter->mdio_lock);
        return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
        mi1_ext_read,
        mi1_ext_write
};

/**
 * t3_mdio_change_bits - modify the value of a PHY register
 * @phy: the PHY to operate on
 * @mmd: the device address
 * @reg: the register address
 * @clear: what part of the register value to mask off
 * @set: what part of the register value to set
 *
 * Changes the value of a PHY register by applying a mask to its current
 * value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
                        unsigned int set)
{
        int ret;
        unsigned int val;

        ret = mdio_read(phy, mmd, reg, &val);
        if (!ret) {
                val &= ~clear;
                ret = mdio_write(phy, mmd, reg, val | set);
        }
        return ret;
}

/**
 * t3_phy_reset - reset a PHY block
 * @phy: the PHY to operate on
 * @mmd: the device address of the PHY block to reset
 * @wait: how long to wait for the reset to complete in 1ms increments
 *
 * Resets a PHY block and optionally waits for the reset to complete.
 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 * for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
        int err;
        unsigned int ctl;

        err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
        if (err || !wait)
                return err;

        do {
                err = mdio_read(phy, mmd, MII_BMCR, &ctl);
                if (err)
                        return err;
                ctl &= BMCR_RESET;
                if (ctl)
                        msleep(1);
        } while (ctl && --wait);

        return ctl ? -1 : 0;
}

/**
 * t3_phy_advertise - set the PHY advertisement registers for autoneg
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
 * requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
        int err;
        unsigned int val = 0;

        err = mdio_read(phy, 0, MII_CTRL1000, &val);
        if (err)
                return err;

        val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
        if (advert & ADVERTISED_1000baseT_Half)
                val |= ADVERTISE_1000HALF;
        if (advert & ADVERTISED_1000baseT_Full)
                val |= ADVERTISE_1000FULL;

        err = mdio_write(phy, 0, MII_CTRL1000, val);
        if (err)
                return err;

        val = 1;
        if (advert & ADVERTISED_10baseT_Half)
                val |= ADVERTISE_10HALF;
        if (advert & ADVERTISED_10baseT_Full)
                val |= ADVERTISE_10FULL;
        if (advert & ADVERTISED_100baseT_Half)
                val |= ADVERTISE_100HALF;
        if (advert & ADVERTISED_100baseT_Full)
                val |= ADVERTISE_100FULL;
        if (advert & ADVERTISED_Pause)
                val |= ADVERTISE_PAUSE_CAP;
        if (advert & ADVERTISED_Asym_Pause)
                val |= ADVERTISE_PAUSE_ASYM;
        return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 * t3_set_phy_speed_duplex - force PHY speed and duplex
 * @phy: the PHY to operate on
 * @speed: requested PHY speed
 * @duplex: requested PHY duplex
 *
 * Force a 10/100/1000 PHY's speed and duplex.  This also disables
 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
        int err;
        unsigned int ctl;

        err = mdio_read(phy, 0, MII_BMCR, &ctl);
        if (err)
                return err;

        if (speed >= 0) {
                ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
                if (speed == SPEED_100)
                        ctl |= BMCR_SPEED100;
                else if (speed == SPEED_1000)
                        ctl |= BMCR_SPEED1000;
        }
        if (duplex >= 0) {
                ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
                if (duplex == DUPLEX_FULL)
                        ctl |= BMCR_FULLDPLX;
        }
        if (ctl & BMCR_SPEED1000)  /* auto-negotiation required for GigE */
                ctl |= BMCR_ANENABLE;
        return mdio_write(phy, 0, MII_BMCR, ctl);
}

static const struct adapter_info t3_adap_info[] = {
        {2, 0, 0, 0,
         F_GPIO2_OEN | F_GPIO4_OEN |
         F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
         SUPPORTED_OFFLOAD,
         &mi1_mdio_ops, "Chelsio PE9000"},
        {2, 0, 0, 0,
         F_GPIO2_OEN | F_GPIO4_OEN |
         F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
         SUPPORTED_OFFLOAD,
         &mi1_mdio_ops, "Chelsio T302"},
        {1, 0, 0, 0,
         F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
         F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
         SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
         &mi1_mdio_ext_ops, "Chelsio T310"},
        {2, 0, 0, 0,
         F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
         F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
         F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
         SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
         &mi1_mdio_ext_ops, "Chelsio T320"},
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
        return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
                 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)

static const struct port_type_info port_types[] = {
        {NULL},
        {t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
         "10GBASE-XR"},
        {t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
         "10/100/1000BASE-T"},
        {NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
         "10/100/1000BASE-T"},
        {t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
        {NULL, CAPS_10G, "10GBASE-KX4"},
        {t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
        {t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
         "10GBASE-SR"},
        {NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
};

#undef CAPS_1G
#undef CAPS_10G

#define VPD_ENTRY(name, len) \
        u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
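
/*
 * For reference, VPD_ENTRY(pn, 16) above expands to
 *
 *	u8 pn_kword[2]; u8 pn_len; u8 pn_data[16];
 *
 * i.e. each VPD-R keyword carries its 2-byte keyword name, a length byte,
 * and the payload.
 */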

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
        u8 id_tag;
        u8 id_len[2];
        u8 id_data[16];
        u8 vpdr_tag;
        u8 vpdr_len[2];
        VPD_ENTRY(pn, 16);      /* part number */
        VPD_ENTRY(ec, 16);      /* EC level */
        VPD_ENTRY(sn, 16);      /* serial number */
        VPD_ENTRY(na, 12);      /* MAC address base */
        VPD_ENTRY(cclk, 6);     /* core clock */
        VPD_ENTRY(mclk, 6);     /* mem clock */
        VPD_ENTRY(uclk, 6);     /* uP clk */
        VPD_ENTRY(mdc, 6);      /* MDIO clk */
        VPD_ENTRY(mt, 2);       /* mem timing */
        VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
        VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
        VPD_ENTRY(port0, 2);    /* PHY0 complex */
        VPD_ENTRY(port1, 2);    /* PHY1 complex */
        VPD_ENTRY(port2, 2);    /* PHY2 complex */
        VPD_ENTRY(port3, 2);    /* PHY3 complex */
        VPD_ENTRY(rv, 1);       /* csum */
        u32 pad;                /* for multiple-of-4 sizing and alignment */
};

#define EEPROM_MAX_POLL   4
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00
/**
 * t3_seeprom_read - read a VPD EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.  A zero is written to the flag bit when the
 * address is written to the control register.  The hardware device will
 * set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
        u16 val;
        int attempts = EEPROM_MAX_POLL;
        unsigned int base = adapter->params.pci.vpd_cap_addr;

        if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
                return -EINVAL;

        pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
        do {
                udelay(10);
                pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
        } while (!(val & PCI_VPD_ADDR_F) && --attempts);

        if (!(val & PCI_VPD_ADDR_F)) {
                CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
                return -EIO;
        }
        pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
        *data = le32_to_cpu(*data);
        return 0;
}

/**
 * t3_seeprom_write - write a VPD EEPROM location
 * @adapter: adapter to write
 * @addr: EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
        u16 val;
        int attempts = EEPROM_MAX_POLL;
        unsigned int base = adapter->params.pci.vpd_cap_addr;

        if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
                return -EINVAL;

        pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
                               cpu_to_le32(data));
        pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
                              addr | PCI_VPD_ADDR_F);
        do {
                msleep(1);
                pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
        } while ((val & PCI_VPD_ADDR_F) && --attempts);

        if (val & PCI_VPD_ADDR_F) {
                CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
                return -EIO;
        }
        return 0;
}

/**
 * t3_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: 1 to enable write protection, 0 to disable it
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
        return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
        return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
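
/*
 * hex2int() assumes it is handed a valid hex digit: hex2int('7') returns 7
 * and hex2int('c') returns 12, while any other character produces garbage
 * rather than an error.
 */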

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
        int i, addr, ret;
        struct t3_vpd vpd;

        /*
         * Card information is normally at VPD_BASE but some early cards had
         * it at 0.
         */
        ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
        if (ret)
                return ret;
        addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

        for (i = 0; i < sizeof(vpd); i += 4) {
                ret = t3_seeprom_read(adapter, addr + i,
                                      (u32 *)((u8 *)&vpd + i));
                if (ret)
                        return ret;
        }

        p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
        p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
        p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
        p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
        p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);

        /* Old EEPROMs didn't have port information */
        if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
                p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
                p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
        } else {
                p->port_type[0] = hex2int(vpd.port0_data[0]);
                p->port_type[1] = hex2int(vpd.port1_data[0]);
                p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
                p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
        }

        for (i = 0; i < 6; i++)
                p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
                                 hex2int(vpd.na_data[2 * i + 1]);
        return 0;
}

/* serial flash and firmware constants */
enum {
        SF_ATTEMPTS = 5,                /* max retries for SF1 operations */
        SF_SEC_SIZE = 64 * 1024,        /* serial flash sector size */
        SF_SIZE = SF_SEC_SIZE * 8,      /* serial flash size */

        /* flash command opcodes */
        SF_PROG_PAGE = 2,       /* program page */
        SF_WR_DISABLE = 4,      /* disable writes */
        SF_RD_STATUS = 5,       /* read status register */
        SF_WR_ENABLE = 6,       /* enable writes */
        SF_RD_DATA_FAST = 0xb,  /* read flash */
        SF_ERASE_SECTOR = 0xd8, /* erase sector */

        FW_FLASH_BOOT_ADDR = 0x70000,   /* start address of FW in flash */
        FW_VERS_ADDR = 0x77ffc          /* flash address holding FW version */
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
                    u32 *valp)
{
        int ret;

        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
        if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
                return -EBUSY;
        t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
        ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
        if (!ret)
                *valp = t3_read_reg(adapter, A_SF_DATA);
        return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
                     u32 val)
{
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
        if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
                return -EBUSY;
        t3_write_reg(adapter, A_SF_DATA, val);
        t3_write_reg(adapter, A_SF_OP,
                     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
        return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
        int ret;
        u32 status;

        while (1) {
                if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
                    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
                        return ret;
                if (!(status & 1))
                        return 0;
                if (--attempts == 0)
                        return -EAGAIN;
                if (delay)
                        msleep(delay);
        }
}

/**
 * t3_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
                  unsigned int nwords, u32 *data, int byte_oriented)
{
        int ret;

        if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
                return -EINVAL;

        addr = swab32(addr) | SF_RD_DATA_FAST;

        if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
            (ret = sf1_read(adapter, 1, 1, data)) != 0)
                return ret;

        for (; nwords; nwords--, data++) {
                ret = sf1_read(adapter, 4, nwords > 1, data);
                if (ret)
                        return ret;
                if (byte_oriented)
                        *data = htonl(*data);
        }
        return 0;
}
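
/*
 * For example, t3_get_fw_version() further below is just
 *
 *	t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
 *
 * a single-word, word-oriented read of the version stored at the top of the
 * FW flash region.
 */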

/**
 * t3_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
                          unsigned int n, const u8 *data)
{
        int ret;
        u32 buf[64];
        unsigned int i, c, left, val, offset = addr & 0xff;

        if (addr + n > SF_SIZE || offset + n > 256)
                return -EINVAL;

        val = swab32(addr) | SF_PROG_PAGE;

        if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
            (ret = sf1_write(adapter, 4, 1, val)) != 0)
                return ret;

        for (left = n; left; left -= c) {
                c = min(left, 4U);
                for (val = 0, i = 0; i < c; ++i)
                        val = (val << 8) + *data++;

                ret = sf1_write(adapter, c, c != left, val);
                if (ret)
                        return ret;
        }
        if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
                return ret;

        /* Read the page to verify the write succeeded */
        ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
        if (ret)
                return ret;

        if (memcmp(data - n, (u8 *) buf + offset, n))
                return -EIO;
        return 0;
}

/**
 * t3_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
        return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

/**
 * t3_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter)
{
        int ret;
        u32 vers;

        ret = t3_get_fw_version(adapter, &vers);
        if (ret)
                return ret;

        /* Minor 0xfff means the FW is an internal development-only version. */
        if ((vers & 0xfff) == 0xfff)
                return 0;

        if (vers == 0x1002009)
                return 0;

        CH_ERR(adapter, "found wrong FW version, driver needs version 2.9\n");
        return -EINVAL;
}

/**
 * t3_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
        while (start <= end) {
                int ret;

                if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
                    (ret = sf1_write(adapter, 4, 0,
                                     SF_ERASE_SECTOR | (start << 8))) != 0 ||
                    (ret = flash_wait_op(adapter, 5, 500)) != 0)
                        return ret;
                start++;
        }
        return 0;
}

/**
 * t3_load_fw - download firmware
 * @adapter: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 * The FW image has the following sections: @size - 8 bytes of code and
 * data, followed by 4 bytes of FW version, followed by the 32-bit
 * 1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
        u32 csum;
        unsigned int i;
        const u32 *p = (const u32 *)fw_data;
        int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

        if (size & 3)
                return -EINVAL;
        if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
                return -EFBIG;

        for (csum = 0, i = 0; i < size / sizeof(csum); i++)
                csum += ntohl(p[i]);
        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
                       csum);
                return -EINVAL;
        }

        ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
        if (ret)
                goto out;

        size -= 8;              /* trim off version and checksum */
        for (addr = FW_FLASH_BOOT_ADDR; size;) {
                unsigned int chunk_size = min(size, 256U);

                ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
                if (ret)
                        goto out;

                addr += chunk_size;
                fw_data += chunk_size;
                size -= chunk_size;
        }

        ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
        if (ret)
                CH_ERR(adapter, "firmware download failed, error %d\n", ret);
        return ret;
}
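
/*
 * Layout of the FW image consumed by t3_load_fw() above, for an image of
 * @size bytes:
 *
 *	bytes [0, size - 9]          code and data
 *	bytes [size - 8, size - 5]   FW version word
 *	bytes [size - 4, size - 1]   1's-complement checksum, chosen so that
 *	                             the big-endian 32-bit words of the whole
 *	                             image sum to 0xffffffff
 */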

#define CIM_CTL_BASE 0x2000

/**
 * t3_cim_ctl_blk_read - read a block from CIM control region
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
                        unsigned int n, unsigned int *valp)
{
        int ret = 0;

        if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
                return -EBUSY;

        for ( ; !ret && n--; addr += 4) {
                t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
                ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
                                      0, 5, 2);
                if (!ret)
                        *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
        }
        return ret;
}
/**
 * t3_link_changed - handle interface link changes
 * @adapter: the adapter
 * @port_id: the port index that changed link state
 *
 * Called when a port's link settings change to propagate the new values
 * to the associated PHY and MAC.  After performing the common tasks it
 * invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
        int link_ok, speed, duplex, fc;
        struct port_info *pi = adap2pinfo(adapter, port_id);
        struct cphy *phy = &pi->phy;
        struct cmac *mac = &pi->mac;
        struct link_config *lc = &pi->link_config;

        phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

        if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
            uses_xaui(adapter)) {
                if (link_ok)
                        t3b_pcs_reset(mac);
                t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
                             link_ok ? F_TXACTENABLE | F_RXEN : 0);
        }
        lc->link_ok = link_ok;
        lc->speed = speed < 0 ? SPEED_INVALID : speed;
        lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
        if (lc->requested_fc & PAUSE_AUTONEG)
                fc &= lc->requested_fc;
        else
                fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

        if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
                /* Set MAC speed, duplex, and flow control to match PHY. */
                t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
                lc->fc = fc;
        }

        t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}

/**
 * t3_link_start - apply link configuration to MAC/PHY
 * @phy: the PHY to setup
 * @mac: the MAC to setup
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
        unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

        lc->link_ok = 0;
        if (lc->supported & SUPPORTED_Autoneg) {
                lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
                if (fc) {
                        lc->advertising |= ADVERTISED_Asym_Pause;
                        if (fc & PAUSE_RX)
                                lc->advertising |= ADVERTISED_Pause;
                }
                phy->ops->advertise(phy, lc->advertising);

                if (lc->autoneg == AUTONEG_DISABLE) {
                        lc->speed = lc->requested_speed;
                        lc->duplex = lc->requested_duplex;
                        lc->fc = (unsigned char)fc;
                        t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
                                                   fc);
                        /* Also disables autoneg */
                        phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
                        phy->ops->reset(phy, 0);
                } else
                        phy->ops->autoneg_enable(phy);
        } else {
                t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
                lc->fc = (unsigned char)fc;
                phy->ops->reset(phy, 0);
        }
        return 0;
}

/**
 * t3_set_vlan_accel - control HW VLAN extraction
 * @adapter: the adapter
 * @ports: bitmap of adapter ports to operate on
 * @on: enable (1) or disable (0) HW VLAN extraction
 *
 * Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
        t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
                         ports << S_VLANEXTRACTIONENABLE,
                         on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}

struct intr_info {
        unsigned int mask;      /* bits to check in interrupt status */
        const char *msg;        /* message to print or NULL */
        short stat_idx;         /* stat counter to increment or -1 */
        unsigned short fatal:1; /* whether the condition reported is fatal */
};

/**
 * t3_handle_intr_status - table-driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @mask: a mask to apply to the interrupt status
 * @acts: table of interrupt actions
 * @stats: statistics counters tracking interrupt occurrences
 *
 * A table-driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally printing a warning or alert message, and optionally
 * incrementing a stat counter.  The table is terminated by an entry
 * specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
                                 unsigned int mask,
                                 const struct intr_info *acts,
                                 unsigned long *stats)
{
        int fatal = 0;
        unsigned int status = t3_read_reg(adapter, reg) & mask;

        for (; acts->mask; ++acts) {
                if (!(status & acts->mask))
                        continue;
                if (acts->fatal) {
                        fatal++;
                        CH_ALERT(adapter, "%s (0x%x)\n",
                                 acts->msg, status & acts->mask);
                } else if (acts->msg)
                        CH_WARN(adapter, "%s (0x%x)\n",
                                acts->msg, status & acts->mask);
                if (acts->stat_idx >= 0)
                        stats[acts->stat_idx]++;
        }
        if (status)             /* clear processed interrupts */
                t3_write_reg(adapter, reg, status);
        return fatal;
}
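
/*
 * Shape of an action table for t3_handle_intr_status() (the bit and stat
 * names here are made up; the real tables follow below):
 *
 *	static const struct intr_info example_intr_info[] = {
 *		{F_FATAL_BIT, "module fatal error", -1, 1},
 *		{F_COUNTED_BIT, NULL, STAT_EXAMPLE, 0},
 *		{0}     - mask 0 terminates the table
 *	};
 */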

#define SGE_INTR_MASK (F_RSPQDISABLED)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
                       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
                       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
                       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
                       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
                        F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
                        F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
                        F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
                        V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
                        V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
                        F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
                        /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
                        V_BISTERR(M_BISTERR) | F_PEXERR)
#define ULPRX_INTR_MASK F_PARERR
#define ULPTX_INTR_MASK 0
#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
                         F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
                         F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
                       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
                       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
                       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
                        V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
                        V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
                        V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
                        V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
                       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
                       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
                       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
                      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
                      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
                      F_MPS0 | F_CPL_SWITCH)

/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pcix1_intr_info[] = {
                {F_PEXERR, "PCI PEX error", -1, 1},
                {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
                {F_SIGTARABT, "PCI signaled target abort", -1, 1},
                {F_RCVTARABT, "PCI received target abort", -1, 1},
                {F_RCVMSTABT, "PCI received master abort", -1, 1},
                {F_SIGSYSERR, "PCI signaled system error", -1, 1},
                {F_DETPARERR, "PCI detected parity error", -1, 1},
                {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
                {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
                {F_RCVSPLCMPERR, "PCI received split completion error", -1, 1},
                {F_DETCORECCERR, "PCI correctable ECC error",
                 STAT_PCI_CORR_ECC, 0},
                {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
                {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
                {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1, 1},
                {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1, 1},
                {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error",
                 -1, 1},
                {V_MSIXPARERR(M_MSIXPARERR),
                 "PCI MSI-X table/PBA parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
                                  pcix1_intr_info, adapter->irq_stats))
                t3_fatal_err(adapter);
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pcie_intr_info[] = {
                {F_UNXSPLCPLERRR,
                 "PCI unexpected split completion DMA read error", -1, 1},
                {F_UNXSPLCPLERRC,
                 "PCI unexpected split completion DMA command error", -1, 1},
                {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
                {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
                {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
                {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
                {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
                 "PCI MSI-X table/PBA parity error", -1, 1},
                {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
                                  pcie_intr_info, adapter->irq_stats))
                t3_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
        static const struct intr_info tp_intr_info[] = {
                {0xffffff, "TP parity error", -1, 1},
                {0x1000000, "TP out of Rx pages", -1, 1},
                {0x2000000, "TP out of Tx pages", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
                                  tp_intr_info, NULL))
                t3_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
        static const struct intr_info cim_intr_info[] = {
                {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
                {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
                {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
                {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
                {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
                {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
                {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
                {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
                {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
                {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
                {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
                {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
                                  cim_intr_info, NULL))
                t3_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info ulprx_intr_info[] = {
                {F_PARERR, "ULP RX parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
                                  ulprx_intr_info, NULL))
                t3_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info ulptx_intr_info[] = {
                {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
                 STAT_ULP_CH0_PBL_OOB, 0},
                {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
                 STAT_ULP_CH1_PBL_OOB, 0},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
                                  ulptx_intr_info, adapter->irq_stats))
                t3_fatal_err(adapter);
}

#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
        F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
        F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
        F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
        F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
        F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
        F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pmtx_intr_info[] = {
                {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
                {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
                {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
                {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
                 "PMTX ispi parity error", -1, 1},
                {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
                 "PMTX ospi parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
                                  pmtx_intr_info, NULL))
                t3_fatal_err(adapter);
}

#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
        F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
        F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
        F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
        F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
        F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
        F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pmrx_intr_info[] = {
                {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
                {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
                {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
                {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
                 "PMRX ispi parity error", -1, 1},
                {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
                 "PMRX ospi parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
                                  pmrx_intr_info, NULL))
                t3_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
        static const struct intr_info cplsw_intr_info[] = {
/*              {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1}, */
                {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
                {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
                {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
                {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
                                  cplsw_intr_info, NULL))
                t3_fatal_err(adapter);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
        static const struct intr_info mps_intr_info[] = {
                {0x1ff, "MPS parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
                                  mps_intr_info, NULL))
                t3_fatal_err(adapter);
}

#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
        struct adapter *adapter = mc7->adapter;
        u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

        if (cause & F_CE) {
                mc7->stats.corr_err++;
                CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
                        "data 0x%x 0x%x 0x%x\n", mc7->name,
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
        }

        if (cause & F_UE) {
                mc7->stats.uncorr_err++;
                CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
                         "data 0x%x 0x%x 0x%x\n", mc7->name,
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
        }

        if (G_PE(cause)) {
                mc7->stats.parity_err++;
                CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
                         mc7->name, G_PE(cause));
        }

        if (cause & F_AE) {
                u32 addr = 0;

                if (adapter->params.rev > 0)
                        addr = t3_read_reg(adapter,
                                           mc7->offset + A_MC7_ERR_ADDR);
                mc7->stats.addr_err++;
                CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
                         mc7->name, addr);
        }

        if (cause & MC7_INTR_FATAL)
                t3_fatal_err(adapter);

        t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}

#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
                        V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))

/*
 * XGMAC interrupt handler.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
        struct cmac *mac = &adap2pinfo(adap, idx)->mac;
        u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

        if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
                mac->stats.tx_fifo_parity_err++;
                CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
        }
        if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
                mac->stats.rx_fifo_parity_err++;
                CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
        }
        if (cause & F_TXFIFO_UNDERRUN)
                mac->stats.tx_fifo_urun++;
        if (cause & F_RXFIFO_OVERFLOW)
                mac->stats.rx_fifo_ovfl++;
        if (cause & V_SERDES_LOS(M_SERDES_LOS))
                mac->stats.serdes_signal_loss++;
        if (cause & F_XAUIPCSCTCERR)
                mac->stats.xaui_pcs_ctc_err++;
        if (cause & F_XAUIPCSALIGNCHANGE)
                mac->stats.xaui_pcs_align_change++;

        t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
        if (cause & XGM_INTR_FATAL)
                t3_fatal_err(adap);
        return cause != 0;
}

/*
 * Interrupt handler for PHY events.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
        static const int intr_gpio_bits[] = { 8, 0x20 };

        u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

        for_each_port(adapter, i) {
                if (cause & intr_gpio_bits[i]) {
                        struct cphy *phy = &adap2pinfo(adapter, i)->phy;
                        int phy_cause = phy->ops->intr_handler(phy);

                        if (phy_cause & cphy_cause_link_change)
                                t3_link_changed(adapter, i);
                        if (phy_cause & cphy_cause_fifo_error)
                                phy->fifo_errors++;
                }
        }

        t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
        return 0;
}

/*
 * T3 slow path (non-data) interrupt handler.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
        u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

        cause &= adapter->slow_intr_mask;
        if (!cause)
                return 0;
        if (cause & F_PCIM0) {
                if (is_pcie(adapter))
                        pcie_intr_handler(adapter);
                else
                        pci_intr_handler(adapter);
        }
        if (cause & F_SGE3)
                t3_sge_err_intr_handler(adapter);
        if (cause & F_MC7_PMRX)
                mc7_intr_handler(&adapter->pmrx);
        if (cause & F_MC7_PMTX)
                mc7_intr_handler(&adapter->pmtx);
        if (cause & F_MC7_CM)
                mc7_intr_handler(&adapter->cm);
        if (cause & F_CIM)
                cim_intr_handler(adapter);
        if (cause & F_TP1)
                tp_intr_handler(adapter);
        if (cause & F_ULP2_RX)
                ulprx_intr_handler(adapter);
        if (cause & F_ULP2_TX)
                ulptx_intr_handler(adapter);
        if (cause & F_PM1_RX)
                pmrx_intr_handler(adapter);
        if (cause & F_PM1_TX)
                pmtx_intr_handler(adapter);
        if (cause & F_CPL_SWITCH)
                cplsw_intr_handler(adapter);
        if (cause & F_MPS0)
                mps_intr_handler(adapter);
        if (cause & F_MC5A)
                t3_mc5_intr_handler(&adapter->mc5);
        if (cause & F_XGMAC0_0)
                mac_intr_handler(adapter, 0);
        if (cause & F_XGMAC0_1)
                mac_intr_handler(adapter, 1);
        if (cause & F_T3DBG)
                t3_os_ext_intr_handler(adapter);

        /* Clear the interrupts just processed. */
        t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
        t3_read_reg(adapter, A_PL_INT_CAUSE0);  /* flush */
        return 1;
}

/**
 * t3_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable interrupts by setting the interrupt enable registers of the
 * various HW modules and then enabling the top-level interrupt
 * concentrator.
 */
void t3_intr_enable(struct adapter *adapter)
{
        static const struct addr_val_pair intr_en_avp[] = {
                {A_SG_INT_ENABLE, SGE_INTR_MASK},
                {A_MC7_INT_ENABLE, MC7_INTR_MASK},
                {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
                 MC7_INTR_MASK},
                {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
                 MC7_INTR_MASK},
                {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
                {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
                {A_TP_INT_ENABLE, 0x3bfffff},
                {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
                {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
                {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
                {A_MPS_INT_ENABLE, MPS_INTR_MASK},
        };

        adapter->slow_intr_mask = PL_INTR_MASK;

        t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);

        if (adapter->params.rev > 0) {
                t3_write_reg(adapter, A_CPL_INTR_ENABLE,
                             CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
                t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
                             ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
                             F_PBL_BOUND_ERR_CH1);
        } else {
                t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
                t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
        }

        t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
                     adapter_info(adapter)->gpio_intr);
        t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
                     adapter_info(adapter)->gpio_intr);
        if (is_pcie(adapter))
                t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
        else
                t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
        t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
        t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
}

/**
 * t3_intr_disable - disable a card's interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts.  We only disable the top-level interrupt
 * concentrator and the SGE data interrupts.
 */
void t3_intr_disable(struct adapter *adapter)
{
        t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
        t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
        adapter->slow_intr_mask = 0;
}

/**
 * t3_intr_clear - clear all interrupts
 * @adapter: the adapter whose interrupts should be cleared
 *
 * Clears all interrupts.
 */
void t3_intr_clear(struct adapter *adapter)
{
        static const unsigned int cause_reg_addr[] = {
                A_SG_INT_CAUSE,
                A_SG_RSPQ_FL_STATUS,
                A_PCIX_INT_CAUSE,
                A_MC7_INT_CAUSE,
                A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
                A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
                A_CIM_HOST_INT_CAUSE,
                A_TP_INT_CAUSE,
                A_MC5_DB_INT_CAUSE,
                A_ULPRX_INT_CAUSE,
                A_ULPTX_INT_CAUSE,
                A_CPL_INTR_CAUSE,
                A_PM1_TX_INT_CAUSE,
                A_PM1_RX_INT_CAUSE,
                A_MPS_INT_CAUSE,
                A_T3DBG_INT_CAUSE,
        };
        unsigned int i;

        /* Clear PHY and MAC interrupts for each port. */
        for_each_port(adapter, i)
                t3_port_intr_clear(adapter, i);

        for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
                t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

        t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
        t3_read_reg(adapter, A_PL_INT_CAUSE0);  /* flush */
}

/**
 * t3_port_intr_enable - enable port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts should be enabled
 *
 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
 * adapter port.
 */
void t3_port_intr_enable(struct adapter *adapter, int idx)
{
        struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

        t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
        t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
        phy->ops->intr_enable(phy);
}

/**
 * t3_port_intr_disable - disable port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts should be disabled
 *
 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
 * adapter port.
 */
void t3_port_intr_disable(struct adapter *adapter, int idx)
{
        struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

        t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
        t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
        phy->ops->intr_disable(phy);
}

/**
 * t3_port_intr_clear - clear port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts to clear
 *
 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
 * adapter port.
 */
void t3_port_intr_clear(struct adapter *adapter, int idx)
{
        struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

        t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
        t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
        phy->ops->intr_clear(phy);
}
1727
1728/**
1729 * t3_sge_write_context - write an SGE context
1730 * @adapter: the adapter
1731 * @id: the context id
1732 * @type: the context type
1733 *
1734 * Program an SGE context with the values already loaded in the
1735 * CONTEXT_DATA0..3 registers.
1736 */
1737static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1738 unsigned int type)
1739{
1740 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1741 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1742 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1743 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1744 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1745 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1746 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1747 0, 5, 1);
1748}
1749
1750/**
1751 * t3_sge_init_ecntxt - initialize an SGE egress context
1752 * @adapter: the adapter to configure
1753 * @id: the context id
1754 * @gts_enable: whether to enable GTS for the context
1755 * @type: the egress context type
1756 * @respq: associated response queue
1757 * @base_addr: base address of queue
1758 * @size: number of queue entries
1759 * @token: uP token
1760 * @gen: initial generation value for the context
1761 * @cidx: consumer pointer
1762 *
1763 * Initialize an SGE egress context and make it ready for use. If the
1764 * platform allows concurrent context operations, the caller is
1765 * responsible for appropriate locking.
1766 */
1767int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1768 enum sge_context_type type, int respq, u64 base_addr,
1769 unsigned int size, unsigned int token, int gen,
1770 unsigned int cidx)
1771{
1772 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1773
1774 if (base_addr & 0xfff) /* must be 4K aligned */
1775 return -EINVAL;
1776 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1777 return -EBUSY;
1778
1779 base_addr >>= 12;
1780 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1781 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1782 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1783 V_EC_BASE_LO(base_addr & 0xffff));
1784 base_addr >>= 16;
1785 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1786 base_addr >>= 32;
1787 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1788 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1789 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1790 F_EC_VALID);
1791 return t3_sge_write_context(adapter, id, F_EGRESS);
1792}
1793
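/*
 * Example (editorial sketch): programming an egress context for an
 * Ethernet TX queue.  SGE_CNTXT_ETH and the parameter choices below are
 * assumptions for illustration; the real callers live in the SGE code.
 * @dma_addr must be 4K aligned or the call fails with -EINVAL.
 */
static inline int example_init_eth_ecntxt(struct adapter *adap,
					  unsigned int id, u64 dma_addr,
					  unsigned int nentries)
{
	/* GTS off, bound to response queue 0, generation starts at 1 */
	return t3_sge_init_ecntxt(adap, id, 0, SGE_CNTXT_ETH, 0, dma_addr,
				  nentries, id, 1, 0);
}
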
1794/**
1795 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1796 * @adapter: the adapter to configure
1797 * @id: the context id
1798 * @gts_enable: whether to enable GTS for the context
1799 * @base_addr: base address of queue
1800 * @size: number of queue entries
1801 * @bsize: size of each buffer for this queue
1802 * @cong_thres: threshold to signal congestion to upstream producers
1803 * @gen: initial generation value for the context
1804 * @cidx: consumer pointer
1805 *
1806 * Initialize an SGE free list context and make it ready for use. The
1807 * caller is responsible for ensuring only one context operation occurs
1808 * at a time.
1809 */
1810int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
1811 int gts_enable, u64 base_addr, unsigned int size,
1812 unsigned int bsize, unsigned int cong_thres, int gen,
1813 unsigned int cidx)
1814{
1815 if (base_addr & 0xfff) /* must be 4K aligned */
1816 return -EINVAL;
1817 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1818 return -EBUSY;
1819
1820 base_addr >>= 12;
1821 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
1822 base_addr >>= 32;
1823 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1824 V_FL_BASE_HI((u32) base_addr) |
1825 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1826 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1827 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1828 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1829 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1830 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1831 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1832 return t3_sge_write_context(adapter, id, F_FREELIST);
1833}
1834
1835/**
1836 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1837 * @adapter: the adapter to configure
1838 * @id: the context id
1839 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1840 * @base_addr: base address of queue
1841 * @size: number of queue entries
1842 * @fl_thres: threshold for selecting the normal or jumbo free list
1843 * @gen: initial generation value for the context
1844 * @cidx: consumer pointer
1845 *
1846 * Initialize an SGE response queue context and make it ready for use.
1847 * The caller is responsible for ensuring only one context operation
1848 * occurs at a time.
1849 */
1850int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
1851 int irq_vec_idx, u64 base_addr, unsigned int size,
1852 unsigned int fl_thres, int gen, unsigned int cidx)
1853{
1854 unsigned int intr = 0;
1855
1856 if (base_addr & 0xfff) /* must be 4K aligned */
1857 return -EINVAL;
1858 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1859 return -EBUSY;
1860
1861 base_addr >>= 12;
1862 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
1863 V_CQ_INDEX(cidx));
1864 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1865 base_addr >>= 32;
1866 if (irq_vec_idx >= 0)
1867 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
1868 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1869 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
1870 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
1871 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
1872}
1873
1874/**
1875 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
1876 * @adapter: the adapter to configure
1877 * @id: the context id
1878 * @base_addr: base address of queue
1879 * @size: number of queue entries
1880 * @rspq: response queue for async notifications
1881 * @ovfl_mode: CQ overflow mode
1882 * @credits: completion queue credits
1883 * @credit_thres: the credit threshold
1884 *
1885 * Initialize an SGE completion queue context and make it ready for use.
1886 * The caller is responsible for ensuring only one context operation
1887 * occurs at a time.
1888 */
1889int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
1890 unsigned int size, int rspq, int ovfl_mode,
1891 unsigned int credits, unsigned int credit_thres)
1892{
1893 if (base_addr & 0xfff) /* must be 4K aligned */
1894 return -EINVAL;
1895 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1896 return -EBUSY;
1897
1898 base_addr >>= 12;
1899 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
1900 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1901 base_addr >>= 32;
1902 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1903 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
1904 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
1905 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
1906 V_CQ_CREDIT_THRES(credit_thres));
1907 return t3_sge_write_context(adapter, id, F_CQ);
1908}
1909
1910/**
1911 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
1912 * @adapter: the adapter
1913 * @id: the egress context id
1914 * @enable: enable (1) or disable (0) the context
1915 *
1916 * Enable or disable an SGE egress context. The caller is responsible for
1917 * ensuring only one context operation occurs at a time.
1918 */
1919int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
1920{
1921 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1922 return -EBUSY;
1923
1924 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1925 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1926 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1927 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
1928 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
1929 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1930 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
1931 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1932 0, 5, 1);
1933}
1934
1935/**
1936 * t3_sge_disable_fl - disable an SGE free-buffer list
1937 * @adapter: the adapter
1938 * @id: the free list context id
1939 *
1940 * Disable an SGE free-buffer list. The caller is responsible for
1941 * ensuring only one context operation occurs at a time.
1942 */
1943int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
1944{
1945 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1946 return -EBUSY;
1947
1948 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1949 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1950 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
1951 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1952 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
1953 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1954 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
1955 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1956 0, 5, 1);
1957}
1958
1959/**
1960 * t3_sge_disable_rspcntxt - disable an SGE response queue
1961 * @adapter: the adapter
1962 * @id: the response queue context id
1963 *
1964 * Disable an SGE response queue. The caller is responsible for
1965 * ensuring only one context operation occurs at a time.
1966 */
1967int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
1968{
1969 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1970 return -EBUSY;
1971
1972 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
1973 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1974 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1975 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1976 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
1977 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1978 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
1979 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1980 0, 5, 1);
1981}
1982
1983/**
1984 * t3_sge_disable_cqcntxt - disable an SGE completion queue
1985 * @adapter: the adapter
1986 * @id: the completion queue context id
1987 *
1988 * Disable an SGE completion queue. The caller is responsible for
1989 * ensuring only one context operation occurs at a time.
1990 */
1991int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
1992{
1993 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1994 return -EBUSY;
1995
1996 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
1997 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1998 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1999 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2000 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2001 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2002 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2003 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2004 0, 5, 1);
2005}
2006
2007/**
2008 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2009 * @adapter: the adapter
2010 * @id: the context id
2011 * @op: the operation to perform
 * @credits: the number of CQ credits to supply with the operation
2012 *
2013 * Perform the selected operation on an SGE completion queue context.
2014 * The caller is responsible for ensuring only one context operation
2015 * occurs at a time.
2016 */
2017int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2018 unsigned int credits)
2019{
2020 u32 val;
2021
2022 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2023 return -EBUSY;
2024
2025 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2026 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2027 V_CONTEXT(id) | F_CQ);
2028 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2029 0, 5, 1, &val))
2030 return -EIO;
2031
2032 if (op >= 2 && op < 7) {
2033 if (adapter->params.rev > 0)
2034 return G_CQ_INDEX(val);
2035
2036 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2037 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2038 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2039 F_CONTEXT_CMD_BUSY, 0, 5, 1))
2040 return -EIO;
2041 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2042 }
2043 return 0;
2044}
2045
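/*
 * Example (editorial sketch): handing credits back to completion queue
 * @id.  The opcode value below is a placeholder, not a real firmware
 * opcode; actual opcodes are defined by the ULP/firmware interface.
 */
static inline int example_cq_return_credits(struct adapter *adap,
					    unsigned int id,
					    unsigned int credits)
{
	return t3_sge_cqcntxt_op(adap, id, 1 /* hypothetical opcode */,
				 credits);
}
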
2046/**
2047 * t3_sge_read_context - read an SGE context
2048 * @type: the context type
2049 * @adapter: the adapter
2050 * @id: the context id
2051 * @data: holds the retrieved context
2052 *
2053 * Read an SGE context of the given type. The caller is responsible for
2054 * only one context operation occurs at a time.
2055 */
2056static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2057 unsigned int id, u32 data[4])
2058{
2059 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2060 return -EBUSY;
2061
2062 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2063 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2064 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2065 5, 1))
2066 return -EIO;
2067 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2068 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2069 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2070 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2071 return 0;
2072}
2073
2074/**
2075 * t3_sge_read_ecntxt - read an SGE egress context
2076 * @adapter: the adapter
2077 * @id: the context id
2078 * @data: holds the retrieved context
2079 *
2080 * Read an SGE egress context. The caller is responsible for ensuring
2081 * only one context operation occurs at a time.
2082 */
2083int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2084{
2085 if (id >= 65536)
2086 return -EINVAL;
2087 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2088}
2089
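/*
 * Example (editorial sketch): reading back an egress context to check
 * its valid bit.  F_EC_VALID sits in the fourth context word, where
 * t3_sge_init_ecntxt() above wrote it.
 */
static inline int example_ecntxt_is_valid(struct adapter *adap,
					  unsigned int id)
{
	u32 data[4];
	int ret = t3_sge_read_ecntxt(adap, id, data);

	return ret ? ret : !!(data[3] & F_EC_VALID);
}
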
2090/**
2091 * t3_sge_read_cq - read an SGE CQ context
2092 * @adapter: the adapter
2093 * @id: the context id
2094 * @data: holds the retrieved context
2095 *
2096 * Read an SGE CQ context. The caller is responsible for ensuring
2097 * only one context operation occurs at a time.
2098 */
2099int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2100{
2101 if (id >= 65536)
2102 return -EINVAL;
2103 return t3_sge_read_context(F_CQ, adapter, id, data);
2104}
2105
2106/**
2107 * t3_sge_read_fl - read an SGE free-list context
2108 * @adapter: the adapter
2109 * @id: the context id
2110 * @data: holds the retrieved context
2111 *
2112 * Read an SGE free-list context. The caller is responsible for ensuring
2113 * only one context operation occurs at a time.
2114 */
2115int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2116{
2117 if (id >= SGE_QSETS * 2)
2118 return -EINVAL;
2119 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2120}
2121
2122/**
2123 * t3_sge_read_rspq - read an SGE response queue context
2124 * @adapter: the adapter
2125 * @id: the context id
2126 * @data: holds the retrieved context
2127 *
2128 * Read an SGE response queue context. The caller is responsible for
2129 * ensuring only one context operation occurs at a time.
2130 */
2131int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2132{
2133 if (id >= SGE_QSETS)
2134 return -EINVAL;
2135 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2136}
2137
2138/**
2139 * t3_config_rss - configure Rx packet steering
2140 * @adapter: the adapter
2141 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2142 * @cpus: values for the CPU lookup table (0xff terminated)
2143 * @rspq: values for the response queue lookup table (0xffff terminated)
2144 *
2145 * Programs the receive packet steering logic. @cpus and @rspq provide
2146 * the values for the CPU and response queue lookup tables. If they
2147 * provide fewer values than the size of the tables, the supplied values
2148 * are used repeatedly until the tables are fully populated.
2149 */
2150void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2151 const u8 * cpus, const u16 *rspq)
2152{
2153 int i, j, cpu_idx = 0, q_idx = 0;
2154
2155 if (cpus)
2156 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2157 u32 val = i << 16;
2158
2159 for (j = 0; j < 2; ++j) {
2160 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2161 if (cpus[cpu_idx] == 0xff)
2162 cpu_idx = 0;
2163 }
2164 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2165 }
2166
2167 if (rspq)
2168 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2169 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2170 (i << 16) | rspq[q_idx++]);
2171 if (rspq[q_idx] == 0xffff)
2172 q_idx = 0;
2173 }
2174
2175 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2176}
2177
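/*
 * Example (editorial sketch): a minimal RSS setup steering all traffic
 * to CPU 0 and response queue 0.  The lookup tables are 0xff/0xffff
 * terminated; shorter inputs are simply repeated until the HW tables are
 * full.  The config flags shown are illustrative, not a recommended
 * policy.
 */
static inline void example_config_rss_single_q(struct adapter *adap)
{
	static const u8 cpus[] = { 0, 0xff };
	static const u16 rspq[] = { 0, 0xffff };

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN,
		      cpus, rspq);
}
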
2178/**
2179 * t3_read_rss - read the contents of the RSS tables
2180 * @adapter: the adapter
2181 * @lkup: holds the contents of the RSS lookup table
2182 * @map: holds the contents of the RSS map table
2183 *
2184 * Reads the contents of the receive packet steering tables.
2185 */
2186int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2187{
2188 int i;
2189 u32 val;
2190
2191 if (lkup)
2192 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2193 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2194 0xffff0000 | i);
2195 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2196 if (!(val & 0x80000000))
2197 return -EAGAIN;
2198 *lkup++ = val;
2199 *lkup++ = (val >> 8);
2200 }
2201
2202 if (map)
2203 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2204 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2205 0xffff0000 | i);
2206 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2207 if (!(val & 0x80000000))
2208 return -EAGAIN;
2209 *map++ = val;
2210 }
2211 return 0;
2212}
2213
2214/**
2215 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2216 * @adap: the adapter
2217 * @enable: 1 to select offload mode, 0 for regular NIC
2218 *
2219 * Switches TP to NIC/offload mode.
2220 */
2221void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2222{
2223 if (is_offload(adap) || !enable)
2224 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2225 V_NICMODE(!enable));
2226}
2227
2228/**
2229 * pm_num_pages - calculate the number of pages of the payload memory
2230 * @mem_size: the size of the payload memory
2231 * @pg_size: the size of each payload memory page
2232 *
2233 * Calculate the number of pages, each of the given size, that fit in a
2234 * memory of the specified size, respecting the HW requirement that the
2235 * number of pages must be a multiple of 24.
2236 */
2237static inline unsigned int pm_num_pages(unsigned int mem_size,
2238 unsigned int pg_size)
2239{
2240 unsigned int n = mem_size / pg_size;
2241
2242 return n - n % 24;
2243}
2244
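/*
 * Example (editorial): a 64 MB channel with 64 KB pages yields 1024
 * pages, which pm_num_pages() rounds down to 1008, the nearest multiple
 * of 24.
 */
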
2245#define mem_region(adap, start, size, reg) \
2246 t3_write_reg((adap), A_ ## reg, (start)); \
2247 start += size
2248
2249/**
2250 * partition_mem - partition memory and configure TP memory settings
2251 * @adap: the adapter
2252 * @p: the TP parameters
2253 *
2254 * Partitions context and payload memory and configures TP's memory
2255 * registers.
2256 */
2257static void partition_mem(struct adapter *adap, const struct tp_params *p)
2258{
2259 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2260 unsigned int timers = 0, timers_shift = 22;
2261
2262 if (adap->params.rev > 0) {
2263 if (tids <= 16 * 1024) {
2264 timers = 1;
2265 timers_shift = 16;
2266 } else if (tids <= 64 * 1024) {
2267 timers = 2;
2268 timers_shift = 18;
2269 } else if (tids <= 256 * 1024) {
2270 timers = 3;
2271 timers_shift = 20;
2272 }
2273 }
2274
2275 t3_write_reg(adap, A_TP_PMM_SIZE,
2276 p->chan_rx_size | (p->chan_tx_size >> 16));
2277
2278 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2279 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2280 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2281 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2282 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2283
2284 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2285 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2286 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2287
2288 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2289	/* Add a bit of headroom and round the total down to a multiple of 24 */
2290 pstructs += 48;
2291 pstructs -= pstructs % 24;
2292 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2293
2294 m = tids * TCB_SIZE;
2295 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2296 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2297 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2298 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2299 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2300 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2301 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2302 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2303
2304 m = (m + 4095) & ~0xfff;
2305 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2306 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2307
2308 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2309 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2310 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2311 if (tids < m)
2312 adap->params.mc5.nservers += m - tids;
2313}
2314
2315static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2316 u32 val)
2317{
2318 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2319 t3_write_reg(adap, A_TP_PIO_DATA, val);
2320}
2321
2322static void tp_config(struct adapter *adap, const struct tp_params *p)
2323{
2324 unsigned int v;
2325
2326 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2327 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2328 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2329 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2330 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2331 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2332 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2333 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2334 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2335 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2336 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2337 F_IPV6ENABLE | F_NICMODE);
2338 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2339 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2340 t3_set_reg_field(adap, A_TP_PARA_REG6,
2341 adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
2342 0);
2343
2344 v = t3_read_reg(adap, A_TP_PC_CONFIG);
2345 v &= ~(F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL);
2346 t3_write_reg(adap, A_TP_PC_CONFIG, v | F_TXDEFERENABLE |
2347 F_MODULATEUNIONMODE | F_HEARBEATDACK |
2348 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2349
2350 v = t3_read_reg(adap, A_TP_PC_CONFIG2);
2351 v &= ~F_CHDRAFULL;
2352 t3_write_reg(adap, A_TP_PC_CONFIG2, v);
2353
2354 if (adap->params.rev > 0) {
2355 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2356 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2357 F_TXPACEAUTO);
2358 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2359 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2360 } else
2361 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2362
2363 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
2364 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
2365 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
2366}
2367
2368/* Desired TP timer resolution in usec */
2369#define TP_TMR_RES 50
2370
2371/* TCP timer values in ms */
2372#define TP_DACK_TIMER 50
2373#define TP_RTO_MIN 250
2374
2375/**
2376 * tp_set_timers - set TP timing parameters
2377 * @adap: the adapter to set
2378 * @core_clk: the core clock frequency in Hz
2379 *
2380 * Set TP's timing parameters, such as the various timer resolutions and
2381 * the TCP timer values.
2382 */
2383static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2384{
2385 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2386 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2387 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2388 unsigned int tps = core_clk >> tre;
2389
2390 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2391 V_DELAYEDACKRESOLUTION(dack_re) |
2392 V_TIMESTAMPRESOLUTION(tstamp_re));
2393 t3_write_reg(adap, A_TP_DACK_TIMER,
2394 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2395 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2396 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2397 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2398 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2399 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2400 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2401 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2402 V_KEEPALIVEMAX(9));
2403
2404#define SECONDS * tps
2405
2406 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2407 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2408 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2409 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2410 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2411 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2412 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2413 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2414 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2415
2416#undef SECONDS
2417}
2418
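/*
 * Example (editorial): with a 200 MHz core clock there are 10000 core
 * cycles in the desired 50 us resolution, so tre = fls(10000) - 1 = 13
 * and one timer tick is 2^13 cycles ~= 41 us, the largest power-of-two
 * tick that is no coarser than TP_TMR_RES.
 */
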
2419/**
2420 * t3_tp_set_coalescing_size - set receive coalescing size
2421 * @adap: the adapter
2422 * @size: the receive coalescing size
2423 * @psh: whether a set PSH bit should deliver coalesced data
2424 *
2425 * Set the receive coalescing size and PSH bit handling.
2426 */
2427int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2428{
2429 u32 val;
2430
2431 if (size > MAX_RX_COALESCING_LEN)
2432 return -EINVAL;
2433
2434 val = t3_read_reg(adap, A_TP_PARA_REG3);
2435 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2436
2437 if (size) {
2438 val |= F_RXCOALESCEENABLE;
2439 if (psh)
2440 val |= F_RXCOALESCEPSHEN;
2441 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2442 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2443 }
2444 t3_write_reg(adap, A_TP_PARA_REG3, val);
2445 return 0;
2446}
2447
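/*
 * Example (editorial sketch): enable RX coalescing up to a jumbo frame
 * and let a set PSH bit deliver early.  This assumes 9000 is within
 * MAX_RX_COALESCING_LEN; otherwise the call returns -EINVAL.
 */
static inline int example_enable_rx_coalescing(struct adapter *adap)
{
	return t3_tp_set_coalescing_size(adap, 9000, 1);
}
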
2448/**
2449 * t3_tp_set_max_rxsize - set the max receive size
2450 * @adap: the adapter
2451 * @size: the max receive size
2452 *
2453 * Set TP's max receive size. This is the limit that applies when
2454 * receive coalescing is disabled.
2455 */
2456void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2457{
2458 t3_write_reg(adap, A_TP_PARA_REG7,
2459 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2460}
2461
2462static void __devinit init_mtus(unsigned short mtus[])
2463{
2464 /*
2465 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2466 * it can accommodate max-size TCP/IP headers when SACK and timestamps
2467 * are enabled and still have at least 8 bytes of payload.
2468 */
2469 mtus[0] = 88;
2470 mtus[1] = 256;
2471 mtus[2] = 512;
2472 mtus[3] = 576;
2473 mtus[4] = 808;
2474 mtus[5] = 1024;
2475 mtus[6] = 1280;
2476 mtus[7] = 1492;
2477 mtus[8] = 1500;
2478 mtus[9] = 2002;
2479 mtus[10] = 2048;
2480 mtus[11] = 4096;
2481 mtus[12] = 4352;
2482 mtus[13] = 8192;
2483 mtus[14] = 9000;
2484 mtus[15] = 9600;
2485}
2486
2487/*
2488 * Initial congestion control parameters.
2489 */
2490static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2491{
2492 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2493 a[9] = 2;
2494 a[10] = 3;
2495 a[11] = 4;
2496 a[12] = 5;
2497 a[13] = 6;
2498 a[14] = 7;
2499 a[15] = 8;
2500 a[16] = 9;
2501 a[17] = 10;
2502 a[18] = 14;
2503 a[19] = 17;
2504 a[20] = 21;
2505 a[21] = 25;
2506 a[22] = 30;
2507 a[23] = 35;
2508 a[24] = 45;
2509 a[25] = 60;
2510 a[26] = 80;
2511 a[27] = 100;
2512 a[28] = 200;
2513 a[29] = 300;
2514 a[30] = 400;
2515 a[31] = 500;
2516
2517 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2518 b[9] = b[10] = 1;
2519 b[11] = b[12] = 2;
2520 b[13] = b[14] = b[15] = b[16] = 3;
2521 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2522 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2523 b[28] = b[29] = 6;
2524 b[30] = b[31] = 7;
2525}
2526
2527/* The minimum additive increment value for the congestion control table */
2528#define CC_MIN_INCR 2U
2529
2530/**
2531 * t3_load_mtus - write the MTU and congestion control HW tables
2532 * @adap: the adapter
2533 * @mtus: the unrestricted values for the MTU table
2534 * @alpha: the values for the congestion control alpha parameter
2535 * @beta: the values for the congestion control beta parameter
2536 * @mtu_cap: the maximum permitted effective MTU
2537 *
2538 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2539 * Update the high-speed congestion control table with the supplied alpha,
2540 * beta, and MTUs.
2541 */
2542void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2543 unsigned short alpha[NCCTRL_WIN],
2544 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2545{
2546 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2547 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2548 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2549 28672, 40960, 57344, 81920, 114688, 163840, 229376
2550 };
2551
2552 unsigned int i, w;
2553
2554 for (i = 0; i < NMTUS; ++i) {
2555 unsigned int mtu = min(mtus[i], mtu_cap);
2556 unsigned int log2 = fls(mtu);
2557
2558 if (!(mtu & ((1 << log2) >> 2))) /* round */
2559 log2--;
2560 t3_write_reg(adap, A_TP_MTU_TABLE,
2561 (i << 24) | (log2 << 16) | mtu);
2562
2563 for (w = 0; w < NCCTRL_WIN; ++w) {
2564 unsigned int inc;
2565
2566 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2567 CC_MIN_INCR);
2568
2569 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2570 (w << 16) | (beta[w] << 13) | inc);
2571 }
2572 }
2573}
2574
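/*
 * Example (editorial): for mtu = 1500, alpha = 4 and congestion window 0
 * (avg_pkts = 2) the additive increment is (1500 - 40) * 4 / 2 = 2920;
 * only tiny products get clamped up to CC_MIN_INCR.
 */
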
2575/**
2576 * t3_read_hw_mtus - returns the values in the HW MTU table
2577 * @adap: the adapter
2578 * @mtus: where to store the HW MTU values
2579 *
2580 * Reads the HW MTU table.
2581 */
2582void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2583{
2584 int i;
2585
2586 for (i = 0; i < NMTUS; ++i) {
2587 unsigned int val;
2588
2589 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2590 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2591 mtus[i] = val & 0x3fff;
2592 }
2593}
2594
2595/**
2596 * t3_get_cong_cntl_tab - reads the congestion control table
2597 * @adap: the adapter
2598 * @incr: where to store the additive increments
2599 *
2600 * Reads the additive increments programmed into the HW congestion
2601 * control table.
2602 */
2603void t3_get_cong_cntl_tab(struct adapter *adap,
2604 unsigned short incr[NMTUS][NCCTRL_WIN])
2605{
2606 unsigned int mtu, w;
2607
2608 for (mtu = 0; mtu < NMTUS; ++mtu)
2609 for (w = 0; w < NCCTRL_WIN; ++w) {
2610 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2611 0xffff0000 | (mtu << 5) | w);
2612 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2613 0x1fff;
2614 }
2615}
2616
2617/**
2618 * t3_tp_get_mib_stats - read TP's MIB counters
2619 * @adap: the adapter
2620 * @tps: holds the returned counter values
2621 *
2622 * Returns the values of TP's MIB counters.
2623 */
2624void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2625{
2626 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2627 sizeof(*tps) / sizeof(u32), 0);
2628}
2629
2630#define ulp_region(adap, name, start, len) \
2631 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2632 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2633 (start) + (len) - 1); \
2634 start += len
2635
2636#define ulptx_region(adap, name, start, len) \
2637 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2638 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2639 (start) + (len) - 1)
2640
2641static void ulp_config(struct adapter *adap, const struct tp_params *p)
2642{
2643 unsigned int m = p->chan_rx_size;
2644
2645 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2646 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2647 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2648 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2649 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2650 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2651 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2652 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2653}
2654
2655void t3_config_trace_filter(struct adapter *adapter,
2656 const struct trace_params *tp, int filter_index,
2657 int invert, int enable)
2658{
2659 u32 addr, key[4], mask[4];
2660
2661 key[0] = tp->sport | (tp->sip << 16);
2662 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2663 key[2] = tp->dip;
2664 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2665
2666 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2667 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2668 mask[2] = tp->dip_mask;
2669 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2670
2671 if (invert)
2672 key[3] |= (1 << 29);
2673 if (enable)
2674 key[3] |= (1 << 28);
2675
2676 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2677 tp_wr_indirect(adapter, addr++, key[0]);
2678 tp_wr_indirect(adapter, addr++, mask[0]);
2679 tp_wr_indirect(adapter, addr++, key[1]);
2680 tp_wr_indirect(adapter, addr++, mask[1]);
2681 tp_wr_indirect(adapter, addr++, key[2]);
2682 tp_wr_indirect(adapter, addr++, mask[2]);
2683 tp_wr_indirect(adapter, addr++, key[3]);
2684 tp_wr_indirect(adapter, addr, mask[3]);
2685 t3_read_reg(adapter, A_TP_PIO_DATA);
2686}
2687
2688/**
2689 * t3_config_sched - configure a HW traffic scheduler
2690 * @adap: the adapter
2691 * @kbps: target rate in Kbps
2692 * @sched: the scheduler index
2693 *
2694 * Configure a HW scheduler for the target rate.
2695 */
2696int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2697{
2698 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2699 unsigned int clk = adap->params.vpd.cclk * 1000;
2700 unsigned int selected_cpt = 0, selected_bpt = 0;
2701
2702 if (kbps > 0) {
2703 kbps *= 125; /* -> bytes */
2704 for (cpt = 1; cpt <= 255; cpt++) {
2705 tps = clk / cpt;
2706 bpt = (kbps + tps / 2) / tps;
2707 if (bpt > 0 && bpt <= 255) {
2708 v = bpt * tps;
2709 delta = v >= kbps ? v - kbps : kbps - v;
2710 if (delta <= mindelta) {
2711 mindelta = delta;
2712 selected_cpt = cpt;
2713 selected_bpt = bpt;
2714 }
2715 } else if (selected_cpt)
2716 break;
2717 }
2718 if (!selected_cpt)
2719 return -EINVAL;
2720 }
2721 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2722 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2723 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2724 if (sched & 1)
2725 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2726 else
2727 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2728 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2729 return 0;
2730}
2731
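/*
 * Example (editorial): with a 200 MHz VPD clock, a 10 Mbps target becomes
 * 1,250,000 bytes/s; the search lands on cpt = 160 (tps = 1,250,000) and
 * bpt = 1, which matches the target exactly (delta = 0).
 */
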
2732static int tp_init(struct adapter *adap, const struct tp_params *p)
2733{
2734 int busy = 0;
2735
2736 tp_config(adap, p);
2737 t3_set_vlan_accel(adap, 3, 0);
2738
2739 if (is_offload(adap)) {
2740 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2741 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2742 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2743 0, 1000, 5);
2744 if (busy)
2745 CH_ERR(adap, "TP initialization timed out\n");
2746 }
2747
2748 if (!busy)
2749 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
2750 return busy;
2751}
2752
2753int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2754{
2755 if (port_mask & ~((1 << adap->params.nports) - 1))
2756 return -EINVAL;
2757 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2758 port_mask << S_PORT0ACTIVE);
2759 return 0;
2760}
2761
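/*
 * Example (editorial sketch): activating every port the card reports.
 */
static inline int example_activate_all_ports(struct adapter *adap)
{
	return t3_mps_set_active_ports(adap,
				       (1 << adap->params.nports) - 1);
}
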
2762/*
2763 * Perform the bits of HW initialization that are dependent on the number
2764 * of available ports.
2765 */
2766static void init_hw_for_avail_ports(struct adapter *adap, int nports)
2767{
2768 int i;
2769
2770 if (nports == 1) {
2771 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
2772 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2773 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2774 F_PORT0ACTIVE | F_ENFORCEPKT);
2775 t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
2776 } else {
2777 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2778 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
2779 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
2780 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
2781 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
2782 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
2783 F_ENFORCEPKT);
2784 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
2785 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
2786 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
2787 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
2788 for (i = 0; i < 16; i++)
2789 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
2790 (i << 16) | 0x1010);
2791 }
2792}
2793
2794static int calibrate_xgm(struct adapter *adapter)
2795{
2796 if (uses_xaui(adapter)) {
2797 unsigned int v, i;
2798
2799 for (i = 0; i < 5; ++i) {
2800 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
2801 t3_read_reg(adapter, A_XGM_XAUI_IMP);
2802 msleep(1);
2803 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
2804 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
2805 t3_write_reg(adapter, A_XGM_XAUI_IMP,
2806 V_XAUIIMP(G_CALIMP(v) >> 2));
2807 return 0;
2808 }
2809 }
2810 CH_ERR(adapter, "MAC calibration failed\n");
2811 return -1;
2812 } else {
2813 t3_write_reg(adapter, A_XGM_RGMII_IMP,
2814 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2815 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2816 F_XGM_IMPSETUPDATE);
2817 }
2818 return 0;
2819}
2820
2821static void calibrate_xgm_t3b(struct adapter *adapter)
2822{
2823 if (!uses_xaui(adapter)) {
2824 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
2825 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2826 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
2827 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
2828 F_XGM_IMPSETUPDATE);
2829 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2830 0);
2831 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
2832 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
2833 }
2834}
2835
2836struct mc7_timing_params {
2837 unsigned char ActToPreDly;
2838 unsigned char ActToRdWrDly;
2839 unsigned char PreCyc;
2840 unsigned char RefCyc[5];
2841 unsigned char BkCyc;
2842 unsigned char WrToRdDly;
2843 unsigned char RdToWrDly;
2844};
2845
2846/*
2847 * Write a value to a register and check that the write completed. These
2848 * writes normally complete in a cycle or two, so one read should suffice.
2849 * The very first read exists to flush the posted write to the device.
2850 */
2851static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
2852{
2853 t3_write_reg(adapter, addr, val);
2854 t3_read_reg(adapter, addr); /* flush */
2855 if (!(t3_read_reg(adapter, addr) & F_BUSY))
2856 return 0;
2857 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
2858 return -EIO;
2859}
2860
2861static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
2862{
2863 static const unsigned int mc7_mode[] = {
2864 0x632, 0x642, 0x652, 0x432, 0x442
2865 };
2866 static const struct mc7_timing_params mc7_timings[] = {
2867 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
2868 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
2869 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
2870 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
2871 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
2872 };
2873
2874 u32 val;
2875 unsigned int width, density, slow, attempts;
2876 struct adapter *adapter = mc7->adapter;
2877 const struct mc7_timing_params *p = &mc7_timings[mem_type];
2878
2879 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
2880 slow = val & F_SLOW;
2881 width = G_WIDTH(val);
2882 density = G_DEN(val);
2883
2884 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
2885 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2886 msleep(1);
2887
2888 if (!slow) {
2889 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
2890 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
2891 msleep(1);
2892 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
2893 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
2894 CH_ERR(adapter, "%s MC7 calibration timed out\n",
2895 mc7->name);
2896 goto out_fail;
2897 }
2898 }
2899
2900 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
2901 V_ACTTOPREDLY(p->ActToPreDly) |
2902 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
2903 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
2904 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
2905
2906 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
2907 val | F_CLKEN | F_TERM150);
2908 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2909
2910 if (!slow)
2911 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
2912 F_DLLENB);
2913 udelay(1);
2914
2915 val = slow ? 3 : 6;
2916 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2917 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
2918 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
2919 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2920 goto out_fail;
2921
2922 if (!slow) {
2923 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
2924 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
2925 udelay(5);
2926 }
2927
2928 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2929 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2930 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2931 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
2932 mc7_mode[mem_type]) ||
2933 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
2934 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2935 goto out_fail;
2936
2937	/* The clock value is in KHz; convert it to the number of clock
	 * cycles in one 7.8125 us refresh interval (KHz * 7812.5 / 10^6).
	 */
2938	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;
2939	mc7_clock /= 1000000;
2940
2941 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
2942 F_PERREFEN | V_PREREFDIV(mc7_clock));
2943 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
2944
2945 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
2946 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
2947 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
2948 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
2949 (mc7->size << width) - 1);
2950 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
2951 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
2952
2953 attempts = 50;
2954 do {
2955 msleep(250);
2956 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
2957 } while ((val & F_BUSY) && --attempts);
2958 if (val & F_BUSY) {
2959 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
2960 goto out_fail;
2961 }
2962
2963 /* Enable normal memory accesses. */
2964 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
2965 return 0;
2966
2967out_fail:
2968 return -1;
2969}
2970
2971static void config_pcie(struct adapter *adap)
2972{
2973 static const u16 ack_lat[4][6] = {
2974 {237, 416, 559, 1071, 2095, 4143},
2975 {128, 217, 289, 545, 1057, 2081},
2976 {73, 118, 154, 282, 538, 1050},
2977 {67, 107, 86, 150, 278, 534}
2978 };
2979 static const u16 rpl_tmr[4][6] = {
2980 {711, 1248, 1677, 3213, 6285, 12429},
2981 {384, 651, 867, 1635, 3171, 6243},
2982 {219, 354, 462, 846, 1614, 3150},
2983 {201, 321, 258, 450, 834, 1602}
2984 };
2985
2986 u16 val;
2987 unsigned int log2_width, pldsize;
2988 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
2989
2990 pci_read_config_word(adap->pdev,
2991 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
2992 &val);
2993 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
2994 pci_read_config_word(adap->pdev,
2995 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
2996 &val);
2997
2998 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
2999 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3000 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3001 log2_width = fls(adap->params.pci.width) - 1;
3002 acklat = ack_lat[log2_width][pldsize];
3003 if (val & 1) /* check LOsEnable */
3004 acklat += fst_trn_tx * 4;
3005 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3006
3007 if (adap->params.rev == 0)
3008 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3009 V_T3A_ACKLAT(M_T3A_ACKLAT),
3010 V_T3A_ACKLAT(acklat));
3011 else
3012 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3013 V_ACKLAT(acklat));
3014
3015 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3016 V_REPLAYLMT(rpllmt));
3017
3018 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3019 t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
3020}
3021
3022/*
3023 * Initialize and configure T3 HW modules. This performs the
3024 * initialization steps that need to be done once after a card is reset.
3025 * MAC and PHY initialization is handled separately whenever a port is enabled.
3026 *
3027 * fw_params are passed to FW and their value is platform dependent. Only the
3028 * top 8 bits are available for use, the rest must be 0.
3029 */
3030int t3_init_hw(struct adapter *adapter, u32 fw_params)
3031{
3032 int err = -EIO, attempts = 100;
3033 const struct vpd_params *vpd = &adapter->params.vpd;
3034
3035 if (adapter->params.rev > 0)
3036 calibrate_xgm_t3b(adapter);
3037 else if (calibrate_xgm(adapter))
3038 goto out_err;
3039
3040 if (vpd->mclk) {
3041 partition_mem(adapter, &adapter->params.tp);
3042
3043 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3044 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3045 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3046 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3047 adapter->params.mc5.nfilters,
3048 adapter->params.mc5.nroutes))
3049 goto out_err;
3050 }
3051
3052 if (tp_init(adapter, &adapter->params.tp))
3053 goto out_err;
3054
3055 t3_tp_set_coalescing_size(adapter,
3056 min(adapter->params.sge.max_pkt_size,
3057 MAX_RX_COALESCING_LEN), 1);
3058 t3_tp_set_max_rxsize(adapter,
3059 min(adapter->params.sge.max_pkt_size, 16384U));
3060 ulp_config(adapter, &adapter->params.tp);
3061
3062 if (is_pcie(adapter))
3063 config_pcie(adapter);
3064 else
3065 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3066
3067 t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
3068 init_hw_for_avail_ports(adapter, adapter->params.nports);
3069 t3_sge_init(adapter, &adapter->params.sge);
3070
3071 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3072 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3073 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3074 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3075
3076 do { /* wait for uP to initialize */
3077 msleep(20);
3078 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3079 if (!attempts)
3080 goto out_err;
3081
3082 err = 0;
3083out_err:
3084 return err;
3085}
3086
3087/**
3088 * get_pci_mode - determine a card's PCI mode
3089 * @adapter: the adapter
3090 * @p: where to store the PCI settings
3091 *
3092 * Determines a card's PCI mode and associated parameters, such as speed
3093 * and width.
3094 */
3095static void __devinit get_pci_mode(struct adapter *adapter,
3096 struct pci_params *p)
3097{
3098	static const unsigned short speed_map[] = { 33, 66, 100, 133 };
3099 u32 pci_mode, pcie_cap;
3100
3101 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3102 if (pcie_cap) {
3103 u16 val;
3104
3105 p->variant = PCI_VARIANT_PCIE;
3106 p->pcie_cap_addr = pcie_cap;
3107 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3108 &val);
3109 p->width = (val >> 4) & 0x3f;
3110 return;
3111 }
3112
3113 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3114 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3115 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3116 pci_mode = G_PCIXINITPAT(pci_mode);
3117 if (pci_mode == 0)
3118 p->variant = PCI_VARIANT_PCI;
3119 else if (pci_mode < 4)
3120 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3121 else if (pci_mode < 8)
3122 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3123 else
3124 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3125}
3126
3127/**
3128 * init_link_config - initialize a link's SW state
3129 * @lc: structure holding the link state
3130 * @caps: bit mask of the link's capabilities
3131 *
3132 * Initializes the SW state maintained for each link, including the link's
3133 * capabilities and default speed/duplex/flow-control/autonegotiation
3134 * settings.
3135 */
3136static void __devinit init_link_config(struct link_config *lc,
3137 unsigned int caps)
3138{
3139 lc->supported = caps;
3140 lc->requested_speed = lc->speed = SPEED_INVALID;
3141 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3142 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3143 if (lc->supported & SUPPORTED_Autoneg) {
3144 lc->advertising = lc->supported;
3145 lc->autoneg = AUTONEG_ENABLE;
3146 lc->requested_fc |= PAUSE_AUTONEG;
3147 } else {
3148 lc->advertising = 0;
3149 lc->autoneg = AUTONEG_DISABLE;
3150 }
3151}
3152
3153/**
3154 * mc7_calc_size - calculate MC7 memory size
3155 * @cfg: the MC7 configuration
3156 *
3157 * Calculates the size of an MC7 memory in bytes from the value of its
3158 * configuration register.
3159 */
3160static unsigned int __devinit mc7_calc_size(u32 cfg)
3161{
3162 unsigned int width = G_WIDTH(cfg);
3163 unsigned int banks = !!(cfg & F_BKS) + 1;
3164 unsigned int org = !!(cfg & F_ORG) + 1;
3165 unsigned int density = G_DEN(cfg);
3166 unsigned int MBs = ((256 << density) * banks) / (org << width);
3167
3168 return MBs << 20;
3169}
3170
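/*
 * Example (editorial): a configuration with density = 0, two banks,
 * org = 2 and width = 1 gives ((256 << 0) * 2) / (2 << 1) = 128, i.e.
 * a 128 MB memory once shifted into bytes.
 */
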
3171static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3172 unsigned int base_addr, const char *name)
3173{
3174 u32 cfg;
3175
3176 mc7->adapter = adapter;
3177 mc7->name = name;
3178 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3179 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3180 mc7->size = mc7_calc_size(cfg);
3181 mc7->width = G_WIDTH(cfg);
3182}
3183
3184void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3185{
3186 mac->adapter = adapter;
3187 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3188 mac->nucast = 1;
3189
3190 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3191 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3192 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3193 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3194 F_ENRGMII, 0);
3195 }
3196}
3197
3198void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3199{
3200 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3201
3202 mi1_init(adapter, ai);
3203 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3204 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3205 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3206 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3207
3208 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3209 val |= F_ENRGMII;
3210
3211 /* Enable MAC clocks so we can access the registers */
3212 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3213 t3_read_reg(adapter, A_XGM_PORT_CFG);
3214
3215 val |= F_CLKDIVRESET_;
3216 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3217 t3_read_reg(adapter, A_XGM_PORT_CFG);
3218 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3219 t3_read_reg(adapter, A_XGM_PORT_CFG);
3220}
3221
3222/*
3223 * Reset the adapter. PCIe cards lose their config space during reset, PCI-X
3224 * ones don't.
3225 */
3226int t3_reset_adapter(struct adapter *adapter)
3227{
3228 int i;
3229 uint16_t devid = 0;
3230
3231 if (is_pcie(adapter))
3232 pci_save_state(adapter->pdev);
3233 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3234
3235 /*
3236	 * Delay to give the device some time to reset fully.
3237	 * XXX The delay time should be tuned.
3238 */
3239 for (i = 0; i < 10; i++) {
3240 msleep(50);
3241 pci_read_config_word(adapter->pdev, 0x00, &devid);
3242 if (devid == 0x1425)
3243 break;
3244 }
3245
3246 if (devid != 0x1425)
3247 return -1;
3248
3249 if (is_pcie(adapter))
3250 pci_restore_state(adapter->pdev);
3251 return 0;
3252}
3253
3254/*
3255 * Initialize adapter SW state for the various HW modules, set initial values
3256 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3257 * interface.
3258 */
3259int __devinit t3_prep_adapter(struct adapter *adapter,
3260 const struct adapter_info *ai, int reset)
3261{
3262 int ret;
3263 unsigned int i, j = 0;
3264
3265 get_pci_mode(adapter, &adapter->params.pci);
3266
3267 adapter->params.info = ai;
3268 adapter->params.nports = ai->nports;
3269 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3270 adapter->params.linkpoll_period = 0;
3271 adapter->params.stats_update_period = is_10G(adapter) ?
3272 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3273 adapter->params.pci.vpd_cap_addr =
3274 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3275 ret = get_vpd_params(adapter, &adapter->params.vpd);
3276 if (ret < 0)
3277 return ret;
3278
3279 if (reset && t3_reset_adapter(adapter))
3280 return -1;
3281
3282 t3_sge_prep(adapter, &adapter->params.sge);
3283
3284 if (adapter->params.vpd.mclk) {
3285 struct tp_params *p = &adapter->params.tp;
3286
3287 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3288 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3289 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3290
3291 p->nchan = ai->nports;
3292 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3293 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3294 p->cm_size = t3_mc7_size(&adapter->cm);
3295 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3296 p->chan_tx_size = p->pmtx_size / p->nchan;
3297 p->rx_pg_size = 64 * 1024;
3298 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3299 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3300 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3301 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3302 adapter->params.rev > 0 ? 12 : 6;
3303
3304 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3305 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3306 DEFAULT_NFILTERS : 0;
3307 adapter->params.mc5.nroutes = 0;
3308 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3309
3310 init_mtus(adapter->params.mtus);
3311 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3312 }
3313
3314 early_hw_init(adapter, ai);
3315
3316 for_each_port(adapter, i) {
3317 u8 hw_addr[6];
3318 struct port_info *p = adap2pinfo(adapter, i);
3319
3320 while (!adapter->params.vpd.port_type[j])
3321 ++j;
3322
3323 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3324 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3325 ai->mdio_ops);
3326 mac_prep(&p->mac, adapter, j);
3327 ++j;
3328
3329 /*
3330 * The VPD EEPROM stores the base Ethernet address for the
3331 * card. A port's address is derived from the base by adding
3332 * the port's index to the base's low octet.
3333 */
3334 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3335 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3336
3337 memcpy(adapter->port[i]->dev_addr, hw_addr,
3338 ETH_ALEN);
3339 memcpy(adapter->port[i]->perm_addr, hw_addr,
3340 ETH_ALEN);
3341 init_link_config(&p->link_config, p->port_type->caps);
3342 p->phy.ops->power_down(&p->phy, 1);
3343 if (!(p->port_type->caps & SUPPORTED_IRQ))
3344 adapter->params.linkpoll_period = 10;
3345 }
3346
3347 return 0;
3348}
3349
3350void t3_led_ready(struct adapter *adapter)
3351{
3352 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3353 F_GPIO0_OUT_VAL);
3354}