path: root/drivers/net/bnx2x.h
author	Vladislav Zolotarov <vladz@broadcom.com>	2008-06-23 23:34:36 -0400
committer	David S. Miller <davem@davemloft.net>	2008-06-23 23:34:36 -0400
commit	7a9b25577c8a06d998fb11b28bf8229aa9623205 (patch)
tree	99d1a31c0112557794398b8add1cb22c51667afa /drivers/net/bnx2x.h
parent	bb2a0f7ae477740d947b442f640a5d10b51025c0 (diff)
bnx2x: Add TPA, Broadcom's HW LRO

TPA stands for Transparent Packet Aggregation. When enabled, the FW aggregates in-order TCP packets according to a 4-tuple match and sends one big packet to the driver. This packet is stored on an SGL in which each SGE is one page. The FW also implements a timeout algorithm, and it honors all TCP flags, including the PSH flag as a trigger to halt aggregation. After receiving Ben Hutchings' comments, we also added ethtool support, so now, thanks to Ben's patch, when forwarding is enabled our aggregation is turned off using the LRO flags.

Signed-off-by: Vladislav Zolotarov <vladz@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
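As background for the header changes below, here is a minimal, hypothetical sketch of how the rx path can react to the TPA start/end markers the FW sets in a CQE, written only against macros and fields this patch introduces (the function name and the exact CQE field access are illustrative, not the driver's actual rx routine):

static void sketch_handle_tpa_cqe(struct bnx2x_fastpath *fp,
				  union eth_rx_cqe *cqe, u16 queue)
{
	u8 flags = cqe->fast_path_cqe.type_error_flags;

	if (TPA_TYPE(flags) == TPA_TYPE_START) {
		/* FW opened an aggregation: the driver parks the mapped
		   skb in tpa_pool[queue] until the END marker arrives */
		fp->tpa_state[queue] = BNX2X_TPA_START;
	} else if (TPA_TYPE(flags) == TPA_TYPE_END) {
		/* FW closed the aggregation (PSH flag, timeout, ...);
		   the payload pages are referenced by SGEs on the ring */
		fp->tpa_state[queue] = BNX2X_TPA_STOP;
	}
}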
Diffstat (limited to 'drivers/net/bnx2x.h')
-rw-r--r--	drivers/net/bnx2x.h	| 276
1 file changed, 197 insertions(+), 79 deletions(-)
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index f7d73d6c3981..4bf4f7b205f2 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -132,8 +132,8 @@
 #define is_multi(bp)		(bp->num_queues > 1)
 
 
+/* fast path */
 
-#define bnx2x_sp_check(bp, var) ((bp->slowpath) ? (&bp->slowpath->var) : NULL)
 struct sw_rx_bd {
 	struct sk_buff	*skb;
 	DECLARE_PCI_UNMAP_ADDR(mapping)
@@ -144,6 +144,52 @@ struct sw_tx_bd {
 	u16		first_bd;
 };
 
+struct sw_rx_page {
+	struct page	*page;
+	DECLARE_PCI_UNMAP_ADDR(mapping)
+};
+
+
+/* MC hsi */
+#define BCM_PAGE_SHIFT			12
+#define BCM_PAGE_SIZE			(1 << BCM_PAGE_SHIFT)
+#define BCM_PAGE_MASK			(~(BCM_PAGE_SIZE - 1))
+#define BCM_PAGE_ALIGN(addr)	(((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
+
+#define PAGES_PER_SGE_SHIFT		0
+#define PAGES_PER_SGE			(1 << PAGES_PER_SGE_SHIFT)
+
+/* SGE ring related macros */
+#define NUM_RX_SGE_PAGES		2
+#define RX_SGE_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
+#define MAX_RX_SGE_CNT			(RX_SGE_CNT - 2)
+/* RX_SGE_CNT is promised to be a power of 2 */
+#define RX_SGE_MASK			(RX_SGE_CNT - 1)
+#define NUM_RX_SGE			(RX_SGE_CNT * NUM_RX_SGE_PAGES)
+#define MAX_RX_SGE			(NUM_RX_SGE - 1)
+#define NEXT_SGE_IDX(x)		((((x) & RX_SGE_MASK) == \
+				  (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
+#define RX_SGE(x)			((x) & MAX_RX_SGE)
+
+/* SGE producer mask related macros */
+/* Number of bits in one sge_mask array element */
+#define RX_SGE_MASK_ELEM_SZ		64
+#define RX_SGE_MASK_ELEM_SHIFT		6
+#define RX_SGE_MASK_ELEM_MASK		((u64)RX_SGE_MASK_ELEM_SZ - 1)
+
+/* Creates a bitmask of all ones in less significant bits.
+   idx - index of the most significant bit in the created mask */
+#define RX_SGE_ONES_MASK(idx) \
+		(((u64)0x1 << (((idx) & RX_SGE_MASK_ELEM_MASK) + 1)) - 1)
+#define RX_SGE_MASK_ELEM_ONE_MASK	((u64)(~0))
+
+/* Number of u64 elements in SGE mask array */
+#define RX_SGE_MASK_LEN			((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
+					 RX_SGE_MASK_ELEM_SZ)
+#define RX_SGE_MASK_LEN_MASK		(RX_SGE_MASK_LEN - 1)
+#define NEXT_SGE_MASK_ELEM(el)		(((el) + 1) & RX_SGE_MASK_LEN_MASK)
+
+
 struct bnx2x_fastpath {
 
 	struct napi_struct	napi;
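The +3 case in NEXT_SGE_IDX exists because the last two entries of each SGE page are reserved for the link to the next page (hence MAX_RX_SGE_CNT = RX_SGE_CNT - 2). A worked example, under the assumption that sizeof(struct eth_rx_sge) == 8 so that RX_SGE_CNT == 512 with 4K pages (the helper name is invented):

static u16 sketch_sge_index_walk(void)
{
	u16 idx = MAX_RX_SGE_CNT - 1;	/* 509: last usable SGE of a page */

	idx = NEXT_SGE_IDX(idx);	/* 512: jumps over the 2 reserved entries */
	idx = NEXT_SGE_IDX(idx);	/* 513: plain increment inside the page */

	return RX_SGE(NUM_RX_SGE);	/* 0: the ring mask wraps the index */
}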
@@ -159,7 +205,8 @@ struct bnx2x_fastpath {
 	struct eth_tx_bd	*tx_desc_ring;
 	dma_addr_t		tx_desc_mapping;
 
-	struct sw_rx_bd		*rx_buf_ring;
+	struct sw_rx_bd		*rx_buf_ring;	/* BDs mappings ring */
+	struct sw_rx_page	*rx_page_ring;	/* SGE pages mappings ring */
 
 	struct eth_rx_bd	*rx_desc_ring;
 	dma_addr_t		rx_desc_mapping;
@@ -167,6 +214,12 @@ struct bnx2x_fastpath {
 	union eth_rx_cqe	*rx_comp_ring;
 	dma_addr_t		rx_comp_mapping;
 
+	/* SGE ring */
+	struct eth_rx_sge	*rx_sge_ring;
+	dma_addr_t		rx_sge_mapping;
+
+	u64			sge_mask[RX_SGE_MASK_LEN];
+
 	int			state;
 #define BNX2X_FP_STATE_CLOSED		0
 #define BNX2X_FP_STATE_IRQ		0x80000
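A hypothetical helper in the style the new rx_page_ring/rx_sge_ring fields imply: allocate one SGE's worth of pages, DMA-map it with the era's PCI API, and publish the bus address to the HW descriptor. The real allocation routine lives in bnx2x_main.c; this name and error handling are only a sketch:

static int sketch_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	dma_addr_t mapping;

	if (page == NULL)
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0,
			       BCM_PAGE_SIZE * PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	/* hand the 64-bit bus address to the HW SGE descriptor */
	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}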
@@ -197,27 +250,152 @@ struct bnx2x_fastpath {
 	u16			rx_bd_cons;
 	u16			rx_comp_prod;
 	u16			rx_comp_cons;
+	u16			rx_sge_prod;
+	/* The last maximal completed SGE */
+	u16			last_max_sge;
 	u16			*rx_cons_sb;
+	u16			*rx_bd_cons_sb;
 
 	unsigned long		tx_pkt,
 				rx_pkt,
-				rx_calls;
+				rx_calls,
+				rx_alloc_failed;
+	/* TPA related */
+	struct sw_rx_bd		tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
+	u8			tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
+#define BNX2X_TPA_START			1
+#define BNX2X_TPA_STOP			2
+	u8			disable_tpa;
+#ifdef BNX2X_STOP_ON_ERROR
+	u64			tpa_queue_used;
+#endif
 
 	struct bnx2x		*bp; /* parent */
 };
 
 #define bnx2x_fp(bp, nr, var)		(bp->fp[nr].var)
+
+
+/* MC hsi */
+#define MAX_FETCH_BD			13	/* HW max BDs per packet */
+#define RX_COPY_THRESH			92
+
+#define NUM_TX_RINGS			16
+#define TX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_tx_bd))
+#define MAX_TX_DESC_CNT			(TX_DESC_CNT - 1)
+#define NUM_TX_BD			(TX_DESC_CNT * NUM_TX_RINGS)
+#define MAX_TX_BD			(NUM_TX_BD - 1)
+#define MAX_TX_AVAIL		(MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
+#define NEXT_TX_IDX(x)		((((x) & MAX_TX_DESC_CNT) == \
+				  (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+#define TX_BD(x)			((x) & MAX_TX_BD)
+#define TX_BD_POFF(x)			((x) & MAX_TX_DESC_CNT)
+
+/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
+#define NUM_RX_RINGS			8
+#define RX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
+#define MAX_RX_DESC_CNT			(RX_DESC_CNT - 2)
+#define RX_DESC_MASK			(RX_DESC_CNT - 1)
+#define NUM_RX_BD			(RX_DESC_CNT * NUM_RX_RINGS)
+#define MAX_RX_BD			(NUM_RX_BD - 1)
+#define MAX_RX_AVAIL		(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
+#define NEXT_RX_IDX(x)		((((x) & RX_DESC_MASK) == \
+				  (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
+#define RX_BD(x)			((x) & MAX_RX_BD)
+
+/* As long as CQE is 4 times bigger than BD entry we have to allocate
+   4 times more pages for CQ ring in order to keep it balanced with
+   BD ring */
+#define NUM_RCQ_RINGS			(NUM_RX_RINGS * 4)
+#define RCQ_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
+#define MAX_RCQ_DESC_CNT		(RCQ_DESC_CNT - 1)
+#define NUM_RCQ_BD			(RCQ_DESC_CNT * NUM_RCQ_RINGS)
+#define MAX_RCQ_BD			(NUM_RCQ_BD - 1)
+#define MAX_RCQ_AVAIL		(MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
+#define NEXT_RCQ_IDX(x)		((((x) & MAX_RCQ_DESC_CNT) == \
+				  (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+#define RCQ_BD(x)			((x) & MAX_RCQ_BD)
+
+
 /* This is needed for determining last_max */
 #define SUB_S16(a, b)			(s16)((s16)(a) - (s16)(b))
 
+#define __SGE_MASK_SET_BIT(el, bit) \
+	do { \
+		el = ((el) | ((u64)0x1 << (bit))); \
+	} while (0)
+
+#define __SGE_MASK_CLEAR_BIT(el, bit) \
+	do { \
+		el = ((el) & (~((u64)0x1 << (bit)))); \
+	} while (0)
+
+#define SGE_MASK_SET_BIT(fp, idx) \
+	__SGE_MASK_SET_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
+			   ((idx) & RX_SGE_MASK_ELEM_MASK))
+
+#define SGE_MASK_CLEAR_BIT(fp, idx) \
+	__SGE_MASK_CLEAR_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
+			     ((idx) & RX_SGE_MASK_ELEM_MASK))
+
+
+/* used on a CID received from the HW */
+#define SW_CID(x)			(le32_to_cpu(x) & \
+					 (COMMON_RAMROD_ETH_RX_CQE_CID >> 7))
+#define CQE_CMD(x)			(le32_to_cpu(x) >> \
+					COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
+
 #define BD_UNMAP_ADDR(bd)		HILO_U64(le32_to_cpu((bd)->addr_hi), \
 						 le32_to_cpu((bd)->addr_lo))
 #define BD_UNMAP_LEN(bd)		(le16_to_cpu((bd)->nbytes))
 
+
+#define DPM_TRIGER_TYPE			0x40
+#define DOORBELL(bp, cid, val) \
+	do { \
+		writel((u32)val, (bp)->doorbells + (BCM_PAGE_SIZE * cid) + \
+		       DPM_TRIGER_TYPE); \
+	} while (0)
+
+
+/* TX CSUM helpers */
+#define SKB_CS_OFF(skb)		(offsetof(struct tcphdr, check) - \
+				 skb->csum_offset)
+#define SKB_CS(skb)		(*(u16 *)(skb_transport_header(skb) + \
+					  skb->csum_offset))
+
+#define pbd_tcp_flags(skb)	(ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
+
+#define XMIT_PLAIN			0
+#define XMIT_CSUM_V4			0x1
+#define XMIT_CSUM_V6			0x2
+#define XMIT_CSUM_TCP			0x4
+#define XMIT_GSO_V4			0x8
+#define XMIT_GSO_V6			0x10
+
+#define XMIT_CSUM			(XMIT_CSUM_V4 | XMIT_CSUM_V6)
+#define XMIT_GSO			(XMIT_GSO_V4 | XMIT_GSO_V6)
+
+
 /* stuff added to make the code fit 80Col */
 
 #define CQE_TYPE(cqe_fp_flags)	((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
 
+#define TPA_TYPE_START			ETH_FAST_PATH_RX_CQE_START_FLG
+#define TPA_TYPE_END			ETH_FAST_PATH_RX_CQE_END_FLG
+#define TPA_TYPE(cqe_fp_flags)		((cqe_fp_flags) & \
+					 (TPA_TYPE_START | TPA_TYPE_END))
+
+#define BNX2X_RX_SUM_OK(cqe) \
+			(!(cqe->fast_path_cqe.status_flags & \
+			   (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \
+			    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)))
+
+#define BNX2X_RX_SUM_FIX(cqe) \
+			((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \
+			  PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \
+			 (1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT))
+
 #define ETH_RX_ERROR_FALGS	(ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | \
 				 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | \
 				 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)
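The sge_mask bitmap added to bnx2x_fastpath tracks which SGEs are still outstanding: with RX_SGE_CNT == 512 and two ring pages, RX_SGE_MASK_LEN is (2 * 512) / 64 == 16 u64 words covering all 1024 SGEs. A small illustration of the bit helpers above (hypothetical function; the values follow directly from the macro definitions):

static void sketch_sge_mask_ops(struct bnx2x_fastpath *fp)
{
	u64 low_bits;

	/* SGE 70 lives in word 70 >> RX_SGE_MASK_ELEM_SHIFT == 1,
	   at bit position 70 & RX_SGE_MASK_ELEM_MASK == 6 */
	SGE_MASK_SET_BIT(fp, 70);
	SGE_MASK_CLEAR_BIT(fp, 70);

	/* all-ones up to and including bit index 6 of that word */
	low_bits = RX_SGE_ONES_MASK(70);	/* == 0x7f */
	(void)low_bits;
}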
@@ -247,6 +425,9 @@ struct bnx2x_fastpath {
 #define BNX2X_TX_SB_INDEX \
 	(&fp->status_blk->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX])
 
+
+/* end of fast path */
+
 /* common */
 
 struct bnx2x_common {
@@ -546,7 +727,7 @@ struct bnx2x {
 	struct pci_dev		*pdev;
 
 	atomic_t		intr_sem;
-	struct msix_entry	msix_table[MAX_CONTEXT+1];
+	struct msix_entry	msix_table[MAX_CONTEXT+1];
 
 	int			tx_ring_size;
 
@@ -604,6 +785,7 @@ struct bnx2x {
 #define USING_DAC_FLAG			0x10
 #define USING_MSIX_FLAG			0x20
 #define ASF_ENABLE_FLAG			0x40
+#define TPA_ENABLE_FLAG			0x80
 #define NO_MCP_FLAG			0x100
 #define BP_NOMCP(bp)			(bp->flags & NO_MCP_FLAG)
 
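A hedged sketch of how this flag plausibly gates aggregation; the actual wiring, including the ethtool LRO hook mentioned in the commit message and the per-queue disable_tpa field, lives in the driver's .c files (the helper name is invented):

static inline int sketch_tpa_enabled(struct bnx2x *bp)
{
	return (bp->flags & TPA_ENABLE_FLAG) != 0;
}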
@@ -725,76 +907,6 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 		      u32 len32);
 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode);
 
-
-/* MC hsi */
-#define RX_COPY_THRESH			92
-#define BCM_PAGE_SHIFT			12
-#define BCM_PAGE_SIZE			(1 << BCM_PAGE_SHIFT)
-#define BCM_PAGE_MASK			(~(BCM_PAGE_SIZE - 1))
-#define BCM_PAGE_ALIGN(addr)	(((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
-
-#define NUM_TX_RINGS			16
-#define TX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_tx_bd))
-#define MAX_TX_DESC_CNT			(TX_DESC_CNT - 1)
-#define NUM_TX_BD			(TX_DESC_CNT * NUM_TX_RINGS)
-#define MAX_TX_BD			(NUM_TX_BD - 1)
-#define MAX_TX_AVAIL		(MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
-#define NEXT_TX_IDX(x)		((((x) & MAX_TX_DESC_CNT) == \
-				  (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
-#define TX_BD(x)			((x) & MAX_TX_BD)
-#define TX_BD_POFF(x)			((x) & MAX_TX_DESC_CNT)
-
-/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
-#define NUM_RX_RINGS			8
-#define RX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
-#define MAX_RX_DESC_CNT			(RX_DESC_CNT - 2)
-#define RX_DESC_MASK			(RX_DESC_CNT - 1)
-#define NUM_RX_BD			(RX_DESC_CNT * NUM_RX_RINGS)
-#define MAX_RX_BD			(NUM_RX_BD - 1)
-#define MAX_RX_AVAIL		(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
-#define NEXT_RX_IDX(x)		((((x) & RX_DESC_MASK) == \
-				  (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
-#define RX_BD(x)			((x) & MAX_RX_BD)
-
-#define NUM_RCQ_RINGS			(NUM_RX_RINGS * 2)
-#define RCQ_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
-#define MAX_RCQ_DESC_CNT		(RCQ_DESC_CNT - 1)
-#define NUM_RCQ_BD			(RCQ_DESC_CNT * NUM_RCQ_RINGS)
-#define MAX_RCQ_BD			(NUM_RCQ_BD - 1)
-#define MAX_RCQ_AVAIL		(MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
-#define NEXT_RCQ_IDX(x)		((((x) & MAX_RCQ_DESC_CNT) == \
-				  (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
-#define RCQ_BD(x)			((x) & MAX_RCQ_BD)
-
-
-/* used on a CID received from the HW */
-#define SW_CID(x)			(le32_to_cpu(x) & \
-					 (COMMON_RAMROD_ETH_RX_CQE_CID >> 1))
-#define CQE_CMD(x)			(le32_to_cpu(x) >> \
-					COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
-
-#define STROM_ASSERT_ARRAY_SIZE		50
-
-
-
-/* must be used on a CID before placing it on a HW ring */
-#define HW_CID(bp, x)		((BP_PORT(bp) << 23) | (BP_E1HVN(bp) << 17) | x)
-
-#define SP_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_spe))
-#define MAX_SP_DESC_CNT			(SP_DESC_CNT - 1)
-
-
-#define BNX2X_BTR			3
-#define MAX_SPQ_PENDING			8
-
-
-#define DPM_TRIGER_TYPE			0x40
-#define DOORBELL(bp, cid, val) \
-	do { \
-		writel((u32)val, (bp)->doorbells + (BCM_PAGE_SIZE * cid) + \
-		       DPM_TRIGER_TYPE); \
-	} while (0)
-
 static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 			   int wait)
 {
@@ -874,14 +986,20 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define BNX2X_LOOPBACK_FAILED		(BNX2X_MAC_LOOPBACK_FAILED | \
 					 BNX2X_PHY_LOOPBACK_FAILED)
 
-#define pbd_tcp_flags(skb)	(ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
+
+#define STROM_ASSERT_ARRAY_SIZE		50
+
 
 /* must be used on a CID before placing it on a HW ring */
+#define HW_CID(bp, x)		((BP_PORT(bp) << 23) | (BP_E1HVN(bp) << 17) | x)
+
+#define SP_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_spe))
+#define MAX_SP_DESC_CNT			(SP_DESC_CNT - 1)
+
+
+#define BNX2X_BTR			3
+#define MAX_SPQ_PENDING			8
 
-#define BNX2X_RX_SUM_OK(cqe) \
-			(!(cqe->fast_path_cqe.status_flags & \
-			   (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \
-			    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)))
 
 /* CMNG constants
    derived from lab experiments, and not from system spec calculations !!! */