author     Brice Goglin <brice@myri.com>    2006-12-11 05:25:09 -0500
committer  Jeff Garzik <jeff@garzik.org>    2006-12-11 09:54:06 -0500
commit     dd50f3361f9f0bb407658e9087947c9bdcdefffc (patch)
tree       5f7315d3e94c668af539c2478d4dba5dc1933f96 /drivers/net/myri10ge
parent     6250223e055764efcaef3809a9f2350edfc82bbc (diff)
[PATCH] myri10ge: add page-based skb routines
Add physical page skb allocation routines and page-based rx_done, to be used by upcoming patches.

Signed-off-by: Brice Goglin <brice@myri.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
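The core idea of the patch is a page-slicing receive-buffer scheme: each rx buffer is carved out of a shared page at a cacheline-aligned offset, and a fresh page is allocated only when the next buffer would not fit, or when the buffer size is at least half a page. Below is a minimal userspace sketch of that strategy, not driver code: the names, the malloc() stand-ins for alloc_pages()/get_page()/pci_map_page(), the 1536-byte buffer size, the 4 KB page and the 64-byte cacheline are all illustrative assumptions.

/* Simplified, userspace-only analogue of myri10ge_alloc_rx_pages() below.
 * Pages are deliberately not freed; this short demo exits immediately.
 */
#include <stdio.h>
#include <stdlib.h>

#define ALLOC_SIZE 4096                         /* assumes MYRI10GE_ALLOC_ORDER 0, 4 KB pages */
#define CACHE_ALIGN(x) (((x) + 63) & ~63)       /* stands in for SKB_DATA_ALIGN() */
#define RING_SLOTS 8

struct rx_slot {
        char *page;             /* backing page shared by consecutive slots */
        int page_offset;        /* where this slot's buffer starts in the page */
};

static char *cur_page;
static int cur_offset;

/* Carve one rx buffer of 'bytes' bytes out of the current page, or start a new page. */
static int fill_slot(struct rx_slot *slot, int bytes)
{
        if (cur_page == NULL || bytes >= ALLOC_SIZE / 2 ||
            cur_offset + bytes > ALLOC_SIZE) {
                cur_page = malloc(ALLOC_SIZE);  /* alloc_pages() in the driver */
                if (cur_page == NULL)
                        return -1;              /* the driver arms a refill watchdog here */
                cur_offset = 0;
        }
        slot->page = cur_page;
        slot->page_offset = cur_offset;
        cur_offset += CACHE_ALIGN(bytes);       /* next buffer starts on a cacheline */
        return 0;
}

int main(void)
{
        struct rx_slot ring[RING_SLOTS];
        int i;

        for (i = 0; i < RING_SLOTS; i++)
                if (fill_slot(&ring[i], 1536))  /* small rx buffers share one page */
                        return 1;
        for (i = 0; i < RING_SLOTS; i++)
                printf("slot %d: page %p offset %d\n",
                       i, (void *)ring[i].page, ring[i].page_offset);
        return 0;
}

With 1536-byte buffers this packs two buffers per 4 KB page; a jumbo-sized buffer exceeds ALLOC_SIZE/2 and therefore gets a page of its own, which is why the patch lets one frame span several page fragments (MYRI10GE_MAX_FRAGS_PER_FRAME).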
Diffstat (limited to 'drivers/net/myri10ge')
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 190
1 file changed, 190 insertions, 0 deletions
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 1e62f58ed2c2..05b4f93518f5 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -92,8 +92,14 @@ MODULE_LICENSE("Dual BSD/GPL");
 #define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
 #define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff
 
+#define MYRI10GE_ALLOC_ORDER 0
+#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
+#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)
+
 struct myri10ge_rx_buffer_state {
         struct sk_buff *skb;
+        struct page *page;
+        int page_offset;
         DECLARE_PCI_UNMAP_ADDR(bus)
         DECLARE_PCI_UNMAP_LEN(len)
 };
@@ -116,9 +122,14 @@ struct myri10ge_rx_buf {
         u8 __iomem *wc_fifo;    /* w/c rx dma addr fifo address */
         struct mcp_kreq_ether_recv *shadow;     /* host shadow of recv ring */
         struct myri10ge_rx_buffer_state *info;
+        struct page *page;
+        dma_addr_t bus;
+        int page_offset;
         int cnt;
+        int fill_cnt;
         int alloc_fail;
         int mask;               /* number of rx slots -1 */
+        int watchdog_needed;
 };
 
 struct myri10ge_tx_buf {
@@ -150,6 +161,7 @@ struct myri10ge_priv {
         struct myri10ge_rx_buf rx_big;
         struct myri10ge_rx_done rx_done;
         int small_bytes;
+        int big_bytes;
         struct net_device *dev;
         struct net_device_stats stats;
         u8 __iomem *sram;
@@ -266,6 +278,10 @@ static int myri10ge_debug = -1; /* defaults above */
 module_param(myri10ge_debug, int, 0);
 MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
 
+static int myri10ge_fill_thresh = 256;
+module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed\n");
+
 #define MYRI10GE_FW_OFFSET 1024*1024
 #define MYRI10GE_HIGHPART_TO_U32(X) \
 (sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
@@ -958,6 +974,180 @@ static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
         }
 }
 
+static inline void
+myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
+                      struct skb_frag_struct *rx_frags, int len, int hlen)
+{
+        struct skb_frag_struct *skb_frags;
+
+        skb->len = skb->data_len = len;
+        skb->truesize = len + sizeof(struct sk_buff);
+        /* attach the page(s) */
+
+        skb_frags = skb_shinfo(skb)->frags;
+        while (len > 0) {
+                memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
+                len -= rx_frags->size;
+                skb_frags++;
+                rx_frags++;
+                skb_shinfo(skb)->nr_frags++;
+        }
+
+        /* pskb_may_pull is not available in irq context, but
+         * skb_pull() (for ether_pad and eth_type_trans()) requires
+         * the beginning of the packet in skb_headlen(), move it
+         * manually */
+        memcpy(skb->data, va, hlen);
+        skb_shinfo(skb)->frags[0].page_offset += hlen;
+        skb_shinfo(skb)->frags[0].size -= hlen;
+        skb->data_len -= hlen;
+        skb->tail += hlen;
+        skb_pull(skb, MXGEFW_PAD);
+}
+
+static void
+myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
+                        int bytes, int watchdog)
+{
+        struct page *page;
+        int idx;
+
+        if (unlikely(rx->watchdog_needed && !watchdog))
+                return;
+
+        /* try to refill entire ring */
+        while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
+                idx = rx->fill_cnt & rx->mask;
+
+                if ((bytes < MYRI10GE_ALLOC_SIZE / 2) &&
+                    (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE)) {
+                        /* we can use part of previous page */
+                        get_page(rx->page);
+                } else {
+                        /* we need a new page */
+                        page =
+                            alloc_pages(GFP_ATOMIC | __GFP_COMP,
+                                        MYRI10GE_ALLOC_ORDER);
+                        if (unlikely(page == NULL)) {
+                                if (rx->fill_cnt - rx->cnt < 16)
+                                        rx->watchdog_needed = 1;
+                                return;
+                        }
+                        rx->page = page;
+                        rx->page_offset = 0;
+                        rx->bus = pci_map_page(mgp->pdev, page, 0,
+                                               MYRI10GE_ALLOC_SIZE,
+                                               PCI_DMA_FROMDEVICE);
+                }
+                rx->info[idx].page = rx->page;
+                rx->info[idx].page_offset = rx->page_offset;
+                /* note that this is the address of the start of the
+                 * page */
+                pci_unmap_addr_set(&rx->info[idx], bus, rx->bus);
+                rx->shadow[idx].addr_low =
+                    htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
+                rx->shadow[idx].addr_high =
+                    htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));
+
+                /* start next packet on a cacheline boundary */
+                rx->page_offset += SKB_DATA_ALIGN(bytes);
+                rx->fill_cnt++;
+
+                /* copy 8 descriptors to the firmware at a time */
+                if ((idx & 7) == 7) {
+                        if (rx->wc_fifo == NULL)
+                                myri10ge_submit_8rx(&rx->lanai[idx - 7],
+                                                    &rx->shadow[idx - 7]);
+                        else {
+                                mb();
+                                myri10ge_pio_copy(rx->wc_fifo,
+                                                  &rx->shadow[idx - 7], 64);
+                        }
+                }
+        }
+}
+
+static inline void
+myri10ge_unmap_rx_page(struct pci_dev *pdev,
+                       struct myri10ge_rx_buffer_state *info, int bytes)
+{
+        /* unmap the recvd page if we're the only or last user of it */
+        if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
+            (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
+                pci_unmap_page(pdev, (pci_unmap_addr(info, bus)
+                                      & ~(MYRI10GE_ALLOC_SIZE - 1)),
+                               MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+        }
+}
+
+#define MYRI10GE_HLEN 64        /* The number of bytes to copy from a
+                                 * page into an skb */
+
+static inline int
+myri10ge_page_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
+                      int bytes, int len, __wsum csum)
+{
+        struct sk_buff *skb;
+        struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
+        int i, idx, hlen, remainder;
+        struct pci_dev *pdev = mgp->pdev;
+        struct net_device *dev = mgp->dev;
+        u8 *va;
+
+        len += MXGEFW_PAD;
+        idx = rx->cnt & rx->mask;
+        va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
+        prefetch(va);
+        /* Fill skb_frag_struct(s) with data from our receive */
+        for (i = 0, remainder = len; remainder > 0; i++) {
+                myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
+                rx_frags[i].page = rx->info[idx].page;
+                rx_frags[i].page_offset = rx->info[idx].page_offset;
+                if (remainder < MYRI10GE_ALLOC_SIZE)
+                        rx_frags[i].size = remainder;
+                else
+                        rx_frags[i].size = MYRI10GE_ALLOC_SIZE;
+                rx->cnt++;
+                idx = rx->cnt & rx->mask;
+                remainder -= MYRI10GE_ALLOC_SIZE;
+        }
+
+        hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN;
+
+        /* allocate an skb to attach the page(s) to. */
+
+        skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
+        if (unlikely(skb == NULL)) {
+                mgp->stats.rx_dropped++;
+                do {
+                        i--;
+                        put_page(rx_frags[i].page);
+                } while (i != 0);
+                return 0;
+        }
+
+        /* Attach the pages to the skb, and trim off any padding */
+        myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
+        if (skb_shinfo(skb)->frags[0].size <= 0) {
+                put_page(skb_shinfo(skb)->frags[0].page);
+                skb_shinfo(skb)->nr_frags = 0;
+        }
+        skb->protocol = eth_type_trans(skb, dev);
+        skb->dev = dev;
+
+        if (mgp->csum_flag) {
+                if ((skb->protocol == htons(ETH_P_IP)) ||
+                    (skb->protocol == htons(ETH_P_IPV6))) {
+                        skb->csum = csum;
+                        skb->ip_summed = CHECKSUM_COMPLETE;
+                } else
+                        myri10ge_vlan_ip_csum(skb, csum);
+        }
+        netif_receive_skb(skb);
+        dev->last_rx = jiffies;
+        return 1;
+}
+
 static inline unsigned long
 myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
                  int bytes, int len, __wsum csum)