Diffstat (limited to 'drivers/net')
 drivers/net/Kconfig               |   12
 drivers/net/Makefile              |    2
 drivers/net/arm/ether3.c          |    2
 drivers/net/atl1/atl1_main.c      |    1
 drivers/net/bnx2.c                |  103
 drivers/net/bnx2.h                |   10
 drivers/net/eepro100.c            |    7
 drivers/net/gianfar.c             |   12
 drivers/net/hamradio/baycom_epp.c |    2
 drivers/net/natsemi.c             |    2
 drivers/net/ne2k-pci.c            |    7
 drivers/net/ni5010.c              |    6
 drivers/net/ns83820.c             |    2
 drivers/net/phy/vitesse.c         |   46
 drivers/net/pppol2tp.c            |   18
 drivers/net/r8169.c               |    2
 drivers/net/saa9730.c             |    9
 drivers/net/sunvnet.c             |  260
 drivers/net/sunvnet.h             |    4
 drivers/net/tokenring/smctr.c     |    6
 drivers/net/wan/pc300_drv.c       |    2
 drivers/net/wan/sbni.c            |    7
 drivers/net/xen-netfront.c        | 1863 (new)
 23 files changed, 2180 insertions(+), 205 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 43d03178064d..5fb659f8b20e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2486,6 +2486,18 @@ source "drivers/atm/Kconfig"
 
 source "drivers/s390/net/Kconfig"
 
+config XEN_NETDEV_FRONTEND
+	tristate "Xen network device frontend driver"
+	depends on XEN
+	default y
+	help
+	  The network device frontend driver allows the kernel to
+	  access network devices exported by a virtual machine
+	  containing a physical network device driver.  The frontend
+	  driver is intended for unprivileged guest domains; if you
+	  are compiling a kernel for a Xen guest, you almost
+	  certainly want to enable this.
+
 config ISERIES_VETH
 	tristate "iSeries Virtual Ethernet driver support"
 	depends on PPC_ISERIES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index eb4167622a6a..0e286ab8855a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -127,6 +127,8 @@ obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o
 obj-$(CONFIG_SLIP) += slip.o
 obj-$(CONFIG_SLHC) += slhc.o
 
+obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
+
 obj-$(CONFIG_DUMMY) += dummy.o
 obj-$(CONFIG_IFB) += ifb.o
 obj-$(CONFIG_MACVLAN) += macvlan.o
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index da713500654d..a7cac695a9bd 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -464,7 +464,7 @@ static void ether3_setmulticastlist(struct net_device *dev)
 	if (dev->flags & IFF_PROMISC) {
 		/* promiscuous mode */
 		priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
-	} else if (dev->flags & IFF_ALLMULTI) {
+	} else if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
 		priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
 	} else
 		priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
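The ether3 change above is one instance of a fix repeated in several drivers below (ni5010, ns83820, saa9730): hardware with no per-address multicast filter must fall back to receiving all multicast traffic whenever the device has joined groups (dev->mc_count or dev->mc_list non-empty), not only when IFF_ALLMULTI is set; otherwise joined groups silently receive nothing. The shape of the pattern, as a sketch (the helper and mode constants are illustrative, not from any driver in this series):

	static void example_set_multicast(struct net_device *dev)
	{
		if (dev->flags & IFF_PROMISC) {
			/* accept everything */
			example_hw_rx_mode(dev, EXAMPLE_RX_PROMISC);
		} else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count) {
			/* no hash filter: any joined group forces all-multi */
			example_hw_rx_mode(dev, EXAMPLE_RX_ALLMULTI);
		} else {
			/* unicast + broadcast only */
			example_hw_rx_mode(dev, EXAMPLE_RX_NORMAL);
		}
	}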
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index 4a18b881ae9a..fd1e156f1747 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -75,6 +75,7 @@
 #include <linux/compiler.h>
 #include <linux/delay.h>
 #include <linux/mii.h>
+#include <linux/interrupt.h>
 #include <net/checksum.h>
 
 #include <asm/atomic.h>
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index d23861c8658c..a729da061bbb 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -54,8 +54,8 @@
 
 #define DRV_MODULE_NAME		"bnx2"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"1.6.2"
-#define DRV_MODULE_RELDATE	"July 6, 2007"
+#define DRV_MODULE_VERSION	"1.6.3"
+#define DRV_MODULE_RELDATE	"July 16, 2007"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -126,91 +126,102 @@ static struct pci_device_id bnx2_pci_tbl[] = {
 
 static struct flash_spec flash_table[] =
 {
+#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
+#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
 	/* Slow EEPROM */
 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
-	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
 	 "EEPROM - slow"},
 	/* Expansion entry 0001 */
 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 	 "Entry 0001"},
 	/* Saifun SA25F010 (non-buffered flash) */
 	/* strap, cfg1, & write1 need updates */
 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
 	 "Non-buffered flash (128kB)"},
 	/* Saifun SA25F020 (non-buffered flash) */
 	/* strap, cfg1, & write1 need updates */
 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
 	 "Non-buffered flash (256kB)"},
 	/* Expansion entry 0100 */
 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 	 "Entry 0100"},
 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
-	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
 	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
-	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
 	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
 	/* Saifun SA25F005 (non-buffered flash) */
 	/* strap, cfg1, & write1 need updates */
 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
 	 "Non-buffered flash (64kB)"},
 	/* Fast EEPROM */
 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
-	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
 	 "EEPROM - fast"},
 	/* Expansion entry 1001 */
 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 	 "Entry 1001"},
 	/* Expansion entry 1010 */
 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 	 "Entry 1010"},
 	/* ATMEL AT45DB011B (buffered flash) */
 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
-	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
 	 "Buffered flash (128kB)"},
 	/* Expansion entry 1100 */
 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 	 "Entry 1100"},
 	/* Expansion entry 1101 */
 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
-	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 	 "Entry 1101"},
 	/* Ateml Expansion entry 1110 */
 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
-	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
 	 "Entry 1110 (Atmel)"},
 	/* ATMEL AT45DB021B (buffered flash) */
 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
-	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
 	 "Buffered flash (256kB)"},
 };
 
+static struct flash_spec flash_5709 = {
+	.flags		= BNX2_NV_BUFFERED,
+	.page_bits	= BCM5709_FLASH_PAGE_BITS,
+	.page_size	= BCM5709_FLASH_PAGE_SIZE,
+	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
+	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
+	.name		= "5709 Buffered flash (256kB)",
+};
+
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
@@ -3289,7 +3300,7 @@ bnx2_enable_nvram_write(struct bnx2 *bp)
 	val = REG_RD(bp, BNX2_MISC_CFG);
 	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
 
-	if (!bp->flash_info->buffered) {
+	if (bp->flash_info->flags & BNX2_NV_WREN) {
 		int j;
 
 		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
@@ -3349,7 +3360,7 @@ bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
 	u32 cmd;
 	int j;
 
-	if (bp->flash_info->buffered)
+	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
 		/* Buffered flash, no erase needed */
 		return 0;
 
@@ -3392,8 +3403,8 @@ bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
 	/* Build the command word. */
 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
 
-	/* Calculate an offset of a buffered flash. */
-	if (bp->flash_info->buffered) {
+	/* Calculate an offset of a buffered flash, not needed for 5709. */
+	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
 		offset = ((offset / bp->flash_info->page_size) <<
 			  bp->flash_info->page_bits) +
 			 (offset % bp->flash_info->page_size);
@@ -3439,8 +3450,8 @@ bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
 	/* Build the command word. */
 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
 
-	/* Calculate an offset of a buffered flash. */
-	if (bp->flash_info->buffered) {
+	/* Calculate an offset of a buffered flash, not needed for 5709. */
+	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
 		offset = ((offset / bp->flash_info->page_size) <<
 			  bp->flash_info->page_bits) +
 			 (offset % bp->flash_info->page_size);
@@ -3478,15 +3489,19 @@ static int
 bnx2_init_nvram(struct bnx2 *bp)
 {
 	u32 val;
-	int j, entry_count, rc;
+	int j, entry_count, rc = 0;
 	struct flash_spec *flash;
 
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		bp->flash_info = &flash_5709;
+		goto get_flash_size;
+	}
+
 	/* Determine the selected interface. */
 	val = REG_RD(bp, BNX2_NVM_CFG1);
 
 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
 
-	rc = 0;
 	if (val & 0x40000000) {
 
 		/* Flash interface has been reconfigured */
@@ -3542,6 +3557,7 @@ bnx2_init_nvram(struct bnx2 *bp)
 		return -ENODEV;
 	}
 
+get_flash_size:
 	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
 	if (val)
@@ -3706,7 +3722,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 		buf = align_buf;
 	}
 
-	if (bp->flash_info->buffered == 0) {
+	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
 		flash_buffer = kmalloc(264, GFP_KERNEL);
 		if (flash_buffer == NULL) {
 			rc = -ENOMEM;
@@ -3739,7 +3755,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 		bnx2_enable_nvram_access(bp);
 
 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
-		if (bp->flash_info->buffered == 0) {
+		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
 			int j;
 
 			/* Read the whole page into the buffer
@@ -3767,7 +3783,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 		/* Loop to write back the buffer data from page_start to
 		 * data_start */
 		i = 0;
-		if (bp->flash_info->buffered == 0) {
+		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
 			/* Erase the page */
 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
 				goto nvram_write_end;
@@ -3791,7 +3807,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 		/* Loop to write the new data from data_start to data_end */
 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
 			if ((addr == page_end - 4) ||
-			    ((bp->flash_info->buffered) &&
+			    ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
 			     (addr == data_end - 4))) {
 
 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
@@ -3808,7 +3824,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 
 		/* Loop to write back the buffer data from data_end
 		 * to page_end */
-		if (bp->flash_info->buffered == 0) {
+		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
 			for (addr = data_end; addr < page_end;
 			     addr += 4, i += 4) {
 
@@ -4107,7 +4123,7 @@ bnx2_init_chip(struct bnx2 *bp)
 	if (CHIP_NUM(bp) == CHIP_NUM_5708)
 		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
 	else
-		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
+		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
 	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
 
 	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
@@ -4127,10 +4143,6 @@ bnx2_init_chip(struct bnx2 *bp)
 
 	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
 
-	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
-	    BNX2_PORT_FEATURE_ASF_ENABLED)
-		bp->flags |= ASF_ENABLE_FLAG;
-
 	/* Initialize the receive filter. */
 	bnx2_set_rx_mode(bp->dev);
 
@@ -5786,8 +5798,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
 			bp->stats_ticks = USEC_PER_SEC;
 	}
-	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
-	bp->stats_ticks &= 0xffff00;
+	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
+		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
+	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
 
 	if (netif_running(bp->dev)) {
 		bnx2_netif_stop(bp);
@@ -6629,6 +6642,18 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 		if (i != 2)
 			bp->fw_version[j++] = '.';
 	}
+	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
+	    BNX2_PORT_FEATURE_ASF_ENABLED) {
+		bp->flags |= ASF_ENABLE_FLAG;
+
+		for (i = 0; i < 30; i++) {
+			reg = REG_RD_IND(bp, bp->shmem_base +
+					     BNX2_BC_STATE_CONDITION);
+			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
+				break;
+			msleep(10);
+		}
+	}
 	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
 	reg &= BNX2_CONDITION_MFW_RUN_MASK;
 	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
@@ -6672,7 +6697,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->rx_ticks_int = 18;
 	bp->rx_ticks = 18;
 
-	bp->stats_ticks = 1000000 & 0xffff00;
+	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
 
 	bp->timer_interval =  HZ;
 	bp->current_interval =  HZ;
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index d8cd1afeb23d..102adfe1e923 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6433,6 +6433,11 @@ struct sw_bd {
 #define ST_MICRO_FLASH_PAGE_SIZE		256
 #define ST_MICRO_FLASH_BASE_TOTAL_SIZE		65536
 
+#define BCM5709_FLASH_PAGE_BITS			8
+#define BCM5709_FLASH_PHY_PAGE_SIZE		(1 << BCM5709_FLASH_PAGE_BITS)
+#define BCM5709_FLASH_BYTE_ADDR_MASK		(BCM5709_FLASH_PHY_PAGE_SIZE-1)
+#define BCM5709_FLASH_PAGE_SIZE			256
+
 #define NVRAM_TIMEOUT_COUNT			30000
 
 
@@ -6449,7 +6454,10 @@ struct flash_spec {
 	u32 config2;
 	u32 config3;
 	u32 write1;
-	u32 buffered;
+	u32 flags;
+#define BNX2_NV_BUFFERED	0x00000001
+#define BNX2_NV_TRANSLATE	0x00000002
+#define BNX2_NV_WREN		0x00000004
 	u32 page_bits;
 	u32 page_size;
 	u32 addr_mask;
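The bnx2 changes above replace the single `buffered` boolean with three independent flags because the 5709's NVRAM breaks the old assumption that one bit can describe a part: it is buffered (no erase needed) but requires neither page/offset address translation nor the write-enable command sequence. Each call site now tests exactly the behavior it cares about; a condensed sketch of the three tests (helper names are illustrative, not from the driver):

	static bool nv_needs_erase(const struct flash_spec *f)
	{
		return !(f->flags & BNX2_NV_BUFFERED);	/* raw flash only */
	}

	static bool nv_translates_offsets(const struct flash_spec *f)
	{
		return f->flags & BNX2_NV_TRANSLATE;	/* page/offset packing */
	}

	static bool nv_needs_write_enable(const struct flash_spec *f)
	{
		return f->flags & BNX2_NV_WREN;		/* WREN command first */
	}

The legacy table entries keep their old behavior via BUFFERED_FLAGS (buffered + translate) and NONBUFFERED_FLAGS (write-enable), while flash_5709 sets BNX2_NV_BUFFERED alone.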
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 9afa47edfc58..3c54014acece 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -2292,10 +2292,15 @@ static int eepro100_resume(struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata (pdev);
 	struct speedo_private *sp = netdev_priv(dev);
 	void __iomem *ioaddr = sp->regs;
+	int rc;
 
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
-	pci_enable_device(pdev);
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		return rc;
+
 	pci_set_master(pdev);
 
 	if (!netif_running(dev))
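pci_enable_device() can fail (for example when resources cannot be reassigned after resume), so a resume handler that ignores its return value will keep poking a device that was never re-enabled; ne2k-pci below receives the identical fix. The general shape, as a sketch for a hypothetical driver with no private state of its own to restore:

	static int example_pci_resume(struct pci_dev *pdev)
	{
		int rc;

		pci_set_power_state(pdev, PCI_D0);
		pci_restore_state(pdev);

		rc = pci_enable_device(pdev);	/* may fail; propagate it */
		if (rc)
			return rc;

		pci_set_master(pdev);
		return 0;
	}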
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index d7a1a58de766..f92690555dd9 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -420,8 +420,18 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
 	if (ecntrl & ECNTRL_REDUCED_MODE) {
 		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
 			return PHY_INTERFACE_MODE_RMII;
-		else
+		else {
+			phy_interface_t interface = priv->einfo->interface;
+
+			/*
+			 * This isn't autodetected right now, so it must
+			 * be set by the device tree or platform code.
+			 */
+			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
+				return PHY_INTERFACE_MODE_RGMII_ID;
+
 			return PHY_INTERFACE_MODE_RGMII;
+		}
 	}
 
 	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 84aa2117c0ee..355c6cf3d112 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -320,7 +320,7 @@ static int eppconfig(struct baycom_state *bc)
 	sprintf(portarg, "%ld", bc->pdev->port->base);
 	printk(KERN_DEBUG "%s: %s -s -p %s -m %s\n", bc_drvname, eppconfig_path, portarg, modearg);
 
-	return call_usermodehelper(eppconfig_path, argv, envp, 1);
+	return call_usermodehelper(eppconfig_path, argv, envp, UMH_WAIT_PROC);
 }
 
 /* ---------------------------------------------------------------------- */
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 3450051ae56b..6bb48ba80964 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -671,7 +671,7 @@ static ssize_t natsemi_show_##_name(struct device *dev, \
 #define NATSEMI_CREATE_FILE(_dev, _name) \
 	device_create_file(&_dev->dev, &dev_attr_##_name)
 #define NATSEMI_REMOVE_FILE(_dev, _name) \
-	device_create_file(&_dev->dev, &dev_attr_##_name)
+	device_remove_file(&_dev->dev, &dev_attr_##_name)
 
 NATSEMI_ATTR(dspcfg_workaround);
 
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 995c0a5d4066..cfdeaf7aa163 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -669,10 +669,15 @@ static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state)
 static int ne2k_pci_resume (struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata (pdev);
+	int rc;
 
 	pci_set_power_state(pdev, 0);
 	pci_restore_state(pdev);
-	pci_enable_device(pdev);
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		return rc;
+
 	NS8390_init(dev, 1);
 	netif_device_attach(dev);
 
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index 3d5b4232f65f..22a3b3dc7d89 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -670,14 +670,10 @@ static void ni5010_set_multicast_list(struct net_device *dev)
 
 	PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));
 
-	if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI) {
+	if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) {
 		dev->flags |= IFF_PROMISC;
 		outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
 		PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
-	} else if (dev->mc_list) {
-		/* Sorry, multicast not supported */
-		PRINTK((KERN_DEBUG "%s: No multicast, entering broadcast mode\n", dev->name));
-		outb(RMD_BROADCAST, EDLC_RMODE);
 	} else {
 		PRINTK((KERN_DEBUG "%s: Entering broadcast mode\n", dev->name));
 		outb(RMD_BROADCAST, EDLC_RMODE); /* Disable promiscuous mode, use normal mode */
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 104aab3c957f..ea80e6cb3dec 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1582,7 +1582,7 @@ static void ns83820_set_multicast(struct net_device *ndev)
 	else
 		and_mask &= ~(RFCR_AAU | RFCR_AAM);
 
-	if (ndev->flags & IFF_ALLMULTI)
+	if (ndev->flags & IFF_ALLMULTI || ndev->mc_count)
 		or_mask |= RFCR_AAM;
 	else
 		and_mask &= ~RFCR_AAM;
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 596222b260d6..6a5385647911 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -21,6 +21,10 @@
 /* Vitesse Extended Control Register 1 */
 #define MII_VSC8244_EXT_CON1           0x17
 #define MII_VSC8244_EXTCON1_INIT       0x0000
+#define MII_VSC8244_EXTCON1_TX_SKEW_MASK	0x0c00
+#define MII_VSC8244_EXTCON1_RX_SKEW_MASK	0x0300
+#define MII_VSC8244_EXTCON1_TX_SKEW	0x0800
+#define MII_VSC8244_EXTCON1_RX_SKEW	0x0200
 
 /* Vitesse Interrupt Mask Register */
 #define MII_VSC8244_IMASK              0x19
@@ -39,7 +43,7 @@
 
 /* Vitesse Auxiliary Control/Status Register */
 #define MII_VSC8244_AUX_CONSTAT        0x1c
-#define MII_VSC8244_AUXCONSTAT_INIT    0x0004
+#define MII_VSC8244_AUXCONSTAT_INIT    0x0000
 #define MII_VSC8244_AUXCONSTAT_DUPLEX  0x0020
 #define MII_VSC8244_AUXCONSTAT_SPEED   0x0018
 #define MII_VSC8244_AUXCONSTAT_GBIT    0x0010
@@ -51,6 +55,7 @@ MODULE_LICENSE("GPL");
 
 static int vsc824x_config_init(struct phy_device *phydev)
 {
+	int extcon;
 	int err;
 
 	err = phy_write(phydev, MII_VSC8244_AUX_CONSTAT,
@@ -58,14 +63,34 @@ static int vsc824x_config_init(struct phy_device *phydev)
 	if (err < 0)
 		return err;
 
-	err = phy_write(phydev, MII_VSC8244_EXT_CON1,
-			MII_VSC8244_EXTCON1_INIT);
+	extcon = phy_read(phydev, MII_VSC8244_EXT_CON1);
+
+	if (extcon < 0)
+		return err;
+
+	extcon &= ~(MII_VSC8244_EXTCON1_TX_SKEW_MASK |
+			MII_VSC8244_EXTCON1_RX_SKEW_MASK);
+
+	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+		extcon |= (MII_VSC8244_EXTCON1_TX_SKEW |
+				MII_VSC8244_EXTCON1_RX_SKEW);
+
+	err = phy_write(phydev, MII_VSC8244_EXT_CON1, extcon);
+
 	return err;
 }
 
 static int vsc824x_ack_interrupt(struct phy_device *phydev)
 {
-	int err = phy_read(phydev, MII_VSC8244_ISTAT);
+	int err = 0;
+
+	/*
+	 * Don't bother to ACK the interrupts if interrupts
+	 * are disabled.  The 824x cannot clear the interrupts
+	 * if they are disabled.
+	 */
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+		err = phy_read(phydev, MII_VSC8244_ISTAT);
 
 	return (err < 0) ? err : 0;
 }
@@ -77,8 +102,19 @@ static int vsc824x_config_intr(struct phy_device *phydev)
 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
 		err = phy_write(phydev, MII_VSC8244_IMASK,
 				MII_VSC8244_IMASK_MASK);
-	else
+	else {
+		/*
+		 * The Vitesse PHY cannot clear the interrupt
+		 * once it has disabled them, so we clear them first
+		 */
+		err = phy_read(phydev, MII_VSC8244_ISTAT);
+
+		if (err)
+			return err;
+
 		err = phy_write(phydev, MII_VSC8244_IMASK, 0);
+	}
+
 	return err;
 }
 
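In RGMII-ID mode the PHY itself inserts the roughly 2 ns clock-to-data delay ("skew") on the transmit and receive paths, so the board needs no extra trace-length matching; config_init therefore becomes a read-modify-write of EXT_CON1 instead of blindly writing EXTCON1_INIT. The idiom in isolation (register and bit names are from this patch; note this sketch returns the failed read's own error code, whereas the hunk above returns err at that point):

	static int example_set_rgmii_skew(struct phy_device *phydev)
	{
		int val = phy_read(phydev, MII_VSC8244_EXT_CON1);

		if (val < 0)
			return val;	/* propagate the read failure */

		val &= ~(MII_VSC8244_EXTCON1_TX_SKEW_MASK |
			 MII_VSC8244_EXTCON1_RX_SKEW_MASK);

		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
			val |= MII_VSC8244_EXTCON1_TX_SKEW |
			       MII_VSC8244_EXTCON1_RX_SKEW;

		return phy_write(phydev, MII_VSC8244_EXT_CON1, val);
	}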
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 5891a0fbdc8b..f87176055d0e 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -824,6 +824,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
 	struct pppol2tp_session *session;
 	struct pppol2tp_tunnel *tunnel;
 	struct udphdr *uh;
+	unsigned int len;
 
 	error = -ENOTCONN;
 	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
@@ -912,14 +913,15 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
 	}
 
 	/* Queue the packet to IP for output */
+	len = skb->len;
 	error = ip_queue_xmit(skb, 1);
 
 	/* Update stats */
 	if (error >= 0) {
 		tunnel->stats.tx_packets++;
-		tunnel->stats.tx_bytes += skb->len;
+		tunnel->stats.tx_bytes += len;
 		session->stats.tx_packets++;
-		session->stats.tx_bytes += skb->len;
+		session->stats.tx_bytes += len;
 	} else {
 		tunnel->stats.tx_errors++;
 		session->stats.tx_errors++;
@@ -958,6 +960,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	__wsum csum = 0;
 	struct sk_buff *skb2 = NULL;
 	struct udphdr *uh;
+	unsigned int len;
 
 	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
 		goto abort;
@@ -1046,18 +1049,25 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 		printk("\n");
 	}
 
+	memset(&(IPCB(skb2)->opt), 0, sizeof(IPCB(skb2)->opt));
+	IPCB(skb2)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+			       IPSKB_REROUTED);
+	nf_reset(skb2);
+
 	/* Get routing info from the tunnel socket */
+	dst_release(skb2->dst);
 	skb2->dst = sk_dst_get(sk_tun);
 
 	/* Queue the packet to IP for output */
+	len = skb2->len;
 	rc = ip_queue_xmit(skb2, 1);
 
 	/* Update stats */
 	if (rc >= 0) {
 		tunnel->stats.tx_packets++;
-		tunnel->stats.tx_bytes += skb2->len;
+		tunnel->stats.tx_bytes += len;
 		session->stats.tx_packets++;
-		session->stats.tx_bytes += skb2->len;
+		session->stats.tx_bytes += len;
 	} else {
 		tunnel->stats.tx_errors++;
 		session->stats.tx_errors++;
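ip_queue_xmit() consumes the skb: by the time it returns, the buffer may already have been freed by the lower layers, so reading skb->len afterwards for the byte counters was a use-after-free. Both transmit paths now cache the length before handing the skb over. The rule, reduced to a sketch (example_stats stands in for the tunnel/session counters):

	static int example_xmit(struct example_stats *stats, struct sk_buff *skb)
	{
		unsigned int len = skb->len;	/* cache before ownership transfer */
		int error;

		error = ip_queue_xmit(skb, 1);	/* skb may be freed on return */

		if (error >= 0) {
			stats->tx_packets++;
			stats->tx_bytes += len;	/* safe: cached value, not skb->len */
		}
		return error;
	}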
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 982a9010c7a9..bb6896ae3151 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2338,7 +2338,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
 {
 	struct skb_shared_info *info = skb_shinfo(skb);
 	unsigned int cur_frag, entry;
-	struct TxDesc *txd;
+	struct TxDesc * uninitialized_var(txd);
 
 	entry = tp->cur_tx;
 	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c
index 451486b32f23..7dae4d404978 100644
--- a/drivers/net/saa9730.c
+++ b/drivers/net/saa9730.c
@@ -940,15 +940,14 @@ static void lan_saa9730_set_multicast(struct net_device *dev)
 			CAM_CONTROL_GROUP_ACC | CAM_CONTROL_BROAD_ACC,
 		       &lp->lan_saa9730_regs->CamCtl);
 	} else {
-		if (dev->flags & IFF_ALLMULTI) {
+		if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
 			/* accept all multicast packets */
-			writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC |
-			       CAM_CONTROL_BROAD_ACC,
-			       &lp->lan_saa9730_regs->CamCtl);
-		} else {
 			/*
 			 * Will handle the multicast stuff later. -carstenl
 			 */
+			writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC |
+			       CAM_CONTROL_BROAD_ACC,
+			       &lp->lan_saa9730_regs->CamCtl);
 		}
 	}
 
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index 8a667c13faef..b801e3b3a11a 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -12,6 +12,7 @@
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
+#include <linux/mutex.h>
 
 #include <asm/vio.h>
 #include <asm/ldc.h>
@@ -497,6 +498,8 @@ static void vnet_event(void *arg, int event)
 		vio_link_state_change(vio, event);
 		spin_unlock_irqrestore(&vio->lock, flags);
 
+		if (event == LDC_EVENT_RESET)
+			vio_port_up(vio);
 		return;
 	}
 
@@ -875,6 +878,115 @@ err_out:
 	return err;
 }
 
+static LIST_HEAD(vnet_list);
+static DEFINE_MUTEX(vnet_list_mutex);
+
+static struct vnet * __devinit vnet_new(const u64 *local_mac)
+{
+	struct net_device *dev;
+	struct vnet *vp;
+	int err, i;
+
+	dev = alloc_etherdev(sizeof(*vp));
+	if (!dev) {
+		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	for (i = 0; i < ETH_ALEN; i++)
+		dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
+
+	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+	vp = netdev_priv(dev);
+
+	spin_lock_init(&vp->lock);
+	vp->dev = dev;
+
+	INIT_LIST_HEAD(&vp->port_list);
+	for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
+		INIT_HLIST_HEAD(&vp->port_hash[i]);
+	INIT_LIST_HEAD(&vp->list);
+	vp->local_mac = *local_mac;
+
+	dev->open = vnet_open;
+	dev->stop = vnet_close;
+	dev->set_multicast_list = vnet_set_rx_mode;
+	dev->set_mac_address = vnet_set_mac_addr;
+	dev->tx_timeout = vnet_tx_timeout;
+	dev->ethtool_ops = &vnet_ethtool_ops;
+	dev->watchdog_timeo = VNET_TX_TIMEOUT;
+	dev->change_mtu = vnet_change_mtu;
+	dev->hard_start_xmit = vnet_start_xmit;
+
+	err = register_netdev(dev);
+	if (err) {
+		printk(KERN_ERR PFX "Cannot register net device, "
+		       "aborting.\n");
+		goto err_out_free_dev;
+	}
+
+	printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);
+
+	for (i = 0; i < 6; i++)
+		printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
+
+	list_add(&vp->list, &vnet_list);
+
+	return vp;
+
+err_out_free_dev:
+	free_netdev(dev);
+
+	return ERR_PTR(err);
+}
+
+static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
+{
+	struct vnet *iter, *vp;
+
+	mutex_lock(&vnet_list_mutex);
+	vp = NULL;
+	list_for_each_entry(iter, &vnet_list, list) {
+		if (iter->local_mac == *local_mac) {
+			vp = iter;
+			break;
+		}
+	}
+	if (!vp)
+		vp = vnet_new(local_mac);
+	mutex_unlock(&vnet_list_mutex);
+
+	return vp;
+}
+
+static const char *local_mac_prop = "local-mac-address";
+
+static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
+						u64 port_node)
+{
+	const u64 *local_mac = NULL;
+	u64 a;
+
+	mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
+		u64 target = mdesc_arc_target(hp, a);
+		const char *name;
+
+		name = mdesc_get_property(hp, target, "name", NULL);
+		if (!name || strcmp(name, "network"))
+			continue;
+
+		local_mac = mdesc_get_property(hp, target,
+					       local_mac_prop, NULL);
+		if (local_mac)
+			break;
+	}
+	if (!local_mac)
+		return ERR_PTR(-ENODEV);
+
+	return vnet_find_or_create(local_mac);
+}
+
 static struct ldc_channel_config vnet_ldc_cfg = {
 	.event		= vnet_event,
 	.mtu		= 64,
@@ -887,6 +999,14 @@ static struct vio_driver_ops vnet_vio_ops = {
 	.handshake_complete	= vnet_handshake_complete,
 };
 
+static void print_version(void)
+{
+	static int version_printed;
+
+	if (version_printed++ == 0)
+		printk(KERN_INFO "%s", version);
+}
+
 const char *remote_macaddr_prop = "remote-mac-address";
 
 static int __devinit vnet_port_probe(struct vio_dev *vdev,
@@ -899,14 +1019,17 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
 	const u64 *rmac;
 	int len, i, err, switch_port;
 
-	vp = dev_get_drvdata(vdev->dev.parent);
-	if (!vp) {
-		printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
-		return -ENODEV;
-	}
+	print_version();
 
 	hp = mdesc_grab();
 
+	vp = vnet_find_parent(hp, vdev->mp);
+	if (IS_ERR(vp)) {
+		printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
+		err = PTR_ERR(vp);
+		goto err_out_put_mdesc;
+	}
+
 	rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
 	err = -ENODEV;
 	if (!rmac) {
@@ -1025,139 +1148,14 @@ static struct vio_driver vnet_port_driver = {
 	}
 };
 
-const char *local_mac_prop = "local-mac-address";
-
-static int __devinit vnet_probe(struct vio_dev *vdev,
-				const struct vio_device_id *id)
-{
-	static int vnet_version_printed;
-	struct mdesc_handle *hp;
-	struct net_device *dev;
-	struct vnet *vp;
-	const u64 *mac;
-	int err, i, len;
-
-	if (vnet_version_printed++ == 0)
-		printk(KERN_INFO "%s", version);
-
-	hp = mdesc_grab();
-
-	mac = mdesc_get_property(hp, vdev->mp, local_mac_prop, &len);
-	if (!mac) {
-		printk(KERN_ERR PFX "vnet lacks %s property.\n",
-		       local_mac_prop);
-		err = -ENODEV;
-		goto err_out;
-	}
-
-	dev = alloc_etherdev(sizeof(*vp));
-	if (!dev) {
-		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
-		err = -ENOMEM;
-		goto err_out;
-	}
-
-	for (i = 0; i < ETH_ALEN; i++)
-		dev->dev_addr[i] = (*mac >> (5 - i) * 8) & 0xff;
-
-	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
-
-	SET_NETDEV_DEV(dev, &vdev->dev);
-
-	vp = netdev_priv(dev);
-
-	spin_lock_init(&vp->lock);
-	vp->dev = dev;
-	vp->vdev = vdev;
-
-	INIT_LIST_HEAD(&vp->port_list);
-	for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
-		INIT_HLIST_HEAD(&vp->port_hash[i]);
-
-	dev->open = vnet_open;
-	dev->stop = vnet_close;
-	dev->set_multicast_list = vnet_set_rx_mode;
-	dev->set_mac_address = vnet_set_mac_addr;
-	dev->tx_timeout = vnet_tx_timeout;
-	dev->ethtool_ops = &vnet_ethtool_ops;
-	dev->watchdog_timeo = VNET_TX_TIMEOUT;
-	dev->change_mtu = vnet_change_mtu;
-	dev->hard_start_xmit = vnet_start_xmit;
-
-	err = register_netdev(dev);
-	if (err) {
-		printk(KERN_ERR PFX "Cannot register net device, "
-		       "aborting.\n");
-		goto err_out_free_dev;
-	}
-
-	printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);
-
-	for (i = 0; i < 6; i++)
-		printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
-
-	dev_set_drvdata(&vdev->dev, vp);
-
-	mdesc_release(hp);
-
-	return 0;
-
-err_out_free_dev:
-	free_netdev(dev);
-
-err_out:
-	mdesc_release(hp);
-	return err;
-}
-
-static int vnet_remove(struct vio_dev *vdev)
-{
-
-	struct vnet *vp = dev_get_drvdata(&vdev->dev);
-
-	if (vp) {
-		/* XXX unregister port, or at least check XXX */
-		unregister_netdevice(vp->dev);
-		dev_set_drvdata(&vdev->dev, NULL);
-	}
-	return 0;
-}
-
-static struct vio_device_id vnet_match[] = {
-	{
-		.type = "network",
-	},
-	{},
-};
-MODULE_DEVICE_TABLE(vio, vnet_match);
-
-static struct vio_driver vnet_driver = {
-	.id_table	= vnet_match,
-	.probe		= vnet_probe,
-	.remove		= vnet_remove,
-	.driver		= {
-		.name	= "vnet",
-		.owner	= THIS_MODULE,
-	}
-};
-
 static int __init vnet_init(void)
 {
-	int err = vio_register_driver(&vnet_driver);
-
-	if (!err) {
-		err = vio_register_driver(&vnet_port_driver);
-		if (err)
-			vio_unregister_driver(&vnet_driver);
-	}
-
-	return err;
+	return vio_register_driver(&vnet_port_driver);
 }
 
 static void __exit vnet_exit(void)
 {
 	vio_unregister_driver(&vnet_port_driver);
-	vio_unregister_driver(&vnet_driver);
 }
 
 module_init(vnet_init);
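With this restructuring, a struct vnet is no longer tied to a single vio_dev: each port finds (or creates) its parent by walking the machine description's back-arcs to the "network" node and matching its local-mac-address property, and one vnet per MAC is kept on the mutex-protected global vnet_list. The MAC arrives as a u64 property with byte 0 of the Ethernet address in bits 47:40, which is what the (5 - i) * 8 shift in vnet_new() unpacks; a worked example with an assumed property value:

	/* local-mac-address = 0x00144f8f1a2b (illustrative value) */
	u64 mac = 0x00144f8f1a2bULL;
	u8 dev_addr[6];
	int i;

	for (i = 0; i < 6; i++)
		dev_addr[i] = (mac >> (5 - i) * 8) & 0xff;
	/* dev_addr = { 0x00, 0x14, 0x4f, 0x8f, 0x1a, 0x2b } */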
diff --git a/drivers/net/sunvnet.h b/drivers/net/sunvnet.h
index 1c887302d46d..7d3a0cac727b 100644
--- a/drivers/net/sunvnet.h
+++ b/drivers/net/sunvnet.h
@@ -60,11 +60,13 @@ struct vnet {
 	struct net_device	*dev;
 
 	u32			msg_enable;
-	struct vio_dev		*vdev;
 
 	struct list_head	port_list;
 
 	struct hlist_head	port_hash[VNET_PORT_HASH_SIZE];
+
+	struct list_head	list;
+	u64			local_mac;
 };
 
 #endif /* _SUNVNET_H */
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index 58d7e5d452fa..f83bb5cb0d3d 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -3692,7 +3692,6 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
 	__u16 rcode, correlator;
 	int err = 0;
 	__u8 xframe = 1;
-	__u16 tx_fstatus;
 
 	rmf->vl = SWAP_BYTES(rmf->vl);
 	if(rx_status & FCB_RX_STATUS_DA_MATCHED)
@@ -3783,7 +3782,9 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
 			}
 			break;
 
-		case TX_FORWARD:
+		case TX_FORWARD: {
+			__u16 uninitialized_var(tx_fstatus);
+
 			if((rcode = smctr_rcv_tx_forward(dev, rmf))
 				!= POSITIVE_ACK)
 			{
@@ -3811,6 +3812,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
 				}
 			}
 			break;
+		}
 
 		/* Received MAC Frames Processed by CRS/REM/RPS. */
 		case RSP:
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c index ec1c556a47ca..5d8c78ee2cd9 100644 --- a/drivers/net/wan/pc300_drv.c +++ b/drivers/net/wan/pc300_drv.c | |||
@@ -2833,6 +2833,8 @@ static int clock_rate_calc(uclong rate, uclong clock, int *br_io) | |||
2833 | int br, tc; | 2833 | int br, tc; |
2834 | int br_pwr, error; | 2834 | int br_pwr, error; |
2835 | 2835 | ||
2836 | *br_io = 0; | ||
2837 | |||
2836 | if (rate == 0) | 2838 | if (rate == 0) |
2837 | return (0); | 2839 | return (0); |
2838 | 2840 | ||
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index 35eded7ffb2d..1cc18e787a65 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c | |||
@@ -595,8 +595,8 @@ recv_frame( struct net_device *dev ) | |||
595 | 595 | ||
596 | u32 crc = CRC32_INITIAL; | 596 | u32 crc = CRC32_INITIAL; |
597 | 597 | ||
598 | unsigned framelen, frameno, ack; | 598 | unsigned framelen = 0, frameno, ack; |
599 | unsigned is_first, frame_ok; | 599 | unsigned is_first, frame_ok = 0; |
600 | 600 | ||
601 | if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) { | 601 | if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) { |
602 | frame_ok = framelen > 4 | 602 | frame_ok = framelen > 4 |
@@ -604,8 +604,7 @@ recv_frame( struct net_device *dev ) | |||
604 | : skip_tail( ioaddr, framelen, crc ); | 604 | : skip_tail( ioaddr, framelen, crc ); |
605 | if( frame_ok ) | 605 | if( frame_ok ) |
606 | interpret_ack( dev, ack ); | 606 | interpret_ack( dev, ack ); |
607 | } else | 607 | } |
608 | frame_ok = 0; | ||
609 | 608 | ||
610 | outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 ); | 609 | outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 ); |
611 | if( frame_ok ) { | 610 | if( frame_ok ) { |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c new file mode 100644 index 000000000000..489f69c5d6ca --- /dev/null +++ b/drivers/net/xen-netfront.c | |||
@@ -0,0 +1,1863 @@ | |||
1 | /* | ||
2 | * Virtual network driver for conversing with remote driver backends. | ||
3 | * | ||
4 | * Copyright (c) 2002-2005, K A Fraser | ||
5 | * Copyright (c) 2005, XenSource Ltd | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version 2 | ||
9 | * as published by the Free Software Foundation; or, when distributed | ||
10 | * separately from the Linux kernel or incorporated into other | ||
11 | * software packages, subject to the following license: | ||
12 | * | ||
13 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
14 | * of this source file (the "Software"), to deal in the Software without | ||
15 | * restriction, including without limitation the rights to use, copy, modify, | ||
16 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
17 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
18 | * the following conditions: | ||
19 | * | ||
20 | * The above copyright notice and this permission notice shall be included in | ||
21 | * all copies or substantial portions of the Software. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
24 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
25 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
26 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
27 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
28 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
29 | * IN THE SOFTWARE. | ||
30 | */ | ||
31 | |||
32 | #include <linux/module.h> | ||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/netdevice.h> | ||
35 | #include <linux/etherdevice.h> | ||
36 | #include <linux/skbuff.h> | ||
37 | #include <linux/ethtool.h> | ||
38 | #include <linux/if_ether.h> | ||
39 | #include <linux/tcp.h> | ||
40 | #include <linux/udp.h> | ||
41 | #include <linux/moduleparam.h> | ||
42 | #include <linux/mm.h> | ||
43 | #include <net/ip.h> | ||
44 | |||
45 | #include <xen/xenbus.h> | ||
46 | #include <xen/events.h> | ||
47 | #include <xen/page.h> | ||
48 | #include <xen/grant_table.h> | ||
49 | |||
50 | #include <xen/interface/io/netif.h> | ||
51 | #include <xen/interface/memory.h> | ||
52 | #include <xen/interface/grant_table.h> | ||
53 | |||
54 | static struct ethtool_ops xennet_ethtool_ops; | ||
55 | |||
56 | struct netfront_cb { | ||
57 | struct page *page; | ||
58 | unsigned offset; | ||
59 | }; | ||
60 | |||
61 | #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) | ||
62 | |||
63 | #define RX_COPY_THRESHOLD 256 | ||
64 | |||
65 | #define GRANT_INVALID_REF 0 | ||
66 | |||
67 | #define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE) | ||
68 | #define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE) | ||
69 | #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) | ||
70 | |||
71 | struct netfront_info { | ||
72 | struct list_head list; | ||
73 | struct net_device *netdev; | ||
74 | |||
75 | struct net_device_stats stats; | ||
76 | |||
77 | struct xen_netif_tx_front_ring tx; | ||
78 | struct xen_netif_rx_front_ring rx; | ||
79 | |||
80 | spinlock_t tx_lock; | ||
81 | spinlock_t rx_lock; | ||
82 | |||
83 | unsigned int evtchn; | ||
84 | |||
85 | /* Receive-ring batched refills. */ | ||
86 | #define RX_MIN_TARGET 8 | ||
87 | #define RX_DFL_MIN_TARGET 64 | ||
88 | #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) | ||
89 | unsigned rx_min_target, rx_max_target, rx_target; | ||
90 | struct sk_buff_head rx_batch; | ||
91 | |||
92 | struct timer_list rx_refill_timer; | ||
93 | |||
94 | /* | ||
95 | * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries | ||
96 | * are linked from tx_skb_freelist through skb_entry.link. | ||
97 | * | ||
98 | * NB. Freelist index entries are always going to be less than | ||
99 |  * PAGE_OFFSET, whereas pointers to skbs will always be equal to or | ||
100 | * greater than PAGE_OFFSET: we use this property to distinguish | ||
101 | * them. | ||
102 | */ | ||
103 | union skb_entry { | ||
104 | struct sk_buff *skb; | ||
105 | unsigned link; | ||
106 | } tx_skbs[NET_TX_RING_SIZE]; | ||
107 | grant_ref_t gref_tx_head; | ||
108 | grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; | ||
109 | unsigned tx_skb_freelist; | ||
110 | |||
111 | struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; | ||
112 | grant_ref_t gref_rx_head; | ||
113 | grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; | ||
114 | |||
115 | struct xenbus_device *xbdev; | ||
116 | int tx_ring_ref; | ||
117 | int rx_ring_ref; | ||
118 | |||
119 | unsigned long rx_pfn_array[NET_RX_RING_SIZE]; | ||
120 | struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; | ||
121 | struct mmu_update rx_mmu[NET_RX_RING_SIZE]; | ||
122 | }; | ||
123 | |||
124 | struct netfront_rx_info { | ||
125 | struct xen_netif_rx_response rx; | ||
126 | struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; | ||
127 | }; | ||
128 | |||
129 | /* | ||
130 |  * Access macros for acquiring and freeing slots in tx_skbs[]. | ||
131 | */ | ||
132 | |||
133 | static void add_id_to_freelist(unsigned *head, union skb_entry *list, | ||
134 | unsigned short id) | ||
135 | { | ||
136 | list[id].link = *head; | ||
137 | *head = id; | ||
138 | } | ||
139 | |||
140 | static unsigned short get_id_from_freelist(unsigned *head, | ||
141 | union skb_entry *list) | ||
142 | { | ||
143 | unsigned int id = *head; | ||
144 | *head = list[id].link; | ||
145 | return id; | ||
146 | } | ||
147 | |||
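(Editorial sketch, not part of the patch: the PAGE_OFFSET property described in the tx_skbs comment above can be made explicit with a small helper; the name skb_entry_is_link is assumed here.)

	static inline int skb_entry_is_link(const union skb_entry *e)
	{
		/* Freelist indices are small integers below PAGE_OFFSET;
		 * real skb pointers are kernel addresses at or above it. */
		return (unsigned long)e->skb < PAGE_OFFSET;
	}
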
148 | static int xennet_rxidx(RING_IDX idx) | ||
149 | { | ||
150 | return idx & (NET_RX_RING_SIZE - 1); | ||
151 | } | ||
152 | |||
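(Note: the masking in xennet_rxidx() is valid because __RING_SIZE() always evaluates to a power of two, so idx & (NET_RX_RING_SIZE - 1) is equivalent to idx % NET_RX_RING_SIZE.)
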
153 | static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, | ||
154 | RING_IDX ri) | ||
155 | { | ||
156 | int i = xennet_rxidx(ri); | ||
157 | struct sk_buff *skb = np->rx_skbs[i]; | ||
158 | np->rx_skbs[i] = NULL; | ||
159 | return skb; | ||
160 | } | ||
161 | |||
162 | static grant_ref_t xennet_get_rx_ref(struct netfront_info *np, | ||
163 | RING_IDX ri) | ||
164 | { | ||
165 | int i = xennet_rxidx(ri); | ||
166 | grant_ref_t ref = np->grant_rx_ref[i]; | ||
167 | np->grant_rx_ref[i] = GRANT_INVALID_REF; | ||
168 | return ref; | ||
169 | } | ||
170 | |||
171 | #ifdef CONFIG_SYSFS | ||
172 | static int xennet_sysfs_addif(struct net_device *netdev); | ||
173 | static void xennet_sysfs_delif(struct net_device *netdev); | ||
174 | #else /* !CONFIG_SYSFS */ | ||
175 | #define xennet_sysfs_addif(dev) (0) | ||
176 | #define xennet_sysfs_delif(dev) do { } while (0) | ||
177 | #endif | ||
178 | |||
179 | static int xennet_can_sg(struct net_device *dev) | ||
180 | { | ||
181 | return dev->features & NETIF_F_SG; | ||
182 | } | ||
183 | |||
184 | |||
185 | static void rx_refill_timeout(unsigned long data) | ||
186 | { | ||
187 | struct net_device *dev = (struct net_device *)data; | ||
188 | netif_rx_schedule(dev); | ||
189 | } | ||
190 | |||
191 | static int netfront_tx_slot_available(struct netfront_info *np) | ||
192 | { | ||
193 | return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < | ||
194 | (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); | ||
195 | } | ||
196 | |||
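(The headroom reserved above, MAX_SKB_FRAGS + 2 slots, matches the worst case a single skb can consume in xennet_start_xmit(): up to MAX_SKB_FRAGS + 1 data slots for the fragments plus the linear header, and one more for a GSO extra-info request.)
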
197 | static void xennet_maybe_wake_tx(struct net_device *dev) | ||
198 | { | ||
199 | struct netfront_info *np = netdev_priv(dev); | ||
200 | |||
201 | if (unlikely(netif_queue_stopped(dev)) && | ||
202 | netfront_tx_slot_available(np) && | ||
203 | likely(netif_running(dev))) | ||
204 | netif_wake_queue(dev); | ||
205 | } | ||
206 | |||
207 | static void xennet_alloc_rx_buffers(struct net_device *dev) | ||
208 | { | ||
209 | unsigned short id; | ||
210 | struct netfront_info *np = netdev_priv(dev); | ||
211 | struct sk_buff *skb; | ||
212 | struct page *page; | ||
213 | int i, batch_target, notify; | ||
214 | RING_IDX req_prod = np->rx.req_prod_pvt; | ||
215 | struct xen_memory_reservation reservation; | ||
216 | grant_ref_t ref; | ||
217 | unsigned long pfn; | ||
218 | void *vaddr; | ||
219 | int nr_flips; | ||
220 | struct xen_netif_rx_request *req; | ||
221 | |||
222 | if (unlikely(!netif_carrier_ok(dev))) | ||
223 | return; | ||
224 | |||
225 | /* | ||
226 | * Allocate skbuffs greedily, even though we batch updates to the | ||
227 | * receive ring. This creates a less bursty demand on the memory | ||
228 | * allocator, so should reduce the chance of failed allocation requests | ||
229 | * both for ourself and for other kernel subsystems. | ||
230 | */ | ||
231 | batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); | ||
232 | for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { | ||
233 | skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD, | ||
234 | GFP_ATOMIC | __GFP_NOWARN); | ||
235 | if (unlikely(!skb)) | ||
236 | goto no_skb; | ||
237 | |||
238 | page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); | ||
239 | if (!page) { | ||
240 | kfree_skb(skb); | ||
241 | no_skb: | ||
242 | /* Any skbuffs queued for refill? Force them out. */ | ||
243 | if (i != 0) | ||
244 | goto refill; | ||
245 | /* Could not allocate any skbuffs. Try again later. */ | ||
246 | mod_timer(&np->rx_refill_timer, | ||
247 | jiffies + (HZ/10)); | ||
248 | break; | ||
249 | } | ||
250 | |||
251 | skb_shinfo(skb)->frags[0].page = page; | ||
252 | skb_shinfo(skb)->nr_frags = 1; | ||
253 | __skb_queue_tail(&np->rx_batch, skb); | ||
254 | } | ||
255 | |||
256 | /* Is the batch large enough to be worthwhile? */ | ||
257 | if (i < (np->rx_target/2)) { | ||
258 | if (req_prod > np->rx.sring->req_prod) | ||
259 | goto push; | ||
260 | return; | ||
261 | } | ||
262 | |||
263 | /* Adjust our fill target if we risked running out of buffers. */ | ||
264 | if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && | ||
265 | ((np->rx_target *= 2) > np->rx_max_target)) | ||
266 | np->rx_target = np->rx_max_target; | ||
267 | |||
268 | refill: | ||
269 | for (nr_flips = i = 0; ; i++) { | ||
270 | skb = __skb_dequeue(&np->rx_batch); | ||
271 | if (skb == NULL) | ||
272 | break; | ||
273 | |||
274 | skb->dev = dev; | ||
275 | |||
276 | id = xennet_rxidx(req_prod + i); | ||
277 | |||
278 | BUG_ON(np->rx_skbs[id]); | ||
279 | np->rx_skbs[id] = skb; | ||
280 | |||
281 | ref = gnttab_claim_grant_reference(&np->gref_rx_head); | ||
282 | BUG_ON((signed short)ref < 0); | ||
283 | np->grant_rx_ref[id] = ref; | ||
284 | |||
285 | pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); | ||
286 | vaddr = page_address(skb_shinfo(skb)->frags[0].page); | ||
287 | |||
288 | req = RING_GET_REQUEST(&np->rx, req_prod + i); | ||
289 | gnttab_grant_foreign_access_ref(ref, | ||
290 | np->xbdev->otherend_id, | ||
291 | pfn_to_mfn(pfn), | ||
292 | 0); | ||
293 | |||
294 | req->id = id; | ||
295 | req->gref = ref; | ||
296 | } | ||
297 | |||
298 | if (nr_flips != 0) { | ||
299 | reservation.extent_start = np->rx_pfn_array; | ||
300 | reservation.nr_extents = nr_flips; | ||
301 | reservation.extent_order = 0; | ||
302 | reservation.address_bits = 0; | ||
303 | reservation.domid = DOMID_SELF; | ||
304 | |||
305 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | ||
306 | /* After all PTEs have been zapped, flush the TLB. */ | ||
307 | np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = | ||
308 | UVMF_TLB_FLUSH|UVMF_ALL; | ||
309 | |||
310 | /* Give away a batch of pages. */ | ||
311 | np->rx_mcl[i].op = __HYPERVISOR_memory_op; | ||
312 | np->rx_mcl[i].args[0] = XENMEM_decrease_reservation; | ||
313 | np->rx_mcl[i].args[1] = (unsigned long)&reservation; | ||
314 | |||
315 | /* Zap PTEs and give away pages in one big | ||
316 | * multicall. */ | ||
317 | (void)HYPERVISOR_multicall(np->rx_mcl, i+1); | ||
318 | |||
319 | /* Check return status of HYPERVISOR_memory_op(). */ | ||
320 | if (unlikely(np->rx_mcl[i].result != i)) | ||
321 | panic("Unable to reduce memory reservation\n"); | ||
322 | } else { | ||
323 | if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, | ||
324 | &reservation) != i) | ||
325 | panic("Unable to reduce memory reservation\n"); | ||
326 | } | ||
327 | } else { | ||
328 | wmb(); /* barrier so backend sees requests */ | ||
329 | } | ||
330 | |||
331 | /* Above is a suitable barrier to ensure backend will see requests. */ | ||
332 | np->rx.req_prod_pvt = req_prod + i; | ||
333 | push: | ||
334 | RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); | ||
335 | if (notify) | ||
336 | notify_remote_via_irq(np->netdev->irq); | ||
337 | } | ||
338 | |||
339 | static int xennet_open(struct net_device *dev) | ||
340 | { | ||
341 | struct netfront_info *np = netdev_priv(dev); | ||
342 | |||
343 | memset(&np->stats, 0, sizeof(np->stats)); | ||
344 | |||
345 | spin_lock_bh(&np->rx_lock); | ||
346 | if (netif_carrier_ok(dev)) { | ||
347 | xennet_alloc_rx_buffers(dev); | ||
348 | np->rx.sring->rsp_event = np->rx.rsp_cons + 1; | ||
349 | if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) | ||
350 | netif_rx_schedule(dev); | ||
351 | } | ||
352 | spin_unlock_bh(&np->rx_lock); | ||
353 | |||
354 | xennet_maybe_wake_tx(dev); | ||
355 | |||
356 | return 0; | ||
357 | } | ||
358 | |||
359 | static void xennet_tx_buf_gc(struct net_device *dev) | ||
360 | { | ||
361 | RING_IDX cons, prod; | ||
362 | unsigned short id; | ||
363 | struct netfront_info *np = netdev_priv(dev); | ||
364 | struct sk_buff *skb; | ||
365 | |||
366 | BUG_ON(!netif_carrier_ok(dev)); | ||
367 | |||
368 | do { | ||
369 | prod = np->tx.sring->rsp_prod; | ||
370 | rmb(); /* Ensure we see responses up to 'rp'. */ | ||
371 | |||
372 | for (cons = np->tx.rsp_cons; cons != prod; cons++) { | ||
373 | struct xen_netif_tx_response *txrsp; | ||
374 | |||
375 | txrsp = RING_GET_RESPONSE(&np->tx, cons); | ||
376 | if (txrsp->status == NETIF_RSP_NULL) | ||
377 | continue; | ||
378 | |||
379 | id = txrsp->id; | ||
380 | skb = np->tx_skbs[id].skb; | ||
381 | if (unlikely(gnttab_query_foreign_access( | ||
382 | np->grant_tx_ref[id]) != 0)) { | ||
383 | printk(KERN_ALERT "xennet_tx_buf_gc: warning " | ||
384 | "-- grant still in use by backend " | ||
385 | "domain.\n"); | ||
386 | BUG(); | ||
387 | } | ||
388 | gnttab_end_foreign_access_ref( | ||
389 | np->grant_tx_ref[id], GNTMAP_readonly); | ||
390 | gnttab_release_grant_reference( | ||
391 | &np->gref_tx_head, np->grant_tx_ref[id]); | ||
392 | np->grant_tx_ref[id] = GRANT_INVALID_REF; | ||
393 | add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id); | ||
394 | dev_kfree_skb_irq(skb); | ||
395 | } | ||
396 | |||
397 | np->tx.rsp_cons = prod; | ||
398 | |||
399 | /* | ||
400 | * Set a new event, then check for race with update of tx_cons. | ||
401 | * Note that it is essential to schedule a callback, no matter | ||
402 | * how few buffers are pending. Even if there is space in the | ||
403 | * transmit ring, higher layers may be blocked because too much | ||
404 | * data is outstanding: in such cases notification from Xen is | ||
405 | * likely to be the only kick that we'll get. | ||
406 | */ | ||
407 | np->tx.sring->rsp_event = | ||
408 | prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; | ||
409 | mb(); /* update shared area */ | ||
410 | } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); | ||
411 | |||
412 | xennet_maybe_wake_tx(dev); | ||
413 | } | ||
414 | |||
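(Worked example of the rsp_event heuristic above: with prod = 10 and req_prod = 18, rsp_event is set to 10 + ((18 - 10) >> 1) + 1 = 15, so the next notification arrives once roughly half of the outstanding requests have completed, rather than only when the ring drains.)
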
415 | static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, | ||
416 | struct xen_netif_tx_request *tx) | ||
417 | { | ||
418 | struct netfront_info *np = netdev_priv(dev); | ||
419 | char *data = skb->data; | ||
420 | unsigned long mfn; | ||
421 | RING_IDX prod = np->tx.req_prod_pvt; | ||
422 | int frags = skb_shinfo(skb)->nr_frags; | ||
423 | unsigned int offset = offset_in_page(data); | ||
424 | unsigned int len = skb_headlen(skb); | ||
425 | unsigned int id; | ||
426 | grant_ref_t ref; | ||
427 | int i; | ||
428 | |||
429 | /* While the header overlaps a page boundary (including being | ||
430 | larger than a page), split it into page-sized chunks. */ | ||
431 | while (len > PAGE_SIZE - offset) { | ||
432 | tx->size = PAGE_SIZE - offset; | ||
433 | tx->flags |= NETTXF_more_data; | ||
434 | len -= tx->size; | ||
435 | data += tx->size; | ||
436 | offset = 0; | ||
437 | |||
438 | id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); | ||
439 | np->tx_skbs[id].skb = skb_get(skb); | ||
440 | tx = RING_GET_REQUEST(&np->tx, prod++); | ||
441 | tx->id = id; | ||
442 | ref = gnttab_claim_grant_reference(&np->gref_tx_head); | ||
443 | BUG_ON((signed short)ref < 0); | ||
444 | |||
445 | mfn = virt_to_mfn(data); | ||
446 | gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, | ||
447 | mfn, GNTMAP_readonly); | ||
448 | |||
449 | tx->gref = np->grant_tx_ref[id] = ref; | ||
450 | tx->offset = offset; | ||
451 | tx->size = len; | ||
452 | tx->flags = 0; | ||
453 | } | ||
454 | |||
455 | /* Grant backend access to each skb fragment page. */ | ||
456 | for (i = 0; i < frags; i++) { | ||
457 | skb_frag_t *frag = skb_shinfo(skb)->frags + i; | ||
458 | |||
459 | tx->flags |= NETTXF_more_data; | ||
460 | |||
461 | id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); | ||
462 | np->tx_skbs[id].skb = skb_get(skb); | ||
463 | tx = RING_GET_REQUEST(&np->tx, prod++); | ||
464 | tx->id = id; | ||
465 | ref = gnttab_claim_grant_reference(&np->gref_tx_head); | ||
466 | BUG_ON((signed short)ref < 0); | ||
467 | |||
468 | mfn = pfn_to_mfn(page_to_pfn(frag->page)); | ||
469 | gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, | ||
470 | mfn, GNTMAP_readonly); | ||
471 | |||
472 | tx->gref = np->grant_tx_ref[id] = ref; | ||
473 | tx->offset = frag->page_offset; | ||
474 | tx->size = frag->size; | ||
475 | tx->flags = 0; | ||
476 | } | ||
477 | |||
478 | np->tx.req_prod_pvt = prod; | ||
479 | } | ||
480 | |||
481 | static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
482 | { | ||
483 | unsigned short id; | ||
484 | struct netfront_info *np = netdev_priv(dev); | ||
485 | struct xen_netif_tx_request *tx; | ||
486 | struct xen_netif_extra_info *extra; | ||
487 | char *data = skb->data; | ||
488 | RING_IDX i; | ||
489 | grant_ref_t ref; | ||
490 | unsigned long mfn; | ||
491 | int notify; | ||
492 | int frags = skb_shinfo(skb)->nr_frags; | ||
493 | unsigned int offset = offset_in_page(data); | ||
494 | unsigned int len = skb_headlen(skb); | ||
495 | |||
496 | frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE; | ||
497 | if (unlikely(frags > MAX_SKB_FRAGS + 1)) { | ||
498 | printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", | ||
499 | frags); | ||
500 | dump_stack(); | ||
501 | goto drop; | ||
502 | } | ||
503 | |||
504 | spin_lock_irq(&np->tx_lock); | ||
505 | |||
506 | if (unlikely(!netif_carrier_ok(dev) || | ||
507 | (frags > 1 && !xennet_can_sg(dev)) || | ||
508 | netif_needs_gso(dev, skb))) { | ||
509 | spin_unlock_irq(&np->tx_lock); | ||
510 | goto drop; | ||
511 | } | ||
512 | |||
513 | i = np->tx.req_prod_pvt; | ||
514 | |||
515 | id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); | ||
516 | np->tx_skbs[id].skb = skb; | ||
517 | |||
518 | tx = RING_GET_REQUEST(&np->tx, i); | ||
519 | |||
520 | tx->id = id; | ||
521 | ref = gnttab_claim_grant_reference(&np->gref_tx_head); | ||
522 | BUG_ON((signed short)ref < 0); | ||
523 | mfn = virt_to_mfn(data); | ||
524 | gnttab_grant_foreign_access_ref( | ||
525 | ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); | ||
526 | tx->gref = np->grant_tx_ref[id] = ref; | ||
527 | tx->offset = offset; | ||
528 | tx->size = len; | ||
529 | extra = NULL; | ||
530 | |||
531 | tx->flags = 0; | ||
532 | if (skb->ip_summed == CHECKSUM_PARTIAL) | ||
533 | /* local packet? */ | ||
534 | tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; | ||
535 | else if (skb->ip_summed == CHECKSUM_UNNECESSARY) | ||
536 | /* remote but checksummed. */ | ||
537 | tx->flags |= NETTXF_data_validated; | ||
538 | |||
539 | if (skb_shinfo(skb)->gso_size) { | ||
540 | struct xen_netif_extra_info *gso; | ||
541 | |||
542 | gso = (struct xen_netif_extra_info *) | ||
543 | RING_GET_REQUEST(&np->tx, ++i); | ||
544 | |||
545 | if (extra) | ||
546 | extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; | ||
547 | else | ||
548 | tx->flags |= NETTXF_extra_info; | ||
549 | |||
550 | gso->u.gso.size = skb_shinfo(skb)->gso_size; | ||
551 | gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; | ||
552 | gso->u.gso.pad = 0; | ||
553 | gso->u.gso.features = 0; | ||
554 | |||
555 | gso->type = XEN_NETIF_EXTRA_TYPE_GSO; | ||
556 | gso->flags = 0; | ||
557 | extra = gso; | ||
558 | } | ||
559 | |||
560 | np->tx.req_prod_pvt = i + 1; | ||
561 | |||
562 | xennet_make_frags(skb, dev, tx); | ||
563 | tx->size = skb->len; | ||
564 | |||
565 | RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); | ||
566 | if (notify) | ||
567 | notify_remote_via_irq(np->netdev->irq); | ||
568 | |||
569 | xennet_tx_buf_gc(dev); | ||
570 | |||
571 | if (!netfront_tx_slot_available(np)) | ||
572 | netif_stop_queue(dev); | ||
573 | |||
574 | spin_unlock_irq(&np->tx_lock); | ||
575 | |||
576 | np->stats.tx_bytes += skb->len; | ||
577 | np->stats.tx_packets++; | ||
578 | |||
579 | return 0; | ||
580 | |||
581 | drop: | ||
582 | np->stats.tx_dropped++; | ||
583 | dev_kfree_skb(skb); | ||
584 | return 0; | ||
585 | } | ||
586 | |||
587 | static int xennet_close(struct net_device *dev) | ||
588 | { | ||
589 | struct netfront_info *np = netdev_priv(dev); | ||
590 | netif_stop_queue(np->netdev); | ||
591 | return 0; | ||
592 | } | ||
593 | |||
594 | static struct net_device_stats *xennet_get_stats(struct net_device *dev) | ||
595 | { | ||
596 | struct netfront_info *np = netdev_priv(dev); | ||
597 | return &np->stats; | ||
598 | } | ||
599 | |||
600 | static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, | ||
601 | grant_ref_t ref) | ||
602 | { | ||
603 | int new = xennet_rxidx(np->rx.req_prod_pvt); | ||
604 | |||
605 | BUG_ON(np->rx_skbs[new]); | ||
606 | np->rx_skbs[new] = skb; | ||
607 | np->grant_rx_ref[new] = ref; | ||
608 | RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; | ||
609 | RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; | ||
610 | np->rx.req_prod_pvt++; | ||
611 | } | ||
612 | |||
613 | static int xennet_get_extras(struct netfront_info *np, | ||
614 | struct xen_netif_extra_info *extras, | ||
615 | RING_IDX rp) | ||
616 | |||
617 | { | ||
618 | struct xen_netif_extra_info *extra; | ||
619 | struct device *dev = &np->netdev->dev; | ||
620 | RING_IDX cons = np->rx.rsp_cons; | ||
621 | int err = 0; | ||
622 | |||
623 | do { | ||
624 | struct sk_buff *skb; | ||
625 | grant_ref_t ref; | ||
626 | |||
627 | if (unlikely(cons + 1 == rp)) { | ||
628 | if (net_ratelimit()) | ||
629 | dev_warn(dev, "Missing extra info\n"); | ||
630 | err = -EBADR; | ||
631 | break; | ||
632 | } | ||
633 | |||
634 | extra = (struct xen_netif_extra_info *) | ||
635 | RING_GET_RESPONSE(&np->rx, ++cons); | ||
636 | |||
637 | if (unlikely(!extra->type || | ||
638 | extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { | ||
639 | if (net_ratelimit()) | ||
640 | dev_warn(dev, "Invalid extra type: %d\n", | ||
641 | extra->type); | ||
642 | err = -EINVAL; | ||
643 | } else { | ||
644 | memcpy(&extras[extra->type - 1], extra, | ||
645 | sizeof(*extra)); | ||
646 | } | ||
647 | |||
648 | skb = xennet_get_rx_skb(np, cons); | ||
649 | ref = xennet_get_rx_ref(np, cons); | ||
650 | xennet_move_rx_slot(np, skb, ref); | ||
651 | } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); | ||
652 | |||
653 | np->rx.rsp_cons = cons; | ||
654 | return err; | ||
655 | } | ||
656 | |||
657 | static int xennet_get_responses(struct netfront_info *np, | ||
658 | struct netfront_rx_info *rinfo, RING_IDX rp, | ||
659 | struct sk_buff_head *list) | ||
660 | { | ||
661 | struct xen_netif_rx_response *rx = &rinfo->rx; | ||
662 | struct xen_netif_extra_info *extras = rinfo->extras; | ||
663 | struct device *dev = &np->netdev->dev; | ||
664 | RING_IDX cons = np->rx.rsp_cons; | ||
665 | struct sk_buff *skb = xennet_get_rx_skb(np, cons); | ||
666 | grant_ref_t ref = xennet_get_rx_ref(np, cons); | ||
667 | int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); | ||
668 | int frags = 1; | ||
669 | int err = 0; | ||
670 | unsigned long ret; | ||
671 | |||
672 | if (rx->flags & NETRXF_extra_info) { | ||
673 | err = xennet_get_extras(np, extras, rp); | ||
674 | cons = np->rx.rsp_cons; | ||
675 | } | ||
676 | |||
677 | for (;;) { | ||
678 | if (unlikely(rx->status < 0 || | ||
679 | rx->offset + rx->status > PAGE_SIZE)) { | ||
680 | if (net_ratelimit()) | ||
681 | dev_warn(dev, "rx->offset: %x, size: %u\n", | ||
682 | rx->offset, rx->status); | ||
683 | xennet_move_rx_slot(np, skb, ref); | ||
684 | err = -EINVAL; | ||
685 | goto next; | ||
686 | } | ||
687 | |||
688 | /* | ||
689 | * This definitely indicates a bug, either in this driver or in | ||
690 | * the backend driver. In future this should flag the bad | ||
691 |  * situation to the system controller to reboot the backend. | ||
692 | */ | ||
693 | if (ref == GRANT_INVALID_REF) { | ||
694 | if (net_ratelimit()) | ||
695 | dev_warn(dev, "Bad rx response id %d.\n", | ||
696 | rx->id); | ||
697 | err = -EINVAL; | ||
698 | goto next; | ||
699 | } | ||
700 | |||
701 | ret = gnttab_end_foreign_access_ref(ref, 0); | ||
702 | BUG_ON(!ret); | ||
703 | |||
704 | gnttab_release_grant_reference(&np->gref_rx_head, ref); | ||
705 | |||
706 | __skb_queue_tail(list, skb); | ||
707 | |||
708 | next: | ||
709 | if (!(rx->flags & NETRXF_more_data)) | ||
710 | break; | ||
711 | |||
712 | if (cons + frags == rp) { | ||
713 | if (net_ratelimit()) | ||
714 | dev_warn(dev, "Need more frags\n"); | ||
715 | err = -ENOENT; | ||
716 | break; | ||
717 | } | ||
718 | |||
719 | rx = RING_GET_RESPONSE(&np->rx, cons + frags); | ||
720 | skb = xennet_get_rx_skb(np, cons + frags); | ||
721 | ref = xennet_get_rx_ref(np, cons + frags); | ||
722 | frags++; | ||
723 | } | ||
724 | |||
725 | if (unlikely(frags > max)) { | ||
726 | if (net_ratelimit()) | ||
727 | dev_warn(dev, "Too many frags\n"); | ||
728 | err = -E2BIG; | ||
729 | } | ||
730 | |||
731 | if (unlikely(err)) | ||
732 | np->rx.rsp_cons = cons + frags; | ||
733 | |||
734 | return err; | ||
735 | } | ||
736 | |||
737 | static int xennet_set_skb_gso(struct sk_buff *skb, | ||
738 | struct xen_netif_extra_info *gso) | ||
739 | { | ||
740 | if (!gso->u.gso.size) { | ||
741 | if (net_ratelimit()) | ||
742 | printk(KERN_WARNING "GSO size must not be zero.\n"); | ||
743 | return -EINVAL; | ||
744 | } | ||
745 | |||
746 | /* Currently only TCPv4 S.O. is supported. */ | ||
747 | if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { | ||
748 | if (net_ratelimit()) | ||
749 | printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type); | ||
750 | return -EINVAL; | ||
751 | } | ||
752 | |||
753 | skb_shinfo(skb)->gso_size = gso->u.gso.size; | ||
754 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; | ||
755 | |||
756 | /* Header must be checked, and gso_segs computed. */ | ||
757 | skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; | ||
758 | skb_shinfo(skb)->gso_segs = 0; | ||
759 | |||
760 | return 0; | ||
761 | } | ||
762 | |||
763 | static RING_IDX xennet_fill_frags(struct netfront_info *np, | ||
764 | struct sk_buff *skb, | ||
765 | struct sk_buff_head *list) | ||
766 | { | ||
767 | struct skb_shared_info *shinfo = skb_shinfo(skb); | ||
768 | int nr_frags = shinfo->nr_frags; | ||
769 | RING_IDX cons = np->rx.rsp_cons; | ||
770 | skb_frag_t *frag = shinfo->frags + nr_frags; | ||
771 | struct sk_buff *nskb; | ||
772 | |||
773 | while ((nskb = __skb_dequeue(list))) { | ||
774 | struct xen_netif_rx_response *rx = | ||
775 | RING_GET_RESPONSE(&np->rx, ++cons); | ||
776 | |||
777 | frag->page = skb_shinfo(nskb)->frags[0].page; | ||
778 | frag->page_offset = rx->offset; | ||
779 | frag->size = rx->status; | ||
780 | |||
781 | skb->data_len += rx->status; | ||
782 | |||
783 | skb_shinfo(nskb)->nr_frags = 0; | ||
784 | kfree_skb(nskb); | ||
785 | |||
786 | frag++; | ||
787 | nr_frags++; | ||
788 | } | ||
789 | |||
790 | shinfo->nr_frags = nr_frags; | ||
791 | return cons; | ||
792 | } | ||
793 | |||
794 | static int skb_checksum_setup(struct sk_buff *skb) | ||
795 | { | ||
796 | struct iphdr *iph; | ||
797 | unsigned char *th; | ||
798 | int err = -EPROTO; | ||
799 | |||
800 | if (skb->protocol != htons(ETH_P_IP)) | ||
801 | goto out; | ||
802 | |||
803 | iph = (void *)skb->data; | ||
804 | th = skb->data + 4 * iph->ihl; | ||
805 | if (th >= skb_tail_pointer(skb)) | ||
806 | goto out; | ||
807 | |||
808 | skb->csum_start = th - skb->head; | ||
809 | switch (iph->protocol) { | ||
810 | case IPPROTO_TCP: | ||
811 | skb->csum_offset = offsetof(struct tcphdr, check); | ||
812 | break; | ||
813 | case IPPROTO_UDP: | ||
814 | skb->csum_offset = offsetof(struct udphdr, check); | ||
815 | break; | ||
816 | default: | ||
817 | if (net_ratelimit()) | ||
818 | printk(KERN_ERR "Attempting to checksum a non-" | ||
819 | "TCP/UDP packet, dropping a protocol" | ||
820 | " %d packet", iph->protocol); | ||
821 | goto out; | ||
822 | } | ||
823 | |||
824 | if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb)) | ||
825 | goto out; | ||
826 | |||
827 | err = 0; | ||
828 | |||
829 | out: | ||
830 | return err; | ||
831 | } | ||
832 | |||
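(For concreteness: the csum_offset values set above resolve to fixed byte offsets inside the transport header, offsetof(struct tcphdr, check) == 16 and offsetof(struct udphdr, check) == 6, while csum_start records where that header begins relative to skb->head.)
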
833 | static int handle_incoming_queue(struct net_device *dev, | ||
834 | struct sk_buff_head *rxq) | ||
835 | { | ||
836 | struct netfront_info *np = netdev_priv(dev); | ||
837 | int packets_dropped = 0; | ||
838 | struct sk_buff *skb; | ||
839 | |||
840 | while ((skb = __skb_dequeue(rxq)) != NULL) { | ||
841 | struct page *page = NETFRONT_SKB_CB(skb)->page; | ||
842 | void *vaddr = page_address(page); | ||
843 | unsigned offset = NETFRONT_SKB_CB(skb)->offset; | ||
844 | |||
845 | memcpy(skb->data, vaddr + offset, | ||
846 | skb_headlen(skb)); | ||
847 | |||
848 | if (page != skb_shinfo(skb)->frags[0].page) | ||
849 | __free_page(page); | ||
850 | |||
851 | /* Ethernet work: Delayed to here as it peeks at the header. */ | ||
852 | skb->protocol = eth_type_trans(skb, dev); | ||
853 | |||
854 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
855 | if (skb_checksum_setup(skb)) { | ||
856 | kfree_skb(skb); | ||
857 | packets_dropped++; | ||
858 | np->stats.rx_errors++; | ||
859 | continue; | ||
860 | } | ||
861 | } | ||
862 | |||
863 | np->stats.rx_packets++; | ||
864 | np->stats.rx_bytes += skb->len; | ||
865 | |||
866 | /* Pass it up. */ | ||
867 | netif_receive_skb(skb); | ||
868 | dev->last_rx = jiffies; | ||
869 | } | ||
870 | |||
871 | return packets_dropped; | ||
872 | } | ||
873 | |||
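(The memcpy above moves at most RX_COPY_THRESHOLD bytes, since xennet_poll() caps the skb_put() length at that value; anything beyond the threshold stays in the page fragment rather than the linear area.)
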
874 | static int xennet_poll(struct net_device *dev, int *pbudget) | ||
875 | { | ||
876 | struct netfront_info *np = netdev_priv(dev); | ||
877 | struct sk_buff *skb; | ||
878 | struct netfront_rx_info rinfo; | ||
879 | struct xen_netif_rx_response *rx = &rinfo.rx; | ||
880 | struct xen_netif_extra_info *extras = rinfo.extras; | ||
881 | RING_IDX i, rp; | ||
882 | int work_done, budget, more_to_do = 1; | ||
883 | struct sk_buff_head rxq; | ||
884 | struct sk_buff_head errq; | ||
885 | struct sk_buff_head tmpq; | ||
886 | unsigned long flags; | ||
887 | unsigned int len; | ||
888 | int err; | ||
889 | |||
890 | spin_lock(&np->rx_lock); | ||
891 | |||
892 | if (unlikely(!netif_carrier_ok(dev))) { | ||
893 | spin_unlock(&np->rx_lock); | ||
894 | return 0; | ||
895 | } | ||
896 | |||
897 | skb_queue_head_init(&rxq); | ||
898 | skb_queue_head_init(&errq); | ||
899 | skb_queue_head_init(&tmpq); | ||
900 | |||
901 | budget = *pbudget; | ||
902 | if (budget > dev->quota) | ||
903 | budget = dev->quota; | ||
904 | rp = np->rx.sring->rsp_prod; | ||
905 | rmb(); /* Ensure we see queued responses up to 'rp'. */ | ||
906 | |||
907 | i = np->rx.rsp_cons; | ||
908 | work_done = 0; | ||
909 | while ((i != rp) && (work_done < budget)) { | ||
910 | memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); | ||
911 | memset(extras, 0, sizeof(rinfo.extras)); | ||
912 | |||
913 | err = xennet_get_responses(np, &rinfo, rp, &tmpq); | ||
914 | |||
915 | if (unlikely(err)) { | ||
916 | err: | ||
917 | while ((skb = __skb_dequeue(&tmpq))) | ||
918 | __skb_queue_tail(&errq, skb); | ||
919 | np->stats.rx_errors++; | ||
920 | i = np->rx.rsp_cons; | ||
921 | continue; | ||
922 | } | ||
923 | |||
924 | skb = __skb_dequeue(&tmpq); | ||
925 | |||
926 | if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { | ||
927 | struct xen_netif_extra_info *gso; | ||
928 | gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; | ||
929 | |||
930 | if (unlikely(xennet_set_skb_gso(skb, gso))) { | ||
931 | __skb_queue_head(&tmpq, skb); | ||
932 | np->rx.rsp_cons += skb_queue_len(&tmpq); | ||
933 | goto err; | ||
934 | } | ||
935 | } | ||
936 | |||
937 | NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; | ||
938 | NETFRONT_SKB_CB(skb)->offset = rx->offset; | ||
939 | |||
940 | len = rx->status; | ||
941 | if (len > RX_COPY_THRESHOLD) | ||
942 | len = RX_COPY_THRESHOLD; | ||
943 | skb_put(skb, len); | ||
944 | |||
945 | if (rx->status > len) { | ||
946 | skb_shinfo(skb)->frags[0].page_offset = | ||
947 | rx->offset + len; | ||
948 | skb_shinfo(skb)->frags[0].size = rx->status - len; | ||
949 | skb->data_len = rx->status - len; | ||
950 | } else { | ||
951 | skb_shinfo(skb)->frags[0].page = NULL; | ||
952 | skb_shinfo(skb)->nr_frags = 0; | ||
953 | } | ||
954 | |||
955 | i = xennet_fill_frags(np, skb, &tmpq); | ||
956 | |||
957 | /* | ||
958 | * Truesize approximates the size of true data plus | ||
959 | * any supervisor overheads. Adding hypervisor | ||
960 | * overheads has been shown to significantly reduce | ||
961 | * achievable bandwidth with the default receive | ||
962 | * buffer size. It is therefore not wise to account | ||
963 | * for it here. | ||
964 | * | ||
965 | * After alloc_skb(RX_COPY_THRESHOLD), truesize is set | ||
966 | * to RX_COPY_THRESHOLD + the supervisor | ||
967 | * overheads. Here, we add the size of the data pulled | ||
968 | * in xennet_fill_frags(). | ||
969 | * | ||
970 | * We also adjust for any unused space in the main | ||
971 | * data area by subtracting (RX_COPY_THRESHOLD - | ||
972 | * len). This is especially important with drivers | ||
973 | * which split incoming packets into header and data, | ||
974 | * using only 66 bytes of the main data area (see the | ||
975 | * e1000 driver for example.) On such systems, | ||
976 |  * without this last adjustment, our achievable | ||
977 |  * receive throughput using the standard receive | ||
978 | * buffer size was cut by 25%(!!!). | ||
979 | */ | ||
980 | skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); | ||
981 | skb->len += skb->data_len; | ||
982 | |||
983 | if (rx->flags & NETRXF_csum_blank) | ||
984 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
985 | else if (rx->flags & NETRXF_data_validated) | ||
986 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
987 | |||
988 | __skb_queue_tail(&rxq, skb); | ||
989 | |||
990 | np->rx.rsp_cons = ++i; | ||
991 | work_done++; | ||
992 | } | ||
993 | |||
994 | while ((skb = __skb_dequeue(&errq))) | ||
995 | kfree_skb(skb); | ||
996 | |||
997 | work_done -= handle_incoming_queue(dev, &rxq); | ||
998 | |||
999 | /* If we get a callback with very few responses, reduce fill target. */ | ||
1000 | /* NB. Note exponential increase, linear decrease. */ | ||
1001 | if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > | ||
1002 | ((3*np->rx_target) / 4)) && | ||
1003 | (--np->rx_target < np->rx_min_target)) | ||
1004 | np->rx_target = np->rx_min_target; | ||
1005 | |||
1006 | xennet_alloc_rx_buffers(dev); | ||
1007 | |||
1008 | *pbudget -= work_done; | ||
1009 | dev->quota -= work_done; | ||
1010 | |||
1011 | if (work_done < budget) { | ||
1012 | local_irq_save(flags); | ||
1013 | |||
1014 | RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); | ||
1015 | if (!more_to_do) | ||
1016 | __netif_rx_complete(dev); | ||
1017 | |||
1018 | local_irq_restore(flags); | ||
1019 | } | ||
1020 | |||
1021 | spin_unlock(&np->rx_lock); | ||
1022 | |||
1023 | return more_to_do; | ||
1024 | } | ||
1025 | |||
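(Putting numbers on the truesize comment in xennet_poll() above: with RX_COPY_THRESHOLD == 256 and a 66-byte linear pull, len == 66, the statement skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len) credits the fragment bytes while reclaiming the 256 - 66 = 190 bytes of unused linear area.)
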
1026 | static int xennet_change_mtu(struct net_device *dev, int mtu) | ||
1027 | { | ||
1028 | int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN; | ||
1029 | |||
1030 | if (mtu > max) | ||
1031 | return -EINVAL; | ||
1032 | dev->mtu = mtu; | ||
1033 | return 0; | ||
1034 | } | ||
1035 | |||
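(With scatter/gather enabled, the MTU ceiling above works out to 65535 - ETH_HLEN = 65535 - 14 = 65521 bytes; without it the limit stays at ETH_DATA_LEN, i.e. 1500.)
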
1036 | static void xennet_release_tx_bufs(struct netfront_info *np) | ||
1037 | { | ||
1038 | struct sk_buff *skb; | ||
1039 | int i; | ||
1040 | |||
1041 | for (i = 0; i < NET_TX_RING_SIZE; i++) { | ||
1042 | /* Skip over entries which are actually freelist references */ | ||
1043 | if ((unsigned long)np->tx_skbs[i].skb < PAGE_OFFSET) | ||
1044 | continue; | ||
1045 | |||
1046 | skb = np->tx_skbs[i].skb; | ||
1047 | gnttab_end_foreign_access_ref(np->grant_tx_ref[i], | ||
1048 | GNTMAP_readonly); | ||
1049 | gnttab_release_grant_reference(&np->gref_tx_head, | ||
1050 | np->grant_tx_ref[i]); | ||
1051 | np->grant_tx_ref[i] = GRANT_INVALID_REF; | ||
1052 | add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i); | ||
1053 | dev_kfree_skb_irq(skb); | ||
1054 | } | ||
1055 | } | ||
1056 | |||
1057 | static void xennet_release_rx_bufs(struct netfront_info *np) | ||
1058 | { | ||
1059 | struct mmu_update *mmu = np->rx_mmu; | ||
1060 | struct multicall_entry *mcl = np->rx_mcl; | ||
1061 | struct sk_buff_head free_list; | ||
1062 | struct sk_buff *skb; | ||
1063 | unsigned long mfn; | ||
1064 | int xfer = 0, noxfer = 0, unused = 0; | ||
1065 | int id, ref; | ||
1066 | |||
1067 | dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n", | ||
1068 | __func__); | ||
1069 | return; | ||
1070 | |||
1071 | skb_queue_head_init(&free_list); | ||
1072 | |||
1073 | spin_lock_bh(&np->rx_lock); | ||
1074 | |||
1075 | for (id = 0; id < NET_RX_RING_SIZE; id++) { | ||
1076 | ref = np->grant_rx_ref[id]; | ||
1077 | if (ref == GRANT_INVALID_REF) { | ||
1078 | unused++; | ||
1079 | continue; | ||
1080 | } | ||
1081 | |||
1082 | skb = np->rx_skbs[id]; | ||
1083 | mfn = gnttab_end_foreign_transfer_ref(ref); | ||
1084 | gnttab_release_grant_reference(&np->gref_rx_head, ref); | ||
1085 | np->grant_rx_ref[id] = GRANT_INVALID_REF; | ||
1086 | |||
1087 | if (0 == mfn) { | ||
1088 | skb_shinfo(skb)->nr_frags = 0; | ||
1089 | dev_kfree_skb(skb); | ||
1090 | noxfer++; | ||
1091 | continue; | ||
1092 | } | ||
1093 | |||
1094 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | ||
1095 | /* Remap the page. */ | ||
1096 | struct page *page = skb_shinfo(skb)->frags[0].page; | ||
1097 | unsigned long pfn = page_to_pfn(page); | ||
1098 | void *vaddr = page_address(page); | ||
1099 | |||
1100 | MULTI_update_va_mapping(mcl, (unsigned long)vaddr, | ||
1101 | mfn_pte(mfn, PAGE_KERNEL), | ||
1102 | 0); | ||
1103 | mcl++; | ||
1104 | mmu->ptr = ((u64)mfn << PAGE_SHIFT) | ||
1105 | | MMU_MACHPHYS_UPDATE; | ||
1106 | mmu->val = pfn; | ||
1107 | mmu++; | ||
1108 | |||
1109 | set_phys_to_machine(pfn, mfn); | ||
1110 | } | ||
1111 | __skb_queue_tail(&free_list, skb); | ||
1112 | xfer++; | ||
1113 | } | ||
1114 | |||
1115 | dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n", | ||
1116 | __func__, xfer, noxfer, unused); | ||
1117 | |||
1118 | if (xfer) { | ||
1119 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | ||
1120 | /* Do all the remapping work and M2P updates. */ | ||
1121 | MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu, | ||
1122 | 0, DOMID_SELF); | ||
1123 | mcl++; | ||
1124 | HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl); | ||
1125 | } | ||
1126 | } | ||
1127 | |||
1128 | while ((skb = __skb_dequeue(&free_list)) != NULL) | ||
1129 | dev_kfree_skb(skb); | ||
1130 | |||
1131 | spin_unlock_bh(&np->rx_lock); | ||
1132 | } | ||
1133 | |||
1134 | static void xennet_uninit(struct net_device *dev) | ||
1135 | { | ||
1136 | struct netfront_info *np = netdev_priv(dev); | ||
1137 | xennet_release_tx_bufs(np); | ||
1138 | xennet_release_rx_bufs(np); | ||
1139 | gnttab_free_grant_references(np->gref_tx_head); | ||
1140 | gnttab_free_grant_references(np->gref_rx_head); | ||
1141 | } | ||
1142 | |||
1143 | static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev) | ||
1144 | { | ||
1145 | int i, err; | ||
1146 | struct net_device *netdev; | ||
1147 | struct netfront_info *np; | ||
1148 | |||
1149 | netdev = alloc_etherdev(sizeof(struct netfront_info)); | ||
1150 | if (!netdev) { | ||
1151 | printk(KERN_WARNING "%s> alloc_etherdev failed.\n", | ||
1152 | __func__); | ||
1153 | return ERR_PTR(-ENOMEM); | ||
1154 | } | ||
1155 | |||
1156 | np = netdev_priv(netdev); | ||
1157 | np->xbdev = dev; | ||
1158 | |||
1159 | spin_lock_init(&np->tx_lock); | ||
1160 | spin_lock_init(&np->rx_lock); | ||
1161 | |||
1162 | skb_queue_head_init(&np->rx_batch); | ||
1163 | np->rx_target = RX_DFL_MIN_TARGET; | ||
1164 | np->rx_min_target = RX_DFL_MIN_TARGET; | ||
1165 | np->rx_max_target = RX_MAX_TARGET; | ||
1166 | |||
1167 | init_timer(&np->rx_refill_timer); | ||
1168 | np->rx_refill_timer.data = (unsigned long)netdev; | ||
1169 | np->rx_refill_timer.function = rx_refill_timeout; | ||
1170 | |||
1171 | /* Initialise tx_skbs as a free chain containing every entry. */ | ||
1172 | np->tx_skb_freelist = 0; | ||
1173 | for (i = 0; i < NET_TX_RING_SIZE; i++) { | ||
1174 | np->tx_skbs[i].link = i+1; | ||
1175 | np->grant_tx_ref[i] = GRANT_INVALID_REF; | ||
1176 | } | ||
1177 | |||
1178 | /* Clear out rx_skbs */ | ||
1179 | for (i = 0; i < NET_RX_RING_SIZE; i++) { | ||
1180 | np->rx_skbs[i] = NULL; | ||
1181 | np->grant_rx_ref[i] = GRANT_INVALID_REF; | ||
1182 | } | ||
1183 | |||
1184 | /* A grant for every tx ring slot */ | ||
1185 | if (gnttab_alloc_grant_references(TX_MAX_TARGET, | ||
1186 | &np->gref_tx_head) < 0) { | ||
1187 | printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); | ||
1188 | err = -ENOMEM; | ||
1189 | goto exit; | ||
1190 | } | ||
1191 | /* A grant for every rx ring slot */ | ||
1192 | if (gnttab_alloc_grant_references(RX_MAX_TARGET, | ||
1193 | &np->gref_rx_head) < 0) { | ||
1194 | printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); | ||
1195 | err = -ENOMEM; | ||
1196 | goto exit_free_tx; | ||
1197 | } | ||
1198 | |||
1199 | netdev->open = xennet_open; | ||
1200 | netdev->hard_start_xmit = xennet_start_xmit; | ||
1201 | netdev->stop = xennet_close; | ||
1202 | netdev->get_stats = xennet_get_stats; | ||
1203 | netdev->poll = xennet_poll; | ||
1204 | netdev->uninit = xennet_uninit; | ||
1205 | netdev->change_mtu = xennet_change_mtu; | ||
1206 | netdev->weight = 64; | ||
1207 | netdev->features = NETIF_F_IP_CSUM; | ||
1208 | |||
1209 | SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops); | ||
1210 | SET_MODULE_OWNER(netdev); | ||
1211 | SET_NETDEV_DEV(netdev, &dev->dev); | ||
1212 | |||
1213 | np->netdev = netdev; | ||
1214 | |||
1215 | netif_carrier_off(netdev); | ||
1216 | |||
1217 | return netdev; | ||
1218 | |||
1219 | exit_free_tx: | ||
1220 | gnttab_free_grant_references(np->gref_tx_head); | ||
1221 | exit: | ||
1222 | free_netdev(netdev); | ||
1223 | return ERR_PTR(err); | ||
1224 | } | ||
1225 | |||
1226 | /** | ||
1227 | * Entry point to this code when a new device is created. Allocate the basic | ||
1228 | * structures and the ring buffers for communication with the backend, and | ||
1229 | * inform the backend of the appropriate details for those. | ||
1230 | */ | ||
1231 | static int __devinit netfront_probe(struct xenbus_device *dev, | ||
1232 | const struct xenbus_device_id *id) | ||
1233 | { | ||
1234 | int err; | ||
1235 | struct net_device *netdev; | ||
1236 | struct netfront_info *info; | ||
1237 | |||
1238 | netdev = xennet_create_dev(dev); | ||
1239 | if (IS_ERR(netdev)) { | ||
1240 | err = PTR_ERR(netdev); | ||
1241 | xenbus_dev_fatal(dev, err, "creating netdev"); | ||
1242 | return err; | ||
1243 | } | ||
1244 | |||
1245 | info = netdev_priv(netdev); | ||
1246 | dev->dev.driver_data = info; | ||
1247 | |||
1248 | err = register_netdev(info->netdev); | ||
1249 | if (err) { | ||
1250 | printk(KERN_WARNING "%s: register_netdev err=%d\n", | ||
1251 | __func__, err); | ||
1252 | goto fail; | ||
1253 | } | ||
1254 | |||
1255 | err = xennet_sysfs_addif(info->netdev); | ||
1256 | if (err) { | ||
1257 | unregister_netdev(info->netdev); | ||
1258 | printk(KERN_WARNING "%s: add sysfs failed err=%d\n", | ||
1259 | __func__, err); | ||
1260 | goto fail; | ||
1261 | } | ||
1262 | |||
1263 | return 0; | ||
1264 | |||
1265 | fail: | ||
1266 | free_netdev(netdev); | ||
1267 | dev->dev.driver_data = NULL; | ||
1268 | return err; | ||
1269 | } | ||
1270 | |||
1271 | static void xennet_end_access(int ref, void *page) | ||
1272 | { | ||
1273 | /* This frees the page as a side-effect */ | ||
1274 | if (ref != GRANT_INVALID_REF) | ||
1275 | gnttab_end_foreign_access(ref, 0, (unsigned long)page); | ||
1276 | } | ||
1277 | |||
1278 | static void xennet_disconnect_backend(struct netfront_info *info) | ||
1279 | { | ||
1280 | /* Stop old i/f to prevent errors whilst we rebuild the state. */ | ||
1281 | spin_lock_bh(&info->rx_lock); | ||
1282 | spin_lock_irq(&info->tx_lock); | ||
1283 | netif_carrier_off(info->netdev); | ||
1284 | spin_unlock_irq(&info->tx_lock); | ||
1285 | spin_unlock_bh(&info->rx_lock); | ||
1286 | |||
1287 | if (info->netdev->irq) | ||
1288 | unbind_from_irqhandler(info->netdev->irq, info->netdev); | ||
1289 | info->evtchn = info->netdev->irq = 0; | ||
1290 | |||
1291 | /* End access and free the pages */ | ||
1292 | xennet_end_access(info->tx_ring_ref, info->tx.sring); | ||
1293 | xennet_end_access(info->rx_ring_ref, info->rx.sring); | ||
1294 | |||
1295 | info->tx_ring_ref = GRANT_INVALID_REF; | ||
1296 | info->rx_ring_ref = GRANT_INVALID_REF; | ||
1297 | info->tx.sring = NULL; | ||
1298 | info->rx.sring = NULL; | ||
1299 | } | ||
1300 | |||
1301 | /** | ||
1302 | * We are reconnecting to the backend, due to a suspend/resume, or a backend | ||
1303 | * driver restart. We tear down our netif structure and recreate it, but | ||
1304 | * leave the device-layer structures intact so that this is transparent to the | ||
1305 | * rest of the kernel. | ||
1306 | */ | ||
1307 | static int netfront_resume(struct xenbus_device *dev) | ||
1308 | { | ||
1309 | struct netfront_info *info = dev->dev.driver_data; | ||
1310 | |||
1311 | dev_dbg(&dev->dev, "%s\n", dev->nodename); | ||
1312 | |||
1313 | xennet_disconnect_backend(info); | ||
1314 | return 0; | ||
1315 | } | ||
1316 | |||
1317 | static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) | ||
1318 | { | ||
1319 | char *s, *e, *macstr; | ||
1320 | int i; | ||
1321 | |||
1322 | macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); | ||
1323 | if (IS_ERR(macstr)) | ||
1324 | return PTR_ERR(macstr); | ||
1325 | |||
1326 | for (i = 0; i < ETH_ALEN; i++) { | ||
1327 | mac[i] = simple_strtoul(s, &e, 16); | ||
1328 | if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { | ||
1329 | kfree(macstr); | ||
1330 | return -ENOENT; | ||
1331 | } | ||
1332 | s = e+1; | ||
1333 | } | ||
1334 | |||
1335 | kfree(macstr); | ||
1336 | return 0; | ||
1337 | } | ||
1338 | |||
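(Example input for xen_net_read_mac(): a xenstore "mac" node holding "00:16:3e:01:02:03" — 00:16:3e being the Xen OUI — fills mac[] with {0x00, 0x16, 0x3e, 0x01, 0x02, 0x03}; any malformed separator aborts the parse with -ENOENT.)
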
1339 | static irqreturn_t xennet_interrupt(int irq, void *dev_id) | ||
1340 | { | ||
1341 | struct net_device *dev = dev_id; | ||
1342 | struct netfront_info *np = netdev_priv(dev); | ||
1343 | unsigned long flags; | ||
1344 | |||
1345 | spin_lock_irqsave(&np->tx_lock, flags); | ||
1346 | |||
1347 | if (likely(netif_carrier_ok(dev))) { | ||
1348 | xennet_tx_buf_gc(dev); | ||
1349 | /* Under tx_lock: protects access to rx shared-ring indexes. */ | ||
1350 | if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) | ||
1351 | netif_rx_schedule(dev); | ||
1352 | } | ||
1353 | |||
1354 | spin_unlock_irqrestore(&np->tx_lock, flags); | ||
1355 | |||
1356 | return IRQ_HANDLED; | ||
1357 | } | ||
1358 | |||
1359 | static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info) | ||
1360 | { | ||
1361 | struct xen_netif_tx_sring *txs; | ||
1362 | struct xen_netif_rx_sring *rxs; | ||
1363 | int err; | ||
1364 | struct net_device *netdev = info->netdev; | ||
1365 | |||
1366 | info->tx_ring_ref = GRANT_INVALID_REF; | ||
1367 | info->rx_ring_ref = GRANT_INVALID_REF; | ||
1368 | info->rx.sring = NULL; | ||
1369 | info->tx.sring = NULL; | ||
1370 | netdev->irq = 0; | ||
1371 | |||
1372 | err = xen_net_read_mac(dev, netdev->dev_addr); | ||
1373 | if (err) { | ||
1374 | xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); | ||
1375 | goto fail; | ||
1376 | } | ||
1377 | |||
1378 | txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL); | ||
1379 | if (!txs) { | ||
1380 | err = -ENOMEM; | ||
1381 | xenbus_dev_fatal(dev, err, "allocating tx ring page"); | ||
1382 | goto fail; | ||
1383 | } | ||
1384 | SHARED_RING_INIT(txs); | ||
1385 | FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); | ||
1386 | |||
1387 | err = xenbus_grant_ring(dev, virt_to_mfn(txs)); | ||
1388 | if (err < 0) { | ||
1389 | free_page((unsigned long)txs); | ||
1390 | goto fail; | ||
1391 | } | ||
1392 | |||
1393 | info->tx_ring_ref = err; | ||
1394 | rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL); | ||
1395 | if (!rxs) { | ||
1396 | err = -ENOMEM; | ||
1397 | xenbus_dev_fatal(dev, err, "allocating rx ring page"); | ||
1398 | goto fail; | ||
1399 | } | ||
1400 | SHARED_RING_INIT(rxs); | ||
1401 | FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); | ||
1402 | |||
1403 | err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); | ||
1404 | if (err < 0) { | ||
1405 | free_page((unsigned long)rxs); | ||
1406 | goto fail; | ||
1407 | } | ||
1408 | info->rx_ring_ref = err; | ||
1409 | |||
1410 | err = xenbus_alloc_evtchn(dev, &info->evtchn); | ||
1411 | if (err) | ||
1412 | goto fail; | ||
1413 | |||
1414 | err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt, | ||
1415 | IRQF_SAMPLE_RANDOM, netdev->name, | ||
1416 | netdev); | ||
1417 | if (err < 0) | ||
1418 | goto fail; | ||
1419 | netdev->irq = err; | ||
1420 | return 0; | ||
1421 | |||
1422 | fail: | ||
1423 | return err; | ||
1424 | } | ||
1425 | |||
1426 | /* Common code used when first setting up, and when resuming. */ | ||
1427 | static int talk_to_backend(struct xenbus_device *dev, | ||
1428 | struct netfront_info *info) | ||
1429 | { | ||
1430 | const char *message; | ||
1431 | struct xenbus_transaction xbt; | ||
1432 | int err; | ||
1433 | |||
1434 | /* Create shared ring, alloc event channel. */ | ||
1435 | err = setup_netfront(dev, info); | ||
1436 | if (err) | ||
1437 | goto out; | ||
1438 | |||
1439 | again: | ||
1440 | err = xenbus_transaction_start(&xbt); | ||
1441 | if (err) { | ||
1442 | xenbus_dev_fatal(dev, err, "starting transaction"); | ||
1443 | goto destroy_ring; | ||
1444 | } | ||
1445 | |||
1446 | err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u", | ||
1447 | info->tx_ring_ref); | ||
1448 | if (err) { | ||
1449 | message = "writing tx ring-ref"; | ||
1450 | goto abort_transaction; | ||
1451 | } | ||
1452 | err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u", | ||
1453 | info->rx_ring_ref); | ||
1454 | if (err) { | ||
1455 | message = "writing rx ring-ref"; | ||
1456 | goto abort_transaction; | ||
1457 | } | ||
1458 | err = xenbus_printf(xbt, dev->nodename, | ||
1459 | "event-channel", "%u", info->evtchn); | ||
1460 | if (err) { | ||
1461 | message = "writing event-channel"; | ||
1462 | goto abort_transaction; | ||
1463 | } | ||
1464 | |||
1465 | err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", | ||
1466 | 1); | ||
1467 | if (err) { | ||
1468 | message = "writing request-rx-copy"; | ||
1469 | goto abort_transaction; | ||
1470 | } | ||
1471 | |||
1472 | err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); | ||
1473 | if (err) { | ||
1474 | message = "writing feature-rx-notify"; | ||
1475 | goto abort_transaction; | ||
1476 | } | ||
1477 | |||
1478 | err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); | ||
1479 | if (err) { | ||
1480 | message = "writing feature-sg"; | ||
1481 | goto abort_transaction; | ||
1482 | } | ||
1483 | |||
1484 | err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1); | ||
1485 | if (err) { | ||
1486 | message = "writing feature-gso-tcpv4"; | ||
1487 | goto abort_transaction; | ||
1488 | } | ||
1489 | |||
1490 | err = xenbus_transaction_end(xbt, 0); | ||
1491 | if (err) { | ||
1492 | if (err == -EAGAIN) | ||
1493 | goto again; | ||
1494 | xenbus_dev_fatal(dev, err, "completing transaction"); | ||
1495 | goto destroy_ring; | ||
1496 | } | ||
1497 | |||
1498 | return 0; | ||
1499 | |||
1500 | abort_transaction: | ||
1501 | xenbus_transaction_end(xbt, 1); | ||
1502 | xenbus_dev_fatal(dev, err, "%s", message); | ||
1503 | destroy_ring: | ||
1504 | xennet_disconnect_backend(info); | ||
1505 | out: | ||
1506 | return err; | ||
1507 | } | ||
1508 | |||
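(Sketch of the frontend's xenbus node after a successful talk_to_backend() transaction; the exact path is toolstack-dependent and assumed here to be of the form /local/domain/<domid>/device/vif/<n>:

	tx-ring-ref = "<grant ref>"
	rx-ring-ref = "<grant ref>"
	event-channel = "<port>"
	request-rx-copy = "1"
	feature-rx-notify = "1"
	feature-sg = "1"
	feature-gso-tcpv4 = "1"
)
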
1509 | static int xennet_set_sg(struct net_device *dev, u32 data) | ||
1510 | { | ||
1511 | if (data) { | ||
1512 | struct netfront_info *np = netdev_priv(dev); | ||
1513 | int val; | ||
1514 | |||
1515 | if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", | ||
1516 | "%d", &val) < 0) | ||
1517 | val = 0; | ||
1518 | if (!val) | ||
1519 | return -ENOSYS; | ||
1520 | } else if (dev->mtu > ETH_DATA_LEN) | ||
1521 | dev->mtu = ETH_DATA_LEN; | ||
1522 | |||
1523 | return ethtool_op_set_sg(dev, data); | ||
1524 | } | ||
1525 | |||
1526 | static int xennet_set_tso(struct net_device *dev, u32 data) | ||
1527 | { | ||
1528 | if (data) { | ||
1529 | struct netfront_info *np = netdev_priv(dev); | ||
1530 | int val; | ||
1531 | |||
1532 | if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, | ||
1533 | "feature-gso-tcpv4", "%d", &val) < 0) | ||
1534 | val = 0; | ||
1535 | if (!val) | ||
1536 | return -ENOSYS; | ||
1537 | } | ||
1538 | |||
1539 | return ethtool_op_set_tso(dev, data); | ||
1540 | } | ||
1541 | |||
1542 | static void xennet_set_features(struct net_device *dev) | ||
1543 | { | ||
1544 | /* Turn off all GSO bits except ROBUST. */ | ||
1545 | dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1; | ||
1546 | dev->features |= NETIF_F_GSO_ROBUST; | ||
1547 | xennet_set_sg(dev, 0); | ||
1548 | |||
1549 | /* We need checksum offload to enable scatter/gather and TSO. */ | ||
1550 | if (!(dev->features & NETIF_F_IP_CSUM)) | ||
1551 | return; | ||
1552 | |||
1553 | if (!xennet_set_sg(dev, 1)) | ||
1554 | xennet_set_tso(dev, 1); | ||
1555 | } | ||
1556 | |||
1557 | static int xennet_connect(struct net_device *dev) | ||
1558 | { | ||
1559 | struct netfront_info *np = netdev_priv(dev); | ||
1560 | int i, requeue_idx, err; | ||
1561 | struct sk_buff *skb; | ||
1562 | grant_ref_t ref; | ||
1563 | struct xen_netif_rx_request *req; | ||
1564 | unsigned int feature_rx_copy; | ||
1565 | |||
1566 | err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, | ||
1567 | "feature-rx-copy", "%u", &feature_rx_copy); | ||
1568 | if (err != 1) | ||
1569 | feature_rx_copy = 0; | ||
1570 | |||
1571 | if (!feature_rx_copy) { | ||
1572 | dev_info(&dev->dev, | ||
1573 | "backend does not support copying recieve path"); | ||
1574 | return -ENODEV; | ||
1575 | } | ||
1576 | |||
1577 | err = talk_to_backend(np->xbdev, np); | ||
1578 | if (err) | ||
1579 | return err; | ||
1580 | |||
1581 | xennet_set_features(dev); | ||
1582 | |||
1583 | spin_lock_bh(&np->rx_lock); | ||
1584 | spin_lock_irq(&np->tx_lock); | ||
1585 | |||
1586 | /* Step 1: Discard all pending TX packet fragments. */ | ||
1587 | xennet_release_tx_bufs(np); | ||
1588 | |||
1589 | /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ | ||
1590 | for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { | ||
1591 | if (!np->rx_skbs[i]) | ||
1592 | continue; | ||
1593 | |||
1594 | skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); | ||
1595 | ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); | ||
1596 | req = RING_GET_REQUEST(&np->rx, requeue_idx); | ||
1597 | |||
1598 | gnttab_grant_foreign_access_ref( | ||
1599 | ref, np->xbdev->otherend_id, | ||
1600 | pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> | ||
1601 | frags->page)), | ||
1602 | 0); | ||
1603 | req->gref = ref; | ||
1604 | req->id = requeue_idx; | ||
1605 | |||
1606 | requeue_idx++; | ||
1607 | } | ||
1608 | |||
1609 | np->rx.req_prod_pvt = requeue_idx; | ||
1610 | |||
1611 | /* | ||
1612 | * Step 3: All public and private state should now be sane. Get | ||
1613 | * ready to start sending and receiving packets and give the driver | ||
1614 | * domain a kick because we've probably just requeued some | ||
1615 | * packets. | ||
1616 | */ | ||
1617 | netif_carrier_on(np->netdev); | ||
1618 | notify_remote_via_irq(np->netdev->irq); | ||
1619 | xennet_tx_buf_gc(dev); | ||
1620 | xennet_alloc_rx_buffers(dev); | ||
1621 | |||
1622 | spin_unlock_irq(&np->tx_lock); | ||
1623 | spin_unlock_bh(&np->rx_lock); | ||
1624 | |||
1625 | return 0; | ||
1626 | } | ||
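
Step 2 above is a compaction pass: surviving RX buffers, left scattered across
the ring by whatever was in flight at suspend time, are slid down to a dense
prefix so the producer index can simply be set just past them. A standalone
sketch of that idea (plain C, with void * standing in for the skb/grant pairs):

	#include <stddef.h>

	/* Compact the non-NULL entries of slots[0..n) to the front and
	 * return how many there were -- the new producer index. */
	static size_t compact(void *slots[], size_t n)
	{
		size_t i, live = 0;

		for (i = 0; i < n; i++) {
			if (!slots[i])
				continue;	/* hole left by a consumed buffer */
			slots[live++] = slots[i];
		}
		for (i = live; i < n; i++)
			slots[i] = NULL;	/* scrub the now-unused tail */
		return live;
	}
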
1627 | |||
1628 | /* | ||
1629 | * Callback received when the backend's state changes. | ||
1630 | */ | ||
1631 | static void backend_changed(struct xenbus_device *dev, | ||
1632 | enum xenbus_state backend_state) | ||
1633 | { | ||
1634 | struct netfront_info *np = dev->dev.driver_data; | ||
1635 | struct net_device *netdev = np->netdev; | ||
1636 | |||
1637 | dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); | ||
1638 | |||
1639 | switch (backend_state) { | ||
1640 | case XenbusStateInitialising: | ||
1641 | case XenbusStateInitialised: | ||
1642 | case XenbusStateConnected: | ||
1643 | case XenbusStateUnknown: | ||
1644 | case XenbusStateClosed: | ||
1645 | break; | ||
1646 | |||
1647 | case XenbusStateInitWait: | ||
1648 | if (dev->state != XenbusStateInitialising) | ||
1649 | break; | ||
1650 | if (xennet_connect(netdev) != 0) | ||
1651 | break; | ||
1652 | xenbus_switch_state(dev, XenbusStateConnected); | ||
1653 | break; | ||
1654 | |||
1655 | case XenbusStateClosing: | ||
1656 | xenbus_frontend_closed(dev); | ||
1657 | break; | ||
1658 | } | ||
1659 | } | ||
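
For reference, the frontend's reaction to each backend state, as implemented in
backend_changed() above:

	backend reports      frontend reaction
	---------------      ---------------------------------------------
	InitWait             if still Initialising: xennet_connect(), then
	                     announce XenbusStateConnected
	Closing              finish the teardown handshake via
	                     xenbus_frontend_closed()
	any other state      ignored; no transition required
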
1660 | |||
1661 | static struct ethtool_ops xennet_ethtool_ops = | ||
1662 | { | ||
1663 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
1664 | .set_tx_csum = ethtool_op_set_tx_csum, | ||
1665 | .get_sg = ethtool_op_get_sg, | ||
1666 | .set_sg = xennet_set_sg, | ||
1667 | .get_tso = ethtool_op_get_tso, | ||
1668 | .set_tso = xennet_set_tso, | ||
1669 | .get_link = ethtool_op_get_link, | ||
1670 | }; | ||
1671 | |||
1672 | #ifdef CONFIG_SYSFS | ||
1673 | static ssize_t show_rxbuf_min(struct device *dev, | ||
1674 | struct device_attribute *attr, char *buf) | ||
1675 | { | ||
1676 | struct net_device *netdev = to_net_dev(dev); | ||
1677 | struct netfront_info *info = netdev_priv(netdev); | ||
1678 | |||
1679 | return sprintf(buf, "%u\n", info->rx_min_target); | ||
1680 | } | ||
1681 | |||
1682 | static ssize_t store_rxbuf_min(struct device *dev, | ||
1683 | struct device_attribute *attr, | ||
1684 | const char *buf, size_t len) | ||
1685 | { | ||
1686 | struct net_device *netdev = to_net_dev(dev); | ||
1687 | struct netfront_info *np = netdev_priv(netdev); | ||
1688 | char *endp; | ||
1689 | unsigned long target; | ||
1690 | |||
1691 | if (!capable(CAP_NET_ADMIN)) | ||
1692 | return -EPERM; | ||
1693 | |||
1694 | target = simple_strtoul(buf, &endp, 0); | ||
1695 | if (endp == buf) | ||
1696 | return -EBADMSG; | ||
1697 | |||
1698 | if (target < RX_MIN_TARGET) | ||
1699 | target = RX_MIN_TARGET; | ||
1700 | if (target > RX_MAX_TARGET) | ||
1701 | target = RX_MAX_TARGET; | ||
1702 | |||
1703 | spin_lock_bh(&np->rx_lock); | ||
1704 | if (target > np->rx_max_target) | ||
1705 | np->rx_max_target = target; | ||
1706 | np->rx_min_target = target; | ||
1707 | if (target > np->rx_target) | ||
1708 | np->rx_target = target; | ||
1709 | |||
1710 | xennet_alloc_rx_buffers(netdev); | ||
1711 | |||
1712 | spin_unlock_bh(&np->rx_lock); | ||
1713 | return len; | ||
1714 | } | ||
1715 | |||
1716 | static ssize_t show_rxbuf_max(struct device *dev, | ||
1717 | struct device_attribute *attr, char *buf) | ||
1718 | { | ||
1719 | struct net_device *netdev = to_net_dev(dev); | ||
1720 | struct netfront_info *info = netdev_priv(netdev); | ||
1721 | |||
1722 | return sprintf(buf, "%u\n", info->rx_max_target); | ||
1723 | } | ||
1724 | |||
1725 | static ssize_t store_rxbuf_max(struct device *dev, | ||
1726 | struct device_attribute *attr, | ||
1727 | const char *buf, size_t len) | ||
1728 | { | ||
1729 | struct net_device *netdev = to_net_dev(dev); | ||
1730 | struct netfront_info *np = netdev_priv(netdev); | ||
1731 | char *endp; | ||
1732 | unsigned long target; | ||
1733 | |||
1734 | if (!capable(CAP_NET_ADMIN)) | ||
1735 | return -EPERM; | ||
1736 | |||
1737 | target = simple_strtoul(buf, &endp, 0); | ||
1738 | if (endp == buf) | ||
1739 | return -EBADMSG; | ||
1740 | |||
1741 | if (target < RX_MIN_TARGET) | ||
1742 | target = RX_MIN_TARGET; | ||
1743 | if (target > RX_MAX_TARGET) | ||
1744 | target = RX_MAX_TARGET; | ||
1745 | |||
1746 | spin_lock_bh(&np->rx_lock); | ||
1747 | if (target < np->rx_min_target) | ||
1748 | np->rx_min_target = target; | ||
1749 | np->rx_max_target = target; | ||
1750 | if (target < np->rx_target) | ||
1751 | np->rx_target = target; | ||
1752 | |||
1753 | xennet_alloc_rx_buffers(netdev); | ||
1754 | |||
1755 | spin_unlock_bh(&np->rx_lock); | ||
1756 | return len; | ||
1757 | } | ||
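
The two store_*() handlers above maintain a three-value invariant,
rx_min_target <= rx_target <= rx_max_target, by clamping the written value to
the compile-time bounds and then dragging the opposite limit along with it.
(The attributes appear under the netdevice's sysfs directory, e.g.
/sys/class/net/eth0/rxbuf_min.) A standalone sketch of the min-side logic,
with placeholder bounds:

	#define RX_MIN_TARGET 8		/* placeholder bounds for the sketch */
	#define RX_MAX_TARGET 256

	struct rx_targets { unsigned long min, max, cur; };

	static void set_min_target(struct rx_targets *t, unsigned long v)
	{
		if (v < RX_MIN_TARGET)
			v = RX_MIN_TARGET;	/* clamp into the legal range */
		if (v > RX_MAX_TARGET)
			v = RX_MAX_TARGET;
		if (v > t->max)
			t->max = v;		/* drag the max up with us */
		t->min = v;
		if (v > t->cur)
			t->cur = v;		/* current target never below min */
	}
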
1758 | |||
1759 | static ssize_t show_rxbuf_cur(struct device *dev, | ||
1760 | struct device_attribute *attr, char *buf) | ||
1761 | { | ||
1762 | struct net_device *netdev = to_net_dev(dev); | ||
1763 | struct netfront_info *info = netdev_priv(netdev); | ||
1764 | |||
1765 | return sprintf(buf, "%u\n", info->rx_target); | ||
1766 | } | ||
1767 | |||
1768 | static struct device_attribute xennet_attrs[] = { | ||
1769 | __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), | ||
1770 | __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), | ||
1771 | __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), | ||
1772 | }; | ||
1773 | |||
1774 | static int xennet_sysfs_addif(struct net_device *netdev) | ||
1775 | { | ||
1776 | int i; | ||
1777 | int err; | ||
1778 | |||
1779 | for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { | ||
1780 | err = device_create_file(&netdev->dev, | ||
1781 | &xennet_attrs[i]); | ||
1782 | if (err) | ||
1783 | goto fail; | ||
1784 | } | ||
1785 | return 0; | ||
1786 | |||
1787 | fail: | ||
1788 | while (--i >= 0) | ||
1789 | device_remove_file(&netdev->dev, &xennet_attrs[i]); | ||
1790 | return err; | ||
1791 | } | ||
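
xennet_sysfs_addif() above uses the classic partial-failure unwind: if creating
attribute i fails, walk back over attributes 0..i-1 (the only ones that
succeeded) in reverse order. The same shape in isolation, with hypothetical
create_one()/remove_one() stand-ins:

	static int create_one(int i);		/* assumed: 0 on success */
	static void remove_one(int i);

	static int create_all(int n)
	{
		int i, err;

		for (i = 0; i < n; i++) {
			err = create_one(i);
			if (err)
				goto fail;
		}
		return 0;

	fail:
		while (--i >= 0)	/* i itself failed; undo 0..i-1 */
			remove_one(i);
		return err;
	}
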
1792 | |||
1793 | static void xennet_sysfs_delif(struct net_device *netdev) | ||
1794 | { | ||
1795 | int i; | ||
1796 | |||
1797 | for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) | ||
1798 | device_remove_file(&netdev->dev, &xennet_attrs[i]); | ||
1799 | } | ||
1800 | |||
1801 | #endif /* CONFIG_SYSFS */ | ||
1802 | |||
1803 | static struct xenbus_device_id netfront_ids[] = { | ||
1804 | { "vif" }, | ||
1805 | { "" } | ||
1806 | }; | ||
1807 | |||
1808 | |||
1809 | static int __devexit xennet_remove(struct xenbus_device *dev) | ||
1810 | { | ||
1811 | struct netfront_info *info = dev->dev.driver_data; | ||
1812 | |||
1813 | dev_dbg(&dev->dev, "%s\n", dev->nodename); | ||
1814 | |||
1815 | unregister_netdev(info->netdev); | ||
1816 | |||
1817 | xennet_disconnect_backend(info); | ||
1818 | |||
1819 | del_timer_sync(&info->rx_refill_timer); | ||
1820 | |||
1821 | xennet_sysfs_delif(info->netdev); | ||
1822 | |||
1823 | free_netdev(info->netdev); | ||
1824 | |||
1825 | return 0; | ||
1826 | } | ||
1827 | |||
1828 | static struct xenbus_driver netfront = { | ||
1829 | .name = "vif", | ||
1830 | .owner = THIS_MODULE, | ||
1831 | .ids = netfront_ids, | ||
1832 | .probe = netfront_probe, | ||
1833 | .remove = __devexit_p(xennet_remove), | ||
1834 | .resume = netfront_resume, | ||
1835 | .otherend_changed = backend_changed, | ||
1836 | }; | ||
1837 | |||
1838 | static int __init netif_init(void) | ||
1839 | { | ||
1840 | if (!is_running_on_xen()) | ||
1841 | return -ENODEV; | ||
1842 | |||
1843 | if (is_initial_xendomain()) | ||
1844 | return 0; | ||
1845 | |||
1846 | printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n"); | ||
1847 | |||
1848 | return xenbus_register_frontend(&netfront); | ||
1849 | } | ||
1850 | module_init(netif_init); | ||
1851 | |||
1852 | |||
1853 | static void __exit netif_exit(void) | ||
1854 | { | ||
1855 | if (is_initial_xendomain()) | ||
1856 | return; | ||
1857 | |||
1858 | xenbus_unregister_driver(&netfront); | ||
1859 | } | ||
1860 | module_exit(netif_exit); | ||
1861 | |||
1862 | MODULE_DESCRIPTION("Xen virtual network device frontend"); | ||
1863 | MODULE_LICENSE("GPL"); | ||