author		Dmitry Torokhov <dmitry.torokhov@gmail.com>	2015-02-10 14:35:36 -0500
committer	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2015-02-10 14:35:36 -0500
commit		4ba24fef3eb3b142197135223b90ced2f319cd53 (patch)
tree		a20c125b27740ec7b4c761b11d801108e1b316b2 /drivers/net/xen-netfront.c
parent		47c1ffb2b6b630894e9a16442611c056ab21c057 (diff)
parent		98a4a59ee31a12105a2b84f5b8b515ac2cb208ef (diff)
Merge branch 'next' into for-linus
Prepare first round of input updates for 3.20.
Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--	drivers/net/xen-netfront.c	290
1 file changed, 77 insertions, 213 deletions
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ca82f545ec2c..22bcb4e12e2a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -77,7 +77,9 @@ struct netfront_cb {
 
 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
-#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
+
+/* Minimum number of Rx slots (includes slot for GSO metadata). */
+#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
 
 /* Queue name is interface name with "-qNNN" appended */
 #define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
@@ -137,13 +139,6 @@ struct netfront_queue {
 	struct xen_netif_rx_front_ring rx;
 	int rx_ring_ref;
 
-	/* Receive-ring batched refills. */
-#define RX_MIN_TARGET 8
-#define RX_DFL_MIN_TARGET 64
-#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
-	unsigned rx_min_target, rx_max_target, rx_target;
-	struct sk_buff_head rx_batch;
-
 	struct timer_list rx_refill_timer;
 
 	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
@@ -251,7 +246,7 @@ static void rx_refill_timeout(unsigned long data)
 static int netfront_tx_slot_available(struct netfront_queue *queue)
 {
 	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
-		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
+		(NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
 }
 
 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
@@ -265,77 +260,55 @@ static void xennet_maybe_wake_tx(struct netfront_queue *queue)
 		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
 }
 
-static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
+
+static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 {
-	unsigned short id;
 	struct sk_buff *skb;
 	struct page *page;
-	int i, batch_target, notify;
-	RING_IDX req_prod = queue->rx.req_prod_pvt;
-	grant_ref_t ref;
-	unsigned long pfn;
-	void *vaddr;
-	struct xen_netif_rx_request *req;
 
-	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
-		return;
+	skb = __netdev_alloc_skb(queue->info->netdev,
+				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
+				 GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!skb))
+		return NULL;
 
-	/*
-	 * Allocate skbuffs greedily, even though we batch updates to the
-	 * receive ring. This creates a less bursty demand on the memory
-	 * allocator, so should reduce the chance of failed allocation requests
-	 * both for ourself and for other kernel subsystems.
-	 */
-	batch_target = queue->rx_target - (req_prod - queue->rx.rsp_cons);
-	for (i = skb_queue_len(&queue->rx_batch); i < batch_target; i++) {
-		skb = __netdev_alloc_skb(queue->info->netdev,
-					 RX_COPY_THRESHOLD + NET_IP_ALIGN,
-					 GFP_ATOMIC | __GFP_NOWARN);
-		if (unlikely(!skb))
-			goto no_skb;
-
-		/* Align ip header to a 16 bytes boundary */
-		skb_reserve(skb, NET_IP_ALIGN);
-
-		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
-		if (!page) {
-			kfree_skb(skb);
-no_skb:
-			/* Could not allocate any skbuffs. Try again later. */
-			mod_timer(&queue->rx_refill_timer,
-				  jiffies + (HZ/10));
-
-			/* Any skbuffs queued for refill? Force them out. */
-			if (i != 0)
-				goto refill;
-			break;
-		}
-
-		skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
-		__skb_queue_tail(&queue->rx_batch, skb);
+	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+	if (!page) {
+		kfree_skb(skb);
+		return NULL;
 	}
+	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
+
+	/* Align ip header to a 16 bytes boundary */
+	skb_reserve(skb, NET_IP_ALIGN);
+	skb->dev = queue->info->netdev;
+
+	return skb;
+}
+
+
+static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
+{
+	RING_IDX req_prod = queue->rx.req_prod_pvt;
+	int notify;
 
-	/* Is the batch large enough to be worthwhile? */
-	if (i < (queue->rx_target/2)) {
-		if (req_prod > queue->rx.sring->req_prod)
-			goto push;
+	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
 		return;
-	}
 
-	/* Adjust our fill target if we risked running out of buffers. */
-	if (((req_prod - queue->rx.sring->rsp_prod) < (queue->rx_target / 4)) &&
-	    ((queue->rx_target *= 2) > queue->rx_max_target))
-		queue->rx_target = queue->rx_max_target;
+	for (req_prod = queue->rx.req_prod_pvt;
+	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
+	     req_prod++) {
+		struct sk_buff *skb;
+		unsigned short id;
+		grant_ref_t ref;
+		unsigned long pfn;
+		struct xen_netif_rx_request *req;
 
-refill:
-	for (i = 0; ; i++) {
-		skb = __skb_dequeue(&queue->rx_batch);
-		if (skb == NULL)
+		skb = xennet_alloc_one_rx_buffer(queue);
+		if (!skb)
 			break;
 
-		skb->dev = queue->info->netdev;
-
-		id = xennet_rxidx(req_prod + i);
+		id = xennet_rxidx(req_prod);
 
 		BUG_ON(queue->rx_skbs[id]);
 		queue->rx_skbs[id] = skb;
@@ -345,9 +318,8 @@ no_skb:
 		queue->grant_rx_ref[id] = ref;
 
 		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
-		vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
 
-		req = RING_GET_REQUEST(&queue->rx, req_prod + i);
+		req = RING_GET_REQUEST(&queue->rx, req_prod);
 		gnttab_grant_foreign_access_ref(ref,
 						queue->info->xbdev->otherend_id,
 						pfn_to_mfn(pfn),
@@ -357,11 +329,16 @@ no_skb:
 		req->gref = ref;
 	}
 
+	queue->rx.req_prod_pvt = req_prod;
+
+	/* Not enough requests? Try again later. */
+	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
+		return;
+	}
+
 	wmb();	/* barrier so backend seens requests */
 
-	/* Above is a suitable barrier to ensure backend will see requests. */
-	queue->rx.req_prod_pvt = req_prod + i;
-push:
 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
 	if (notify)
 		notify_remote_via_irq(queue->rx_irq);
@@ -496,9 +473,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
 		len = skb_frag_size(frag);
 		offset = frag->page_offset;
 
-		/* Data must not cross a page boundary. */
-		BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
-
 		/* Skip unused frames from start of page */
 		page += offset >> PAGE_SHIFT;
 		offset &= ~PAGE_MASK;
@@ -506,8 +480,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
 		while (len > 0) {
 			unsigned long bytes;
 
-			BUG_ON(offset >= PAGE_SIZE);
-
 			bytes = PAGE_SIZE - offset;
 			if (bytes > len)
 				bytes = len;
@@ -632,13 +604,16 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				    slots, skb->len);
 		if (skb_linearize(skb))
 			goto drop;
+		data = skb->data;
+		offset = offset_in_page(data);
+		len = skb_headlen(skb);
 	}
 
 	spin_lock_irqsave(&queue->tx_lock, flags);
 
 	if (unlikely(!netif_carrier_ok(dev) ||
 		     (slots > 1 && !xennet_can_sg(dev)) ||
-		     netif_needs_gso(skb, netif_skb_features(skb)))) {
+		     netif_needs_gso(dev, skb, netif_skb_features(skb)))) {
 		spin_unlock_irqrestore(&queue->tx_lock, flags);
 		goto drop;
 	}
@@ -1002,7 +977,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 	struct sk_buff_head rxq;
 	struct sk_buff_head errq;
 	struct sk_buff_head tmpq;
-	unsigned long flags;
 	int err;
 
 	spin_lock(&queue->rx_lock);
@@ -1070,27 +1044,16 @@ err:
 
 	work_done -= handle_incoming_queue(queue, &rxq);
 
-	/* If we get a callback with very few responses, reduce fill target. */
-	/* NB. Note exponential increase, linear decrease. */
-	if (((queue->rx.req_prod_pvt - queue->rx.sring->rsp_prod) >
-	     ((3*queue->rx_target) / 4)) &&
-	    (--queue->rx_target < queue->rx_min_target))
-		queue->rx_target = queue->rx_min_target;
-
 	xennet_alloc_rx_buffers(queue);
 
 	if (work_done < budget) {
 		int more_to_do = 0;
 
-		napi_gro_flush(napi, false);
-
-		local_irq_save(flags);
+		napi_complete(napi);
 
 		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
-		if (!more_to_do)
-			__napi_complete(napi);
-
-		local_irq_restore(flags);
+		if (more_to_do)
+			napi_schedule(napi);
 	}
 
 	spin_unlock(&queue->rx_lock);
@@ -1643,11 +1606,6 @@ static int xennet_init_queue(struct netfront_queue *queue)
 	spin_lock_init(&queue->tx_lock);
 	spin_lock_init(&queue->rx_lock);
 
-	skb_queue_head_init(&queue->rx_batch);
-	queue->rx_target = RX_DFL_MIN_TARGET;
-	queue->rx_min_target = RX_DFL_MIN_TARGET;
-	queue->rx_max_target = RX_MAX_TARGET;
-
 	init_timer(&queue->rx_refill_timer);
 	queue->rx_refill_timer.data = (unsigned long)queue;
 	queue->rx_refill_timer.function = rx_refill_timeout;
@@ -1670,7 +1628,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
 	}
 
 	/* A grant for every tx ring slot */
-	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
+	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
 					  &queue->gref_tx_head) < 0) {
 		pr_alert("can't alloc tx grant refs\n");
 		err = -ENOMEM;
@@ -1678,7 +1636,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
 	}
 
 	/* A grant for every rx ring slot */
-	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
+	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
 					  &queue->gref_rx_head) < 0) {
 		pr_alert("can't alloc rx grant refs\n");
 		err = -ENOMEM;
@@ -2146,30 +2104,18 @@ static const struct ethtool_ops xennet_ethtool_ops =
 };
 
 #ifdef CONFIG_SYSFS
-static ssize_t show_rxbuf_min(struct device *dev,
-			      struct device_attribute *attr, char *buf)
+static ssize_t show_rxbuf(struct device *dev,
+			  struct device_attribute *attr, char *buf)
 {
-	struct net_device *netdev = to_net_dev(dev);
-	struct netfront_info *info = netdev_priv(netdev);
-	unsigned int num_queues = netdev->real_num_tx_queues;
-
-	if (num_queues)
-		return sprintf(buf, "%u\n", info->queues[0].rx_min_target);
-	else
-		return sprintf(buf, "%u\n", RX_MIN_TARGET);
+	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
 }
 
-static ssize_t store_rxbuf_min(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t len)
+static ssize_t store_rxbuf(struct device *dev,
+			   struct device_attribute *attr,
+			   const char *buf, size_t len)
 {
-	struct net_device *netdev = to_net_dev(dev);
-	struct netfront_info *np = netdev_priv(netdev);
-	unsigned int num_queues = netdev->real_num_tx_queues;
 	char *endp;
 	unsigned long target;
-	unsigned int i;
-	struct netfront_queue *queue;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -2178,97 +2124,15 @@ static ssize_t store_rxbuf_min(struct device *dev,
 	if (endp == buf)
 		return -EBADMSG;
 
-	if (target < RX_MIN_TARGET)
-		target = RX_MIN_TARGET;
-	if (target > RX_MAX_TARGET)
-		target = RX_MAX_TARGET;
-
-	for (i = 0; i < num_queues; ++i) {
-		queue = &np->queues[i];
-		spin_lock_bh(&queue->rx_lock);
-		if (target > queue->rx_max_target)
-			queue->rx_max_target = target;
-		queue->rx_min_target = target;
-		if (target > queue->rx_target)
-			queue->rx_target = target;
+	/* rxbuf_min and rxbuf_max are no longer configurable. */
 
-		xennet_alloc_rx_buffers(queue);
-
-		spin_unlock_bh(&queue->rx_lock);
-	}
 	return len;
 }
 
-static ssize_t show_rxbuf_max(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	struct net_device *netdev = to_net_dev(dev);
-	struct netfront_info *info = netdev_priv(netdev);
-	unsigned int num_queues = netdev->real_num_tx_queues;
-
-	if (num_queues)
-		return sprintf(buf, "%u\n", info->queues[0].rx_max_target);
-	else
-		return sprintf(buf, "%u\n", RX_MAX_TARGET);
-}
-
-static ssize_t store_rxbuf_max(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t len)
-{
-	struct net_device *netdev = to_net_dev(dev);
-	struct netfront_info *np = netdev_priv(netdev);
-	unsigned int num_queues = netdev->real_num_tx_queues;
-	char *endp;
-	unsigned long target;
-	unsigned int i = 0;
-	struct netfront_queue *queue = NULL;
-
-	if (!capable(CAP_NET_ADMIN))
-		return -EPERM;
-
-	target = simple_strtoul(buf, &endp, 0);
-	if (endp == buf)
-		return -EBADMSG;
-
-	if (target < RX_MIN_TARGET)
-		target = RX_MIN_TARGET;
-	if (target > RX_MAX_TARGET)
-		target = RX_MAX_TARGET;
-
-	for (i = 0; i < num_queues; ++i) {
-		queue = &np->queues[i];
-		spin_lock_bh(&queue->rx_lock);
-		if (target < queue->rx_min_target)
-			queue->rx_min_target = target;
-		queue->rx_max_target = target;
-		if (target < queue->rx_target)
-			queue->rx_target = target;
-
-		xennet_alloc_rx_buffers(queue);
-
-		spin_unlock_bh(&queue->rx_lock);
-	}
-	return len;
-}
-
-static ssize_t show_rxbuf_cur(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	struct net_device *netdev = to_net_dev(dev);
-	struct netfront_info *info = netdev_priv(netdev);
-	unsigned int num_queues = netdev->real_num_tx_queues;
-
-	if (num_queues)
-		return sprintf(buf, "%u\n", info->queues[0].rx_target);
-	else
-		return sprintf(buf, "0\n");
-}
-
 static struct device_attribute xennet_attrs[] = {
-	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
-	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
-	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
+	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
+	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
+	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL),
 };
 
 static int xennet_sysfs_addif(struct net_device *netdev)
@@ -2300,12 +2164,6 @@ static void xennet_sysfs_delif(struct net_device *netdev)
 
 #endif /* CONFIG_SYSFS */
 
-static const struct xenbus_device_id netfront_ids[] = {
-	{ "vif" },
-	{ "" }
-};
-
-
 static int xennet_remove(struct xenbus_device *dev)
 {
 	struct netfront_info *info = dev_get_drvdata(&dev->dev);
@@ -2338,12 +2196,18 @@ static int xennet_remove(struct xenbus_device *dev)
 	return 0;
 }
 
-static DEFINE_XENBUS_DRIVER(netfront, ,
+static const struct xenbus_device_id netfront_ids[] = {
+	{ "vif" },
+	{ "" }
+};
+
+static struct xenbus_driver netfront_driver = {
+	.ids = netfront_ids,
 	.probe = netfront_probe,
 	.remove = xennet_remove,
 	.resume = netfront_resume,
 	.otherend_changed = netback_changed,
-);
+};
 
 static int __init netif_init(void)
 {